From d8cfaa2fbb24a12118dab0c270e36413ff4e6259 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 19 Mar 2020 17:56:14 +0100 Subject: [PATCH 001/892] Parsers for Domain information groper (#2499) * Add parsers for dig Signed-off-by: Jitka Obselkova * Change the base class Signed-off-by: Jitka Obselkova --- docs/shared_parsers_catalog/dig.rst | 3 + insights/parsers/dig.py | 171 ++++++++++++++++++++++++ insights/parsers/tests/test_dig.py | 193 ++++++++++++++++++++++++++++ 3 files changed, 367 insertions(+) create mode 100644 docs/shared_parsers_catalog/dig.rst create mode 100644 insights/parsers/dig.py create mode 100644 insights/parsers/tests/test_dig.py diff --git a/docs/shared_parsers_catalog/dig.rst b/docs/shared_parsers_catalog/dig.rst new file mode 100644 index 000000000..a7538f8cd --- /dev/null +++ b/docs/shared_parsers_catalog/dig.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dig + :members: + :show-inheritance: diff --git a/insights/parsers/dig.py b/insights/parsers/dig.py new file mode 100644 index 000000000..48419afd4 --- /dev/null +++ b/insights/parsers/dig.py @@ -0,0 +1,171 @@ +""" +Domain information groper (Dig) parsers +======================================= + +Parsers included in this module are: + +DigDnssec - command ``/usr/bin/dig +dnssec . SOA`` +-------------------------------------------------- +DigEdns - command ``/usr/bin/dig +edns=0 . SOA`` +------------------------------------------------ +DigNoedns - command ``/usr/bin/dig +noedns . SOA`` +-------------------------------------------------- +""" + +import re + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.parsers import SkipException +from insights.specs import Specs + + +HEADER_TEMPLATE = re.compile(r';; ->>HEADER<<-.*status: (\S+),') +RRSIG_TEMPLATE = re.compile(r'RRSIG') + + +class Dig(CommandParser): + """ + Base class for classes using ``dig`` command. + + Attributes: + status (string): Determines if the lookup succeeded. + has_signature (bool): True, if signature is present. + command (string): Specific ``dig`` command used. + + Raises: + SkipException: When content is empty or cannot be parsed. + """ + def __init__(self, context, command): + self.status = None + self.has_signature = False + self.command = command + super(Dig, self).__init__(context) + + def parse_content(self, content): + if not content: + raise SkipException('No content.') + + for line in content: + match = HEADER_TEMPLATE.search(line) + if match: + self.status = match.group(1) + if RRSIG_TEMPLATE.search(line): + self.has_signature = True + + +@parser(Specs.dig_dnssec) +class DigDnssec(Dig): + """ + Class for parsing ``/usr/bin/dig +dnssec . SOA`` command. + + Sample output of this command is:: + + ; <<>> DiG 9.11.1-P3-RedHat-9.11.1-2.P3.fc26 <<>> +dnssec nic.cz. SOA + ;; global options: +cmd + ;; Got answer: + ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 58794 + ;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1 + + ;; OPT PSEUDOSECTION: + ; EDNS: version: 0, flags: do; udp: 4096 + ;; QUESTION SECTION: + ;nic.cz. IN SOA + + ;; ANSWER SECTION: + nic.cz. 278 IN SOA a.ns.nic.cz. + hostmaster.nic.cz. 1508686803 10800 3600 1209600 7200 + nic.cz. 278 IN RRSIG SOA 13 2 1800 + 20171105143612 20171022144003 41758 nic.cz. 
+ hq3rr8dASRlucMJxu2QZnX6MVaMYsKhmGGxBOwpkeUrGjfo6clzG6MZN + 2Jy78fWYC/uwyIsI3nZMUKv573eCWg== + + ;; Query time: 22 msec + ;; SERVER: 10.38.5.26#53(10.38.5.26) + ;; WHEN: Tue Oct 24 14:28:56 CEST 2017 + ;; MSG SIZE rcvd: 189 + + Examples: + >>> dig_dnssec.status + 'NOERROR' + >>> dig_dnssec.has_signature + True + >>> dig_dnssec.command + '/usr/bin/dig +dnssec . SOA' + """ + def __init__(self, context): + super(DigDnssec, self).__init__(context, '/usr/bin/dig +dnssec . SOA') + + +@parser(Specs.dig_edns) +class DigEdns(Dig): + """ + Class for parsing ``/usr/bin/dig +edns=0 . SOA`` command. + + Sample output of this command is:: + + ; <<>> DiG 9.11.1-P3-RedHat-9.11.1-3.P3.fc26 <<>> +edns=0 . SOA + ;; global options: +cmd + ;; Got answer: + ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 11158 + ;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + + ;; OPT PSEUDOSECTION: + ; EDNS: version: 0, flags:; udp: 4096 + ;; QUESTION SECTION: + ;. IN SOA + + ;; ANSWER SECTION: + . 19766 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2017120600 1800 900 604800 86400 + + ;; Query time: 22 msec + ;; SERVER: 10.38.5.26#53(10.38.5.26) + ;; WHEN: Thu Dec 07 09:38:33 CET 2017 + ;; MSG SIZE rcvd: 103 + + Examples: + >>> dig_edns.status + 'NOERROR' + >>> dig_edns.has_signature + False + >>> dig_edns.command + '/usr/bin/dig +edns=0 . SOA' + """ + def __init__(self, context): + super(DigEdns, self).__init__(context, '/usr/bin/dig +edns=0 . SOA') + + +@parser(Specs.dig_noedns) +class DigNoedns(Dig): + """ + Class for parsing ``/usr/bin/dig +noedns . SOA`` command. + + Sample output of this command is:: + + ; <<>> DiG 9.11.1-P3-RedHat-9.11.1-3.P3.fc26 <<>> +noedns . SOA + ;; global options: +cmd + ;; Got answer: + ;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 47135 + ;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 + + ;; QUESTION SECTION: + ;. IN SOA + + ;; ANSWER SECTION: + . 20195 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2017120600 1800 900 604800 86400 + + ;; Query time: 22 msec + ;; SERVER: 10.38.5.26#53(10.38.5.26) + ;; WHEN: Thu Dec 07 09:31:24 CET 2017 + ;; MSG SIZE rcvd: 92 + + Examples: + >>> dig_noedns.status + 'NOERROR' + >>> dig_noedns.has_signature + False + >>> dig_noedns.command + '/usr/bin/dig +noedns . SOA' + """ + def __init__(self, context): + super(DigNoedns, self).__init__(context, '/usr/bin/dig +noedns . SOA') diff --git a/insights/parsers/tests/test_dig.py b/insights/parsers/tests/test_dig.py new file mode 100644 index 000000000..026866387 --- /dev/null +++ b/insights/parsers/tests/test_dig.py @@ -0,0 +1,193 @@ +import doctest +import pytest + +from insights.parsers import dig, SkipException +from insights.parsers.dig import Dig, DigDnssec, DigEdns, DigNoedns +from insights.tests import context_wrap + +SIGNED_DNSSEC = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-2.P3.fc26 <<>> +dnssec nic.cz. SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 58794 +;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags: do; udp: 4096 +;; QUESTION SECTION: +;nic.cz. IN SOA + +;; ANSWER SECTION: +nic.cz. 278 IN SOA a.ns.nic.cz. +hostmaster.nic.cz. 1508686803 10800 3600 1209600 7200 +nic.cz. 278 IN RRSIG SOA 13 2 1800 +20171105143612 20171022144003 41758 nic.cz. 
+hq3rr8dASRlucMJxu2QZnX6MVaMYsKhmGGxBOwpkeUrGjfo6clzG6MZN +2Jy78fWYC/uwyIsI3nZMUKv573eCWg== + +;; Query time: 22 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Tue Oct 24 14:28:56 CEST 2017 +;; MSG SIZE rcvd: 189""" + +NOT_SIGNED_DNSSEC = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-2.P3.fc26 <<>> +dnssec google.com. SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 13253 +;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags: do; udp: 4096 +;; QUESTION SECTION: +;google.com. IN SOA + +;; ANSWER SECTION: +google.com. 60 IN SOA ns1.google.com. +dns-admin.google.com. 173219439 900 900 1800 60 + +;; Query time: 46 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Tue Oct 24 14:28:20 CEST 2017 +;; MSG SIZE rcvd: 89""" + +BAD_DNSSEC = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-2.P3.fc26 <<>> +dnssec google.com. SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: REFUSED, id: 13253 +;; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags: do; udp: 4096 +;; QUESTION SECTION: +;google.com. IN SOA + +;; ANSWER SECTION: +google.com. 60 IN SOA ns1.google.com. +dns-admin.google.com. 173219439 900 900 1800 60 + +;; Query time: 46 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Tue Oct 24 14:28:20 CEST 2017 +;; MSG SIZE rcvd: 89""" + +GOOD_EDNS = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-3.P3.fc26 <<>> +edns=0 . SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 11158 +;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 4096 +;; QUESTION SECTION: +;. IN SOA + +;; ANSWER SECTION: +. 19766 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2017120600 1800 900 604800 86400 + +;; Query time: 22 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Thu Dec 07 09:38:33 CET 2017 +;; MSG SIZE rcvd: 103""" + +BAD_EDNS = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-3.P3.fc26 <<>> +edns=0 . SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: SERVFAIL, id: 11158 +;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1 + +;; OPT PSEUDOSECTION: +; EDNS: version: 0, flags:; udp: 4096 +;; QUESTION SECTION: +;. IN SOA + +;; ANSWER SECTION: +. 19766 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2017120600 1800 900 604800 86400 + +;; Query time: 22 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Thu Dec 07 09:38:33 CET 2017 +;; MSG SIZE rcvd: 103""" + +GOOD_NOEDNS = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-3.P3.fc26 <<>> +noedns . SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 47135 +;; flags: qr rd ra ad; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0 + +;; QUESTION SECTION: +;. IN SOA + +;; ANSWER SECTION: +. 20195 IN SOA a.root-servers.net. nstld.verisign-grs.com. 2017120600 1800 900 604800 86400 + +;; Query time: 22 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Thu Dec 07 09:31:24 CET 2017 +;; MSG SIZE rcvd: 92""" + +BAD_NOEDNS = """; <<>> DiG 9.11.1-P3-RedHat-9.11.1-2.P3.fc26 <<>> +noedns +ewf-dwqfwqf-gdsa.com SOA +;; global options: +cmd +;; Got answer: +;; ->>HEADER<<- opcode: QUERY, status: NXDOMAIN, id: 30634 +;; flags: qr rd ra; QUERY: 1, ANSWER: 0, AUTHORITY: 1, ADDITIONAL: 0 + +;; QUESTION SECTION: +;ewf-dwqfwqf-gdsa.com. IN SOA + +;; AUTHORITY SECTION: +com. 
900 IN SOA a.gtld-servers.net. +nstld.verisign-grs.com. 1508851057 1800 900 604800 86400 + +;; Query time: 29 msec +;; SERVER: 10.38.5.26#53(10.38.5.26) +;; WHEN: Tue Oct 24 15:17:53 CEST 2017 +;; MSG SIZE rcvd: 111""" + + +def test_dig_no_data(): + with pytest.raises(SkipException): + Dig(context_wrap(""), "") + + +def test_dig_dnssec(): + dig_dnssec = DigDnssec(context_wrap(SIGNED_DNSSEC)) + assert dig_dnssec.status == "NOERROR" + assert dig_dnssec.has_signature + + dig_dnssec = DigDnssec(context_wrap(NOT_SIGNED_DNSSEC)) + assert dig_dnssec.status == "NOERROR" + assert not dig_dnssec.has_signature + + dig_dnssec = DigDnssec(context_wrap(BAD_DNSSEC)) + assert dig_dnssec.status == "REFUSED" + assert not dig_dnssec.has_signature + + +def test_dig_edns(): + dig_edns = DigEdns(context_wrap(GOOD_EDNS)) + assert dig_edns.status == "NOERROR" + assert not dig_edns.has_signature + + dig_edns = DigEdns(context_wrap(BAD_EDNS)) + assert dig_edns.status == "SERVFAIL" + assert not dig_edns.has_signature + + +def test_dig_noedns(): + dig_noedns = DigNoedns(context_wrap(GOOD_NOEDNS)) + assert dig_noedns.status == "NOERROR" + assert not dig_noedns.has_signature + + dig_noedns = DigNoedns(context_wrap(BAD_NOEDNS)) + assert dig_noedns.status == "NXDOMAIN" + assert not dig_noedns.has_signature + + +def test_doc_examples(): + env = { + "dig_dnssec": DigDnssec(context_wrap(SIGNED_DNSSEC)), + "dig_edns": DigEdns(context_wrap(GOOD_EDNS)), + "dig_noedns": DigNoedns(context_wrap(GOOD_NOEDNS)), + } + failed, total = doctest.testmod(dig, globs=env) + assert failed == 0 From 1ebf1b91cebb2347bb2ace47d4d37c140c559d58 Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 19 Mar 2020 13:50:55 -0400 Subject: [PATCH 002/892] Custom net log level (#2503) * Replace net_logger with a custom log level NETWORK log level used to log any network traffic Signed-off-by: Stephen Adams * Fix bugs related to logging statements Signed-off-by: Stephen Adams * Set network in each file with a net_logger Signed-off-by: Stephen Adams * Fix NETWORK logger in collection rules Signed-off-by: Stephen Adams * Move more to network log level, add level to constants Signed-off-by: Stephen Adams * Remove log format from net debug console logs Signed-off-by: Stephen Adams --- insights/client/__init__.py | 4 +- insights/client/apps/aws/__init__.py | 12 ++--- insights/client/client.py | 7 ++- insights/client/collection_rules.py | 6 +-- insights/client/connection.py | 74 ++++++++++++++-------------- insights/client/constants.py | 1 + 6 files changed, 54 insertions(+), 50 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 27fa277bb..fcebe6a19 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -20,8 +20,8 @@ get_tags, write_tags) +NETWORK = constants.custom_network_log_level logger = logging.getLogger(__name__) -net_logger = logging.getLogger("network") class InsightsClient(object): @@ -174,7 +174,7 @@ def _fetch(self, path, etag_file, target_path, force): # If the etag was found and we are not force fetching # Then add it to the request - net_logger.info("GET %s", url) + logger.log(NETWORK, "GET %s", url) if current_etag and not force: logger.debug('Requesting new file with etag %s', current_etag) etag_headers = {'If-None-Match': current_etag} diff --git a/insights/client/apps/aws/__init__.py b/insights/client/apps/aws/__init__.py index 6fc01b4fe..18f5c50d2 100644 --- a/insights/client/apps/aws/__init__.py +++ b/insights/client/apps/aws/__init__.py @@ -11,7 +11,7 @@ from 
insights.client.utilities import write_to_disk logger = logging.getLogger(__name__) -net_logger = logging.getLogger('network') +NETWORK = constants.custom_network_log_level IDENTITY_URI = 'http://169.254.169.254/latest/dynamic/instance-identity' IDENTITY_DOC_URI = IDENTITY_URI + '/document' @@ -52,13 +52,13 @@ def get_uri(conn, uri): Fetch information from URIs ''' try: - net_logger.info('GET %s', uri) + logger.log(NETWORK, 'GET %s', uri) res = conn.session.get(uri, timeout=conn.config.http_timeout) except (ConnectionError, Timeout) as e: logger.error(e) logger.error('Could not reach %s', uri) return None - net_logger.info('Status code: %s', res.status_code) + logger.log(NETWORK, 'Status code: %s', res.status_code) return res @@ -91,8 +91,8 @@ def post_to_hydra(conn, data): # POST to hydra try: json_data = json.dumps(data) - net_logger.info('POST %s', hydra_endpoint) - net_logger.info('POST body: %s', json_data) + logger.log(NETWORK, 'POST %s', hydra_endpoint) + logger.log(NETWORK, 'POST body: %s', json_data) res = conn.session.post(hydra_endpoint, data=json_data, timeout=conn.config.http_timeout) except MissingSchema as e: logger.error(e) @@ -101,7 +101,7 @@ def post_to_hydra(conn, data): logger.error(e) logger.error('Could not reach %s', hydra_endpoint) return False - net_logger.info('Status code: %s', res.status_code) + logger.log(NETWORK, 'Status code: %s', res.status_code) try: res.raise_for_status() except HTTPError as e: diff --git a/insights/client/client.py b/insights/client/client.py index 945c1baaf..e4cc3cc89 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -26,6 +26,7 @@ from .constants import InsightsConstants as constants from .schedule import get_scheduler +NETWORK = constants.custom_network_log_level LOG_FORMAT = ("%(asctime)s %(levelname)8s %(name)s %(message)s") logger = logging.getLogger(__name__) @@ -53,6 +54,8 @@ def get_console_handler(config): target_level = logging.FATAL elif config.verbose: target_level = logging.DEBUG + elif config.net_debug: + target_level = NETWORK elif config.quiet: target_level = logging.ERROR else: @@ -68,6 +71,7 @@ def get_console_handler(config): def configure_level(config): + config_level = 'NETWORK' if config.net_debug else config.loglevel config_level = 'DEBUG' if config.verbose else config.loglevel init_log_level = logging.getLevelName(config_level) @@ -78,13 +82,12 @@ def configure_level(config): logger.setLevel(init_log_level) logging.root.setLevel(init_log_level) - net_debug_level = logging.INFO if config.net_debug else logging.ERROR - logging.getLogger('network').setLevel(net_debug_level) if not config.verbose: logging.getLogger('insights.core.dr').setLevel(logging.WARNING) def set_up_logging(config): + logging.addLevelName(NETWORK, "NETWORK") if len(logging.root.handlers) == 0: logging.root.addHandler(get_console_handler(config)) logging.root.addHandler(get_file_handler(config)) diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py index 5d09c0fea..f8fca4469 100644 --- a/insights/client/collection_rules.py +++ b/insights/client/collection_rules.py @@ -19,7 +19,7 @@ APP_NAME = constants.app_name logger = logging.getLogger(__name__) -net_logger = logging.getLogger('network') +NETWORK = constants.custom_network_log_level expected_keys = ('commands', 'files', 'patterns', 'keywords') @@ -103,7 +103,7 @@ def get_collection_rules(self, raw=False): logger.debug("Attemping to download collection rules from %s", self.collection_rules_url) - net_logger.info("GET %s", 
self.collection_rules_url) + logger.log(NETWORK, "GET %s", self.collection_rules_url) try: req = self.conn.session.get( self.collection_rules_url, headers=({'accept': 'text/plain'})) @@ -140,7 +140,7 @@ def fetch_gpg(self): self.collection_rules_url + ".asc") headers = ({'accept': 'text/plain'}) - net_logger.info("GET %s", self.collection_rules_url + '.asc') + logger.log(NETWORK, "GET %s", self.collection_rules_url + '.asc') config_sig = self.conn.session.get(self.collection_rules_url + '.asc', headers=headers) if config_sig.status_code == 200: diff --git a/insights/client/connection.py b/insights/client/connection.py index 3270767f6..736408d9d 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -38,8 +38,8 @@ warnings.simplefilter('ignore') APP_NAME = constants.app_name +NETWORK = constants.custom_network_log_level logger = logging.getLogger(__name__) -net_logger = logging.getLogger("network") """ urllib3's logging is chatty @@ -151,7 +151,7 @@ def _init_session(self): # HACKY try: # Need to make a request that will fail to get proxies set up - net_logger.info("GET %s", self.base_url) + logger.log(NETWORK, "GET %s", self.base_url) session.request( "GET", self.base_url, timeout=self.config.http_timeout) except requests.ConnectionError: @@ -321,9 +321,9 @@ def _legacy_test_urls(self, url, method): test_url + ext, timeout=self.config.http_timeout, data=test_flag) elif method is "GET": test_req = self.session.get(test_url + ext, timeout=self.config.http_timeout) - logger.info("HTTP Status Code: %d", test_req.status_code) - logger.info("HTTP Status Text: %s", test_req.reason) - logger.info("HTTP Response Text: %s", test_req.text) + logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code) + logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason) + logger.log(NETWORK, "HTTP Response Text: %s", test_req.text) # Strata returns 405 on a GET sometimes, this isn't a big deal if test_req.status_code in (200, 201): logger.info( @@ -357,9 +357,9 @@ def _test_urls(self, url, method): test_req = self.session.post(url, timeout=self.config.http_timeout, files=test_files) elif method is "GET": test_req = self.session.get(url, timeout=self.config.http_timeout) - logger.info("HTTP Status Code: %d", test_req.status_code) - logger.info("HTTP Status Text: %s", test_req.reason) - logger.info("HTTP Response Text: %s", test_req.text) + logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code) + logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason) + logger.log(NETWORK, "HTTP Response Text: %s", test_req.text) if test_req.status_code in (200, 201, 202): logger.info( "Successfully connected to: %s", url) @@ -414,16 +414,16 @@ def handle_fail_rcs(self, req): """ try: - logger.debug("HTTP Status Code: %s", req.status_code) - logger.debug("HTTP Response Text: %s", req.text) - logger.debug("HTTP Response Reason: %s", req.reason) - logger.debug("HTTP Response Content: %s", req.content) + logger.log(NETWORK, "HTTP Status Code: %s", req.status_code) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Reason: %s", req.reason) + logger.log(NETWORK, "HTTP Response Content: %s", req.content) except: logger.error("Malformed HTTP Request.") # attempt to read the HTTP response JSON message try: - logger.debug("HTTP Response Message: %s", req.json()["message"]) + logger.log(NETWORK, "HTTP Response Message: %s", req.json()["message"]) except: logger.debug("No HTTP Response message present.") @@ -436,7 +436,7 @@ def 
handle_fail_rcs(self, req): logger.error("Authorization Required.") logger.error("Please ensure correct credentials " "in " + constants.default_conf_file) - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 402: # failed registration because of entitlement limit hit logger.debug('Registration failed by 402 error.') @@ -444,10 +444,10 @@ def handle_fail_rcs(self, req): logger.error(req.json()["message"]) except LookupError: logger.error("Got 402 but no message") - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) except: logger.error("Got 402 but no message") - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 403 and self.auto_config: # Insights disabled in satellite rhsm_hostname = urlparse(self.base_url).hostname @@ -462,10 +462,10 @@ def handle_fail_rcs(self, req): write_unregistered_file(unreg_date) except LookupError: unreg_date = "412, but no unreg_date or message" - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) except: unreg_date = "412, but no unreg_date or message" - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 413: logger.error('Archive is too large to upload.') if req.status_code == 415: @@ -514,10 +514,10 @@ def get_branch_info(self): logger.debug(u'Obtaining branch information from %s', self.branch_info_url) - net_logger.info(u'GET %s', self.branch_info_url) + logger.log(NETWORK, u'GET %s', self.branch_info_url) response = self.session.get(self.branch_info_url, timeout=self.config.http_timeout) - logger.debug(u'GET branch_info status: %s', response.status_code) + logger.log(NETWORK, u'GET branch_info status: %s', response.status_code) if response.status_code != 200: logger.debug("There was an error obtaining branch information.") logger.debug(u'Bad status from server: %s', response.status_code) @@ -565,7 +565,7 @@ def create_system(self, new_machine_id=False): post_system_url = self.api_url + '/v1/systems' logger.debug("POST System: %s", post_system_url) logger.debug(data) - net_logger.info("POST %s", post_system_url) + logger.log(NETWORK, "POST %s", post_system_url) return self.session.post(post_system_url, headers={'Content-Type': 'application/json'}, data=data) @@ -585,7 +585,7 @@ def group_systems(self, group_name, systems): group_get_path = group_path + ('?display_name=%s' % quote(group_name)) logger.debug("GET group: %s", group_get_path) - net_logger.info("GET %s", group_get_path) + logger.log(NETWORK, "GET %s", group_get_path) get_group = self.session.get(group_get_path) logger.debug("GET group status: %s", get_group.status_code) if get_group.status_code == 200: @@ -595,7 +595,7 @@ def group_systems(self, group_name, systems): # Group does not exist, POST to create logger.debug("POST group") data = json.dumps({'display_name': group_name}) - net_logger.info("POST", group_path) + logger.log(NETWORK, "POST", group_path) post_group = self.session.post(group_path, headers=headers, data=data) @@ -606,7 +606,7 @@ def group_systems(self, group_name, systems): logger.debug("PUT group") data = json.dumps(systems) - net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id)) + logger.log(NETWORK, "PUT %s", group_path + ('/%s/systems' % api_group_id)) put_group = self.session.put(group_path + ('/%s/systems' % 
api_group_id), headers=headers, @@ -633,7 +633,7 @@ def _legacy_api_registration_check(self): machine_id = generate_machine_id() try: url = self.api_url + '/v1/systems/' + machine_id - net_logger.info("GET %s", url) + logger.log(NETWORK, "GET %s", url) res = self.session.get(url, timeout=self.config.http_timeout) except requests.ConnectionError: # can't connect, run connection test @@ -679,7 +679,7 @@ def _fetch_system_by_machine_id(self): url = self.base_url + '/platform/inventory/v1/hosts?insights_id=' + machine_id else: url = self.base_url + '/inventory/v1/hosts?insights_id=' + machine_id - net_logger.info("GET %s", url) + logger.log(NETWORK, "GET %s", url) res = self.session.get(url, timeout=self.config.http_timeout) except (requests.ConnectionError, requests.Timeout) as e: logger.error(e) @@ -730,7 +730,7 @@ def _legacy_unregister(self): try: logger.debug("Unregistering %s", machine_id) url = self.api_url + "/v1/systems/" + machine_id - net_logger.info("DELETE %s", url) + logger.log(NETWORK, "DELETE %s", url) self.session.delete(url) logger.info( "Successfully unregistered from the Red Hat Insights Service") @@ -751,7 +751,7 @@ def unregister(self): try: logger.debug("Unregistering host...") url = self.api_url + "/inventory/v1/hosts/" + results[0]['id'] - net_logger.info("DELETE %s", url) + logger.log(NETWORK, "DELETE %s", url) response = self.session.delete(url) response.raise_for_status() logger.info( @@ -832,10 +832,10 @@ def _legacy_upload_archive(self, data_collected, duration): logger.debug("Uploading %s to %s", data_collected, upload_url) headers = {'x-rh-collection-time': str(duration)} - net_logger.info("POST %s", upload_url) + logger.log(NETWORK, "POST %s", upload_url) upload = self.session.post(upload_url, files=files, headers=headers) - logger.debug("Upload status: %s %s %s", + logger.log(NETWORK, "Upload status: %s %s %s", upload.status_code, upload.reason, upload.text) if upload.status_code in (200, 201): the_json = json.loads(upload.text) @@ -878,10 +878,10 @@ def upload_archive(self, data_collected, content_type, duration): } logger.debug("Uploading %s to %s", data_collected, upload_url) - net_logger.info("POST %s", upload_url) + logger.log(NETWORK, "POST %s", upload_url) upload = self.session.post(upload_url, files=files, headers={}) - logger.debug("Upload status: %s %s %s", + logger.log(NETWORK, "Upload status: %s %s %s", upload.status_code, upload.reason, upload.text) logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None)) if upload.status_code in (200, 202): @@ -903,14 +903,14 @@ def _legacy_set_display_name(self, display_name): try: url = self.api_url + '/v1/systems/' + machine_id - net_logger.info("GET %s", url) + logger.log(NETWORK, "GET %s", url) res = self.session.get(url, timeout=self.config.http_timeout) old_display_name = json.loads(res.content).get('display_name', None) if display_name == old_display_name: logger.debug('Display name unchanged: %s', old_display_name) return True - net_logger.info("PUT %s", url) + logger.log(NETWORK, "PUT %s", url) res = self.session.put(url, timeout=self.config.http_timeout, headers={'Content-Type': 'application/json'}, @@ -948,7 +948,7 @@ def set_display_name(self, display_name): req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id try: - net_logger.info("PATCH %s", req_url) + logger.log(NETWORK, "PATCH %s", req_url) res = self.session.patch(req_url, json={'display_name': display_name}) except (requests.ConnectionError, requests.Timeout) as e: logger.error(e) @@ -972,7 +972,7 @@ def 
get_diagnosis(self, remediation_id=None): # validate this? params['remediation'] = remediation_id try: - net_logger.info("GET %s", diag_url) + logger.log(NETWORK, "GET %s", diag_url) res = self.session.get(diag_url, params=params, timeout=self.config.http_timeout) except (requests.ConnectionError, requests.Timeout) as e: logger.error(e) @@ -999,7 +999,7 @@ def _get(self, url): if item is not None: headers["If-None-Match"] = item.etag - net_logger.info("GET %s", url) + logger.log(NETWORK, "GET %s", url) res = self.session.get(url, headers=headers) if res.status_code in [requests.codes.OK, requests.codes.NOT_MODIFIED]: diff --git a/insights/client/constants.py b/insights/client/constants.py index b249bed01..ae7e0954c 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -14,6 +14,7 @@ class InsightsConstants(object): simple_find_replace_dir = '/etc/redhat-access-insights' default_log_file = os.path.join(log_dir, app_name + '.log') default_payload_log = os.path.join(log_dir, app_name + '-payload.log') + custom_network_log_level = 11 default_sed_file = os.path.join(default_conf_dir, '.exp.sed') base_url = 'cert-api.access.redhat.com/r/insights/platform' legacy_base_url = 'cert-api.access.redhat.com/r/insights' From 1f1959cd5a1d21111feb5fc9b81b01c5289f6ed1 Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Tue, 24 Mar 2020 22:34:02 +0800 Subject: [PATCH 003/892] Add Parser for OpenShift certificates symbolic link file (#2495) * Add Parser for OpenShift certificates symbolic link file Signed-off-by: shlao * Fixed the Title Signed-off-by: shlao * Add Raises section in docstr Signed-off-by: shlao * generally describes the parsers in doc summary Signed-off-by: shlao Co-authored-by: Xiangce Liu --- .../readlink_openshift_certs.rst | 3 + insights/parsers/readlink_openshift_certs.py | 71 +++++++++++++++++++ .../tests/test_readlink_openshift_certs.py | 46 ++++++++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 2 + insights/specs/insights_archive.py | 2 + 6 files changed, 126 insertions(+) create mode 100644 docs/shared_parsers_catalog/readlink_openshift_certs.rst create mode 100644 insights/parsers/readlink_openshift_certs.py create mode 100644 insights/parsers/tests/test_readlink_openshift_certs.py diff --git a/docs/shared_parsers_catalog/readlink_openshift_certs.rst b/docs/shared_parsers_catalog/readlink_openshift_certs.rst new file mode 100644 index 000000000..9d42ecff5 --- /dev/null +++ b/docs/shared_parsers_catalog/readlink_openshift_certs.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.readlink_openshift_certs + :members: + :show-inheritance: diff --git a/insights/parsers/readlink_openshift_certs.py b/insights/parsers/readlink_openshift_certs.py new file mode 100644 index 000000000..78150967e --- /dev/null +++ b/insights/parsers/readlink_openshift_certs.py @@ -0,0 +1,71 @@ +""" +ReadLink parsers for Openshift certificate symbolic file links +============================================================== + +This module contains the following parsers: + +ReadLinkEKubeletClientCurrent - command ``/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem`` +------------------------------------------------------------------------------------------------------------------------- +ReadLinkEKubeletServerCurrent - command ``/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem`` +------------------------------------------------------------------------------------------------------------------------- +""" + +from insights.specs import Specs +from insights.parsers import SkipException +from insights import parser, CommandParser + + +@parser(Specs.readlink_e_shift_cert_client) +class ReadLinkEKubeletClientCurrent(CommandParser): + """ + Class for command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem + + Sample content from command is:: + + /etc/origin/node/certificates/kubelet-client-2019-10-18-23-17-35.pem + + Examples: + >>> client.path + '/etc/origin/node/certificates/kubelet-client-2019-10-18-23-17-35.pem' + + Raises: + SkipException: When input content is empty + """ + def parse_content(self, content): + if content is None or len(content) == 0: + raise SkipException("No Data from command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem") + + self._path = content[-1] + + @property + def path(self): + """Returns real file path of /etc/origin/node/certificates/kubelet-client-current.pem""" + return self._path + + +@parser(Specs.readlink_e_shift_cert_server) +class ReadLinkEKubeletServerCurrent(CommandParser): + """ + Class for command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem + + Sample content from command is:: + + /etc/origin/node/certificates/kubelet-server-2018-10-18-23-29-14.pem + + Examples: + >>> server.path + '/etc/origin/node/certificates/kubelet-server-2018-10-18-23-29-14.pem' + + Raises: + SkipException: When input content is empty + """ + def parse_content(self, content): + if content is None or len(content) == 0: + raise SkipException("No Data from command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem") + + self._path = content[-1] + + @property + def path(self): + """Returns real file path of /etc/origin/node/certificates/kubelet-server-current.pem""" + return self._path diff --git a/insights/parsers/tests/test_readlink_openshift_certs.py b/insights/parsers/tests/test_readlink_openshift_certs.py new file mode 100644 index 000000000..45fec7aa8 --- /dev/null +++ b/insights/parsers/tests/test_readlink_openshift_certs.py @@ -0,0 +1,46 @@ +import pytest +import doctest + +from insights.tests import context_wrap +from insights.parsers import readlink_openshift_certs, SkipException + +CLIENT_REAL_FILE_PATH = ''' +/etc/origin/node/certificates/kubelet-client-2019-10-18-23-17-35.pem +'''.strip() + +SERVER_REAL_FILE_PATH = ''' +/etc/origin/node/certificates/kubelet-server-2018-10-18-23-29-14.pem +'''.strip() + +BAD_FILE_PATH = "" + + +def test_doc_examples(): + env = { + 
'client': readlink_openshift_certs.ReadLinkEKubeletClientCurrent( + context_wrap(CLIENT_REAL_FILE_PATH)), + 'server': readlink_openshift_certs.ReadLinkEKubeletServerCurrent( + context_wrap(SERVER_REAL_FILE_PATH)), + } + failed, total = doctest.testmod(readlink_openshift_certs, globs=env) + assert failed == 0 + + +def test_readlink_openshift_certs(): + client = readlink_openshift_certs.ReadLinkEKubeletClientCurrent(context_wrap(CLIENT_REAL_FILE_PATH)) + assert len(client.path) > 0 + assert client.path == CLIENT_REAL_FILE_PATH + + server = readlink_openshift_certs.ReadLinkEKubeletServerCurrent(context_wrap(SERVER_REAL_FILE_PATH)) + assert len(server.path) > 0 + assert server.path == SERVER_REAL_FILE_PATH + + +def test_fail(): + with pytest.raises(SkipException) as e: + readlink_openshift_certs.ReadLinkEKubeletClientCurrent(context_wrap(BAD_FILE_PATH)) + assert "No Data from command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem" in str(e) + + with pytest.raises(SkipException) as e: + readlink_openshift_certs.ReadLinkEKubeletServerCurrent(context_wrap(BAD_FILE_PATH)) + assert "No Data from command: /usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem" in str(e) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 8c8fb166e..dfd53774c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -491,6 +491,8 @@ class Specs(SpecSet): rc_local = RegistryPoint() rdma_conf = RegistryPoint() readlink_e_etc_mtab = RegistryPoint() + readlink_e_shift_cert_client = RegistryPoint() + readlink_e_shift_cert_server = RegistryPoint() redhat_release = RegistryPoint() resolv_conf = RegistryPoint() rhev_data_center = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index e3242a520..20eafe224 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -790,6 +790,8 @@ def package_and_httpd(broker): rc_local = simple_file("/etc/rc.d/rc.local") rdma_conf = simple_file("/etc/rdma/rdma.conf") readlink_e_etc_mtab = simple_command("/usr/bin/readlink -e /etc/mtab") + readlink_e_shift_cert_client = simple_command("/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem") + readlink_e_shift_cert_server = simple_command("/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem") redhat_release = simple_file("/etc/redhat-release") resolv_conf = simple_file("/etc/resolv.conf") rhosp_release = simple_file("/etc/rhosp-release") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index bc89ec527..19f4871ed 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -232,6 +232,8 @@ class InsightsArchiveSpecs(Specs): rabbitmq_report = simple_file("insights_commands/rabbitmqctl_report") rabbitmq_users = simple_file("insights_commands/rabbitmqctl_list_users") readlink_e_etc_mtab = simple_file("insights_commands/readlink_-e_.etc.mtab") + readlink_e_shift_cert_client = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-client-current.pem") + readlink_e_shift_cert_server = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-server-current.pem") rhn_charsets = simple_file("insights_commands/rhn-charsets") rhn_schema_stats = simple_file("insights_commands/rhn-schema-stats") rhn_schema_version = simple_file("insights_commands/rhn-schema-version") From 879cc41c0478e31590c3c7d8226606e39d2c791f Mon Sep 17 00:00:00 2001 From: 
vishwanathjadhav Date: Wed, 25 Mar 2020 07:53:04 +0530 Subject: [PATCH 004/892] Enhanced to handle the None value of release version (#2510) * Enhanced to handle the None value of release version Signed-off-by: vishawanathjadhav * Optimized code as per review comments Signed-off-by: vishawanathjadhav * Added Raises field Signed-off-by: vishawanathjadhav --- insights/parsers/rhsm_releasever.py | 7 +++++-- insights/parsers/tests/test_rhsm_releasever.py | 7 +++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/insights/parsers/rhsm_releasever.py b/insights/parsers/rhsm_releasever.py index 701f331af..490db7199 100644 --- a/insights/parsers/rhsm_releasever.py +++ b/insights/parsers/rhsm_releasever.py @@ -25,6 +25,9 @@ class RhsmReleaseVer(JSONParser): {"releaseVer": "6.10"} + Raises: + SkipException: When the json content of the file is empty.(i.e release version is empty. eg. {}) + Examples: >>> type(rhsm_releasever) @@ -37,6 +40,7 @@ class RhsmReleaseVer(JSONParser): >>> rhsm_releasever.minor 10 """ + def parse_content(self, content): """ Parse the contents of file ``/var/lib/rhsm/cache/releasever.json``. @@ -45,8 +49,7 @@ def parse_content(self, content): self.set = self.major = self.minor = None if 'releaseVer' not in self.data: raise SkipException('releaseVer is not in data') - - rel = self.data['releaseVer'] + rel = self.data.get('releaseVer') or '' rel_splits = rel.split('.') # Release: 6.7 if len(rel_splits) == 2: diff --git a/insights/parsers/tests/test_rhsm_releasever.py b/insights/parsers/tests/test_rhsm_releasever.py index 8991067a1..fb967ff9c 100644 --- a/insights/parsers/tests/test_rhsm_releasever.py +++ b/insights/parsers/tests/test_rhsm_releasever.py @@ -9,6 +9,7 @@ RHEL_MAJ_1 = '{"releaseVer": "7Server"}' RHEL_MAJ_2 = '{"releaseVer": "8"}' RHEL_NONE = '{"releaseVer": ""}' +RHEL_NONE_2 = '{"releaseVer": null}' RHEL_EMPTY = '{}' @@ -37,6 +38,12 @@ def test_rhsm_releasever(): assert relver.major is None assert relver.minor is None + relver = RhsmReleaseVer(context_wrap(RHEL_NONE_2)) + assert relver['releaseVer'] is None + assert relver.set is None + assert relver.major is None + assert relver.minor is None + with pytest.raises(SkipException) as e_info: relver = RhsmReleaseVer(context_wrap(RHEL_EMPTY)) assert "releaseVer is not in data" in str(e_info.value) From 8bbcb988d9a09cb55bdfd73f210bff44a00c6514 Mon Sep 17 00:00:00 2001 From: Stephen Date: Wed, 25 Mar 2020 18:36:20 -0400 Subject: [PATCH 005/892] Deduplicate http response content when upload fails (#2512) * Deduplicate http response content when upload fails We are currently logging debug information twice when using --net-debug. This change makes it so that we only report the failure once in stdout. The output for the failure message still provides relevant info that's needed to find out what happened. 
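For reference, a minimal sketch of the custom NETWORK level that this
behaviour relies on (the level value 11 and the addLevelName() call
mirror the constants.py and client.py changes earlier in this series;
the URL is illustrative only):

    import logging

    NETWORK = 11  # between DEBUG (10) and INFO (20)
    logging.addLevelName(NETWORK, "NETWORK")
    logging.basicConfig(level=NETWORK)

    logger = logging.getLogger(__name__)
    # shown with --net-debug (level NETWORK) or --verbose (level DEBUG);
    # suppressed on a default run, where the console level is INFO
    logger.log(NETWORK, "GET %s", "https://example.com")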
The --net-debug prints some additional info rather than printing the same thing twice Signed-off-by: Stephen Adams * set http status messages to debug Instead of logging HTTP errors as info, we'll log them as debug to quite down a normal non-debug client run Signed-off-by: Stephen Adams --- insights/client/connection.py | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/insights/client/connection.py b/insights/client/connection.py index 736408d9d..cbb178bad 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -413,14 +413,6 @@ def handle_fail_rcs(self, req): Bail out if we get a 401 and leave a message """ - try: - logger.log(NETWORK, "HTTP Status Code: %s", req.status_code) - logger.log(NETWORK, "HTTP Response Text: %s", req.text) - logger.log(NETWORK, "HTTP Response Reason: %s", req.reason) - logger.log(NETWORK, "HTTP Response Content: %s", req.content) - except: - logger.error("Malformed HTTP Request.") - # attempt to read the HTTP response JSON message try: logger.log(NETWORK, "HTTP Response Message: %s", req.json()["message"]) @@ -429,9 +421,9 @@ def handle_fail_rcs(self, req): # handle specific status codes if req.status_code >= 400: - logger.info("Debug Information:\nHTTP Status Code: %s", + logger.debug("Debug Information:\nHTTP Status Code: %s", req.status_code) - logger.info("HTTP Status Text: %s", req.reason) + logger.debug("HTTP Status Text: %s", req.reason) if req.status_code == 401: logger.error("Authorization Required.") logger.error("Please ensure correct credentials " From 8c8809f8b76a1db451ce6640f85cf1f882cc2a98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Lobato=20Garc=C3=ADa?= Date: Thu, 26 Mar 2020 14:29:34 +0100 Subject: [PATCH 006/892] compliance: Download tailoring file for tailored policies (#2492) * compliance: Download tailoring file for tailored policies Signed-off-by: Daniel Lobato Garcia * Add tailored to testing mock responses Signed-off-by: Daniel Lobato Garcia * Add Tailoring file path to mock lambda Signed-off-by: Daniel Lobato Garcia * Adapt responses to new API endpoint Signed-off-by: Daniel Lobato Garcia * Fix test syntax Signed-off-by: Daniel Lobato Garcia * Correct search parameter Signed-off-by: Daniel Lobato Garcia * Modify get_policies response test Signed-off-by: Daniel Lobato Garcia * Add tests for download and buliding tailoring command Signed-off-by: Daniel Lobato Garcia * Added test for request without 'tailored' Signed-off-by: Daniel Lobato Garcia * Update mocking library Signed-off-by: Daniel Lobato Garcia --- insights/client/apps/compliance/__init__.py | 54 +++++++++++++++--- insights/tests/client/apps/test_compliance.py | 55 +++++++++++++++---- 2 files changed, 90 insertions(+), 19 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 862f50111..33999461a 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -30,22 +30,50 @@ def oscap_scan(self): if not policies: logger.error("System is not associated with any profiles. 
Assign profiles by either uploading a SCAP scan or using the compliance web UI.\n") exit(constants.sig_kill_bad) - profile_ref_ids = [policy['ref_id'] for policy in policies] - for profile_ref_id in profile_ref_ids: + for policy in policies: self.run_scan( - profile_ref_id, - self.find_scap_policy(profile_ref_id), - '/var/tmp/oscap_results-{0}.xml'.format(profile_ref_id) + policy['attributes']['ref_id'], + self.find_scap_policy(policy['attributes']['ref_id']), + '/var/tmp/oscap_results-{0}.xml'.format(policy['attributes']['ref_id']), + tailoring_file_path=self.download_tailoring_file(policy) ) return self.archive.create_tar_file(), COMPLIANCE_CONTENT_TYPE + def download_tailoring_file(self, policy): + if 'tailored' not in policy['attributes'] or policy['attributes']['tailored'] is False: + return None + + # Download tailoring file to pass as argument to run_scan + logger.debug( + "Policy {0} is a tailored policy. Starting tailoring file download...".format(policy['attributes']['ref_id']) + ) + tailoring_file_path = "/var/tmp/oscap_tailoring_file-{0}.xml".format(policy['attributes']['ref_id']) + response = self.conn.session.get( + "https://{0}/compliance/profiles/{1}/tailoring_file".format(self.config.base_url, policy['id']) + ) + logger.debug("Response code: {0}".format(response.status_code)) + if response.content is None: + logger.info("Problem downloading tailoring file for {0} to {1}".format(policy['attributes']['ref_id'], tailoring_file_path)) + return None + + with open(tailoring_file_path, mode="w+b") as f: + f.write(response.content) + logger.info("Saved tailoring file for {0} to {1}".format(policy['attributes']['ref_id'], tailoring_file_path)) + + logger.debug("Policy {0} tailoring file download finished".format(policy['attributes']['ref_id'])) + + return tailoring_file_path + # TODO: Not a typo! This endpoint gives OSCAP policies, not profiles # We need to update compliance-backend to fix this def get_policies(self): - response = self.conn.session.get("https://{0}/compliance/systems".format(self.config.base_url), params={'search': 'name={0}'.format(self.hostname)}) + response = self.conn.session.get("https://{0}/compliance/profiles".format(self.config.base_url), + params={'search': 'system_names={0}'.format(self.hostname)}) + logger.debug("Content of the response: {0} - {1}".format(response, + response.json())) if response.status_code == 200: - return (response.json().get('data') or [{}])[0].get('attributes', {}).get('profiles', []) + return (response.json().get('data') or []) else: return [] @@ -67,11 +95,19 @@ def find_scap_policy(self, profile_ref_id): exit(constants.sig_kill_bad) return filenames[0] - def run_scan(self, profile_ref_id, policy_xml, output_path): + def build_oscap_command(self, profile_ref_id, policy_xml, output_path, tailoring_file_path): + command = 'oscap xccdf eval --profile ' + profile_ref_id + if tailoring_file_path: + command += ' --tailoring-file ' + tailoring_file_path + command += ' --results ' + output_path + ' ' + policy_xml + return command + + def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path=None): logger.info('Running scan for {0}... 
this may take a while'.format(profile_ref_id)) env = os.environ.copy() env.update({'TZ': 'UTC'}) - rc, oscap = call('oscap xccdf eval --profile ' + profile_ref_id + ' --results ' + output_path + ' ' + policy_xml, keep_rc=True, env=env) + oscap_command = self.build_oscap_command(profile_ref_id, policy_xml, output_path, tailoring_file_path) + rc, oscap = call(oscap_command, keep_rc=True, env=env) if rc and rc != NONCOMPLIANT_STATUS: logger.error('Scan failed') logger.error(oscap) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 332cc7d47..672a66c63 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -1,7 +1,7 @@ # -*- coding: UTF-8 -*- from insights.client.apps.compliance import ComplianceClient, COMPLIANCE_CONTENT_TYPE -from mock.mock import patch, Mock +from mock.mock import patch, Mock, mock_open from pytest import raises import os @@ -12,9 +12,9 @@ @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None, compressor='gz') def test_oscap_scan(config, assert_rpms): compliance_client = ComplianceClient(config) - compliance_client.get_policies = lambda: [{'ref_id': 'foo'}] + compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo', 'tailored': False}}] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' - compliance_client.run_scan = lambda ref_id, policy_xml, output_path: None + compliance_client.run_scan = lambda ref_id, policy_xml, output_path, tailoring_file_path: None compliance_client.archive.archive_tmp_dir = '/tmp' compliance_client.archive.archive_name = 'insights-compliance-test' archive, content_type = compliance_client.oscap_scan() @@ -26,7 +26,7 @@ def test_oscap_scan(config, assert_rpms): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_missing_packages(config, call): compliance_client = ComplianceClient(config) - compliance_client.get_policies = lambda: [{'ref_id': 'foo'}] + compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo'}}] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' compliance_client.run_scan = lambda ref_id, policy_xml: None with raises(SystemExit): @@ -37,7 +37,7 @@ def test_missing_packages(config, call): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_errored_rpm_call(config, call): compliance_client = ComplianceClient(config) - compliance_client.get_policies = lambda: [{'ref_id': 'foo'}] + compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo'}}] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' compliance_client.run_scan = lambda ref_id, policy_xml: None with raises(SystemExit): @@ -48,9 +48,9 @@ def test_errored_rpm_call(config, call): def test_get_policies(config): compliance_client = ComplianceClient(config) compliance_client.hostname = 'foo' - compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': {'profiles': 'data'}}]}))) - assert compliance_client.get_policies() == 'data' - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/systems', params={'search': 'name=foo'}) + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + assert 
compliance_client.get_policies() == [{'attributes': 'data'}] + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'}) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) @@ -59,7 +59,7 @@ def test_get_policies_no_policies(config): compliance_client.hostname = 'foo' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': []}))) assert compliance_client.get_policies() == [] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/systems', params={'search': 'name=foo'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'}) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) @@ -68,7 +68,7 @@ def test_get_policies_error(config): compliance_client.hostname = 'foo' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=500)) assert compliance_client.get_policies() == [] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/systems', params={'search': 'name=foo'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'}) @patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) @@ -123,3 +123,38 @@ def test_run_scan_fail(config, call): with raises(SystemExit): compliance_client.run_scan('ref_id', '/nonexistent', output_path) call.assert_called_with("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent', keep_rc=True, env=env) + + +@patch("insights.client.config.InsightsConfig") +def test_tailored_file_is_not_downloaded_if_not_needed(config): + compliance_client = ComplianceClient(config) + assert compliance_client.download_tailoring_file({'attributes': {'tailored': False}}) is None + + +@patch("insights.client.config.InsightsConfig") +def test_tailored_file_is_not_downloaded_if_tailored_is_missing(config): + compliance_client = ComplianceClient(config) + assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'ref_id': 'aaaaa'}}) is None + + +@patch("insights.client.apps.compliance.open", new_callable=mock_open) +@patch("insights.client.config.InsightsConfig") +def test_tailored_file_is_downloaded_if_needed(config, call): + compliance_client = ComplianceClient(config) + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" + assert tailoring_file_path == compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa'}}) + + +@patch("insights.client.config.InsightsConfig") +def test_build_oscap_command_does_not_append_tailoring_path(config): + compliance_client = ComplianceClient(config) + expected_command = 'oscap xccdf eval --profile aaaaa --results output_path xml_sample' + assert expected_command == compliance_client.build_oscap_command('aaaaa', 'xml_sample', 'output_path', None) + + +@patch("insights.client.config.InsightsConfig") +def test_build_oscap_command_append_tailoring_path(config): + compliance_client = ComplianceClient(config) + expected_command = 'oscap xccdf eval --profile aaaaa --tailoring-file tailoring_path --results output_path 
xml_sample' + assert expected_command == compliance_client.build_oscap_command('aaaaa', 'xml_sample', 'output_path', 'tailoring_path') From 41c8e357e8523493162a25809405c0b79738ea40 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 26 Mar 2020 21:53:02 +0800 Subject: [PATCH 007/892] Add new parser for "/proc/sys/kernel/sched_rt_runtime_us" (#2500) * Add new parser for "/proc/sys/kernel/sched_rt_runtime_us" Signed-off-by: Huanhuan Li * Fix flake8 error Signed-off-by: Huanhuan Li * Update doc string based on comments Signed-off-by: Huanhuan Li * Update doc string to right format Signed-off-by: Huanhuan Li * Add parser for "/sys/kernel/debug/sched_features" Signed-off-by: Huanhuan Li --- docs/shared_parsers_catalog/sys_kernel.rst | 3 + insights/parsers/sys_kernel.py | 74 ++++++++++++++++++++++ insights/parsers/tests/test_sys_kernel.py | 51 +++++++++++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 2 + 5 files changed, 132 insertions(+) create mode 100644 docs/shared_parsers_catalog/sys_kernel.rst create mode 100644 insights/parsers/sys_kernel.py create mode 100644 insights/parsers/tests/test_sys_kernel.py diff --git a/docs/shared_parsers_catalog/sys_kernel.rst b/docs/shared_parsers_catalog/sys_kernel.rst new file mode 100644 index 000000000..09937a841 --- /dev/null +++ b/docs/shared_parsers_catalog/sys_kernel.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sys_kernel + :members: + :show-inheritance: diff --git a/insights/parsers/sys_kernel.py b/insights/parsers/sys_kernel.py new file mode 100644 index 000000000..55f348a0e --- /dev/null +++ b/insights/parsers/sys_kernel.py @@ -0,0 +1,74 @@ +""" +System kernel files under ``/proc/sys/kernel`` or ``/sys/kernel`` +================================================================= + +This module contains the following parsers: + +SchedRTRuntime - file ``/proc/sys/kernel/sched_rt_runtime_us`` +-------------------------------------------------------------- +SchedFeatures - file ``/sys/kernel/debug/sched_features`` +--------------------------------------------------------- +""" + +from insights import Parser, parser, get_active_lines +from insights.parsers import ParseException +from insights.specs import Specs + + +@parser(Specs.sched_rt_runtime_us) +class SchedRTRuntime(Parser): + """ + Class for parsing the `/proc/sys/kernel/sched_rt_runtime_us` file. + + Typical content of the file is:: + + 950000 + + Examples: + >>> type(srt) + + >>> srt.runtime_us + 950000 + + Attributes: + runtime_us (int): The value of sched_rt_runtime_us + + Raises: + ParseException: Raised when there is more than one line or the value isn't interger. + """ + + def parse_content(self, content): + lines = get_active_lines(content) + if len(lines) != 1: + raise ParseException("Unexpected file content") + try: + self.runtime_us = int(lines[0]) + except: + raise ParseException("Unexpected file content") + + +@parser(Specs.sys_kernel_sched_features) +class SchedFeatures(Parser): + """ + Class for parsing the `/sys/kernel/debug/sched_features` file. 
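+    Each whitespace-separated token in the file names one scheduler
+    feature flag; ``parse_content`` collects them all into the
+    ``features`` list.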
+ + Typical content of the file is:: + + GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY CACHE_HOT_BUDDY + + Examples: + >>> type(sfs) + + >>> "GENTLE_FAIR_SLEEPERS" in sfs.features + True + >>> "TEST1" in sfs.features + False + + Attributes: + features (list): A list with all the features + """ + + def parse_content(self, content): + self.features = [] + for line in get_active_lines(content): + self.features.extend(line.split()) diff --git a/insights/parsers/tests/test_sys_kernel.py b/insights/parsers/tests/test_sys_kernel.py new file mode 100644 index 000000000..2e8d7de58 --- /dev/null +++ b/insights/parsers/tests/test_sys_kernel.py @@ -0,0 +1,51 @@ +import doctest +import pytest + +from insights.parsers import sys_kernel +from insights.parsers.sys_kernel import SchedRTRuntime, SchedFeatures +from insights.tests import context_wrap +from insights.core import ParseException + +SYS_KERNEL_RUNTIME_CONTENT_1 = """ +-1 +""".strip() + +SYS_KERNEL_RUNTIME_CONTENT_2 = """ +950000 +""".strip() + +SYS_KERNEL_RUNTIME_CONTENT_3 = """ +950000 +-1 +""".strip() + +SYS_KERNEL_RUNTIME_CONTENT_4 = """ +sss1 +""".strip() + +SYS_KERNEL_FEATURES = """ +GENTLE_FAIR_SLEEPERS START_DEBIT NO_NEXT_BUDDY LAST_BUDDY CACHE_HOT_BUDDY +""".strip() + + +def test_sys_runtime_docs(): + failed, total = doctest.testmod( + sys_kernel, + globs={ + 'srt': SchedRTRuntime(context_wrap(SYS_KERNEL_RUNTIME_CONTENT_2)), + 'sfs': SchedFeatures(context_wrap(SYS_KERNEL_FEATURES)), + } + ) + assert failed == 0 + + +def test_sys_kernel_1(): + result = SchedRTRuntime(context_wrap(SYS_KERNEL_RUNTIME_CONTENT_1)) + assert result.runtime_us == -1 + + +def test_exception(): + with pytest.raises(ParseException): + SchedRTRuntime(context_wrap(SYS_KERNEL_RUNTIME_CONTENT_3)) + with pytest.raises(ParseException): + SchedRTRuntime(context_wrap(SYS_KERNEL_RUNTIME_CONTENT_4)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index dfd53774c..a3af0e680 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -529,6 +529,7 @@ class Specs(SpecSet): satellite_version_rb = RegistryPoint() satellite_custom_hiera = RegistryPoint() scheduler = RegistryPoint(multi_output=True) + sched_rt_runtime_us = RegistryPoint() scsi = RegistryPoint() sctp_asc = RegistryPoint() sctp_eps = RegistryPoint() @@ -567,6 +568,7 @@ class Specs(SpecSet): swift_log = RegistryPoint(filterable=True) swift_object_expirer_conf = RegistryPoint() swift_proxy_server_conf = RegistryPoint() + sys_kernel_sched_features = RegistryPoint() sysconfig_chronyd = RegistryPoint() sysconfig_httpd = RegistryPoint() sysconfig_irqbalance = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 20eafe224..02398c577 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -890,6 +890,7 @@ def sap_sid(broker): satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") block_devices = listdir("/sys/block") scheduler = foreach_collect(block_devices, "/sys/block/%s/queue/scheduler") + sched_rt_runtime_us = simple_file("/proc/sys/kernel/sched_rt_runtime_us") scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') scsi_fwver = glob_file('/sys/class/scsi_host/host[0-9]*/fwrev') @@ -931,6 +932,7 @@ def block(broker): swift_log = first_file(["/var/log/containers/swift/swift.log", "/var/log/swift/swift.log"]) swift_object_expirer_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/object-expirer.conf", 
"/etc/swift/object-expirer.conf"]) swift_proxy_server_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/proxy-server.conf", "/etc/swift/proxy-server.conf"]) + sys_kernel_sched_features = simple_file("/sys/kernel/debug/sched_features") sysconfig_chronyd = simple_file("/etc/sysconfig/chronyd") sysconfig_httpd = simple_file("/etc/sysconfig/httpd") sysconfig_irqbalance = simple_file("etc/sysconfig/irqbalance") From 96d1968bd1d98350d0898f580e84ff57e22d125c Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 26 Mar 2020 15:23:38 +0100 Subject: [PATCH 008/892] Add parser for named_checkconf (#2506) Signed-off-by: Jitka Obselkova --- .../named_checkconf.rst | 3 + insights/parsers/named_checkconf.py | 186 +++++++++++++++ .../parsers/tests/test_named_checkconf.py | 211 ++++++++++++++++++ 3 files changed, 400 insertions(+) create mode 100644 docs/shared_parsers_catalog/named_checkconf.rst create mode 100644 insights/parsers/named_checkconf.py create mode 100644 insights/parsers/tests/test_named_checkconf.py diff --git a/docs/shared_parsers_catalog/named_checkconf.rst b/docs/shared_parsers_catalog/named_checkconf.rst new file mode 100644 index 000000000..9a6710eb8 --- /dev/null +++ b/docs/shared_parsers_catalog/named_checkconf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.named_checkconf + :members: + :show-inheritance: diff --git a/insights/parsers/named_checkconf.py b/insights/parsers/named_checkconf.py new file mode 100644 index 000000000..d447323da --- /dev/null +++ b/insights/parsers/named_checkconf.py @@ -0,0 +1,186 @@ +""" +NamedCheckconf parser - command ``named-checkconf -p`` +====================================================== + +Named-checkconf is a syntax checking tool for named configuration file. +Named is a name server used by BIND. +""" + +import re + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.parsers import SkipException +from insights.specs import Specs + +# regex for matching 'dnssec-enable no' +DNSSEC_DISABLED = re.compile(r'dnssec-enable\s+no;') +# regex for matching 'disable-algorithms' section +DISABLE_ALGORITHMS = re.compile(r'disable-algorithms[^}]*};') +# regex for matching 'disable-ds-digests' section +DISABLE_DS_DIGESTS = re.compile(r'disable-ds-digests[^}]*};') +# regex for matching values in single or double quotation marks +INNER_VALLUES = re.compile(r'(?:\"|\')(.*)(?:\"|\')') + + +@parser(Specs.named_checkconf_p) +class NamedCheckconf(CommandParser): + """ + Class for parsing the ``named-checkconf -p`` command. + + Attributes: + is_dnssec_disabled (bool): True, if dnssec is not enabled, False otherwise. + dnssec_line (string): The line which disabled dnssec, if it is not enabled, None otherwise. + disable_algorithms (dict): Dictionary where the key is a domain and + the value is a list of all algorithms associated with it. + disable_ds_digests (dict): Dictionary where the key is a domain and + the value is a list of all digests associated with it. + + Raises: + SkipException: When content is empty or cannot be parsed. 
+ + Sample output of this command is:: + + logging { + channel "default_debug" { + file "data/named.run"; + severity dynamic; + }; + }; + options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + pid-file "/run/named/named.pid"; + recursing-file "/var/named/data/named.recursing"; + secroots-file "/var/named/data/named.secroots"; + session-keyfile "/run/named/session.key"; + statistics-file "/var/named/data/named_stats.txt"; + disable-algorithms "." { + "RSAMD5"; + "DSA"; + }; + disable-ds-digests "." { + "GOST"; + }; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; + }; + managed-keys { + "." initial-key 257 3 8 "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF + FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX + bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD + X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz + W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS + Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq + QxA+Uk1ihz0="; + "." initial-key 257 3 8 "AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3 + +/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kv + ArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF + 0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+e + oZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfd + RUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwN + R1AkUTV74bU="; + }; + zone "." IN { + type hint; + file "named.ca"; + }; + zone "localhost.localdomain" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; + }; + zone "localhost" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; + }; + zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; + }; + zone "1.0.0.127.in-addr.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; + }; + zone "0.in-addr.arpa" IN { + type master; + file "named.empty"; + allow-update { + "none"; + }; + }; + + Examples: + >>> type(named_checkconf) + + >>> named_checkconf.is_dnssec_disabled + False + >>> named_checkconf.dnssec_line is None + True + >>> named_checkconf.disable_algorithms + {'.': ['RSAMD5', 'DSA']} + >>> named_checkconf.disable_ds_digests + {'.': ['GOST']} + """ + + def __init__(self, context): + self.is_dnssec_disabled = False # dnssec is enabled by default + self.dnssec_line = None + self.disable_algorithms = {} + self.disable_ds_digests = {} + super(NamedCheckconf, self).__init__(context) + + def parse_content(self, content): + if not content: + raise SkipException('No content.') + + full_result = '\n'.join(content) + + match_dnssec = DNSSEC_DISABLED.search(full_result) + if match_dnssec: + self.is_dnssec_disabled = True + self.dnssec_line = match_dnssec.group(0) + + self.disable_algorithms = self.retrieve_disabled(DISABLE_ALGORITHMS, full_result) + self.disable_ds_digests = self.retrieve_disabled(DISABLE_DS_DIGESTS, full_result) + + def retrieve_disabled(self, section_regex, source): + """ + Parses 'disable-algorithms' or 'disable_ds_digests' section into a dictionary, + where the key is a domain and the value is a list of all algorithms/digests associated with it. 
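+
+        For example, the following section is parsed to
+        ``{'.': ['RSAMD5', 'DSA']}``::
+
+            disable-algorithms "." {
+                "RSAMD5";
+                "DSA";
+            };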
+ + Attributes: + section_regex (string): The regular expression for a given section. + source (string): The source in which a given section is searched for. + """ + dict_of_sections = dict() + for match_entry in section_regex.finditer(source): + entry = match_entry.group(0) + # collects all values in quotation marks for given section + entry_values = [match_value.group(1) for match_value in INNER_VALLUES.finditer(entry)] + dict_of_sections[entry_values[0]] = entry_values[1:] + + return dict_of_sections diff --git a/insights/parsers/tests/test_named_checkconf.py b/insights/parsers/tests/test_named_checkconf.py new file mode 100644 index 000000000..c11fa6c08 --- /dev/null +++ b/insights/parsers/tests/test_named_checkconf.py @@ -0,0 +1,211 @@ +import doctest +import pytest + +from insights.parsers import named_checkconf, SkipException +from insights.parsers.named_checkconf import NamedCheckconf +from insights.tests import context_wrap + + +CONFIG_DNSSEC_ENABLED = """ +options { + bindkeys-file "/etc/named.iscdlv.key"; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + statistics-file "/var/named/data/named_stats.txt"; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; +}; +""" + +CONFIG_DNSSEC_DISABLED = """ +options { + bindkeys-file "/etc/named.iscdlv.key"; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + statistics-file "/var/named/data/named_stats.txt"; + dnssec-enable no; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; +}; +""" + +CONFIG_DNSSEC_DEFAULT = """ +options { + bindkeys-file "/etc/named.iscdlv.key"; + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + statistics-file "/var/named/data/named_stats.txt"; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; +}; +""" + +CONFIG_DISABLED_SECTIONS = """ +logging { + channel "default_debug" { + file "data/named.run"; + severity dynamic; + }; +}; +options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + pid-file "/run/named/named.pid"; + recursing-file "/var/named/data/named.recursing"; + secroots-file "/var/named/data/named.secroots"; + session-keyfile "/run/named/session.key"; + statistics-file "/var/named/data/named_stats.txt"; + disable-algorithms "." { + "RSAMD5"; + "DSA"; + }; + disable-ds-digests "." { + "GOST"; + }; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; +}; +managed-keys { + "." 
initial-key 257 3 8 "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF + FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX + bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD + X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz + W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS + Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq + QxA+Uk1ihz0="; + "." initial-key 257 3 8 "AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3 + +/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kv + ArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF + 0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+e + oZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfd + RUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwN + R1AkUTV74bU="; +}; +zone "." IN { + type hint; + file "named.ca"; +}; +zone "localhost.localdomain" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; +}; +zone "localhost" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; +}; +zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; +}; +zone "1.0.0.127.in-addr.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; +}; +zone "0.in-addr.arpa" IN { + type master; + file "named.empty"; + allow-update { + "none"; + }; +}; +""" + + +def test_config_no_data(): + with pytest.raises(SkipException): + NamedCheckconf(context_wrap("")) + + +def test_config_dnssec(): + dnssec_disabled = NamedCheckconf(context_wrap(CONFIG_DNSSEC_DISABLED)) + assert dnssec_disabled.is_dnssec_disabled + assert dnssec_disabled.dnssec_line == "dnssec-enable no;" + assert dnssec_disabled.disable_algorithms == {} + assert dnssec_disabled.disable_ds_digests == {} + + dnssec_enabled = NamedCheckconf(context_wrap(CONFIG_DNSSEC_ENABLED)) + assert not dnssec_enabled.is_dnssec_disabled + assert dnssec_enabled.dnssec_line is None + assert dnssec_enabled.disable_algorithms == {} + assert dnssec_enabled.disable_ds_digests == {} + + # dnssec line is not preset - dnssec is enabled by default + dnssec_default = NamedCheckconf(context_wrap(CONFIG_DNSSEC_DEFAULT)) + assert not dnssec_default.is_dnssec_disabled + assert dnssec_default.dnssec_line is None + assert dnssec_default.disable_algorithms == {} + assert dnssec_default.disable_ds_digests == {} + + +def test_config_disabled_sections(): + disabled_sections = NamedCheckconf(context_wrap(CONFIG_DISABLED_SECTIONS)) + assert not disabled_sections.is_dnssec_disabled + assert disabled_sections.dnssec_line is None + assert disabled_sections.disable_algorithms == {".": ["RSAMD5", "DSA"]} + assert disabled_sections.disable_ds_digests == {".": ["GOST"]} + + +def test_doc_examples(): + env = { + "named_checkconf": NamedCheckconf(context_wrap(CONFIG_DISABLED_SECTIONS)), + } + failed, total = doctest.testmod(named_checkconf, globs=env) + assert failed == 0 From b9f93cdcb6f4565aa0dcff9e8cd732ea36046c6f Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 26 Mar 2020 15:59:39 +0100 Subject: [PATCH 009/892] Add files attribute in krb (#2514) Signed-off-by: Jitka Obselkova --- insights/combiners/krb5.py | 5 +++++ insights/combiners/tests/test_krb5.py | 1 + 2 files changed, 6 insertions(+) diff --git a/insights/combiners/krb5.py b/insights/combiners/krb5.py index 06f326b89..e3e696cb8 100644 --- a/insights/combiners/krb5.py +++ b/insights/combiners/krb5.py @@ -58,6 
+58,8 @@ class AllKrb5Conf(LegacyItemAccess): True >>> all_krb5['realms']['dns_lookup_realm'] 'false' + >>> all_krb5.files + ['krb5.conf', 'test.conf', 'test2.conf'] Attributes: includedir (list): The directory list that `krb5.conf` includes via @@ -66,6 +68,7 @@ class AllKrb5Conf(LegacyItemAccess): via `include` directive module (list): The module list that `krb5.conf` specifed via 'module' directive + files (list): The list of configuration file names. """ def __init__(self, krb5configs): @@ -74,8 +77,10 @@ def __init__(self, krb5configs): self.includedir = [] self.include = [] self.module = [] + self.files = [] for krb5_parser in krb5configs: + self.files.append(krb5_parser.file_name) if krb5_parser.file_path == "/etc/krb5.conf": main_data = krb5_parser.data self.includedir = krb5_parser.includedir diff --git a/insights/combiners/tests/test_krb5.py b/insights/combiners/tests/test_krb5.py index 727b8c95a..0506b8e33 100644 --- a/insights/combiners/tests/test_krb5.py +++ b/insights/combiners/tests/test_krb5.py @@ -103,3 +103,4 @@ def test_active_krb5_nest(): assert result.include == ["/etc/krb5test.conf"] assert result.includedir == ["/etc/krb5.conf.d/"] assert result.module == ["/etc/krb5test.conf:residual"] + assert result.files == ['krb5.conf', 'test.conf', 'test2.conf'] From 9aee7fdf81ab07107a1c9a0658efffd32072e668 Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 26 Mar 2020 15:11:21 -0400 Subject: [PATCH 010/892] Ignore filters.yaml in insights-core (#2516) We don't want the filters to end up in pypi, so this is just a safety measure to make sure they don't accidentally get committed to the repo Signed-off-by: Stephen Adams --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index eca490f49..bdce95f05 100644 --- a/.gitignore +++ b/.gitignore @@ -48,4 +48,5 @@ share/ .Python .python* .pytest_cache -.vscode \ No newline at end of file +.vscode +insights/filters.yaml From 98ebf59d268e8b2ca468ae3d05f121b6737addac Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 27 Mar 2020 19:41:49 +0800 Subject: [PATCH 011/892] Deprecate KpatchPatches (#2508) * Deprecate the kpatch_patches Signed-off-by: Xiangce Liu * Remove spec kpatch_patch_files - This spec is never collected - Only keep it in the spec/__init__.py for unit test Signed-off-by: Xiangce Liu * Fix flake8 error Signed-off-by: Xiangce Liu * Fix doc error Signed-off-by: Xiangce Liu * Remove the parser since it's unused Signed-off-by: Xiangce Liu * Remove the spec entry as well Signed-off-by: Xiangce Liu --- .../shared_parsers_catalog/kpatch_patches.rst | 3 -- insights/parsers/kpatch_patches.py | 36 ------------------ insights/parsers/tests/test_kpatch_patches.py | 38 ------------------- insights/specs/__init__.py | 1 - insights/specs/default.py | 8 ---- insights/specs/insights_archive.py | 1 - 6 files changed, 87 deletions(-) delete mode 100644 docs/shared_parsers_catalog/kpatch_patches.rst delete mode 100644 insights/parsers/kpatch_patches.py delete mode 100644 insights/parsers/tests/test_kpatch_patches.py diff --git a/docs/shared_parsers_catalog/kpatch_patches.rst b/docs/shared_parsers_catalog/kpatch_patches.rst deleted file mode 100644 index be8ac5533..000000000 --- a/docs/shared_parsers_catalog/kpatch_patches.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. 
automodule:: insights.parsers.kpatch_patches - :members: - :show-inheritance: diff --git a/insights/parsers/kpatch_patches.py b/insights/parsers/kpatch_patches.py deleted file mode 100644 index 12fd99002..000000000 --- a/insights/parsers/kpatch_patches.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -KpatchPatches - report locally stored kpatch patches -==================================================== - -This parser creates a list of the module names of locally -stored kpatch modules returned by command ``ls /var/lib/kpatch/\`uname -r\`/``. -If no modules are installed, a ContentException will be raised. - -""" - -from .. import parser, CommandParser -from insights.specs import Specs - - -@parser(Specs.kpatch_patch_files) -class KpatchPatches(CommandParser): - """ - A parser for getting modules names of locally stored kpatch-patch files. - - Sample output of `ls /var/lib/kpatch/\`uname -r\`/` looks like:: - - kpatch-3_10_0-1062-1-5.ko kpatch-3_10_0-1062-1-6.ko - - Attributes: - patches (str): List of the name of kpatch patches. The dashes are - converted to underscores, file suffix are removed, and duplicated - names are removed as well - - Examples: - >>> kp.patches - ['kpatch_3_10_0_1062_1_5', 'kpatch_3_10_0_1062_1_6'] - """ - - def parse_content(self, content): - # convert dashes to underscores, remove file suffixes, remove duplicates - self.patches = list(set([p.split('.')[0].replace("-", "_") for p in content])) diff --git a/insights/parsers/tests/test_kpatch_patches.py b/insights/parsers/tests/test_kpatch_patches.py deleted file mode 100644 index a54e64b95..000000000 --- a/insights/parsers/tests/test_kpatch_patches.py +++ /dev/null @@ -1,38 +0,0 @@ -from insights.parsers import kpatch_patches -from insights.tests import context_wrap -from insights.core.plugins import ContentException -import pytest - - -ASSORTED_KPATCHES = """ -asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko -asdfasdfasdf_asdfasdfasdf-asdfasdfasdf_asdfasdfasdf.ko.xz -foo-bar.ko -foo-bar.ko.xz -foo.ko -foo.ko.xz -test_klp_callbacks_demo.ko -test_klp_callbacks_demo.ko.xz -""".strip() - -NO_KPATCH = """ -/bin/ls: cannot access '/var/lib/kpatch/4.18.0-147.8.el8.x86_64': No such file or directory -""".strip() - - -# Try a bunch of random potential patch names -# Compare to expected module names -def test_assorted(): - kp = kpatch_patches.KpatchPatches(context_wrap(ASSORTED_KPATCHES)) - for patch in [ - 'asdfasdfasdf_asdfasdfasdf_asdfasdfasdf_asdfasdfasdf', - 'foo_bar', - 'foo', - 'test_klp_callbacks_demo']: - assert patch in kp.patches - - -# Try the case of no patches installed -def test_no_kpatch(): - with pytest.raises(ContentException): - kpatch_patches.KpatchPatches(context_wrap(NO_KPATCH)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index a3af0e680..dfa337d81 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -264,7 +264,6 @@ class Specs(SpecSet): keystone_crontab_container = RegistryPoint() keystone_log = RegistryPoint(filterable=True) kpatch_list = RegistryPoint() - kpatch_patch_files = RegistryPoint() krb5 = RegistryPoint(multi_output=True) ksmstate = RegistryPoint() kubepods_cpu_quota = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 02398c577..c58506334 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -26,7 +26,6 @@ from insights.core.spec_factory import first_file, listdir from insights.parsers.mount import Mount, ProcMounts from insights.parsers.dnf_module import DnfModuleList 
-from insights.parsers.uname import Uname from insights.combiners.cloud_provider import CloudProvider from insights.combiners.satellite_version import SatelliteVersion from insights.components.rhel_version import IsRhel8 @@ -499,14 +498,7 @@ def semid(broker): keystone_crontab = simple_command("/usr/bin/crontab -l -u keystone") keystone_crontab_container = simple_command("docker exec keystone_cron /usr/bin/crontab -l -u keystone") keystone_log = first_file(["/var/log/containers/keystone/keystone.log", "/var/log/keystone/keystone.log"]) - - @datasource(Uname, context=HostContext) - def kpatch_patches_running_kernel_dir(broker): - un = broker[Uname] - return r"/var/lib/kpatch/" + un.kernel - kpatch_list = simple_command("/usr/sbin/kpatch list") - kpatch_patch_files = command_with_args("ls %s", kpatch_patches_running_kernel_dir) krb5 = glob_file([r"etc/krb5.conf", r"etc/krb5.conf.d/*"]) ksmstate = simple_file("/sys/kernel/mm/ksm/run") kubepods_cpu_quota = glob_file("/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 19f4871ed..163d49eab 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -112,7 +112,6 @@ class InsightsArchiveSpecs(Specs): keystone_crontab = simple_file("insights_commands/crontab_-l_-u_keystone") keystone_crontab_container = simple_file("insights_commands/docker_exec_keystone_cron_.usr.bin.crontab_-l_-u_keystone") kpatch_list = simple_file("insights_commands/kpatch_list") - kpatch_patch_files = simple_file("insights_commands/ls_.var.lib.kpatch.*") libkeyutils = simple_file("insights_commands/find_-L_.lib_.lib64_-name_libkeyutils.so") libkeyutils_objdumps = simple_file("insights_commands/find_-L_.lib_.lib64_-name_libkeyutils.so.1_-exec_objdump_-x") locale = simple_file("insights_commands/locale") From 9b10bfe096c8c1776090ee7e1760d8d9534fd564 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Tue, 31 Mar 2020 11:20:39 -0400 Subject: [PATCH 012/892] systemd: use --now to only run systemctl once (#2504) Signed-off-by: Link Dupont --- insights/client/schedule.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/insights/client/schedule.py b/insights/client/schedule.py index 7649b6246..aef868452 100644 --- a/insights/client/schedule.py +++ b/insights/client/schedule.py @@ -65,8 +65,7 @@ def set_daily(self): logger.debug('Starting systemd timer') try: # Start timers in the case of rhel 7 running systemd - systemctl_timer = run_command_get_output('systemctl start insights-client.timer') - systemctl_timer = run_command_get_output('systemctl enable insights-client.timer') + systemctl_timer = run_command_get_output('systemctl enable --now insights-client.timer') logger.debug("Starting Insights Client systemd timer.") logger.debug("Status: %s", systemctl_timer['status']) logger.debug("Output: %s", systemctl_timer['output']) @@ -79,8 +78,7 @@ def remove_scheduling(self): logger.debug('Stopping all systemd timers') try: # Stop timers in the case of rhel 7 running systemd - systemctl_timer = run_command_get_output('systemctl disable insights-client.timer') - systemctl_timer = run_command_get_output('systemctl stop insights-client.timer') + systemctl_timer = run_command_get_output('systemctl disable --now insights-client.timer') logger.debug("Stopping Insights Client systemd timer.") logger.debug("Status: %s", systemctl_timer['status']) logger.debug("Output: %s", 
systemctl_timer['output']) From a7e9e80de833fc1d2c48eaa85024d3392b1f7b1c Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 1 Apr 2020 10:39:56 +0800 Subject: [PATCH 013/892] Add parser for "/usr/bin/firewall-cmd --list-all-zones" (#2518) * Add parser for "/usr/bin/firewall-cmd --list-all-zones" Signed-off-by: Huanhuan Li * Update code to fix that the attribute value may cross lines Signed-off-by: Huanhuan Li * Fix flake8 error Signed-off-by: Huanhuan Li * Keep zone attributes in zone line Signed-off-by: Huanhuan Li * Delete useless line Signed-off-by: Huanhuan Li * Update code to make it more simple Signed-off-by: Huanhuan Li * Remove space in zone attributes Signed-off-by: Huanhuan Li * Add more Examples and simplify the code Signed-off-by: Xiangce Liu Co-authored-by: Xiangce Liu --- docs/shared_parsers_catalog/firewall_cmd.rst | 3 + insights/parsers/firewall_cmd.py | 116 ++++++++++++++++ insights/parsers/tests/test_firewall_cmd.py | 136 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 258 insertions(+) create mode 100644 docs/shared_parsers_catalog/firewall_cmd.rst create mode 100644 insights/parsers/firewall_cmd.py create mode 100644 insights/parsers/tests/test_firewall_cmd.py diff --git a/docs/shared_parsers_catalog/firewall_cmd.rst b/docs/shared_parsers_catalog/firewall_cmd.rst new file mode 100644 index 000000000..97203eb4d --- /dev/null +++ b/docs/shared_parsers_catalog/firewall_cmd.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.firewall_cmd + :members: + :show-inheritance: diff --git a/insights/parsers/firewall_cmd.py b/insights/parsers/firewall_cmd.py new file mode 100644 index 000000000..2c782abb0 --- /dev/null +++ b/insights/parsers/firewall_cmd.py @@ -0,0 +1,116 @@ +""" +FirewallD commands +================== + +This module contains the following parsers: + +FirewallCmdListALLZones - command ``/usr/bin/firewall-cmd --list-all-zones`` +---------------------------------------------------------------------------- +""" + +from insights import parser, CommandParser +from insights.parsers import ParseException +from insights.specs import Specs + + +@parser(Specs.firewall_cmd_list_all_zones) +class FirewallCmdListALLZones(CommandParser): + """ + Class for parsing the `/usr/bin/firewall-cmd --list-all-zones` command. 
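+
+    Attribute values that wrap onto indented continuation lines
+    (``forward-ports`` and ``rich rules`` in the sample below) are
+    collected into a list, one entry per line.  A hypothetical consumer
+    (illustration only, not part of this patch) might check the active
+    zones for ssh like this::
+
+        def zones_allowing_ssh(zones):
+            return [z for z in zones.active_zones
+                    if any('ssh' in s.split()
+                           for s in zones.zones[z].get('services', []))]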
+ + Typical content of the command is:: + + public (active) + target: default + icmp-block-inversion: no + interfaces: eno1 + sources: + services: dhcpv6-client ssh + ports: + protocols: + masquerade: no + forward-ports: port=80:proto=tcp:toport=12345:toaddr= + port=81:proto=tcp:toport=1234:toaddr= + port=83:proto=tcp:toport=456:toaddr=10.72.47.45 + source-ports: + icmp-blocks: + rich rules: + rule family="ipv4" source address="10.0.0.0/24" destination address="192.168.0.10/32" port port="8080-8090" protocol="tcp" accept + rule family="ipv4" source address="10.0.0.0/24" destination address="192.168.0.10/32" port port="443" protocol="tcp" reject + rule family="ipv4" source address="192.168.0.10/24" reject + rule family="ipv6" source address="1:2:3:4:6::" forward-port port="4011" protocol="tcp" to-port="4012" to-addr="1::2:3:4:7" + + + trusted + target: ACCEPT + icmp-block-inversion: no + interfaces: + sources: + services: + ports: + protocols: + masquerade: no + forward-ports: + source-ports: + icmp-blocks: + rich rules: + + Examples: + >>> type(zones) + + >>> 'public' in zones.active_zones + True + >>> 'ACCEPT' in zones.zones['trusted']['target'] + True + >>> zones.zones['public']['services'] + ['dhcpv6-client ssh'] + >>> 'port=83:proto=tcp:toport=456:toaddr=10.72.47.45' in zones.zones['public']['forward-ports'] + True + + + Attributes: + zones (dict): A dict of zone info + + Raises: + ParseException: Raised when the output is in invalid format + """ + + def __init__(self, context): + super(FirewallCmdListALLZones, self).__init__(context, ["firewalld is not running"]) + + @property + def active_zones(self): + """Return a list of active zone name""" + return [zone for zone, d in self.zones.items() if 'active' in d.get('_attributes', [])] + + def parse_content(self, content): + self.zones = dict() + zone_line = True + zone_name = '' + zone_attr_index = -1 + zone_attr_name = '' + for line in content: + line_strip = line.strip() + if not line_strip: + zone_line = True + continue + if zone_line: + name_info = line_strip.split(None, 1) + zone_name = name_info[0] + self.zones[zone_name] = {} + if len(name_info) > 1: + self.zones[zone_name]['_attributes'] = [i.strip() for i in name_info[1].strip('()').split(',')] + zone_line = False + zone_attr_index = -1 + else: + current_index = len(line.rstrip()) - len(line_strip) + zone_attr_index = current_index if zone_attr_index == -1 else zone_attr_index + if current_index == zone_attr_index: + attrs = [i.strip() for i in line.split(':', 1)] + if len(attrs) != 2: + raise ParseException('Invalid format') + zone_attr_name, attr_value = attrs + attr_value = [attr_value] if attr_value else [] + self.zones[zone_name][zone_attr_name] = attr_value + else: + self.zones[zone_name][zone_attr_name].append(line_strip) diff --git a/insights/parsers/tests/test_firewall_cmd.py b/insights/parsers/tests/test_firewall_cmd.py new file mode 100644 index 000000000..535f824c9 --- /dev/null +++ b/insights/parsers/tests/test_firewall_cmd.py @@ -0,0 +1,136 @@ +import doctest +import pytest +from insights.parsers import firewall_cmd +from insights.parsers.firewall_cmd import FirewallCmdListALLZones +from insights.tests import context_wrap +from insights.parsers import ParseException +from insights.core.plugins import ContentException + +FIREWALL_LIST_ZONES_1 = """ +FirewallD is not running +""".strip() + +FIREWALL_LIST_ZONES_2 = """ +-bash: firewall-cmd: command not found +""".strip() + +FIREWALL_LIST_ZONES_3 = """ +block + target: %%REJECT%% + icmp-block-inversion: no + 
interfaces: + sources: + services: + ports: + protocols: + masquerade: no + forward-ports: + source-ports: + icmp-blocks: + rich rules: + + +dmz + target: default + icmp-block-inversion: no + interfaces: + sources: + services: ssh + ports: + protocols: + masquerade: no + forward-ports: + source-ports: + icmp-blocks: + rich rules: + + +public (active, default) + target: default + icmp-block-inversion: no + interfaces: eno1 + sources: + services: dhcpv6-client ssh + ports: + protocols: + masquerade: no + forward-ports: port=80:proto=tcp:toport=12345:toaddr= + port=81:proto=tcp:toport=1234:toaddr= + port=83:proto=tcp:toport=456:toaddr=10.72.47.45 + source-ports: + icmp-blocks: + rich rules: + rule family="ipv4" source address="10.0.0.0/24" destination address="192.168.0.10/32" port port="8080-8090" protocol="tcp" accept + rule family="ipv4" source address="10.0.0.0/24" destination address="192.168.0.10/32" port port="443" protocol="tcp" reject + rule family="ipv4" source address="192.168.0.10/24" reject + rule family="ipv6" source address="1:2:3:4:6::" forward-port port="4011" protocol="tcp" to-port="4012" to-addr="1::2:3:4:7" + + +trusted + target: ACCEPT + icmp-block-inversion: yes + interfaces: + sources: + services: + ports: + protocols: + masquerade: no + forward-ports: + source-ports: + icmp-blocks: + rich rules: +""".strip() + +FIREWALL_LIST_ZONES_4 = """ +public (active) + target: default + icmp-block-inversion: no + interfaces: eno1 + sources: + services: dhcpv6-client ssh + ports: + protocols: + masquerade: no + forward-ports: + source-ports + icmp-blocks + rich rules +""".strip() + + +def test_docs(): + env = { + 'zones': FirewallCmdListALLZones(context_wrap(FIREWALL_LIST_ZONES_3)) + } + failed, total = doctest.testmod(firewall_cmd, globs=env) + assert failed == 0 + + +def test_empty_content(): + with pytest.raises(ContentException): + FirewallCmdListALLZones(context_wrap(FIREWALL_LIST_ZONES_1)) + with pytest.raises(ContentException): + FirewallCmdListALLZones(context_wrap(FIREWALL_LIST_ZONES_2)) + with pytest.raises(ParseException): + FirewallCmdListALLZones(context_wrap(FIREWALL_LIST_ZONES_4)) + + +def test_firewall_info(): + zones = FirewallCmdListALLZones(context_wrap(FIREWALL_LIST_ZONES_3)) + assert 'trusted' not in zones.active_zones + assert zones.zones['public']['services'] == ['dhcpv6-client ssh'] + assert zones.zones['public']['icmp-block-inversion'] == ['no'] + assert zones.zones['trusted']['services'] == [] + assert zones.zones['trusted']['icmp-block-inversion'] == ['yes'] + zone_info = ['target', 'icmp-block-inversion', 'interfaces', 'sources', 'services', + 'ports', 'protocols', 'masquerade', 'forward-ports', 'source-ports', + 'icmp-blocks', 'rich rules'] + assert all(key in zones.zones['public'] for key in zone_info) + assert 'port=80:proto=tcp:toport=12345:toaddr=' in zones.zones['public']['forward-ports'] + assert 'port=83:proto=tcp:toport=456:toaddr=10.72.47.45' in zones.zones['public']['forward-ports'] + assert len(zones.zones['public']['forward-ports']) == 3 + assert len(zones.zones['public']['rich rules']) == 4 + assert 'active' in zones.zones['public']['_attributes'] + assert 'default' in zones.zones['public']['_attributes'] + assert 'rule family="ipv4" source address="10.0.0.0/24" destination address="192.168.0.10/32" port port="8080-8090" protocol="tcp" accept' in zones.zones['public']['rich rules'] + assert 'rule family="ipv4" source address="192.168.0.10/24" reject' in zones.zones['public']['rich rules'] diff --git a/insights/specs/__init__.py 
b/insights/specs/__init__.py index dfa337d81..890c7a831 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -157,6 +157,7 @@ class Specs(SpecSet): fdisk_l = RegistryPoint() fdisk_l_sos = RegistryPoint(multi_output=True) findmnt_lo_propagation = RegistryPoint() + firewall_cmd_list_all_zones = RegistryPoint() firewalld_conf = RegistryPoint(filterable=True) foreman_production_log = RegistryPoint(filterable=True) foreman_proxy_conf = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index c58506334..1fb8fa681 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -318,6 +318,7 @@ def dumpdev(broker): fcoeadm_i = simple_command("/usr/sbin/fcoeadm -i") fdisk_l = simple_command("/sbin/fdisk -l") findmnt_lo_propagation = simple_command("/bin/findmnt -lo+PROPAGATION") + firewall_cmd_list_all_zones = simple_command("/usr/bin/firewall-cmd --list-all-zones") firewalld_conf = simple_file("/etc/firewalld/firewalld.conf") foreman_production_log = simple_file("/var/log/foreman/production.log") foreman_proxy_conf = simple_file("/etc/foreman-proxy/settings.yml") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 163d49eab..6fe3373f2 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -69,6 +69,7 @@ class InsightsArchiveSpecs(Specs): fcoeadm_i = simple_file("insights_commands/fcoeadm_-i") fdisk_l = simple_file("insights_commands/fdisk_-l") findmnt_lo_propagation = simple_file("insights_commands/findmnt_-lo_PROPAGATION") + firewall_cmd_list_all_zones = simple_file("insights_commands/firewall-cmd_--list-all-zones") foreman_rake_db_migrate_status = simple_file('insights_commands/foreman-rake_db_migrate_status') getcert_list = simple_file("insights_commands/getcert_list") getconf_page_size = simple_file("insights_commands/getconf_PAGE_SIZE") From 7b82a808c6d9c4e761862e3f82dc93d142324b66 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 2 Apr 2020 03:36:55 +0800 Subject: [PATCH 014/892] Update DiskFree to return block_size (for sosreport) (#2505) * Change specs of df to 'df -alk' and 'df -alPk' - Add an interface to return the block size to compatible with sos archives - Remove the deprecated `shared` in examples - Add test and refine the Examples - Remove the dependant on defaultdict - Fix: change the return value of two properties to list Signed-off-by: Xiangce Liu * Remove the '-k' from command for insights-archives Signed-off-by: Xiangce Liu * Return the integer block_size directly Signed-off-by: Xiangce Liu * Add raw_block_size and test Signed-off-by: Xiangce Liu * Fix doc error Signed-off-by: Xiangce Liu * Fix bug in calculating and test Signed-off-by: Xiangce Liu --- insights/parsers/df.py | 268 ++++++++++++++++++++---------- insights/parsers/tests/test_df.py | 109 +++++++++++- 2 files changed, 285 insertions(+), 92 deletions(-) diff --git a/insights/parsers/df.py b/insights/parsers/df.py index 4c584e604..5f0e44250 100644 --- a/insights/parsers/df.py +++ b/insights/parsers/df.py @@ -4,25 +4,10 @@ Module for the processing of output from the ``df`` command. The base class ``DiskFree`` provides all of the functionality for all classes. -Data is avaliable as rows of the output contained -in one ``Record`` object for each line of output. 
- -Sample input data for the ``df -li`` command looks like:: - - Filesystem Inodes IUsed IFree IUse% Mounted on - /dev/mapper/vg_lxcrhel6sat56-lv_root - 6275072 124955 6150117 2% / - devtmpfs 1497120 532 1496588 1% /dev - tmpfs 1499684 331 1499353 1% /dev/shm - tmpfs 1499684 728 1498956 1% /run - tmpfs 1499684 16 1499668 1% /sys/fs/cgroup - tmpfs 1499684 54 1499630 1% /tmp - /dev/sda2 106954752 298662 106656090 1% /home - /dev/sda1 128016 429 127587 1% /boot - tmpfs 1499684 6 1499678 1% /V M T o o l s - tmpfs 1499684 15 1499669 1% /VM Tools - -This module provides two parsers: +Data is avaliable as rows of the output contained in one ``Record`` object +for each line of output. + +Parsers contained in this module are: DiskFree_LI - command ``df -li`` -------------------------------- @@ -33,31 +18,9 @@ DiskFree_AL - command ``df -al`` -------------------------------- -This example demonstrates the ``DiskFree_LI`` class but all classes will provide -the same functionality. - -Examples: - >>> df_info = shared[DiskFree_LI] - >>> df_info.filesystem_names - ['tmpfs', '/dev/mapper/vg_lxcrhel6sat56-lv_root', 'devtmpfs', '/dev/sda2', '/dev/sda1'] - >>> df_info.get_filesystem('/dev/sda2') - [Record(filesystem='/dev/sda2', total='106954752', used='298662', available='106656090', capacity='1%', mounted_on='/home')] - >>> df_info.mount_names - ['/tmp', '/home', '/dev', '/boot', '/VM Tools', '/sys/fs/cgroup', '/', '/run', '/V M T o o l s', '/dev/shm'] - >>> df_info.get_mount('/boot') - Record(filesystem='/dev/sda1', total='128016', used='429', available='127587', capacity='1%', mounted_on='/boot') - >>> len(df_info) - 10 - >>> [d.mounted_on for d in df_info if 'sda' in d.filesystem] - ['/home', '/boot'] - >>> df_info.data[0].filesystem - '/dev/mapper/vg_lxcrhel6sat56-lv_root' - >>> df_info.data[0] - Record(filesystem='/dev/mapper/vg_lxcrhel6sat56-lv_root', total='6275072', used='124955', available='6150117', capacity='2%', mounted_on='/') """ -from .. import parser, CommandParser -from collections import namedtuple, defaultdict - +from collections import namedtuple +from insights import parser, CommandParser from insights.parsers import ParseException from insights.specs import Specs @@ -85,10 +48,10 @@ def parse_df_lines(df_content): in terms of inodes:: - filesystem: Name of the filesystem - - total: total number of resources on the filesystem - - used: number of the resources used on the filesystem - - available: number of the resource available on the filesystem - - capacity: percentage of the resource used on the filesystem + - total (str): total number of resources on the filesystem + - used (str): number of the resources used on the filesystem + - available (str): number of the resource available on the filesystem + - capacity (str): percentage of the resource used on the filesystem - mounted_on: mount point of the filesystem """ df_ls = {} @@ -126,7 +89,8 @@ def parse_df_lines(df_content): class DiskFree(CommandParser): - """Class to provide methods used by all ``df`` command classes. + """ + Class to provide methods used by all ``df`` command classes. Attributes: data (list of Record): List of ``Record`` objects for each line of command @@ -136,15 +100,20 @@ class DiskFree(CommandParser): dictionary is keyed by the ``filesystem`` value of the Record. mounts (dict): Dictionary with each entry being a ``Record`` object corresponding to the ``mounted_on`` key. + + Raises: + ParseException: When there are lines cannot be parsed or the + ``block size`` cannot be recognized. 
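+
+    Note:
+        ``block_size`` is only populated when the header advertises a
+        block size (``1024-blocks``, ``1K-blocks``, ...); for inode
+        listings such as ``df -li`` it stays ``None``, so consumers
+        should guard any conversion, e.g.
+        ``int(rec.used) * df.block_size if df.block_size else None``.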
""" def __init__(self, context): super(DiskFree, self).__init__(context) - filesystems = defaultdict(list) + self.filesystems = {} self.mounts = {} for datum in self.data: - filesystems[datum.filesystem].append(datum) + if datum.filesystem not in self.filesystems: + self.filesystems[datum.filesystem] = [] + self.filesystems[datum.filesystem].append(datum) self.mounts[datum.mounted_on] = datum - self.filesystems = dict(filesystems) def __len__(self): return len(self.data) @@ -154,14 +123,54 @@ def __iter__(self): yield row def parse_content(self, content): + def _digital_block_size(_block_size): + """ + # man df + SIZE may be (or may be an integer optionally followed by) one + of following: KB 1000, K 1024, MB 1000*1000, M 1024*1024, and so + on for G, T, P, E, Z, Y. + """ + units = { + '': 1, + 'B': 1, + 'K': 1024, + 'KB': 1000, + 'M': 1024 * 1024, + 'MB': 1000 * 1000, + 'G': 1024 * 1024 * 1024, + 'GB': 1000 * 1000 * 1000, + 'T': 1024 * 1024 * 1024 * 1024, + 'TB': 1000 * 1000 * 1000 * 1000, + 'P': 1024 * 1024 * 1024 * 1024 * 1024, + 'PB': 1000 * 1000 * 1000 * 1000 * 1000, + 'E': 1024 * 1024 * 1024 * 1024 * 1024 * 1024, + 'EB': 1000 * 1000 * 1000 * 1000 * 1000 * 1000, + 'Z': 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, + 'ZB': 1000 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000, + 'Y': 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024, + 'YB': 1000 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000 * 1000, + } + suffix = _block_size[-2:].lstrip('0123456789') + suffix_up = suffix.upper() + if suffix_up in units: + return units[suffix_up] * int(_block_size.rstrip('kKMGTPEZYB')) + raise ParseException("Unknown block size: '{0}'".format(suffix)) + bad_lines = ["no such file or directory"] content = [l for l in content if bad_lines[0] not in l.lower()] + self.block_size = self.raw_block_size = None + # Get the block_size when there is such column + header = content[0] + if 'blocks' in header: + block_size = [i.split('-')[0] for i in header.split() if 'blocks' in i][0] + self.raw_block_size = block_size + self.block_size = _digital_block_size(block_size) self.data = parse_df_lines(content) @property def filesystem_names(self): """list: Returns list of unique filesystem names.""" - return self.filesystems.keys() + return sorted(self.filesystems.keys()) def get_filesystem(self, name): """str: Returns list of Record objects for filesystem ``name``.""" @@ -170,7 +179,7 @@ def get_filesystem(self, name): @property def mount_names(self): """list: Returns list of unique mount point names.""" - return self.mounts.keys() + return sorted(self.mounts.keys()) def get_mount(self, name): """Record: Returns Record obj for mount point ``name``.""" @@ -195,16 +204,15 @@ class DiskFree_LI(DiskFree): Typical content of the ``df -li`` command output looks like:: - Filesystem Inodes IUsed IFree IUse% Mounted on - /dev/mapper/vg_lxcrhel6sat56-lv_root - 6275072 124955 6150117 2% / - devtmpfs 1497120 532 1496588 1% /dev - tmpfs 1499684 331 1499353 1% /dev/shm - tmpfs 1499684 728 1498956 1% /tmp - /dev/sda2 106954752 298662 106656090 1% /home - /dev/sda1 128016 429 127587 1% /boot - tmpfs 1499684 6 1499678 1% /run/user/988 - tmpfs 1499684 15 1499669 1% /run/user/100 + Filesystem Inodes IUsed IFree IUse% Mounted on + devtmpfs 242224 359 241865 1% /dev + tmpfs 246028 1 246027 1% /dev/shm + tmpfs 246028 491 245537 1% /run + tmpfs 246028 17 246011 1% /sys/fs/cgroup + /dev/sda2 8911872 58130 8853742 1% / + /dev/sdb1 26213888 19662 26194226 1% /opt/data + /dev/sda1 524288 306 523982 1% /boot + tmpfs 246028 5 246023 1% /run/user/0 
Attributes: data (list): A list of the ``df`` information with one ``Record`` object for @@ -219,6 +227,24 @@ class DiskFree_LI(DiskFree): IFree available IUse% capacity Mounted on mounted_on + + Examples: + >>> len(df_li) + 8 + >>> len(df_li.filesystem_names) + 5 + >>> df_li.get_filesystem('/dev/sda1')[0].mounted_on == '/boot' + True + >>> '/opt/data' in df_li.mount_names + True + >>> df_li.get_mount('/sys/fs/cgroup').available == '246011' + True + >>> [d.mounted_on for d in df_li if 'sda' in d.filesystem] == ['/', '/boot'] + True + >>> df_li.data[0].filesystem == 'devtmpfs' + True + >>> df_li.data[0].capacity == '1%' + True """ pass @@ -229,17 +255,27 @@ class DiskFree_ALP(DiskFree): Typical content of the ``df -alP`` command looks like:: - Filesystem 1024-blocks Used Available Capacity Mounted on - /dev/mapper/vg_lxcrhel6sat56-lv_root 98571884 4244032 89313940 5% / - sysfs 0 0 0 - /sys - proc 0 0 0 - /proc - devtmpfs 5988480 0 5988480 0% /dev - securityfs 0 0 0 - /sys/kernel/security - tmpfs 5998736 491660 5507076 9% /dev/shm - devpts 0 0 0 - /dev/pts - tmpfs 5998736 1380 5997356 1% /run - tmpfs 5998736 0 5998736 0% /sys/fs/cgroup - cgroup 0 0 0 - /sys/fs/cgroup/systemd + Filesystem 1024-blocks Used Available Capacity Mounted on + sysfs 0 0 0 - /sys + proc 0 0 0 - /proc + devtmpfs 968896 0 968896 0% /dev + securityfs 0 0 0 - /sys/kernel/security + tmpfs 984112 0 984112 0% /dev/shm + devpts 0 0 0 - /dev/pts + tmpfs 984112 8660 975452 1% /run + tmpfs 984112 0 984112 0% /sys/fs/cgroup + cgroup 0 0 0 - /sys/fs/cgroup/systemd + cgroup 0 0 0 - /sys/fs/cgroup/pids + cgroup 0 0 0 - /sys/fs/cgroup/rdma + configfs 0 0 0 - /sys/kernel/config + /dev/sda2 17813504 2127172 15686332 12% / + selinuxfs 0 0 0 - /sys/fs/selinux + systemd-1 - - - - /proc/sys/fs/binfmt_misc + debugfs 0 0 0 - /sys/kernel/debug + mqueue 0 0 0 - /dev/mqueue + hugetlbfs 0 0 0 - /dev/hugepages + /dev/sdb1 52402180 1088148 51314032 3% /V M T o o l s + /dev/sda1 1038336 185676 852660 18% /boot Attributes: data (list): A list of the ``df`` information with one ``Record`` object for @@ -254,6 +290,32 @@ class DiskFree_ALP(DiskFree): Available available Capacity capacity Mounted on mounted_on + raw_block_size (str): The unit of display values. + block_size (int): The unit of display values, which is converted to integer. 
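+
+    Since the header advertises ``1024-blocks``, ``block_size`` resolves
+    to the integer ``1024`` and values convert to bytes by simple
+    multiplication, as the examples below show.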
+ + Examples: + >>> len(df_alP) + 20 + >>> len(df_alP.filesystem_names) + 16 + >>> df_alP.raw_block_size + '1024' + >>> df_alP.block_size + 1024 + >>> df_alP.get_filesystem('/dev/sda2')[0].mounted_on == '/' + True + >>> '/V M T o o l s' in df_alP.mount_names + True + >>> df_alP.get_mount('/boot').available + '852660' + >>> int(int(df_alP.get_mount('/boot').available) * df_alP.block_size / 1024) # to KB + 852660 + >>> int(int(df_alP.get_mount('/boot').available) * df_alP.block_size / 1024 / 1024) # to MB + 832 + >>> [d.mounted_on for d in df_alP if 'sda' in d.filesystem] == ['/', '/boot'] + True + >>> df_alP.data[0].filesystem == 'sysfs' + True """ pass @@ -264,17 +326,27 @@ class DiskFree_AL(DiskFree): Typical content of the ``df -al`` command looks like:: - Filesystem 1K-blocks Used Available Use% Mounted on - /dev/mapper/vg_lxcrhel6sat56-lv_root 98571884 4244032 89313940 5% / - sysfs 0 0 0 - /sys - proc 0 0 0 - /proc - devtmpfs 5988480 0 5988480 0% /dev - securityfs 0 0 0 - /sys/kernel/security - tmpfs 5998736 491660 5507076 9% /dev/shm - devpts 0 0 0 - /dev/pts - tmpfs 5998736 1380 5997356 1% /run - tmpfs 5998736 0 5998736 0% /sys/fs/cgroup - cgroup 0 0 0 - /sys/fs/cgroup/systemd + Filesystem 1K-blocks Used Available Use% Mounted on + sysfs 0 0 0 - /sys + proc 0 0 0 - /proc + devtmpfs 968896 0 968896 0% /dev + securityfs 0 0 0 - /sys/kernel/security + tmpfs 984112 0 984112 0% /dev/shm + devpts 0 0 0 - /dev/pts + tmpfs 984112 8660 975452 1% /run + tmpfs 984112 0 984112 0% /sys/fs/cgroup + cgroup 0 0 0 - /sys/fs/cgroup/systemd + cgroup 0 0 0 - /sys/fs/cgroup/pids + cgroup 0 0 0 - /sys/fs/cgroup/rdma + configfs 0 0 0 - /sys/kernel/config + /dev/sda2 17813504 2127172 15686332 12% / + selinuxfs 0 0 0 - /sys/fs/selinux + systemd-1 - - - - /proc/sys/fs/binfmt_misc + debugfs 0 0 0 - /sys/kernel/debug + mqueue 0 0 0 - /dev/mqueue + hugetlbfs 0 0 0 - /dev/hugepages + /dev/sdb1 52402180 1088148 51314032 3% /V M T o o l s + /dev/sda1 1038336 185676 852660 18% /boot Attributes: data (list): A list of the ``df`` information with one ``Record`` object for @@ -289,5 +361,31 @@ class DiskFree_AL(DiskFree): Available available Use% capacity Mounted on mounted_on + raw_block_size (str): The unit of display values. + block_size (int): The unit of display values, which is converted to integer. 
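+
+    Here the header reads ``1K-blocks``; ``raw_block_size`` keeps the
+    literal string ``'1K'`` while ``block_size`` is normalised to the
+    integer ``1024``, as the examples below show.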
+ + Examples: + >>> len(df_al) + 20 + >>> len(df_al.filesystem_names) + 16 + >>> df_al.raw_block_size + '1K' + >>> df_al.block_size + 1024 + >>> df_al.get_filesystem('/dev/sda2')[0].mounted_on == '/' + True + >>> '/V M T o o l s' in df_al.mount_names + True + >>> df_al.get_mount('/').used + '2127172' + >>> int(int(df_al.get_mount('/').used) * df_alP.block_size / 1024) # to KB + 2127172 + >>> int(int(df_al.get_mount('/').used) * df_alP.block_size / 1024 / 1024) # to MB + 2077 + >>> [d.mounted_on for d in df_al if 'sda' in d.filesystem] == ['/', '/boot'] + True + >>> df_al.data[0].filesystem == 'sysfs' + True """ pass diff --git a/insights/parsers/tests/test_df.py b/insights/parsers/tests/test_df.py index b8ef56d16..cc8076bb4 100644 --- a/insights/parsers/tests/test_df.py +++ b/insights/parsers/tests/test_df.py @@ -1,8 +1,9 @@ +import pytest +import doctest + from insights.parsers import df, ParseException from insights.tests import context_wrap -import pytest - DF_LI = """ Filesystem Inodes IUsed IFree IUse% Mounted on /dev/mapper/vg_lxcrhel6sat56-lv_root @@ -16,8 +17,8 @@ tmpfs 1499684 54 1499630 1% /tmp /dev/sda2 106954752 298662 106656090 1% /home /dev/sda1 128016 429 127587 1% /boot -tmpfs 1499684 6 1499678 1% /V M T o o l s -tmpfs 1499684 15 1499669 1% /VM Tools +/dev/sdb1 1499684 6 1499678 1% /V M T o o l s +/dev/sdb2 1499684 15 1499669 1% /VM Tools """.strip() @@ -25,7 +26,7 @@ def test_df_li(): df_list = df.DiskFree_LI(context_wrap(DF_LI)) assert len(df_list) == 10 assert len(df_list.mounts) == 10 - assert len(df_list.filesystems) == 5 + assert len(df_list.filesystems) == 7 assert '/home' in df_list.mounts r = df.Record( filesystem='/dev/sda2', @@ -40,7 +41,7 @@ def test_df_li(): assert '/dev/sda2' in df_list.filesystems assert len(df_list.get_filesystem('/dev/sda2')) == 1 assert df_list.get_filesystem('/dev/sda2')[0] == r - assert len(df_list.get_filesystem('tmpfs')) == 6 + assert len(df_list.get_filesystem('tmpfs')) == 4 assert df_list.get_mount('/dev').filesystem == 'devtmpfs' assert df_list.get_mount('/run').total == '1499684' assert df_list.get_mount('/tmp').used == '54' @@ -58,7 +59,7 @@ def test_df_li(): assert sorted(df_list.mount_names) == sorted_mount_names assert sorted(df_list.filesystem_names) == sorted([ '/dev/mapper/vg_lxcrhel6sat56-lv_root', 'devtmpfs', 'tmpfs', - '/dev/sda2', '/dev/sda1' + '/dev/sda2', '/dev/sda1', '/dev/sdb2', '/dev/sdb1' ]) # Test get_path @@ -204,9 +205,103 @@ def test_df_al(): sysfs 0 """ +DF_AL_BAD_BS = """ +Filesystem 1a-blocks Used Available Use% Mounted on +/dev/mapper/vg_lxcrhel6sat56-lv_root 98571884 4244032 89313940 5% / +""" + def test_df_al_bad(): with pytest.raises(ParseException) as exc: df_list = df.DiskFree_AL(context_wrap(DF_AL_BAD)) assert len(df_list) == 2 assert 'Could not parse line' in str(exc) + + with pytest.raises(ParseException) as exc: + df_list = df.DiskFree_AL(context_wrap(DF_AL_BAD_BS)) + assert 'Unknown block size' in str(exc) + +DF_AL_BS_2MB = """ +Filesystem 2MB-blocks Used Available Use% Mounted on +/dev/vda3 62031 49197 9680 84% / +""" + + +def test_df_al_2MB(): + df_list = df.DiskFree_LI(context_wrap(DF_AL_BS_2MB)) + root = df_list.get_mount('/') + assert root.filesystem == '/dev/vda3' + assert root.total == '62031' + assert df_list.raw_block_size == '2MB' + assert df_list.block_size == 2000000 + assert int(root.total) * df_list.block_size == 124062000000 # To Bytes + +DF_LI_DOC = """ +Filesystem Inodes IUsed IFree IUse% Mounted on +devtmpfs 242224 359 241865 1% /dev +tmpfs 246028 1 246027 1% /dev/shm +tmpfs 
246028 491 245537 1% /run +tmpfs 246028 17 246011 1% /sys/fs/cgroup +/dev/sda2 8911872 58130 8853742 1% / +/dev/sdb1 26213888 19662 26194226 1% /opt/data +/dev/sda1 524288 306 523982 1% /boot +tmpfs 246028 5 246023 1% /run/user/0 +""".strip() + +DF_ALP_DOC = """ +Filesystem 1024-blocks Used Available Capacity Mounted on +sysfs 0 0 0 - /sys +proc 0 0 0 - /proc +devtmpfs 968896 0 968896 0% /dev +securityfs 0 0 0 - /sys/kernel/security +tmpfs 984112 0 984112 0% /dev/shm +devpts 0 0 0 - /dev/pts +tmpfs 984112 8660 975452 1% /run +tmpfs 984112 0 984112 0% /sys/fs/cgroup +cgroup 0 0 0 - /sys/fs/cgroup/systemd +cgroup 0 0 0 - /sys/fs/cgroup/pids +cgroup 0 0 0 - /sys/fs/cgroup/rdma +configfs 0 0 0 - /sys/kernel/config +/dev/sda2 17813504 2127172 15686332 12% / +selinuxfs 0 0 0 - /sys/fs/selinux +systemd-1 - - - - /proc/sys/fs/binfmt_misc +debugfs 0 0 0 - /sys/kernel/debug +mqueue 0 0 0 - /dev/mqueue +hugetlbfs 0 0 0 - /dev/hugepages +/dev/sdb1 52402180 1088148 51314032 3% /V M T o o l s +/dev/sda1 1038336 185676 852660 18% /boot +""".strip() + +DF_AL_DOC = """ +Filesystem 1K-blocks Used Available Use% Mounted on +sysfs 0 0 0 - /sys +proc 0 0 0 - /proc +devtmpfs 968896 0 968896 0% /dev +securityfs 0 0 0 - /sys/kernel/security +tmpfs 984112 0 984112 0% /dev/shm +devpts 0 0 0 - /dev/pts +tmpfs 984112 8660 975452 1% /run +tmpfs 984112 0 984112 0% /sys/fs/cgroup +cgroup 0 0 0 - /sys/fs/cgroup/systemd +cgroup 0 0 0 - /sys/fs/cgroup/pids +cgroup 0 0 0 - /sys/fs/cgroup/rdma +configfs 0 0 0 - /sys/kernel/config +/dev/sda2 17813504 2127172 15686332 12% / +selinuxfs 0 0 0 - /sys/fs/selinux +systemd-1 - - - - /proc/sys/fs/binfmt_misc +debugfs 0 0 0 - /sys/kernel/debug +mqueue 0 0 0 - /dev/mqueue +hugetlbfs 0 0 0 - /dev/hugepages +/dev/sdb1 52402180 1088148 51314032 3% /V M T o o l s +/dev/sda1 1038336 185676 852660 18% /boot +""".strip() + + +def test_doc_examples(): + env = { + 'df_li': df.DiskFree_LI(context_wrap(DF_LI_DOC)), + 'df_al': df.DiskFree_AL(context_wrap(DF_AL_DOC)), + 'df_alP': df.DiskFree_ALP(context_wrap(DF_ALP_DOC)), + } + failed, total = doctest.testmod(df, globs=env) + assert failed == 0 From f4bed7eeba85507a39571fe731fe7fce23248cde Mon Sep 17 00:00:00 2001 From: Stephen Date: Wed, 1 Apr 2020 16:04:16 -0400 Subject: [PATCH 015/892] Add new filter method (#2517) * Fix typo in filters.py Signed-off-by: Stephen Adams * Add filters.dump() to apply_spec_filters Now when we apply_spec_filters, we also do a filters.dump() in order to get the new filters added to the egg. This is to simplify the release process. 
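
As a side effect, running the tool now also persists the merged filter
set via filters.dump(); called with no stream, dump() writes
filters.yaml next to the insights package, which is the path the
earlier .gitignore change (#2516) keeps out of the repo.
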
Signed-off-by: Stephen Adams --- insights/core/filters.py | 2 +- insights/tools/apply_spec_filters.py | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/insights/core/filters.py b/insights/core/filters.py index c2893a233..22cbd320c 100644 --- a/insights/core/filters.py +++ b/insights/core/filters.py @@ -198,5 +198,5 @@ def dump(stream=None): stream.write(dumps()) else: path = os.path.join(os.path.dirname(insights.__file__), _filename) - with open(path, "wu") as f: + with open(path, "w") as f: f.write(dumps()) diff --git a/insights/tools/apply_spec_filters.py b/insights/tools/apply_spec_filters.py index 40bf32df1..4bf4df5bf 100644 --- a/insights/tools/apply_spec_filters.py +++ b/insights/tools/apply_spec_filters.py @@ -10,6 +10,7 @@ from insights import dr, get_filters from insights.core.spec_factory import RegistryPoint from insights.specs import Specs +from insights.core import filters if len(sys.argv) < 3: print("Provide uploader.json location and packages to load") @@ -28,9 +29,11 @@ dr.load_components("insights.parsers") dr.load_components("insights.combiners") + for package in sys.argv[2:]: dr.load_components(package) +filters.dump() specs = sorted(vars(Specs)) filters = {} for spec in specs: From 15303d06a935a62fd770bca77d64c2abc3eb26b2 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 2 Apr 2020 08:17:24 +0800 Subject: [PATCH 016/892] Add base class YumListRpm for 'yum list' commands (#2525) * Add base class YumListRpm for 'yum list' commands Signed-off-by: Xiangce Liu * fix ng test in python2 Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/yum_list.rst | 3 + .../parsers/tests/test_yum_list_installed.py | 13 +- insights/parsers/yum_list.py | 200 ++++++++++++++++++ insights/parsers/yum_list_installed.py | 60 +----- 4 files changed, 222 insertions(+), 54 deletions(-) create mode 100644 docs/shared_parsers_catalog/yum_list.rst create mode 100644 insights/parsers/yum_list.py diff --git a/docs/shared_parsers_catalog/yum_list.rst b/docs/shared_parsers_catalog/yum_list.rst new file mode 100644 index 000000000..aceecdd6a --- /dev/null +++ b/docs/shared_parsers_catalog/yum_list.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.yum_list + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_yum_list_installed.py b/insights/parsers/tests/test_yum_list_installed.py index 812db3ca7..162069191 100644 --- a/insights/parsers/tests/test_yum_list_installed.py +++ b/insights/parsers/tests/test_yum_list_installed.py @@ -1,7 +1,9 @@ import pytest +import doctest from insights import SkipComponent -from insights.parsers.yum_list_installed import YumListInstalled +from insights.parsers import yum_list +from insights.parsers.yum_list import YumListInstalled from insights.tests import context_wrap @@ -45,6 +47,7 @@ HEADER_FOOTER_JUNK = """ Loaded plugins: product-id, search-disabled-repos, subscription-manager +Repodata is over 2 weeks old. Install yum-cron? 
Or run: yum makecache fast Installed Packages GConf2.x86_64 3.2.6-8.el7 @rhel-7-server-rpms GeoIP.x86_64 1.5.0-11.el7 @anaconda/7.3 @@ -157,3 +160,11 @@ def test_multiple_stanza(): assert rpm.release == "9.el7" assert rpm.arch == "noarch" assert rpm.repo == "installed" + + +def test_doc_examples(): + env = { + 'installed_rpms': YumListInstalled(context_wrap(HEADER_FOOTER_JUNK)), + } + failed, total = doctest.testmod(yum_list, globs=env) + assert failed == 0 diff --git a/insights/parsers/yum_list.py b/insights/parsers/yum_list.py new file mode 100644 index 000000000..2ee562a3b --- /dev/null +++ b/insights/parsers/yum_list.py @@ -0,0 +1,200 @@ +""" +Yum List Command +================ + +The parsers contains in this module are: + +YumListInstalled - Command ``yum list installed`` +------------------------------------------------- + +""" +from collections import defaultdict + +from insights import CommandParser, parser, SkipComponent +from insights.specs import Specs +from insights.parsers.installed_rpms import InstalledRpm, RpmList + + +class YumListRpm(InstalledRpm): + """ + The same as :py:class:`insights.parsers.installed_rpms.InstalledRpm` but + with an additional ``.repo`` attribute. + """ + def __init__(self, data): + self.repo = None + """str: yum / dnf repository name, if available.""" + + super(YumListRpm, self).__init__(data) + + +class YumListBase(CommandParser, RpmList): + """ + Base class for the ``yum list [installed|available]`` commands. Each line + is parsed and stored in a ``YumListRpm`` object. + + .. note:: + + ``YumListBase`` shares the :py:class:`insights.parsers.installed_rpms.RpmList` + interface with :py:class:`insights.parsers.installed_rpms.InstalledRpms`. + The only difference is ``YumListBase`` takes the output of ``yum list`` as + its source data, and the :py:class:`YumListRpm` instances it produces + contain a ``.repo`` attribute. + """ + + def __init__(self, context): + self.expired_cache = False + """bool: Indicates if the yum repo cache is expired.""" + + super(YumListBase, self).__init__(context) + + def _find_start(self, content): + for i, c in enumerate(content): + if 'Repodata is over 2 weeks old' in c: + self.expired_cache = True + elif c == "Installed Packages": + break + return i + 1 + + def _get_rows(self, content): + """ + Yields: + a list per row of the following form: + + .. code:: python + + [ + , + <[epoch:]version-release>, + + ] + """ + start = self._find_start(content) + if start == len(content): + raise SkipComponent() + + # join hanging wrapped lines together into a single line. + # see https://bugzilla.redhat.com/show_bug.cgi?id=584525 + cur = [] + for line in content[start:]: + if not cur: + cur.append(line.strip()) + elif line.startswith(" "): + cur.append(line.strip()) + else: + yield " ".join(cur).split() + cur = [line.strip()] + + if cur: + yield " ".join(cur).split() + + def _make_record(self, package, ver_rel, repo): + """ + Given the fields of a ``yum list`` row, return a dictionary + of name, version, release, epoch, arch, and repo. + """ + name, _, arch = package.rpartition(".") + repo = repo.lstrip("@") + + # Kept as string in InstalledRpm. Duplicating here for consistency. + epoch = "0" + if ":" in ver_rel: + epoch, ver_rel = ver_rel.split(":", 1) + version, release = ver_rel.split("-") + + # This is special cased for InstalledRpm. Duplicating here for + # consistency. 
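
+        # e.g. (illustrative): a name.arch value of 'oracleasm-2.6.18-194.el5.x86_64'
+        # with a ver_rel of '2.0.5-1.el5' first parses to
+        # name='oracleasm-2.6.18-194.el5' and version='2.0.5'; the block below
+        # restores name='oracleasm' and version='2.6.18-194.el5-2.0.5'.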
+ if name.startswith('oracleasm') and name.endswith('.el5'): + name, version2 = name.split('-', 1) + version = version2 + '-' + version + + return {"name": name, "version": version, "release": release, "epoch": + epoch, "arch": arch, "repo": repo} + + def _unknown_row(self, row): + """ + Heuristic to tell us we've hit the bottom of the Installed + Packages stanza. + """ + return len(row) != 3 or row[:2] == ["Loaded", "plugins:"] + + def parse_content(self, content): + """ + ``yum list`` output is basically tabular with an ignorable + set of rows at the top and a line "Installed Packages" that designates + the following rows as data. Each column has a maximum width, and if any + column overflows, the following columns wrap to the next line and + indent to their usual starting positions. It's also possible for the + data rows to be followed by more lines that should be ignored. Since + ``yum list`` is for human consumption, the footer lines can be + syntactically ambiguous with data lines. We use heuristics to check for + an invalid row to signal the end of data. + """ + + packages = defaultdict(list) + for row in self._get_rows(content): + if self._unknown_row(row): + break + rec = self._make_record(*row) + packages[rec["name"]].append(YumListRpm(rec)) + self.packages = dict(packages) + + +@parser(Specs.yum_list_installed) +class YumListInstalled(YumListBase): + """ + The ``YumListInstalled`` class parses the output of the ``yum list installed`` + command. Each line is parsed and stored in a ``YumListRpm`` object. + + Sample input data:: + + Repodata is over 2 weeks old. Install yum-cron? Or run: yum makecache fast + Loaded plugins: product-id, search-disabled-repos, subscription-manager + Installed Packages + GConf2.x86_64 3.2.6-8.el7 @rhel-7-server-rpms + GeoIP.x86_64 1.5.0-11.el7 @anaconda/7.3 + ImageMagick.x86_64 6.7.8.9-15.el7_2 @rhel-7-server-rpms + NetworkManager.x86_64 1:1.4.0-17.el7_3 installed + NetworkManager.x86_64 1:1.8.0-9.el7 installed + NetworkManager-config-server.noarch + 1:1.8.0-9.el7 installed + Uploading Enabled Repositories Report + Loaded plugins: priorities, product-id, rhnplugin, rhui-lb, subscription- + : manager, versionlock + + Examples: + >>> type(installed_rpms) + + >>> 'GeoIP' in installed_rpms + True + >>> installed_rpms.get_max('GeoIP') + 0:GeoIP-1.5.0-11.el7 + >>> installed_rpms.expired_cache + True + >>> type(installed_rpms.get_max('GeoIP')) + + >>> rpm1 = installed_rpms.get_max('GeoIP') + >>> rpm1.package == 'GeoIP-1.5.0-11.el7' + True + >>> rpm1.nvr == 'GeoIP-1.5.0-11.el7' + True + >>> rpm1.source + >>> rpm1.name + 'GeoIP' + >>> rpm1.version + '1.5.0' + >>> rpm1.release + '11.el7' + >>> rpm1.arch + 'x86_64' + >>> rpm1.epoch + '0' + >>> from insights.parsers.yum_list import YumListRpm + >>> rpm2 = YumListRpm.from_package('GeoIP-1.6.0-11.el7.x86_64') + >>> rpm1 == rpm2 + False + >>> rpm1 > rpm2 + False + >>> rpm1 < rpm2 + True + """ + pass diff --git a/insights/parsers/yum_list_installed.py b/insights/parsers/yum_list_installed.py index 3e7489dae..ca9f47082 100644 --- a/insights/parsers/yum_list_installed.py +++ b/insights/parsers/yum_list_installed.py @@ -1,65 +1,17 @@ """ YumListInstalled - Command ``yum list installed`` ================================================= -The ``YumListInstalled`` class parses the output of the ``yum list installed`` -command. Each line is parsed and stored in a ``YumInstalledRpm`` object. 
- -Sample input data:: - - Loaded plugins: product-id, search-disabled-repos, subscription-manager - Installed Packages - GConf2.x86_64 3.2.6-8.el7 @rhel-7-server-rpms - GeoIP.x86_64 1.5.0-11.el7 @anaconda/7.3 - ImageMagick.x86_64 6.7.8.9-15.el7_2 @rhel-7-server-rpms - NetworkManager.x86_64 1:1.4.0-17.el7_3 installed - NetworkManager.x86_64 1:1.8.0-9.el7 installed - NetworkManager-config-server.noarch - 1:1.8.0-9.el7 installed - Uploading Enabled Repositories Report - Loaded plugins: priorities, product-id, rhnplugin, rhui-lb, subscription- - : manager, versionlock - -Examples: - >>> type(rpms) - - >>> 'GeoIP' in rpms - True - >>> rpms.get_max('GeoIP') - 0:GeoIP-1.5.0-11.el7 - >>> rpms.expired_cache - True - >>> type(rpms.get_max('GeoIP')) - - >>> rpm = rpms.get_max('GeoIP') - >>> rpm.package - 'GeoIP-1.5.0-11.el7' - >>> rpm.nvr - 'GeoIP-1.5.0-11.el7' - >>> rpm.source - >>> rpm.name - 'GeoIP' - >>> rpm.version - '1.5.0' - >>> rpm.release - '11.el7' - >>> rpm.arch - 'x86_64' - >>> rpm.epoch - '0' - >>> from insights.parsers.yum_list_installed import YumInstalledRpm - >>> rpm2 = YumInstalledRpm.from_package('GeoIP-1.6.0-11.el7.x86_64') - >>> rpm == rpm2 - False - >>> rpm > rpm2 - False - >>> rpm < rpm2 - True + +.. warning:: + This module is deprecated, please import the parsers in + :py:mod:`insights.parsers.yum_list` instead. """ from collections import defaultdict from insights import CommandParser, parser, SkipComponent from insights.specs import Specs from insights.parsers.installed_rpms import InstalledRpm, RpmList +from insights.util import deprecated class YumInstalledRpm(InstalledRpm): @@ -68,6 +20,7 @@ class YumInstalledRpm(InstalledRpm): with an additional ``.repo`` attribute. """ def __init__(self, data): + deprecated(YumInstalledRpm, "Import YumListRpm from insights.parsers.yum_list instead") self.repo = None """str: yum / dnf repository name, if available.""" @@ -85,6 +38,7 @@ class YumListInstalled(CommandParser, RpmList): """ def __init__(self, context): + deprecated(YumListInstalled, "Import YumListBase from insights.parsers.yum_list instead") self.expired_cache = False """bool: Indicates if the yum repo cache is expired.""" From 159889ae705859329cfa0c16a6dfec6c380b47ed Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Fri, 3 Apr 2020 03:09:28 +0200 Subject: [PATCH 017/892] Parser for dhclient in NetworkManager (#2526) * Add parser for dhclient Signed-off-by: Jitka Obselkova * Remove init Signed-off-by: Jitka Obselkova Co-authored-by: Xiangce Liu --- .../networkmanager_dhclient.rst | 3 + insights/parsers/networkmanager_dhclient.py | 107 ++++++++++++++++ .../tests/test_networkmanager_dhclient.py | 116 ++++++++++++++++++ 3 files changed, 226 insertions(+) create mode 100644 docs/shared_parsers_catalog/networkmanager_dhclient.rst create mode 100644 insights/parsers/networkmanager_dhclient.py create mode 100644 insights/parsers/tests/test_networkmanager_dhclient.py diff --git a/docs/shared_parsers_catalog/networkmanager_dhclient.rst b/docs/shared_parsers_catalog/networkmanager_dhclient.rst new file mode 100644 index 000000000..37e745b56 --- /dev/null +++ b/docs/shared_parsers_catalog/networkmanager_dhclient.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.networkmanager_dhclient + :members: + :show-inheritance: diff --git a/insights/parsers/networkmanager_dhclient.py b/insights/parsers/networkmanager_dhclient.py new file mode 100644 index 000000000..44d5612ab --- /dev/null +++ b/insights/parsers/networkmanager_dhclient.py @@ -0,0 +1,107 @@ +""" +NetworkManagerDhclient - file ``/etc/NetworkManager/dispatcher.d/*-dhclient`` +============================================================================= +""" + +import re + +from insights.core import Parser +from insights.core.plugins import parser +from insights.parsers import SkipException +from insights.specs import Specs + + +VULNERABLE_BLOCK_RHEL_6 = re.compile(re.escape(""" +eval "$( +declare | LC_ALL=C grep '^DHCP4_[A-Z_]*=' | while read opt; do + optname=${opt%%=*} + optname=${optname,,} + optname=new_${optname#dhcp4_} + optvalue=${opt#*=} + echo "$optname=$optvalue" +done +)" +""".strip())) + +VULNERABLE_BLOCK_RHEL_7 = re.compile(re.escape(""" +eval "$( +declare | LC_ALL=C grep '^DHCP4_[A-Z_]*=' | while read opt; do + optname=${opt%%=*} + optname=${optname,,} + optname=new_${optname#dhcp4_} + optvalue=${opt#*=} + echo "export $optname=$optvalue" +done +)" +""".strip())) + + +@parser(Specs.networkmanager_dispatcher_d) +class NetworkManagerDhclient(Parser): + """ + Class for parsing ``/etc/NetworkManager/dispatcher.d/*-dhclient`` file. + + Attributes: + has_vulnerable_block (bool): True, if the vulnerable block is present, False otherwise. + + Raises: + SkipException: When content is empty or cannot be parsed. + + Sample output of this command is:: + + #!/bin/bash + # run dhclient.d scripts in an emulated environment + + PATH=/bin:/usr/bin:/sbin + SAVEDIR=/var/lib/dhclient + ETCDIR=/etc/dhcp + interface=$1 + + eval "$( + declare | LC_ALL=C grep '^DHCP4_[A-Z_]*=' | while read opt; do + optname=${opt%%=*} + optname=${optname,,} + optname=new_${optname#dhcp4_} + optvalue=${opt#*=} + echo "export $optname=$optvalue" + done + )" + + [ -f /etc/sysconfig/network ] && . /etc/sysconfig/network + + [ -f /etc/sysconfig/network-scripts/ifcfg-$interface ] && \ + . /etc/sysconfig/network-scripts/ifcfg-$interface + + if [ -d $ETCDIR/dhclient.d ]; then + for f in $ETCDIR/dhclient.d/*.sh; do + if [ -x $f ]; then + subsystem="${f%.sh}" + subsystem="${subsystem##*/}" + . 
${f} + if [ "$2" = "up" ]; then + "${subsystem}_config" + elif [ "$2" = "dhcp4-change" ]; then + if [ "$subsystem" = "chrony" -o "$subsystem" = "ntp" ]; then + "${subsystem}_config" + fi + elif [ "$2" = "down" ]; then + "${subsystem}_restore" + fi + fi + done + fi + + Examples: + >>> type(dhclient) + + >>> dhclient.has_vulnerable_block + True + """ + def parse_content(self, content): + if not content: + raise SkipException("No content.") + + result = "\n".join(content) + match_rhel_6 = VULNERABLE_BLOCK_RHEL_6.search(result) + match_rhel_7 = VULNERABLE_BLOCK_RHEL_7.search(result) + self.has_vulnerable_block = bool(match_rhel_7) or bool(match_rhel_6) diff --git a/insights/parsers/tests/test_networkmanager_dhclient.py b/insights/parsers/tests/test_networkmanager_dhclient.py new file mode 100644 index 000000000..dcf0d3125 --- /dev/null +++ b/insights/parsers/tests/test_networkmanager_dhclient.py @@ -0,0 +1,116 @@ +import doctest +import pytest + +from insights.parsers import networkmanager_dhclient, SkipException +from insights.parsers.networkmanager_dhclient import NetworkManagerDhclient +from insights.tests import context_wrap + + +DHCLIENT_RHEL_6 = "/etc/NetworkManager/dispatcher.d/10-dhclient" + +DHCLIENT_RHEL_7 = "/etc/NetworkManager/dispatcher.d/11-dhclient" + +NOT_VULNERABLE_RHEL_6 = """ +#!/bin/bash +# run dhclient.d scripts in an emulated environment + +PATH=/bin:/usr/bin:/sbin +SAVEDIR=/var/lib/dhclient +ETCDIR=/etc/dhcp +interface=$1 + +eval "$( +declare | LC_ALL=C grep '^DHCP4_[A-Z_]*=' | while read -r opt; do + optname=${opt%%=*} + optname=${optname,,} + optname=new_${optname#dhcp4_} + optvalue=${opt#*=} + echo "$optname=$optvalue" +done +)" + +[ -f /etc/sysconfig/network ] && . /etc/sysconfig/network + +[ -f /etc/sysconfig/network-scripts/ifcfg-$interface ] && \ + . /etc/sysconfig/network-scripts/ifcfg-$interface + +if [ -d $ETCDIR/dhclient.d ]; then + for f in $ETCDIR/dhclient.d/*.sh; do + if [ -x $f ]; then + subsystem="${f%.sh}" + subsystem="${subsystem##*/}" + . ${f} + if [ "$2" = "up" ]; then + "${subsystem}_config" + elif [ "$2" = "down" ]; then + "${subsystem}_restore" + fi + fi + done +fi +""".strip() + +VULNERABLE_RHEL_7 = """ +#!/bin/bash +# run dhclient.d scripts in an emulated environment + +PATH=/bin:/usr/bin:/sbin +SAVEDIR=/var/lib/dhclient +ETCDIR=/etc/dhcp +interface=$1 + +eval "$( +declare | LC_ALL=C grep '^DHCP4_[A-Z_]*=' | while read opt; do + optname=${opt%%=*} + optname=${optname,,} + optname=new_${optname#dhcp4_} + optvalue=${opt#*=} + echo "export $optname=$optvalue" +done +)" + +[ -f /etc/sysconfig/network ] && . /etc/sysconfig/network + +[ -f /etc/sysconfig/network-scripts/ifcfg-$interface ] && \ + . /etc/sysconfig/network-scripts/ifcfg-$interface + +if [ -d $ETCDIR/dhclient.d ]; then + for f in $ETCDIR/dhclient.d/*.sh; do + if [ -x $f ]; then + subsystem="${f%.sh}" + subsystem="${subsystem##*/}" + . 
${f} + if [ "$2" = "up" ]; then + "${subsystem}_config" + elif [ "$2" = "dhcp4-change" ]; then + if [ "$subsystem" = "chrony" -o "$subsystem" = "ntp" ]; then + "${subsystem}_config" + fi + elif [ "$2" = "down" ]; then + "${subsystem}_restore" + fi + fi + done +fi +""".strip() + + +def test_no_data(): + with pytest.raises(SkipException): + NetworkManagerDhclient(context_wrap("")) + + +def test_dhclient(): + dhclient_1 = NetworkManagerDhclient(context_wrap(VULNERABLE_RHEL_7, path=DHCLIENT_RHEL_7)) + assert dhclient_1.has_vulnerable_block + + dhclient_2 = NetworkManagerDhclient(context_wrap(NOT_VULNERABLE_RHEL_6, path=DHCLIENT_RHEL_6)) + assert not dhclient_2.has_vulnerable_block + + +def test_doc_examples(): + env = { + "dhclient": NetworkManagerDhclient(context_wrap(VULNERABLE_RHEL_7, path=DHCLIENT_RHEL_7)) + } + failed, total = doctest.testmod(networkmanager_dhclient, globs=env) + assert failed == 0 From b792c0ce3a0a8661c4055965d275f976cac685c2 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Mon, 6 Apr 2020 13:05:31 -0500 Subject: [PATCH 018/892] Limit version of Sphinx for doc build Signed-off-by: Bob Fahr --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2be9671e1..122799ca4 100644 --- a/setup.py +++ b/setup.py @@ -66,7 +66,7 @@ def maybe_require(pkg): ]) docs = set([ - 'Sphinx', + 'Sphinx<=2.4.4', 'nbsphinx', 'sphinx_rtd_theme', 'ipython', From c3fdc5a756d1c8ead0e899893726666b7d90b38c Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Mon, 6 Apr 2020 14:35:27 -0500 Subject: [PATCH 019/892] Update test data for foreman logs (#2530) Signed-off-by: Bob Fahr --- insights/parsers/tests/test_foreman_log.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/parsers/tests/test_foreman_log.py b/insights/parsers/tests/test_foreman_log.py index 8f1b86783..cd40b4d1e 100644 --- a/insights/parsers/tests/test_foreman_log.py +++ b/insights/parsers/tests/test_foreman_log.py @@ -130,11 +130,11 @@ CANDLEPIN_LOG = """ 2016-09-09 13:45:52,650 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b -2016-09-09 13:45:52,784 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=ING_Luxembourg_SA] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=134 +2016-09-09 13:45:52,784 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=example_org] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=134 2016-09-09 13:45:52,947 [req=909ca4c5-f24e-4212-8f23-cc754d06ac57, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b/content_overrides 2016-09-09 13:45:52,976 [req=909ca4c5-f24e-4212-8f23-cc754d06ac57, org=] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=29 2016-09-09 13:45:53,072 [req=49becd26-5dfe-4d2f-8667-470519230d88, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b/release -2016-09-09 13:45:53,115 [req=49becd26-5dfe-4d2f-8667-470519230d88, org=ING_Luxembourg_SA] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=43 +2016-09-09 13:45:53,115 [req=49becd26-5dfe-4d2f-8667-470519230d88, org=example_org] INFO 
org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=43 """.strip() From 3f7eb74275b198501e2508a0ae8ecdb51d0a3c60 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 6 Apr 2020 16:24:44 -0400 Subject: [PATCH 020/892] change default tags file to tags.yaml (#2520) Signed-off-by: Jeremy Crafts --- insights/client/utilities.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/client/utilities.py b/insights/client/utilities.py index b25837dab..63ff77428 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -315,7 +315,7 @@ def systemd_notify(pid): logger.debug('systemd-notify returned %s', proc.returncode) -def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf")): +def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.yaml")): ''' Load tag data from the tags file. @@ -333,7 +333,7 @@ def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf" return tags -def write_tags(tags, tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf")): +def write_tags(tags, tags_file_path=os.path.join(constants.default_conf_dir, "tags.yaml")): """ Writes tags to tags_file_path From 7c8c7e99ae09f13409ee016729ece98efecb8e6a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 7 Apr 2020 14:42:41 -0400 Subject: [PATCH 021/892] Fix rpm version logging (#2513) * use client constants instead of rpm command to get version Signed-off-by: Jeremy Crafts * refactor, fix unit tests Signed-off-by: Jeremy Crafts * flake Signed-off-by: Jeremy Crafts --- insights/client/utilities.py | 19 +++++++------ insights/tests/client/test_utilities.py | 36 +++++++++++++++++++++---- 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/insights/client/utilities.py b/insights/client/utilities.py index 63ff77428..9750eb692 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -23,6 +23,11 @@ from .constants import InsightsConstants as constants from .collection_rules import InsightsUploadConf +try: + from insights_client.constants import InsightsConstants as wrapper_constants +except ImportError: + wrapper_constants = None + logger = logging.getLogger(__name__) @@ -228,16 +233,14 @@ def get_version_info(): ''' Get the insights client and core versions for archival ''' - cmd = 'rpm -q --qf "%{VERSION}-%{RELEASE}" insights-client' + try: + client_version = wrapper_constants.version + except AttributeError: + # wrapper_constants is None or has no attribute "version" + client_version = None version_info = {} version_info['core_version'] = '%s-%s' % (package_info['VERSION'], package_info['RELEASE']) - rpm_proc = run_command_get_output(cmd) - if rpm_proc['status'] != 0: - # Unrecoverable error - logger.debug('Error occurred while running rpm -q. 
Details:\n%s' % rpm_proc['output']) - version_info['client_version'] = None - else: - version_info['client_version'] = rpm_proc['output'] + version_info['client_version'] = client_version return version_info diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py index 5c860aece..17284ba4b 100644 --- a/insights/tests/client/test_utilities.py +++ b/insights/tests/client/test_utilities.py @@ -82,16 +82,42 @@ def test_run_command_get_output(): assert util.run_command_get_output(cmd) == {'status': 0, 'output': u'hello\n'} -@patch('insights.client.utilities.run_command_get_output') +@patch('insights.client.utilities.wrapper_constants') @patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) -def test_get_version_info(run_command_get_output): - # package_info['VERSION'] = '1' - # package_info['RELEASE'] = '1' - run_command_get_output.return_value = {'output': 1, 'status': 0} +def test_get_version_info_OK(wrapper_constants): + ''' + insights_client constants are imported OK and version + is reported. Return version as defined + ''' + wrapper_constants.version = 1 version_info = util.get_version_info() assert version_info == {'core_version': '1-1', 'client_version': 1} +@patch('insights.client.utilities.wrapper_constants', new=None) +@patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) +def test_get_version_info_no_module(): + ''' + insights_client constants cannot be imported, + constants object is None. Return None version. + ''' + version_info = util.get_version_info() + assert version_info == {'core_version': '1-1', 'client_version': None} + + +@patch('insights.client.utilities.wrapper_constants') +@patch.dict('insights.client.utilities.package_info', {'VERSION': '1', 'RELEASE': '1'}) +def test_get_version_info_no_version(wrapper_constants): + ''' + insights_client constants are imported OK but + constants object has no attribute "version." 
+ Return None version + ''' + del wrapper_constants.version + version_info = util.get_version_info() + assert version_info == {'core_version': '1-1', 'client_version': None} + + def test_validate_remove_file(): tf = '/tmp/remove.cfg' with open(tf, 'wb') as f: From 41d418cc59ab00d0e24b469b119a3762552f29e6 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 8 Apr 2020 15:50:47 -0400 Subject: [PATCH 022/892] Split remove conf (#2481) * split yaml remove.conf into two new files Signed-off-by: Jeremy Crafts * add create_report function to InsightsUploadConf Signed-off-by: Jeremy Crafts * write report to archive, tweak parsing, skip remove.conf if empty Signed-off-by: Jeremy Crafts * move empty file warnings to debug level, fix report function Signed-off-by: Jeremy Crafts * fix line Signed-off-by: Jeremy Crafts * update --validate function Signed-off-by: Jeremy Crafts * start to update unit tests Signed-off-by: Jeremy Crafts * fix perms check, return None for empty conf instead of {} Signed-off-by: Jeremy Crafts * warning when running with invalid perms, error when using --validate Signed-off-by: Jeremy Crafts * tweak old rm_conf parse, unit tests, make report into json dump Signed-off-by: Jeremy Crafts * DRY the conf load functions Signed-off-by: Jeremy Crafts * add kb link to deprecation warning Signed-off-by: Jeremy Crafts * add message to use new formats to use YAML Signed-off-by: Jeremy Crafts * exit 0 on --validate and empty configuration change "remove file" in logging to "blacklist configuration" Signed-off-by: Jeremy Crafts * remove extra call to validate Signed-off-by: Jeremy Crafts * fix unit tests, skip mock_open failures in QE Signed-off-by: Jeremy Crafts * fix unit tests some more, improve error handling Signed-off-by: Jeremy Crafts * fix imports, definitions Signed-off-by: Jeremy Crafts * flake Signed-off-by: Jeremy Crafts * use .yaml file extension Signed-off-by: Jeremy Crafts * add components to file-redaction.conf Signed-off-by: Jeremy Crafts * change more *redaction.conf references to .yaml Signed-off-by: Jeremy Crafts --- insights/client/client.py | 3 +- insights/client/collection_rules.py | 315 +++++++--- insights/client/config.py | 8 + insights/client/data_collector.py | 10 +- insights/client/phase/v1.py | 14 +- .../collection_rules/test_get_rm_conf.py | 562 ++++++++++++------ insights/tests/client/test_collect.py | 26 +- insights/tests/client/test_insights_spec.py | 2 +- .../tests/client/test_skip_commands_files.py | 10 +- insights/tests/client/test_utilities.py | 20 +- 10 files changed, 655 insertions(+), 315 deletions(-) diff --git a/insights/client/client.py b/insights/client/client.py index e4cc3cc89..a762aa0c1 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -282,6 +282,7 @@ def collect(config, pconn): collection_rules = pc.get_conf_file() rm_conf = pc.get_rm_conf() + blacklist_report = pc.create_report() if rm_conf: logger.warn("WARNING: Excluding data from files") @@ -292,7 +293,7 @@ def collect(config, pconn): msg_name = determine_hostname(config.display_name) dc = DataCollector(config, archive, mountpoint=mp) logger.info('Starting to collect Insights data for %s', msg_name) - dc.run_collection(collection_rules, rm_conf, branch_info) + dc.run_collection(collection_rules, rm_conf, branch_info, blacklist_report) output = dc.done(collection_rules, rm_conf) return output diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py index f8fca4469..7ab70a412 100644 --- a/insights/client/collection_rules.py +++ 
b/insights/client/collection_rules.py @@ -21,7 +21,77 @@ logger = logging.getLogger(__name__) NETWORK = constants.custom_network_log_level -expected_keys = ('commands', 'files', 'patterns', 'keywords') + +def correct_format(parsed_data, expected_keys, filename): + ''' + Ensure the parsed file matches the needed format + Returns True, on error + Returns False, None on success + ''' + # validate keys are what we expect + def is_list_of_strings(data): + ''' + Helper function for correct_format() + ''' + if data is None: + # nonetype, no data to parse. treat as empty list + return True + if not isinstance(data, list): + return False + for l in data: + if not isinstance(l, six.string_types): + return False + return True + + keys = parsed_data.keys() + invalid_keys = set(keys).difference(expected_keys) + if invalid_keys: + return True, ('Unknown section(s) in %s: ' % filename + ', '.join(invalid_keys) + + '\nValid sections are ' + ', '.join(expected_keys) + '.') + + # validate format (lists of strings) + for k in expected_keys: + if k in parsed_data: + if k == 'patterns' and isinstance(parsed_data['patterns'], dict): + if 'regex' not in parsed_data['patterns']: + return True, 'Patterns section contains an object but the "regex" key was not specified.' + if 'regex' in parsed_data['patterns'] and len(parsed_data['patterns']) > 1: + return True, 'Unknown keys in the patterns section. Only "regex" is valid.' + if not is_list_of_strings(parsed_data['patterns']['regex']): + return True, 'regex section under patterns must be a list of strings.' + continue + if not is_list_of_strings(parsed_data[k]): + return True, '%s section must be a list of strings.' % k + return False, None + + +def load_yaml(filename): + try: + with open(filename) as f: + loaded_yaml = yaml.safe_load(f) + if loaded_yaml is None: + logger.debug('%s is empty.', filename) + return {} + except (yaml.YAMLError, yaml.parser.ParserError) as e: + # can't parse yaml from conf + raise RuntimeError('ERROR: Cannot parse %s.\n' + 'If using any YAML tokens such as [] in an expression, ' + 'be sure to wrap the expression in quotation marks.\n\nError details:\n%s\n' % (filename, e)) + if not isinstance(loaded_yaml, dict): + # loaded data should be a dict with at least one key + raise RuntimeError('ERROR: Invalid YAML loaded.') + return loaded_yaml + + +def verify_permissions(f): + ''' + Verify 600 permissions on a file + ''' + mode = stat.S_IMODE(os.stat(f).st_mode) + if not mode == 0o600: + raise RuntimeError("Invalid permissions on %s. " + "Expected 0600 got %s" % (f, oct(mode))) + logger.debug("Correct file permissions on %s", f) class InsightsUploadConf(object): @@ -36,9 +106,21 @@ def __init__(self, config, conn=None): self.config = config self.fallback_file = constants.collection_fallback_file self.remove_file = config.remove_file + self.redaction_file = config.redaction_file + self.content_redaction_file = config.content_redaction_file self.collection_rules_file = constants.collection_rules_file self.collection_rules_url = self.config.collection_rules_url self.gpg = self.config.gpg + + # set rm_conf as a class attribute so we can observe it + # in create_report + self.rm_conf = None + + # attribute to set when using file-redaction.yaml instead of + # remove.conf, for reporting purposes. True by default + # since new format is favored. + self.using_new_format = True + if conn: if self.collection_rules_url is None: if config.legacy_upload: @@ -217,128 +299,183 @@ def get_rm_conf_old(self): Get excluded files config from remove_file. 
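
+
+        A minimal example of the legacy INI layout this method expects,
+        mirroring the format used in the tests (illustrative values only)::
+
+            [remove]
+            commands=/bin/ls,ethtool_i
+            files=/etc/hosts
         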
""" # Convert config object into dict - logger.debug('Trying to parse as INI file.') + self.using_new_format = False parsedconfig = ConfigParser.RawConfigParser() - + if not self.remove_file: + # no filename defined, return nothing + logger.debug('remove_file is undefined') + return None + if not os.path.isfile(self.remove_file): + logger.debug('%s not found. No data files, commands,' + ' or patterns will be ignored, and no keyword obfuscation will occur.', self.remove_file) + return None + try: + verify_permissions(self.remove_file) + except RuntimeError as e: + if self.config.validate: + # exit if permissions invalid and using --validate + raise RuntimeError('ERROR: %s' % e) + logger.warning('WARNING: %s', e) try: parsedconfig.read(self.remove_file) + sections = parsedconfig.sections() + + if not sections: + # file has no sections, skip it + logger.debug('Remove.conf exists but no parameters have been defined.') + return None + + if sections != ['remove']: + raise RuntimeError('ERROR: invalid section(s) in remove.conf. Only "remove" is valid.') + + expected_keys = ('commands', 'files', 'patterns', 'keywords') rm_conf = {} for item, value in parsedconfig.items('remove'): if item not in expected_keys: - raise RuntimeError('Unknown section in remove.conf: ' + item + - '\nValid sections are ' + ', '.join(expected_keys) + '.') + raise RuntimeError('ERROR: Unknown key in remove.conf: ' + item + + '\nValid keys are ' + ', '.join(expected_keys) + '.') if six.PY3: rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',') else: rm_conf[item] = value.strip().decode('string-escape').split(',') - return rm_conf + self.rm_conf = rm_conf except ConfigParser.Error as e: # can't parse config file at all logger.debug(e) - raise RuntimeError('ERROR: Cannot parse the remove.conf file as a YAML file ' - 'nor as an INI file. Please check the file formatting.\n' + logger.debug('To configure using YAML, please use file-redaction.yaml and file-content-redaction.yaml.') + raise RuntimeError('ERROR: Cannot parse the remove.conf file.\n' 'See %s for more information.' % self.config.logging_file) + logger.warning('WARNING: remove.conf is deprecated. Please use file-redaction.yaml and file-content-redaction.yaml. See https://access.redhat.com/articles/4511681 for details.') + return self.rm_conf - def get_rm_conf(self): + def load_redaction_file(self, fname): ''' - Load remove conf. If it's a YAML-formatted file, try to load - the "new" version of remove.conf + Load the YAML-style file-redaction.yaml + or file-content-redaction.yaml files ''' - def is_list_of_strings(data): - ''' - Helper function for correct_format() - ''' - if data is None: - # nonetype, no data to parse. 
treat as empty list - return True - if not isinstance(data, list): - return False - for l in data: - if not isinstance(l, six.string_types): - return False - return True - - def correct_format(parsed_data): - ''' - Ensure the parsed file matches the needed format - Returns True, on error - ''' - # validate keys are what we expect - keys = parsed_data.keys() - invalid_keys = set(keys).difference(expected_keys) - if invalid_keys: - return True, ('Unknown section(s) in remove.conf: ' + ', '.join(invalid_keys) + - '\nValid sections are ' + ', '.join(expected_keys) + '.') - - # validate format (lists of strings) - for k in expected_keys: - if k in parsed_data: - if k == 'patterns' and isinstance(parsed_data['patterns'], dict): - if 'regex' not in parsed_data['patterns']: - return True, 'Patterns section contains an object but the "regex" key was not specified.' - if 'regex' in parsed_data['patterns'] and len(parsed_data['patterns']) > 1: - return True, 'Unknown keys in the patterns section. Only "regex" is valid.' - if not is_list_of_strings(parsed_data['patterns']['regex']): - return True, 'regex section under patterns must be a list of strings.' - continue - if not is_list_of_strings(parsed_data[k]): - return True, '%s section must be a list of strings.' % k - return False, None - - if not os.path.isfile(self.remove_file): - logger.debug('No remove.conf defined. No files/commands will be ignored.') + if fname not in (self.redaction_file, self.content_redaction_file): + # invalid function use, should never get here in a production situation + return None + if not fname: + # no filename defined, return nothing + logger.debug('redaction_file or content_redaction_file is undefined') + return None + if not fname or not os.path.isfile(fname): + if fname == self.redaction_file: + logger.debug('%s not found. No files or commands will be skipped.', self.redaction_file) + elif fname == self.content_redaction_file: + logger.debug('%s not found. 
'
+                             'No patterns will be skipped and no keyword obfuscation will occur.', self.content_redaction_file)
+            return None
+        try:
+            verify_permissions(fname)
+        except RuntimeError as e:
+            if self.config.validate:
+                # exit if permissions invalid and using --validate
+                raise RuntimeError('ERROR: %s' % e)
+            logger.warning('WARNING: %s', e)
+        loaded = load_yaml(fname)
+        if fname == self.redaction_file:
+            err, msg = correct_format(loaded, ('commands', 'files', 'components'), fname)
+        elif fname == self.content_redaction_file:
+            err, msg = correct_format(loaded, ('patterns', 'keywords'), fname)
         if err:
             # YAML is correct but doesn't match the format we need
             raise RuntimeError('ERROR: ' + msg)
+        return loaded
+
+    def get_rm_conf(self):
+        '''
+        Try to load the "new" version of
+        remove.conf (file-redaction.yaml and file-content-redaction.yaml)
+        '''
+        rm_conf = {}
+        redact_conf = self.load_redaction_file(self.redaction_file)
+        content_redact_conf = self.load_redaction_file(self.content_redaction_file)
+
+        if redact_conf:
+            rm_conf.update(redact_conf)
+        if content_redact_conf:
+            rm_conf.update(content_redact_conf)
+
+        if not redact_conf and not content_redact_conf:
+            # no file-redaction.yaml or file-content-redaction.yaml defined,
+            # try to use remove.conf
+            return self.get_rm_conf_old()
+
         # remove Nones, empty strings, and empty lists
         filtered_rm_conf = dict((k, v) for k, v in rm_conf.items() if v)
+        self.rm_conf = filtered_rm_conf
         return filtered_rm_conf

     def validate(self):
         '''
         Validate remove.conf
         '''
-        if not os.path.isfile(self.remove_file):
-            logger.warn("WARNING: Remove file does not exist")
-            return False
-        # Make sure permissions are 600
-        mode = stat.S_IMODE(os.stat(self.remove_file).st_mode)
-        if not mode == 0o600:
-            logger.error("WARNING: Invalid remove file permissions. "
-                         "Expected 0600 got %s" % oct(mode))
-            return False
-        else:
-            logger.debug("Correct file permissions")
         success = self.get_rm_conf()
-        if success is None or success is False:
-            logger.error('Could not parse remove.conf')
-            return False
+        if not success:
+            logger.info('No contents in the blacklist configuration to validate.')
+            return None
         # Using print here as this could contain sensitive information
-        if self.config.verbose or self.config.validate:
-            print('Remove file parsed contents:')
-            print(success)
-        logger.info('Parsed successfully.')
+        print('Blacklist configuration parsed contents:')
+        print(success)
+        logger.info('Parsed successfully.')
         return True
+
+    def create_report(self):
+        def length(lst):
+            '''
+            Because of how the INI remove.conf is parsed,
+            an empty value in the conf will produce
+            the value [''] when parsed. Do not include
+            
Do not include + these in the report + ''' + if len(lst) == 1 and lst[0] == '': + return 0 + return len(lst) + + num_commands = 0 + num_files = 0 + num_components = 0 + num_patterns = 0 + num_keywords = 0 + using_regex = False + + if self.rm_conf: + for key in self.rm_conf: + if key == 'commands': + num_commands = length(self.rm_conf['commands']) + if key == 'files': + num_files = length(self.rm_conf['files']) + if key == 'components': + num_components = length(self.rm_conf['components']) + if key == 'patterns': + if isinstance(self.rm_conf['patterns'], dict): + num_patterns = length(self.rm_conf['patterns']['regex']) + using_regex = True + else: + num_patterns = length(self.rm_conf['patterns']) + if key == 'keywords': + num_keywords = length(self.rm_conf['keywords']) + + return { + 'obfuscate': self.config.obfuscate, + 'obfuscate_hostname': self.config.obfuscate_hostname, + 'commands': num_commands, + 'files': num_files, + 'components': num_components, + 'patterns': num_patterns, + 'keywords': num_keywords, + 'using_new_format': self.using_new_format, + 'using_patterns_regex': using_regex + } + if __name__ == '__main__': from .config import InsightsConfig - print(InsightsUploadConf(InsightsConfig().load_all())) + config = InsightsConfig().load_all() + uploadconf = InsightsUploadConf(config) + uploadconf.validate() + report = uploadconf.create_report() + + print(report) diff --git a/insights/client/config.py b/insights/client/config.py index e0a96f395..49bb1b7aa 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -242,6 +242,14 @@ # non-CLI 'default': os.path.join(constants.default_conf_dir, 'remove.conf') }, + 'redaction_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'file-redaction.yaml') + }, + 'content_redaction_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'file-content-redaction.yaml') + }, 'reregister': { 'default': False, 'opt': ['--force-reregister'], diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index f0a57c4fb..fcf77a0ad 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -85,6 +85,11 @@ def f(k, v): t = list(chain.from_iterable(t)) self.archive.add_metadata_to_archive(json.dumps(t), '/tags.json') + def _write_blacklist_report(self, blacklist_report): + logger.debug("Writing blacklist report to archive...") + self.archive.add_metadata_to_archive( + json.dumps(blacklist_report), '/blacklist_report') + def _run_pre_command(self, pre_cmd): ''' Run a pre command to get external args for a command @@ -182,7 +187,7 @@ def _parse_command_spec(self, spec, precmds): else: return [spec] - def run_collection(self, conf, rm_conf, branch_info): + def run_collection(self, conf, rm_conf, branch_info, blacklist_report): ''' Run specs and collect all the data ''' @@ -197,7 +202,7 @@ def run_collection(self, conf, rm_conf, branch_info): # handle the None or empty case of the sub-object if 'regex' in exclude and not exclude['regex']: raise LookupError - logger.warn("WARNING: Skipping patterns found in remove.conf") + logger.warn("WARNING: Skipping patterns defined in blacklist configuration") except LookupError: logger.debug('Patterns section of remove.conf is empty.') @@ -247,6 +252,7 @@ def run_collection(self, conf, rm_conf, branch_info): self._write_display_name() self._write_version_info() self._write_tags() + self._write_blacklist_report(blacklist_report) logger.debug('Metadata collection finished.') def done(self, conf, rm_conf): diff 
--git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 4cc7b8676..79dccb901 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -64,22 +64,12 @@ def pre_update(client, config): # validate the remove file if config.validate: try: - if validate_remove_file(config): - sys.exit(constants.sig_kill_ok) - else: - sys.exit(constants.sig_kill_bad) + validate_remove_file(config) + sys.exit(constants.sig_kill_ok) except RuntimeError as e: logger.error(e) sys.exit(constants.sig_kill_bad) - if os.path.isfile(config.remove_file): - if os.stat(config.remove_file).st_size != 0: - try: - validate_remove_file(config) - except RuntimeError as e: - logger.error(e) - sys.exit(constants.sig_kill_bad) - # handle cron stuff if config.enable_schedule: # enable automatic scheduling diff --git a/insights/tests/client/collection_rules/test_get_rm_conf.py b/insights/tests/client/collection_rules/test_get_rm_conf.py index 98ee7e94e..4041a51ea 100644 --- a/insights/tests/client/collection_rules/test_get_rm_conf.py +++ b/insights/tests/client/collection_rules/test_get_rm_conf.py @@ -1,47 +1,19 @@ # -*- coding: UTF-8 -*- -import os -import json import six import mock import pytest from .helpers import insights_upload_conf -from mock.mock import patch -from insights.client.collection_rules import InsightsUploadConf -from insights.client.config import InsightsConfig +from mock.mock import patch, Mock +from insights.client.collection_rules import correct_format, load_yaml, verify_permissions conf_remove_file = '/tmp/remove.conf' +conf_file_redaction_file = '/tmp/file-redaction.yaml' +conf_file_content_redaction_file = '/tmp/file-content-redaction.yaml' removed_files = ["/etc/some_file", "/tmp/another_file"] -def teardown_function(func): - if func is test_raw_config_parser: - if os.path.isfile(conf_remove_file): - os.remove(conf_remove_file) - - -def patch_isfile(isfile): - """ - Makes isfile return the passed result. - """ - def decorator(old_function): - patcher = patch("insights.client.collection_rules.os.path.isfile", return_value=isfile) - return patcher(old_function) - return decorator - - -def patch_raw_config_parser(items): - """ - Mocks RawConfigParser so it returns the passed items. - """ - def decorator(old_function): - patcher = patch("insights.client.collection_rules.ConfigParser.RawConfigParser", - **{"return_value.items.return_value": items}) - return patcher(old_function) - return decorator - - def patch_open(filedata): if six.PY3: open_name = 'builtins.open' @@ -51,248 +23,452 @@ def patch_open(filedata): return patch(open_name, mock.mock_open(read_data=filedata), create=True) -@patch_raw_config_parser([]) -@patch_isfile(False) -def test_no_file(isfile, raw_config_parser): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - - isfile.assert_called_once_with(conf_remove_file) +# Tests for the correct_format function +def test_correct_format_ok_validtypes(): + ''' + Verify that valid config is allowed when + proper keys and lists of strings are specified + ''' + # files and commands (file-redaction.yaml) + parsed_data = {'commands': ['/bin/test', '/bin/test2'], 'files': ['/var/lib/aaa', '/var/lib/nnn']} + expected_keys = ('commands', 'files') + err, msg = correct_format(parsed_data, expected_keys, conf_file_redaction_file) + assert not err + assert msg is None + + # patterns w. 
list of strings (file-content-redaction.yaml) + parsed_data = {'patterns': ['abcd', 'bcdef'], 'keywords': ['example', 'example2']} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert not err + assert msg is None + + # patterns w. regex object (file-content-redaction.yaml) + parsed_data = {'patterns': {'regex': ['abcd', 'bcdef']}, 'keywords': ['example', 'example2']} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert not err + assert msg is None + + +def test_config_verification_ok_emptyvalues(): + ''' + Verify that valid config is allowed when + proper keys and empty (None) values are specified + ''' + parsed_data = {'patterns': None, 'keywords': ['abc', 'def']} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert not err + assert msg is None - # no file, no call to open - with patch_open('') as mock_open: - mock_open.assert_not_called() - assert result is None +def test_config_verification_bad_invalidkeys(): + ''' + Verify that a config with invalid keys is not allowed + ''' + parsed_data = {'commands': None, 'files': None, 'somekey': None} + expected_keys = ('commands', 'files') + err, msg = correct_format(parsed_data, expected_keys, conf_file_redaction_file) + assert err + assert 'Unknown section' in msg -@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old') -@patch_isfile(True) -def test_return(isfile, get_rm_conf_old): +def test_correct_format_bad_keys_in_wrong_file(): ''' - Test that loading YAML from a file will return a dict + Verify that an otherwise valid key is not + specified in the wrong file (i.e. 
patterns + in file-redaction.yaml) ''' - filedata = '---\ncommands:\n- /bin/ls\n- ethtool_i' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - assert result == {'commands': ['/bin/ls', 'ethtool_i']} - get_rm_conf_old.assert_not_called() + parsed_data = {'files': ['/etc/example'], 'patterns': ['abc', 'def']} + expected_keys = ('files', 'commands') + err, msg = correct_format(parsed_data, expected_keys, conf_file_redaction_file) + assert err + assert 'Unknown section(s) in ' + conf_file_redaction_file in msg -@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old') -@patch_isfile(True) -def test_fallback_to_old(isfile, get_rm_conf_old): +def test_correct_format_bad_invalidtypes(): ''' - Test that the YAML function falls back to classic INI - if the file cannot be parsed as YAML + Verify that a config with valid keys, + but invalid data types, is not allowed ''' - filedata = 'ncommands\n /badwain/ls\n- ethtool_i' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - upload_conf.get_rm_conf() - get_rm_conf_old.assert_called_once() + parsed_data = {'commands': 'somestring', 'files': ['/var/lib/aaa', '/var/lib/bbb']} + expected_keys = ('commands', 'files') + err, msg = correct_format(parsed_data, expected_keys, conf_file_redaction_file) + assert err + assert 'must be a list of strings' in msg -@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") -@patch_isfile(True) -def test_fallback_ini_data(isfile): +def test_correct_format_bad_patterns_keysnoregex(): ''' - Test that the YAML function falls back to classic INI - if the file cannot be parsed as YAML, and the data is - parsed as INI + Verify that a config with patterns, if a dict + with a single key, only contains the key + "regex" ''' - filedata = '[remove]\ncommands=/bin/ls,ethtool_i' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - assert result == {'commands': ['/bin/ls', 'ethtool_i']} + parsed_data = {'patterns': {'wrongkey': ['a(bc)', 'nextregex']}} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert err + assert 'contains an object but the "regex" key was not specified' in msg -@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") -@patch_isfile(True) -def test_fallback_bad_data(isfile): +def test_correct_format_bad_patterns_invalidkey(): ''' - Test that the YAML function falls back to classic INI - if the file cannot be parsed as YAML, and the data isn't - INI either so it's thrown out + Verify that a config with patterns, if a dict + containing the key "regex", only contains the key "regex" ''' - filedata = 'ncommands\n /badwain/ls\n- ethtool_i' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'YAML file nor as an INI file' in str(e.value) + parsed_data = {'patterns': {'regex': [], 'wrongkey': ['a(bc)', 'nextregex']}} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert err + assert 'Only "regex" is valid' in msg -@patch_isfile(True) -def test_load_string_patterns(isfile): +def 
test_correct_format_bad_patterns_regexinvalidtype(): ''' - Test that the patterns section is loaded as a list of strings. + Verify that if a regex key exists in the + patterns section, that the value is a list + of strings ''' - filedata = '---\npatterns:\n- abcd\n- bcdef' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - assert 'patterns' in result - assert isinstance(result['patterns'], list) + parsed_data = {'patterns': {'regex': 'a(b)'}} + expected_keys = ('patterns', 'keywords') + err, msg = correct_format(parsed_data, expected_keys, conf_file_content_redaction_file) + assert err + assert 'regex section under patterns must be a list of strings' in msg -@patch_isfile(True) -def test_load_string_regex(isfile): +def test_load_yaml_ok(): ''' - Test that the patterns section is loaded as a dict with - key 'regex' and the value is a list of strings + Verify that proper YAML is parsed correctly ''' - filedata = '---\npatterns:\n regex:\n - abcd\n - bcdef' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - assert 'patterns' in result - assert isinstance(result['patterns'], dict) - assert 'regex' in result['patterns'] - assert isinstance(result['patterns']['regex'], list) + yaml_data = '---\ncommands:\n- /bin/abc/def\n- /bin/ghi/jkl\nfiles:\n- /etc/abc/def.conf\n' + with patch_open(yaml_data): + result = load_yaml('test') + assert result -@patch_raw_config_parser([("files", ",".join(removed_files))]) -@patch_isfile(True) -def test_return_old(isfile, raw_config_parser): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf_old() +def test_load_yaml_error(): + ''' + Verify that improper YAML raises an error + ''' + yaml_data = '---\ncommands: files:\n- /etc/abc/def.conf\n' + with patch_open(yaml_data): + with pytest.raises(RuntimeError) as e: + result = load_yaml('test') + assert not result + assert 'Cannot parse' in str(e.value) - raw_config_parser.assert_called_once_with() - raw_config_parser.return_value.read.assert_called_with(conf_remove_file) - raw_config_parser.return_value.items.assert_called_with('remove') - assert result == {"files": removed_files} +def test_load_yaml_inline_tokens_in_regex_quotes(): + ''' + Verify that, if specifying a regex containing tokens parseable + by YAML (such as []), when wrapped in quotation marks, + the regex is parsed properly. + ''' + yaml_data = '---\npatterns:\n regex:\n - \"[[:digit:]]*\"\n' + with patch_open(yaml_data): + result = load_yaml('test') + assert result -def test_raw_config_parser(): +def test_load_yaml_inline_tokens_in_regex_noquotes(): ''' - Ensure that get_rm_conf and json.loads (used to load uploader.json) return the same filename + Verify that, if specifying a regex containing tokens parseable + by YAML (such as []), when not wrapped in quotation marks, + an error is raised. 
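
+    (Unquoted, YAML reads the brackets as flow-sequence syntax, the parse
+    fails, and load_yaml surfaces that as the 'Cannot parse' RuntimeError.)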
     '''
-    raw_filename = '/etc/yum/pluginconf.d/()*\\\\w+\\\\.conf'
-    uploader_snip = json.loads('{"pattern": [], "symbolic_name": "pluginconf_d", "file": "' + raw_filename + '"}')
-    with open(conf_remove_file, 'w') as rm_conf:
-        rm_conf.write('[remove]\nfiles=' + raw_filename)
-    coll = InsightsUploadConf(InsightsConfig(remove_file=conf_remove_file))
-    items = coll.get_rm_conf()
-    assert items['files'][0] == uploader_snip['file']
+    yaml_data = '---\npatterns:\n  regex:\n  - [[:digit:]]*\n'
+    with patch_open(yaml_data):
+        with pytest.raises(RuntimeError) as e:
+            result = load_yaml('test')
+            assert not result
+    assert 'Cannot parse' in str(e.value)

-@patch_isfile(True)
-def test_config_verification_ok_validtypes(isfile):
+@patch('insights.client.collection_rules.stat.S_IMODE', return_value=0o600)
+@patch('insights.client.collection_rules.os.stat', return_value=Mock(st_mode=1))
+def test_verify_permissions_ok(os_stat, s_imode):
     '''
-    Verify that valid config is allowed when
-    proper keys and lists of strings are specified
+    Verify that file permissions of 600 do not raise an error
     '''
-    # patterns w. list of strings
-    filedata = '---\ncommands:\n- /bin/test\n- /bin/test2\nfiles:\n- /var/lib/aaa\n- /var/lib/nnn\npatterns:\n- abcd\n- bcdef\nkeywords:\n- example\n- example2'
-    with patch_open(filedata):
-        upload_conf = insights_upload_conf(remove_file=conf_remove_file)
-        result = upload_conf.get_rm_conf()
-        assert result
-
-    # patterns w. regex object
-    filedata = '---\ncommands:\n- /bin/test\n- /bin/test2\nfiles:\n- /var/lib/aaa\n- /var/lib/nnn\npatterns:\n  regex:\n  - abcd\n  - bcdef\nkeywords:\n- example\n- example2'
-    with patch_open(filedata):
-        upload_conf = insights_upload_conf(remove_file=conf_remove_file)
-        result = upload_conf.get_rm_conf()
-        assert result
+    verify_permissions('test')

-@patch_isfile(True)
-def test_config_verification_ok_emptyvalues(isfile):
+@patch('insights.client.collection_rules.stat.S_IMODE', return_value=0o644)
+@patch('insights.client.collection_rules.os.stat', return_value=Mock(st_mode=1))
+def test_verify_permissions_bad(os_stat, s_imode):
     '''
-    Verify that valid config is allowed when
-    proper keys and empty (None) values are specified
+    Verify that file permissions other than 600 raise an error
     '''
-    filedata = '---\ncommands:\n- some_symbolic_name\nfiles:\npatterns:\nkeywords:\n'
-    with patch_open(filedata):
-        upload_conf = insights_upload_conf(remove_file=conf_remove_file)
-        result = upload_conf.get_rm_conf()
-        assert result
+    with pytest.raises(RuntimeError) as e:
+        verify_permissions('test')
+    assert 'Invalid permissions' in str(e.value)
+
+# @patch_isfile(True)
+# def test_config_filtering(isfile):
+#     '''
+#     Verify that keys with None values
+#     do not appear in the final conf
+#     '''
+#     filedata = '---\npatterns:\nfiles:\n- /var/lib/aaa'
+#     with patch_open(filedata):
+#         upload_conf = insights_upload_conf(remove_file=conf_remove_file)
+#         result = upload_conf.get_rm_conf()
+#         assert 'patterns' not in result and 'files' in result

-@patch_isfile(True)
-def test_config_verification_bad_invalidkeys(isfile):
+def patch_isfile(isfile):
+    """
+    Makes isfile return the passed result.
+    """
+    def decorator(old_function):
+        patcher = patch("insights.client.collection_rules.os.path.isfile", return_value=isfile)
+        return patcher(old_function)
+    return decorator
+
+
+def patch_raw_config_parser(items):
+    """
+    Mocks RawConfigParser so it returns the passed items.
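+
+    A usage sketch (``items`` is a list of (key, value) tuples; this
+    mirrors the retired test_return_old kept commented out at the
+    bottom of this file):
+
+        @patch_raw_config_parser([("files", "/etc/a,/etc/b")])
+        @patch_isfile(True)
+        def test_x(isfile, raw_config_parser):
+            ...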
+ """ + def decorator(old_function): + patcher = patch("insights.client.collection_rules.ConfigParser.RawConfigParser", + **{"return_value.items.return_value": items}) + return patcher(old_function) + return decorator + + +@patch_isfile(False) +def test_rm_conf_old_nofile(isfile): ''' - Verify that a config with invalid keys is not allowed + Ensure an empty blacklist is generated when the file + remove.conf does not exist. ''' - filedata = '---\ncommands:\nfiles:\nsomekey:\n' - with patch_open(filedata): - upload_conf = insights_upload_conf(remove_file=conf_remove_file) - with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'Unknown section' in str(e.value) + upload_conf = insights_upload_conf(remove_file=conf_remove_file) + result = upload_conf.get_rm_conf_old() + + isfile.assert_called_once_with(conf_remove_file) + + # no file, no call to open + with patch_open('') as mock_open: + mock_open.assert_not_called() + + assert result is None +@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +@patch('insights.client.collection_rules.verify_permissions', return_value=True) @patch_isfile(True) -def test_config_verification_bad_invalidtypes(isfile): +def test_rm_conf_old_emptyfile(isfile, verify): ''' - Verify that a config with valid keys, - but invalid data types, is not allowed + Ensure an empty blacklist is generated when the old + remove.conf exists, but is empty. ''' - filedata = '---\ncommands: somestring\nfiles:\n- /var/lib/aaa\n- /var/lib/bbb\n' + filedata = '' with patch_open(filedata): upload_conf = insights_upload_conf(remove_file=conf_remove_file) - with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'must be a list of strings' in str(e.value) + result = upload_conf.get_rm_conf_old() + assert result is None +@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +@patch('insights.client.collection_rules.verify_permissions', return_value=True) @patch_isfile(True) -def test_config_verification_bad_patterns_keysnoregex(isfile): +def test_rm_conf_old_load_bad_invalidsection(isfile, verify): ''' - Verify that a config with patterns, if a dict - with a single key, only contains the key - "regex" + Ensure an error is raised when an invalid + section is defined in the old remove.conf ''' - filedata = '---\npatterns:\n wrongkey:\n - a(bc)\n - nextregex' + filedata = '[wrong]\ncommands=/bin/abc' with patch_open(filedata): upload_conf = insights_upload_conf(remove_file=conf_remove_file) with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'contains an object but the "regex" key was not specified' in str(e.value) + upload_conf.get_rm_conf_old() + assert 'ERROR: invalid section(s)' in str(e.value) +@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +@patch('insights.client.collection_rules.verify_permissions', return_value=True) @patch_isfile(True) -def test_config_verification_bad_patterns_invalidkey(isfile): +def test_rm_conf_old_load_bad_keysnosection(isfile, verify): ''' - Verify that a config with patterns, if a dict - containing the key "regex", only contains the key "regex" + Ensure an error is raised when keys are defined without + a section in the old remove.conf ''' - filedata = '---\npatterns:\n regex:\n wrongkey:\n - a(bc)\n - nextregex' + filedata = 'commands=/bin/abc\nfiles=/etc/def' with patch_open(filedata): upload_conf = 
insights_upload_conf(remove_file=conf_remove_file) with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'Only "regex" is valid' in str(e.value) + upload_conf.get_rm_conf_old() + assert 'ERROR: Cannot parse' in str(e.value) +@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +@patch('insights.client.collection_rules.verify_permissions', return_value=True) @patch_isfile(True) -def test_config_verification_bad_patterns_regexinvalidtype(isfile): +def test_rm_conf_old_load_bad_invalidkey(isfile, verify): ''' - Verify that if a regex key exists in the - patterns section, that the value is a list - of strings + Ensure an error is raised when an invalid key is defined ''' - filedata = '---\npatterns:\n regex: a(b)' + filedata = '[remove]\ncommands=/bin/abc\nbooradley=/etc/def' with patch_open(filedata): upload_conf = insights_upload_conf(remove_file=conf_remove_file) with pytest.raises(RuntimeError) as e: - upload_conf.get_rm_conf() - assert 'regex section under patterns must be a list of strings' in str(e.value) + upload_conf.get_rm_conf_old() + assert 'ERROR: Unknown key' in str(e.value) +@pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +@patch('insights.client.collection_rules.verify_permissions', return_value=True) @patch_isfile(True) -def test_config_filtering(isfile): +def test_rm_conf_old_load_ok(isfile, verify): ''' - Verify that keys with None values - do not appear in the final conf + Ensure that the old rm_conf load works + with valid data. ''' - filedata = '---\npatterns:\nfiles:\n- /var/lib/aaa' + filedata = '[remove]\ncommands=/bin/ls,ethtool_i\nfiles=/etc/test\npatterns=abc123,def456\nkeywords=key1,key2,key3' with patch_open(filedata): upload_conf = insights_upload_conf(remove_file=conf_remove_file) - result = upload_conf.get_rm_conf() - assert 'patterns' not in result and 'files' in result + result = upload_conf.get_rm_conf_old() + assert result == {'commands': ['/bin/ls', 'ethtool_i'], 'files': ['/etc/test'], 'patterns': ['abc123', 'def456'], 'keywords': ['key1', 'key2', 'key3']} + + +# @patch('insights.client.collection_rules.verify_permissions', return_value=True) +# @patch_isfile(True) +# def test_rm_conf_old_load_bad(isfile, verify): +# ''' +# Ensure that the old rm_conf load rejects +# invalid data. 
+# ''' +# filedata = '[remove]\ncommands=/bin/ls,ethtool_i\nfiles=/etc/test\npatterns=abc123,def456\nkeywords=key1,key2,key3' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf_old() +# assert result == {'commands': ['/bin/ls', 'ethtool_i'], 'files': ['/etc/test'], 'patterns': ['abc123', 'def456'], 'keywords': ['key1', 'key2', 'key3']} + + +# @patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old') +# @patch_isfile(True) +# def test_return(isfile, get_rm_conf_old): +# ''' +# Test that loading YAML from a file will return a dict +# ''' +# filedata = '---\ncommands:\n- /bin/ls\n- ethtool_i' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf() +# assert result == {'commands': ['/bin/ls', 'ethtool_i']} +# get_rm_conf_old.assert_not_called() + + +# @patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old') +# @patch_isfile(True) +# def test_fallback_to_old(isfile, get_rm_conf_old): +# ''' +# Test that the YAML function falls back to classic INI +# if the file cannot be parsed as YAML +# ''' +# filedata = 'ncommands\n /badwain/ls\n- ethtool_i' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# upload_conf.get_rm_conf() +# get_rm_conf_old.assert_called_once() + + +# @pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +# @patch_isfile(True) +# def test_fallback_ini_data(isfile): +# ''' +# Test that the YAML function falls back to classic INI +# if the file cannot be parsed as YAML, and the data is +# parsed as INI +# ''' +# filedata = '[remove]\ncommands=/bin/ls,ethtool_i' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf() +# assert result == {'commands': ['/bin/ls', 'ethtool_i']} + + +# @pytest.mark.skipif(mock.version_info < (3, 0, 5), reason="Old mock_open has no iteration control") +# @patch_isfile(True) +# def test_fallback_bad_data(isfile): +# ''' +# Test that the YAML function falls back to classic INI +# if the file cannot be parsed as YAML, and the data isn't +# INI either so it's thrown out +# ''' +# filedata = 'ncommands\n /badwain/ls\n- ethtool_i' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# with pytest.raises(RuntimeError) as e: +# upload_conf.get_rm_conf() +# assert 'YAML file nor as an INI file' in str(e.value) + + +# @patch_isfile(True) +# def test_load_string_patterns(isfile): +# ''' +# Test that the patterns section is loaded as a list of strings. 
+# ''' +# filedata = '---\npatterns:\n- abcd\n- bcdef' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf() +# assert 'patterns' in result +# assert isinstance(result['patterns'], list) + + +# @patch_isfile(True) +# def test_load_string_regex(isfile): +# ''' +# Test that the patterns section is loaded as a dict with +# key 'regex' and the value is a list of strings +# ''' +# filedata = '---\npatterns:\n regex:\n - abcd\n - bcdef' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf() +# assert 'patterns' in result +# assert isinstance(result['patterns'], dict) +# assert 'regex' in result['patterns'] +# assert isinstance(result['patterns']['regex'], list) + + +# @patch_raw_config_parser([("files", ",".join(removed_files))]) +# @patch_isfile(True) +# def test_return_old(isfile, raw_config_parser): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf_old() + +# raw_config_parser.assert_called_once_with() +# raw_config_parser.return_value.read.assert_called_with(conf_remove_file) +# raw_config_parser.return_value.items.assert_called_with('remove') + +# assert result == {"files": removed_files} + + +# def test_raw_config_parser(): +# ''' +# Ensure that get_rm_conf and json.loads (used to load uploader.json) return the same filename +# ''' +# raw_filename = '/etc/yum/pluginconf.d/()*\\\\w+\\\\.conf' +# uploader_snip = json.loads('{"pattern": [], "symbolic_name": "pluginconf_d", "file": "' + raw_filename + '"}') +# with open(conf_remove_file, 'w') as rm_conf: +# rm_conf.write('[remove]\nfiles=' + raw_filename) +# coll = InsightsUploadConf(InsightsConfig(remove_file=conf_remove_file)) +# items = coll.get_rm_conf() +# assert items['files'][0] == uploader_snip['file'] + + +# @patch_isfile(True) +# def test_config_filtering(isfile): +# ''' +# Verify that keys with None values +# do not appear in the final conf +# ''' +# filedata = '---\npatterns:\nfiles:\n- /var/lib/aaa' +# with patch_open(filedata): +# upload_conf = insights_upload_conf(remove_file=conf_remove_file) +# result = upload_conf.get_rm_conf() +# assert 'patterns' not in result and 'files' in result diff --git a/insights/tests/client/test_collect.py b/insights/tests/client/test_collect.py index 171cbe7f8..4c7052522 100644 --- a/insights/tests/client/test_collect.py +++ b/insights/tests/client/test_collect.py @@ -10,11 +10,14 @@ from tempfile import NamedTemporaryFile import six import mock +import pytest stdin_uploader_json = {"some key": "some value"} stdin_sig = "some signature" stdin_payload = {"uploader.json": json_dumps(stdin_uploader_json), "sig": stdin_sig} conf_remove_file = "/tmp/remove.conf" +conf_file_redaction_file = "/tmp/file-redaction.yaml" +conf_file_content_redaction_file = "/tmp/file-content-redaction.yaml" removed_files = ["/etc/some_file", "/tmp/another_file"] @@ -22,7 +25,10 @@ def collect_args(*insights_config_args, **insights_config_custom_kwargs): """ Instantiates InsightsConfig with a default logging_file argument. 
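+    Default remove_file, redaction_file and content_redaction_file
+    paths are supplied as well, so the blacklist code paths are
+    exercised consistently across these tests.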
""" - all_insights_config_kwargs = {"logging_file": "/tmp/insights.log", "remove_file": conf_remove_file} + all_insights_config_kwargs = {"logging_file": "/tmp/insights.log", + "remove_file": conf_remove_file, + "redaction_file": conf_file_redaction_file, + "content_redaction_file": conf_file_content_redaction_file} all_insights_config_kwargs.update(insights_config_custom_kwargs) return InsightsConfig(*insights_config_args, **all_insights_config_kwargs), Mock() @@ -100,7 +106,7 @@ def decider(*args, **kwargs): """ Returns given value for remove_file and True for any other file. """ - if args[0] == conf_remove_file: + if args[0] in (conf_remove_file, conf_file_redaction_file, conf_file_content_redaction_file): return remove_file_exists else: return True @@ -172,11 +178,12 @@ def test_get_rm_conf_file(get_branch_info, get_conf_file, get_rm_conf, data_coll get_rm_conf.assert_called_once_with() +@patch("insights.client.client.InsightsUploadConf.create_report") @patch_data_collector() @patch_get_rm_conf() @patch_get_conf_file() @patch_get_branch_info() -def test_data_collector_file(get_branch_info, get_conf_file, get_rm_conf, data_collector): +def test_data_collector_file(get_branch_info, get_conf_file, get_rm_conf, data_collector, create_report): """ Configuration from a file is passed to the DataCollector along with removed files configuration. """ @@ -186,7 +193,8 @@ def test_data_collector_file(get_branch_info, get_conf_file, get_rm_conf, data_c collection_rules = get_conf_file.return_value rm_conf = get_rm_conf.return_value branch_info = get_branch_info.return_value - data_collector.return_value.run_collection.assert_called_once_with(collection_rules, rm_conf, branch_info) + blacklist_report = create_report.return_value + data_collector.return_value.run_collection.assert_called_once_with(collection_rules, rm_conf, branch_info, blacklist_report) data_collector.return_value.done.assert_called_once_with(collection_rules, rm_conf) @@ -240,13 +248,15 @@ def test_file_signature_invalid(get_branch_info, validate_gpg_sig, data_collecto validate_gpg_sig.assert_called() +@pytest.mark.skip(reason="This test became too convoluted and will be useless when core collection launches.") @mark.regression +@patch('insights.client.collection_rules.verify_permissions') @patch_data_collector() @patch_raw_config_parser() @patch_isfile(True) @patch_try_disk({"version": "1.2.3"}) @patch_get_branch_info() -def test_file_result(get_branch_info, try_disk, raw_config_parser, data_collector): +def test_file_result(get_branch_info, try_disk, raw_config_parser, data_collector, verify_permissions): """ Configuration from file is loaded from the "uploader.json" key. 
""" @@ -256,8 +266,10 @@ def test_file_result(get_branch_info, try_disk, raw_config_parser, data_collecto open_name = '__builtin__.open' with patch(open_name, create=True) as mock_open: - mock_open.side_effect = [mock.mock_open(read_data='[remove]\nfiles=/etc/some_file,/tmp/another_file').return_value] - + mock_open.side_effect = [mock.mock_open(read_data='').return_value, + mock.mock_open(read_data='').return_value, + mock.mock_open(read_data='[remove]\nfiles=/etc/some_file,/tmp/another_file').return_value] + raw_config_parser.side_effect = [Mock(sections=Mock(return_value=['remove']), items=Mock(return_value=[('files', '/etc/some_file,/tmp/another_file')]))] config, pconn = collect_args() collect(config, pconn) diff --git a/insights/tests/client/test_insights_spec.py b/insights/tests/client/test_insights_spec.py index 63f7867bd..93e8298c4 100644 --- a/insights/tests/client/test_insights_spec.py +++ b/insights/tests/client/test_insights_spec.py @@ -12,7 +12,7 @@ def test_read_pidfile_called(read_pidfile): Pidfile is read when collection starts ''' dc = DataCollector(MagicMock(display_name=None)) - dc.run_collection({'commands': [], 'files': []}, None, None) + dc.run_collection({'commands': [], 'files': []}, None, None, '') read_pidfile.assert_called_once() diff --git a/insights/tests/client/test_skip_commands_files.py b/insights/tests/client/test_skip_commands_files.py index 5861b816a..1b8e9a499 100644 --- a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -16,7 +16,7 @@ def test_omit_before_expanded_paths(InsightsFile, parse_file_spec): collection_rules = {'files': [{"file": "/etc/pam.d/vsftpd", "pattern": [], "symbolic_name": "vsftpd"}], 'commands': {}} rm_conf = {'files': ["/etc/pam.d/vsftpd"]} - data_collector.run_collection(collection_rules, rm_conf, {}) + data_collector.run_collection(collection_rules, rm_conf, {}, '') parse_file_spec.assert_not_called() InsightsFile.assert_not_called() @@ -32,7 +32,7 @@ def test_omit_after_expanded_paths(InsightsFile, parse_file_spec): collection_rules = {'files': [{"file": "/etc/yum.repos.d/()*.*\\.repo", "pattern": [], "symbolic_name": "yum_repos_d"}], 'commands': {}} rm_conf = {'files': ["/etc/yum/repos.d/test.repo"]} - data_collector.run_collection(collection_rules, rm_conf, {}) + data_collector.run_collection(collection_rules, rm_conf, {}, '') parse_file_spec.assert_called_once() InsightsFile.assert_not_called() @@ -51,7 +51,7 @@ def test_omit_symbolic_name(InsightsCommand, InsightsFile, parse_file_spec): 'commands': [{"command": "/sbin/chkconfig --list", "pattern": [], "symbolic_name": "chkconfig"}], 'pre_commands': []} rm_conf = {'files': ["vsftpd"], "commands": ["chkconfig"]} - data_collector.run_collection(collection_rules, rm_conf, {}) + data_collector.run_collection(collection_rules, rm_conf, {}, '') parse_file_spec.assert_not_called() InsightsFile.assert_not_called() InsightsCommand.assert_not_called() @@ -71,7 +71,7 @@ def test_symbolic_name_bc(InsightsArchive, InsightsFile, InsightsCommand): 'commands': [{"command": "/sbin/chkconfig --list", "pattern": []}], 'pre_commands': []} rm_conf = {'files': ["vsftpd"], "commands": ["chkconfig"]} - data_collector.run_collection(collection_rules, rm_conf, {}) + data_collector.run_collection(collection_rules, rm_conf, {}, '') InsightsFile.assert_called_once() InsightsCommand.assert_called_once() InsightsArchive.return_value.add_to_archive.assert_has_calls( @@ -125,5 +125,5 @@ def test_omit_after_parse_command(InsightsCommand, 
run_pre_command): collection_rules = {'commands': [{"command": "/sbin/ethtool -i", "pattern": [], "pre_command": "iface", "symbolic_name": "ethtool"}], 'files': [], "pre_commands": {"iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"}} rm_conf = {'commands': ["/sbin/ethtool -i eth0"]} - data_collector.run_collection(collection_rules, rm_conf, {}) + data_collector.run_collection(collection_rules, rm_conf, {}, '') InsightsCommand.assert_not_called() diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py index 17284ba4b..14899158a 100644 --- a/insights/tests/client/test_utilities.py +++ b/insights/tests/client/test_utilities.py @@ -7,6 +7,7 @@ import re import mock import six +import pytest from mock.mock import patch @@ -118,17 +119,26 @@ def test_get_version_info_no_version(wrapper_constants): assert version_info == {'core_version': '1-1', 'client_version': None} -def test_validate_remove_file(): +def test_validate_remove_file_bad_perms(): tf = '/tmp/remove.cfg' with open(tf, 'wb') as f: f.write(remove_file_content) - assert util.validate_remove_file(InsightsConfig(remove_file='/tmp/boop')) is False - os.chmod(tf, 0o644) - assert util.validate_remove_file(InsightsConfig(remove_file=tf)) is False + + conf = InsightsConfig(remove_file=tf, redaction_file=None, content_redaction_file=None, validate=True) + with pytest.raises(RuntimeError): + os.chmod(tf, 0o644) + util.validate_remove_file(conf) os.chmod(tf, 0o600) - assert util.validate_remove_file(InsightsConfig(remove_file=tf)) is not False + assert util.validate_remove_file(conf) is not False os.remove(tf) + +def test_validate_remove_file_good_perms(): + tf = '/tmp/remove.cfg' + with open(tf, 'wb') as f: + f.write(remove_file_content) + + # TODO: DRY From 5b57ba27fc182b3fa2e6fff3a238aadd0b79f186 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 9 Apr 2020 00:22:49 +0200 Subject: [PATCH 023/892] Parser for 'rpm -V coreutils procps procps-ng shadow-utils passwd sudo' (#2534) * Add parser for rpm_v_packages Signed-off-by: Jitka Obselkova * Remove condition Signed-off-by: Jitka Obselkova --- .../shared_parsers_catalog/rpm_v_packages.rst | 3 ++ insights/parsers/rpm_v_packages.py | 51 +++++++++++++++++++ insights/parsers/tests/test_rpm_v_packages.py | 47 +++++++++++++++++ 3 files changed, 101 insertions(+) create mode 100644 docs/shared_parsers_catalog/rpm_v_packages.rst create mode 100644 insights/parsers/rpm_v_packages.py create mode 100644 insights/parsers/tests/test_rpm_v_packages.py diff --git a/docs/shared_parsers_catalog/rpm_v_packages.rst b/docs/shared_parsers_catalog/rpm_v_packages.rst new file mode 100644 index 000000000..d38a5b31f --- /dev/null +++ b/docs/shared_parsers_catalog/rpm_v_packages.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.rpm_v_packages + :members: + :show-inheritance: diff --git a/insights/parsers/rpm_v_packages.py b/insights/parsers/rpm_v_packages.py new file mode 100644 index 000000000..4b5fbc433 --- /dev/null +++ b/insights/parsers/rpm_v_packages.py @@ -0,0 +1,51 @@ +""" +RpmVPackages - command ``/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo`` +========================================================================================== +""" + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.specs import Specs + + +@parser(Specs.rpm_V_packages) +class RpmVPackages(CommandParser): + """ + Class for parsing ``/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo`` command. + + Attributes: + packages_list (list of dictionaries): every dictionary contains information about one entry + + Sample output of this command is:: + + package procps is not installed + ..?...... c /etc/sudoers + ..?...... /usr/bin/sudo + ..?...... /usr/bin/sudoreplay + missing /var/db/sudo/lectured (Permission denied) + + Examples: + >>> type(rpm_v_packages) + + >>> len(rpm_v_packages.packages_list) + 5 + >>> sorted(rpm_v_packages.packages_list[0].items()) + [('attributes', None), ('file', None), ('line', 'package procps is not installed'), ('mark', None)] + >>> sorted(rpm_v_packages.packages_list[1].items()) + [('attributes', '..?......'), ('file', '/etc/sudoers'), ('line', '..?...... c /etc/sudoers'), ('mark', 'c')] +""" + + def parse_content(self, content): + self.packages_list = [] + + for line in content: + line_parts = line.split() + if "package" in line_parts[0] or "missing" in line_parts[0]: + entry = {"line": line.strip(), "attributes": None, "mark": None, "file": None} + elif len(line_parts) == 3: + entry = {"line": line.strip(), "attributes": line_parts[0], "mark": line_parts[1], + "file": line_parts[2]} + else: + entry = {"line": line.strip(), "attributes": line_parts[0], "mark": None, + "file": line_parts[1]} + self.packages_list.append(entry) diff --git a/insights/parsers/tests/test_rpm_v_packages.py b/insights/parsers/tests/test_rpm_v_packages.py new file mode 100644 index 000000000..233dfbb30 --- /dev/null +++ b/insights/parsers/tests/test_rpm_v_packages.py @@ -0,0 +1,47 @@ +import doctest + +from insights.parsers import rpm_v_packages +from insights.parsers.rpm_v_packages import RpmVPackages +from insights.tests import context_wrap + + +TEST_RPM = """ +package procps is not installed +..?...... c /etc/sudoers +..?...... /usr/bin/sudo +..?...... /usr/bin/sudoreplay +missing /var/db/sudo/lectured (Permission denied) +""" + + +def test_rpm_empty(): + rpm_pkgs = RpmVPackages(context_wrap([])) + assert rpm_pkgs.packages_list == [] + + +def test_rpm(): + line_1 = {'attributes': None, 'file': None, + 'line': 'package procps is not installed', 'mark': None} + line_2 = {'attributes': '..?......', 'file': '/etc/sudoers', + 'line': '..?...... c /etc/sudoers', 'mark': 'c'} + line_3 = {'attributes': '..?......', 'file': '/usr/bin/sudo', + 'line': '..?...... /usr/bin/sudo', 'mark': None} + line_4 = {'attributes': '..?......', 'file': '/usr/bin/sudoreplay', + 'line': '..?...... 
/usr/bin/sudoreplay', 'mark': None} + line_5 = {'attributes': None, 'file': None, + 'line': 'missing /var/db/sudo/lectured (Permission denied)', 'mark': None} + + rpm_pkgs = RpmVPackages(context_wrap(TEST_RPM)) + assert rpm_pkgs.packages_list[0] == line_1 + assert rpm_pkgs.packages_list[1] == line_2 + assert rpm_pkgs.packages_list[2] == line_3 + assert rpm_pkgs.packages_list[3] == line_4 + assert rpm_pkgs.packages_list[4] == line_5 + + +def test_doc_examples(): + env = { + "rpm_v_packages": RpmVPackages(context_wrap(TEST_RPM)) + } + failed, total = doctest.testmod(rpm_v_packages, globs=env) + assert failed == 0 From 9e115e74391eb7d55b4a6e4d251074827564e85d Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Thu, 9 Apr 2020 13:08:11 +0530 Subject: [PATCH 024/892] Enhanced Bond parser to fetch the up and down delay (#2535) * Enhanced Bond parser to fetch the up and down delay Signed-off-by: vishawanathjadhav Enhanced Bond parser to fetch up and down delay * Added data attribute to fetch the slave details in sequence Signed-off-by: vishawanathjadhav Added data attribute to fetch the slave details in sequence * Added data attribute to fetch the slave details in sequence Signed-off-by: vishawanathjadhav Added data attribute to fetch the slave details in sequence Signed-off-by: vishawanathjadhav Added data attribute to fetch the slave details in sequence * Converted variable as a local variable Signed-off-by: vishawanathjadhav Converted attribute to local variable --- insights/parsers/bond.py | 63 +++++++++++++++++-- insights/parsers/tests/test_bond.py | 93 +++++++++++++++++++++++++++++ 2 files changed, 150 insertions(+), 6 deletions(-) diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py index 1495d0d87..b03fa991c 100644 --- a/insights/parsers/bond.py +++ b/insights/parsers/bond.py @@ -103,21 +103,35 @@ def parse_content(self, content): self._slave_speed = [] self._slave_duplex = [] self._primary_slave = None + self._up_delay = None + self._down_delay = None + self._data = {} + name_slave = None for line in get_active_lines(content): if line.startswith("Bonding Mode: "): raw_mode = line.split(":", 1)[1].strip() self._bond_mode = raw_mode + self._data['mode'] = self._bond_mode if raw_mode in BOND_PREFIX_MAP: self._bond_mode = BOND_PREFIX_MAP[raw_mode] + self._data['mode'] = self._bond_mode else: raise ParseException("Unrecognised bonding mode '{b}'".format(b=raw_mode)) elif line.startswith("Partner Mac Address: "): self._partner_mac_address = line.split(":", 1)[1].strip() + self._data['partner_mac'] = self._partner_mac_address elif line.startswith("Slave Interface: "): - self._slave_interface.append(line.split(":", 1)[1].strip()) + name_slave = line.split(":", 1)[1].strip() + self._slave_interface.append(name_slave) + self._data[name_slave] = {} elif line.strip().startswith("Aggregator ID: "): - self._aggregator_id.append(line.strip().split(':', 1)[1].strip()) + agg_id = line.strip().split(':', 1)[1].strip() + self._aggregator_id.append(agg_id) + if name_slave: + self._data[name_slave]['aggregator_id'] = agg_id + else: + self._data['aggregator_id'] = agg_id elif line.strip().startswith("Transmit Hash Policy"): # No need of values in bracket: # Integer notification (0), (1), (2) of layer2, layer3+4, layer2+3 resp @@ -125,19 +139,35 @@ def parse_content(self, content): elif line.strip().startswith("Currently Active Slave"): self._active_slave = line.split(":", 1)[1].split()[0] elif line.strip().startswith("MII Status: "): - self._mii_status.append(line.strip().split(':', 
1)[1].strip())
+            mii_status = line.strip().split(':', 1)[1].strip()
+            self._mii_status.append(mii_status)
+            if name_slave:
+                self._data[name_slave]['mii_status'] = mii_status
+            else:
+                self._data['mii_status'] = mii_status
         elif line.strip().startswith("Link Failure Count: "):
-            self._slave_link_failure_count.append(line.strip().split(':', 1)[1].strip())
+            link_fail_cnt = line.strip().split(':', 1)[1].strip()
+            self._slave_link_failure_count.append(link_fail_cnt)
+            if name_slave:
+                self._data[name_slave]['link_fail_cnt'] = link_fail_cnt
         elif line.strip().startswith("Speed: "):
-            self._slave_speed.append(line.strip().split(':', 1)[1].strip())
+            speed = line.strip().split(':', 1)[1].strip()
+            self._slave_speed.append(speed)
+            self._data[name_slave]['speed'] = speed
         elif line.strip().startswith("Duplex: "):
-            self._slave_duplex.append(line.strip().split(':', 1)[1].strip())
+            duplex = line.strip().split(':', 1)[1].strip()
+            self._slave_duplex.append(duplex)
+            self._data[name_slave]['duplex'] = duplex
         elif line.strip().startswith("ARP Polling Interval (ms):"):
             self._arp_polling_interval = line.strip().split(':', 1)[1].strip()
         elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"):
             self._arp_ip_target = line.strip().split(':', 1)[1].strip()
         elif line.strip().startswith("Primary Slave"):
             self._primary_slave = line.split(":", 1)[1].strip()
+        elif line.strip().startswith("Up Delay (ms):"):
+            self._up_delay = line.strip().split(':', 1)[1].strip()
+        elif line.strip().startswith("Down Delay (ms):"):
+            self._down_delay = line.strip().split(':', 1)[1].strip()
@@ -231,3 +261,24 @@ def primary_slave(self):
         If the key is not in the bond file, ``None`` is returned.
         """
         return self._primary_slave
+
+    @property
+    def up_delay(self):
+        """Returns the "Up Delay" in the bond file if key/value exists.
+        If the key is not in the bond file, ``None`` is returned.
+        """
+        return self._up_delay
+
+    @property
+    def down_delay(self):
+        """Returns the "Down Delay" in the bond file if key/value exists.
+        If the key is not in the bond file, ``None`` is returned.
+        """
+        return self._down_delay
+
+    @property
+    def data(self):
+        """Returns all the details of the bond interface and the corresponding
+        slave details on success; otherwise returns an empty ``{}``.
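+
+        A sketch of the expected shape for an 802.3ad bond (abridged;
+        values follow the BOND_MODE_4 fixture added to the tests below,
+        and keys appear only when the matching lines are present):
+
+            {'mode': '4', 'mii_status': 'up', 'aggregator_id': '1',
+             'partner_mac': '00:00:00:00:00:00',
+             'enp0s9': {'mii_status': 'up', 'speed': '1000 Mbps',
+                        'duplex': 'full', 'link_fail_cnt': '0',
+                        'aggregator_id': '1'},
+             'enp0s8': {'mii_status': 'down', 'speed': 'Unknown',
+                        'duplex': 'Unknown', 'link_fail_cnt': '0',
+                        'aggregator_id': '2'}}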
+ """ + return self._data diff --git a/insights/parsers/tests/test_bond.py b/insights/parsers/tests/test_bond.py index 433d93abb..afba0e6b6 100644 --- a/insights/parsers/tests/test_bond.py +++ b/insights/parsers/tests/test_bond.py @@ -170,6 +170,84 @@ Slave queue ID: 0 """.strip() +BOND_MODE_4 = """ +Ethernet Channel Bonding Driver: v3.7.1 (April 27, 2011) + +Bonding Mode: IEEE 802.3ad Dynamic link aggregation +Transmit Hash Policy: layer2 (0) +MII Status: up +MII Polling Interval (ms): 100 +Up Delay (ms): 2000 +Down Delay (ms): 1000 + +802.3ad info +LACP rate: slow +Min links: 0 +Aggregator selection policy (ad_select): stable +System priority: 65535 +System MAC address: 08:00:27:99:a3:6b +Active Aggregator Info: +\t\t\t\tAggregator ID: 1 +\t\t\t\tNumber of ports: 1 +\t\t\t\tActor Key: 9 +\t\t\t\tPartner Key: 1 +\t\t\t\tPartner Mac Address: 00:00:00:00:00:00 + +Slave Interface: enp0s9 +MII Status: up +Speed: 1000 Mbps +Duplex: full +Link Failure Count: 0 +Permanent HW addr: 08:00:27:99:a3:6b +Slave queue ID: 0 +Aggregator ID: 1 +Actor Churn State: none +Partner Churn State: churned +Actor Churned Count: 0 +Partner Churned Count: 1 +details actor lacp pdu: + system priority: 65535 + system mac address: 08:00:27:99:a3:6b + port key: 9 + port priority: 255 + port number: 1 + port state: 77 +details partner lacp pdu: + system priority: 65535 + system mac address: 00:00:00:00:00:00 + oper key: 1 + port priority: 255 + port number: 1 + port state: 1 + +Slave Interface: enp0s8 +MII Status: down +Speed: Unknown +Duplex: Unknown +Link Failure Count: 0 +Permanent HW addr: 08:00:27:a2:8d:f5 +Slave queue ID: 0 +Aggregator ID: 2 +Actor Churn State: churned +Partner Churn State: churned +Actor Churned Count: 1 +Partner Churned Count: 1 +details actor lacp pdu: + system priority: 65535 + system mac address: 08:00:27:99:a3:6b + port key: 0 + port priority: 255 + port number: 2 + port state: 69 +details partner lacp pdu: + system priority: 65535 + system mac address: 00:00:00:00:00:00 + oper key: 1 + port priority: 255 + port number: 1 + port state: 1 +""".strip() + def test_netstat_doc_examples(): env = { @@ -185,6 +263,11 @@ def test_bond_class(): assert not bond_obj.partner_mac_address assert bond_obj.bond_mode == '0' assert bond_obj.slave_interface == ['eno1', 'eno2'] + assert bond_obj.up_delay == '0' + assert bond_obj.down_delay == '0' + assert bond_obj.data['eno1']['speed'] == '1000 Mbps' + assert bond_obj.data['eno1']['mii_status'] == 'up' + assert bond_obj.data['eno2']['mii_status'] == 'up' bond_obj = Bond(context_wrap(BONDINFO_MODE_4, CONTEXT_PATH)) assert bond_obj.bond_mode == '4' @@ -225,6 +308,16 @@ def test_bond_class(): assert bond_obj_4.arp_ip_target == "10.152.1.1" assert bond_obj_4.primary_slave == 'em3 (primary_reselect failure)' + bond_obj = Bond(context_wrap(BOND_MODE_4, CONTEXT_PATH)) + assert bond_obj.file_name == 'bond0' + assert bond_obj.up_delay == '2000' + assert bond_obj.down_delay == '1000' + assert bond_obj.data['mii_status'] == 'up' + assert bond_obj.data['enp0s9']['mii_status'] == 'up' + assert bond_obj.data['enp0s8']['mii_status'] == 'down' + assert bond_obj.data['enp0s8']['aggregator_id'] == '2' + assert bond_obj.data['enp0s9']['aggregator_id'] == '1' + with pytest.raises(ParseException) as exc: bond_obj = Bond(context_wrap(BONDINFO_UNKNOWN_BOND_MODE, CONTEXT_PATH)) assert not bond_obj.bond_mode From 6f12940fb21f1231a692b6e0f929c3215f6a9bc8 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 9 Apr 2020 13:16:21 -0400 Subject: [PATCH 025/892] messaging fixes for 
blacklist (#2537) * messaging fixes for blacklist Signed-off-by: Jeremy Crafts * fix case where conf does not exist Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index fcf77a0ad..0e5b2f7b2 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -204,7 +204,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): raise LookupError logger.warn("WARNING: Skipping patterns defined in blacklist configuration") except LookupError: - logger.debug('Patterns section of remove.conf is empty.') + logger.debug('Patterns section of blacklist configuration is empty.') for c in conf['commands']: # remember hostname archive path @@ -272,13 +272,14 @@ def done(self, conf, rm_conf): and archive files. """ if self.config.obfuscate: + if rm_conf and rm_conf.get('keywords'): + logger.warn("WARNING: Skipping keywords defined in blacklist configuration") cleaner = SOSCleaner(quiet=True) clean_opts = CleanOptions( self.config, self.archive.tmp_dir, rm_conf, self.hostname_path) cleaner.clean_report(clean_opts, self.archive.archive_dir) if clean_opts.keyword_file is not None: os.remove(clean_opts.keyword_file.name) - logger.warn("WARNING: Skipping keywords found in remove.conf") if self.config.output_dir: # return the entire soscleaner dir # see additions to soscleaner.SOSCleaner.clean_report From 5333dea253921b205af808136f1cc217f8c31be8 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 9 Apr 2020 14:44:43 -0400 Subject: [PATCH 026/892] do not error out when using --group on platform (#2532) Signed-off-by: Jeremy Crafts --- insights/client/config.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/insights/client/config.py b/insights/client/config.py index 49bb1b7aa..a13f5f45c 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -639,10 +639,6 @@ def _validate_options(self): if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') - if not self.legacy_upload: - if self.group: - raise ValueError( - '--group is not supported at this time.') if self.offline: if self.to_json: raise ValueError('Cannot use --to-json in offline mode.') From 880dc04b17be4f7fef5df2a724fd11fb8de2249a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 9 Apr 2020 15:28:37 -0400 Subject: [PATCH 027/892] direct to insights console after registration (#2521) Signed-off-by: Jeremy Crafts --- insights/client/client.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/insights/client/client.py b/insights/client/client.py index a762aa0c1..f6293d350 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -328,6 +328,9 @@ def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=No msg_name, account_number) else: logger.info("Successfully uploaded report for %s.", msg_name) + if config.register: + # direct to console after register + upload + logger.info('View the Red Hat Insights console at https://cloud.redhat.com/insights/') break elif upload.status_code in (412, 413): @@ -359,6 +362,9 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None): if upload.status_code in (200, 202): msg_name = determine_hostname(config.display_name) logger.info("Successfully uploaded report for %s.", msg_name) + if config.register: + # direct to console after register + upload + logger.info('View the Red Hat Insights 
console at https://cloud.redhat.com/insights/') return elif upload.status_code in (413, 415): pconn.handle_fail_rcs(upload) From 6e69fb57568f47e77e8eb39d44da5bddf60670f6 Mon Sep 17 00:00:00 2001 From: Jesse Jaggars Date: Thu, 16 Apr 2020 10:15:55 -0400 Subject: [PATCH 028/892] Prefer igzip over gzip when handling tarballs (#2546) * Adding a decompressor option to from_file This allows callers to specify exactly which decompression binary to use, skipping the content-type check and allowing for use of novel programs such as igzip. Signed-off-by: Jesse Jaggars * Automatically use igzip if available. Causes tar to use igzip with `-I igzip` if it's on the PATH. Signed-off-by: Christopher Sams * Remove custom decompressor param in favor of existing content_type keyword. Signed-off-by: Christopher Sams Co-authored-by: Christopher Sams --- insights/core/archives.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/insights/core/archives.py b/insights/core/archives.py index ebe10d9b6..2e53d612f 100644 --- a/insights/core/archives.py +++ b/insights/core/archives.py @@ -4,7 +4,7 @@ import os import tempfile from contextlib import contextmanager -from insights.util import fs, subproc +from insights.util import fs, subproc, which from insights.util.content_type import from_file as content_type_from_file logger = logging.getLogger(__name__) @@ -50,8 +50,8 @@ def __init__(self, timeout=None): TAR_FLAGS = { "application/x-xz": "-J", - "application/x-gzip": "-z", - "application/gzip": "-z", + "application/x-gzip": "-I igzip" if which("igzip") else "-z", + "application/gzip": "-I igzip" if which("igzip") else "-z", "application/x-bzip2": "-j", "application/x-tar": "" } From 45be0f337b0fba011ff515a38194f4ed606adc7f Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 16 Apr 2020 22:45:01 +0800 Subject: [PATCH 029/892] Remove the db2licm (#2542) Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/db2licm.rst | 3 - insights/parsers/db2licm.py | 79 --------------------- insights/parsers/tests/test_db2licm.py | 94 ------------------------- insights/specs/__init__.py | 1 - insights/specs/default.py | 1 - insights/specs/insights_archive.py | 1 - 6 files changed, 179 deletions(-) delete mode 100644 docs/shared_parsers_catalog/db2licm.rst delete mode 100644 insights/parsers/db2licm.py delete mode 100644 insights/parsers/tests/test_db2licm.py diff --git a/docs/shared_parsers_catalog/db2licm.rst b/docs/shared_parsers_catalog/db2licm.rst deleted file mode 100644 index b5777bfc0..000000000 --- a/docs/shared_parsers_catalog/db2licm.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.db2licm - :members: - :show-inheritance: diff --git a/insights/parsers/db2licm.py b/insights/parsers/db2licm.py deleted file mode 100644 index f15b96396..000000000 --- a/insights/parsers/db2licm.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -IBM DB2 Sever details -===================== - -Module for the processing of output from the ``db2licm -l`` command. -""" -from insights.core.plugins import parser -from insights.core import CommandParser -from insights.parsers import ParseException, get_active_lines -from insights.specs import Specs - - -@parser(Specs.db2licm_l) -class DB2Info(CommandParser, dict): - """ - This parser processes the output of the command `db2licm_l` and provides - the information as a dictionary. 
- - Sample input:: - - Product name: "DB2 Enterprise Server Edition" - License type: "CPU Option" - Expiry date: "Permanent" - Product identifier: "db2ese" - Version information: "9.7" - Enforcement policy: "Soft Stop" - Features: - DB2 Performance Optimization ESE: "Not licensed" - DB2 Storage Optimization: "Not licensed" - DB2 Advanced Access Control: "Not licensed" - IBM Homogeneous Replication ESE: "Not licensed" - - Product name: "DB2 Connect Server" - Expiry date: "Expired" - Product identifier: "db2consv" - Version information: "9.7" - Concurrent connect user policy: "Disabled" - Enforcement policy: "Soft Stop" - - Example: - - >>> list(parser_result.keys()) - ['DB2 Enterprise Server Edition', 'DB2 Connect Server'] - >>> parser_result['DB2 Enterprise Server Edition']["Version information"] - '9.7' - - Override the base class parse_content to parse the output of the '''db2licm -l''' command. - Information that is stored in the object is made available to the rule plugins. - - - Raises: - ParseException: raised if data is not parsable. - """ - - def parse_content(self, content): - - # name = None - body = {} - - # Input data is available in text file. Reading each line in file and parsing it to a dictionary. - for line in get_active_lines(content): - if ':' in line: - key, val = [i.strip() for i in line.strip().split(":", 1)] - if key == "Features": - continue - else: - raise ParseException("Unable to parse db2licm info: {0}".format(content)) - - if key == "Product name": - body = {} - self[val] = body - else: - body[key] = val - - if not self: - # If no data is obtained in the command execution then throw an exception instead of returning an empty - # object. Rules depending solely on this parser will not be invoked, so they don't have to - # explicitly check for invalid data. 
- raise ParseException("Unable to parse db2licm info: {0}".format(content)) diff --git a/insights/parsers/tests/test_db2licm.py b/insights/parsers/tests/test_db2licm.py deleted file mode 100644 index ece1e82c2..000000000 --- a/insights/parsers/tests/test_db2licm.py +++ /dev/null @@ -1,94 +0,0 @@ -import doctest -import pytest -from insights.parsers import ParseException -from insights.tests import context_wrap -from insights.parsers import db2licm -from insights.parsers.db2licm import DB2Info - -INVALID_OUTPUT = "".strip() - -VALID_OUTPUT = """ -Product name: DB2 Enterprise Server Edition -License type: CPU Option -Expiry date: Permanent -Product identifier: db2ese -Version information: 9.7 -Enforcement policy: Soft Stop -Features: -DB2 Performance Optimization ESE: Not licensed -DB2 Storage Optimization: Not licensed -DB2 Advanced Access Control: Not licensed -IBM Homogeneous Replication ESE: Not licensed -""".strip() - -VALID_OUTPUT_MULTIPLE = """ -Product name: DB2 Enterprise Server Edition -License type: CPU Option -Expiry date: Permanent -Product identifier: db2ese -Version information: 9.7 -Enforcement policy: Soft Stop -Features: -DB2 Performance Optimization ESE: Not licensed -DB2 Storage Optimization: Not licensed -DB2 Advanced Access Control: Not licensed -IBM Homogeneous Replication ESE: Not licensed - -Product name: DB2 Connect Server -Expiry date: Expired -Product identifier: db2consv -Version information: 9.7 -Concurrent connect user policy: Disabled -Enforcement policy: Soft Stop -""".strip() - - -def test_valid_command_output_1(): - parser_result = DB2Info(context_wrap(VALID_OUTPUT)) - assert parser_result is not None - - assert parser_result["DB2 Enterprise Server Edition"]["License type"] == "CPU Option" - assert parser_result["DB2 Enterprise Server Edition"]["Expiry date"] == "Permanent" - assert parser_result["DB2 Enterprise Server Edition"]["Product identifier"] == "db2ese" - assert parser_result["DB2 Enterprise Server Edition"]["Version information"] == "9.7" - assert parser_result["DB2 Enterprise Server Edition"]["Enforcement policy"] == "Soft Stop" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Performance Optimization ESE"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Storage Optimization"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Advanced Access Control"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["IBM Homogeneous Replication ESE"] == "Not licensed" - - -def test_valid_command_output_2(): - parser_result = DB2Info(context_wrap(VALID_OUTPUT_MULTIPLE)) - assert parser_result is not None - - assert parser_result["DB2 Enterprise Server Edition"]["License type"] == "CPU Option" - assert parser_result["DB2 Enterprise Server Edition"]["Expiry date"] == "Permanent" - assert parser_result["DB2 Enterprise Server Edition"]["Product identifier"] == "db2ese" - assert parser_result["DB2 Enterprise Server Edition"]["Version information"] == "9.7" - assert parser_result["DB2 Enterprise Server Edition"]["Enforcement policy"] == "Soft Stop" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Performance Optimization ESE"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Storage Optimization"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["DB2 Advanced Access Control"] == "Not licensed" - assert parser_result["DB2 Enterprise Server Edition"]["IBM Homogeneous Replication ESE"] == "Not licensed" - - 
assert parser_result["DB2 Connect Server"]["Expiry date"] == "Expired" - assert parser_result["DB2 Connect Server"]["Product identifier"] == "db2consv" - assert parser_result["DB2 Connect Server"]["Version information"] == "9.7" - assert parser_result["DB2 Connect Server"]["Enforcement policy"] == "Soft Stop" - assert parser_result["DB2 Connect Server"]["Concurrent connect user policy"] == "Disabled" - - -def test_invalid_command_output(): - with pytest.raises(ParseException) as e: - DB2Info(context_wrap(INVALID_OUTPUT)) - assert "Unable to parse db2licm info: []" == str(e.value) - - -def test_db2licm_doc_examples(): - env = { - 'parser_result': DB2Info( - context_wrap(VALID_OUTPUT_MULTIPLE)), - } - failed, total = doctest.testmod(db2licm, globs=env) - assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 890c7a831..ad0cbb853 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -102,7 +102,6 @@ class Specs(SpecSet): date_iso = RegistryPoint() date = RegistryPoint() date_utc = RegistryPoint() - db2licm_l = RegistryPoint() dcbtool_gc_dcb = RegistryPoint(multi_output=True) df__alP = RegistryPoint() df__al = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 1fb8fa681..86ff6bdee 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -218,7 +218,6 @@ def is_ceph_monitor(broker): date = simple_command("/bin/date") date_iso = simple_command("/bin/date --iso-8601=seconds") date_utc = simple_command("/bin/date --utc") - db2licm_l = simple_command("/usr/bin/db2licm -l") df__al = simple_command("/bin/df -al") df__alP = simple_command("/bin/df -alP") df__li = simple_command("/bin/df -li") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 6fe3373f2..9643db78f 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -37,7 +37,6 @@ class InsightsArchiveSpecs(Specs): date = simple_file("insights_commands/date") date_iso = simple_file("insights_commands/date_--iso-8601_seconds") date_utc = simple_file("insights_commands/date_--utc") - db2licm_l = simple_file("insights_commands/db2licm_l") df__al = simple_file("insights_commands/df_-al") df__alP = simple_file("insights_commands/df_-alP") df__li = simple_file("insights_commands/df_-li") From 711a98ca4d6bbfb1142b6d8e6bad1233dbefd19b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 16 Apr 2020 23:19:57 +0800 Subject: [PATCH 030/892] Add parser for spec "ndctl list -Ni" (#2541) * Add parser for spec "ndctl list -Ni" Signed-off-by: Huanhuan Li * Add docstring, renmae spec name, and update the method Signed-off-by: Huanhuan Li * Update docstring Signed-off-by: Huanhuan Li * Rename spec name, update method name Signed-off-by: Huanhuan Li * Fix python2 test error, update docstring Signed-off-by: Huanhuan Li * Move the code to get blokdev list to parse_content Signed-off-by: Huanhuan Li --- docs/shared_parsers_catalog/ndctl_list.rst | 3 + insights/parsers/ndctl_list.py | 85 ++++++++++++++++++++++ insights/parsers/tests/test_ndctl_list.py | 51 +++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 142 insertions(+) create mode 100644 docs/shared_parsers_catalog/ndctl_list.rst create mode 100644 insights/parsers/ndctl_list.py create mode 100644 insights/parsers/tests/test_ndctl_list.py diff --git a/docs/shared_parsers_catalog/ndctl_list.rst 
b/docs/shared_parsers_catalog/ndctl_list.rst new file mode 100644 index 000000000..0545cbde9 --- /dev/null +++ b/docs/shared_parsers_catalog/ndctl_list.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ndctl_list + :members: + :show-inheritance: diff --git a/insights/parsers/ndctl_list.py b/insights/parsers/ndctl_list.py new file mode 100644 index 000000000..e93bb3576 --- /dev/null +++ b/insights/parsers/ndctl_list.py @@ -0,0 +1,85 @@ +""" +Dump the platform nvdimm device topology and attributes in json +=============================================================== + +This module contains the following parsers: + +NdctlListNi - command ``/usr/bin/ndctl list -Ni`` +================================================= +""" + +from insights.core import JSONParser, CommandParser +from insights.core.plugins import parser +from insights.specs import Specs + + +@parser(Specs.ndctl_list_Ni) +class NdctlListNi(JSONParser, CommandParser): + """ + Class for parsing the command of ``/usr/bin/ndctl list -Ni`` + + Sample output:: + + [ + { + "dev":"namespace1.0", + "mode":"fsdax", + "map":"mem", + "size":811746721792, + "uuid":"6a5d93a5-6044-461b-8d19-0409bd323a94", + "sector_size":512, + "align":2097152, + "blockdev":"pmem1" + }, + { + "dev":"namespace1.1", + "mode":"raw", + "size":0, + "uuid":"00000000-0000-0000-0000-000000000000", + "sector_size":512, + "state":"disabled" + }, + { + "dev":"namespace0.0", + "mode":"raw", + "size":0, + "uuid":"00000000-0000-0000-0000-000000000000", + "sector_size":512, + "state":"disabled" + } + ] + + Examples: + + >>> type(ndctl_list) + + >>> 'pmem1' in ndctl_list.blockdev_list + True + >>> ndctl_list.get_blockdev('pmem1').get('mode') == 'fsdax' + True + """ + + def parse_content(self, content): + super(NdctlListNi, self).parse_content(content) + self._blockdevs = [item['blockdev'] for item in self.data if 'blockdev' in item] + + @property + def blockdev_list(self): + """ Return a list of the blockdev attribute of all the devices if it has the attribute""" + return self._blockdevs + + def get_blockdev(self, dev_name): + """ + Return a dict of the block device info + + Args: + dev_name (str): the blockdev name + + Returns: + dict: return a dict with all the info if there is the block device else empty dict + + """ + for item in self.data: + if item.get('blockdev', '') == dev_name: + return item + return {} diff --git a/insights/parsers/tests/test_ndctl_list.py b/insights/parsers/tests/test_ndctl_list.py new file mode 100644 index 000000000..671909803 --- /dev/null +++ b/insights/parsers/tests/test_ndctl_list.py @@ -0,0 +1,51 @@ +import doctest +from insights.parsers import ndctl_list +from insights.parsers.ndctl_list import NdctlListNi +from insights.tests import context_wrap + +NDCTL_OUTPUT = """ +[ + { + "dev":"namespace1.0", + "mode":"fsdax", + "map":"mem", + "size":811746721792, + "uuid":"6a7d93f5-60c4-461b-8d19-0409bd323a94", + "sector_size":512, + "align":2097152, + "blockdev":"pmem1" + }, + { + "dev":"namespace1.1", + "mode":"raw", + "size":0, + "uuid":"00000000-0000-0000-0000-000000000000", + "sector_size":512, + "state":"disabled" + }, + { + "dev":"namespace0.0", + "mode":"raw", + "size":0, + "uuid":"00000000-0000-0000-0000-000000000000", + "sector_size":512, + "state":"disabled" + } +] +""".strip() + + +def test_netstat_doc_examples(): + env = { + 'ndctl_list': NdctlListNi(context_wrap(NDCTL_OUTPUT)) + } + failed, total = doctest.testmod(ndctl_list, globs=env) + assert failed == 0 + + +def test_get_dev_attr(): + ndctl = 
NdctlListNi(context_wrap(NDCTL_OUTPUT)) + assert ndctl.blockdev_list == ['pmem1'] + assert 'map' in ndctl.get_blockdev('pmem1') + assert ndctl.get_blockdev('pmem1').get('map') == 'mem' + assert ndctl.get_blockdev('pmem2') == {} diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ad0cbb853..d6495a85c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -352,6 +352,7 @@ class Specs(SpecSet): mysqld_limits = RegistryPoint() named_checkconf_p = RegistryPoint(filterable=True) namespace = RegistryPoint() + ndctl_list_Ni = RegistryPoint() netconsole = RegistryPoint() netstat_agn = RegistryPoint() netstat_i = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 86ff6bdee..db0695b25 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -621,6 +621,7 @@ def lsmod_all_names(broker): mysqld_limits = foreach_collect(mysqld_pid, "/proc/%s/limits") named_checkconf_p = simple_command("/usr/sbin/named-checkconf -p") namespace = simple_command("/bin/ls /var/run/netns") + ndctl_list_Ni = simple_command("/usr/bin/ndctl list -Ni") ip_netns_exec_namespace_lsof = foreach_execute(namespace, "/sbin/ip netns exec %s lsof -i") netconsole = simple_file("/etc/sysconfig/netconsole") netstat = simple_command("/bin/netstat -neopa") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 9643db78f..7efd050dc 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -170,6 +170,7 @@ class InsightsArchiveSpecs(Specs): mysqladmin_status = simple_file("insights_commands/mysqladmin_status") named_checkconf_p = simple_file("insights_commands/named-checkconf_-p") namespace = simple_file("insights_commands/ls_.var.run.netns") + ndctl_list_Ni = simple_file("insights_commands/ndctl_list_-Ni") netstat = simple_file("insights_commands/netstat_-neopa") netstat_agn = simple_file("insights_commands/netstat_-agn") netstat_i = simple_file("insights_commands/netstat_-i") From 0df069a682f4d35045e7f99d95ecf62bf8935c11 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 22 Apr 2020 14:18:28 +0800 Subject: [PATCH 031/892] Add parser for kdump-capture.service of dracut module (#2549) * Add parser for kdump-capture.service of dracut module Signed-off-by: Huanhuan Li * Add notes Signed-off-by: Huanhuan Li * Rename files, update examples, docstring Signed-off-by: Huanhuan Li * Add sample input and update python2 error Signed-off-by: Huanhuan Li --- .../shared_parsers_catalog/dracut_modules.rst | 3 ++ insights/parsers/dracut_modules.py | 45 ++++++++++++++++++ insights/parsers/tests/test_dracut_modules.py | 46 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 96 insertions(+) create mode 100644 docs/shared_parsers_catalog/dracut_modules.rst create mode 100644 insights/parsers/dracut_modules.py create mode 100644 insights/parsers/tests/test_dracut_modules.py diff --git a/docs/shared_parsers_catalog/dracut_modules.rst b/docs/shared_parsers_catalog/dracut_modules.rst new file mode 100644 index 000000000..a161ed391 --- /dev/null +++ b/docs/shared_parsers_catalog/dracut_modules.rst @@ -0,0 +1,3 @@ +.. 
From 0df069a682f4d35045e7f99d95ecf62bf8935c11 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Wed, 22 Apr 2020 14:18:28 +0800
Subject: [PATCH 031/892] Add parser for kdump-capture.service of dracut module (#2549)

* Add parser for kdump-capture.service of dracut module

Signed-off-by: Huanhuan Li

* Add notes

Signed-off-by: Huanhuan Li

* Rename files, update examples, docstring

Signed-off-by: Huanhuan Li

* Add sample input and update python2 error

Signed-off-by: Huanhuan Li
---
 .../shared_parsers_catalog/dracut_modules.rst |  3 ++
 insights/parsers/dracut_modules.py            | 45 ++++++++++++++++++
 insights/parsers/tests/test_dracut_modules.py | 46 +++++++++++++++++++
 insights/specs/__init__.py                    |  1 +
 insights/specs/default.py                     |  1 +
 5 files changed, 96 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/dracut_modules.rst
 create mode 100644 insights/parsers/dracut_modules.py
 create mode 100644 insights/parsers/tests/test_dracut_modules.py

diff --git a/docs/shared_parsers_catalog/dracut_modules.rst b/docs/shared_parsers_catalog/dracut_modules.rst
new file mode 100644
index 000000000..a161ed391
--- /dev/null
+++ b/docs/shared_parsers_catalog/dracut_modules.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.dracut_modules
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/dracut_modules.py b/insights/parsers/dracut_modules.py
new file mode 100644
index 000000000..3bafc8295
--- /dev/null
+++ b/insights/parsers/dracut_modules.py
@@ -0,0 +1,45 @@
+"""
+Dracut module configuration files to build and extend the initramfs image
+==========================================================================
+
+This module contains the following parsers:
+
+DracutModuleKdumpCaptureService - file ``/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service``
+------------------------------------------------------------------------------------------------------
+"""
+
+from insights import parser, IniConfigFile
+from insights.specs import Specs
+
+
+@parser(Specs.dracut_kdump_capture_service)
+class DracutModuleKdumpCaptureService(IniConfigFile):
+    """
+    Class for parsing the `/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service` file.
+
+    .. note::
+        Please refer to its super-class :py:class:`insights.core.IniConfigFile`
+        for full usage.
+
+    Sample input::
+
+        [Unit]
+        Description=Kdump Vmcore Save Service
+        After=initrd.target initrd-parse-etc.service sysroot.mount
+        Before=initrd-cleanup.service
+
+        [Service]
+        Type=oneshot
+        ExecStart=/bin/kdump.sh
+        StandardInput=null
+        StandardOutput=syslog
+
+    Examples:
+        >>> 'Service' in config.sections()
+        True
+        >>> config.has_option('Service', 'Type')
+        True
+        >>> config.get('Service', 'Type') == 'oneshot'
+        True
+    """
+    pass
diff --git a/insights/parsers/tests/test_dracut_modules.py b/insights/parsers/tests/test_dracut_modules.py
new file mode 100644
index 000000000..f4db27cc1
--- /dev/null
+++ b/insights/parsers/tests/test_dracut_modules.py
@@ -0,0 +1,46 @@
+import doctest
+
+from insights.tests import context_wrap
+from insights.parsers import dracut_modules
+from insights.parsers.dracut_modules import DracutModuleKdumpCaptureService
+
+KDUMP_CAPTURE_SERVICE = """
+[Unit]
+Description=Kdump Vmcore Save Service
+After=dracut-initqueue.service dracut-pre-mount.service dracut-mount.service dracut-pre-pivot.service
+Before=initrd-cleanup.service
+ConditionPathExists=/etc/initrd-release
+OnFailure=emergency.target
+OnFailureIsolate=yes
+
+[Service]
+Environment=DRACUT_SYSTEMD=1
+Environment=NEWROOT=/sysroot
+Type=oneshot
+ExecStart=/bin/kdump.sh
+StandardInput=null
+StandardOutput=syslog
+StandardError=syslog+console
+KillMode=process
+RemainAfterExit=yes
+
+# Bash ignores SIGTERM, so we send SIGHUP instead, to ensure that bash
+# terminates cleanly.
+KillSignal=SIGHUP
+""".strip()
+
+
+def test_dracut_kdump_capture():
+    kdump_service_conf = DracutModuleKdumpCaptureService(context_wrap(KDUMP_CAPTURE_SERVICE))
+    assert 'Unit' in kdump_service_conf.sections()
+    assert 'dracut-initqueue.service' in kdump_service_conf.get('Unit', 'After')
+
+
+def test_doc():
+    failed_count, tests = doctest.testmod(
+        dracut_modules,
+        globs={
+            'config': dracut_modules.DracutModuleKdumpCaptureService(context_wrap(KDUMP_CAPTURE_SERVICE)),
+        }
+    )
+    assert failed_count == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index d6495a85c..30475c4fc 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -133,6 +133,7 @@ class Specs(SpecSet):
     docker_storage = RegistryPoint()
     docker_storage_setup = RegistryPoint()
     docker_sysconfig = RegistryPoint()
+    dracut_kdump_capture_service = RegistryPoint()
     dumpe2fs_h = RegistryPoint(multi_output=True)
     engine_config_all = RegistryPoint()
     engine_log = RegistryPoint(filterable=True)
diff --git a/insights/specs/default.py b/insights/specs/default.py
index db0695b25..b3f7f5337 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -294,6 +294,7 @@ def dumpdev(broker):
         return mounted_dev
     raise SkipComponent()

+    dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service")
     dumpe2fs_h = foreach_execute(dumpdev, "/sbin/dumpe2fs -h %s")
     engine_config_all = simple_command("/usr/bin/engine-config --all")
     engine_log = simple_file("/var/log/ovirt-engine/engine.log")
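Because the parser derives from ``IniConfigFile``, rules read the systemd unit
with the standard ini accessors. A minimal sketch, assuming a hypothetical rule
name and error key::

    from insights import rule
    from insights.core.plugins import make_fail
    from insights.parsers.dracut_modules import DracutModuleKdumpCaptureService

    @rule(DracutModuleKdumpCaptureService)
    def kdump_capture_type(config):
        # the vmcore save step is expected to be a oneshot unit
        if config.get('Service', 'Type') != 'oneshot':
            return make_fail('UNEXPECTED_KDUMP_SERVICE_TYPE',
                             type=config.get('Service', 'Type'))
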
From 841b759b4e910cccfe01f0897cecca316572e045 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Wed, 22 Apr 2020 15:17:30 +0800
Subject: [PATCH 032/892] Add parser for "/etc/cron.daily/rhsmd" (#2536)

* Add parser for "/etc/cron.daily/rhsmd"

Signed-off-by: Huanhuan Li

* Change parent class to Scannable because the datetime function doesn't fit
  this parser

Signed-off-by: Huanhuan Li

* Add test in test file too

Signed-off-by: Huanhuan Li

* Update examples

Signed-off-by: Huanhuan Li

* Update notes

Signed-off-by: Huanhuan Li

Co-authored-by: Xiangce Liu
---
 .../cron_daily_rhsmd.rst                        |  3 ++
 insights/parsers/cron_daily_rhsmd.py            | 35 +++++++++++++++++++
 .../parsers/tests/test_cron_daily_rhsmd.py      | 29 +++++++++++++++
 insights/specs/__init__.py                      |  1 +
 insights/specs/default.py                       |  1 +
 5 files changed, 69 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/cron_daily_rhsmd.rst
 create mode 100644 insights/parsers/cron_daily_rhsmd.py
 create mode 100644 insights/parsers/tests/test_cron_daily_rhsmd.py

diff --git a/docs/shared_parsers_catalog/cron_daily_rhsmd.rst b/docs/shared_parsers_catalog/cron_daily_rhsmd.rst
new file mode 100644
index 000000000..b202f9a0b
--- /dev/null
+++ b/docs/shared_parsers_catalog/cron_daily_rhsmd.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.cron_daily_rhsmd
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/cron_daily_rhsmd.py b/insights/parsers/cron_daily_rhsmd.py
new file mode 100644
index 000000000..159e68643
--- /dev/null
+++ b/insights/parsers/cron_daily_rhsmd.py
@@ -0,0 +1,35 @@
+"""
+CronDailyRhsmd - file ``/etc/cron.daily/rhsmd``
+===============================================
+"""
+
+from insights.core import Scannable
+from insights.core.plugins import parser
+from insights.specs import Specs
+
+
+@parser(Specs.cron_daily_rhsmd)
+class CronDailyRhsmd(Scannable):
+    """
+    Parse the ``/etc/cron.daily/rhsmd`` file.
+
+    Sample input::
+
+        config=$(grep -E "^processTimeout" /etc/rhsm/rhsm.conf | grep -Po "[0-9]+")
+        rhsmd_timeout=$config
+        abc=$config
+
+    .. note::
+        Please refer to its super-class :py:class:`insights.core.Scannable`
+        for full usage.
+
+    Examples:
+
+        >>> # CronDailyRhsmd.collect('config_lines', lambda n: n if "$config" in n else "")
+        >>> # CronDailyRhsmd.any('one_config_line', lambda n: n if "$config" in n else "")
+        >>> rhsmd.config_lines
+        ['rhsmd_timeout=$config', 'abc=$config']
+        >>> rhsmd.one_config_line
+        'rhsmd_timeout=$config'
+    """
+    pass
diff --git a/insights/parsers/tests/test_cron_daily_rhsmd.py b/insights/parsers/tests/test_cron_daily_rhsmd.py
new file mode 100644
index 000000000..ecb1a6c66
--- /dev/null
+++ b/insights/parsers/tests/test_cron_daily_rhsmd.py
@@ -0,0 +1,29 @@
+import doctest
+
+from insights.parsers import cron_daily_rhsmd
+from insights.parsers.cron_daily_rhsmd import CronDailyRhsmd
+from insights.tests import context_wrap
+
+RHSMD_1 = """
+config=$(grep -E "^processTimeout" /etc/rhsm/rhsm.conf | grep -Po "[0-9]+")
+rhsmd_timeout=$config
+abc=$config
+""".strip()
+
+
+def test_docs():
+    CronDailyRhsmd.collect('config_lines', lambda n: n if "$config" in n else "")
+    CronDailyRhsmd.any('one_config_line', lambda n: n if "$config" in n else "")
+    env = {
+        'rhsmd': CronDailyRhsmd(context_wrap(RHSMD_1))
+    }
+    failed, total = doctest.testmod(cron_daily_rhsmd, globs=env)
+    assert failed == 0
+
+
+def test_parser():
+    cron_daily_rhsmd.CronDailyRhsmd.collect('config_variable_lines', lambda n: n if "$config" in n else "")
+    cron_daily_rhsmd.CronDailyRhsmd.any('rhsmd_timeout', lambda n: n if "rhsmd_timeout" in n else "")
+    rhs = cron_daily_rhsmd.CronDailyRhsmd(context_wrap(RHSMD_1))
+    assert rhs.rhsmd_timeout == 'rhsmd_timeout=$config'
+    assert rhs.config_variable_lines == ['rhsmd_timeout=$config', 'abc=$config']
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 30475c4fc..80b6238da 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -93,6 +93,7 @@ class Specs(SpecSet):
     cpuinfo = RegistryPoint()
     cpupower_frequency_info = RegistryPoint()
     cpuset_cpus = RegistryPoint()
+    cron_daily_rhsmd = RegistryPoint(filterable=True)
     crypto_policies_config = RegistryPoint()
     crypto_policies_state_current = RegistryPoint()
     crypto_policies_opensshserver = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index b3f7f5337..15755dd2b 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -210,6 +210,7 @@ def is_ceph_monitor(broker):
     cpuinfo_max_freq = simple_file("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq")
     cpupower_frequency_info = simple_command("/usr/bin/cpupower -c all frequency-info")
     cpuset_cpus = simple_file("/sys/fs/cgroup/cpuset/cpuset.cpus")
+    cron_daily_rhsmd = simple_file("/etc/cron.daily/rhsmd")
     crypto_policies_config = simple_file("/etc/crypto-policies/config")
     crypto_policies_state_current = simple_file("/etc/crypto-policies/state/current")
     crypto_policies_opensshserver = simple_file("/etc/crypto-policies/back-ends/opensshserver.config")
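``Scannable`` builds parser attributes from scanners registered before parsing,
which is why the doctest above registers them in the test module first. A
minimal sketch of the same mechanism outside a doctest (the attribute name is
an illustrative assumption)::

    from insights.parsers.cron_daily_rhsmd import CronDailyRhsmd
    from insights.tests import context_wrap

    # collect every line mentioning $config into a .config_lines attribute
    CronDailyRhsmd.collect('config_lines', lambda line: line if '$config' in line else '')
    rhsmd = CronDailyRhsmd(context_wrap('rhsmd_timeout=$config'))
    assert rhsmd.config_lines == ['rhsmd_timeout=$config']
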
From 04cd30028424f36537a3914fa6c90aebc01d35f5 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 23 Apr 2020 16:13:33 +0800
Subject: [PATCH 033/892] Update uname per RHEL 7.8 (#2559)

Signed-off-by: Xiangce Liu
---
 insights/parsers/uname.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py
index 8749e9000..4b4744d08 100644
--- a/insights/parsers/uname.py
+++ b/insights/parsers/uname.py
@@ -99,6 +99,7 @@
     "3.10.0-862": "7.5",
     "3.10.0-957": "7.6",
     "3.10.0-1062": "7.7",
+    "3.10.0-1127": "7.8",
     "4.18.0-80": "8.0",
     "4.18.0-147": "8.1",
 }
From 909633c5237509b8f4d5628e6e2b716a476fd751 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 23 Apr 2020 22:43:01 +0800
Subject: [PATCH 034/892] print 'result' when assert failed in test (#2552)

Signed-off-by: Xiangce Liu
---
 insights/tests/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/insights/tests/__init__.py b/insights/tests/__init__.py
index fd6ff14a6..3ec27fa3c 100644
--- a/insights/tests/__init__.py
+++ b/insights/tests/__init__.py
@@ -93,7 +93,7 @@ def deep_compare(result, expected):
         assert result["type"] == "skip", result
         return

-    assert eq(result, expected)
+    assert eq(result, expected), result


 def run_input_data(component, input_data):
From 698924f3836ec251f3ab6a269746bb766614ed96 Mon Sep 17 00:00:00 2001
From: Stephen
Date: Thu, 23 Apr 2020 11:13:06 -0400
Subject: [PATCH 035/892] Validate tags (#2543)

* Validate tags and handle errors properly

Signed-off-by: Stephen Adams

* Handle load_yaml failure in get_tags

Signed-off-by: Stephen Adams

* Fix config

Signed-off-by: Stephen Adams

* Remove string formatting

Signed-off-by: Stephen Adams

* Use simple error log for bad tags yaml

Signed-off-by: Stephen Adams

* remove unused yaml Loader

since we're leveraging the load_yaml function we can remove these imports

Signed-off-by: Stephen Adams

* Update to tags.yaml

Forgot we had switched from tags.conf to tags.yaml

Signed-off-by: Stephen Adams

* fix tags filename and add clear error message

Signed-off-by: Stephen Adams

* Modify log messages and update help for tags.yaml validation

Signed-off-by: Stephen Adams

Co-authored-by: Jeremy Crafts
---
 insights/client/collection_rules.py     | 19 +++++++++++++++++-
 insights/client/config.py               |  6 +++++-
 insights/client/constants.py            |  1 +
 insights/client/utilities.py            | 26 +++++++++++++------------
 insights/tests/client/test_utilities.py |  2 +-
 5 files changed, 39 insertions(+), 15 deletions(-)

diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py
index 7ab70a412..5b80024ec 100644
--- a/insights/client/collection_rules.py
+++ b/insights/client/collection_rules.py
@@ -108,6 +108,7 @@ def __init__(self, config, conn=None):
         self.remove_file = config.remove_file
         self.redaction_file = config.redaction_file
         self.content_redaction_file = config.content_redaction_file
+        self.tags_file = config.tags_file
         self.collection_rules_file = constants.collection_rules_file
         self.collection_rules_url = self.config.collection_rules_url
         self.gpg = self.config.gpg
@@ -408,10 +409,26 @@ def get_rm_conf(self):
         self.rm_conf = filtered_rm_conf
         return filtered_rm_conf

+    def get_tags_conf(self):
+        '''
+        Try to load the tags.conf file
+        '''
+        if not os.path.isfile(self.tags_file):
+            logger.info("%s does not exist", self.tags_file)
+            return None
+        else:
+            try:
+                load_yaml(self.tags_file)
+                logger.info("%s loaded successfully", self.tags_file)
+            except RuntimeError:
+                logger.warning("Invalid YAML. Unable to load %s", self.tags_file)
+                return None
+
     def validate(self):
         '''
-        Validate remove.conf
+        Validate remove.conf and tags.conf
         '''
+        self.get_tags_conf()
         success = self.get_rm_conf()
         if not success:
             logger.info('No contents in the blacklist configuration to validate.')
Unable to load %s", self.tags_file) + return None + def validate(self): ''' - Validate remove.conf + Validate remove.conf and tags.conf ''' + self.get_tags_conf() success = self.get_rm_conf() if not success: logger.info('No contents in the blacklist configuration to validate.') diff --git a/insights/client/config.py b/insights/client/config.py index a13f5f45c..366b1faf2 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -242,6 +242,10 @@ # non-CLI 'default': os.path.join(constants.default_conf_dir, 'remove.conf') }, + 'tags_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'tags.yaml') + }, 'redaction_file': { # non-CLI 'default': os.path.join(constants.default_conf_dir, 'file-redaction.yaml') @@ -339,7 +343,7 @@ 'validate': { 'default': False, 'opt': ['--validate'], - 'help': 'Validate remove.conf', + 'help': 'Validate remove.conf and tags.yaml', 'action': 'store_true' }, 'verbose': { diff --git a/insights/client/constants.py b/insights/client/constants.py index ae7e0954c..412c37df4 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -10,6 +10,7 @@ class InsightsConstants(object): command_blacklist = ('rm', 'kill', 'reboot', 'shutdown') default_conf_dir = os.getenv('INSIGHTS_CONF_DIR', default='/etc/insights-client') default_conf_file = os.path.join(default_conf_dir, 'insights-client.conf') + default_tags_file = os.path.join(default_conf_dir, 'tags.yaml') log_dir = os.path.join(os.sep, 'var', 'log', app_name) simple_find_replace_dir = '/etc/redhat-access-insights' default_log_file = os.path.join(log_dir, app_name + '.log') diff --git a/insights/client/utilities.py b/insights/client/utilities.py index 9750eb692..619c1aa6a 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -15,13 +15,13 @@ import yaml try: - from yaml import CLoader as Loader, CDumper as Dumper + from yaml import CDumper as Dumper except ImportError: - from yaml import Loader, Dumper + from yaml import Dumper from .. import package_info from .constants import InsightsConstants as constants -from .collection_rules import InsightsUploadConf +from .collection_rules import InsightsUploadConf, load_yaml try: from insights_client.constants import InsightsConstants as wrapper_constants @@ -176,7 +176,7 @@ def _expand_paths(path): def validate_remove_file(config): """ - Validate the remove file + Validate the remove file and tags file """ return InsightsUploadConf(config).validate() @@ -318,7 +318,7 @@ def systemd_notify(pid): logger.debug('systemd-notify returned %s', proc.returncode) -def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.yaml")): +def get_tags(tags_file_path=constants.default_tags_file): ''' Load tag data from the tags file. @@ -326,17 +326,19 @@ def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.yaml" ''' tags = None - try: - with open(tags_file_path) as f: - data = f.read() - tags = yaml.load(data, Loader=Loader) - except EnvironmentError as e: - logger.debug("tags file does not exist: %s", os.strerror(e.errno)) + if os.path.isfile(tags_file_path): + try: + tags = load_yaml(tags_file_path) + except RuntimeError: + logger.error("Invalid YAML. 
Unable to load %s", tags_file_path) + return None + else: + logger.debug("%s does not exist", tags_file_path) return tags -def write_tags(tags, tags_file_path=os.path.join(constants.default_conf_dir, "tags.yaml")): +def write_tags(tags, tags_file_path=constants.default_tags_file): """ Writes tags to tags_file_path diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py index 14899158a..76326cc55 100644 --- a/insights/tests/client/test_utilities.py +++ b/insights/tests/client/test_utilities.py @@ -294,7 +294,7 @@ def test_get_tags_empty(): fp.write(content) fp.close() got = util.get_tags(fp.name) - assert got is None + assert got == {} def test_get_tags_nonexist(): From 9ced45e7edf2a9f50b6e66d1bd3aca5152dc2870 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 23 Apr 2020 11:50:05 -0400 Subject: [PATCH 036/892] Migrate tags.conf files to tags.yaml (#2556) * add migrate_tags function to rename default tags file Signed-off-by: Jeremy Crafts * only init tags after logging init, tweak migrate func Signed-off-by: Jeremy Crafts * add some exception handling just in case Signed-off-by: Jeremy Crafts * add unit test Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 26 ++++++++++++----- insights/client/utilities.py | 22 ++++++++++++++ insights/tests/client/test_utilities.py | 39 +++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 8 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index fcebe6a19..2c47abb57 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -18,7 +18,8 @@ write_to_disk, generate_machine_id, get_tags, - write_tags) + write_tags, + migrate_tags) NETWORK = constants.custom_network_log_level logger = logging.getLogger(__name__) @@ -51,6 +52,7 @@ def __init__(self, config=None, setup_logging=True, **kwargs): if setup_logging: self.set_up_logging() try_auto_configuration(self.config) + self.initialize_tags() else: # write PID to file in case we need to ping systemd write_to_disk(constants.pidfile, content=str(os.getpid())) @@ -59,13 +61,6 @@ def __init__(self, config=None, setup_logging=True, **kwargs): self.session = None self.connection = None - if self.config.group: - tags = get_tags() - if tags is None: - tags = {} - tags["group"] = self.config.group - write_tags(tags) - def _net(func): def _init_connection(self, *args, **kwargs): # setup a request session @@ -614,6 +609,21 @@ def copy_to_output_file(self, insights_archive): if self.config.obfuscate: self._copy_soscleaner_files(insights_archive) + def initialize_tags(self): + ''' + Initialize the tags file if needed + ''' + # migrate the old file if necessary + migrate_tags() + + # initialize with group if group was specified + if self.config.group: + tags = get_tags() + if tags is None: + tags = {} + tags["group"] = self.config.group + write_tags(tags) + def format_config(config): # Log config except the password diff --git a/insights/client/utilities.py b/insights/client/utilities.py index 619c1aa6a..32f2bb708 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -351,3 +351,25 @@ def write_tags(tags, tags_file_path=constants.default_tags_file): with open(tags_file_path, mode="w+") as f: data = yaml.dump(tags, Dumper=Dumper, default_flow_style=False) f.write(data) + + +def migrate_tags(): + ''' + We initially released the tags feature with the tags file set as + tags.conf, but soon after switched it over to tags.yaml. 
From 9ced45e7edf2a9f50b6e66d1bd3aca5152dc2870 Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Thu, 23 Apr 2020 11:50:05 -0400
Subject: [PATCH 036/892] Migrate tags.conf files to tags.yaml (#2556)

* add migrate_tags function to rename default tags file

Signed-off-by: Jeremy Crafts

* only init tags after logging init, tweak migrate func

Signed-off-by: Jeremy Crafts

* add some exception handling just in case

Signed-off-by: Jeremy Crafts

* add unit test

Signed-off-by: Jeremy Crafts
---
 insights/client/__init__.py             | 26 ++++++++++++-----
 insights/client/utilities.py            | 22 ++++++++++++++
 insights/tests/client/test_utilities.py | 39 +++++++++++++++++++++++++
 3 files changed, 79 insertions(+), 8 deletions(-)

diff --git a/insights/client/__init__.py b/insights/client/__init__.py
index fcebe6a19..2c47abb57 100644
--- a/insights/client/__init__.py
+++ b/insights/client/__init__.py
@@ -18,7 +18,8 @@
     write_to_disk,
     generate_machine_id,
     get_tags,
-    write_tags)
+    write_tags,
+    migrate_tags)

 NETWORK = constants.custom_network_log_level
 logger = logging.getLogger(__name__)
@@ -51,6 +52,7 @@ def __init__(self, config=None, setup_logging=True, **kwargs):
             if setup_logging:
                 self.set_up_logging()
                 try_auto_configuration(self.config)
+                self.initialize_tags()
         else:
             # write PID to file in case we need to ping systemd
             write_to_disk(constants.pidfile, content=str(os.getpid()))
@@ -59,13 +61,6 @@ def __init__(self, config=None, setup_logging=True, **kwargs):
         self.session = None
         self.connection = None

-        if self.config.group:
-            tags = get_tags()
-            if tags is None:
-                tags = {}
-            tags["group"] = self.config.group
-            write_tags(tags)
-
     def _net(func):
         def _init_connection(self, *args, **kwargs):
             # setup a request session
@@ -614,6 +609,21 @@ def copy_to_output_file(self, insights_archive):
         if self.config.obfuscate:
             self._copy_soscleaner_files(insights_archive)

+    def initialize_tags(self):
+        '''
+        Initialize the tags file if needed
+        '''
+        # migrate the old file if necessary
+        migrate_tags()
+
+        # initialize with group if group was specified
+        if self.config.group:
+            tags = get_tags()
+            if tags is None:
+                tags = {}
+            tags["group"] = self.config.group
+            write_tags(tags)
+

 def format_config(config):
     # Log config except the password
diff --git a/insights/client/utilities.py b/insights/client/utilities.py
index 619c1aa6a..32f2bb708 100644
--- a/insights/client/utilities.py
+++ b/insights/client/utilities.py
@@ -351,3 +351,25 @@ def write_tags(tags, tags_file_path=constants.default_tags_file):
     with open(tags_file_path, mode="w+") as f:
         data = yaml.dump(tags, Dumper=Dumper, default_flow_style=False)
         f.write(data)
+
+
+def migrate_tags():
+    '''
+    We initially released the tags feature with the tags file set as
+    tags.conf, but soon after switched it over to tags.yaml. There may be
+    installations out there with tags.conf files, so rename the files.
+    '''
+    tags_conf = os.path.join(constants.default_conf_dir, 'tags.conf')
+    tags_yaml = os.path.join(constants.default_conf_dir, 'tags.yaml')
+
+    if os.path.exists(tags_yaml):
+        # current default file exists, do nothing
+        return
+    if os.path.exists(tags_conf):
+        # old file exists and current does not
+        logger.info('Tags file %s detected. This filename is deprecated; please use %s. The file will be renamed automatically.',
+                    tags_conf, tags_yaml)
+        try:
+            os.rename(tags_conf, tags_yaml)
+        except OSError as e:
+            logger.error(e)
diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py
index 76326cc55..699308d49 100644
--- a/insights/tests/client/test_utilities.py
+++ b/insights/tests/client/test_utilities.py
@@ -308,3 +308,42 @@ def test_write_tags():
     util.write_tags(tags, tags_file_path=fp.name)
     got = util.get_tags(fp.name)
     assert got == tags
+
+
+@patch('insights.client.utilities.os.rename')
+@patch('insights.client.utilities.os.path.exists')
+def test_migrate_tags(path_exists, os_rename):
+    '''
+    Test the migrate_tags function for the following cases:
+    1) tags.yaml does not exist, tags.conf does not exist
+       - do nothing
+    2) tags.yaml exists, tags.conf does not exist
+       - do nothing
+    3) tags.yaml does not exist, tags.conf exists
+       - rename tags.conf to tags.yaml
+    4) tags.yaml exists, tags.conf exists
+       - do nothing
+    '''
+    # existence of tags.yaml is checked FIRST, tags.conf is checked SECOND
+    # mock side effects are according to this order
+
+    # case 1
+    path_exists.side_effect = [False, False]
+    util.migrate_tags()
+    os_rename.assert_not_called()
+    os_rename.reset_mock()
+    # case 2
+    path_exists.side_effect = [True, False]
+    util.migrate_tags()
+    os_rename.assert_not_called()
+    os_rename.reset_mock()
+    # case 3
+    path_exists.side_effect = [False, True]
+    util.migrate_tags()
+    os_rename.assert_called_once()
+    os_rename.reset_mock()
+    # case 4
+    path_exists.side_effect = [True, True]
+    util.migrate_tags()
+    os_rename.assert_not_called()
+    os_rename.reset_mock()
From 9955b792902d8dbbdf4426154b05d5d8952b6331 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Sun, 26 Apr 2020 09:58:46 +0800
Subject: [PATCH 037/892] Remove -R from ls_etc and only list specified
 sub-dirs (#2544)

* Remove -R from ls_etc and only list specified sub-dirs

Signed-off-by: Xiangce Liu

* Change glob_file to simple_file

Signed-off-by: Xiangce Liu
---
 insights/parsers/ls_etc.py         | 16 +++++++++-------
 insights/specs/default.py          |  5 ++++-
 insights/specs/insights_archive.py |  2 +-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/insights/parsers/ls_etc.py b/insights/parsers/ls_etc.py
index dc2656898..cf987b9ef 100644
--- a/insights/parsers/ls_etc.py
+++ b/insights/parsers/ls_etc.py
@@ -1,12 +1,12 @@
 """
-LsEtc - command ``ls -lanR /etc``
-=================================
+LsEtc - command ``ls -lan /etc <dirs>``
+=======================================

-The ``ls -lanR /etc`` command provides information for the listing of
-the ``/etc`` directory.  See ``FileListing`` class for additional
-information.
+The ``ls -lan /etc <dirs>`` command provides information for
+the listing of the ``/etc`` directory and specified sub-directories.
+See ``FileListing`` class for additional information.

-Sample ``ls -lanR /etc`` output::
+Sample ``ls -lan /etc/sysconfig /etc/rc.d/rc3.d`` output::

     /etc/sysconfig:
     total 96
@@ -70,5 +70,7 @@

 @parser(Specs.ls_etc)
 class LsEtc(CommandParser, FileListing):
-    """Parses output of ``ls -lanR /etc`` command."""
+    """
+    Parses output of ``ls -lan /etc <dirs>`` command.
+    """
     pass
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 15755dd2b..5b620adf2 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -520,7 +520,10 @@ def semid(broker):
     ls_disk = simple_command("/bin/ls -lanR /dev/disk")
     ls_docker_volumes = simple_command("/bin/ls -lanR /var/lib/docker/volumes")
     ls_edac_mc = simple_command("/bin/ls -lan /sys/devices/system/edac/mc")
-    ls_etc = simple_command("/bin/ls -lanR /etc")
+    etc_and_sub_dirs = sorted(["/etc", "/etc/pki/tls/private", "/etc/pki/tls/certs",
+                               "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig",
+                               "/etc/cloud/cloud.cfg.d"])
+    ls_etc = simple_command("ls -lan {0}".format(' '.join(etc_and_sub_dirs)))
     ls_lib_firmware = simple_command("/bin/ls -lanR /lib/firmware")
     ls_ocp_cni_openshift_sdn = simple_command("/bin/ls -l /var/lib/cni/networks/openshift-sdn")
     ls_origin_local_volumes_pods = simple_command("/bin/ls -l /var/lib/origin/openshift.local.volumes/pods")
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index 7efd050dc..6000ad2ba 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -122,7 +122,7 @@ class InsightsArchiveSpecs(Specs):
     ls_disk = simple_file("insights_commands/ls_-lanR_.dev.disk")
     ls_docker_volumes = simple_file("insights_commands/ls_-lanR_.var.lib.docker.volumes")
     ls_edac_mc = simple_file("insights_commands/ls_-lan_.sys.devices.system.edac.mc")
-    ls_etc = simple_file("insights_commands/ls_-lanR_.etc")
+    ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.sysconfig")
     ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware")
     ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn")
     ls_origin_local_volumes_pods = simple_file("insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods")
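Since ``LsEtc`` mixes in ``FileListing``, consumers query directories rather
than raw lines, so the narrower collection does not change how rules read the
data. A minimal sketch; the rule name, error key and permission check are
illustrative assumptions::

    from insights import rule
    from insights.core.plugins import make_fail
    from insights.parsers.ls_etc import LsEtc

    @rule(LsEtc)
    def world_writable_sysconfig(listing):
        if "/etc/sysconfig" in listing:
            for name, entry in listing.listing_of("/etc/sysconfig").items():
                # entry["perms"] looks like "rw-r--r--."; index 7 is the
                # "other write" bit
                if entry["type"] == "-" and entry["perms"][7] == "w":
                    return make_fail('WORLD_WRITABLE_SYSCONFIG', file=name)
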
From 22723270a3e6c5d3604be8c9b714bcaa0625d82c Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Sun, 26 Apr 2020 15:28:25 +0800
Subject: [PATCH 038/892] Remove semid from RegistryPoint (#2564)

Signed-off-by: Xiangce Liu
---
 insights/specs/__init__.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 80b6238da..39d8aee95 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -541,7 +541,6 @@ class Specs(SpecSet):
     sealert = RegistryPoint()
     secure = RegistryPoint(filterable=True)
     selinux_config = RegistryPoint()
-    semid = RegistryPoint()
     sestatus = RegistryPoint()
     setup_named_chroot = RegistryPoint(filterable=True)
     smartctl = RegistryPoint(multi_output=True)
From 915bc65dc899f067d74c2563e889b67261ca7990 Mon Sep 17 00:00:00 2001
From: Chris Sams
Date: Mon, 27 Apr 2020 15:26:19 -0500
Subject: [PATCH 039/892] Include analysis metadata in json and yaml
 responses. (#2562)

The new analysis_metadata section contains:
- analysis start time in UTC
- analysis end time in UTC
- execution context (host, sos report, insights archive, etc.)
- list of loaded plugin sets that registered themselves with
  `insights.add_status`

Closes #2560

Signed-off-by: Christopher Sams
---
 insights/__init__.py              |  5 ++++-
 insights/core/evaluators.py       | 26 +++++++++++++++++++++++++-
 insights/formats/__init__.py      |  3 +++
 insights/formats/html.py          |  2 +-
 insights/formats/template.py      |  2 --
 insights/tests/test_evaluators.py | 10 ++++++++--
 insights/util/__init__.py         | 25 +++++++++++++++++++++++++
 7 files changed, 66 insertions(+), 7 deletions(-)

diff --git a/insights/__init__.py b/insights/__init__.py
index 0d2500ba8..9e4fbb352 100644
--- a/insights/__init__.py
+++ b/insights/__init__.py
@@ -73,7 +73,7 @@ def get_nvr():
     """


-def add_status(name, nvr, commit):
+def add_status(name, nvr, commit=None):
     """
     Rule repositories should call this method in their package __init__ to
     register their version information.
@@ -81,6 +81,9 @@ def add_status(name, nvr, commit):
     RULES_STATUS[name] = {"version": nvr, "commit": commit}


+add_status(package_info["NAME"], get_nvr(), package_info["COMMIT"])
+
+
 def process_dir(broker, root, graph, context, inventory=None):
     ctx = create_context(root, context)
     log.debug("Processing %s with %s" % (root, ctx))
diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py
index 13791a147..358b47be4 100644
--- a/insights/core/evaluators.py
+++ b/insights/core/evaluators.py
@@ -3,12 +3,16 @@
 import sys

 from collections import defaultdict
+from datetime import datetime

 from ..formats import Formatter
 from ..specs import Specs
 from ..combiners.hostname import hostname as combiner_hostname
 from ..parsers.branch_info import BranchInfo
+from ..util import utc
 from . import dr, plugins
+from .context import ExecutionContext
+import insights

 log = logging.getLogger(__name__)

@@ -26,8 +30,17 @@ def __init__(self, broker=None, stream=sys.stdout, incremental=False):
         self.metadata = {}
         self.metadata_keys = {}
         self.incremental = incremental
+        self.context_cls = None

     def observer(self, comp, broker):
+        if self.context_cls is None:
+            for c in self.broker.instances:
+                try:
+                    if issubclass(c, ExecutionContext):
+                        self.context_cls = c
+                except:
+                    pass
+
         if comp is combiner_hostname and comp in broker:
             self.hostname = broker[comp].fqdn

@@ -91,7 +104,18 @@ def get_response(self):
             if k not in ("rule", "fingerprint"):
                 r[k] = v

-        return self.format_response(r)
+        r = self.format_response(r)
+
+        ctx = dr.get_name(self.context_cls) if self.context_cls is not None else None
+
+        r["analysis_metadata"] = {
+            "start": self.start_time.isoformat(),
+            "finish": datetime.now(utc).isoformat(),
+            "execution_context": ctx,
+            "plugin_sets": insights.RULES_STATUS
+        }
+
+        return r

     def handle_result(self, plugin, r):
         type_ = r["type"]
diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py
index 68090eac2..825d91eca 100644
--- a/insights/formats/__init__.py
+++ b/insights/formats/__init__.py
@@ -2,7 +2,9 @@
 import six
 import sys

+from datetime import datetime
 from insights import dr, rule
+from insights.util import utc

 RENDERERS = {}

@@ -57,6 +59,7 @@ class Formatter(object):
     def __init__(self, broker, stream=sys.stdout):
         self.broker = broker
         self.stream = stream
+        self.start_time = datetime.now(utc)

     def __enter__(self):
         self.preprocess()
diff --git a/insights/formats/html.py b/insights/formats/html.py
index 417263ab2..5031e4442 100644
--- a/insights/formats/html.py
+++ b/insights/formats/html.py
@@ -124,7 +124,7 @@ class HtmlFormat(TemplateFormat):
     def create_template_context(self):
         ctx = {
             "root": self.find_root() or "Unknown",
-            "start_time": self.start_time,
+            "start_time": self.start_time.strftime("%Y-%m-%d %H:%M:%S")
         }
         sorted_rules = {}
         response_type_getter = itemgetter("response_type")
diff --git a/insights/formats/template.py b/insights/formats/template.py
index 8f9202172..7e3c6aadb 100644
--- a/insights/formats/template.py
+++ b/insights/formats/template.py
@@ -1,7 +1,6 @@
 from __future__ import print_function
 import inspect
 import sys
-from datetime import datetime

 from jinja2 import Template

@@ -91,7 +90,6 @@ def preprocess(self):
         Watches rules go by as they evaluate and collects information about
         them for later display in postprocess.
        """
-        self.start_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
         self.rules = []
         self.broker.add_observer(self.collect_rules, rule)
diff --git a/insights/tests/test_evaluators.py b/insights/tests/test_evaluators.py
index f7cd8aa45..dd145690a 100644
--- a/insights/tests/test_evaluators.py
+++ b/insights/tests/test_evaluators.py
@@ -91,7 +91,10 @@ def test_single_evaluator():
     with SingleEvaluator(broker) as e:
         dr.run(report, broker=broker)
         result2 = e.get_response()
-    assert result1 == result2
+    assert result1["reports"] == result2["reports"]
+    for k in ["start", "finish", "execution_context", "plugin_sets"]:
+        assert k in result1["analysis_metadata"]
+        assert k in result2["analysis_metadata"]


 def test_insights_evaluator():
@@ -104,7 +107,10 @@ def test_insights_evaluator():
     with InsightsEvaluator(broker) as e:
         dr.run(report, broker=broker)
         result2 = e.get_response()
-    assert result1 == result2
+    assert result1["reports"] == result2["reports"]
+    for k in ["start", "finish", "execution_context", "plugin_sets"]:
+        assert k in result1["analysis_metadata"]
+        assert k in result2["analysis_metadata"]


 def test_insights_evaluator_attrs_serial():
diff --git a/insights/util/__init__.py b/insights/util/__init__.py
index a9bccb9c5..2eacd982a 100644
--- a/insights/util/__init__.py
+++ b/insights/util/__init__.py
@@ -5,6 +5,8 @@
 import platform
 import os
 import warnings
+import datetime
+

 TMP_DIR = os.path.join("/tmp", "insights-web")
 logger = logging.getLogger(__name__)
@@ -29,6 +31,29 @@ def parse_bool(s, default=False):
     return TRUTH.get(s.lower(), default)


+# python2 doesn't have a utc tzinfo by default
+# see https://docs.python.org/2/library/datetime.html#tzinfo-objects
+try:
+    utc = datetime.timezone.utc
+except:
+    class UTC(datetime.tzinfo):
+        """
+        A tzinfo class for UTC.
+        """
+        ZERO = datetime.timedelta(0)
+
+        def utcoffset(self, dt):
+            return self.ZERO
+
+        def tzname(self, dt):
+            return "UTC"
+
+        def dst(self, dt):
+            return self.ZERO
+
+    utc = UTC()
+
+
 def which(cmd, env=None):
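Consumers of the evaluator output can read the new section straight from the
response dictionary. A hedged sketch, assuming an evaluator instance is in
scope; the field values shown in the comment are illustrative::

    response = evaluator.get_response()
    meta = response["analysis_metadata"]
    # e.g. {'start': '2020-04-27T20:26:19+00:00',
    #       'finish': '2020-04-27T20:26:21+00:00',
    #       'execution_context': 'insights.core.context.HostArchiveContext',
    #       'plugin_sets': {'insights-core': {'version': '...', 'commit': '...'}}}
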
From 6dbeee3a84c8d2d8d67452c8100d19541f469964 Mon Sep 17 00:00:00 2001
From: Jesse Jaggars
Date: Mon, 27 Apr 2020 16:53:42 -0400
Subject: [PATCH 040/892] adding a scandir() based option for get_all_files
 (#2538)

* adding a scandir() based option for get_all_files

Signed-off-by: Jesse Jaggars

* removing dead function

Signed-off-by: Jesse Jaggars

* fix the test

Signed-off-by: Jesse Jaggars

* get_all_files w/o scandir too

Signed-off-by: Jesse Jaggars

* full paths for get_all_files

Signed-off-by: Jesse Jaggars
---
 insights/core/archives.py         | 10 ----------
 insights/core/hydration.py        | 26 ++++++++++++++++++--------
 insights/tests/test_extractors.py |  4 ++--
 3 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/insights/core/archives.py b/insights/core/archives.py
index 2e53d612f..4ec97f863 100644
--- a/insights/core/archives.py
+++ b/insights/core/archives.py
@@ -76,16 +76,6 @@ def from_path(self, path, extract_dir=None, content_type=None):
         return self


-def get_all_files(path):
-    names = []
-    for root, dirs, files in os.walk(path):
-        for dirname in dirs:
-            names.append(os.path.join(root, dirname) + "/")
-        for filename in files:
-            names.append(os.path.join(root, filename))
-    return names
-
-
 class Extraction(object):
     def __init__(self, tmp_dir, content_type):
         self.tmp_dir = tmp_dir
diff --git a/insights/core/hydration.py b/insights/core/hydration.py
index 41ab91607..61ff4e568 100644
--- a/insights/core/hydration.py
+++ b/insights/core/hydration.py
@@ -8,13 +8,23 @@

 log = logging.getLogger(__name__)

-
-def get_all_files(path):
-    all_files = []
-    for f in archives.get_all_files(path):
-        if os.path.isfile(f) and not os.path.islink(f):
-            all_files.append(f)
-    return all_files
+if hasattr(os, "scandir"):
+    def get_all_files(path):
+        with os.scandir(path) as it:
+            for ent in it:
+                if ent.is_dir(follow_symlinks=False):
+                    for pth in get_all_files(ent.path):
+                        yield pth
+                elif ent.is_file(follow_symlinks=False):
+                    yield ent.path
+
+else:
+    def get_all_files(path):
+        for root, _, files in os.walk(path):
+            for f in files:
+                full_path = os.path.join(root, f)
+                if os.path.isfile(full_path) and not os.path.islink(full_path):
+                    yield full_path


 def identify(files):
@@ -35,7 +45,7 @@ def create_context(path, context=None):
         if arc:
             return ClusterArchiveContext(path, all_files=arc)

-    all_files = get_all_files(path)
+    all_files = list(get_all_files(path))
     if not all_files:
         raise archives.InvalidArchive("No files in archive")

diff --git a/insights/tests/test_extractors.py b/insights/tests/test_extractors.py
index a01512cfa..313236611 100644
--- a/insights/tests/test_extractors.py
+++ b/insights/tests/test_extractors.py
@@ -5,7 +5,7 @@
 import zipfile

 from contextlib import closing
-from insights.core import archives
+from insights.core.hydration import get_all_files
 from insights.core.archives import extract


@@ -38,7 +38,7 @@ def _add_to_zip(zf, path, zippath):

     try:
         with extract("/tmp/test.zip") as ex:
-            assert any(f.endswith("/sys/kernel/kexec_crash_size") for f in archives.get_all_files(ex.tmp_dir))
+            assert any(f.endswith("/sys/kernel/kexec_crash_size") for f in get_all_files(ex.tmp_dir))
     finally:
         os.unlink("/tmp/test.zip")
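Note that both variants of ``get_all_files`` are now generators, which is why
``create_context`` wraps the call in ``list(...)`` above. Callers that iterate
the result more than once must materialize it first; a small sketch with an
illustrative path::

    from insights.core.hydration import get_all_files

    files = get_all_files("/tmp/extracted-archive")  # now a lazy generator
    paths = list(files)  # materialize once if the result is reused
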
Subject: [PATCH 041/892] Add RHEL-8.2 kernel entry (#2568)

Signed-off-by: Sachin Patil
---
 insights/parsers/uname.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py
index 4b4744d08..b776f9911 100644
--- a/insights/parsers/uname.py
+++ b/insights/parsers/uname.py
@@ -102,6 +102,7 @@
     "3.10.0-1127": "7.8",
     "4.18.0-80": "8.0",
     "4.18.0-147": "8.1",
+    "4.18.0-193": "8.2"
 }

 release_to_kernel_map = dict((v, k) for k, v in rhel_release_map.items())
From 527aaba3017cf05debf3988c2069817968aab68a Mon Sep 17 00:00:00 2001
From: Stephen
Date: Tue, 28 Apr 2020 16:33:46 -0400
Subject: [PATCH 042/892] Set net_debug true when test_connection is true
 (#2569)

We need to see the network log messages when testing the connection.
This will imply net-debug if we use test-connection.

Signed-off-by: Stephen Adams
---
 insights/client/config.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/insights/client/config.py b/insights/client/config.py
index 366b1faf2..afa2bbfc8 100644
--- a/insights/client/config.py
+++ b/insights/client/config.py
@@ -700,6 +700,8 @@ def _imply_options(self):
         self.keep_archive = self.keep_archive or self.no_upload
         if self.to_json and self.quiet:
             self.diagnosis = True
+        if self.test_connection:
+            self.net_debug = True
         if self.payload or self.diagnosis or self.compliance or self.show_results or self.check_results:
             self.legacy_upload = False
         if self.payload and (self.logging_file == constants.default_log_file):
From 4d1c8a3f94d7aa8405f03eb64d44482e3f6787f0 Mon Sep 17 00:00:00 2001
From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com>
Date: Thu, 30 Apr 2020 01:49:16 +0200
Subject: [PATCH 043/892] Add parsers for config_file_perms (#2566)

Signed-off-by: Jitka Obselkova
---
 docs/shared_parsers_catalog/config_file_perms.rst |  3 +
 insights/parsers/config_file_perms.py             | 96 +++++++++++++++++++
 insights/parsers/tests/test_config_file_perms.py  | 47 +++++++++
 3 files changed, 146 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/config_file_perms.rst
 create mode 100644 insights/parsers/config_file_perms.py
 create mode 100644 insights/parsers/tests/test_config_file_perms.py

diff --git a/docs/shared_parsers_catalog/config_file_perms.rst b/docs/shared_parsers_catalog/config_file_perms.rst
new file mode 100644
index 000000000..72683b832
--- /dev/null
+++ b/docs/shared_parsers_catalog/config_file_perms.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.config_file_perms
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/config_file_perms.py b/insights/parsers/config_file_perms.py
new file mode 100644
index 000000000..79bf2146e
--- /dev/null
+++ b/insights/parsers/config_file_perms.py
@@ -0,0 +1,96 @@
+"""
+Configuration File Permissions parsers
+======================================
+
+Parsers included in this module are:
+
+SshdConfigPerms - command ``/bin/ls -l /etc/ssh/sshd_config``
+-------------------------------------------------------------
+Grub1ConfigPerms - command ``/bin/ls -l /boot/grub/grub.conf``
+--------------------------------------------------------------
+Grub2ConfigPerms - command ``/bin/ls -l /boot/grub2/grub.cfg``
+--------------------------------------------------------------
+"""
+
+from insights.core import CommandParser
+from insights.core.plugins import parser
+from insights.specs import Specs
+from insights.util.file_permissions import FilePermissions
+
+
+class FilePermissionsParser(CommandParser, FilePermissions):
+    """
+    Base class for ``SshdConfigPerms``, ``Grub1ConfigPerms`` and ``Grub2ConfigPerms`` classes.
+
+    Attributes:
+        line (string): the line from the command output
+    """
+
+    def __init__(self, context):
+        self.line = ""
+        CommandParser.__init__(self, context)
+        FilePermissions.__init__(self, self.line)
+
+    def parse_content(self, content):
+        non_empty_lines = [line for line in content if line]  # get rid of blank lines
+        self.line = non_empty_lines[0]
+
+
+@parser(Specs.sshd_config_perms)
+class SshdConfigPerms(FilePermissionsParser):
+    """
+    Class for parsing ``/bin/ls -l /etc/ssh/sshd_config`` command.
+
+    Sample output of this command is::
+
+        -rw-------. 1 root root 4179 Dec 1 2014 /etc/ssh/sshd_config
+
+    Examples:
+        >>> type(sshd_perms)
+        <class 'insights.parsers.config_file_perms.SshdConfigPerms'>
+        >>> sshd_perms.line
+        '-rw-------. 1 root root 4179 Dec 1 2014 /etc/ssh/sshd_config'
+    """
+
+    def __init__(self, context):
+        super(SshdConfigPerms, self).__init__(context)
+
+
+@parser(Specs.grub1_config_perms)
+class Grub1ConfigPerms(FilePermissionsParser):
+    """
+    Class for parsing ``/bin/ls -l /boot/grub/grub.conf`` command.
+
+    Sample output of this command is::
+
+        -rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub/grub.conf
+
+    Examples:
+        >>> type(grub1_perms)
+        <class 'insights.parsers.config_file_perms.Grub1ConfigPerms'>
+        >>> grub1_perms.line
+        '-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub/grub.conf'
+    """
+
+    def __init__(self, context):
+        super(Grub1ConfigPerms, self).__init__(context)
+
+
+@parser(Specs.grub_config_perms)
+class Grub2ConfigPerms(FilePermissionsParser):
+    """
+    Class for parsing ``/bin/ls -l /boot/grub2/grub.cfg`` command.
+
+    Sample output of this command is::
+
+        -rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg
+
+    Examples:
+        >>> type(grub2_perms)
+        <class 'insights.parsers.config_file_perms.Grub2ConfigPerms'>
+        >>> grub2_perms.line
+        '-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg'
+    """
+
+    def __init__(self, context):
+        super(Grub2ConfigPerms, self).__init__(context)
diff --git a/insights/parsers/tests/test_config_file_perms.py b/insights/parsers/tests/test_config_file_perms.py
new file mode 100644
index 000000000..78fe797ca
--- /dev/null
+++ b/insights/parsers/tests/test_config_file_perms.py
@@ -0,0 +1,47 @@
+import doctest
+import pytest
+
+from insights.parsers import config_file_perms
+from insights.parsers.config_file_perms import SshdConfigPerms, Grub1ConfigPerms, Grub2ConfigPerms
+from insights.tests import context_wrap
+
+
+PARSERS = [
+    # (parser class, path to configuration file)
+    (SshdConfigPerms, "/etc/ssh/sshd_config"),
+    (Grub1ConfigPerms, "/boot/grub/grub.conf"),
+    (Grub2ConfigPerms, "/boot/grub2/grub.cfg")
+]
+
+TEST_CASES_PERMISSIONS = [
+    # (line, owned_by_root, only_root_can_write)
+    ('-rw-rw-r--. 1 root root 4179 Dec 1 2014 ', True, True),
+    ('-rw-r--rw-. 1 root root 4179 Dec 1 2014 ', True, False),
+    ('-rw-r--r--. 1 root user 4179 Dec 1 2014 ', False, True),
+    ('-rw-rw-r--. 1 root user 4179 Dec 1 2014 ', False, False),
+    ('-rw-r--r--. 1 user root 4179 Dec 1 2014 ', False, False),
+]
+
+
+@pytest.mark.parametrize("parser, path", PARSERS)
+@pytest.mark.parametrize("line, owned_by_root, only_root_can_write", TEST_CASES_PERMISSIONS)
+def test_sshd_grub(parser, path, line, owned_by_root, only_root_can_write):
+    line_with_path = line + path
+    result = parser(context_wrap(line_with_path))
+    assert result.line == line_with_path
+    assert result.owned_by("root", also_check_group=True) == owned_by_root
+    assert result.only_root_can_write() == only_root_can_write
+
+
+def test_doc_examples():
+    sshd = "-rw-------. 1 root root 4179 Dec 1 2014 /etc/ssh/sshd_config"
+    grub1 = "-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub/grub.conf"
+    grub2 = "-rw-r--r--. 1 root root 4179 Dec 1 2014 /boot/grub2/grub.cfg"
+
+    env = {
+        "sshd_perms": SshdConfigPerms(context_wrap(sshd)),
+        "grub1_perms": Grub1ConfigPerms(context_wrap(grub1)),
+        "grub2_perms": Grub2ConfigPerms(context_wrap(grub2)),
+    }
+    failed, total = doctest.testmod(config_file_perms, globs=env)
+    assert failed == 0
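The ``FilePermissions`` mixin gives these parsers ready-made ownership and
mode checks, so a rule never has to re-parse the ``ls -l`` line itself. A
minimal sketch; the rule name and error key are illustrative assumptions::

    from insights import rule
    from insights.core.plugins import make_fail
    from insights.parsers.config_file_perms import SshdConfigPerms

    @rule(SshdConfigPerms)
    def sshd_config_writable(perms):
        # flag an sshd_config that users other than root could modify
        if not perms.only_root_can_write():
            return make_fail('SSHD_CONFIG_NOT_ROOT_ONLY', line=perms.line)
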
From 5520c7f4d5c0fe634bff340c05777b74fa55cd29 Mon Sep 17 00:00:00 2001
From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com>
Date: Thu, 30 Apr 2020 01:59:16 +0200
Subject: [PATCH 044/892] Add parser for ls_var_www_perms (#2567)

Signed-off-by: Jitka Obselkova
---
 docs/shared_parsers_catalog/ls_var_www_perms.rst |  3 +
 insights/parsers/ls_var_www_perms.py             | 47 ++++++++++
 insights/parsers/tests/test_ls_var_www_perms.py  | 71 +++++++++++++++
 3 files changed, 121 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/ls_var_www_perms.rst
 create mode 100644 insights/parsers/ls_var_www_perms.py
 create mode 100644 insights/parsers/tests/test_ls_var_www_perms.py

diff --git a/docs/shared_parsers_catalog/ls_var_www_perms.rst b/docs/shared_parsers_catalog/ls_var_www_perms.rst
new file mode 100644
index 000000000..5496130a5
--- /dev/null
+++ b/docs/shared_parsers_catalog/ls_var_www_perms.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.ls_var_www_perms
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/ls_var_www_perms.py b/insights/parsers/ls_var_www_perms.py
new file mode 100644
index 000000000..797bcbc34
--- /dev/null
+++ b/insights/parsers/ls_var_www_perms.py
@@ -0,0 +1,47 @@
+"""
+LsVarWwwPerms - command ``/bin/ls -la /dev/null /var/www``
+==========================================================
+"""
+
+from insights.core import CommandParser, FileListing
+from insights.core.plugins import parser
+from insights.specs import Specs
+from insights.util.file_permissions import FilePermissions
+
+
+@parser(Specs.ls_var_www)
+class LsVarWwwPerms(CommandParser, FileListing):
+    """
+    Class for parsing ``/bin/ls -la /dev/null /var/www`` command.
+
+    Attributes:
+        file_permissions (list): list of `FilePermissions` objects for every file from the output
+
+    Sample output of this command is::
+
+        crw-rw-rw-. 1 root root 1, 3 Dec 18 09:18 /dev/null
+
+        /var/www:
+        total 16
+        drwxr-xr-x. 4 root root 33 Dec 15 08:12 .
+        drwxr-xr-x. 20 root root 278 Dec 15 08:12 ..
+        drwxr-xr-x. 2 root root 6 Oct 3 09:37 cgi-bin
+        drwxr-xr-x. 2 root root 6 Oct 3 09:37 html
+
+    Examples:
+
+        >>> type(ls_var_www_perms)
+        <class 'insights.parsers.ls_var_www_perms.LsVarWwwPerms'>
+        >>> ls_var_www_perms.file_permissions[2]
+        FilePermissions(cgi-bin)
+        >>> ls_var_www_perms.file_permissions[2].line
+        'drwxr-xr-x. 2 root root 6 Oct 3 09:37 cgi-bin'
+    """
+
+    def parse_content(self, content):
+        super(LsVarWwwPerms, self).parse_content(content)
+
+        self.file_permissions = []
+        if "/var/www" in self:
+            for filename, info in sorted(self.listing_of("/var/www").items()):
+                self.file_permissions.append(FilePermissions(info["raw_entry"]))
diff --git a/insights/parsers/tests/test_ls_var_www_perms.py b/insights/parsers/tests/test_ls_var_www_perms.py
new file mode 100644
index 000000000..74f777937
--- /dev/null
+++ b/insights/parsers/tests/test_ls_var_www_perms.py
@@ -0,0 +1,71 @@
+import doctest
+import pytest
+
+from insights.parsers import ls_var_www_perms
+from insights.parsers.ls_var_www_perms import LsVarWwwPerms
+from insights.tests import context_wrap
+from insights.util.file_permissions import FilePermissions
+
+
+GOOD_FOLDER_OUTPUT = """
+crw-rw-rw-. 1 root root 1, 3 Dec 18 09:18 /dev/null
+
+/var/www:
+total 16
+drwxr-xr-x. 4 root root 33 Dec 15 08:12 .
+drwxr-xr-x. 20 root root 278 Dec 15 08:12 ..
+drwxr-xr-x. 2 root root 6 Oct 3 09:37 cgi-bin
+drwxr-xr-x. 2 root root 6 Oct 3 09:37 html
+"""
+
+GOOD_FOLDER_EXPECTED_OUTPUT = [
+    "drwxr-xr-x. 4 root root 33 Dec 15 08:12 .",
+    "drwxr-xr-x. 20 root root 278 Dec 15 08:12 ..",
+    "drwxr-xr-x. 2 root root 6 Oct 3 09:37 cgi-bin",
+    "drwxr-xr-x. 2 root root 6 Oct 3 09:37 html",
+]
+
+EMPTY_FOLDER_OUTPUT = """
+crw-rw-rw-. 1 root root 1, 3 Dec 18 09:18 /dev/null
+
+/var/www:
+total 0
+drwxr-xr-x. 4 root root 33 Dec 15 08:12 .
+drwxr-xr-x. 20 root root 278 Dec 15 08:12 ..
+"""
+
+EMPTY_FOLDER_EXPECTED_OUTPUT = [
+    "drwxr-xr-x. 4 root root 33 Dec 15 08:12 .",
+    "drwxr-xr-x. 20 root root 278 Dec 15 08:12 ..",
+]
+
+NO_FOLDER_OUTPUT = """
+/bin/ls: cannot access '/var/www': No such file or directory
+crw-rw-rw-. 1 root root 1, 3 Dec 18 09:18 /dev/null
+"""
+
+NO_FOLDER_EXPECTED_OUTPUT = [
+]
+
+OUTPUTS_FROM_FOLDERS = [
+    (GOOD_FOLDER_OUTPUT, GOOD_FOLDER_EXPECTED_OUTPUT),
+    (EMPTY_FOLDER_OUTPUT, EMPTY_FOLDER_EXPECTED_OUTPUT),
+    (NO_FOLDER_OUTPUT, NO_FOLDER_EXPECTED_OUTPUT)
+]
+
+
+@pytest.mark.parametrize("output, expected_output", OUTPUTS_FROM_FOLDERS)
+def test_ls_var(output, expected_output):
+    test = LsVarWwwPerms(context_wrap(output))
+    assert len(test.file_permissions) == len(expected_output)
+    for test_line, expected_line in zip(test.file_permissions, expected_output):
+        assert repr(test_line) == repr(FilePermissions(expected_line))
+        assert test_line.line == FilePermissions(expected_line).line
+
+
+def test_doc_examples():
+    env = {
+        "ls_var_www_perms": LsVarWwwPerms(context_wrap(GOOD_FOLDER_OUTPUT))
+    }
+    failed, total = doctest.testmod(ls_var_www_perms, globs=env)
+    assert failed == 0
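Because each entry is wrapped in a ``FilePermissions`` object, permission
checks over the whole directory stay one-liners. A minimal sketch; the rule
name and error key are illustrative assumptions::

    from insights import rule
    from insights.core.plugins import make_fail
    from insights.parsers.ls_var_www_perms import LsVarWwwPerms

    @rule(LsVarWwwPerms)
    def non_root_web_content(listing):
        bad = [p.line for p in listing.file_permissions
               if not p.owned_by('root', also_check_group=True)]
        if bad:
            return make_fail('VAR_WWW_NOT_ROOT_OWNED', entries=bad)
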
From 64a6611a8a78f166db7938e3964c5e35840d09d8 Mon Sep 17 00:00:00 2001
From: Bob Fahr
Date: Thu, 30 Apr 2020 09:14:19 -0500
Subject: [PATCH 045/892] Update docs to work with the latest ver of Sphinx
 (#2565)

* Update version of doc build tools

* Fix errors in doc build

Signed-off-by: Bob Fahr
---
 docs/docs_guidelines.rst         | 51 ++++++++++++++++++++++----------
 insights/core/remote_resource.py | 27 +++++++----------
 insights/parsers/ethtool.py      |  3 +-
 insights/parsers/nmcli.py        |  3 ---
 insights/parsers/vdo_status.py   |  8 +----
 insights/parsers/yum.py          | 14 ++++++---
 setup.py                         |  4 +--
 7 files changed, 60 insertions(+), 50 deletions(-)

diff --git a/docs/docs_guidelines.rst b/docs/docs_guidelines.rst
index c010d7643..79f9e8aa4 100644
--- a/docs/docs_guidelines.rst
+++ b/docs/docs_guidelines.rst
@@ -126,24 +126,27 @@ Description

 .. code-block:: python
     :linenos:
-    :lineno-start: 4

+    """
+    lspci - Command
+    ===============

-    This module provides plugins access to the PCI device information gathered from
-    the ``/usr/sbin/lspci`` command.
+    This module provides plugins access to the PCI device information gathered from
+    the ``/usr/sbin/lspci`` command.

-    Typical output of the ``lspci`` command is::
+    Typical output of the ``lspci`` command is::

-        00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)
-        00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)
-        03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)
-        0d:00.0 System peripheral: Ricoh Co Ltd PCIe SDXC/MMC Host Controller (rev 07)
+        00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)
+        00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)
+        03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)
+        0d:00.0 System peripheral: Ricoh Co Ltd PCIe SDXC/MMC Host Controller (rev 07)

-    The data is exposed via the ``obj.lines`` attribute which is a list containing
-    each line in the output. The data may also be filtered using the
-    ``obj.get("filter string")`` method. This method will return a list of lines
-    containing only "filter string". The ``in`` operator may also be used to test
-    whether a particular string is in the ``lspci`` output. Other methods/operators
-    are also supported, see the :py:class:`insights.core.LogFileOutput` class for more information.
+    The data is exposed via the ``obj.lines`` attribute which is a list containing
+    each line in the output. The data may also be filtered using the
+    ``obj.get("filter string")`` method. This method will return a list of lines
+    containing only "filter string". The ``in`` operator may also be used to test
+    whether a particular string is in the ``lspci`` output. Other methods/operators
+    are also supported, see the :py:class:`insights.core.LogFileOutput` class for more information.

 Next comes the description of the module.  Since this description is the
 first thing a developer will see when viewing
@@ -162,13 +165,27 @@ Notes/References

 .. code-block:: python
     :linenos:
-    :lineno-start: 22
+    :lineno-start: 20
+    :force:
+
+    """

     Note: The examples in this module may be executed with the following command:

     ``python -m insights.parsers.lspci``

+    Examples:
+        >>> pci_info.get("Intel Corporation")
+        ['00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)', '00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)', '03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)']
+        >>> len(pci_info.get("Network controller"))
+        1
+        >>> "Centrino Advanced-N 6205" in pci_info
+        True
+        >>> "0d:00.0" in pci_info
+        True
+    """
+
 Module notes and/or references are not necessary unless there is
 information that should be included to aid a developer in understanding
 the parser. In this particular case this information is only provided as
 an aid to the
@@ -181,8 +198,10 @@ Examples

 .. code-block:: python
     :linenos:
-    :lineno-start: 27
+    :lineno-start: 25

+    """
+
     Examples:
         >>> pci_info.get("Intel Corporation")
         ['00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)', '00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)', '03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)']
diff --git a/insights/core/remote_resource.py b/insights/core/remote_resource.py
index aa017f511..4a168dbd4 100644
--- a/insights/core/remote_resource.py
+++ b/insights/core/remote_resource.py
@@ -15,9 +15,6 @@ class RemoteResource(object):
     """
     RemoteResource class for accessing external Web resources.

-    Attributes:
-        timeout(float): Time in seconds for the requests.get api call to wait before returning a timeout exception
-
     Examples:
         >>> from insights.core.remote_resource import RemoteResource
         >>> rr = RemoteResource()
@@ -26,6 +23,7 @@ class RemoteResource(object):
     """

     timeout = 10
+    """ float: Time in seconds for the requests.get api call to wait before returning a timeout exception """

     def __init__(self, session=None):

@@ -55,13 +53,6 @@ class CachedRemoteResource(RemoteResource):
     """
     RemoteResource subclass that sets up caching for subsequent Web resource requests.

-    Attributes:
-        expire_after (float): Amount of time in seconds that the cache will expire
-        backend (str): Type of storage for cache `DictCache1`, `FileCache` or `RedisCache`
-        redis_host (str): Hostname of redis instance if `RedisCache` backend is specified
-        redis_port (int): Port used to contact the redis instance if `RedisCache` backend is specified
-        file_cache_path (string): Path to where file cache will be stored if `FileCache` backend is specified
-
     Examples:
         >>> from insights.core.remote_resource import CachedRemoteResource
         >>> crr = CachedRemoteResource()
@@ -71,12 +62,17 @@ class CachedRemoteResource(RemoteResource):
     """

     expire_after = 180
+    """ float: Amount of time in seconds that the cache will expire """
     backend = "DictCache"
+    """ str: Type of storage for cache `DictCache1`, `FileCache` or `RedisCache` """
     redis_port = 6379
+    """ int: Port used to contact the redis instance if `RedisCache` backend is specified """
     redis_host = 'localhost'
+    """ str: Hostname of redis instance if `RedisCache` backend is specified """
     __heuristic = 'DefaultHeuristic'
     __cache = None
     file_cache_path = '.web_cache'
+    """ str: Path to where file cache will be stored if `FileCache` backend is specified """

     def __init__(self):

@@ -101,16 +97,15 @@ def __init__(self):
 class DefaultHeuristic(BaseHeuristic):
     """
     BaseHeuristic subclass that sets the default caching headers if not supplied by the remote service.
-
-    Attributes:
-        default_cache_vars (str): Message content warning that the response from the remote server did not
-            return proper HTTP cache headers so we will use default cache settings
-        server_cache_headers (str): Message content warning that we are using cache settings returned by the
-            remote server.
     """

     default_cache_vars = "Remote service caching headers not set correctly, using default caching"
+    """
+    str: Message content warning that the response from the remote server did not
+    return proper HTTP cache headers so we will use default cache settings
+    """
     server_cache_headers = "Caching being done based on caching headers returned by remote service"
+    """ str: Message content warning that we are using cache settings returned by the remote server. """

     def __init__(self, expire_after):
diff --git a/insights/parsers/ethtool.py b/insights/parsers/ethtool.py
index 25dcf63f6..c354bbb1a 100644
--- a/insights/parsers/ethtool.py
+++ b/insights/parsers/ethtool.py
@@ -714,7 +714,6 @@ class TimeStamp(CommandParser):

     Attributes:
         data (dict): Dictionary of keys with values.
-        ifname (str): Interface name.

     Raises:
         ParseException: Raised when any problem parsing the command output.
@@ -794,7 +793,7 @@ class Ethtool(CommandParser):
             values, split into individual words.
         available_link_modes (list): A list of the 'Available link modes' values,
             split into individual words.
-        supported_link_modes (list): A list of the 'Supported ports' values,
+        supported_ports (list): A list of the 'Supported ports' values,
             split into individual words.

     Sample input::
-
     Sample input for ``/usr/bin/nmcli dev show``::

        GENERAL.DEVICE:                         em3

diff --git a/insights/parsers/vdo_status.py b/insights/parsers/vdo_status.py
index a474db8d8..2f9225baf 100644
--- a/insights/parsers/vdo_status.py
+++ b/insights/parsers/vdo_status.py
@@ -88,7 +88,6 @@ class VDOStatus(YAMLParser):

     Attributes:
         data (dict): the result parsed of 'vdo status'
-        volumns (list): The list the vdo volumns involved

     """

@@ -127,12 +126,7 @@ def __get_dev_mapper__(self, vol):

     @property
     def volumns(self):
-        """
-        The volumns appeared in vdo status
-
-        Returns:
-            list: vdo volumns
-        """
+        """ list: List of the volumns in vdo status """
         return sorted(self.data['VDOs'].keys()) if 'VDOs' in self.data else []

     def get_slab_size_of_vol(self, vol):
diff --git a/insights/parsers/yum.py b/insights/parsers/yum.py
index 387d8169d..41103c40a 100644
--- a/insights/parsers/yum.py
+++ b/insights/parsers/yum.py
@@ -122,9 +122,6 @@ class YumRepoList(CommandParser):
             }
         }

-        rhel_repos(list): list of all the rhel repos and the item is just the repo id without server and arch info. For example::
-
-            self.rhel_repos = ['rhel-7-server-e4s-rpms', 'rhel-ha-for-rhel-7-server-e4s-rpms', 'rhel-sap-hana-for-rhel-7-server-e4s-rpms']
     """
     def parse_content(self, content):
         if not content:
@@ -177,7 +174,16 @@ def eus(self):

     @property
     def rhel_repos(self):
-        '''list of RHEL repos/Repo IDs'''
+        """
+        list: list of all the rhel repos and the item is just the repo id without server and arch info.
+        For example::
+
+            self.rhel_repos = [
+                'rhel-7-server-e4s-rpms',
+                'rhel-ha-for-rhel-7-server-e4s-rpms',
+                'rhel-sap-hana-for-rhel-7-server-e4s-rpms'
+            ]
+        """
         return [i.split('/')[0] for i in self.repos if i.startswith('rhel')]

diff --git a/setup.py b/setup.py
index 122799ca4..f93ef0132 100644
--- a/setup.py
+++ b/setup.py
@@ -66,13 +66,13 @@ def maybe_require(pkg):
     ])

     docs = set([
-        'Sphinx<=2.4.4',
+        'Sphinx<=3.0.2',
         'nbsphinx',
         'sphinx_rtd_theme',
         'ipython',
         'colorama',
         'jinja2',
-        'Pygments<2.5.1'
+        'Pygments'
     ])

     testing = set([

From 5dcc506689ebf17e48cdeed36b7073735ed3789f Mon Sep 17 00:00:00 2001
From: Stephen
Date: Thu, 30 Apr 2020 11:29:20 -0400
Subject: [PATCH 046/892] Move test URL messages to network logger (#2573)

When using test-connection we now enable --net-debug.
In order to see the url that is being printed, we need to put the url in the NETWORK log level Signed-off-by: Stephen Adams --- insights/client/connection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/client/connection.py b/insights/client/connection.py index cbb178bad..44b7a73c8 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -315,7 +315,7 @@ def _legacy_test_urls(self, url, method): paths = (url.path + '/', '', '/r', '/r/insights') for ext in paths: try: - logger.debug("Testing: %s", test_url + ext) + logger.log(NETWORK, "Testing: %s", test_url + ext) if method is "POST": test_req = self.session.post( test_url + ext, timeout=self.config.http_timeout, data=test_flag) @@ -347,7 +347,7 @@ def _test_urls(self, url, method): if self.config.legacy_upload: return self._legacy_test_urls(url, method) try: - logger.debug('Testing %s', url) + logger.log(NETWORK, 'Testing %s', url) if method is 'POST': test_tar = TemporaryFile(mode='rb', suffix='.tar.gz') test_files = { From 66e5a75dfbd22b90929292cc351f14e28a8ad184 Mon Sep 17 00:00:00 2001 From: Ian Page Hands Date: Fri, 1 May 2020 15:29:28 -0400 Subject: [PATCH 047/892] Fixes improper calculation of core_total (#2563) * Fixes improper calculation of core_total * change impl to work with py 2.6 * lint Closes 2555 Signed-off-by: Ian Page Hands * Add deprecation warning for core_total Signed-off-by: Bob Fahr Co-authored-by: Bob Fahr --- insights/parsers/cpuinfo.py | 18 +- insights/parsers/tests/test_cpuinfo.py | 971 ++++++++++++++++++++++++- 2 files changed, 986 insertions(+), 3 deletions(-) diff --git a/insights/parsers/cpuinfo.py b/insights/parsers/cpuinfo.py index 04f7ab058..1d0a053f0 100644 --- a/insights/parsers/cpuinfo.py +++ b/insights/parsers/cpuinfo.py @@ -240,10 +240,24 @@ def vendor(self): @defaults() def core_total(self): """ - str: Returns the total number of cores for the server if available, else None. + int: Returns the total number of cores for the server if available, else None. + + .. warning:: + This function is deprecated. Please use the + :py:class:`insights.parsers.lscpu.LsCPU` class attribute + ``info['Cores per socket']`` and ``info['Sockets']`` values instead. 
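+
+            A minimal sketch of the suggested replacement, assuming an
+            ``LsCPU`` parser instance named ``lscpu`` is available::
+
+                core_total = int(lscpu.info['Cores per socket']) * int(lscpu.info['Sockets'])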
""" if self.data and 'cpu_cores' in self.data: - return sum([int(c) for c in self.data['cpu_cores']]) + # I guess we can't get this fancey on older versions of RHEL + # return sum({e['sockets']: int(e['cpu_cores']) for e in self}.values()) + physical_dict = {} + for e in self: + # we should rename sockets here to physical_ids as cpuinfo + # has it there can be many physical_ids per socket + # see fgrep 'physical id' /proc/cpuinfo on a single + # package system + physical_dict[e['sockets']] = int(e['cpu_cores']) + return sum(physical_dict.values()) else: return None diff --git a/insights/parsers/tests/test_cpuinfo.py b/insights/parsers/tests/test_cpuinfo.py index 66118c660..8a6187de5 100644 --- a/insights/parsers/tests/test_cpuinfo.py +++ b/insights/parsers/tests/test_cpuinfo.py @@ -3,6 +3,962 @@ from insights.tests import context_wrap import doctest +CPUINFO_HETERO = """ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2359.484 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 4 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3221.700 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 32 +initial apicid : 32 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: +""" + +CPUINFO_NOIR = """ +processor : 0 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2359.484 +cache size : 
20480 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 0 +initial apicid : 0 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 1 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3221.700 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 32 +initial apicid : 32 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 2 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1200.058 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 2 +initial apicid : 2 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 3 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 
+model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1859.248 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 34 +initial apicid : 34 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 4 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2098.165 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 4 +initial apicid : 4 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 5 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3237.702 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 36 +initial apicid : 36 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 
46 bits physical, 48 bits virtual +power management: + +processor : 6 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1200.210 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 6 +initial apicid : 6 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 7 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3262.179 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 38 +initial apicid : 38 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 8 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1199.929 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 8 +initial apicid : 8 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 
spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 9 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3300.892 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 40 +initial apicid : 40 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 10 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2728.786 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 10 +initial apicid : 10 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 11 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2443.784 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 42 +initial apicid : 42 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid 
ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 12 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1365.540 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 12 +initial apicid : 12 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 13 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3265.053 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 44 +initial apicid : 44 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 14 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2076.931 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 14 +initial apicid : 14 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp 
tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 15 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3299.792 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 46 +initial apicid : 46 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 16 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1199.880 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 1 +initial apicid : 1 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 17 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3299.368 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 0 +cpu cores : 8 +apicid : 33 +initial apicid : 33 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 
ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 18 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1604.670 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 3 +initial apicid : 3 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 19 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3264.037 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 1 +cpu cores : 8 +apicid : 35 +initial apicid : 35 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 20 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3004.784 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 5 +initial apicid : 5 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc 
arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 21 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3253.148 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 2 +cpu cores : 8 +apicid : 37 +initial apicid : 37 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 22 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1200.025 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 7 +initial apicid : 7 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 23 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3264.614 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 3 +cpu cores : 8 +apicid : 39 +initial apicid : 39 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 
apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 24 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1672.400 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 9 +initial apicid : 9 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 25 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1748.696 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 4 +cpu cores : 8 +apicid : 41 +initial apicid : 41 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 26 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1599.248 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 
11 +initial apicid : 11 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 27 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3212.567 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 5 +cpu cores : 8 +apicid : 43 +initial apicid : 43 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 28 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 2049.386 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 13 +initial apicid : 13 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 29 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 
+microcode : 0x718 +cpu MHz : 3223.740 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 6 +cpu cores : 8 +apicid : 45 +initial apicid : 45 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 30 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 1200.024 +cache size : 20480 KB +physical id : 0 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 15 +initial apicid : 15 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: + +processor : 31 +vendor_id : GenuineIntel +cpu family : 6 +model : 45 +model name : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz +stepping : 7 +microcode : 0x718 +cpu MHz : 3217.580 +cache size : 20480 KB +physical id : 1 +siblings : 16 +core id : 7 +cpu cores : 8 +apicid : 47 +initial apicid : 47 +fpu : yes +fpu_exception : yes +cpuid level : 13 +wp : yes +flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic popcnt tsc_deadline_timer aes xsave avx lahf_lm epb pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid xsaveopt dtherm ida arat pln pts md_clear flush_l1d +vmx flags : vnmi preemption_timer invvpid ept_x_only ept_1gb flexpriority tsc_offset vtpr mtf vapic ept vpid unrestricted_guest ple +bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs itlb_multihit +bogomips : 5199.96 +clflush size : 64 +cache_alignment : 64 +address sizes : 46 bits physical, 48 bits virtual +power management: +""" + 
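+# Note on the data above: CPUINFO_HETERO deliberately reports different
+# "cpu cores" values for its two packages (4 for physical id 0, 8 for
+# physical id 1), so a correct core_total must sum the per-package values:
+# 4 + 8 == 12.  CPUINFO_NOIR has 2 packages x 8 cores == 16.
+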
CPUINFO = """ COMMAND> cat /proc/cpuinfo COMMAND> cat /proc/cpuinfo @@ -483,7 +1439,20 @@ def test_one_socket_cpuinfo(): cpu_info = CpuInfo(context_wrap(ONE_SOCKET_CPUINFO)) assert cpu_info.cpu_count == 8 assert cpu_info.socket_count == 1 - assert cpu_info.core_total == 32 + assert cpu_info.core_total == 4 + + +def test_noir_cpuinfo(): + cpu_info = CpuInfo(context_wrap(CPUINFO_NOIR)) + assert cpu_info.cpu_count == 32 + assert cpu_info.socket_count == 2 + assert cpu_info.core_total == 16 + + +def test_hetero_cpuinfo(): + cpu_info = CpuInfo(context_wrap(CPUINFO_HETERO)) + assert cpu_info.socket_count == 2 + assert cpu_info.core_total == 12 def test_empty_cpuinfo(): From ba2b6d02fc4d77439a23efc2a15d5c9807a0c179 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Wed, 6 May 2020 23:14:21 +0200 Subject: [PATCH 048/892] Parser for a path to whoopsie report (#2574) * Parser for whoopsie Signed-off-by: Jitka Obselkova * Fix typo in path Signed-off-by: Jitka Obselkova * Change quotation marks Signed-off-by: Jitka Obselkova --- docs/shared_parsers_catalog/whoopsie.rst | 3 ++ insights/parsers/tests/test_whoopsie.py | 45 ++++++++++++++++++++++++ insights/parsers/whoopsie.py | 44 +++++++++++++++++++++++ 3 files changed, 92 insertions(+) create mode 100644 docs/shared_parsers_catalog/whoopsie.rst create mode 100644 insights/parsers/tests/test_whoopsie.py create mode 100644 insights/parsers/whoopsie.py diff --git a/docs/shared_parsers_catalog/whoopsie.rst b/docs/shared_parsers_catalog/whoopsie.rst new file mode 100644 index 000000000..6d0e7fc22 --- /dev/null +++ b/docs/shared_parsers_catalog/whoopsie.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.whoopsie + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_whoopsie.py b/insights/parsers/tests/test_whoopsie.py new file mode 100644 index 000000000..7a2595d78 --- /dev/null +++ b/insights/parsers/tests/test_whoopsie.py @@ -0,0 +1,45 @@ +import doctest +import pytest + +from insights.parsers import whoopsie +from insights.parsers.whoopsie import Whoopsie +from insights.tests import context_wrap + +BOTH_MATCHED = """ +/var/crash/.reports-1000-user/whoopsie-report +""".strip() + +NOT_FIND_MATCHED = """ +/usr/bin/find: '/var/crash': No such file or directory +/var/tmp/.reports-1000-user/whoopsie-report +""".strip() + +BOTH_NOT_FIND = """ +/usr/bin/find: '/var/crash': No such file or directory +/usr/bin/find: '/var/tmp': No such file or directory +""".strip() + +BOTH_EMPTY = """ +""" + +TEST_CASES = [ + (BOTH_MATCHED, "1000", "/var/crash/.reports-1000-user/whoopsie-report"), + (NOT_FIND_MATCHED, "1000", "/var/tmp/.reports-1000-user/whoopsie-report"), + (BOTH_NOT_FIND, None, None), + (BOTH_EMPTY, None, None) +] + + +@pytest.mark.parametrize("output, uid, file", TEST_CASES) +def test_whoopsie(output, uid, file): + test = Whoopsie(context_wrap(output)) + assert test.uid == uid + assert test.file == file + + +def test_doc_examples(): + env = { + "whoopsie": Whoopsie(context_wrap(BOTH_MATCHED)), + } + failed, total = doctest.testmod(whoopsie, globs=env) + assert failed == 0 diff --git a/insights/parsers/whoopsie.py b/insights/parsers/whoopsie.py new file mode 100644 index 000000000..d130d4687 --- /dev/null +++ b/insights/parsers/whoopsie.py @@ -0,0 +1,44 @@ +""" +Whoopsie - command ``/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit`` +========================================================================================================= +""" + +from 
insights.core import CommandParser
+from insights.core.plugins import parser
+from insights.specs import Specs
+import re
+
+WHOOPSIE_RE = re.compile(r'.*\.reports-(\d+)-.*/whoopsie-report')
+
+
+@parser(Specs.woopsie)
+class Whoopsie(CommandParser):
+    """
+    Class for parsing the ``/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit``
+    command.
+
+    Attributes:
+        uid (string): uid parsed from the file path
+        file (string): the line parsed from the command output
+
+    Sample output of this command is::
+
+        /var/crash/.reports-1000-user/whoopsie-report
+
+    Examples:
+        >>> type(whoopsie)
+        <class 'insights.parsers.whoopsie.Whoopsie'>
+        >>> whoopsie.uid
+        '1000'
+        >>> whoopsie.file
+        '/var/crash/.reports-1000-user/whoopsie-report'
+    """
+
+    def parse_content(self, content):
+        self.uid = None
+        self.file = None
+
+        match_whoopsie = WHOOPSIE_RE.search('\n'.join(content))
+        if match_whoopsie:
+            self.uid = match_whoopsie.group(1)
+            self.file = match_whoopsie.group(0)

From 30b25afa5ac216b7630378f236c4332f0993b0ab Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Thu, 7 May 2020 15:13:24 -0400
Subject: [PATCH 049/892] always create archive_tmp_dir for compatibility
 (#2576)

Signed-off-by: Jeremy Crafts
---
 insights/client/archive.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/insights/client/archive.py b/insights/client/archive.py
index 5e3beee21..d2b3f0449 100644
--- a/insights/client/archive.py
+++ b/insights/client/archive.py
@@ -30,9 +30,7 @@ def __init__(self, config):
         """
         self.config = config
         self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
-        self.archive_tmp_dir = None
-        if not self.config.obfuscate:
-            self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
+        self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
         name = determine_hostname()
         self.archive_name = ("insights-%s-%s" %
                              (name,

From c5e8b8fe4b397bdac6d39cbdecff20092e6dd43e Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Fri, 8 May 2020 15:34:15 -0400
Subject: [PATCH 050/892] strip whitespace before UUID check for machine-id,
 log exception (#2571)

Signed-off-by: Jeremy Crafts
---
 insights/client/utilities.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/insights/client/utilities.py b/insights/client/utilities.py
index 32f2bb708..6bd356b26 100644
--- a/insights/client/utilities.py
+++ b/insights/client/utilities.py
@@ -145,11 +145,14 @@ def generate_machine_id(new=False,
         logger.debug("Creating %s", destination_file)
         write_to_disk(destination_file, content=machine_id)

+    machine_id = str(machine_id).strip()
+
     try:
         uuid.UUID(machine_id, version=4)
-        return str(machine_id).strip()
-    except ValueError:
+        return machine_id
+    except ValueError as e:
         logger.error("Invalid machine ID: %s", machine_id)
+        logger.error("Error details: %s", str(e))
         logger.error("Remove %s and a new one will be generated.\nRerun the client with --register", destination_file)
         sys.exit(constants.sig_kill_bad)

From 0f009171ab8d3b63dafe8da88654590355457112 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Sun, 10 May 2020 08:48:06 +0800
Subject: [PATCH 051/892] Remove duplicated apicid in mappings of CpuInfo
 (#2583)

Signed-off-by: Xiangce Liu
---
 insights/parsers/cpuinfo.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/insights/parsers/cpuinfo.py b/insights/parsers/cpuinfo.py
index 1d0a053f0..08dc0c00f 100644
--- a/insights/parsers/cpuinfo.py
+++ b/insights/parsers/cpuinfo.py
@@ -141,7 +141,6 @@ def parse_content(self, content):
             "revision": "revision",
             "address sizes": "address_sizes",
             "bugs": "bugs",
-            "apicid": 
"apicid" } for line in get_active_lines(content, comment_char="COMMAND>"): From 00ca02d458024d2baabcf988da85d7c0f287411b Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Mon, 11 May 2020 23:18:15 +0200 Subject: [PATCH 052/892] Parser for ImageMagick policy.xml file (#2585) * Parser for imagemagick policy Signed-off-by: Jitka Obselkova * Change exception Signed-off-by: Jitka Obselkova * Add example into doctest Signed-off-by: Jitka Obselkova * Fix dictionary in doctest Signed-off-by: Jitka Obselkova --- .../imagemagick_policy.rst | 3 + insights/parsers/imagemagick_policy.py | 72 ++++++++++++ .../parsers/tests/test_imagemagick_policy.py | 110 ++++++++++++++++++ 3 files changed, 185 insertions(+) create mode 100644 docs/shared_parsers_catalog/imagemagick_policy.rst create mode 100644 insights/parsers/imagemagick_policy.py create mode 100644 insights/parsers/tests/test_imagemagick_policy.py diff --git a/docs/shared_parsers_catalog/imagemagick_policy.rst b/docs/shared_parsers_catalog/imagemagick_policy.rst new file mode 100644 index 000000000..b8dbd247f --- /dev/null +++ b/docs/shared_parsers_catalog/imagemagick_policy.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.imagemagick_policy + :members: + :show-inheritance: diff --git a/insights/parsers/imagemagick_policy.py b/insights/parsers/imagemagick_policy.py new file mode 100644 index 000000000..d28e10418 --- /dev/null +++ b/insights/parsers/imagemagick_policy.py @@ -0,0 +1,72 @@ +""" +ImageMagickPolicy - files ``/etc/ImageMagick/policy.xml`` and ``/usr/lib*/ImageMagick-6.5.4/config/policy.xml`` +=============================================================================================================== +""" + +from insights.core import XMLParser +from insights.core.plugins import parser +from insights.parsers import SkipException +from insights.specs import Specs + + +@parser(Specs.imagemagick_policy) +class ImageMagickPolicy(XMLParser): + """ + Class for parsing the ``/etc/ImageMagick/policy.xml`` and ``/usr/lib*/ImageMagick-6.5.4/config/policy.xml`` + files. + + Attributes: + policies (list): list of Element objects with a 'policy' tag + + Raises: + SkipException: When content is empty or cannot be parsed. 
+ + Sample output of this file is:: + + + + + + + ]> + + + + + + + + + + + + + + + + + + Examples: + >>> type(imagemagick_policy) + + >>> len(imagemagick_policy.policies) + 10 + >>> sorted(imagemagick_policy.policies[0].items()) + [('domain', 'coder'), ('pattern', 'EPHEMERAL'), ('rights', 'none')] + """ + + def parse_content(self, content): + if not content: + raise SkipException("No content.") + + try: + super(ImageMagickPolicy, self).parse_content(content) + self.policies = self.get_elements(".//policy") + except Exception: # file without elements + self.policies = [] diff --git a/insights/parsers/tests/test_imagemagick_policy.py b/insights/parsers/tests/test_imagemagick_policy.py new file mode 100644 index 000000000..fa429b936 --- /dev/null +++ b/insights/parsers/tests/test_imagemagick_policy.py @@ -0,0 +1,110 @@ +import doctest +import pytest + +from insights.parsers import imagemagick_policy, SkipException +from insights.parsers.imagemagick_policy import ImageMagickPolicy +from insights.tests import context_wrap + +XML_POLICY_COMMENTED_SOME = """ + + + + + +]> + + + + + + + + + + + + + + + + +""" + +XML_POLICY_COMMENTED_ALL = """ + + + + + +]> + + + + + + + + +""" + +XML_EMPTY = """ + + + + + +]> + + +""" + +TEST_CASES = [ + (XML_POLICY_COMMENTED_SOME, 10), + (XML_POLICY_COMMENTED_ALL, 0), + (XML_EMPTY, 0), +] + + +def test_no_data(): + with pytest.raises(SkipException): + ImageMagickPolicy(context_wrap("")) + + +@pytest.mark.parametrize("output, length", TEST_CASES) +def test_parsing_policymap(output, length): + xml = ImageMagickPolicy(context_wrap(output)) + assert len(xml.policies) == length + + +def test_doc_examples(): + env = { + "imagemagick_policy": ImageMagickPolicy(context_wrap(XML_POLICY_COMMENTED_SOME)) + } + failed, total = doctest.testmod(imagemagick_policy, globs=env) + assert failed == 0 From 8f192b5b6b9467cdcc57033997037c3402e50360 Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Tue, 12 May 2020 12:13:17 +0530 Subject: [PATCH 053/892] updated specs (#2579) * updated specs Signed-off-by: vishawanathjadhav * Updated specs to select as per availablity Signed-off-by: vishawanathjadhav * removed first_of because on RHEL7/8, symlink created for /sbin/grubby in /usr/sbin/grubby Signed-off-by: vishawanathjadhav --- insights/specs/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 5b620adf2..dd59d554a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -350,7 +350,7 @@ def dumpdev(broker): grub2_cfg = simple_file("/boot/grub2/grub.cfg") grub2_efi_cfg = simple_file("boot/efi/EFI/redhat/grub.cfg") grubby_default_index = simple_command("/usr/sbin/grubby --default-index") # only RHEL7 and updwards - grubby_default_kernel = simple_command("/usr/sbin/grubby --default-kernel") # RHEL6 and updwards + grubby_default_kernel = simple_command("/sbin/grubby --default-kernel") hammer_ping = simple_command("/usr/bin/hammer ping") hammer_task_list = simple_command("/usr/bin/hammer --config /root/.hammer/cli.modules.d/foreman.yml --output csv task list --search 'state=running AND ( label=Actions::Candlepin::ListenOnCandlepinEvents OR label=Actions::Katello::EventQueue::Monitor )'") haproxy_cfg = first_file(["/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg", "/etc/haproxy/haproxy.cfg"]) From 043ec69864a626efb30556247a0f5493b2aecfec Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Tue, 12 May 2020 09:03:15 -0500 Subject: [PATCH 054/892] Initial version of autology 
and specs documentation (#2580) * Initial version of autology and specs documentation * Add new module autology for spec introspection * Add new reporting function for spec documentation that runs during sphinx build * Modify docs to support new specs documentation page * Ignore new dynamically generated specs documentation page in git Signed-off-by: Bob Fahr * Fix docstring and remove unnecessary raises Signed-off-by: Bob Fahr * Fix flake8 error Signed-off-by: Bob Fahr * Fix flake8 error Signed-off-by: Bob Fahr * Remove extra code Signed-off-by: Bob Fahr --- .gitignore | 1 + docs/api.rst | 2 + docs/api_index.rst | 10 + docs/conf.py | 6 +- docs/index.rst | 1 + insights/util/autology/__init__.py | 5 + insights/util/autology/datasources.py | 419 ++++++++++++++++++++++++++ insights/util/specs_catalog.py | 103 +++++++ 8 files changed, 546 insertions(+), 1 deletion(-) create mode 100644 insights/util/autology/__init__.py create mode 100644 insights/util/autology/datasources.py create mode 100644 insights/util/specs_catalog.py diff --git a/.gitignore b/.gitignore index bdce95f05..67bff891f 100644 --- a/.gitignore +++ b/.gitignore @@ -25,6 +25,7 @@ pylint.out cover/ docs/plugin_catalog/*.rst docs/components.rst +docs/specs_catalog.rst build/ .idea* dist/ diff --git a/docs/api.rst b/docs/api.rst index 6a0d8a954..814aa866d 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -93,6 +93,8 @@ each unique context, and also provide a default set of data sources that are common among one or more contexts. All available contexts are defined in the module :py:mod:`insights.core.context`. +.. _datasources-ref: + Data Sources ============ diff --git a/docs/api_index.rst b/docs/api_index.rst index 1eecb8acb..61187c18a 100644 --- a/docs/api_index.rst +++ b/docs/api_index.rst @@ -183,3 +183,13 @@ insights.util :members: :show-inheritance: :undoc-members: + +.. automodule:: insights.util.autology + :members: + :show-inheritance: + :undoc-members: + +.. automodule:: insights.util.autology.datasources + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/conf.py b/docs/conf.py index 7acda2e5a..ca467f8fe 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,7 +18,7 @@ import sys import os import insights -from insights.util import component_graph +from insights.util import component_graph, specs_catalog # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the
@@ -366,3 +366,7 @@ def setup(app):
     # Dynamically generate cross reference for components prior to doc build
     filename = os.path.join(app.confdir, "components.rst")
     component_graph.main(filename)
+
+    # Dynamically generate datasource documentation prior to doc build
+    filename = os.path.join(app.confdir, "specs_catalog.rst")
+    specs_catalog.main(filename)
diff --git a/docs/index.rst b/docs/index.rst
index 43b4748aa..e52ddaa2c 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -11,6 +11,7 @@ Contents:
    api
    exception_model
    api_index
+   specs_catalog
    parsers_index
    combiners_index
    components_index
diff --git a/insights/util/autology/__init__.py b/insights/util/autology/__init__.py
new file mode 100644
index 000000000..3ed000e5f
--- /dev/null
+++ b/insights/util/autology/__init__.py
@@ -0,0 +1,5 @@
+class AutologyError(Exception):
+    """
+    Exception class for the :py:mod:`insights.util.autology` module
+    """
+    pass
diff --git a/insights/util/autology/datasources.py b/insights/util/autology/datasources.py
new file mode 100644
index 000000000..26a65fd37
--- /dev/null
+++ b/insights/util/autology/datasources.py
@@ -0,0 +1,419 @@
+"""
+datasources - Provides introspection functionality for datasources in insights-core
+
+This module provides classes that implement introspection for each of the
+datasource definition classes in the :py:mod:`insights.specs` package.
+"""
+import inspect
+import insights.core.spec_factory
+import insights.specs
+from insights.specs.default import DefaultSpecs as InDefaultSpecs
+from insights.specs.insights_archive import InsightsArchiveSpecs as InArchiveSpecs
+from insights.specs.sos_archive import SosSpecs as InSosSpecs
+from . import AutologyError
+
+
+SIMPLE_COMMAND_TYPE = 'simple_command'
+""" str: Literal constant for a simple_command Spec object """
+SIMPLE_FILE_TYPE = 'simple_file'
+""" str: Literal constant for a simple_file Spec object """
+GLOB_FILE_TYPE = 'glob_file'
+""" str: Literal constant for a glob_file Spec object """
+FOREACH_EXECUTE_TYPE = 'foreach_execute'
+""" str: Literal constant for a foreach_execute Spec object """
+LISTDIR_TYPE = 'listdir'
+""" str: Literal constant for a listdir Spec object """
+LIST_TYPE = 'list'
+""" str: Literal constant for a list Spec object """
+STRING_TYPE = 'string'
+""" str: Literal constant for a string Spec object """
+NONE_TYPE = 'none'
+""" str: Literal constant for a Spec object with None type """
+FUNCTION_TYPE = 'function'
+""" str: Literal constant for a function Spec object """
+FIRST_FILE_TYPE = 'first_file'
+""" str: Literal constant for a first_file Spec object """
+FOREACH_COLLECT_TYPE = 'foreach_collect'
+""" str: Literal constant for a foreach_collect Spec object """
+FIRST_OF_TYPE = 'first_of'
+""" str: Literal constant for a first_of Spec object """
+COMMAND_WITH_ARGS_TYPE = 'command_with_args'
+""" str: Literal constant for a command_with_args Spec object """
+UNKNOWN_TYPE = 'unknown'
+""" str: Literal constant for a Spec object with unknown type """
+ANONYMOUS_SPEC_NAME = 'anonymous'
+""" str: Literal constant used for Specs with no ``name`` attribute """
+
+
+def is_simple_command(m_obj):
+    """ bool: True if broker object is a simple_command object """
+    return isinstance(m_obj, insights.core.spec_factory.simple_command)
+
+
+def is_simple_file(m_obj):
+    """ bool: True if broker object is a simple_file object """
+    return isinstance(m_obj, insights.core.spec_factory.simple_file)
+
+
+def is_glob_file(m_obj):
+    """ bool: True if broker object is a glob_file object
""" + return isinstance(m_obj, insights.core.spec_factory.glob_file) + + +def is_foreach_execute(m_obj): + """ bool: True if broker object is a foreach_execute object """ + return isinstance(m_obj, insights.core.spec_factory.foreach_execute) + + +def is_first_file(m_obj): + """ bool: True if broker object is a first_file object """ + return isinstance(m_obj, insights.core.spec_factory.first_file) + + +def is_first_of(m_obj): + """ bool: True if broker object is a is_first_of object """ + return isinstance(m_obj, insights.core.spec_factory.first_of) + + +def is_foreach_collect(m_obj): + """ bool: True if broker object is a is_foreach_collect object """ + return isinstance(m_obj, insights.core.spec_factory.foreach_collect) + + +def is_listdir(m_obj): + """ bool: True if broker object is a is_listdir object """ + return isinstance(m_obj, insights.core.spec_factory.listdir) + + +def is_command_with_args(m_obj): + """ bool: True if broker object is a is_command_with_args object """ + return isinstance(m_obj, insights.core.spec_factory.command_with_args) + + +def is_function(m_obj): + """ bool: True if object is a function object """ + return inspect.isfunction(m_obj) + + +class Spec(dict): + """ + Class to identify and describe datasources and related objects + + The normal way to create one of these objects is to use the factory + method :py:meth:`from_object` and provide as input one of the datasource + objects from :py:mod:`insights.core.spec_factory`. This class is implemented + as a dictionary and all attributes of the object are stored as dictionary keys. + A ``repr`` string must be included in ``kwargs`` to __init__ and it will be + removed and stored in the ``repr_str`` attribute. If a ``name`` attributed + is not provided, a default name of ``anonymous`` will be used. This is a special + name recognized when formatting Spec output to be used when specs have other + specs as providers/dependents. + + Attributes: + repr_str (str): String to be used to implement the __repr__ method for this object + Supports jinja2 formatting using dictionary attributes of this object + self (dict): All attributes of this object are included in the dictionary except + the repr string. + + Raises: + AutologyError: Raised if a ``repr`` string is not provided or if an unsupported + object type is passed to the constructor or to the factory method. + """ + def __init__(self, **kwargs): + super(Spec, self).__init__(kwargs) + try: + self.repr_str = self.pop('repr') + except KeyError: + raise AutologyError('A repr items must be supplied for the spec name: {}'.format(self.get('name', 'no name'))) + + if 'name' not in self: + self['name'] = ANONYMOUS_SPEC_NAME + + def __repr__(self): + try: + formatted_str = self.repr_str.format(**self) + except KeyError: + if '{name} =' in self.repr_str: + _, r = self.repr_str.split('{name} =') + fixed = r.replace('{', '{{').replace('}', '}}') + formatted_str = (''.join(['{name} =', fixed])).format(**self) + else: + formatted_str = self.repr_str.replace('{', '{{').replace('}', '}}').format(**self) + return formatted_str + + @classmethod + def from_object(cls, m_type, m_name=ANONYMOUS_SPEC_NAME): + """ + Factory method to create Spec objects + + This method evaluates the m_type object type and extract the Spec + attributes based on that type. + + Attributes: + m_type (obj): One of the datasource objects from :py:mod:`insights.core.spec_factory`. 
+ m_name (str): Name of the datasource object, if not provided ``ANONYMOUS_SPEC_NAME`` + + Raises: + AutologyError: Raises this error if it cannot determine the object type + + Returns: + Spec: Returns an object of type `Spec` + """ + m_members = inspect.getmembers(m_type) + m_spec = {'name': m_name, 'type': m_type} + m_spec['kind'] = next((v for k, v in m_members if k == "kind"), None) + m_spec['context'] = next((v for k, v in m_members if k == "context"), None) + m_spec['multi_output'] = next((v for k, v in m_members if k == "multi_output"), None) + if is_simple_command(m_type): + m_spec['type_name'] = SIMPLE_COMMAND_TYPE + m_spec['cmd'] = next((v for k, v in m_members if k == "cmd"), None) + m_spec['repr'] = 'simple_command("{cmd}")' + + elif is_simple_file(m_type): + m_spec['type_name'] = SIMPLE_FILE_TYPE + m_spec['path'] = next((v for k, v in m_members if k == "path"), None) + m_spec['repr'] = 'simple_file("{path}")' + + elif is_glob_file(m_type): + m_spec['type_name'] = GLOB_FILE_TYPE + m_spec['patterns'] = next((v for k, v in m_members if k == "patterns"), None) + m_spec['repr'] = 'glob_file({patterns})' + + elif is_first_file(m_type): + m_spec['type_name'] = FIRST_FILE_TYPE + m_spec['paths'] = next((v for k, v in m_members if k == "paths"), None) + m_spec['repr'] = 'first_file({paths})' + + elif is_listdir(m_type): + m_spec['type_name'] = LISTDIR_TYPE + m_spec['path'] = next((v for k, v in m_members if k == 'path'), None) + m_spec['repr'] = 'listdir("{path}")' + + elif is_foreach_execute(m_type): + m_spec['cmd'] = next((v for k, v in m_members if k == "cmd"), None) + m_spec['type_name'] = FOREACH_EXECUTE_TYPE + provider = next((v for k, v in m_members if k == "provider"), None) + if provider: + m_spec['provider'] = cls.from_object(provider) + + else: + m_spec['provider'] = Spec( + name=ANONYMOUS_SPEC_NAME, + type=None, + type_name=NONE_TYPE, + repr='NONE PROVIDER') + + m_spec['repr'] = 'foreach_execute("{cmd}", provider={provider})' + + elif is_first_of(m_type): + m_spec['type_name'] = FIRST_OF_TYPE + deps = next((v for k, v in m_members if k == "deps"), None) + m_spec['deps'] = [Spec.from_object(d) for d in deps] + deps_repr = ', '.join(['{0}'.format(d) for d in m_spec['deps']]) + m_spec['repr'] = 'first_of([{0}])'.format(deps_repr) + + elif is_command_with_args(m_type): + m_spec['type_name'] = COMMAND_WITH_ARGS_TYPE + m_spec['cmd'] = next((v for k, v in m_members if k == "cmd"), None) + provider = next((v for k, v in m_members if k == "provider"), None) + if provider: + m_spec['provider'] = cls.from_object(provider) + m_spec['repr'] = 'command_with_args("{cmd}", provider={provider})' + + elif is_foreach_collect(m_type): + m_spec['type_name'] = FOREACH_COLLECT_TYPE + m_spec['path'] = next((v for k, v in m_members if k == 'path'), None) + provider = next((v for k, v in m_members if k == "provider"), None) + if provider: + m_spec['provider'] = cls.from_object(provider) + m_spec['repr'] = 'foreach_collect("{path}", provider={provider})' + + elif m_type is None: + m_spec['type_name'] = NONE_TYPE + m_spec['repr'] = 'NONE TYPE' + + elif inspect.isfunction(m_type): + m_spec['type_name'] = FUNCTION_TYPE + f_members = inspect.getmembers(m_type) + m_spec['fxn_name'] = next((v for k, v in f_members if k == '__name__'), 'function') + m_spec['source'] = inspect.getsource(m_type) + m_spec['repr'] = '{fxn_name}()' + + elif isinstance(m_type, list): + m_spec['type_name'] = LIST_TYPE + m_spec['list'] = m_type + m_spec['repr'] = '{list}' + + elif isinstance(m_type, str): + m_spec['type_name'] = 
STRING_TYPE + m_spec['string'] = m_type + m_spec['repr'] = '{string}' + + else: + raise AutologyError('Unsupported name {} object {}, please add it'.format(m_name, m_type)) + + m_spec['repr'] = ''.join(['{name} = ', m_spec['repr']]) if m_name != ANONYMOUS_SPEC_NAME else m_spec['repr'] + + return cls(**m_spec) + + @property + def is_simple_command(self): + """ bool: True if this spec is a simple_command """ + return self.get('type_name', UNKNOWN_TYPE) == SIMPLE_COMMAND_TYPE + + @property + def is_simple_file(self): + """ bool: True if this spec is a simple_file """ + return self.get('type_name', UNKNOWN_TYPE) == SIMPLE_FILE_TYPE + + @property + def is_glob_file(self): + """ bool: True if this spec is a glob_file """ + return self.get('type_name', UNKNOWN_TYPE) == GLOB_FILE_TYPE + + @property + def is_foreach_execute(self): + """ bool: True if this spec is a foreach_execute """ + return self.get('type_name', UNKNOWN_TYPE) == FOREACH_EXECUTE_TYPE + + @property + def is_first_file(self): + """ bool: True if this spec is a first_file """ + return self.get('type_name', UNKNOWN_TYPE) == FIRST_FILE_TYPE + + @property + def is_first_of(self): + """ bool: True if this spec is a first_of """ + return self.get('type_name', UNKNOWN_TYPE) == FIRST_OF_TYPE + + @property + def is_foreach_collect(self): + """ bool: True if this spec is a foreach_collect """ + return self.get('type_name', UNKNOWN_TYPE) == FOREACH_COLLECT_TYPE + + @property + def is_listdir(self): + """ bool: True if this spec is a listdir """ + return self.get('type_name', UNKNOWN_TYPE) == LISTDIR_TYPE + + @property + def is_command_with_args(self): + """ bool: True if this spec is a command_with_args """ + return self.get('type_name', UNKNOWN_TYPE) == COMMAND_WITH_ARGS_TYPE + + @property + def is_function(self): + """ bool: True if this spec is a function """ + return self.get('type_name', UNKNOWN_TYPE) == FUNCTION_TYPE + + @property + def is_unknown_type(self): + """ bool: True if this spec has an unknown type """ + return self.get('type_name', UNKNOWN_TYPE) == UNKNOWN_TYPE + + +class RegistrySpecs(dict): + """ + Class to provide introspection for Registry objects in :py:class:`insights.specs.Specs` + + Dictionary of `Spec` objects with spec names as the keys and a Spec object as each value. + Each value has the set of attributes:: + + name - Name of the spec + type - The spec object + filterable - Whether or not the Registry object is ``filterable`` + multi_output - Whether or not the Registry object is ``multi_output`` + raw - Whether or not the Registry object is ``raw`` + """ + REGISTRY_REPR = '{name} = RegistryPoint(filterable={filterable}, multi_output={multi_output}, raw={raw})' + """ str: repr string for all Registry objects """ + + def __init__(self): + members = inspect.getmembers(insights.specs.Specs) + for m_name, m_type in members: + if m_name.startswith('__') or m_name == 'context_handlers': + # Don't care about dunder members or context handlers + continue + + m_members = inspect.getmembers(m_type) + self.update({ + m_name: Spec( + name=m_name, + type=m_type, + filterable=next((v for k, v in m_members if k == 'filterable'), None), + multi_output=next((v for k, v in m_members if k == 'multi_output'), None), + raw=next((v for k, v in m_members if k == 'raw'), None), + repr=self.REGISTRY_REPR + ) + }) + + def is_registered(self, spec_name): + """ bool: Whether ``spec_name`` is in Registry specs. 
""" + return spec_name in self + + +class DefaultSpecs(dict): + """ + Class to provide introspection for datasource objects in :py:class:`insights.specs.default.DefaultSpecs` + + Dictionary of `Spec` objects with spec names as the keys and a Spec object as each value. + Each Spec has different attributes depending on the type of spec. See the + :py:meth:`Spec.from_object` factory method for more information. + + """ + _SPECS_OBJECT_CLASS = InDefaultSpecs + """ obj: Datasource class to use for introspection """ + + def __init__(self): + for m_name, m_type in inspect.getmembers(self._SPECS_OBJECT_CLASS): + if (m_name.startswith('__') or m_name == 'context_handlers' or + m_name == 'registry' or str(m_type).startswith('insights.specs.Spec')): + # Don't care about dunder members or context handlers + # or registry entries + continue + + m_spec = Spec.from_object(m_type, m_name) + self.update({m_name: m_spec}) + + +class InsightsArchiveSpecs(DefaultSpecs): + """ + Class to provide introspection for datasource objects in + :py:class:`insights.specs.insights_archive.InsightsArchiveSpecs` + + Dictionary of `Spec` objects with spec names as the keys and a Spec object as each value. + Each Spec has different attributes depending on the type of spec. See the + :py:meth:`Spec.from_object` factory method for more information. + + """ + _SPECS_OBJECT_CLASS = InArchiveSpecs + """ obj: Datasource class to use for introspection """ + + pass + + +class SosSpecs(DefaultSpecs): + """ + Class to provide introspection for datasource objects in :py:class:`insights.specs.sos_archive.SosSpecs` + + Dictionary of `Spec` objects with spec names as the keys and a Spec object as each value. + Each Spec has different attributes depending on the type of spec. See the + :py:meth:`Spec.from_object` factory method for more information. + + """ + _SPECS_OBJECT_CLASS = InSosSpecs + """ obj: Datasource class to use for introspection """ + + pass + +if __name__ == "__main__": + specs = DefaultSpecs() + for k, v in specs.items(): + try: + print(v) + except Exception as e: + print('======= Error with spec: ', k) + print('repr_str :', v.repr_str) + for dk, dv in v.items(): + print('\t', dk, ":", dv) + raise diff --git a/insights/util/specs_catalog.py b/insights/util/specs_catalog.py new file mode 100644 index 000000000..e1fcda71b --- /dev/null +++ b/insights/util/specs_catalog.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +""" +This module uses the inspec module to introspec information about +all of the datasources and dynamically create a ``.rst`` document to be +included in the sphinx build process + +The output file is a listing of all default specs defined in +:py:mod:`insights.specs.default`. + +It can be called from the commandline to manually generate the file or +it may also be called inside the Sphinx *conf.py* `setup` function to +generate the documentation automatically. + +Command Line Use +---------------- + +This assumes that you have a working installation of insights-core +and python environment for insights-core development. + +To run from the command line simply execute this utility and provide +the name of the output file:: + + $ python -m insights.util.specs_catalog.main output_filename.rst + +""" +import argparse +from insights.util.autology.datasources import DefaultSpecs + +TEMPLATE = """ +Datasource Catalog +================== + +This catalog of specs provides the definitions for each of the specs the Insights Client +can collect. The format for this file is ``name = collection_type(collection_arguments)``. 
+A detailed description of the collection_types can be found in :ref:`datasources-ref`.
+A summary of the collection_types includes:
+
+* *simple_file* - collects the contents of a file
+* *simple_command* - collects the output from a command
+* *glob_file* - collects contents of all files matching the glob pattern
+* *first_file* - collects the contents of the first file detected in a list
+  of files
+* *listdir* - collects the output of the ``ls`` command for the file/glob argument
+* *foreach_execute* - collects the output of the command for each ``provider`` argument
+* *foreach_collect* - collects the contents of the path created by replacing
+  each element in the provider into the path
+* *first_of* - collects the contents of the first datasource in the list that returns data
+* *command_with_args* - collects the output of the command with each ``provider`` argument
+
+Some datasources are implemented as functions, and each one links to the details provided
+in the function-specific documentation. Generally functions are used as a ``provider`` to
+other datasources to, for instance, get a list of running processes of a particular program.
+
+Python code that implements these datasources is located in :py:mod:`insights.specs.default`.
+
+Datasources
+-----------
+
+Functions
+^^^^^^^^^
+
+.. hlist::
+""".strip()
+
+
+def blank_line(fh, number=1):
+    for l in range(number):
+        fh.write("\n")
+
+
+def main(filename):
+    defaultspecs = DefaultSpecs()
+    with open(filename, "w") as fh:
+        fh.write(TEMPLATE)
+        blank_line(fh, 2)
+        functions = [v for v in defaultspecs.values() if v.is_function]
+        for v in functions:
+            try:
+                fh.write('   * :py:func:`{fxn_name}() <insights.specs.default.DefaultSpecs.{fxn_name}>`\n'.format(fxn_name=v['fxn_name']))
+            except Exception:
+                print('Error with function spec: {name}'.format(name=v['fxn_name']))
+
+        fh.write('\n\nGeneral Datasources\n^^^^^^^^^^^^^^^^^^^\n\n::\n\n')
+
+        for k, v in defaultspecs.items():
+            try:
+                if not v.is_function:
+                    line = str(v)
+                    line = line.replace('\n', '\\n')
+                    fh.write('    {spec}\n'.format(spec=line))
+            except Exception:
+                print('Error with spec: {name}'.format(name=k))
+
+
+def parse_args():
+    p = argparse.ArgumentParser()
+    p.add_argument("out_filename", help="Name of the output file to write the specs catalog")
+    return p.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    main(args.out_filename)

From d4b89b83b0d3bb136177e89571c7820d57df075e Mon Sep 17 00:00:00 2001
From: Andrew Kofink
Date: Tue, 12 May 2020 14:27:15 -0400
Subject: [PATCH 055/892] Use determine_hostname instead of
 get_canonical_facts()['fqdn'] (#2587)

@jcrafts recommended this method instead of canonical facts to determine
the hostname of the client system. This fixes RHCLOUD-6495.
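
For illustration, the change boils down to the following minimal sketch (both
calls are taken verbatim from the diff below; the surrounding scaffolding is
abbreviated):

    # before: the hostname came from a full canonical-facts collection
    from insights.util.canonical_facts import get_canonical_facts
    hostname = get_canonical_facts().get('fqdn', '')

    # after: read the system hostname directly
    from insights.client.utilities import determine_hostname
    hostname = determine_hostname()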
Signed-off-by: Andrew Kofink
---
 insights/client/apps/compliance/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py
index 33999461a..f66a1d184 100644
--- a/insights/client/apps/compliance/__init__.py
+++ b/insights/client/apps/compliance/__init__.py
@@ -2,7 +2,7 @@ from insights.client.archive import InsightsArchive
 from insights.client.connection import InsightsConnection
 from insights.client.constants import InsightsConstants as constants
-from insights.util.canonical_facts import get_canonical_facts
+from insights.client.utilities import determine_hostname
 from logging import getLogger
 from platform import linux_distribution
 from re import findall
@@ -21,7 +21,7 @@ class ComplianceClient:
     def __init__(self, config):
         self.config = config
         self.conn = InsightsConnection(config)
-        self.hostname = get_canonical_facts().get('fqdn', '')
+        self.hostname = determine_hostname()
         self.archive = InsightsArchive(config)

     def oscap_scan(self):

From 31bd3f08ba0ccfe6c9d7678bc1cd411af19d90d6 Mon Sep 17 00:00:00 2001
From: Andrew Kofink
Date: Tue, 12 May 2020 16:17:48 -0400
Subject: [PATCH 056/892] encode() messages to call() (#2588)

Unicode strings on RHEL 6 (Python 2.6.6) that are split with shlex insert
null characters ('\x00'), which cause Popen to fail. call() may handle
this in the future, but for now compliance will be sure to encode any
strings passed to call().

Signed-off-by: Andrew Kofink
Co-authored-by: Jeremy Crafts
---
 insights/client/apps/compliance/__init__.py   | 4 ++--
 insights/tests/client/apps/test_compliance.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py
index f66a1d184..c183ed7d8 100644
--- a/insights/client/apps/compliance/__init__.py
+++ b/insights/client/apps/compliance/__init__.py
@@ -85,7 +85,7 @@ def profile_files(self):
         return glob("{0}*rhel{1}*.xml".format(POLICY_FILE_LOCATION, self.os_release()))

     def find_scap_policy(self, profile_ref_id):
-        rc, grep = call('grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files()), keep_rc=True)
+        rc, grep = call(('grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files())).encode(), keep_rc=True)
         if rc:
             logger.error('XML profile file not found matching ref_id {0}\n{1}\n'.format(profile_ref_id, grep))
             exit(constants.sig_kill_bad)
@@ -107,7 +107,7 @@ def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path=
         env = os.environ.copy()
         env.update({'TZ': 'UTC'})
         oscap_command = self.build_oscap_command(profile_ref_id, policy_xml, output_path, tailoring_file_path)
-        rc, oscap = call(oscap_command, keep_rc=True, env=env)
+        rc, oscap = call(oscap_command.encode(), keep_rc=True, env=env)
         if rc and rc != NONCOMPLIANT_STATUS:
             logger.error('Scan failed')
             logger.error(oscap)
diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py
index 672a66c63..c8a727f40 100644
--- a/insights/tests/client/apps/test_compliance.py
+++ b/insights/tests/client/apps/test_compliance.py
@@ -110,7 +110,7 @@ def test_run_scan(config, call):
     env = os.environ
     env.update({'TZ': 'UTC'})
     compliance_client.run_scan('ref_id', '/nonexistent', output_path)
-    call.assert_called_with("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent', keep_rc=True, env=env)
+    call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' 
/nonexistent').encode(), keep_rc=True, env=env) @patch("insights.client.apps.compliance.call", return_value=(1, 'bad things happened'.encode('utf-8'))) @@ -122,7 +122,7 @@ def test_run_scan_fail(config, call): env.update({'TZ': 'UTC'}) with raises(SystemExit): compliance_client.run_scan('ref_id', '/nonexistent', output_path) - call.assert_called_with("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent', keep_rc=True, env=env) + call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) @patch("insights.client.config.InsightsConfig") From ffb276e5870601f754a52949573c4812ee060e16 Mon Sep 17 00:00:00 2001 From: Jesse Jaggars Date: Wed, 13 May 2020 09:58:36 -0400 Subject: [PATCH 057/892] collect metrics exported by pcp if possible (#2570) Signed-off-by: Jesse Jaggars --- insights/specs/__init__.py | 1 + insights/specs/default.py | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 39d8aee95..7c6da4d8a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -447,6 +447,7 @@ class Specs(SpecSet): passenger_status = RegistryPoint() password_auth = RegistryPoint() pci_rport_target_disk_paths = RegistryPoint() + pcp_metrics = RegistryPoint() pcs_config = RegistryPoint() pcs_quorum_status = RegistryPoint() pcs_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index dd59d554a..3ed43a5cd 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -28,9 +28,11 @@ from insights.parsers.dnf_module import DnfModuleList from insights.combiners.cloud_provider import CloudProvider from insights.combiners.satellite_version import SatelliteVersion +from insights.combiners.services import Services from insights.components.rhel_version import IsRhel8 from insights.specs import Specs + from grp import getgrgid from os import stat from pwd import getpwuid @@ -728,6 +730,13 @@ def lsmod_all_names(broker): pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") + @datasource(Services, context=HostContext) + def pcp_enabled(broker): + if not broker[Services].is_on("pmproxy"): + raise SkipComponent("pmproxy not enabled") + + pcp_metrics = simple_command("/usr/bin/curl -s http://127.0.0.1:44322/metrics --connect-timeout 5", deps=[pcp_enabled]) + @datasource(ps_auxww, context=HostContext) def package_and_java(broker): """Command: package_and_java""" From 07d0a1743ebf3f41c162bb7250d3c10a1b68f521 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 13 May 2020 12:18:53 -0400 Subject: [PATCH 058/892] remove the KB access code paths (#2501) Signed-off-by: Jeremy Crafts --- insights/client/apps/aws/__init__.py | 131 ------------------ insights/client/config.py | 23 --- insights/client/constants.py | 4 - insights/client/phase/v1.py | 12 -- insights/client/utilities.py | 4 - insights/tests/client/apps/test_aws.py | 123 ---------------- .../client/phase/test_LEGACY_post_update.py | 4 +- .../client/phase/test_collect_and_upload.py | 2 - .../tests/client/phase/test_post_update.py | 4 +- 9 files changed, 2 insertions(+), 305 deletions(-) delete mode 100644 insights/client/apps/aws/__init__.py delete mode 100644 insights/tests/client/apps/test_aws.py diff --git a/insights/client/apps/aws/__init__.py 
b/insights/client/apps/aws/__init__.py deleted file mode 100644 index 18f5c50d2..000000000 --- a/insights/client/apps/aws/__init__.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -import base64 -import json -from requests import ConnectionError, Timeout -from requests.exceptions import HTTPError, MissingSchema -from ssl import SSLError -from urllib3.exceptions import MaxRetryError -from insights.client.connection import InsightsConnection -from insights.client.schedule import get_scheduler -from insights.client.constants import InsightsConstants as constants -from insights.client.utilities import write_to_disk - -logger = logging.getLogger(__name__) -NETWORK = constants.custom_network_log_level - -IDENTITY_URI = 'http://169.254.169.254/latest/dynamic/instance-identity' -IDENTITY_DOC_URI = IDENTITY_URI + '/document' -IDENTITY_PKCS7_URI = IDENTITY_URI + '/pkcs7' - - -def aws_main(config): - ''' - Process AWS entitlements with Hydra - ''' - if config.authmethod != 'BASIC': - logger.error('AWS entitlement is only available when BASIC auth is used.\n' - 'Set auto_config=False and authmethod=BASIC in %s.', config.conf) - return False - # workaround for a workaround - # the hydra API doesn't accept the legacy cert - # and legacy_upload=False currently just - # redirects to the classic API with /platform added - # so if doing AWS entitlement, use cert_verify=True - config.cert_verify = True - conn = InsightsConnection(config) - - bundle = get_aws_identity(conn) - if not bundle: - return False - succ = post_to_hydra(conn, bundle) - if not succ: - return False - # register with insights if this option - # wasn't specified - if not config.portal_access_no_insights: - enable_delayed_registration(config) - return True - - -def get_uri(conn, uri): - ''' - Fetch information from URIs - ''' - try: - logger.log(NETWORK, 'GET %s', uri) - res = conn.session.get(uri, timeout=conn.config.http_timeout) - except (ConnectionError, Timeout) as e: - logger.error(e) - logger.error('Could not reach %s', uri) - return None - logger.log(NETWORK, 'Status code: %s', res.status_code) - return res - - -def get_aws_identity(conn): - ''' - Get data from AWS - ''' - logger.info('Fetching AWS identity information.') - doc_res = get_uri(conn, IDENTITY_DOC_URI) - pkcs7_res = get_uri(conn, IDENTITY_PKCS7_URI) - if not (doc_res and pkcs7_res) or not (doc_res.ok and pkcs7_res.ok): - logger.error('Error getting identity information.') - return None - logger.debug('Identity information obtained successfully.') - identity_doc = base64.b64encode(doc_res.content) - - return { - 'document': identity_doc.decode('utf-8'), - 'pkcs7': pkcs7_res.text - } - - -def post_to_hydra(conn, data): - ''' - Post data to Hydra - ''' - logger.info('Submitting identity information to Red Hat.') - hydra_endpoint = conn.config.portal_access_hydra_url - - # POST to hydra - try: - json_data = json.dumps(data) - logger.log(NETWORK, 'POST %s', hydra_endpoint) - logger.log(NETWORK, 'POST body: %s', json_data) - res = conn.session.post(hydra_endpoint, data=json_data, timeout=conn.config.http_timeout) - except MissingSchema as e: - logger.error(e) - return False - except (ConnectionError, Timeout, SSLError, MaxRetryError) as e: - logger.error(e) - logger.error('Could not reach %s', hydra_endpoint) - return False - logger.log(NETWORK, 'Status code: %s', res.status_code) - try: - res.raise_for_status() - except HTTPError as e: - # if failure, - # error, return False - logger.error(e) - try: - res_json = res.json() - err_msg = res_json.get('message', '') - 
err_details = res_json.get('detailMessage', '') - logger.error('%s\n%s', err_msg, err_details) - except ValueError: - logger.error('Could not parse JSON response.') - return False - logger.info('Entitlement information has been sent.') - return True - - -def enable_delayed_registration(config): - ''' - Write a marker file to allow client to know that - it should attempt to register when it runs - ''' - logger.debug('Writing to %s', constants.register_marker_file) - write_to_disk(constants.register_marker_file) - job = get_scheduler(config) - job.set_daily() diff --git a/insights/client/config.py b/insights/client/config.py index afa2bbfc8..4dd8febd2 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -390,25 +390,6 @@ 'const': True, 'nargs': '?', 'group': 'platform' - }, - # AWS options - 'portal_access': { - 'default': False, - 'opt': ['--portal-access'], - 'group': 'platform', - 'action': 'store_true', - 'help': 'Entitle an AWS instance with Red Hat and register with Red Hat Insights' - }, - 'portal_access_no_insights': { - 'default': False, - 'opt': ['--portal-access-no-insights'], - 'group': 'platform', - 'action': 'store_true', - 'help': 'Entitle an AWS instance with Red Hat, but do not register with Red Hat Insights' - }, - 'portal_access_hydra_url': { - # non-CLI - 'default': constants.default_portal_access_hydra_url } } @@ -638,8 +619,6 @@ def _validate_options(self): if self.enable_schedule and self.disable_schedule: raise ValueError( 'Conflicting options: --enable-schedule and --disable-schedule') - if self.portal_access and self.portal_access_no_insights: - raise ValueError('Conflicting options: --portal-access and --portal-access-no-insights') if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') @@ -706,8 +685,6 @@ def _imply_options(self): self.legacy_upload = False if self.payload and (self.logging_file == constants.default_log_file): self.logging_file = constants.default_payload_log - if os.path.exists(constants.register_marker_file): - self.register = True if self.output_dir or self.output_file: # do not upload in this case self.no_upload = True diff --git a/insights/client/constants.py b/insights/client/constants.py index 412c37df4..a39a14acd 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -44,8 +44,4 @@ class InsightsConstants(object): sig_kill_bad = 101 cached_branch_info = os.path.join(default_conf_dir, '.branch_info') pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid') - # this file is used to attempt registration when the client starts, implies --register - register_marker_file = os.path.join(os.sep, 'var', 'run', 'insights-client-try-register') - # default Hydra endpoint for posting entitlements information for AWS - default_portal_access_hydra_url = 'https://access.redhat.com/hydra/rest/accounts/entitle' valid_compressors = ("gz", "xz", "bz2", "none") diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 79dccb901..5e322a706 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -13,7 +13,6 @@ from insights.client.utilities import validate_remove_file, print_egg_versions, write_to_disk from insights.client.schedule import get_scheduler from insights.client.apps.compliance import ComplianceClient -from insights.client.apps.aws import aws_main logger = logging.getLogger(__name__) @@ -128,11 +127,6 @@ def post_update(client, config): logger.debug("CONFIG: %s", config) print_egg_versions() - # --registering an 
AWS machine - if config.portal_access or config.portal_access_no_insights: - logger.debug('Entitling an AWS host. Bypassing registration check.') - return - if config.show_results: try: client.show_results() @@ -265,12 +259,6 @@ def post_update(client, config): def collect_and_output(client, config): # last phase, delete PID file on exit atexit.register(write_to_disk, constants.pidfile, delete=True) - # register cloud (aws) - if config.portal_access or config.portal_access_no_insights: - if aws_main(config): - sys.exit(constants.sig_kill_ok) - else: - sys.exit(constants.sig_kill_bad) # --compliance was called if config.compliance: config.payload, config.content_type = ComplianceClient(config).oscap_scan() diff --git a/insights/client/utilities.py b/insights/client/utilities.py index 6bd356b26..750bab81c 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -101,10 +101,6 @@ def delete_registered_file(): def delete_unregistered_file(): for f in constants.unregistered_files: write_to_disk(f, delete=True) - # this function only called when machine is registered, - # so while registering, delete this file too. we only - # need it around until we're registered - write_to_disk(constants.register_marker_file, delete=True) def delete_cache_files(): diff --git a/insights/tests/client/apps/test_aws.py b/insights/tests/client/apps/test_aws.py deleted file mode 100644 index 14a5b054f..000000000 --- a/insights/tests/client/apps/test_aws.py +++ /dev/null @@ -1,123 +0,0 @@ -import json -from requests import ConnectionError, Timeout -from requests.exceptions import HTTPError -from ssl import SSLError -from urllib3.exceptions import MaxRetryError -from mock.mock import patch, Mock -from insights.client.config import InsightsConfig -from insights.client.connection import InsightsConnection -from insights.client.apps import aws - - -@patch('insights.client.apps.aws.get_aws_identity') -@patch('insights.client.apps.aws.post_to_hydra') -@patch('insights.client.apps.aws.enable_delayed_registration') -def test_aws_main(enable_delayed_registration, post_to_hydra, get_aws_identity): - ''' - Test the flow of the main routine for success and failure - ''' - # portal access with insights registration - conf = InsightsConfig(portal_access=True) - assert aws.aws_main(conf) - get_aws_identity.assert_called_once() - post_to_hydra.assert_called_once() - enable_delayed_registration.assert_called_once() - - get_aws_identity.reset_mock() - post_to_hydra.reset_mock() - enable_delayed_registration.reset_mock() - - # portal access, no insights registration - conf = InsightsConfig(portal_access_no_insights=True) - assert aws.aws_main(conf) - get_aws_identity.assert_called_once() - post_to_hydra.assert_called_once() - enable_delayed_registration.assert_not_called() - - get_aws_identity.reset_mock() - post_to_hydra.reset_mock() - enable_delayed_registration.reset_mock() - - # identity call fails - returns false - conf = InsightsConfig(portal_access=True) - get_aws_identity.return_value = None - result = aws.aws_main(conf) - assert not result - post_to_hydra.assert_not_called() - enable_delayed_registration.assert_not_called() - - get_aws_identity.reset_mock() - post_to_hydra.reset_mock() - enable_delayed_registration.reset_mock() - - # hydra call fails - returns false - conf = InsightsConfig(portal_access=True) - post_to_hydra.return_value = False - result = aws.aws_main(conf) - assert not result - enable_delayed_registration.assert_not_called() - - -def test_get_uri(): - ''' - Test that GET success and 
failure handled properly - ''' - pass - - -@patch('insights.client.apps.aws.get_uri') -def test_get_aws_identity(get_uri): - ''' - Test that AWS identity success and failure handled properly - ''' - # returns OK - get_uri.side_effect = [Mock(ok=True, content=b'{"test": "test"}'), Mock(ok=True, content="test")] - conn = InsightsConnection(InsightsConfig()) - assert aws.get_aws_identity(conn) - - # URIs don't return OK status, return None - get_uri.side_effect = [Mock(ok=False, content=None), Mock(ok=False, content=None)] - assert aws.get_aws_identity(conn) is None - - # URIs can't connect, return None - get_uri.side_effect = [None, None] - assert aws.get_aws_identity(conn) is None - - -@patch('insights.client.apps.aws.logger.error') -def test_post_to_hydra(logger_error): - ''' - Test that POST to Hydra success and failure handled properly - ''' - conn = InsightsConnection(InsightsConfig()) - error_msg = '{"message":"error", "detailMessage":"error details"}' - error_json = json.loads(error_msg) - # successful POST - conn.session.post = Mock(return_value=Mock(status_code=200)) - assert aws.post_to_hydra(conn, '') - conn.session.post.assert_called_once() - - # connection error - conn.session.post = Mock(side_effect=(ConnectionError, Timeout, SSLError, MaxRetryError)) - assert not aws.post_to_hydra(conn, '') - conn.session.post.assert_called_once() - - # bad response w/ JSON - conn.session.post = Mock( - return_value=Mock(status_code=500, - text=error_msg, - json=Mock(return_value=error_json), - raise_for_status=Mock(return_value='', side_effect=HTTPError))) - assert not aws.post_to_hydra(conn, '') - conn.session.post.assert_called_once() - logger_error.assert_called_with('%s\n%s', 'error', 'error details') - - # bad response w/ no JSON - conn.session.post = Mock( - return_value=Mock(status_code=500, - text='', - json=Mock(side_effect=ValueError), - raise_for_status=Mock(return_value='', side_effect=HTTPError))) - assert not aws.post_to_hydra(conn, '') - conn.session.post.assert_called_once() - logger_error.assert_called_with('Could not parse JSON response.') diff --git a/insights/tests/client/phase/test_LEGACY_post_update.py b/insights/tests/client/phase/test_LEGACY_post_update.py index a6149d865..34d394e0d 100644 --- a/insights/tests/client/phase/test_LEGACY_post_update.py +++ b/insights/tests/client/phase/test_LEGACY_post_update.py @@ -17,9 +17,7 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.register": False, "return_value.load_all.return_value.diagnosis": None, "return_value.load_all.return_value.show_results": False, - "return_value.load_all.return_value.check_results": False, - "return_value.load_all.return_value.portal_access": False, - "return_value.load_all.return_value.portal_access_no_insights": False}) + "return_value.load_all.return_value.check_results": False}) return patcher(old_function) diff --git a/insights/tests/client/phase/test_collect_and_upload.py b/insights/tests/client/phase/test_collect_and_upload.py index 14435d878..6a7e3add1 100644 --- a/insights/tests/client/phase/test_collect_and_upload.py +++ b/insights/tests/client/phase/test_collect_and_upload.py @@ -20,8 +20,6 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.keep_archive": False, "return_value.load_all.return_value.register": False, "return_value.load_all.return_value.diagnosis": None, - "return_value.load_all.return_value.portal_access": False, - "return_value.load_all.return_value.portal_access_no_insights": False, 
"return_value.load_all.return_value.payload": None, "return_value.load_all.return_value.compliance": False, "return_value.load_all.return_value.output_dir": None, diff --git a/insights/tests/client/phase/test_post_update.py b/insights/tests/client/phase/test_post_update.py index 5ecac69fc..979c4a568 100644 --- a/insights/tests/client/phase/test_post_update.py +++ b/insights/tests/client/phase/test_post_update.py @@ -20,9 +20,7 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.reregister": False, "return_value.load_all.return_value.payload": None, "return_value.load_all.return_value.show_results": False, - "return_value.load_all.return_value.check_results": False, - "return_value.load_all.return_value.portal_access": False, - "return_value.load_all.return_value.portal_access_no_insights": False}) + "return_value.load_all.return_value.check_results": False}) return patcher(old_function) # DRY this at some point... for the love of god From 89ff3ae0eaf85934a765f0d3fc62549051ca173b Mon Sep 17 00:00:00 2001 From: Joel Date: Wed, 13 May 2020 13:02:52 -0400 Subject: [PATCH 059/892] parser: add parser for yum list available (#2523) Add a parser to provide the output of yum list available Signed-off-by: Joel Savitz --- .../parsers/tests/test_yum_list_available.py | 159 ++++++++++++++++++ insights/parsers/yum_list.py | 26 ++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 5 files changed, 185 insertions(+), 3 deletions(-) create mode 100644 insights/parsers/tests/test_yum_list_available.py diff --git a/insights/parsers/tests/test_yum_list_available.py b/insights/parsers/tests/test_yum_list_available.py new file mode 100644 index 000000000..d65e984e5 --- /dev/null +++ b/insights/parsers/tests/test_yum_list_available.py @@ -0,0 +1,159 @@ +import pytest + +from insights import SkipComponent +from insights.parsers.yum_list import YumListAvailable +from insights.tests import context_wrap + + +EMPTY = """ +Available Packages +""".strip() + +EXPIRED_EMPTY = """ +Repodata is over 2 weeks old. Install yum-cron? Or run: yum makecache fast +Available Packages +""".strip() + +EXPIRED_WITH_DATA = """ +Repodata is over 2 weeks old. Install yum-cron? 
Or run: yum makecache fast +Available Packages +bash.x86_64 4.4.23-1.fc28 @updates +""".strip() + +SIMPLE = """ +Available Packages +bash.x86_64 4.4.23-1.fc28 @updates +""".strip() + +WRAPPED_LINE = """ +Available Packages +NetworkManager-bluetooth.x86_64 1:1.10.10-1.fc28 @updates +NetworkManager-config-connectivity-fedora.noarch + 1:1.10.10-1.fc28 @updates +NetworkManager-glib.x86_64 1:1.10.10-1.fc28 @updates +NetworkManager-libnm.x86_64 1:1.10.10-1.fc28 @updates +clucene-contribs-lib.x86_64 2.3.3.4-31.20130812.e8e3d20git.fc28 + @fedora +clucene-core.x86_64 2.3.3.4-31.20130812.e8e3d20git.fc28 + @fedora +""".strip() + +COMMANDLINE = """ +Available Packages +jdk1.8.0_121.x86_64 2000:1.8.0_121-fcs @@commandline +""" + +HEADER_FOOTER_JUNK = """ +Loaded plugins: product-id, search-disabled-repos, subscription-manager +Available Packages +GConf2.x86_64 3.2.6-8.el7 @rhel-7-server-rpms +GeoIP.x86_64 1.5.0-11.el7 @anaconda/7.3 +ImageMagick.x86_64 6.7.8.9-15.el7_2 @rhel-7-server-rpms +NetworkManager.x86_64 1:1.4.0-17.el7_3 installed +NetworkManager.x86_64 1:1.8.0-9.el7 installed +NetworkManager-config-server.noarch + 1:1.8.0-9.el7 installed +Uploading Enabled Repositories Report +Loaded plugins: priorities, product-id, rhnplugin, rhui-lb, subscription- + : manager, versionlock +""" + + +def test_empty(): + ctx = context_wrap(EMPTY) + with pytest.raises(SkipComponent): + YumListAvailable(ctx) + + +def test_simple(): + ctx = context_wrap(SIMPLE) + rpms = YumListAvailable(ctx) + rpm = rpms.newest("bash") + assert rpm is not None + assert rpm.epoch == "0" + assert rpm.version == "4.4.23" + assert rpm.release == "1.fc28" + assert rpm.arch == "x86_64" + assert rpm.repo == "updates" + + +def test_expired_cache_with_data(): + ctx = context_wrap(EXPIRED_WITH_DATA) + rpms = YumListAvailable(ctx) + assert rpms.expired_cache is True + + +def test_expired_cache_no_data(): + ctx = context_wrap(EXPIRED_EMPTY) + with pytest.raises(SkipComponent): + YumListAvailable(ctx) + + +def test_wrapped(): + ctx = context_wrap(WRAPPED_LINE) + rpms = YumListAvailable(ctx) + rpm = rpms.newest("NetworkManager-bluetooth") + assert rpm is not None + assert rpm.epoch == "1" + assert rpm.version == "1.10.10" + assert rpm.release == "1.fc28" + assert rpm.arch == "x86_64" + assert rpm.repo == "updates" + + rpm = rpms.newest("NetworkManager-config-connectivity-fedora") + assert rpm is not None + assert rpm.epoch == "1" + assert rpm.version == "1.10.10" + assert rpm.release == "1.fc28" + assert rpm.arch == "noarch" + assert rpm.repo == "updates" + + rpm = rpms.newest("clucene-contribs-lib") + assert rpm is not None + assert rpm.epoch == "0" + assert rpm.version == "2.3.3.4" + assert rpm.release == "31.20130812.e8e3d20git.fc28" + assert rpm.arch == "x86_64" + assert rpm.repo == "fedora" + + rpm = rpms.newest("clucene-core") + assert rpm is not None + assert rpm.epoch == "0" + assert rpm.version == "2.3.3.4" + assert rpm.release == "31.20130812.e8e3d20git.fc28" + assert rpm.arch == "x86_64" + assert rpm.repo == "fedora" + + +def test_commandline(): + ctx = context_wrap(COMMANDLINE) + rpms = YumListAvailable(ctx) + + rpm = rpms.newest("jdk1.8.0_121") + assert rpm is not None + assert rpm.epoch == "2000" + assert rpm.version == "1.8.0_121" + assert rpm.release == "fcs" + assert rpm.arch == "x86_64" + assert rpm.repo == "commandline" + + +def test_multiple_stanza(): + ctx = context_wrap(HEADER_FOOTER_JUNK) + rpms = YumListAvailable(ctx) + + rpm = rpms.newest("GConf2") + assert rpm is not None + assert rpm.epoch == "0" + assert rpm.version == 
"3.2.6" + assert rpm.release == "8.el7" + assert rpm.arch == "x86_64" + assert rpm.repo == "rhel-7-server-rpms" + + rpm = rpms.newest("NetworkManager-config-server") + assert rpm is not None + assert rpm.epoch == "1" + assert rpm.version == "1.8.0" + assert rpm.release == "9.el7" + assert rpm.arch == "noarch" + assert rpm.repo == "installed" diff --git a/insights/parsers/yum_list.py b/insights/parsers/yum_list.py index 2ee562a3b..481972661 100644 --- a/insights/parsers/yum_list.py +++ b/insights/parsers/yum_list.py @@ -7,6 +7,9 @@ YumListInstalled - Command ``yum list installed`` ------------------------------------------------- +YumListAvailable - Command ``yum list available`` +------------------------------------------------- + """ from collections import defaultdict @@ -41,17 +44,20 @@ class YumListBase(CommandParser, RpmList): contain a ``.repo`` attribute. """ - def __init__(self, context): + def __init__(self, context, package_status): self.expired_cache = False """bool: Indicates if the yum repo cache is expired.""" + self.package_status = package_status + """str: Indicates if the list is of installed or available packages.""" + super(YumListBase, self).__init__(context) def _find_start(self, content): for i, c in enumerate(content): if 'Repodata is over 2 weeks old' in c: self.expired_cache = True - elif c == "Installed Packages": + elif c == self.package_status + " Packages": break return i + 1 @@ -197,4 +203,18 @@ class YumListInstalled(YumListBase): >>> rpm1 < rpm2 True """ - pass + def __init__(self, context): + super(YumListInstalled, self).__init__(context, "Installed") + + +@parser(Specs.yum_list_available) +class YumListAvailable(YumListBase): + """ + The ``YumListAvailable`` class parses the output of the ``yum list available`` + command. Each line is parsed and stored in a ``YumListRpm`` object. + + Input and usage examples are identical to ``YumListInstalled`` but with + "Installed" replaced with "Available" wherever applicable. 
+ """ + def __init__(self, context): + super(YumListAvailable, self).__init__(context, "Available") diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 7c6da4d8a..6879aadc4 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -657,6 +657,7 @@ class Specs(SpecSet): xfs_info = RegistryPoint(multi_output=True) xinetd_conf = RegistryPoint(multi_output=True) yum_conf = RegistryPoint() + yum_list_available = RegistryPoint() yum_list_installed = RegistryPoint() yum_log = RegistryPoint() yum_repolist = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 3ed43a5cd..84cfa21d8 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -1049,6 +1049,7 @@ def xfs_mounts(broker): xfs_info = foreach_execute(xfs_mounts, "/usr/sbin/xfs_info %s") xinetd_conf = glob_file(["/etc/xinetd.conf", "/etc/xinetd.d/*"]) yum_conf = simple_file("/etc/yum.conf") + yum_list_available = simple_command("yum -C --noplugins list available") yum_list_installed = simple_command("yum -C --noplugins list installed") yum_log = simple_file("/var/log/yum.log") yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 6000ad2ba..fd92fa309 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -294,5 +294,6 @@ class InsightsArchiveSpecs(Specs): virt_what = simple_file("insights_commands/virt-what") woopsie = simple_file("insights_commands/find_.var.crash_.var.tmp_-path_.reports-_.whoopsie-report") yum_list_installed = simple_file("insights_commands/yum_-C_--noplugins_list_installed") + yum_list_available = simple_file("insights_commands/yum_-C_--noplugins_list_available") yum_repolist = first_file(["insights_commands/yum_-C_--noplugins_repolist", "insights_commands/yum_-C_repolist"]) zdump_v = simple_file("insights_commands/zdump_-v_.etc.localtime_-c_2019_2039") From 592529350f2fab4b95f9e4f9eeafd8a42ee7b2e1 Mon Sep 17 00:00:00 2001 From: Xiaoxue Wang Date: Thu, 14 May 2020 16:55:51 +0800 Subject: [PATCH 060/892] Fix KeyError in parser blkid (#2591) Signed-off-by: XiaoXue Wang --- insights/parsers/blkid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/parsers/blkid.py b/insights/parsers/blkid.py index b04a33525..b7cb78da7 100644 --- a/insights/parsers/blkid.py +++ b/insights/parsers/blkid.py @@ -74,4 +74,4 @@ def parse_content(self, content): def filter_by_type(self, fs_type): """list: Returns a list of all entries where TYPE = ``fs_type``.""" - return [row for row in self.data if row['TYPE'] == fs_type] + return [row for row in self.data if row.get('TYPE') == fs_type] From 0a8f736bde28a4114870aa9f2c859c0a115b8812 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 14 May 2020 10:54:18 -0500 Subject: [PATCH 061/892] Add markers to syslog format tests (#2593) * Add new marker syslog_format to syslog formatter tests to allow turning them off when testing in a container that doesn't allow calls to ``getuser`` Signed-off-by: Bob Fahr --- insights/tests/test_formats.py | 3 +++ setup.cfg | 2 ++ 2 files changed, 5 insertions(+) diff --git a/insights/tests/test_formats.py b/insights/tests/test_formats.py index c61c1237a..67f37f7f3 100644 --- a/insights/tests/test_formats.py +++ b/insights/tests/test_formats.py @@ -1,3 +1,4 @@ +import pytest from six import StringIO from insights import dr, make_fail, rule from insights.formats.text import HumanReadableFormat @@ -41,6 +42,7 @@ def 
test_json_format(): assert "bar" in data +@pytest.mark.syslog_format def test_syslog_format_no_archive(): broker = dr.Broker() output = StringIO() @@ -52,6 +54,7 @@ def test_syslog_format_no_archive(): assert SL_CMD in data +@pytest.mark.syslog_format def test_syslog_format_archive(): broker = dr.Broker() output = StringIO() diff --git a/setup.cfg b/setup.cfg index a22188415..c1000128b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,4 +1,6 @@ [tool:pytest] +markers = + syslog_format: marks tests for syslog format (deselect with '-m "not syslog_format"') # Look for tests only in tests directories. python_files = "insights/tests/*" "insights/parsers/tests/*" "insights/combiners/tests/*" "insights/parsr/tests/*" "insights/parsr/examples/tests/*" "insights/parsr/query/tests/*" "insights/archive/test.py" "insights/components/tests/*" "insights/parsr/tests/*" # Display summary info for (s)skipped, (X)xpassed, (x)xfailed, (f)failed and (e)errored tests From c612c58d89131187f95e26156fb0f3e14e3d6d96 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 14 May 2020 13:51:16 -0400 Subject: [PATCH 062/892] add exception handling to egg fetch (#2522) Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 2c47abb57..3027eeb3c 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -7,6 +7,7 @@ import shutil import sys from subprocess import Popen, PIPE +from requests import ConnectionError from .. import package_info from . import client @@ -170,13 +171,18 @@ def _fetch(self, path, etag_file, target_path, force): # If the etag was found and we are not force fetching # Then add it to the request logger.log(NETWORK, "GET %s", url) - if current_etag and not force: - logger.debug('Requesting new file with etag %s', current_etag) - etag_headers = {'If-None-Match': current_etag} - response = self.session.get(url, headers=etag_headers, timeout=self.config.http_timeout) - else: - logger.debug('Found no etag or forcing fetch') - response = self.session.get(url, timeout=self.config.http_timeout) + try: + if current_etag and not force: + logger.debug('Requesting new file with etag %s', current_etag) + etag_headers = {'If-None-Match': current_etag} + response = self.session.get(url, headers=etag_headers, timeout=self.config.http_timeout) + else: + logger.debug('Found no etag or forcing fetch') + response = self.session.get(url, timeout=self.config.http_timeout) + except ConnectionError as e: + logger.error(e) + logger.error('The Insights API could not be reached.') + return False # Debug information logger.debug('Status code: %d', response.status_code) From bc8be15a76ef5661e18a7feb031967a9d9a0ae4a Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Tue, 19 May 2020 15:23:50 +0800 Subject: [PATCH 063/892] Add parser for file /etc/cni/net.d/87-podman-bridge.conflist (#2592) Signed-off-by: shlao Co-authored-by: Xiangce Liu --- .../cni_podman_bridge_conf.rst | 3 + insights/parsers/cni_podman_bridge_conf.py | 68 +++++++++++++++++++ .../tests/test_cni_podman_bridge_conf.py | 63 +++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 136 insertions(+) create mode 100644 docs/shared_parsers_catalog/cni_podman_bridge_conf.rst create mode 100644 insights/parsers/cni_podman_bridge_conf.py create mode 100644 insights/parsers/tests/test_cni_podman_bridge_conf.py diff --git 
a/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst b/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst new file mode 100644 index 000000000..8a06d8236 --- /dev/null +++ b/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cni_podman_bridge_conf + :members: + :show-inheritance: diff --git a/insights/parsers/cni_podman_bridge_conf.py b/insights/parsers/cni_podman_bridge_conf.py new file mode 100644 index 000000000..eecd57b34 --- /dev/null +++ b/insights/parsers/cni_podman_bridge_conf.py @@ -0,0 +1,68 @@ +""" +CNIPodmanBridgeConf - file ``/etc/cni/net.d/87-podman-bridge.conflist`` +======================================================================= + +This parser converts file ``/etc/cni/net.d/87-podman-bridge.conflist`` +into a dictionary that matches the JSON string in the file. + +Sample file content:: + + { + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "ranges": [ + [ + { + "subnet": "10.12.0.0/16", + "gateway": "10.12.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall", + "backend": "iptables" + }, + { + "type": "tuning" + } + ] + } + + Examples: + >>> len(cni_podman_bridge_conf["plugins"]) + 4 + >>> cni_podman_bridge_conf["plugins"][0]["ipMasq"] + True +""" + +from insights.specs import Specs +from insights import JSONParser, parser + + +@parser(Specs.cni_podman_bridge_conf) +class CNIPodmanBridgeConf(JSONParser): + """ + Class for file: /etc/cni/net.d/87-podman-bridge.conflist + """ + pass diff --git a/insights/parsers/tests/test_cni_podman_bridge_conf.py b/insights/parsers/tests/test_cni_podman_bridge_conf.py new file mode 100644 index 000000000..fa56ca90a --- /dev/null +++ b/insights/parsers/tests/test_cni_podman_bridge_conf.py @@ -0,0 +1,63 @@ +import doctest + +from insights.tests import context_wrap +from insights.parsers import cni_podman_bridge_conf +from insights.parsers.cni_podman_bridge_conf import CNIPodmanBridgeConf + +PODMAN_CNI_FILE = ''' +{ + "cniVersion": "0.4.0", + "name": "podman", + "plugins": [ + { + "type": "bridge", + "bridge": "cni-podman0", + "isGateway": true, + "ipMasq": true, + "ipam": { + "type": "host-local", + "routes": [ + { + "dst": "0.0.0.0/0" + } + ], + "ranges": [ + [ + { + "subnet": "10.12.0.0/16", + "gateway": "10.12.0.1" + } + ] + ] + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + }, + { + "type": "firewall", + "backend": "iptables" + }, + { + "type": "tuning" + } + ] +} +'''.strip() + + +def test_doc_examples(): + env = { + 'cni_podman_bridge_conf': CNIPodmanBridgeConf(context_wrap(PODMAN_CNI_FILE)), + } + failed, total = doctest.testmod(cni_podman_bridge_conf, globs=env) + assert failed == 0 + + +def test_cni_podman_bridge_conf(): + conf = CNIPodmanBridgeConf(context_wrap(PODMAN_CNI_FILE)) + assert len(conf["plugins"]) == 4 + assert conf["plugins"][3]["type"] == "tuning" diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 6879aadc4..5d68e3884 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -75,6 +75,7 @@ class Specs(SpecSet): cloud_init_log = RegistryPoint(filterable=True) cluster_conf = RegistryPoint(filterable=True) cmdline = RegistryPoint() + cni_podman_bridge_conf = RegistryPoint() cobbler_modules_conf = RegistryPoint() 
cobbler_settings = RegistryPoint() corosync = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 84cfa21d8..4fdc8af43 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -193,6 +193,7 @@ def is_ceph_monitor(broker): cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") cmdline = simple_file("/proc/cmdline") + cni_podman_bridge_conf = simple_file("/etc/cni/net.d/87-podman-bridge.conflist") cpe = simple_file("/etc/system-release-cpe") # are these locations for different rhel versions? cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) From 65c86b2075e7633522a8365d62d9d738be35d74b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 19 May 2020 18:01:19 +0800 Subject: [PATCH 064/892] Add parser "corosync_cmapctl" (#2598) * Add parser "corosync_cmapctl" Signed-off-by: Huanhuan Li * Fix flake8 error Signed-off-by: Huanhuan Li Signed-off-by: Huanhuan Li * Adjust format Signed-off-by: Huanhuan Li * Replace LegacyItemAccess with dict Signed-off-by: Huanhuan Li * Return "stats.schedmiss" by function to avoid redundant data Signed-off-by: Huanhuan Li * Fix python2 error Signed-off-by: Huanhuan Li --- .../corosync_cmapctl.rst | 3 + insights/parsers/corosync_cmapctl.py | 59 ++++++++++++ .../parsers/tests/test_corosync_cmapctl.py | 92 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 6 ++ insights/specs/insights_archive.py | 1 + 6 files changed, 162 insertions(+) create mode 100644 docs/shared_parsers_catalog/corosync_cmapctl.rst create mode 100644 insights/parsers/corosync_cmapctl.py create mode 100644 insights/parsers/tests/test_corosync_cmapctl.py diff --git a/docs/shared_parsers_catalog/corosync_cmapctl.rst b/docs/shared_parsers_catalog/corosync_cmapctl.rst new file mode 100644 index 000000000..8837ad1d5 --- /dev/null +++ b/docs/shared_parsers_catalog/corosync_cmapctl.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.corosync_cmapctl + :members: + :show-inheritance: diff --git a/insights/parsers/corosync_cmapctl.py b/insights/parsers/corosync_cmapctl.py new file mode 100644 index 000000000..445093c59 --- /dev/null +++ b/insights/parsers/corosync_cmapctl.py @@ -0,0 +1,59 @@ +""" +CorosyncCmapctl - Command ``corosync-cmapctl [params]`` +======================================================= + +This module parses the output of the ``corosync-cmapctl [params]`` command. +""" + +from insights import parser, CommandParser +from insights.parsers import SkipException, ParseException +from insights.specs import Specs + + +@parser(Specs.corosync_cmapctl) +class CorosyncCmapctl(CommandParser, dict): + """ + Class for parsing the `/usr/sbin/corosync-cmapctl [params]` command. + All lines are stored in the dictionary with the left part of the equal + sign without parentheses info as the key and the right part of equal sign + as the value.
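In practice a downstream rule can treat this parser as a plain dict. A minimal consumer sketch; the rule name and the 2000 ms threshold below are illustrative assumptions, not part of this patch::

    from insights import rule, make_fail, make_pass
    from insights.parsers.corosync_cmapctl import CorosyncCmapctl

    @rule(CorosyncCmapctl)
    def check_schedmiss(cmap):
        # Keys are cmap paths; values are the strings right of "=".
        delays = [float(v) for k, v in cmap.items()
                  if 'schedmiss' in k and k.endswith('.delay')]
        if delays and max(delays) > 2000.0:  # assumed threshold in milliseconds
            return make_fail('COROSYNC_SCHEDMISS', max_delay=max(delays))
        return make_pass('COROSYNC_SCHEDMISS')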
+ + Typical output of the command is:: + + config.totemconfig_reload_in_progress (u8) = 0 + internal_configuration.service.0.name (str) = corosync_cmap + internal_configuration.service.0.ver (u32) = 0 + internal_configuration.service.1.name (str) = corosync_cfg + internal_configuration.service.1.ver (u32) = 0 + internal_configuration.service.2.name (str) = corosync_cpg + internal_configuration.service.2.ver (u32) = 0 + + Examples: + >>> type(corosync) + <class 'insights.parsers.corosync_cmapctl.CorosyncCmapctl'> + >>> 'internal_configuration.service.0.name' in corosync + True + >>> corosync['internal_configuration.service.0.name'] + 'corosync_cmap' + + Raises: + SkipException: When there is no content + ParseException: When there is no "=" in the content + """ + + def __init__(self, context): + super(CorosyncCmapctl, self).__init__(context, extra_bad_lines=['corosync-cmapctl: invalid option']) + + def get_stats_schedmiss(self): + """ Return a dict of the stats.schedmiss info """ + return dict((key, value) for key, value in self.items() if key.startswith('stats.schedmiss')) + + def parse_content(self, content): + if not content: + raise SkipException + for line in content: + if '=' not in line: + raise ParseException("Can not parse line %s" % line) + key, value = [item.strip() for item in line.split('=')] + key_without_parenthese = key.split()[0] + self[key_without_parenthese] = value diff --git a/insights/parsers/tests/test_corosync_cmapctl.py b/insights/parsers/tests/test_corosync_cmapctl.py new file mode 100644 index 000000000..897c899e5 --- /dev/null +++ b/insights/parsers/tests/test_corosync_cmapctl.py @@ -0,0 +1,92 @@ +import doctest +import pytest + +from insights.tests import context_wrap +from insights.parsers import corosync_cmapctl, ParseException, SkipException +from insights.core.plugins import ContentException + + +COROSYNC_CONTENT_1 = """ +config.totemconfig_reload_in_progress (u8) = 0 +internal_configuration.service.0.name (str) = corosync_cmap +internal_configuration.service.0.ver (u32) = 0 +internal_configuration.service.1.name (str) = corosync_cfg +internal_configuration.service.1.ver (u32) = 0 +internal_configuration.service.2.name (str) = corosync_cpg +internal_configuration.service.2.ver (u32) = 0 +internal_configuration.service.3.name (str) = corosync_quorum +internal_configuration.service.3.ver (u32) = 0 +internal_configuration.service.4.name (str) = corosync_pload +internal_configuration.service.4.ver (u32) = 0 +internal_configuration.service.5.name (str) = corosync_votequorum +internal_configuration.service.5.ver (u32) = 0 +logging.logfile (str) = /var/log/cluster/corosync.log +logging.to_logfile (str) = yes +logging.to_syslog (str) = yes +nodelist.local_node_pos (u32) = 1 +nodelist.node.0.nodeid (u32) = 1 +""".strip() + +COROSYNC_CONTENT_2 = """ +stats.schedmiss.0.delay (flt) = 2023.957031 +stats.schedmiss.0.timestamp (u64) = 5106558848098 +stats.schedmiss.1.delay (flt) = 2023.436279 +stats.schedmiss.1.timestamp (u64) = 5106556824141 +stats.schedmiss.2.delay (flt) = 2030.076904 +stats.schedmiss.2.timestamp (u64) = 5106554800704 +stats.schedmiss.3.delay (flt) = 2022.936890 +stats.schedmiss.3.timestamp (u64) = 5106552770627 +stats.schedmiss.4.delay (flt) = 2024.749023 +stats.schedmiss.4.timestamp (u64) = 5106550747691 +stats.schedmiss.5.delay (flt) = 2021.841553 +stats.schedmiss.5.timestamp (u64) = 5106548722942 +stats.schedmiss.6.delay (flt) = 2024.137207 +stats.schedmiss.6.timestamp (u64) = 5106546701100 +stats.schedmiss.7.delay (flt) = 2021.287476 +stats.schedmiss.7.timestamp (u64) = 5106544676963 +stats.schedmiss.8.delay
(flt) = 2025.073853 +stats.schedmiss.8.timestamp (u64) = 5106542655675 +stats.schedmiss.9.delay (flt) = 2473.122070 +stats.schedmiss.9.timestamp (u64) = 5106540630601 +""".strip() + +COROSYNC_CONTENT_3 = """ +""".strip() + +COROSYNC_CONTENT_4 = """ +corosync-cmapctl: invalid option -- 'C' +""".strip() + +COROSYNC_CONTENT_5 = """ +stats.schedmiss.7.timestamp (u64)5106544676963 +stats.schedmiss.8.delay (flt) = 2025.073853 +stats.schedmiss.8.timestamp (u64) = 5106542655675 +stats.schedmiss.9.delay (flt) = 2473.122070 +stats.schedmiss.9.timestamp (u64) = 5106540630601 +""".strip() + + +def test_corosync_doc_examples(): + env = { + 'corosync': corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_1, path='corosync_cmpctl')), + } + failed, total = doctest.testmod(corosync_cmapctl, globs=env) + assert failed == 0 + + +def test_state_schemiss(): + corodata = corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_2, path='corosync-cmapctl_-m_stats_stats.schedmiss')) + schemiss_dict = corodata.get_stats_schedmiss() + assert 'stats.schedmiss.0.delay' in schemiss_dict + assert schemiss_dict['stats.schedmiss.0.delay'] == '2023.957031' + assert schemiss_dict['stats.schedmiss.0.timestamp'] == '5106558848098' + assert len(schemiss_dict) == 20 + + +def test_exception(): + with pytest.raises(SkipException): + corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_3, path="corosync_cmpctl")) + with pytest.raises(ContentException): + corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_4, path="corosync_cmpctl_-C")) + with pytest.raises(ParseException): + corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_5, path="corosync_cmpctl")) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 5d68e3884..2cc06ea73 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -79,6 +79,7 @@ class Specs(SpecSet): cobbler_modules_conf = RegistryPoint() cobbler_settings = RegistryPoint() corosync = RegistryPoint() + corosync_cmapctl = RegistryPoint(multi_output=True) corosync_conf = RegistryPoint() cpe = RegistryPoint() cpu_cores = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4fdc8af43..b373a5667 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -199,6 +199,12 @@ def is_ceph_monitor(broker): cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") + corosync_cmapctl = foreach_execute( + [ + "/usr/sbin/corosync-cmapctl", + '/usr/sbin/corosync-cmapctl -m stats "stats.schedmiss."', + '/usr/sbin/corosync-cmapctl -C schedmiss' + ], "%s") corosync_conf = simple_file("/etc/corosync/corosync.conf") cpu_cores = glob_file("sys/devices/system/cpu/cpu[0-9]*/online") cpu_siblings = glob_file("sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index fd92fa309..6f8a55f30 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -33,6 +33,7 @@ class InsightsArchiveSpecs(Specs): chkconfig = simple_file("insights_commands/chkconfig_--list") chronyc_sources = simple_file("insights_commands/chronyc_sources") cpupower_frequency_info = simple_file("insights_commands/cpupower_-c_all_frequency-info") + corosync_cmapctl = glob_file("insights_commands/corosync-cmapctl*") crt = 
simple_file("insights_commands/find_.etc.origin.node_.etc.origin.master_-type_f_-path_.crt") date = simple_file("insights_commands/date") date_iso = simple_file("insights_commands/date_--iso-8601_seconds") From ab1d4046cecbe6f7b6cf3d76ae64fbbcbe6ef428 Mon Sep 17 00:00:00 2001 From: Sachin Date: Thu, 21 May 2020 19:26:47 +0530 Subject: [PATCH 065/892] Add parser for the command 'systemd-analyze blame' (#2590) Signed-off-by: Sachin Patil --- .../systemd_analyze.rst | 3 + insights/parsers/systemd_analyze.py | 64 +++++++++++++++++++ .../parsers/tests/test_systemd_analyze.py | 38 +++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 108 insertions(+) create mode 100644 docs/shared_parsers_catalog/systemd_analyze.rst create mode 100644 insights/parsers/systemd_analyze.py create mode 100644 insights/parsers/tests/test_systemd_analyze.py diff --git a/docs/shared_parsers_catalog/systemd_analyze.rst b/docs/shared_parsers_catalog/systemd_analyze.rst new file mode 100644 index 000000000..5869e1be3 --- /dev/null +++ b/docs/shared_parsers_catalog/systemd_analyze.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.systemd_analyze + :members: + :show-inheritance: diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py new file mode 100644 index 000000000..0714a5fcf --- /dev/null +++ b/insights/parsers/systemd_analyze.py @@ -0,0 +1,64 @@ +""" +SystemdAnalyzeBlame - command ``systemd-analyze blame`` +======================================================= + +This module parses the output of command ``systemd-analyze blame``. +""" +from insights.specs import Specs +from insights import CommandParser, parser +from insights.parsers import SkipException + + +@parser(Specs.systemd_analyze_blame) +class SystemdAnalyzeBlame(CommandParser, dict): + """Parse the output of ``systemd-analyze blame`` as ``dict``. The time to + initialize is converted into seconds. + + Typical output:: + + 33.080s cloud-init-local.service + 32.423s unbound-anchor.service + 2.773s kdump.service + 1.699s dnf-makecache.service + 1.304s cloud-init.service + 1.073s initrd-switch-root.service + 939ms cloud-config.service + 872ms tuned.service + 770ms cloud-final.service + + Examples: + + >>> 'cloud-init-local.service' in output + True + >>> output.get('cloud-init.service', 0) + 1.304 + + Returns: + (dict): With unit-name & time as key-value pair. + Ex:: + + {'cloud-config.service': 0.939, + 'cloud-final.service': 0.77, + 'cloud-init-local.service': 33.08, + 'cloud-init.service': 1.304, + 'dnf-makecache.service': 1.699, + 'initrd-switch-root.service': 1.073, + 'kdump.service': 2.773, + 'tuned.service': 0.872, + 'unbound-anchor.service': 32.423} + + Raises: + SkipException: If content is not provided. 
+ """ + def parse_content(self, content): + if not content: + raise SkipException + + for c in content: + time, service = c.split() + if time.endswith('ms'): + _time = round(float(time.strip('ms')) / 1000, 5) + else: + _time = round(float(time.strip('ms')), 5) + + self[service] = _time diff --git a/insights/parsers/tests/test_systemd_analyze.py b/insights/parsers/tests/test_systemd_analyze.py new file mode 100644 index 000000000..d718b23f4 --- /dev/null +++ b/insights/parsers/tests/test_systemd_analyze.py @@ -0,0 +1,38 @@ +import doctest +import pytest + +from insights.parsers import systemd_analyze +from insights.parsers import SkipException +from insights.tests import context_wrap + + +OUTPUT = """ +33.080s cloud-init-local.service +32.423s unbound-anchor.service + 2.773s kdump.service + 1.699s dnf-makecache.service + 1.304s cloud-init.service + 1.073s initrd-switch-root.service + 939ms cloud-config.service + 872ms tuned.service + 770ms cloud-final.service +""".strip() + + +def test_output(): + output = systemd_analyze.SystemdAnalyzeBlame(context_wrap(OUTPUT)) + assert ('cloud-init-local.service' in output) is True + + # Test time(seconds) + assert output.get('cloud-init.service', 0) == 1.304 + + with pytest.raises(SkipException): + assert systemd_analyze.SystemdAnalyzeBlame(context_wrap("")) is None + + +def test_documentation(): + failed_count, tests = doctest.testmod( + systemd_analyze, + globs={'output': systemd_analyze.SystemdAnalyzeBlame(context_wrap(OUTPUT))} + ) + assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 2cc06ea73..4519135af 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -603,6 +603,7 @@ class Specs(SpecSet): systemctl_show_all_services = RegistryPoint() systemctl_show_target = RegistryPoint() systemctl_smartpdc = RegistryPoint() + systemd_analyze_blame = RegistryPoint() systemd_docker = RegistryPoint() systemd_logind_conf = RegistryPoint() systemd_openshift_node = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index b373a5667..f73aef990 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -977,6 +977,7 @@ def block(broker): systemctl_show_all_services = simple_command("/bin/systemctl show *.service") systemctl_show_target = simple_command("/bin/systemctl show *.target") systemctl_smartpdc = simple_command("/bin/systemctl show smart_proxy_dynflow_core") + systemd_analyze_blame = simple_command("/bin/systemd-analyze blame") systemd_docker = simple_command("/usr/bin/systemctl cat docker.service") systemd_logind_conf = simple_file("/etc/systemd/logind.conf") systemd_openshift_node = simple_command("/usr/bin/systemctl cat atomic-openshift-node.service") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 6f8a55f30..fb3f7eab5 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -277,6 +277,7 @@ class InsightsArchiveSpecs(Specs): systemctl_show_all_services = simple_file("insights_commands/systemctl_show_.service") systemctl_show_target = simple_file("insights_commands/systemctl_show_.target") systemctl_smartpdc = simple_file("insights_commands/systemctl_show_smart_proxy_dynflow_core") + systemd_analyze_blame = simple_file("insights_commands/systemd-analyze_blame") systemd_docker = first_file(["insights_commands/systemctl_cat_docker.service", "/usr/lib/systemd/system/docker.service"]) systemd_openshift_node = 
first_file(["insights_commands/systemctl_cat_atomic-openshift-node.service", "/usr/lib/systemd/system/atomic-openshift-node.service"]) systool_b_scsi_v = simple_file("insights_commands/systool_-b_scsi_-v") From 8a3f9bdc2875adca88b83baaa77165ea327a4f50 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 21 May 2020 22:34:03 +0800 Subject: [PATCH 066/892] Update return values of cpuinfo (#2600) Signed-off-by: Xiangce Liu --- insights/parsers/cpuinfo.py | 15 +++++---------- insights/parsers/tests/test_cpuinfo.py | 3 +++ 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/insights/parsers/cpuinfo.py b/insights/parsers/cpuinfo.py index 08dc0c00f..74553671b 100644 --- a/insights/parsers/cpuinfo.py +++ b/insights/parsers/cpuinfo.py @@ -179,26 +179,23 @@ def cache_size(self): return self.data["cache_sizes"][0] @property - @defaults() def cpu_count(self): """ - str: Returns the number of CPUs. + int : Returns the number of CPUs. """ return len(self.data.get("cpus", [])) @property - @defaults() def apicid(self): """ - str: Returns the apicid of the processor. + list: Returns the list of apicid of the processor. """ - return self.data["apicid"] + return self.data.get("apicid", []) @property - @defaults() def socket_count(self): """ - str: Returns the number of sockets. This is distinct from the number + int: Returns the number of sockets. This is distinct from the number of CPUs. """ return len(set(self.data.get("sockets", []))) @@ -220,7 +217,7 @@ def model_number(self): return self.data["model_ids"][0] @property - @defaults() + @defaults([]) def flags(self): """ list: Returns a list of feature flags for the first CPU. @@ -257,8 +254,6 @@ def core_total(self): # package system physical_dict[e['sockets']] = int(e['cpu_cores']) return sum(physical_dict.values()) - else: - return None def get_processor_by_index(self, index): """ diff --git a/insights/parsers/tests/test_cpuinfo.py b/insights/parsers/tests/test_cpuinfo.py index 8a6187de5..235afbc83 100644 --- a/insights/parsers/tests/test_cpuinfo.py +++ b/insights/parsers/tests/test_cpuinfo.py @@ -1447,6 +1447,7 @@ def test_noir_cpuinfo(): assert cpu_info.cpu_count == 32 assert cpu_info.socket_count == 2 assert cpu_info.core_total == 16 + assert 'fpu' in cpu_info.flags def test_hetero_cpuinfo(): @@ -1459,6 +1460,8 @@ def test_empty_cpuinfo(): cpu_info = CpuInfo(context_wrap("")) assert cpu_info.cpu_count == 0 assert cpu_info.core_total is None + assert cpu_info.apicid == [] + assert cpu_info.flags == [] def test_arm_cpuinfo(): From c6f4ffabdbc034c24bb261c4ab66b29078285e58 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 26 May 2020 10:35:25 +0800 Subject: [PATCH 067/892] Enhance parser "orosync_cmapctl" (#2602) * Enhance parser "orosync_cmapctl" * Fixes #2601 * The way of clearing schedule events data is different on RHEL 7 and RHEL 8 * so run these commands with dependency Signed-off-by: Huanhuan Li * Update code to combine the cases on RHEL 7 and RHEL 8 Signed-off-by: Huanhuan Li * Fix flake8 error Signed-off-by: Huanhuan Li --- insights/parsers/corosync_cmapctl.py | 4 --- .../parsers/tests/test_corosync_cmapctl.py | 31 ++++++++++++++++--- insights/specs/default.py | 17 +++++----- 3 files changed, 36 insertions(+), 16 deletions(-) diff --git a/insights/parsers/corosync_cmapctl.py b/insights/parsers/corosync_cmapctl.py index 445093c59..b4fa21715 100644 --- a/insights/parsers/corosync_cmapctl.py +++ b/insights/parsers/corosync_cmapctl.py @@ -44,10 +44,6 @@ class CorosyncCmapctl(CommandParser, 
dict): def __init__(self, context): super(CorosyncCmapctl, self).__init__(context, extra_bad_lines=['corosync-cmapctl: invalid option']) - def get_stats_schedmiss(self): - """ Return a dict of the stats.schedmiss info """ - return dict((key, value) for key, value in self.items() if key.startswith('stats.schedmiss')) - def parse_content(self, content): if not content: raise SkipException diff --git a/insights/parsers/tests/test_corosync_cmapctl.py b/insights/parsers/tests/test_corosync_cmapctl.py index 897c899e5..e80106eaa 100644 --- a/insights/parsers/tests/test_corosync_cmapctl.py +++ b/insights/parsers/tests/test_corosync_cmapctl.py @@ -65,6 +65,20 @@ stats.schedmiss.9.timestamp (u64) = 5106540630601 """.strip() +COROSYNC_CONTENT_6 = """ +config.totemconfig_reload_in_progress (u8) = 0 +internal_configuration.service.0.name (str) = corosync_cmap +internal_configuration.service.0.ver (u32) = 0 +internal_configuration.service.1.name (str) = corosync_cfg +internal_configuration.service.1.ver (u32) = 0 +internal_configuration.service.2.name (str) = corosync_cpg +internal_configuration.service.2.ver (u32) = 0 +internal_configuration.service.3.name (str) = corosync_quorum +internal_configuration.service.3.ver (u32) = 0 +runtime.schedmiss.delay (flt) = 2282.403320 +runtime.schedmiss.timestamp (u64) = 1589895874915 +""".strip() + def test_corosync_doc_examples(): env = { @@ -76,11 +90,10 @@ def test_corosync_doc_examples(): def test_state_schemiss(): corodata = corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_2, path='corosync-cmapctl_-m_stats_stats.schedmiss')) - schemiss_dict = corodata.get_stats_schedmiss() - assert 'stats.schedmiss.0.delay' in schemiss_dict - assert schemiss_dict['stats.schedmiss.0.delay'] == '2023.957031' - assert schemiss_dict['stats.schedmiss.0.timestamp'] == '5106558848098' - assert len(schemiss_dict) == 20 + assert 'stats.schedmiss.0.delay' in corodata + assert corodata['stats.schedmiss.0.delay'] == '2023.957031' + assert corodata['stats.schedmiss.0.timestamp'] == '5106558848098' + assert len(corodata) == 20 def test_exception(): @@ -90,3 +103,11 @@ def test_exception(): corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_4, path="corosync_cmpctl_-C")) with pytest.raises(ParseException): corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_5, path="corosync_cmpctl")) + + +def test_runtime_schemiss(): + corodata = corosync_cmapctl.CorosyncCmapctl(context_wrap(COROSYNC_CONTENT_6, path='corosync-cmapctl')) + assert "runtime.schedmiss.delay" in corodata + assert corodata['runtime.schedmiss.delay'] == '2282.403320' + assert "runtime.schedmiss.timestamp" in corodata + assert corodata['runtime.schedmiss.timestamp'] == '1589895874915' diff --git a/insights/specs/default.py b/insights/specs/default.py index f73aef990..44a2a8c94 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -29,7 +29,7 @@ from insights.combiners.cloud_provider import CloudProvider from insights.combiners.satellite_version import SatelliteVersion from insights.combiners.services import Services -from insights.components.rhel_version import IsRhel8 +from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.specs import Specs @@ -199,12 +199,15 @@ def is_ceph_monitor(broker): cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") - corosync_cmapctl = foreach_execute( 
- [ - "/usr/sbin/corosync-cmapctl", - '/usr/sbin/corosync-cmapctl -m stats "stats.schedmiss."', - '/usr/sbin/corosync-cmapctl -C schedmiss' - ], "%s") + + @datasource([IsRhel7, IsRhel8]) + def corosync_cmapctl_cmd_list(broker): + if broker.get(IsRhel7): + return ["/usr/sbin/corosync-cmapctl", 'corosync-cmapctl -d runtime.schedmiss.timestamp', 'corosync-cmapctl -d runtime.schedmiss.delay'] + if broker.get(IsRhel8): + return ["/usr/sbin/corosync-cmapctl", '/usr/sbin/corosync-cmapctl -m stats', '/usr/sbin/corosync-cmapctl -C schedmiss'] + raise SkipComponent() + corosync_cmapctl = foreach_execute(corosync_cmapctl_cmd_list, "%s") corosync_conf = simple_file("/etc/corosync/corosync.conf") cpu_cores = glob_file("sys/devices/system/cpu/cpu[0-9]*/online") cpu_siblings = glob_file("sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list") From 47396f48081830d25af0c6fb5375f02d267afc55 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 2 Jun 2020 09:24:11 -0500 Subject: [PATCH 068/892] Update copyright in docs to include 2019 and 2020. (#2605) Signed-off-by: Christopher Sams --- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index ca467f8fe..5989f6d8e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'insights-core' -copyright = u'2016, 2017, 2018 Red Hat, Inc' +copyright = u'2016, 2017, 2018, 2019, 2020 Red Hat, Inc' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the From f9220e5ebef57a9d918b97f2df800e9b5d4f69d6 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 3 Jun 2020 14:48:07 -0500 Subject: [PATCH 069/892] Insights shell (#2606) * Initial check of insights shell. Signed-off-by: Christopher Sams * Add 'shell' to insights cli. Remove sossy ref. Signed-off-by: Christopher Sams * Add some docstrings to insights shell. Signed-off-by: Christopher Sams * Add some descriptions to the cli options. Signed-off-by: Christopher Sams * More docs. Signed-off-by: Christopher Sams * Remove old shell script. Signed-off-by: Christopher Sams * More docs. Signed-off-by: Christopher Sams * Add color to show_trees when colorama is installed. Signed-off-by: Christopher Sams * Add color to find. Signed-off-by: Christopher Sams * Add depth parameter to show_trees. Signed-off-by: Christopher Sams * Colors for components with missing dependencies. Signed-off-by: Christopher Sams * Check for shortnames when evaluating matches. Signed-off-by: Christopher Sams * Move dependency tree to the dop of the diagnostic dump. Signed-off-by: Christopher Sams * Fix some docstrings. Signed-off-by: Christopher Sams * Fix typo in CLI help. Signed-off-by: Christopher Sams * Refactor color detection. Signed-off-by: Christopher Sams * evaluate_all and --no-defaults Signed-off-by: Christopher Sams * All show_trees to show unevaluated datasource deps. Signed-off-by: Christopher Sams * Don't evaluate_all if zero components can run. Signed-off-by: Christopher Sams * Show exceptions beneath failed in show_trees. Signed-off-by: Christopher Sams * Color show_failed and show exceptions. Signed-off-by: Christopher Sams * Fix show_requested. Cleanup cruft. Signed-off-by: Christopher Sams * Show errors in find. Signed-off-by: Christopher Sams * Tweak pydoc. evaluate falls back to dr.get_component. Signed-off-by: Christopher Sams * show_source with highlighting and paging. 
Signed-off-by: Christopher Sams * Ignore spec by default for show_failed and show_exceptions Signed-off-by: Christopher Sams * Fix pydoc typo. Signed-off-by: Christopher Sams * Remove stupid name = name statement. Signed-off-by: Christopher Sams * show_trees ignores 'spec' by default. Signed-off-by: Christopher Sams * Use ip.psource. Add func for ipython dict key autocomplete. Signed-off-by: Christopher Sams * Use ip.inspector.pinfo with detail_level=1 instead of ip.inspector.psource. Signed-off-by: Christopher Sams * Show module source instead of component source for insights components. Signed-off-by: Christopher Sams * Add some detail around rules and conditions. Signed-off-by: Christopher Sams * Add code coverage highlighting when viewing source. Signed-off-by: Christopher Sams * Use pygments instead of colorama. Signed-off-by: Christopher Sams * Add show_rule_report. Signed-off-by: Christopher Sams * Add _make_skip to _get_rule_value_kind. Signed-off-by: Christopher Sams * Automatically render links in show_rule_report. Signed-off-by: Christopher Sams * Update __Model pydoc. Signed-off-by: Christopher Sams * Fix explanation for --no-coverage. Signed-off-by: Christopher Sams * Allow code editing when saving make_rule to file. Signed-off-by: Christopher Sams * Print path saved after editing rule. Signed-off-by: Christopher Sams * clear list of requested models and current session history. Signed-off-by: Christopher Sams * _get_rule_value. enumerate(.., start=1) in show_source. Signed-off-by: Christopher Sams * Add more paging. Always run coverage - allow version < 5. Signed-off-by: Christopher Sams * os.linesep instead of explicit newlines. Signed-off-by: Christopher Sams * Adding paging to diagnostics and models.find() Signed-off-by: Christopher Sams * Apply black formatting. Handle imports when using --config. Signed-off-by: Christopher Sams * Option to show trees toward dependents. Signed-off-by: Christopher Sams * Fix flake8 issues caused by black. Signed-off-by: Christopher Sams * Document toward_dependents. Signed-off-by: Christopher Sams * Explain that pylibs are excluded from coverage. Signed-off-by: Christopher Sams * Tie insights shell into docs. Signed-off-by: Christopher Sams * Changes to work with 2.7 Signed-off-by: Christopher Sams * Fix ocp.conf Signed-off-by: Christopher Sams * Analyze multiple archives at the same time. Signed-off-by: Christopher Sams * Key models by original file name. Signed-off-by: Christopher Sams * Fix model index when analyzing local system. Signed-off-by: Christopher Sams * Ensure archives are evaluated in consistent order. Signed-off-by: Christopher Sams * Handle no archive and single archive correctly. Signed-off-by: Christopher Sams * Have ocp analysis exclude log files during yaml loading. Signed-off-by: Christopher Sams * Add pydoc to the Holder class so help(models) makes sense. Signed-off-by: Christopher Sams * Add filter disable instructions to Models pydoc. Signed-off-by: Christopher Sams * show_timings Signed-off-by: Christopher Sams * Add total to show_timings Signed-off-by: Christopher Sams * show timings instead of find after evaluate_all. 
Signed-off-by: Christopher Sams --- docs/index.rst | 1 + docs/shell.rst | 11 + insights/command_parser.py | 7 +- insights/ocp.py | 16 +- insights/shell.py | 887 +++++++++++++++++++++++++++++++++++++ 5 files changed, 917 insertions(+), 5 deletions(-) create mode 100644 docs/shell.rst create mode 100644 insights/shell.py diff --git a/docs/index.rst b/docs/index.rst index e52ddaa2c..aac7a31ff 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -16,6 +16,7 @@ Contents: combiners_index components_index ocp + shell docs_guidelines components embedded_content diff --git a/docs/shell.rst b/docs/shell.rst new file mode 100644 index 000000000..065fe0dcc --- /dev/null +++ b/docs/shell.rst @@ -0,0 +1,11 @@ +Insights Shell +============== +The insights shell is an ipython-based interactive environment for exploring +insights components. You can run it against your local system or different +kinds of packaged data like insights archives, sosreports, JBoss Diagnostic +Reports, must-gather archives, and more. See ``insights shell -h`` for +details. + +.. automodule:: insights.shell + :members: + :show-inheritance: diff --git a/insights/command_parser.py b/insights/command_parser.py index 2a36d9568..9bc2be549 100644 --- a/insights/command_parser.py +++ b/insights/command_parser.py @@ -17,7 +17,8 @@ collect Collect all specs against the client and create an Insights archive. inspect Execute component and shell out to ipython for evaluation. info View info and docs for Insights Core components. - ocpshell Interactive evaluation of archives, directories, or individual yaml files. + ocpshell Interactive evaluation of archives or directories from OCP, or individual yaml files. + shell Interactive evaluation of archives and directories. run Run insights-core against host or an archive. version Show Insights Core version information and exit. """ @@ -76,6 +77,10 @@ def ocpshell(self): from .ocpshell import main as ocpshell_main ocpshell_main() + def shell(self): + from .shell import main as shell_main + shell_main() + def run(self): from insights import run if "" not in sys.path: diff --git a/insights/ocp.py b/insights/ocp.py index 249fbacbf..33cbb7f78 100644 --- a/insights/ocp.py +++ b/insights/ocp.py @@ -9,7 +9,7 @@ import yaml from fnmatch import fnmatch -from insights.core.plugins import component +from insights.core.plugins import component, datasource from insights.core.context import InsightsOperatorContext, MustGatherContext from insights.core.archives import extract @@ -18,6 +18,7 @@ log = logging.getLogger(__name__) +contexts = [InsightsOperatorContext, MustGatherContext] try: # requires pyyaml installed after libyaml @@ -67,8 +68,15 @@ def analyze(paths, excludes=None): return Result(children=results) -@component([InsightsOperatorContext, MustGatherContext]) -def conf(io, mg): +@datasource(contexts) +def conf_root(broker): + for ctx in contexts: + if ctx in broker: + return broker[ctx].root + + +@component(conf_root) +def conf(root): """ The ``conf`` component parses all configuration in an insights-operator or must-gather archive and returns an object that is part of the parsr common @@ -77,4 +85,4 @@ def conf(io, mg): .. 
_tutorial: https://insights-core.readthedocs.io/en/latest/notebooks/Parsr%20Query%20Tutorial.html """ - return analyze((io or mg).root) + return analyze(root, excludes=["*.log"]) diff --git a/insights/shell.py b/insights/shell.py new file mode 100644 index 000000000..acad2a5db --- /dev/null +++ b/insights/shell.py @@ -0,0 +1,887 @@ +# -*- coding: utf-8 -*- +from __future__ import print_function +import argparse +import logging +import importlib +import inspect +import os +import re +import six +import traceback +import yaml + +from collections import defaultdict +from contextlib import contextmanager + +import IPython +from pygments.console import ansiformat +from traitlets.config.loader import Config + +from insights.parsr.query import * # noqa +from insights.parsr.query import eq, matches, make_child_query as q # noqa +from insights.parsr.query.boolean import FALSE, TRUE + +from insights import ( + apply_configs, + apply_default_enabled, + create_context, + datasource, + dr, + extract, + load_default_plugins, + load_packages, + parse_plugins, +) +from insights.core import plugins +from insights.core.context import HostContext +from insights.core.spec_factory import ContentProvider, RegistryPoint +from insights.formats import render +from insights.formats.text import render_links + +Loader = getattr(yaml, "CSafeLoader", yaml.SafeLoader) + +RULE_COLORS = {"fail": "brightred", "pass": "blue", "info": "magenta", "skip": "yellow"} + + +@contextmanager +def _create_new_broker(path=None): + """ + Create a broker and populate it by evaluating the path with all + registered datasources. + + Args: + path (str): path to the archive or directory to analyze. ``None`` will + analyze the current system. + """ + datasources = dr.get_components_of_type(datasource) + + def make_broker(ctx): + broker = dr.Broker() + broker[ctx.__class__] = ctx + + dr.run(datasources, broker=broker) + + del broker[ctx.__class__] + return broker + + if path: + if os.path.isdir(path): + ctx = create_context(path) + yield (path, make_broker(ctx)) + else: + with extract(path) as e: + ctx = create_context(e.tmp_dir) + yield (e.tmp_dir, make_broker(ctx)) + else: + yield (os.curdir, make_broker(HostContext())) + + +# contextlib.ExitStack isn't available in all python versions +# so recurse to victory. +def with_brokers(archives, callback): + brokers = [] + + def inner(paths): + if paths: + path = paths.pop() + with _create_new_broker(path) as ctx: + brokers.append(ctx) + inner(paths) + else: + callback(brokers) + + if archives: + inner(list(reversed(archives))) + else: + with _create_new_broker() as ctx: + callback([ctx]) + + +def _get_available_models(broker, group=dr.GROUPS.single): + """ + Given a broker populated with datasources, return everything that could + run based on them. 
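The broker-seeding pattern in ``_create_new_broker`` above can also be driven by hand. A standalone sketch using only names imported in this patch; the archive path is an assumption::

    from insights import create_context, datasource, dr

    broker = dr.Broker()
    ctx = create_context("/tmp/extracted-sosreport")  # assumed, already-extracted archive
    broker[ctx.__class__] = ctx

    # Evaluate every registered datasource against the context.
    dr.run(dr.get_components_of_type(datasource), broker=broker)
    print(len(broker.instances), "datasources evaluated")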
+ """ + state = set(broker.instances.keys()) + models = {} + + for comp in dr.run_order(dr.COMPONENTS[group]): + if comp in dr.DELEGATES and not plugins.is_datasource(comp): + if dr.DELEGATES[comp].get_missing_dependencies(state): + continue + + if plugins.is_type( + comp, (plugins.rule, plugins.condition, plugins.incident) + ): + name = "_".join( + [dr.get_base_module_name(comp), dr.get_simple_name(comp)] + ) + else: + name = dr.get_simple_name(comp) + + if name in models: + prev = models[name] + models[dr.get_name(prev).replace(".", "_")] = prev + + del models[name] + name = dr.get_name(comp).replace(".", "_") + + models[name] = comp + state.add(comp) + + return models + + +class Models(dict): + u""" + Represents all components that may be available given the data being + analyzed. Use models.find() to see them. Tab complete attributes to access + them. Use help(models) for more info. + + Start the shell with the environment variable + INSIGHTS_FILTERS_ENABLED=False to disable filtering that may cause + unexpected missing data. + + Examples: + + >>> models.find("rpm") + InstalledRpms (insights.parsers.installed_rpms.InstalledRpms) + + >>> rpms = models.InstalledRpms + >>> rpms.newest("bash") + 0:bash-4.1.2-48.el6 + + >>> models.find("(?i)yum") # Prefix "(?i)" ignores case. + YumConf (insights.parsers.yum_conf.YumConf, parser) + YumLog (insights.parsers.yumlog.YumLog, parser) + YumRepoList (insights.parsers.yum.YumRepoList, parser) + └╌╌Incorrect line: 'repolist: 33296' + YumReposD (insights.parsers.yum_repos_d.YumReposD, parser) + + >>> models.show_trees("rpm") + insights.parsers.installed_rpms.InstalledRpms (parser) + ┊ insights.specs.Specs.installed_rpms (unfiltered / lines) + ┊ ┊╌╌╌╌╌TextFileProvider("'/home/csams/Downloads/archives/sosreport-example-20191225000000/sos_commands/rpm/package-data'") + ┊ ┊ insights.specs.insights_archive.InsightsArchiveSpecs.installed_rpms (unfiltered / lines) + ┊ ┊ ┊ insights.specs.insights_archive.InsightsArchiveSpecs.all_installed_rpms (unfiltered / lines) + ┊ ┊ ┊ ┊ insights.core.context.HostArchiveContext () + ┊ ┊ insights.specs.sos_archive.SosSpecs.installed_rpms (unfiltered / lines) + ┊ ┊ ┊╌╌╌╌╌TextFileProvider("'/home/csams/Downloads/archives/sosreport-example-20191225000000/sos_commands/rpm/package-data'") + ┊ ┊ ┊ insights.core.context.SosArchiveContext () + ┊ ┊ insights.specs.default.DefaultSpecs.installed_rpms (unfiltered / lines) + ┊ ┊ ┊ insights.specs.default.DefaultSpecs.docker_installed_rpms (unfiltered / lines) + ┊ ┊ ┊ ┊ insights.core.context.DockerImageContext () + ┊ ┊ ┊ insights.specs.default.DefaultSpecs.host_installed_rpms (unfiltered / lines) + ┊ ┊ ┊ ┊ insights.core.context.HostContext () + """ + + def __init__(self, broker, models, cwd, tmp, cov): + self._requested = set() + self._broker = broker + self._cwd = cwd + self._tmp = tmp + self._cov = cov + super(Models, self).__init__(models) + + def __dir__(self): + """ Enabled ipython autocomplete. """ + return sorted(set(list(self.keys()) + dir(Models))) + + def _ipython_key_completions_(self): + """ For autocomplete of keys when accessing models as a dict. 
""" + return sorted(self.keys()) + + def __str__(self): + return "{} components possibly available".format(len(self)) + + def _get_color(self, comp): + if comp in self._broker: + if plugins.is_type(comp, plugins.rule) and self._broker[comp].get("type") == "skip": + return "yellow" + return "green" + elif comp in self._broker.exceptions: + return "brightred" + elif comp in self._broker.missing_requirements: + return "yellow" + else: + return "" + + def _dump_diagnostics(self, comp): + results = [] + results.append("Dependency Tree") + results.append("===============") + results.extend(self._show_tree(comp)) + results.append("") + results.append("Missing Dependencies") + results.append("====================") + results.extend(self._show_missing(comp)) + results.append("") + results.append("Exceptions") + results.append("==========") + results.extend(self._show_exceptions(comp)) + IPython.core.page.page(six.u(os.linesep.join(results))) + + def evaluate_all(self, match=None, ignore="spec"): + """ + Evaluate all components that match. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + + tasks = [] + for c in self.values(): + name = dr.get_name(c) + if match.test(name) and not ignore.test(name): + if not any( + [ + c in self._broker.instances, + c in self._broker.exceptions, + c in self._broker.missing_requirements, + ] + ): + tasks.append(c) + + if not tasks: + return + + dr.run(tasks, broker=self._broker) + self.show_timings(match, ignore) + + def evaluate(self, name): + """ + Evaluate a component and return its result. Prints diagnostic + information in the case of failure. This function is useful when a + component's name contains characters that aren't valid for python + identifiers so you can't access it with ``models.``. + + Args: + name (str): the name of the component as shown by :func:`Models.find()`. + """ + comp = self.get(name) or dr.get_component(name) + if not comp: + return + + if not plugins.is_rule(comp): + self._requested.add((name, comp)) + + if comp in self._broker: + return self._broker[comp] + + if comp in self._broker.exceptions or comp in self._broker.missing_requirements: + self._dump_diagnostics(comp) + return + + val = dr.run(comp, broker=self._broker).get(comp) + + if comp not in self._broker: + if comp in self._broker.exceptions or comp in self._broker.missing_requirements: + self._dump_diagnostics(comp) + else: + print("{} chose to skip.".format(dr.get_name(comp))) + return val + + def __getattr__(self, name): + return self.evaluate(name) + + # TODO: lot of room for improvement here... + def make_rule(self, path=None, overwrite=False, pick=None): + """ + Attempt to generate a rule based on models used so far. + + Args: + path(str): path to store the rule. + overwrite (bool): whether to overwrite an existing file. + pick (str): Optionally specify which lines or line ranges + to use for the rule body. "1 2 3" gets lines 1,2 and 3. + "1 3-5 7" gets line 1, lines 3 through 5, and line 7. 
+ """ + import IPython + + ip = IPython.get_ipython() + ignore = [ + r"=.*models\.", + r"^(%|!|help)", + r"make_rule", + r"models\.(show|find).*", + r".*\?$", + r"^(clear|pwd|cd *.*|ll|ls)$", + ] + + if pick: + lines = [r[2] for r in ip.history_manager.get_range_by_str(pick)] + else: + lines = [] + for r in ip.history_manager.get_range(): + l = r[2] + if any(re.search(i, l) for i in ignore): + continue + elif l.startswith("models."): + l = l[7:] + lines.append(l) + + # the user asked for these models during the session. + requested = sorted(self._requested, key=lambda i: i[0]) + + # figure out what we need to import for filtering. + filterable = defaultdict(list) + for _, c in requested: + for d in dr.get_dependency_graph(c): + try: + if isinstance(d, RegistryPoint) and d.filterable: + n = d.__qualname__ + cls = n.split(".")[0] + filterable[dr.get_module_name(d)].append((cls, n)) + except: + pass + + model_names = [dr.get_simple_name(i[1]) for i in requested] + var_names = [i[0].lower() for i in requested] + + imports = [ + "from insights import rule, make_fail, make_info, make_pass # noqa", + "from insights.parsr.query import * # noqa", + "", + ] + + for _, c in requested: + mod = dr.get_module_name(c) + name = dr.get_simple_name(c) + imports.append("from {} import {}".format(mod, name)) + + seen = set() + if filterable: + imports.append("") + + for k in sorted(filterable): + for (cls, n) in filterable[k]: + if (k, cls) not in seen: + imports.append("from {} import {}".format(k, cls)) + seen.add((k, cls)) + + seen = set() + filters = [] + for k in sorted(filterable): + for i in filterable[k]: + if i not in seen: + filters.append("add_filter({}, ...)".format(i[1])) + seen.add(i) + + if filters: + imports.append("from insights.core.filters import add_filter") + + filter_stanza = os.linesep.join(filters) + import_stanza = os.linesep.join(imports) + decorator = "@rule({})".format(", ".join(model_names)) + func_decl = "def report({}):".format(", ".join(var_names)) + body = os.linesep.join([" " + x for x in lines]) if lines else " pass" + + res = import_stanza + if filter_stanza: + res += (os.linesep * 2) + filter_stanza + + res += (os.linesep * 3) + decorator + os.linesep + func_decl + os.linesep + body + + if path: + if not path.startswith("/"): + realpath = os.path.realpath(path) + if not realpath.startswith(self._cwd): + path = os.path.join(self._cwd, path) + + if os.path.exists(path) and not overwrite: + print( + "{} already exists. 
Use overwrite=True to overwrite.".format(path) + ) + return + + if not os.path.exists(path) or overwrite: + with open(path, "w") as f: + f.write(res) + ip.magic("edit -x {}".format(path)) + print("Saved to {}".format(path)) + else: + IPython.core.page.page(ip.pycolorize(res)) + + def _desugar_match_ignore(self, match, ignore): + if match is None: + match = TRUE + elif isinstance(match, str): + if match in self: + match = eq(dr.get_name(self[match])) + else: + match = matches(match) + + if ignore is None: + ignore = FALSE + elif isinstance(ignore, str): + ignore = matches(ignore) + + return (match, ignore) + + def _show_missing(self, comp): + results = [] + try: + req, alo = self._broker.missing_requirements[comp] + name = dr.get_name(comp) + results.append(name) + results.append("-" * len(name)) + if req: + results.append("Requires:") + for r in req: + results.append(" {}".format(dr.get_name(r))) + if alo: + if req: + results.append("") + results.append("Requires At Least One From Each List:") + for r in alo: + results.append("[") + for i in r: + results.append(" {}".format(dr.get_name(i))) + results.append("]") + except: + pass + return results + + def show_requested(self): + """ Show the components you've worked with so far. """ + results = [] + for name, comp in sorted(self._requested): + results.append( + ansiformat( + self._get_color(comp), "{} {}".format(name, dr.get_name(comp)) + ) + ) + IPython.core.page.page(six.u(os.linesep.join(results))) + + def reset_requested(self): + """ Reset requested state so you can work on a new rule. """ + IPython.get_ipython().history_manager.reset() + self._requested.clear() + + def show_source(self, comp): + """ + Show source for the given module, class, or function. Also accepts + the string names, with the side effect that the component will be + imported. + """ + try: + if isinstance(comp, six.string_types): + comp = self.get(comp) or dr.get_component(comp) or importlib.import_module(comp) + comp = inspect.getmodule(comp) + ip = IPython.get_ipython() + if self._cov: + path, runnable, excluded, not_run, _ = self._cov.analysis2(comp) + runnable, not_run = set(runnable), set(not_run) + src = ip.pycolorize(inspect.getsource(comp)).splitlines() + width = len(str(len(src))) + template = "{0:>%s}" % width + results = [] + file_line = "{} {}".format( + ansiformat("red", "File:"), os.path.realpath(path) + ) + explain_line = "{} numbered lines have executed. 
python standard libs are excluded.".format( + ansiformat("*brightgreen*", "Green") + ) + results.append(file_line) + results.append(explain_line) + results.append("") + for i, line in enumerate(src, start=1): + prefix = template.format(i) + if i in runnable and i not in not_run: + color = "*brightgreen*" + else: + color = "gray" + results.append("{} {}".format(ansiformat(color, prefix), line)) + IPython.core.page.page(six.u(os.linesep.join(results))) + else: + ip.inspector.pinfo(comp, detail_level=1) + except: + traceback.print_exc() + + def _get_type_name(self, comp): + try: + return dr.get_component_type(comp).__name__ + except: + return "" + + def _get_rule_value_kind(self, val): + if val is None: + kind = None + elif isinstance(val, plugins.make_response): + kind = "fail" + elif isinstance(val, plugins.make_pass): + kind = "pass" + elif isinstance(val, plugins.make_info): + kind = "info" + elif isinstance(val, plugins._make_skip): + kind = "skip" + elif isinstance(val, plugins.Response): + kind = val.response_type or "" + else: + kind = "" + return kind + + def _get_rule_value(self, comp): + try: + val = self._broker[comp] + if plugins.is_rule(comp): + _type = val.__class__.__name__ + kind = self._get_rule_value_kind(val) + color = RULE_COLORS.get(kind, "") + return ansiformat(color, " [{}]".format(_type)) + except: + pass + return "" + + def _show_datasource(self, d, v, indent=""): + try: + filtered = "filtered" if dr.DELEGATES[d].filterable else "unfiltered" + mode = "bytes" if dr.DELEGATES[d].raw else "lines" + except: + filtered = "unknown" + mode = "unknown" + + desc = "{n} ({f} / {m})".format(n=dr.get_name(d), f=filtered, m=mode) + color = self._get_color(d) + + results = [] + results.append(indent + ansiformat(color, desc)) + + if not v: + return results + if not isinstance(v, list): + v = [v] + + for i in v: + if isinstance(i, ContentProvider): + s = ansiformat(color, str(i)) + else: + s = ansiformat(color, "") + results.append("{}\u250A\u254C\u254C\u254C\u254C\u254C{}".format(indent, s)) + return results + + def _show_tree(self, node, indent="", depth=None, dep_getter=dr.get_dependencies): + if depth is not None and depth == 0: + return [] + + results = [] + color = self._get_color(node) + if plugins.is_datasource(node): + results.extend( + self._show_datasource(node, self._broker.get(node), indent=indent) + ) + else: + _type = self._get_type_name(node) + name = dr.get_name(node) + suffix = self._get_rule_value(node) + desc = ansiformat(color, "{n} ({t}".format(n=name, t=_type)) + results.append(indent + desc + suffix + ansiformat(color, ")")) + + dashes = "\u250A\u254C\u254C\u254C\u254C\u254C" + if node in self._broker.exceptions: + for ex in self._broker.exceptions[node]: + results.append(indent + dashes + ansiformat(color, str(ex))) + + deps = dep_getter(node) + next_indent = indent + "\u250A " + for d in deps: + results.extend( + self._show_tree( + d, next_indent, depth=depth if depth is None else depth - 1, dep_getter=dep_getter + ) + ) + return results + + def show_trees(self, match=None, ignore="spec", depth=None, toward_dependents=False): + """ + Show dependency trees of any components whether they're available or not. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + depth (int, optional): how deep into the tree to explore. 
+ toward_dependents (bool, optional): whether to walk the tree toward dependents. + Default is to walk toward dependencies. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + dep_getter = dr.get_dependents if toward_dependents else dr.get_dependencies + + graph = defaultdict(list) + for c in dr.DELEGATES: + name = dr.get_name(c) + if match.test(name) and not ignore.test(name): + graph[name].append(c) + + results = [] + for name in sorted(graph): + for c in graph[name]: + results.extend(self._show_tree(c, depth=depth, dep_getter=dep_getter)) + results.append("") + IPython.core.page.page(six.u(os.linesep.join(results))) + + def show_failed(self, match=None, ignore="spec"): + """ + Show names of any components that failed during evaluation. Ignores + "spec" by default. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + + mid_dashes = "\u250A\u254C\u254C" + bottom_dashes = "\u2514\u254C\u254C" + results = [] + for comp in sorted(self._broker.exceptions, key=dr.get_name): + name = dr.get_name(comp) + if match.test(name) and not ignore.test(name): + color = self._get_color(comp) + results.append(ansiformat(color, name)) + exes = self._broker.exceptions[comp] + last = len(exes) - 1 + for i, ex in enumerate(exes): + dashes = bottom_dashes if i == last else mid_dashes + results.append(ansiformat(color, dashes + str(ex))) + results.append("") + IPython.core.page.page(six.u(os.linesep.join(results))) + + def _show_exceptions(self, comp): + name = dr.get_name(comp) + results = [ansiformat("*brightred*", name)] + results.append(ansiformat("*brightred*", "-" * len(name))) + for e in self._broker.exceptions.get(comp, []): + t = self._broker.tracebacks.get(e) + if t: + results.append(t) + return results + + def show_exceptions(self, match=None, ignore="spec"): + """ + Show exceptions that occurred during evaluation. Ignores "spec" by + default. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + + results = [] + for comp in sorted(self._broker.exceptions, key=dr.get_name): + name = dr.get_name(comp) + if match.test(name) and not ignore.test(name): + results.extend(self._show_exceptions(comp)) + IPython.core.page.page(six.u(os.linesep.join(results))) + + def show_rule_report(self, match=None, ignore=None): + """ + Print a rule report for the matching rules. 
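For example; the match pattern is illustrative::

    >>> models.evaluate_all(match="my_plugins")      # run matching components first
    >>> models.show_rule_report(match="my_plugins")  # then page the rendered report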
+ """ + match, ignore = self._desugar_match_ignore(match, ignore) + results = defaultdict(dict) + + for comp, val in self._broker.items(): + name = dr.get_name(comp) + if plugins.is_rule(comp) and match.test(name) and not ignore.test(name): + kind = self._get_rule_value_kind(val) + + if kind: + body = render(comp, val) + links = render_links(comp) + results[kind][name] = os.linesep.join([body, "", links]) + + report = [] + for kind in ["info", "pass", "fail"]: + color = RULE_COLORS.get(kind, "") + hits = results.get(kind, {}) + for name in sorted(hits): + report.append(ansiformat(color, name)) + report.append(ansiformat(color, "-" * len(name))) + report.append(hits[name]) + report.append("") + IPython.core.page.page(six.u(os.linesep.join(report))) + + def show_timings(self, match=None, ignore="spec", group=dr.GROUPS.single): + """ + Show timings for components that have successfully evaluated. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + + results = [] + total = 0.0 + for comp in dr.COMPONENTS[group]: + name = dr.get_name(comp) + if comp in self._broker.exec_times and match.test(name) and not ignore.test(name): + color = self._get_color(comp) + t = self._broker.exec_times[comp] + total += t + results.append((t, name, color)) + + report = [ansiformat("brightmagenta", "Total: {:.10f} seconds".format(total)), ""] + for timing, name, color in sorted(results, reverse=True): + report.append(ansiformat(color, "{:.10f}: {}".format(timing, name))) + + IPython.core.page.page(six.u(os.linesep.join(report))) + + def find(self, match=None, ignore=None): + """ + Find components that might be available based on the data being + analyzed. + + Args: + match (str, optional): regular expression for matching against + the fully qualified name of components to keep. + ignore (str, optional): regular expression for searching against + the fully qualified name of components to ignore. + """ + match, ignore = self._desugar_match_ignore(match, ignore) + mid_dashes = "\u250A\u254C\u254C" + bottom_dashes = "\u2514\u254C\u254C" + results = [] + for p in sorted(self, key=str.lower): + comp = self[p] + name = dr.get_name(comp) + if match.test(name) and not ignore.test(name): + color = self._get_color(comp) + _type = self._get_type_name(comp) + suffix = self._get_rule_value(comp) + desc = ansiformat(color, "{p} ({n}, {t}".format(p=p, n=name, t=_type)) + results.append(desc + suffix + ansiformat(color, ")")) + if comp in self._broker.exceptions: + exes = self._broker.exceptions[comp] + last = len(exes) - 1 + for i, ex in enumerate(exes): + dashes = bottom_dashes if i == last else mid_dashes + results.append(ansiformat(color, dashes + str(ex))) + IPython.core.page.page(six.u(os.linesep.join(results))) + + +class Holder(dict): + """ + This is a dictionary that holds models for multiple archives. Access each model + set using the path to the archive as the key. See models.keys(). 
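+
+    A sketch of use with two loaded archives (the paths are hypothetical)::
+
+        models.keys()              # e.g. ['host1.tar.gz', 'host2.tar.gz']
+        models['host1.tar.gz']     # the Models instance for that archive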
+    """
+    def _ipython_key_completions_(self):
+        return self.keys()
+
+
+def start_session(paths, change_directory=False, __coverage=None):
+    __cwd = os.path.abspath(os.curdir)
+
+    def callback(brokers):
+        models = Holder()
+        for i, (path, broker) in enumerate(brokers):
+            avail = _get_available_models(broker)
+            if paths:
+                if len(paths) > 1:
+                    models[paths[i]] = Models(broker, avail, __cwd, path, __coverage)
+                else:
+                    models = Models(broker, avail, __cwd, path, __coverage)
+            else:
+                models = Models(broker, avail, __cwd, path, __coverage)
+
+        if change_directory and len(brokers) == 1:
+            __working_path, _ = brokers[0]
+            os.chdir(__working_path)
+        # disable jedi since it won't autocomplete for objects with __getattr__
+        # defined.
+        IPython.core.completer.Completer.use_jedi = False
+        __cfg = Config()
+        __cfg.TerminalInteractiveShell.banner1 = Models.__doc__
+        __ns = {}
+        __ns.update(globals())
+        __ns.update({"models": models})
+        IPython.start_ipython([], user_ns=__ns, config=__cfg)
+
+    with_brokers(paths, callback)
+    if change_directory:
+        os.chdir(__cwd)
+
+
+def _handle_config(config):
+    if config:
+        with open(config) as f:
+            cfg = yaml.load(f, Loader=Loader)
+            load_packages(cfg.get("packages", []))
+            apply_default_enabled(cfg)
+            apply_configs(cfg)
+
+
+def _parse_args():
+    desc = "Perform interactive system analysis with insights components."
+    epilog = """
+        Set env INSIGHTS_FILTERS_ENABLED=False to disable filtering that may
+        cause unexpected missing data.
+    """.strip()
+    p = argparse.ArgumentParser(description=desc, epilog=epilog)
+
+    p.add_argument(
+        "-p", "--plugins", default="", help="Comma separated list of packages to load."
+    )
+    p.add_argument("-c", "--config", help="The insights configuration to apply.")
+    p.add_argument(
+        "--no-coverage",
+        action="store_true",
+        help="Don't show code coverage when viewing source.",
+    )
+    p.add_argument(
+        "--cd",
+        action="store_true",
+        help="Change into the expanded directory for analysis.",
+    )
+    p.add_argument(
+        "--no-defaults", action="store_true", help="Don't load default components."
+    )
+    p.add_argument(
+        "-v", "--verbose", action="store_true", help="Global debug level logging."
+    )
+
+    path_desc = "Archives or paths to analyze. Leave off to target the current system."
+    p.add_argument("paths", nargs="*", help=path_desc)
+
+    return p.parse_args()
+
+
+def main():
+    args = _parse_args()
+    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.ERROR)
+
+    cov = None
+    if not args.no_coverage:
+        from coverage import Coverage
+
+        cov = Coverage(cover_pylib=False)
+        cov.start()
+
+    if not args.no_defaults:
+        load_default_plugins()
+        dr.load_components("insights.parsers", "insights.combiners")
+
+    load_packages(parse_plugins(args.plugins))
+    _handle_config(args.config)
+
+    start_session(args.paths, args.cd, __coverage=cov)
+    if cov:
+        cov.stop()
+        cov.erase()
+
+
+if __name__ == "__main__":
+    main()
From 34c85faa85215ecda01e67fec445d85a3c9977b2 Mon Sep 17 00:00:00 2001
From: Chris Sams
Date: Wed, 3 Jun 2020 14:57:46 -0500
Subject: [PATCH 070/892] Add simple bash completion script for the insights
 command.
(#2607)

Signed-off-by: Christopher Sams
---
 insights-completion.bash | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 insights-completion.bash

diff --git a/insights-completion.bash b/insights-completion.bash
new file mode 100644
index 000000000..9ac6708a1
--- /dev/null
+++ b/insights-completion.bash
@@ -0,0 +1 @@
+complete -o default -W "cat collect inspect info ocpshell shell run version" insights
From 141723947803acfbbbfe3a5bad94ead56870e353 Mon Sep 17 00:00:00 2001
From: Link Dupont
Date: Mon, 8 Jun 2020 11:55:24 -0400
Subject: [PATCH 071/892] client: add suppressed --force option (#2613)

During unregister, this allows cleanup of local files even when API
unregistration fails. This is to enable unregistration alongside RHSM.

Signed-off-by: Link Dupont
---
 insights/client/client.py | 15 ++++++++++-----
 insights/client/config.py |  6 ++++++
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/insights/client/client.py b/insights/client/client.py
index f6293d350..9a416397a 100644
--- a/insights/client/client.py
+++ b/insights/client/client.py
@@ -206,6 +206,11 @@ def _legacy_handle_unregistration(config, pconn):
     """
     returns (bool): True success, False failure
     """
+    def __cleanup_local_files():
+        write_unregistered_file()
+        get_scheduler(config).remove_scheduling()
+        delete_cache_files()
+
     check = get_registration_status(config, pconn)

     for m in check['messages']:
@@ -213,6 +218,8 @@ def _legacy_handle_unregistration(config, pconn):

     if check['unreachable']:
         # Run connection test and exit
+        if config.force:
+            __cleanup_local_files()
         return None

     if check['status']:
@@ -222,9 +229,7 @@ def _legacy_handle_unregistration(config, pconn):
         logger.info('This system is already unregistered.')
     if unreg:
         # only set if unreg was successful
-        write_unregistered_file()
-        get_scheduler(config).remove_scheduling()
-        delete_cache_files()
+        __cleanup_local_files()
     return unreg


@@ -239,8 +244,8 @@ def handle_unregistration(config, pconn):
         return _legacy_handle_unregistration(config, pconn)

     unreg = pconn.unregister()
-    if unreg:
-        # only set if unreg was successful
+    if unreg or config.force:
+        # only set if unreg was successful or --force was set
         write_unregistered_file()
         delete_cache_files()
     return unreg
diff --git a/insights/client/config.py b/insights/client/config.py
index 4dd8febd2..09dfd143e 100644
--- a/insights/client/config.py
+++ b/insights/client/config.py
@@ -143,6 +143,12 @@
         # non-CLI
         'default': None
     },
+    'force': {
+        'default': False,
+        'opt': ['--force'],
+        'help': argparse.SUPPRESS,
+        'action': 'store_true'
+    },
     'group': {
         'default': None,
         'opt': ['--group'],
From f48d5312aecc003afd0ea3a59702f10522a74502 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 10 Jun 2020 03:55:25 +0800
Subject: [PATCH 072/892] Deprecate the aws_instance_type (#2604)

* Remove aws_instance_type

Signed-off-by: Xiangce Liu

* Revert the parser and just mark it as deprecated

Signed-off-by: Xiangce Liu

* revert the spec back to insights_archive.py

Signed-off-by: Xiangce Liu

* resort it in __init__.py

Signed-off-by: Xiangce Liu
---
 insights/parsers/aws_instance_type.py | 9 +++++++++
 insights/specs/default.py             | 1 -
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/insights/parsers/aws_instance_type.py b/insights/parsers/aws_instance_type.py
index 61811798a..813c68203 100644
--- a/insights/parsers/aws_instance_type.py
+++ b/insights/parsers/aws_instance_type.py
@@ -11,11 +11,16 @@
 from insights.parsers import SkipException, ParseException
 from insights import parser, CommandParser
 from
insights.specs import Specs
+from insights.util import deprecated


 @parser(Specs.aws_instance_type)
 class AWSInstanceType(CommandParser):
     """
+    .. note::
+        This parser is deprecated, please use
+        :py:class:`insights.parsers.aws_instance_id.AWSInstanceIdDoc` instead.
+
     Class for parsing the AWS Instance type returned by command
     ``curl -s http://169.254.169.254/latest/meta-data/instance-type``

@@ -38,6 +43,10 @@ class AWSInstanceType(CommandParser):
         'r3.xlarge'
     """

+    def __init__(self, *args, **kwargs):
+        deprecated(AWSInstanceType, "Use AWSInstanceIdDoc in insights.parsers.aws_instance_id instead.")
+        super(AWSInstanceType, self).__init__(*args, **kwargs)
+
     def parse_content(self, content):
         if not content or 'curl: ' in content[0]:
             raise SkipException()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 44a2a8c94..6ea97733b 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -108,7 +108,6 @@ def is_aws(broker):
     aws_instance_id_doc = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/document --connect-timeout 5", deps=[is_aws])
     aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[is_aws])
-    aws_instance_type = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/meta-data/instance-type --connect-timeout 5", deps=[is_aws])

     @datasource(CloudProvider)
     def is_azure(broker):
From 7dbeee2f72c0f2b46b815874acaeeff97141ece0 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 10 Jun 2020 09:39:54 +0800
Subject: [PATCH 073/892] Update the deprecated/useless items in ceph_version
 (#2618)

* Update the deprecated/useless items in ceph_version

Signed-off-by: Xiangce Liu

* Mark CephVersionError as deprecated

Signed-off-by: Xiangce Liu
---
 insights/parsers/ceph_version.py            | 73 ++++++++++-----------
 insights/parsers/tests/test_ceph_version.py | 10 +--
 2 files changed, 39 insertions(+), 44 deletions(-)

diff --git a/insights/parsers/ceph_version.py b/insights/parsers/ceph_version.py
index c26ca0954..0ded27496 100644
--- a/insights/parsers/ceph_version.py
+++ b/insights/parsers/ceph_version.py
@@ -1,6 +1,6 @@
 """
-CephVersion - command ``/usr/bin/ceph -v``
-==========================================
+CephVersion - command ``ceph -v``
+=================================

 This module provides plugins access to the Ceph version information gathered from
 the ``ceph -v`` command. This module parses the community version to the Red Hat
@@ -9,27 +9,16 @@
 The Red Hat Ceph Storage releases and corresponding Ceph package releases are
 documented in https://access.redhat.com/solutions/2045583

-Typical output of the ``ceph -v`` command is::
-
-    ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c)
-
-Note:
-    This module can only be used for Ceph.
-
-Example:
-    >>> ceph_ver = shared[CephVersion]
-    >>> ceph_ver.version
-    '1.3.3'
-    >>> ceph_ver.major
-    '1.3'
-    >>> ceph_ver.minor
-    '3'
-"""
-from ..
import parser, CommandParser import re +from insights import parser, CommandParser +from insights.parsers import SkipException from insights.specs import Specs +from insights.util import deprecated +# TODO: the following metrics need update timely per: +# - https://access.redhat.com/solutions/2045583 community_to_release_map = { "0.94.1-15": {'version': "1.3.0", 'major': '1.3', 'minor': '0', 'downstream_release': 'NA'}, "0.94.3-3": {'version': "1.3.1", 'major': '1.3', 'minor': '1', 'downstream_release': 'NA'}, @@ -64,45 +53,40 @@ } -class CephVersionError(Exception): - """ - Exception subclass for errors related to the content data and the - CephVersion class. - - This exception should not be caught by rules plugins unless it is necessary - for the plugin to return a particular answer when a problem occurs with - ceph version data. If a plugin catches this exception it must reraise it so that - the engine has the opportunity to handle it/log it as necessary. +@parser(Specs.ceph_v) +class CephVersion(CommandParser): """ + Class for parsing the output of command ``ceph -v``. - def __init__(self, message, errors): - """Class constructor""" - - super(CephVersionError, self).__init__(message) - self.errors = errors - self.message = message + Typical output of the ``ceph -v`` command is:: + ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c) -@parser(Specs.ceph_v) -class CephVersion(CommandParser): - """ Class for parsing the content of ``ceph_version``.""" + Example: + >>> ceph_v.version + '1.3.3' + >>> ceph_v.major + '1.3' + >>> ceph_v.minor + '3' + """ def parse_content(self, content): # Parse Ceph Version Content and get Release, Major, Minor number if not content: - raise CephVersionError("Empty Ceph Version Line", content) + raise SkipException("Empty Ceph Version Line", content) ceph_version_line = content[-1] # re search pattern pattern_community = r'((\d{1,2})\.(\d{1,2})\.((\d{1,2})|x))((\-(\d+)))' community_version_mo = re.search(pattern_community, str(ceph_version_line), 0) if not community_version_mo: - raise CephVersionError("Wrong Format Ceph Version", content) + raise SkipException("Wrong Format Ceph Version", content) community_version = community_version_mo.group(0) release_data = community_to_release_map.get(community_version, None) if not release_data: - raise CephVersionError("No Mapping Release Version. Ceph Release Number is Null", content) + raise SkipException("No Mapping Release Version. Ceph Release Number is Null", content) self.version = release_data['version'] self.major = release_data['major'] @@ -113,3 +97,14 @@ def parse_content(self, content): "major": int(community_version_mo.group(3)), "minor": int(community_version_mo.group(4)) } + + +class CephVersionError(Exception): + """ + .. note:: + This class is deprecated, please use :py:class:`insights.parsers.SkipException` instead. 
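+
+    A minimal migration sketch: code that caught this error can catch
+    ``SkipException`` instead::
+
+        try:
+            ceph = CephVersion(context)
+        except SkipException:
+            ceph = None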
+ """ + + def __init__(self, *args, **kwargs): + deprecated(CephVersionError, "Use SkipException instead.") + super(CephVersionError, self).__init__(*args, **kwargs) diff --git a/insights/parsers/tests/test_ceph_version.py b/insights/parsers/tests/test_ceph_version.py index 490ae2b73..721eb0224 100644 --- a/insights/parsers/tests/test_ceph_version.py +++ b/insights/parsers/tests/test_ceph_version.py @@ -1,6 +1,6 @@ from insights.parsers.ceph_version import CephVersion -from insights.parsers.ceph_version import CephVersionError from insights.tests import context_wrap +from insights.parsers import SkipException import pytest CV1 = "ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c)" @@ -18,16 +18,16 @@ def test_ceph_version(): - with pytest.raises(CephVersionError) as error_context2: + with pytest.raises(SkipException) as error_context2: CephVersion(context_wrap(CV2)) assert 'Empty Ceph Version Line' in str(error_context2) - with pytest.raises(CephVersionError) as error_context3: + with pytest.raises(SkipException) as error_context3: CephVersion(context_wrap(CV3)) assert 'Wrong Format Ceph Version' in str(error_context3) - with pytest.raises(CephVersionError) as error_context5: + with pytest.raises(SkipException) as error_context5: CephVersion(context_wrap(CV5)) assert 'Wrong Format Ceph Version' in str(error_context5) - with pytest.raises(CephVersionError) as error_context6: + with pytest.raises(SkipException) as error_context6: CephVersion(context_wrap(CV6)) assert 'No Mapping Release Version' in str(error_context6) From 8975a4068f22b857558110f93e796677a2a07a09 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 10 Jun 2020 10:40:04 +0800 Subject: [PATCH 074/892] Update ceph version mapping (#2619) * Update ceph version mapping Signed-off-by: Xiangce Liu * Add ceph 4.x to mapping Signed-off-by: Xiangce Liu --- insights/parsers/ceph_version.py | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/insights/parsers/ceph_version.py b/insights/parsers/ceph_version.py index 0ded27496..c6e018b77 100644 --- a/insights/parsers/ceph_version.py +++ b/insights/parsers/ceph_version.py @@ -20,24 +20,37 @@ # TODO: the following metrics need update timely per: # - https://access.redhat.com/solutions/2045583 community_to_release_map = { - "0.94.1-15": {'version': "1.3.0", 'major': '1.3', 'minor': '0', 'downstream_release': 'NA'}, + "0.80.8-5": {'version': "1.2.3", 'major': '1.2', 'minor': '3', 'downstream_release': 'NA'}, + "0.94.1-15": {'version': "1.3", 'major': '1.3', 'minor': '0', 'downstream_release': 'NA'}, "0.94.3-3": {'version': "1.3.1", 'major': '1.3', 'minor': '1', 'downstream_release': 'NA'}, "0.94.5-9": {'version': "1.3.2", 'major': '1.3', 'minor': '2', 'downstream_release': 'NA'}, + "0.94.5-12": {'version': "1.3.2", 'major': '1.3', 'minor': '2', 'downstream_release': 'async'}, + "0.94.5-13": {'version': "1.3.2", 'major': '1.3', 'minor': '2', 'downstream_release': 'async'}, + "0.94.5-14": {'version': "1.3.2", 'major': '1.3', 'minor': '2', 'downstream_release': 'async'}, + "0.94.5-15": {'version': "1.3.2", 'major': '1.3', 'minor': '2', 'downstream_release': 'async'}, "0.94.9-3": {'version': "1.3.3", 'major': '1.3', 'minor': '3', 'downstream_release': 'NA'}, + "0.94.9-8": {'version': "1.3.3", 'major': '1.3', 'minor': '3', 'downstream_release': 'async'}, "0.94.9-9": {'version': "1.3.3", 'major': '1.3', 'minor': '3', 'downstream_release': 'async 2'}, "0.94.10-2": {'version': "1.3.4", 'major': '1.3', 'minor': '4', 'downstream_release': 'NA'}, 
"10.2.2-38": {'version': "2.0", 'major': '2', 'minor': '0', 'downstream_release': '0'}, + "10.2.2-41": {'version': "2.0", 'major': '2', 'minor': '0', 'downstream_release': 'async'}, "10.2.3-13": {'version': "2.1", 'major': '2', 'minor': '1', 'downstream_release': '0'}, - "10.2.5": {'version': "2.2", 'major': '2', 'minor': '2', 'downstream_release': '0'}, + "10.2.3-17": {'version': "2.1", 'major': '2', 'minor': '1', 'downstream_release': 'async'}, "10.2.5-37": {'version': "2.2", 'major': '2', 'minor': '2', 'downstream_release': '0'}, "10.2.7-27": {'version': "2.3", 'major': '2', 'minor': '3', 'downstream_release': '0'}, "10.2.7-28": {'version': "2.3", 'major': '2', 'minor': '3', 'downstream_release': 'async'}, + "10.2.7-32": {'version': "2.4", 'major': '2', 'minor': '4', 'downstream_release': '0'}, "10.2.7-48": {'version': "2.4", 'major': '2', 'minor': '4', 'downstream_release': 'async'}, "10.2.10-16": {'version': "2.5", 'major': '2', 'minor': '5', 'downstream_release': '0'}, + "10.2.10-17": {'version': "2.5", 'major': '2', 'minor': '5', 'downstream_release': 'async'}, "10.2.10-28": {'version': "2.5.1", 'major': '2', 'minor': '5', 'downstream_release': '1'}, "10.2.10-40": {'version': "2.5.2", 'major': '2', 'minor': '5', 'downstream_release': '2'}, "10.2.10-43": {'version': "2.5.3", 'major': '2', 'minor': '5', 'downstream_release': '3'}, "10.2.10-49": {'version': "2.5.4", 'major': '2', 'minor': '5', 'downstream_release': '4'}, + "10.2.10-51": {'version': "2.5.5", 'major': '2', 'minor': '5', 'downstream_release': '5'}, + "12.2.1-40": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '0'}, + "12.2.1-45": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '1'}, + "12.2.1-46": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '1 CVE'}, "12.2.4-6": {'version': "3.0.2", 'major': '3', 'minor': '0', 'downstream_release': '2'}, "12.2.4-10": {'version': "3.0.3", 'major': '3', 'minor': '0', 'downstream_release': '3'}, "12.2.4-30": {'version': "3.0.4", 'major': '3', 'minor': '0', 'downstream_release': '4'}, @@ -50,6 +63,11 @@ "12.2.12-45": {'version': "3.3", 'major': '3', 'minor': '3', 'downstream_release': '0'}, "12.2.12-48": {'version': "3.3", 'major': '3', 'minor': '3', 'downstream_release': 'async'}, "12.2.12-74": {'version': "3.3.1", 'major': '3', 'minor': '3', 'downstream_release': '1'}, + "12.2.12-79": {'version': "3.3.1", 'major': '3', 'minor': '3', 'downstream_release': 'async'}, + "12.2.12-84": {'version': "3.3.2", 'major': '3', 'minor': '3', 'downstream_release': '2'}, + "12.2.12-101": {'version': "3.3.4", 'major': '3', 'minor': '3', 'downstream_release': '4'}, + "14.2.4-125": {'version': "4.0", 'major': '4', 'minor': '0', 'downstream_release': '0'}, + "14.2.8-59": {'version': "4.1", 'major': '4', 'minor': '1', 'downstream_release': '0'}, } From 995d4289d7c80a29928900a5ba44dd05e06c5345 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 10 Jun 2020 13:44:03 +0800 Subject: [PATCH 075/892] Add is_els to CephVersion (#2621) * Add is_els to CephVersion Signed-off-by: Xiangce Liu * Add test case for CephVersion.is_els Signed-off-by: Xiangce Liu --- insights/combiners/ceph_version.py | 52 ++++++++++++------- insights/combiners/tests/test_ceph_version.py | 4 ++ insights/parsers/ceph_version.py | 15 +++++- insights/parsers/tests/test_ceph_version.py | 16 ++++++ 4 files changed, 68 insertions(+), 19 deletions(-) diff --git a/insights/combiners/ceph_version.py b/insights/combiners/ceph_version.py index 65c77db65..98cc95e01 100644 --- 
a/insights/combiners/ceph_version.py
+++ b/insights/combiners/ceph_version.py
@@ -6,23 +6,6 @@
 the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers.
 The order from most preferred to least preferred is `CephVersion``, ``CephInsights``,
 ``CephReport``.

-Examples:
-    >>> type(cv)
-    <class 'insights.combiners.ceph_version.CephVersion'>
-    >>> cv.version
-    '3.2'
-    >>> cv.major
-    '3'
-    >>> cv.minor
-    '2'
-    >>> cv.downstream_release
-    '0'
-    >>> cv.upstream_version["release"]
-    12
-    >>> cv.upstream_version["major"]
-    2
-    >>> cv.upstream_version["minor"]
-    8
 """

 from insights import combiner
@@ -37,7 +20,37 @@ class CephVersion(object):
     """
     Combiner for Ceph Version information. It uses the results of
     the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers.
-    The order from most preferred to least preferred is `CephVersion``, ``CephInsights``, ``CephReport``.
+
+    The preferred parsing order is ``CephVersion``, ``CephInsights``, ``CephReport``.
+
+    Attributes:
+        version (str): The Red Hat release version
+        major (str): The major version of Red Hat release version
+        minor (str): The minor version of Red Hat release version
+        is_els (boolean): True if the version is in the 'Extended life cycle support (ELS) add-on' phase
+        downstream_release (str): The downstream release info
+        upstream_version (dict): The detailed upstream version info with the
+            following keys `release (int)`, `major (int)` and `minor (int)`.
+
+    Examples:
+        >>> type(cv)
+        <class 'insights.combiners.ceph_version.CephVersion'>
+        >>> cv.version
+        '3.2'
+        >>> cv.major
+        '3'
+        >>> cv.minor
+        '2'
+        >>> cv.is_els
+        False
+        >>> cv.downstream_release
+        '0'
+        >>> cv.upstream_version["release"]
+        12
+        >>> cv.upstream_version["major"]
+        2
+        >>> cv.upstream_version["minor"]
+        8
     """

     def __init__(self, cv, ci, cr):
@@ -45,6 +58,7 @@ def __init__(self, cv, ci, cr):
             self.version = cv.version
             self.major = cv.major
             self.minor = cv.minor
+            self.is_els = cv.is_els
             self.downstream_release = cv.downstream_release
             self.upstream_version = cv.upstream_version
         elif ci:
@@ -53,6 +67,7 @@ def __init__(self, cv, ci, cr):
             self.version = cv.version
             self.major = cv.major
             self.minor = cv.minor
+            self.is_els = cv.is_els
             self.downstream_release = cv.downstream_release
             self.upstream_version = cv.upstream_version
         else:
@@ -61,5 +76,6 @@ def __init__(self, cv, ci, cr):
             self.version = cv.version
             self.major = cv.major
             self.minor = cv.minor
+            self.is_els = cv.is_els
             self.downstream_release = cv.downstream_release
             self.upstream_version = cv.upstream_version
diff --git a/insights/combiners/tests/test_ceph_version.py b/insights/combiners/tests/test_ceph_version.py
index 60a290c34..881d25469 100644
--- a/insights/combiners/tests/test_ceph_version.py
+++ b/insights/combiners/tests/test_ceph_version.py
@@ -85,6 +85,7 @@ def test_ceph_version():
     assert ret.version == "3.2"
     assert ret.major == "3"
     assert ret.minor == "2"
+    assert not ret.is_els
     assert ret.downstream_release == "0"
     assert ret.upstream_version["release"] == 12
     assert ret.upstream_version["major"] == 2
@@ -98,6 +99,7 @@ def test_ceph_version_2():
     assert ret.version == "3.2"
     assert ret.major == "3"
     assert ret.minor == "2"
+    assert not ret.is_els
     assert ret.downstream_release == "0"
     assert ret.upstream_version["release"] == 12
     assert ret.upstream_version["major"] == 2
@@ -110,6 +112,7 @@ def test_ceph_insights():
     assert ret.version == "3.2"
     assert ret.major == "3"
     assert ret.minor == "2"
+    assert not ret.is_els
     assert ret.downstream_release == "0"
     assert ret.upstream_version["release"] == 12
     assert ret.upstream_version["major"] == 2
@@ -122,6 +125,7 @@ def test_ceph_report():
     assert ret.version == "3.2"
     assert ret.major
== "3" assert ret.minor == "2" + assert not ret.is_els assert ret.downstream_release == "0" assert ret.upstream_version["release"] == 12 assert ret.upstream_version["major"] == 2 diff --git a/insights/parsers/ceph_version.py b/insights/parsers/ceph_version.py index c6e018b77..20dd4c416 100644 --- a/insights/parsers/ceph_version.py +++ b/insights/parsers/ceph_version.py @@ -19,6 +19,7 @@ # TODO: the following metrics need update timely per: # - https://access.redhat.com/solutions/2045583 +# - https://access.redhat.com/articles/1372203 community_to_release_map = { "0.80.8-5": {'version': "1.2.3", 'major': '1.2', 'minor': '3', 'downstream_release': 'NA'}, "0.94.1-15": {'version': "1.3", 'major': '1.3', 'minor': '0', 'downstream_release': 'NA'}, @@ -47,7 +48,7 @@ "10.2.10-40": {'version': "2.5.2", 'major': '2', 'minor': '5', 'downstream_release': '2'}, "10.2.10-43": {'version': "2.5.3", 'major': '2', 'minor': '5', 'downstream_release': '3'}, "10.2.10-49": {'version': "2.5.4", 'major': '2', 'minor': '5', 'downstream_release': '4'}, - "10.2.10-51": {'version': "2.5.5", 'major': '2', 'minor': '5', 'downstream_release': '5'}, + "10.2.10-51": {'version': "2.5.5", 'major': '2', 'minor': '5', 'downstream_release': '5', 'els': True}, "12.2.1-40": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '0'}, "12.2.1-45": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '1'}, "12.2.1-46": {'version': "3.0", 'major': '3', 'minor': '0', 'downstream_release': '1 CVE'}, @@ -80,6 +81,15 @@ class CephVersion(CommandParser): ceph version 0.94.9-9.el7cp (b83334e01379f267fb2f9ce729d74a0a8fa1e92c) + Attributes: + version (str): The Red Hat release version + major (str): The major version of Red Hat release version + minor (str): The minor version of Red Hat release version + is_els (boolean): If the verion in 'Extended life cycle support (ELS) add-on' phase + downstream_release (str): The downstream release info + upstream_version (dict): The detailed upstream version info with the + following keys `release (int)`, `major (int)` and `minor (int)`. 
+ Example: >>> ceph_v.version '1.3.3' @@ -87,6 +97,8 @@ class CephVersion(CommandParser): '1.3' >>> ceph_v.minor '3' + >>> ceph_v.is_els + False """ def parse_content(self, content): @@ -109,6 +121,7 @@ def parse_content(self, content): self.version = release_data['version'] self.major = release_data['major'] self.minor = release_data['minor'] + self.is_els = release_data.get('els', False) self.downstream_release = release_data['downstream_release'] self.upstream_version = { "release": int(community_version_mo.group(2)), diff --git a/insights/parsers/tests/test_ceph_version.py b/insights/parsers/tests/test_ceph_version.py index 721eb0224..a896ad0ca 100644 --- a/insights/parsers/tests/test_ceph_version.py +++ b/insights/parsers/tests/test_ceph_version.py @@ -15,6 +15,7 @@ CV9 = "ceph version 12.2.5-59.el7cp (d4b9f17b56b3348566926849313084dd6efc2ca2)" CV10 = "ceph version 12.2.8-128.el7cp (030358773c5213a14c1444a5147258672b2dc15f)" CV10_1 = "ceph version 12.2.12-74.el7cp (030358773c5213a14c1444a5147258672b2dc15f)" +CV_els = "ceph version 10.2.10-51.el7cp (030358773c5213a14c1444a5147258672b2dc15f)" def test_ceph_version(): @@ -35,46 +36,61 @@ def test_ceph_version(): assert ceph_version1.version == "1.3.3" assert ceph_version1.major == '1.3' assert ceph_version1.minor == "3" + assert not ceph_version1.is_els assert ceph_version1.downstream_release == "async 2" ceph_version4 = CephVersion(context_wrap(CV4)) assert ceph_version4.version == "2.0" assert ceph_version4.major == '2' assert ceph_version4.minor == "0" + assert not ceph_version4.is_els assert ceph_version4.downstream_release == "0" ceph = CephVersion(context_wrap(CV_5)) assert ceph.version == "2.2" assert ceph.major == '2' assert ceph.minor == '2' + assert not ceph.is_els assert ceph.downstream_release == "0" ceph_version7 = CephVersion(context_wrap(CV7)) assert ceph_version7.version == "2.2" assert ceph_version7.major == '2' assert ceph_version7.minor == "2" + assert not ceph_version7.is_els assert ceph_version7.downstream_release == "0" ceph_version8 = CephVersion(context_wrap(CV8)) assert ceph_version8.version == "2.3" assert ceph_version8.major == '2' assert ceph_version8.minor == "3" + assert not ceph_version8.is_els assert ceph_version8.downstream_release == "0" ceph_version9 = CephVersion(context_wrap(CV9)) assert ceph_version9.version == "3.1.1" assert ceph_version9.major == '3' assert ceph_version9.minor == "1" + assert not ceph_version9.is_els assert ceph_version9.downstream_release == "1" ceph_version10 = CephVersion(context_wrap(CV10)) assert ceph_version10.version == "3.2.2" assert ceph_version10.major == '3' assert ceph_version10.minor == "2" + assert not ceph_version10.is_els assert ceph_version10.downstream_release == "2" ceph_version10_1 = CephVersion(context_wrap(CV10_1)) assert ceph_version10_1.version == "3.3.1" assert ceph_version10_1.major == '3' assert ceph_version10_1.minor == "3" + assert not ceph_version10_1.is_els assert ceph_version10_1.downstream_release == "1" + + ceph_version_els = CephVersion(context_wrap(CV_els)) + assert ceph_version_els.version == "2.5.5" + assert ceph_version_els.major == '2' + assert ceph_version_els.minor == "5" + assert ceph_version_els.is_els + assert ceph_version_els.downstream_release == "5" From c941ef97b308e721635914926881d4a517e3c14d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 10 Jun 2020 13:35:06 -0400 Subject: [PATCH 076/892] only encode() if in python2 (#2612) * only encode() if in python2 Signed-off-by: Jeremy Crafts * fix encoding in unit test 
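
The command strings stay as str on Python 3 and are passed to call()
unchanged; the explicit .encode() is now applied only on Python 2, as
exercised by the updated unit tests.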
Signed-off-by: Jeremy Crafts --- insights/client/apps/compliance/__init__.py | 15 ++++++++++++--- insights/tests/client/apps/test_compliance.py | 11 +++++++++-- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index c183ed7d8..1881c180c 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -9,6 +9,7 @@ from sys import exit from insights.util.subproc import call import os +import six NONCOMPLIANT_STATUS = 2 COMPLIANCE_CONTENT_TYPE = 'application/vnd.redhat.compliance.something+tgz' @@ -85,7 +86,10 @@ def profile_files(self): return glob("{0}*rhel{1}*.xml".format(POLICY_FILE_LOCATION, self.os_release())) def find_scap_policy(self, profile_ref_id): - rc, grep = call(('grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files())).encode(), keep_rc=True) + grepcmd = 'grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files()) + if not six.PY3: + grepcmd = grepcmd.encode() + rc, grep = call(grepcmd, keep_rc=True) if rc: logger.error('XML profile file not found matching ref_id {0}\n{1}\n'.format(profile_ref_id, grep)) exit(constants.sig_kill_bad) @@ -107,7 +111,9 @@ def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path= env = os.environ.copy() env.update({'TZ': 'UTC'}) oscap_command = self.build_oscap_command(profile_ref_id, policy_xml, output_path, tailoring_file_path) - rc, oscap = call(oscap_command.encode(), keep_rc=True, env=env) + if not six.PY3: + oscap_command = oscap_command.encode() + rc, oscap = call(oscap_command, keep_rc=True, env=env) if rc and rc != NONCOMPLIANT_STATUS: logger.error('Scan failed') logger.error(oscap) @@ -116,7 +122,10 @@ def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path= self.archive.copy_file(output_path) def _assert_oscap_rpms_exist(self): - rc, rpm = call('rpm -qa ' + ' '.join(REQUIRED_PACKAGES), keep_rc=True) + rpmcmd = 'rpm -qa ' + ' '.join(REQUIRED_PACKAGES) + if not six.PY3: + rpmcmd = rpmcmd.encode() + rc, rpm = call(rpmcmd, keep_rc=True) if rc: logger.error('Tried running rpm -qa but failed: {0}.\n'.format(rpm)) exit(constants.sig_kill_bad) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index c8a727f40..bb76fdb8f 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -4,6 +4,7 @@ from mock.mock import patch, Mock, mock_open from pytest import raises import os +import six PATH = '/usr/share/xml/scap/ref_id.xml' @@ -110,7 +111,10 @@ def test_run_scan(config, call): env = os.environ env.update({'TZ': 'UTC'}) compliance_client.run_scan('ref_id', '/nonexistent', output_path) - call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) + if six.PY3: + call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent'), keep_rc=True, env=env) + else: + call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) @patch("insights.client.apps.compliance.call", return_value=(1, 'bad things happened'.encode('utf-8'))) @@ -122,7 +126,10 @@ def test_run_scan_fail(config, call): env.update({'TZ': 'UTC'}) with raises(SystemExit): compliance_client.run_scan('ref_id', '/nonexistent', output_path) - call.assert_called_with(("oscap xccdf eval --profile 
ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) + if six.PY3: + call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent'), keep_rc=True, env=env) + else: + call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) @patch("insights.client.config.InsightsConfig") From 31503330b56c718ada0dfa48b66b3f2ccdef5236 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Mon, 15 Jun 2020 09:51:06 +0200 Subject: [PATCH 077/892] Add full name to CloudProvider combiner (#2622) Signed-off-by: Jitka Obselkova --- insights/combiners/cloud_provider.py | 16 ++++++++++++++++ insights/combiners/tests/test_cloud_provider.py | 13 +++++++++++++ 2 files changed, 29 insertions(+) diff --git a/insights/combiners/cloud_provider.py b/insights/combiners/cloud_provider.py index fdccf278a..f2c001171 100644 --- a/insights/combiners/cloud_provider.py +++ b/insights/combiners/cloud_provider.py @@ -20,6 +20,8 @@ True >>> cp_aws.cp_uuid['aws'] 'EC2F58AF-2DAD-C57E-88C0-A81CB6084290' + >>> cp_aws.long_name + 'Amazon Web Services' >>> cp_azure.cloud_provider 'azure' >>> cp_azure.cp_yum == {'aws': [], 'google': [], 'azure': ['rhui-microsoft-azure-rhel7-2.2-74'], 'alibaba': []} @@ -91,6 +93,13 @@ class CloudProvider(object): GOOGLE = __GOOGLE.name """GOOGLE Cloud Provider Constant""" + _long_name_mapping = { + 'alibaba': 'Alibaba Cloud', + 'aws': 'Amazon Web Services', + 'azure': 'Microsoft Azure', + 'google': 'Google Cloud' + } + def __init__(self, rpms, dmidcd, yrl): self.cp_bios_vendor = self._get_cp_bios_vendor(dmidcd) @@ -234,3 +243,10 @@ def _get_cp_from_manuf(self, dmidcd): else '' ) return prov + + @property + def long_name(self): + """ + Return long name for the specific cloud provider. 
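+
+        For example, ``'aws'`` maps to ``'Amazon Web Services'``, and
+        ``None`` is returned when no cloud provider was detected.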
+        """
+        return self._long_name_mapping.get(self.cloud_provider)
diff --git a/insights/combiners/tests/test_cloud_provider.py b/insights/combiners/tests/test_cloud_provider.py
index 1631acc55..8a56289b0 100644
--- a/insights/combiners/tests/test_cloud_provider.py
+++ b/insights/combiners/tests/test_cloud_provider.py
@@ -578,6 +578,7 @@ def test_rpm_google():
     assert ret.cloud_provider == CloudProvider.GOOGLE
     assert 'google-rhui-client-5.1.100-1.el7' in ret.cp_rpms.get(CloudProvider.GOOGLE)
     assert 'google-rhui-client-5.1.100-1.el6' in ret.cp_rpms.get(CloudProvider.GOOGLE)
+    assert ret.long_name == 'Google Cloud'


 def test_rpm_aws():
@@ -587,6 +588,7 @@ def test_rpm_aws():
     ret = CloudProvider(irpms, dmi, yrl)
     assert ret.cloud_provider == CloudProvider.AWS
     assert ret.cp_rpms.get(CloudProvider.AWS)[0] == 'rh-amazon-rhui-client-2.2.124-1.el7'
+    assert ret.long_name == 'Amazon Web Services'


 def test_rpm_azure():
@@ -596,6 +598,7 @@ def test_rpm_azure():
     ret = CloudProvider(irpms, dmi, yrl)
     assert ret.cloud_provider == CloudProvider.AZURE
     assert ret.cp_rpms.get(CloudProvider.AZURE)[0] == 'WALinuxAgent-2.2.18-1.el7'
+    assert ret.long_name == 'Microsoft Azure'


 def test__yum_azure():
@@ -650,6 +653,16 @@ def test_dmidecode_alibaba():
     ret = CloudProvider(irpms, dmi, yrl)
     assert ret.cloud_provider == CloudProvider.ALIBABA
     assert ret.cp_manufacturer[CloudProvider.ALIBABA] == 'Alibaba Cloud'
+    assert ret.long_name == 'Alibaba Cloud'
+
+
+def test_no_data():
+    irpms = IRPMS(context_wrap(RPMS))
+    dmi = DMIDecode(context_wrap(DMIDECODE))
+    yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE))
+    ret = CloudProvider(irpms, dmi, yrl)
+    assert ret.cloud_provider is None
+    assert ret.long_name is None


 def test_docs():
From 7af327bbbcd93adab07afd7ff6be2f9667b14ff1 Mon Sep 17 00:00:00 2001
From: Sachin
Date: Mon, 15 Jun 2020 13:34:03 +0530
Subject: [PATCH 078/892] [RpmVPackages] Add 'chrony' package verification
 (#2616)

Signed-off-by: Sachin Patil
Co-authored-by: Xiangce Liu
---
 insights/parsers/rpm_v_packages.py            | 15 ++++++++++++---
 insights/parsers/tests/test_rpm_v_packages.py |  9 +++++++++
 insights/specs/default.py                     |  2 +-
 insights/specs/insights_archive.py            |  2 +-
 4 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/insights/parsers/rpm_v_packages.py b/insights/parsers/rpm_v_packages.py
index 4b5fbc433..4380dbd57 100644
--- a/insights/parsers/rpm_v_packages.py
+++ b/insights/parsers/rpm_v_packages.py
@@ -1,6 +1,15 @@
 """
-RpmVPackages - command ``/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo``
-==========================================================================================
+RpmVPackages - command ``/bin/rpm -V <packages>``
+=================================================
+
+The following packages are verified:
+    - coreutils
+    - procps
+    - procps-ng
+    - shadow-utils
+    - passwd
+    - sudo
+    - chrony
 """

 from insights.core import CommandParser
@@ -11,7 +20,7 @@

 @parser(Specs.rpm_V_packages)
 class RpmVPackages(CommandParser):
     """
-    Class for parsing ``/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo`` command.
+    Class for parsing ``/bin/rpm -V <packages>`` command.
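+
+    A typical verification line looks like ``S.5....T.  c /etc/sudoers``:
+    the attribute flags, an optional file type marker and the file name.
+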
Attributes: packages_list (list of dictionaries): every dictionary contains information about one entry diff --git a/insights/parsers/tests/test_rpm_v_packages.py b/insights/parsers/tests/test_rpm_v_packages.py index 233dfbb30..d4cc0de81 100644 --- a/insights/parsers/tests/test_rpm_v_packages.py +++ b/insights/parsers/tests/test_rpm_v_packages.py @@ -13,6 +13,12 @@ missing /var/db/sudo/lectured (Permission denied) """ +TEST_RPM_2 = """ +package procps is not installed +S.5....T. c /etc/sudoers +S.5....T. c /etc/chrony.conf +""" + def test_rpm_empty(): rpm_pkgs = RpmVPackages(context_wrap([])) @@ -38,6 +44,9 @@ def test_rpm(): assert rpm_pkgs.packages_list[3] == line_4 assert rpm_pkgs.packages_list[4] == line_5 + rpm_pkgs_2 = RpmVPackages(context_wrap(TEST_RPM_2)) + assert rpm_pkgs_2.packages_list[2].get('file', None) == '/etc/chrony.conf' + def test_doc_examples(): env = { diff --git a/insights/specs/default.py b/insights/specs/default.py index 6ea97733b..4c7327058 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -854,7 +854,7 @@ def rhev_data_center(broker): rndc_status = simple_command("/usr/sbin/rndc status") root_crontab = simple_command("/usr/bin/crontab -l -u root") route = simple_command("/sbin/route -n") - rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo", keep_rc=True) + rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) rsyslog_conf = simple_file("/etc/rsyslog.conf") samba = simple_file("/etc/samba/smb.conf") saphostctrl_listinstances = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function ListInstances") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index fb3f7eab5..3094c67c3 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -243,7 +243,7 @@ class InsightsArchiveSpecs(Specs): root_crontab = simple_file("insights_commands/crontab_-l_-u_root") rndc_status = simple_file("insights_commands/rndc_status") route = simple_file("insights_commands/route_-n") - rpm_V_packages = simple_file("insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo") + rpm_V_packages = first_file("insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo") sapcontrol_getsystemupdatelist = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sapcontrol_getsystemupdatelist") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") From 0627fbbd300174480fe37ab9a47b79415a91c92b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 15 Jun 2020 16:40:50 +0800 Subject: [PATCH 079/892] Add one new signing key (#2626) Signed-off-by: Huanhuan Li Co-authored-by: Xiangce Liu --- insights/parsers/installed_rpms.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index f84bb056e..cd7010026 100644 --- a/insights/parsers/installed_rpms.py +++ b/insights/parsers/installed_rpms.py @@ -339,7 +339,7 @@ class InstalledRpm(object): 'F76F66C3D4082792', '199e2f91fd431d51', '5326810137017186', '45689c882fa658e0', '219180cddb42a60e', 
'7514f77d8366b0d9', 'fd372689897da07a', '938a80caf21541eb',
-        '08b871e6a5787476',
+        '08b871e6a5787476', '1AC4971355A34A82',
         'E191DDB2C509E861'
     ]
     """
From c21c19ce2befa435717683f296ebd7deebd6bcfa Mon Sep 17 00:00:00 2001
From: Bob Fahr
Date: Mon, 15 Jun 2020 03:57:57 -0500
Subject: [PATCH 080/892] Add new attributes to dmsetup parser (#2624)

* Add new attributes to dmsetup parser

Added new attributes with more detail

Updated tests

Signed-off-by: Bob Fahr

* Add examples and tests for examples

Signed-off-by: Bob Fahr

Co-authored-by: Xiangce Liu
---
 insights/parsers/dmsetup.py                 | 41 ++++++++++--
 insights/parsers/tests/test_dmsetup_info.py | 71 ++++++++++++++++++++-
 2 files changed, 106 insertions(+), 6 deletions(-)

diff --git a/insights/parsers/dmsetup.py b/insights/parsers/dmsetup.py
index 7bd46a5c3..808a80ad5 100644
--- a/insights/parsers/dmsetup.py
+++ b/insights/parsers/dmsetup.py
@@ -10,12 +10,19 @@
 -----------------------------------------

 """
-
+from collections import namedtuple
 from insights import parser, CommandParser
 from insights.parsers import parse_delimited_table
 from insights.specs import Specs


+SetupInfo = namedtuple('SetupInfo', [
+    'name', 'major', 'minor', 'open', 'segments', 'events',
+    'live_table', 'inactive_table', 'suspended', 'readonly', 'uuid']
+)
+""" Data structure to represent dmsetup information """
+
+
 @parser(Specs.dmsetup_info)
 class DmsetupInfo(CommandParser):
     """
@@ -52,16 +59,25 @@ class DmsetupInfo(CommandParser):
         uuids (list): UUID
         by_name (dict): Access to each device by devicename
         by_uuid (dict): Access to each device by uuid
+        info (list): List of devices found, in order using SetupInfo structure

     Example:
-        >>> len(info)
+        >>> len(setup_info)
         6
-        >>> info.names[0]
+        >>> setup_info.names[0]
         'VG00-tmp'
-        >>> info[1]['Maj']
+        >>> setup_info[1]['Maj']
         '253'
-        >>> info[1]['Stat']
+        >>> setup_info[1]['Stat']
         'L--w'
+        >>> setup_info.info[-1].name
+        'VG00-var_log_audit'
+        >>> setup_info.info[-1].major
+        253
+        >>> setup_info.info[-1].live_table
+        True
+        >>> setup_info.info[-1].readonly
+        False
     """

     def parse_content(self, content):
@@ -71,6 +87,21 @@ def parse_content(self, content):
         self.by_name = dict((dm['Name'], dm) for dm in self.data if 'Name' in dm)
         self.uuids = [dm['UUID'] for dm in self.data if 'UUID' in dm]
         self.by_uuid = dict((dm['UUID'], dm) for dm in self.data if 'UUID' in dm)
+        self.info = []
+        for dm in self.data:
+            self.info.append(SetupInfo(
+                name=dm.get('Name'),
+                major=int(dm.get('Maj')) if 'Maj' in dm and dm.get('Maj').isdigit() else None,
+                minor=int(dm.get('Min')) if 'Min' in dm and dm.get('Min').isdigit() else None,
+                open=int(dm.get('Open')) if 'Open' in dm and dm.get('Open').isdigit() else None,
+                segments=int(dm.get('Targ')) if 'Targ' in dm and dm.get('Targ').isdigit() else None,
+                events=int(dm.get('Event')) if 'Event' in dm and dm.get('Event').isdigit() else None,
+                live_table=dm.get('Stat', '----')[0] == 'L',
+                inactive_table=dm.get('Stat', '----')[1] == 'I',
+                suspended=dm.get('Stat', '----')[2] == 's',
+                readonly=dm.get('Stat', '----')[3] == 'r',
+                uuid=dm.get('UUID')
+            ))

     def __len__(self):
         """
diff --git a/insights/parsers/tests/test_dmsetup_info.py b/insights/parsers/tests/test_dmsetup_info.py
index 73e0737c8..70f3b2968 100644
--- a/insights/parsers/tests/test_dmsetup_info.py
+++ b/insights/parsers/tests/test_dmsetup_info.py
@@ -1,4 +1,6 @@
-from insights.parsers.dmsetup import DmsetupInfo
+import doctest
+from insights.parsers import dmsetup
+from insights.parsers.dmsetup import DmsetupInfo, SetupInfo
 from insights.tests
import context_wrap DMSETUP_INFO_1 = """ @@ -16,6 +18,11 @@ VG00-opt 253 7 L--w 1 4 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxIiCYm5hcvgQdXynPGBfHQLrtE3sqUKT2 """.strip() +DMSETUP_INFO_2 = """ +Name Maj Min Stat Open Targ Event UUID +VG00-tmp 253 8 xIsr 1 1 a LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4 +""".strip() + def test_dmsetup_info(): r = DmsetupInfo(context_wrap(DMSETUP_INFO_1)) @@ -28,3 +35,65 @@ def test_dmsetup_info(): 'VG00-var_log_audit', 'VG_DB-vol01', 'VG00-opt'] assert r.names == [dm['Name'] for dm in r] assert len(r.by_uuid) == 11 + assert r.info[0] == SetupInfo( + name='VG00-tmp', + major=253, + minor=8, + open=1, + segments=1, + events=0, + live_table=True, + inactive_table=False, + suspended=False, + readonly=False, + uuid='LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4' + ) + assert r.info[-1] == SetupInfo( + name='VG00-opt', + major=253, + minor=7, + open=1, + segments=4, + events=0, + live_table=True, + inactive_table=False, + suspended=False, + readonly=False, + uuid='LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxIiCYm5hcvgQdXynPGBfHQLrtE3sqUKT2' + ) + + +def test_dmsetup_setupinfo(): + r = DmsetupInfo(context_wrap(DMSETUP_INFO_2)) + assert r.info[0] == SetupInfo( + name='VG00-tmp', + major=253, + minor=8, + open=1, + segments=1, + events=None, + live_table=False, + inactive_table=True, + suspended=True, + readonly=True, + uuid='LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4' + ) + + +DMSETUP_EXAMPLES = """ +Name Maj Min Stat Open Targ Event UUID +VG00-tmp 253 8 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4 +VG00-home 253 3 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxCqXOnbGe2zjhX923dFiIdl1oi7mO9tXp +VG00-var 253 6 L--w 1 2 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxicvyvt67113nTb8vMlGfgdEjDx0LKT2O +VG00-swap 253 1 L--w 2 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax3Ll2XhOYZkylx1CjOQi7G4yHgrIOsyqG +VG00-root 253 0 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxKpnAKYhrYMYMNMwjegkW965bUgtJFTRY +VG00-var_log_audit 253 5 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTaxwQ8R0XWJRm86QX3befq1cHRy47Von6ZW +""".strip() + + +def test_examples(): + env = { + 'setup_info': DmsetupInfo(context_wrap(DMSETUP_EXAMPLES)) + } + failed, total = doctest.testmod(dmsetup, globs=env) + assert failed == 0 From 69289db255d84da4ea292fdb4de1e972dade3330 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Wed, 17 Jun 2020 10:31:04 +0530 Subject: [PATCH 081/892] Fix context checking in create_context_fix (#2628) After archive is extracted, if top level dirs end with string which is a valid compression type, current logic determines context as ClusterArchiveContext. This change double checks if path is a file along with ending with a compression extension. 
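
A rough illustration of the case this guards against (paths are
hypothetical): after extraction, a top level entry may be a directory
whose name merely ends in a compression suffix:

    /tmp/extracted/
        data.tar.gz/        <- a directory, not a nested archive
        etc/

The name check alone classified such a tree as a ClusterArchiveContext;
the added os.path.isfile() check lets it fall through to normal context
detection.
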
Signed-off-by: Rohan Arora --- insights/core/hydration.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/insights/core/hydration.py b/insights/core/hydration.py index 61ff4e568..da24043e6 100644 --- a/insights/core/hydration.py +++ b/insights/core/hydration.py @@ -41,7 +41,9 @@ def identify(files): def create_context(path, context=None): top = os.listdir(path) - arc = [os.path.join(path, f) for f in top if f.endswith(archives.COMPRESSION_TYPES)] + arc = [os.path.join(path, f) for f in top + if f.endswith(archives.COMPRESSION_TYPES) and + os.path.isfile(os.path.join(path, f))] if arc: return ClusterArchiveContext(path, all_files=arc) From 56bd9178208d500297fc6ba05aa6341c8482c821 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Thu, 18 Jun 2020 03:17:18 +0530 Subject: [PATCH 082/892] Disk usage parser (#2584) * Add DiskUsage Parser and Combiner Signed-off-by: Rohan Arora * Correct Typo Signed-off-by: Rohan Arora * Correct doc syntax Signed-off-by: Rohan Arora * Add doctest for DiskUsage Parser Signed-off-by: Rohan Arora * Changes during review Signed-off-by: Rohan Arora * Documentation Changes Signed-off-by: Rohan Arora --- docs/shared_combiners_catalog/du.rst | 3 + docs/shared_parsers_catalog/du.rst | 3 + insights/combiners/du.py | 43 ++++++++ insights/combiners/tests/test_du.py | 30 +++++ insights/parsers/du.py | 95 ++++++++++++++++ insights/parsers/tests/test_du.py | 157 +++++++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 9 files changed, 334 insertions(+) create mode 100644 docs/shared_combiners_catalog/du.rst create mode 100644 docs/shared_parsers_catalog/du.rst create mode 100644 insights/combiners/du.py create mode 100644 insights/combiners/tests/test_du.py create mode 100644 insights/parsers/du.py create mode 100644 insights/parsers/tests/test_du.py diff --git a/docs/shared_combiners_catalog/du.rst b/docs/shared_combiners_catalog/du.rst new file mode 100644 index 000000000..2e9385cd7 --- /dev/null +++ b/docs/shared_combiners_catalog/du.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.du + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/du.rst b/docs/shared_parsers_catalog/du.rst new file mode 100644 index 000000000..effa0c696 --- /dev/null +++ b/docs/shared_parsers_catalog/du.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.du + :members: + :show-inheritance: diff --git a/insights/combiners/du.py b/insights/combiners/du.py new file mode 100644 index 000000000..f6c8d7437 --- /dev/null +++ b/insights/combiners/du.py @@ -0,0 +1,43 @@ +""" +Disk Usage +========== + +Combiners for gathering information from du parsers. +""" + +from insights import combiner +from insights.parsers.du import DiskUsageDir + + +@combiner(DiskUsageDir) +class DiskUsageDirs(dict): + """ + Combiner for the :class:`insights.parsers.du.DiskUsageDir` parser. + + The parser is multioutput, one parser instance for each directory disk + usage. This combiner puts all of them back together and presents them as a + dict where the keys are the directory names and the space usage are the + values. 
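+    Each value is the size in 1K blocks, as produced by the underlying
+    ``du -s -k`` commands.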
+
+    Sample input data for du commands as parsed by the parsers::
+
+        # Output of the command:
+        # /bin/du -s -k /var/log
+        553500	/var/log
+
+        # Output of the command:
+        # /bin/du -s -k /var/lib/pgsql
+        519228	/var/lib/pgsql
+
+    Examples:
+        >>> type(disk_usage_dirs)
+        <class 'insights.combiners.du.DiskUsageDirs'>
+        >>> sorted(disk_usage_dirs.keys())
+        ['/var/lib/pgsql', '/var/log']
+        >>> disk_usage_dirs['/var/lib/pgsql']
+        519228
+    """
+    def __init__(self, du_dirs):
+        super(DiskUsageDirs, self).__init__()
+        for du in du_dirs:
+            self.update(du)
diff --git a/insights/combiners/tests/test_du.py b/insights/combiners/tests/test_du.py
new file mode 100644
index 000000000..53599a7b6
--- /dev/null
+++ b/insights/combiners/tests/test_du.py
@@ -0,0 +1,30 @@
+import doctest
+from insights.tests import context_wrap
+from insights.combiners import du
+from insights.parsers.du import DiskUsageDir
+
+DISK_USAGE_DIR_SAMPLE1 = """
+553500	/var/log
+""".strip()
+
+DISK_USAGE_DIR_SAMPLE2 = """
+519228	/var/lib/pgsql
+""".strip()
+
+
+def test_disk_usage_dirs():
+    parser1 = DiskUsageDir(context_wrap(DISK_USAGE_DIR_SAMPLE1))
+    parser2 = DiskUsageDir(context_wrap(DISK_USAGE_DIR_SAMPLE2))
+    disk_usage_dirs = du.DiskUsageDirs([parser1, parser2])
+    assert disk_usage_dirs is not None
+    assert set(disk_usage_dirs.keys()) == set(["/var/log", "/var/lib/pgsql"])
+    assert disk_usage_dirs["/var/log"] == 553500
+    assert disk_usage_dirs["/var/lib/pgsql"] == 519228
+
+
+def test_disk_usage_dirs_docs():
+    parser1 = DiskUsageDir(context_wrap(DISK_USAGE_DIR_SAMPLE1))
+    parser2 = DiskUsageDir(context_wrap(DISK_USAGE_DIR_SAMPLE2))
+    env = {'disk_usage_dirs': du.DiskUsageDirs([parser1, parser2])}
+    failed, total = doctest.testmod(du, globs=env)
+    assert failed == 0
diff --git a/insights/parsers/du.py b/insights/parsers/du.py
new file mode 100644
index 000000000..944c584a3
--- /dev/null
+++ b/insights/parsers/du.py
@@ -0,0 +1,95 @@
+"""
+Disk Usage parsers
+==================
+
+Module for the processing of output from the ``du`` command.
+
+Parsers provided by this module include:
+
+DiskUsageDir - command ``du -s -k {directory}``
+-----------------------------------------------
+
+"""
+from insights import parser, CommandParser
+from insights.parsers import ParseException, SkipException
+from insights.specs import Specs
+
+
+class DiskUsage(CommandParser, dict):
+    """
+    Reads output of du command and turns it into a dictionary with pathname as
+    key and size in blocks.
+
+    Supports parsing input data as long as output is 2 column data with first
+    column as space size with integer values only and second as pathname which
+    can be a file or directory. Space size with decimal values or unit suffixes
+    like M, GiB is not supported.
+
+    du command produces output in 1K blocks unless block size is specified in
+    command options or an environment variable. This parser is intended to be
+    used only with default block size of 1K which is also equal to plain "du"
+    or "du -k".
+
+    Sample input data::
+
+        56	/var/lib/alternatives
+        4	/var/lib/logrotate
+        5492	/var/lib/mlocate
+        20	/var/lib/NetworkManager
+        186484	/var/lib/pgsql
+        856	/var/lib/rhsm
+        110712	/var/lib/rpm
+        4	/var/lib/rsyslog
+        64	/var/lib/systemd
+        15200	/var/lib/yum
+
+    Examples:
+        >>> '/var/lib/pgsql' in disk_usage
+        True
+        >>> disk_usage.get('/var/lib/pgsql')
+        186484
+        >>> int(disk_usage.get('/var/lib/pgsql') / 1024) # to MiB
+        182
+
+    Raises:
+        SkipException: When no data could be parsed.
+        ParseException: Raised when any problem parsing the command output.
+ """ + + def parse_content(self, content): + """ + Parse input data into a dictionary. + """ + # For errors like : + # /bin/du: cannot read directory '/somepath' + # /bin/du: cannot access `/somepath': No such file or directory + du_error = 'bin/du: ' + + for line in content: + if du_error in line: + continue + + line_split = line.split(None, 1) + if len(line_split) != 2: + raise ParseException("Could not parse line: {0}".format(line)) + size, path = line_split + path = path.rstrip() + if path.startswith('.'): + raise ParseException("Relative paths not supported: {0}'". + format(line)) + if path and size.isdigit(): + self[path] = int(size) + else: + raise ParseException("Could not parse line: '{0}'". + format(line)) + if len(self) == 0: + raise SkipException('No data parsed') + + +@parser(Specs.du_dirs) +class DiskUsageDir(DiskUsage): + """ + Parser class for processing du output for multiple directories, each + collected using ``du -s -k {directory}``. + """ + pass diff --git a/insights/parsers/tests/test_du.py b/insights/parsers/tests/test_du.py new file mode 100644 index 000000000..68e529b1b --- /dev/null +++ b/insights/parsers/tests/test_du.py @@ -0,0 +1,157 @@ +import pytest +import doctest + +from insights.parsers import du +from insights.parsers.du import DiskUsage +from insights.tests import context_wrap +from insights.parsers import ParseException, SkipException +from insights.core.plugins import ContentException + +# du -s /var/lib/pgsql +DU_VAR_LIB = """ +186724 /var/lib/pgsql +""".strip() + +# du -s /var/lib/* +DU_VAR_LIB_STAR = """ +56 /var/lib/alternatives +4 /var/lib/logrotate +5492 /var/lib/mlocate +20 /var/lib/NetworkManager +186484 /var/lib/pgsql +856 /var/lib/rhsm +110712 /var/lib/rpm +4 /var/lib/rsyslog +64 /var/lib/systemd +15200 /var/lib/yum +""".strip() + +# du -sh / containing only error lines +DU_ACCESS_ERROR = """ +/bin/du: cannot access '/proc/17405/task/17405/fd/4': No such file or directory +/bin/du: cannot access '/proc/17405/task/17405/fdinfo/4': No such file or directory +""".strip() + +# du -s / containing error lines and a valid value +DU_ACCESS_ERROR_OKAY = """ +/bin/du: cannot access '/proc/17405/task/17405/fd/4': No such file or directory +5904560 / +""".strip() + +# Valid output with spaces in path +DU_SPACES = """ +102400 /mnt/abc xyz +""".strip() + +# du -a inside a directory +DU_RELATIVE = """ +1652 ./messages +1652 . 
+""" + +# du --blocks=4K , 4k blocks but no suffix unit +DU_4K_VAR_LOG = """ +3 /var/log/tuned +3969 /var/log/rhsm +85 /var/log/httpd +3 /var/log/squid +""".strip() + +# du -sh / containing only one error line +# CommandParser will throw ContentException as +# this contains single line with "No such file or directory" +DU_ACCESS_ERROR_SINGLE = """ +/bin/du: cannot access '/proc/17405/task/17405/fd/4': No such file or directory +""".strip() + +# 1M Blocks, Parser only supports integer values without unit suffix +DU_M_VAR_LOG = """ +1M /var/log/tuned +144M /var/log/rhsm +26M /var/log/httpd +5M /var/log/squid +""".strip() + +# du -h /var/log throwing exception +DU_H = """ +12K /var/log/tuned +144M /var/log/rhsm +26M /var/log/httpd +4.2M /var/log/squid +""".strip() + +DU_INVALID_1 = """ +20 /var/lib/NetworkManager +110712 +""".strip() + +DU_INVALID_2 = """ +2A /var/lib/NetworkManager +""".strip() + + +def test_du(): + du = DiskUsage(context_wrap(DU_VAR_LIB)) + assert len(du) == 1 + assert '/var/lib/pgsql' in du + assert du.get('/var/lib/pgsql') == 186724 + assert '/var/some/fake' not in du + assert du.get('/Fake') is None + assert du == {'/var/lib/pgsql': 186724} + + du = DiskUsage(context_wrap(DU_VAR_LIB_STAR)) + assert len(du) == 10 + assert '/var/lib/rpm' in du + assert du.get('/var/lib/rpm') == 110712 + assert du == {'/var/lib/alternatives': 56, '/var/lib/logrotate': 4, '/var/lib/mlocate': 5492, '/var/lib/NetworkManager': 20, '/var/lib/pgsql': 186484, '/var/lib/rhsm': 856, '/var/lib/rpm': 110712, '/var/lib/rsyslog': 4, '/var/lib/systemd': 64, '/var/lib/yum': 15200} + + du = DiskUsage(context_wrap(DU_ACCESS_ERROR_OKAY)) + assert len(du) == 1 + assert '/' in du + assert du.get('/') == 5904560 + + du = DiskUsage(context_wrap(DU_SPACES)) + assert du.get('/mnt/abc xyz') == 102400 + assert du == {'/mnt/abc xyz': 102400} + + du = DiskUsage(context_wrap(DU_4K_VAR_LOG)) + assert len(du) == 4 + assert '/var/log/httpd' in du + assert du.get('/var/log/httpd') == 85 + + +def test_du_bad(): + + with pytest.raises(SkipException) as exc: + DiskUsage(context_wrap(DU_ACCESS_ERROR)) + assert 'No data parsed' in str(exc) + + with pytest.raises(ParseException) as exc: + DiskUsage(context_wrap(DU_RELATIVE)) + assert 'Relative paths not supported' in str(exc) + + with pytest.raises(ContentException) as exc: + DiskUsage(context_wrap(DU_ACCESS_ERROR_SINGLE)) + assert 'No such file or directory' in str(exc) + + with pytest.raises(ParseException) as exc: + DiskUsage(context_wrap(DU_M_VAR_LOG)) + assert 'Could not parse line' in str(exc) + + with pytest.raises(ParseException) as exc: + DiskUsage(context_wrap(DU_H)) + assert 'Could not parse line' in str(exc) + + with pytest.raises(ParseException) as exc: + DiskUsage(context_wrap(DU_INVALID_1)) + assert 'Could not parse line' in str(exc) + + with pytest.raises(ParseException) as exc: + DiskUsage(context_wrap(DU_INVALID_2)) + assert 'Could not parse line' in str(exc) + + +def test_du_doc_examples(): + env = {'disk_usage': DiskUsage(context_wrap(DU_VAR_LIB_STAR))} + failed, total = doctest.testmod(du, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 4519135af..c2d40a76a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -137,6 +137,7 @@ class Specs(SpecSet): docker_storage_setup = RegistryPoint() docker_sysconfig = RegistryPoint() dracut_kdump_capture_service = RegistryPoint() + du_dirs = RegistryPoint(multi_output=True) dumpe2fs_h = RegistryPoint(multi_output=True) engine_config_all = 
RegistryPoint() engine_log = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4c7327058..2e3617c1a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -307,6 +307,7 @@ def dumpdev(broker): raise SkipComponent() dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") + du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") dumpe2fs_h = foreach_execute(dumpdev, "/sbin/dumpe2fs -h %s") engine_config_all = simple_command("/usr/bin/engine-config --all") engine_log = simple_file("/var/log/ovirt-engine/engine.log") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 3094c67c3..4542a143b 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -55,6 +55,7 @@ class InsightsArchiveSpecs(Specs): docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") + du_dirs = glob_file("insights_commands/du_-s_-k_*") engine_config_all = simple_file("insights_commands/engine-config_--all") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") ethtool_S = glob_file("insights_commands/ethtool_-S_*") From 98a66fee9e6f6098525922c6e5f39fa641af7a2f Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 17 Jun 2020 17:23:26 -0500 Subject: [PATCH 083/892] Include the 'q' query helper in make_rule. (#2633) Signed-off-by: Christopher Sams --- insights/shell.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/shell.py b/insights/shell.py index acad2a5db..dc9504e25 100644 --- a/insights/shell.py +++ b/insights/shell.py @@ -345,6 +345,7 @@ def make_rule(self, path=None, overwrite=False, pick=None): imports = [ "from insights import rule, make_fail, make_info, make_pass # noqa", "from insights.parsr.query import * # noqa", + "from insights.parsr.query import make_child_query as q # noqa", "", ] From 69be992fcc6496610e949ce8815640c2a7e69d12 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 18 Jun 2020 08:30:09 -0500 Subject: [PATCH 084/892] Fix saving cache instance. (#2632) The class level variable was __cache, but the instance was being saved to .cache, so it wasn't surviving. 
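The root cause is Python's double-underscore name mangling, which can be demonstrated in a few lines (the class and method names here are illustrative)::

    class C(object):
        __cache = None                      # stored on the class as C._C__cache

        def populate(self):
            if not self.__class__.__cache:  # reads the mangled C._C__cache
                self.__class__.cache = {}   # typo: writes a *different* attribute

    C().populate()
    assert C._C__cache is None  # the attribute being tested never changes,
    assert C.cache == {}        # so the cache is rebuilt on every call

Renaming the attribute to a single underscore, as this patch does, avoids mangling entirely so that reads and writes agree.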
Signed-off-by: Christopher Sams --- insights/core/remote_resource.py | 14 +++++++------- insights/tests/test_remote_resource.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/insights/core/remote_resource.py b/insights/core/remote_resource.py index 4a168dbd4..9e0d53d3c 100644 --- a/insights/core/remote_resource.py +++ b/insights/core/remote_resource.py @@ -4,6 +4,7 @@ import calendar from cachecontrol.heuristics import BaseHeuristic from cachecontrol.wrapper import CacheControl +from cachecontrol.cache import DictCache from cachecontrol.caches.file_cache import FileCache from datetime import datetime, timedelta @@ -70,26 +71,25 @@ class CachedRemoteResource(RemoteResource): redis_host = 'localhost' """ str: Hostname of redis instance if `RedisCache` backend is specified """ __heuristic = 'DefaultHeuristic' - __cache = None + _cache = None file_cache_path = '.web_cache' """ str: Path to where file cache will be stored if `FileCache` backend is specified """ def __init__(self): session = requests.Session() - hclass = globals()[self.__heuristic] - if not self.__class__.__cache: + if not self.__class__._cache: if self.backend == "RedisCache": pool = redis.ConnectionPool(host=self.redis_host, port=self.redis_port, db=0) r = redis.Redis(connection_pool=pool) - self.__class__.cache = RedisCache(r) + self.__class__._cache = RedisCache(r) elif self.backend == "FileCache": - self.__class__.cache = FileCache(self.file_cache_path) + self.__class__._cache = FileCache(self.file_cache_path) else: - self.__class__.cache = None + self.__class__._cache = DictCache() - session = CacheControl(session, heuristic=hclass(self.expire_after), cache=self.__class__.cache) + session = CacheControl(session, heuristic=DefaultHeuristic(self.expire_after), cache=self.__class__._cache) super(CachedRemoteResource, self).__init__(session) diff --git a/insights/tests/test_remote_resource.py b/insights/tests/test_remote_resource.py index af1d280c8..ae66eb26c 100644 --- a/insights/tests/test_remote_resource.py +++ b/insights/tests/test_remote_resource.py @@ -1,3 +1,4 @@ +from cachecontrol.cache import DictCache from insights.core.remote_resource import RemoteResource, CachedRemoteResource from insights.tests.mock_web_server import TestMockServer import sys @@ -52,3 +53,14 @@ def test_get_cached_remote_resource_not_found(self): url = 'http://localhost:{port}/moc/'.format(port=self.server_port) rtn = crr.get(url) assert rtn.content == NOT_FOUND + + def test_save_dict_cache(self): + crr = CachedRemoteResource() + assert crr._cache is not None + assert isinstance(crr._cache, DictCache) + + crr2 = CachedRemoteResource() + assert crr2._cache is not None + assert isinstance(crr2._cache, DictCache) + + assert crr._cache is crr2._cache From 4825dbbf1d77e6e6aca60500bbb1ba7128b8ba64 Mon Sep 17 00:00:00 2001 From: Jesse Jaggars Date: Thu, 18 Jun 2020 09:55:26 -0400 Subject: [PATCH 085/892] This PR adds a new lvm parser that depends on the lvmconfig command. 
(#2575) * adding lvmconfig Signed-off-by: Jesse Jaggars * first pass at a parser Signed-off-by: Jesse Jaggars * crutching on json to load the data Signed-off-by: Jesse Jaggars * updating tests Signed-off-by: Jesse Jaggars * inferring warnings via whitespace, rather than hardcoding a single line Signed-off-by: Jesse Jaggars * moving lvm test data to it's own file so I can ignore flake8 errors about its formatting Signed-off-by: Jesse Jaggars * adding spec to archive specs Signed-off-by: Jesse Jaggars --- .flake8 | 2 +- insights/parsers/lvm.py | 167 +++++++++++++----- insights/parsers/tests/lvm_test_data.py | 224 ++++++++++++++++++++++++ insights/parsers/tests/test_lvm.py | 8 + insights/specs/__init__.py | 1 + insights/specs/default.py | 4 + insights/specs/insights_archive.py | 4 + 7 files changed, 363 insertions(+), 47 deletions(-) create mode 100644 insights/parsers/tests/lvm_test_data.py diff --git a/.flake8 b/.flake8 index 54a83cb32..e42cbab6a 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,3 @@ [flake8] ignore = E501,E126,E127,E128,E722,E741 -exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py +exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py,insights/parsers/tests/lvm_test_data.py diff --git a/insights/parsers/lvm.py b/insights/parsers/lvm.py index d99483f75..c3e01ca3a 100644 --- a/insights/parsers/lvm.py +++ b/insights/parsers/lvm.py @@ -32,13 +32,22 @@ from __future__ import print_function import json -from ..util import parse_keypair_lines -from .. import add_filter -from .. import Parser, parser, get_active_lines, LegacyItemAccess, CommandParser -from . import parse_fixed_table +from collections import defaultdict + from insights.parsers import ParseException from insights.specs import Specs +from .. import ( + CommandParser, + LegacyItemAccess, + Parser, + add_filter, + get_active_lines, + parser, +) +from ..util import parse_keypair_lines +from . 
import parse_fixed_table + def map_keys(pvs, keys): """ @@ -57,23 +66,47 @@ def map_keys(pvs, keys): def find_warnings(content): """Look for lines containing warning/error/info strings instead of data.""" - keywords = [k.lower() for k in [ - "WARNING", "Couldn't find device", "Configuration setting", - "read failed", "Was device resized?", "Invalid argument", - "leaked on lvs", "Checksum error", "is exported", "failed.", - "Invalid metadata", "response failed", "unknown device", - "duplicate", "not found", "Missing device", "Internal error", - "Input/output error", "Incorrect metadata", "Cannot process volume", - "No such file or directory", "Logging initialised", "changed sizes", - "vsnprintf failed", "write failed", "correction failed", - "Failed to write", "Couldn't read", "marked missing", - "Attempt to close device", "Ignoring supplied major", - "not match metadata" - ]] + keywords = [ + k.lower() + for k in [ + "WARNING", + "Couldn't find device", + "Configuration setting", + "read failed", + "Was device resized?", + "Invalid argument", + "leaked on lvs", + "Checksum error", + "is exported", + "failed.", + "Invalid metadata", + "response failed", + "unknown device", + "duplicate", + "not found", + "Missing device", + "Internal error", + "Input/output error", + "Incorrect metadata", + "Cannot process volume", + "No such file or directory", + "Logging initialised", + "changed sizes", + "vsnprintf failed", + "write failed", + "correction failed", + "Failed to write", + "Couldn't read", + "marked missing", + "Attempt to close device", + "Ignoring supplied major", + "not match metadata", + ] + ] for l in content: lower = l.strip().lower() # Avoid hitting keywords inside the data - if not lower.startswith('lvm2'): + if not lower.startswith("lvm2"): if any(k in lower for k in keywords): yield l @@ -110,8 +143,8 @@ def __len__(self): def __getitem__(self, key): if isinstance(key, int): - return self.data['content'][key] - for i in self.data['content']: + return self.data["content"][key] + for i in self.data["content"]: if i[self.PRIMARY_KEY] == key: return i return None @@ -161,6 +194,7 @@ class Pvs(Lvm): unique key for each PV is created by joining the `PV_NAME and PV_UUID fields with a `+` character. This key is added to the dictionary as the `PV_KEY` field. """ + KEYS = { "LVM2_PV_MDA_USED_COUNT": "#PMdaUse", "LVM2_PV_UUID": "PV_UUID", @@ -183,17 +217,17 @@ class Pvs(Lvm): "LVM2_PV_FREE": "PFree", "LVM2_PV_ALLOCATABLE": "Allocatable", "LVM2_PV_MDA_SIZE": "PMdaSize", - "LVM2_PV_MISSING": "Missing" + "LVM2_PV_MISSING": "Missing", } PRIMARY_KEY = "PV" def parse_content(self, content): super(Pvs, self).parse_content(content) - for pv in self.data['content']: - pv_name = pv.get('PV') if pv.get('PV') is not None else 'no_name' - pv_uuid = pv.get('PV_UUID') if pv.get('PV_UUID') is not None else 'no_uuid' - pv.update({'PV_KEY': '+'.join([pv_name, pv_uuid])}) + for pv in self.data["content"]: + pv_name = pv.get("PV") if pv.get("PV") is not None else "no_name" + pv_uuid = pv.get("PV_UUID") if pv.get("PV_UUID") is not None else "no_uuid" + pv.update({"PV_KEY": "+".join([pv_name, pv_uuid])}) def vg(self, name): """Return all physical volumes assigned to the given volume group""" @@ -207,6 +241,7 @@ class PvsAll(Pvs): Uses the ``Pvs`` class defined in this module. 
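The composite ``PV_KEY`` added in ``parse_content`` above keeps physical volumes with duplicate device names but distinct UUIDs distinguishable. Its construction in isolation (``pv_key`` is an illustrative standalone helper; the patch builds the key inline)::

    def pv_key(pv):
        name = pv.get('PV') if pv.get('PV') is not None else 'no_name'
        uuid = pv.get('PV_UUID') if pv.get('PV_UUID') is not None else 'no_uuid'
        return '+'.join([name, uuid])

    assert pv_key({'PV': '/dev/sda2', 'PV_UUID': 'JvSULk'}) == '/dev/sda2+JvSULk'
    assert pv_key({}) == 'no_name+no_uuid'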
""" + pass @@ -262,19 +297,21 @@ class PvsHeadings(LvmHeadings): '/dev/fedora/home' """ + PRIMARY_KEY = Pvs.PRIMARY_KEY def parse_content(self, content): - self.data = parse_fixed_table(content, - heading_ignore=['PV '], - header_substitute=[('PV UUID', 'PV_UUID'), - ('1st PE', '1st_PE')], - trailing_ignore=['Reloading', 'Wiping']) + self.data = parse_fixed_table( + content, + heading_ignore=["PV "], + header_substitute=[("PV UUID", "PV_UUID"), ("1st PE", "1st_PE")], + trailing_ignore=["Reloading", "Wiping"], + ) self.data = map_keys(self.data, Pvs.KEYS) for pv in self.data: - pv_name = pv.get('PV') if pv.get('PV') is not None else 'no_name' - pv_uuid = pv.get('PV_UUID') if pv.get('PV_UUID') is not None else 'no_uuid' - pv.update({'PV_KEY': '+'.join([pv_name, pv_uuid])}) + pv_name = pv.get("PV") if pv.get("PV") is not None else "no_name" + pv_uuid = pv.get("PV_UUID") if pv.get("PV_UUID") is not None else "no_uuid" + pv.update({"PV_KEY": "+".join([pv_name, pv_uuid])}) def vg(self, name): """Return all physical volumes assigned to the given volume group""" @@ -309,6 +346,7 @@ class Vgs(Lvm): } ] """ + KEYS = { "LVM2_VG_EXTENDABLE": "Extendable", "LVM2_VG_EXTENT_SIZE": "Ext", @@ -341,7 +379,7 @@ class Vgs(Lvm): "LVM2_VG_CLUSTERED": "Clustered", "LVM2_VG_LOCKARGS": "Lock Args", "LVM2_MAX_LV": "MaxLV", - "LVM2_VG_SIZE": "VSize" + "LVM2_VG_SIZE": "VSize", } PRIMARY_KEY = "VG" @@ -354,6 +392,7 @@ class VgsAll(Vgs): Uses the ``Vgs`` class defined in this module. """ + pass @@ -388,14 +427,16 @@ class VgsHeadings(LvmHeadings): >>> vgs_info.data[2]['LSize'] '2.00g' """ + PRIMARY_KEY = Vgs.PRIMARY_KEY def parse_content(self, content): - self.data = parse_fixed_table(content, - heading_ignore=['VG '], - header_substitute=[('VG Tags', 'VG_Tags'), - ('VG UUID', 'VG_UUID')], - trailing_ignore=['Reloading', 'Wiping']) + self.data = parse_fixed_table( + content, + heading_ignore=["VG "], + header_substitute=[("VG Tags", "VG_Tags"), ("VG UUID", "VG_UUID")], + trailing_ignore=["Reloading", "Wiping"], + ) self.data = map_keys(self.data, Vgs.KEYS) @@ -429,6 +470,7 @@ class Lvs(Lvm): } ] """ + KEYS = { "LVM2_POOL_LV_UUID": "Pool_UUID", "LVM2_LV_PARENT": "Parent", @@ -513,7 +555,7 @@ class Lvs(Lvm): "LVM2_LV_DESCENDANTS": "Descendants", "LVM2_REGION_SIZE": "Region", "LVM2_SEGTYPE": "SegType", - "LVM2_SEG_MONITOR": "Monitor" + "LVM2_SEG_MONITOR": "Monitor", } PRIMARY_KEY = "LV" @@ -540,6 +582,7 @@ class LvsAll(Lvs): Uses the ``Lvs`` class defined in this module. 
""" + pass @@ -575,12 +618,13 @@ class LvsHeadings(LvmHeadings): >>> lvs_info.data[2]['LSize'] '2.00g' """ + PRIMARY_KEY = Lvs.PRIMARY_KEY def parse_content(self, content): - self.data = parse_fixed_table(content, - heading_ignore=['LV '], - header_substitute=[('LV Tags', 'LV_Tags')]) + self.data = parse_fixed_table( + content, heading_ignore=["LV "], header_substitute=[("LV Tags", "LV_Tags")] + ) self.data = map_keys(self.data, Lvs.KEYS) @@ -592,7 +636,7 @@ def parse_content(self, content): LVM_CONF_FILTERS = [ "locking_type", # CMIRROR_PERF_ISSUE "filter", # LVM_CONF_REMOVE_BOOTDEV HA_LVM_RELOCATE_ISSUE LVM_FILTER_ISSUE - "volume_list" # HA_LVM_RELOCATE_ISSUE + "volume_list", # HA_LVM_RELOCATE_ISSUE ] add_filter(Specs.lvm_conf, LVM_CONF_FILTERS) @@ -635,7 +679,7 @@ def parse_content(self, content): lvm_conf_dict = {} for line in get_active_lines(content): if "=" in line: - (key, value) = [item.strip() for item in line.split('=', 1)] + (key, value) = [item.strip() for item in line.split("=", 1)] try: lvm_conf_dict[key] = json.loads(value) except Exception: @@ -643,6 +687,35 @@ def parse_content(self, content): self.data = lvm_conf_dict +def _lvm_render(o): + if isinstance(o, dict): + parts = ['"%s": %s' % (k, _lvm_render(v)) for k, v in o.items()] + return "{%s}" % ",".join(parts) + return "%s" % o + + +@parser(Specs.lvmconfig) +class LvmConfig(CommandParser): + def parse_content(self, content): + dd = defaultdict(dict) + key = None + for line in content: + line = line.rstrip() + if not line: + continue + + if line[-1] == "{": + key = line.split()[0] + elif line[0] == "}": + key = None + elif line[0] == "\t": + k, v = line.strip().split("=", 1) + dd[key][k] = v + else: + pass # inferring this a stderr, so skipping + self.data = json.loads(_lvm_render(dict(dd))) + + if __name__ == "__main__": # This is a quick script to generate the key mappings in each subclass. # Run each lvm command with --separator="|", --nameprefixes and *not* --noheadings @@ -652,6 +725,8 @@ def parse_content(self, content): content = sys.stdin.read().splitlines() headers = [h.strip().replace(" ", "_") for h in content[0].split("|")] - nameprefixes = [v.split("=")[0].strip() for v in content[1].replace("0 ", "0").split("|")] + nameprefixes = [ + v.split("=")[0].strip() for v in content[1].replace("0 ", "0").split("|") + ] pairs = zip(nameprefixes, headers) print(json.dumps(OrderedDict(sorted(pairs)))) diff --git a/insights/parsers/tests/lvm_test_data.py b/insights/parsers/tests/lvm_test_data.py new file mode 100644 index 000000000..7baaed234 --- /dev/null +++ b/insights/parsers/tests/lvm_test_data.py @@ -0,0 +1,224 @@ +LVMCONFIG = """ + WARNING: Running as a non-root user. Functionality may be unavailable. 
+devices { + filter=["a|.*/|"] + allow_changes_with_duplicate_pvs=0 + issue_discards=0 + pv_min_size=2048 + require_restorefile_with_uuid=1 + disable_after_error_count=0 + ignore_lvm_mirrors=1 + ignore_suspended_devices=0 + data_alignment_offset_detection=1 + data_alignment=0 + data_alignment_detection=1 + md_chunk_alignment=1 + fw_raid_component_detection=0 + md_component_detection=1 + multipath_component_detection=1 + sysfs_scan=1 + write_cache_state=1 + cache_file_prefix="" + cache_dir="/etc/lvm/cache" + preferred_names=["^/dev/mpath/","^/dev/mapper/mpath","^/dev/[hs]d"] + external_device_info_source="none" + obtain_device_list_from_udev=1 + scan="/dev" + dir="/dev" + global_filter=["a|.*/|"] + cache="/etc/lvm/cache/.cache" + default_data_alignment=1 +} +backup { + backup=1 + backup_dir="/etc/lvm/backup" + archive=1 + archive_dir="/etc/lvm/archive" + retain_min=10 + retain_days=30 +} +shell { + history_size=100 +} +config { + checks=1 + abort_on_errors=0 + profile_dir="/etc/lvm/profile" +} +allocation { + thin_pool_metadata_require_separate_pvs=0 + cache_pool_metadata_require_separate_pvs=0 + mirror_logs_require_separate_pvs=0 + wipe_signatures_when_zeroing_new_lvs=1 + use_blkid_wiping=1 + maximise_cling=1 + raid_stripe_all_devices=0 + cache_pool_cachemode="writethrough" + cache_metadata_format=0 + cache_mode="writethrough" + thin_pool_zero=1 + thin_pool_discards="passdown" + thin_pool_chunk_size_policy="generic" + physical_extent_size=4096 +} +log { + report_command_log=0 + debug_classes=["memory","devices","io","activation","allocation","lvmetad","metadata","cache","locking","lvmpolld","dbus"] + activation=0 + prefix=" " + command_names=0 + indent=1 + level=0 + overwrite=0 + syslog=1 + silent=0 + verbose=0 + command_log_sort="log_seq_num" + command_log_cols="log_seq_num,log_type,log_context,log_object_type,log_object_name,log_object_id,log_object_group,log_object_group_id,log_message,log_errno,log_ret_code" + command_log_selection="!(log_type=status && message=success)" +} +global { + fallback_to_lvm1=0 + notify_dbus=1 + use_lvmpolld=1 + system_id_source="none" + use_lvmlockd=0 + use_lvmetad=1 + sparse_segtype_default="thin" + raid10_segtype_default="raid10" + mirror_segtype_default="raid1" + metadata_read_only=0 + abort_on_internal_errors=0 + prioritise_write_locks=1 + locking_dir="/run/lock/lvm" + fallback_to_local_locking=1 + fallback_to_clustered_locking=1 + wait_for_locks=1 + locking_type=1 + etc="/etc" + proc="/proc" + activation=1 + suffix=1 + si_unit_consistency=1 + units="r" + test=0 + umask=63 + format="lvm2" + locking_library="liblvm2clusterlock.so" + detect_internal_vg_cache_corruption=0 + lvdisplay_shows_full_device_path=0 + use_aio=1 + lvmetad_update_wait_time=10 + lvmlockd_lock_retries=3 + sanlock_lv_extend=256 + thin_check_executable="/usr/sbin/thin_check" + thin_dump_executable="/usr/sbin/thin_dump" + thin_repair_executable="/usr/sbin/thin_repair" + thin_check_options=["-q","--clear-needs-check-flag"] + thin_repair_options=[""] + cache_check_executable="/usr/sbin/cache_check" + cache_dump_executable="/usr/sbin/cache_dump" + cache_repair_executable="/usr/sbin/cache_repair" + cache_check_options=["-q","--clear-needs-check-flag"] + cache_repair_options=[""] + fsadm_executable="/usr/sbin/fsadm" +} +activation { + activation_mode="degraded" + polling_interval=15 + monitoring=1 + use_mlockall=0 + thin_pool_autoextend_percent=20 + thin_pool_autoextend_threshold=100 + snapshot_autoextend_percent=20 + snapshot_autoextend_threshold=100 + mirror_log_fault_policy="allocate" + 
mirror_image_fault_policy="remove" + raid_fault_policy="warn" + readahead="auto" + raid_region_size=2048 + process_priority=-18 + reserved_memory=8192 + reserved_stack=64 + use_linear_target=1 + missing_stripe_filler="error" + retry_deactivation=1 + verify_udev_operations=0 + udev_rules=1 + udev_sync=1 + checks=0 + mirror_region_size=2048 + error_when_full=0 + mirror_device_fault_policy="remove" + auto_set_activation_skip=1 +} +metadata { + check_pv_device_sizes=1 + record_lvs_history=0 + lvs_history_retention_time=0 + pvmetadatacopies=1 + vgmetadatacopies=0 + pvmetadatasize=255 + pvmetadataignore=0 + stripesize=64 +} +report { + output_format="basic" + compact_output=0 + compact_output_cols="" + aligned=1 + buffered=1 + headings=1 + separator=" " + list_item_separator="," + prefixes=0 + quoted=1 + columns_as_rows=0 + binary_values_as_numeric=0 + time_format="%Y-%m-%d %T %z" + devtypes_sort="devtype_name" + devtypes_cols="devtype_name,devtype_max_partitions,devtype_description" + devtypes_cols_verbose="devtype_name,devtype_max_partitions,devtype_description" + lvs_sort="vg_name,lv_name" + lvs_cols="lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv" + lvs_cols_verbose="lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert_lv,lv_uuid,lv_profile" + vgs_sort="vg_name" + vgs_cols="vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free" + vgs_cols_verbose="vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile" + pvs_sort="pv_name" + pvs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free" + pvs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid" + segs_sort="vg_name,lv_name,seg_start" + segs_cols="lv_name,vg_name,lv_attr,stripes,segtype,seg_size" + segs_cols_verbose="lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize" + pvsegs_sort="pv_name,pvseg_start" + pvsegs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size" + pvsegs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges" + vgs_cols_full="vg_all" + pvs_cols_full="pv_all" + lvs_cols_full="lv_all" + pvsegs_cols_full="pvseg_all,pv_uuid,lv_uuid" + segs_cols_full="seg_all,lv_uuid" + vgs_sort_full="vg_name" + pvs_sort_full="pv_name" + lvs_sort_full="vg_name,lv_name" + pvsegs_sort_full="pv_uuid,pvseg_start" + segs_sort_full="lv_uuid,seg_start" + mark_hidden_devices=1 + two_word_unknown_device=0 +} +dmeventd { + raid_library="libdevmapper-event-lvm2raid.so" + thin_library="libdevmapper-event-lvm2thin.so" + snapshot_library="libdevmapper-event-lvm2snapshot.so" + mirror_library="libdevmapper-event-lvm2mirror.so" + thin_command="lvm lvextend --use-policies" + executable="/usr/sbin/dmeventd" +} +tags { + hosttags=0 +} +local { + system_id="" + host_id=0 +}""".strip() diff --git a/insights/parsers/tests/test_lvm.py b/insights/parsers/tests/test_lvm.py index df62cf57c..55ab945b0 100644 --- a/insights/parsers/tests/test_lvm.py +++ b/insights/parsers/tests/test_lvm.py @@ -1,5 +1,7 @@ from __future__ import print_function from insights.parsers import lvm +from insights.tests import context_wrap +from .lvm_test_data import LVMCONFIG WARNINGS_CONTENT = """ WARNING @@ -40,3 +42,9 @@ def compare_partial_dicts(result, expected): print("Failed for key {k}, {r} != 
{e}".format(k=k, r=result[k], e=expected[k])) mismatches += 1 return mismatches == 0 + + +def test_lvmconfig(): + p = lvm.LvmConfig(context_wrap(LVMCONFIG)) + assert p.data["dmeventd"]["raid_library"] == "libdevmapper-event-lvm2raid.so" + assert p.data["global"]["thin_check_options"] == ["-q", "--clear-needs-check-flag"] diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index c2d40a76a..cbff8d873 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -319,6 +319,7 @@ class Specs(SpecSet): lsscsi = RegistryPoint() lvdisplay = RegistryPoint() lvm_conf = RegistryPoint(filterable=True) + lvmconfig = RegistryPoint() lvs_noheadings = RegistryPoint() lvs_noheadings_all = RegistryPoint() lvs = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 2e3617c1a..69c88e535 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -570,6 +570,10 @@ def semid(broker): lsscsi = simple_command("/usr/bin/lsscsi") lvdisplay = simple_command("/sbin/lvdisplay") lvm_conf = simple_file("/etc/lvm/lvm.conf") + lvmconfig = first_of([ + simple_command("/usr/sbin/lvmconfig --type full"), + simple_command("/usr/sbin/lvm dumpconfig --type full"), + ]) lvs = None # simple_command('/sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"') lvs_noheadings = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"") lvs_noheadings_all = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype --config='global{locking_type=0} devices{filter=[\"a|.*|\"]}'") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 4542a143b..6a3611d25 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -154,6 +154,10 @@ class InsightsArchiveSpecs(Specs): lssap = simple_file("insights_commands/usr.sap.hostctrl.exe.lssap") lsscsi = simple_file("insights_commands/lsscsi") lvdisplay = simple_file("insights_commands/lvdisplay") + lvmconfig = first_file([ + simple_file("insights_commands/lvmconfig_--type_full"), + simple_file("insights_commands/lvm_dumpconfig_--type_full"), + ]) lvs_noheadings = simple_file("insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0") lvs_noheadings_all = simple_file("insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_--config_global_locking_type_0_devices_filter_a") max_uid = simple_file("insights_commands/awk_-F_if_3_max_max_3_END_print_max_.etc.passwd") From 0e0927d857490f6c08409e32ea07efa5f7201fc2 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 18 Jun 2020 13:56:29 -0400 Subject: [PATCH 086/892] create thread for sdnotify (#2561) * create thread for sdnotify Signed-off-by: Jeremy Crafts --- insights/client/client.py | 8 +--- insights/client/data_collector.py | 12 ++--- insights/client/insights_spec.py | 20 +++------ insights/client/utilities.py | 48 +++++++++++++++----- insights/tests/client/test_client.py | 35 +-------------- insights/tests/client/test_insights_spec.py | 50 
+-------------------- insights/tests/client/test_utilities.py | 34 +++++++------- 7 files changed, 68 insertions(+), 139 deletions(-) diff --git a/insights/client/client.py b/insights/client/client.py index 9a416397a..b06553fd3 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -15,9 +15,7 @@ delete_registered_file, delete_unregistered_file, delete_cache_files, - determine_hostname, - read_pidfile, - systemd_notify) + determine_hostname) from .collection_rules import InsightsUploadConf from .data_collector import DataCollector from .connection import InsightsConnection @@ -310,9 +308,7 @@ def get_connection(config): def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=None): logger.info('Uploading Insights data.') api_response = None - parent_pid = read_pidfile() for tries in range(config.retries): - systemd_notify(parent_pid) upload = pconn.upload_archive(tar_file, '', collection_duration) if upload.status_code in (200, 201): @@ -359,9 +355,7 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None): if config.legacy_upload: return _legacy_upload(config, pconn, tar_file, content_type, collection_duration) logger.info('Uploading Insights data.') - parent_pid = read_pidfile() for tries in range(config.retries): - systemd_notify(parent_pid) upload = pconn.upload_archive(tar_file, content_type, collection_duration) if upload.status_code in (200, 202): diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 0e5b2f7b2..0db10f24c 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -16,7 +16,7 @@ from insights.util import mangle from ..contrib.soscleaner import SOSCleaner -from .utilities import _expand_paths, get_version_info, read_pidfile, get_tags +from .utilities import _expand_paths, get_version_info, systemd_notify_init_thread, get_tags from .constants import InsightsConstants as constants from .insights_spec import InsightsFile, InsightsCommand from .archive import InsightsArchive @@ -191,7 +191,9 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): ''' Run specs and collect all the data ''' - parent_pid = read_pidfile() + # initialize systemd-notify thread + systemd_notify_init_thread() + if rm_conf is None: rm_conf = {} logger.debug('Beginning to run collection spec...') @@ -220,7 +222,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if s['command'] in rm_commands: logger.warn("WARNING: Skipping command %s", s['command']) continue - cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint, parent_pid) + cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint) self.archive.add_to_archive(cmd_spec) for f in conf['files']: rm_files = rm_conf.get('files', []) @@ -233,7 +235,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if s['file'] in rm_conf.get('files', []): logger.warn("WARNING: Skipping file %s", s['file']) else: - file_spec = InsightsFile(s, exclude, self.mountpoint, parent_pid) + file_spec = InsightsFile(s, exclude, self.mountpoint) self.archive.add_to_archive(file_spec) if 'globs' in conf: for g in conf['globs']: @@ -242,7 +244,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if g['file'] in rm_conf.get('files', []): logger.warn("WARNING: Skipping file %s", g) else: - glob_spec = InsightsFile(g, exclude, self.mountpoint, parent_pid) + glob_spec = InsightsFile(g, exclude, self.mountpoint) 
self.archive.add_to_archive(glob_spec) logger.debug('Spec collection finished.') diff --git a/insights/client/insights_spec.py b/insights/client/insights_spec.py index af6e997c0..da54e1312 100644 --- a/insights/client/insights_spec.py +++ b/insights/client/insights_spec.py @@ -9,7 +9,7 @@ from insights.util import mangle from .constants import InsightsConstants as constants -from .utilities import determine_hostname, systemd_notify +from .utilities import determine_hostname logger = logging.getLogger(__name__) @@ -18,7 +18,7 @@ class InsightsSpec(object): ''' A spec loaded from the uploader.json ''' - def __init__(self, config, spec, exclude, parent_pid=None): + def __init__(self, config, spec, exclude): self.config = config # exclusions patterns for this spec @@ -36,16 +36,14 @@ def __init__(self, config, spec, exclude, parent_pid=None): self.exclude = exclude # pattern for spec collection self.pattern = spec['pattern'] if spec['pattern'] else None - # PID of parent insights-client process, to notify systemd watchdog - self.parent_pid = parent_pid class InsightsCommand(InsightsSpec): ''' A command spec ''' - def __init__(self, config, spec, exclude, mountpoint, parent_pid=None): - InsightsSpec.__init__(self, config, spec, exclude, parent_pid) + def __init__(self, config, spec, exclude, mountpoint): + InsightsSpec.__init__(self, config, spec, exclude) self.command = spec['command'].replace( '{CONTAINER_MOUNT_POINT}', mountpoint) self.archive_path = mangle.mangle_command(self.command) @@ -58,9 +56,6 @@ def get_output(self): Execute a command through system shell. First checks to see if the requested command is executable. Returns (returncode, stdout, 0) ''' - # let systemd know we're still going - systemd_notify(self.parent_pid) - if self.is_hostname: # short circuit for hostame with internal method return determine_hostname() @@ -158,8 +153,8 @@ class InsightsFile(InsightsSpec): ''' A file spec ''' - def __init__(self, spec, exclude, mountpoint, parent_pid=None): - InsightsSpec.__init__(self, None, spec, exclude, parent_pid) + def __init__(self, spec, exclude, mountpoint): + InsightsSpec.__init__(self, None, spec, exclude) # substitute mountpoint for collection self.real_path = os.path.join(mountpoint, spec['file'].lstrip('/')) self.archive_path = spec['file'] @@ -168,9 +163,6 @@ def get_output(self): ''' Get file content, selecting only lines we are interested in ''' - # let systemd know we're still going - systemd_notify(self.parent_pid) - if not os.path.isfile(self.real_path): logger.debug('File %s does not exist', self.real_path) return diff --git a/insights/client/utilities.py b/insights/client/utilities.py index 750bab81c..88053d410 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -11,6 +11,8 @@ import shlex import re import sys +import threading +import time from subprocess import Popen, PIPE, STDOUT import yaml @@ -293,28 +295,50 @@ def read_pidfile(): return pid -def systemd_notify(pid): +def _systemd_notify(pid): ''' Ping the systemd watchdog with the main PID so that the watchdog doesn't kill the process ''' - if not os.getenv('NOTIFY_SOCKET'): - # running standalone, not via systemd job - return + try: + proc = Popen(['/usr/bin/systemd-notify', '--pid=' + str(pid), 'WATCHDOG=1']) + except OSError as e: + logger.debug('Could not launch systemd-notify: %s', str(e)) + return False + stdout, stderr = proc.communicate() + if proc.returncode != 0: + logger.debug('systemd-notify returned %s', proc.returncode) + return False + return True + + +def 
systemd_notify_init_thread(): + ''' + Use a thread to periodically ping systemd instead + of calling it on a per-command basis + ''' + pid = read_pidfile() if not pid: logger.debug('No PID specified.') return + if not os.getenv('NOTIFY_SOCKET'): + # running standalone, not via systemd job + return if not os.path.exists('/usr/bin/systemd-notify'): # RHEL 6, no systemd return - try: - proc = Popen(['/usr/bin/systemd-notify', '--pid=' + str(pid), 'WATCHDOG=1']) - except OSError: - logger.debug('Could not launch systemd-notify.') - return - stdout, stderr = proc.communicate() - if proc.returncode != 0: - logger.debug('systemd-notify returned %s', proc.returncode) + + def _sdnotify_loop(): + while True: + # run sdnotify every 30 seconds + if not _systemd_notify(pid): + # end the loop if something goes wrong + break + time.sleep(30) + + sdnotify_thread = threading.Thread(target=_sdnotify_loop, args=()) + sdnotify_thread.daemon = True + sdnotify_thread.start() def get_tags(tags_file_path=constants.default_tags_file): diff --git a/insights/tests/client/test_client.py b/insights/tests/client/test_client.py index a10e76dc0..316610037 100644 --- a/insights/tests/client/test_client.py +++ b/insights/tests/client/test_client.py @@ -9,7 +9,7 @@ from insights import package_info from insights.client.constants import InsightsConstants as constants from insights.client.utilities import generate_machine_id -from mock.mock import patch, Mock, mock_open, call +from mock.mock import patch, Mock, call class FakeConnection(object): @@ -410,39 +410,6 @@ def test_platform_upload(_legacy_upload, _, path_exists): _legacy_upload.assert_not_called() -@patch('insights.client.client.write_to_disk') -@patch('insights.client.client.open', new_callable=mock_open) -@patch('insights.client.client.systemd_notify') -@patch('insights.client.client.read_pidfile') -@patch('insights.client.os.path.exists', return_value=True) -@patch('insights.client.connection.InsightsConnection.upload_archive', return_value=Mock(status_code=200, text='{}')) -def test_legacy_upload_systemd(_, path_exists, read_pidfile, systemd_notify, op, wtd): - ''' - Pidfile is read and systemd-notify is called for legacy upload - ''' - config = InsightsConfig(legacy_upload=True) - config.account_number = '' # legacy registration thing - client = InsightsClient(config) - client.upload('test.gar.gz', 'test.content.type') - read_pidfile.assert_called_once() - systemd_notify.assert_called_once_with(read_pidfile.return_value) - - -@patch('insights.client.client.systemd_notify') -@patch('insights.client.client.read_pidfile') -@patch('insights.client.os.path.exists', return_value=True) -@patch('insights.client.connection.InsightsConnection.upload_archive', return_value=Mock(status_code=200)) -def test_platform_upload_systemd(_, path_exists, read_pidfile, systemd_notify): - ''' - Pidfile is read and systemd-notify is called for platform upload - ''' - config = InsightsConfig(legacy_upload=False) - client = InsightsClient(config) - client.upload('test.gar.gz', 'test.content.type') - read_pidfile.assert_called_once() - systemd_notify.assert_called_once_with(read_pidfile.return_value) - - @patch('insights.client.os.path.exists', return_value=True) @patch('insights.client.connection.InsightsConnection.upload_archive', return_value=Mock(status_code=200)) @patch('insights.client.client._legacy_upload') diff --git a/insights/tests/client/test_insights_spec.py b/insights/tests/client/test_insights_spec.py index 93e8298c4..9059406f3 100644 --- 
a/insights/tests/client/test_insights_spec.py +++ b/insights/tests/client/test_insights_spec.py @@ -1,53 +1,9 @@ -from insights.client.data_collector import DataCollector -from insights.client.insights_spec import InsightsCommand from insights.client.insights_spec import InsightsFile from insights.client.insights_spec import InsightsSpec from mock.mock import patch, MagicMock, ANY import mock -@patch('insights.client.data_collector.read_pidfile') -def test_read_pidfile_called(read_pidfile): - ''' - Pidfile is read when collection starts - ''' - dc = DataCollector(MagicMock(display_name=None)) - dc.run_collection({'commands': [], 'files': []}, None, None, '') - read_pidfile.assert_called_once() - - -@patch('insights.client.insights_spec.systemd_notify') -@patch('insights.client.insights_spec.Popen') -def test_systemd_notify_called_cmd(Popen, systemd_notify): - ''' - Systemd_notify is called before a command is run or - a file is collected - ''' - process_mock = mock.Mock() - attrs = {'communicate.return_value': (b'output', b'error')} - process_mock.configure_mock(**attrs) - Popen.return_value = process_mock - cs = InsightsCommand(MagicMock(), {'command': '', 'pattern': [], 'symbolic_name': ''}, None, '/', parent_pid='420') - cs.get_output() - systemd_notify.assert_called_with('420') - - -@patch('insights.client.insights_spec.systemd_notify') -@patch('insights.client.insights_spec.Popen') -def test_systemd_notify_called_file(Popen, systemd_notify): - ''' - Systemd_notify is called before a command is run or - a file is collected - ''' - process_mock = mock.Mock() - attrs = {'communicate.return_value': (b'output', b'error')} - process_mock.configure_mock(**attrs) - Popen.return_value = process_mock - fs = InsightsFile({'file': '', 'pattern': [], 'symbolic_name': ''}, None, '/', parent_pid='420') - fs.get_output() - systemd_notify.assert_called_with('420') - - def test_string_pattern_init(): ''' Assert spec is loaded in string mode when a list of strings is present @@ -67,10 +23,9 @@ def test_regex_pattern_init(): assert spec.regex -@patch('insights.client.insights_spec.systemd_notify') @patch('insights.client.insights_spec.Popen') @patch('insights.client.insights_spec.os.path.isfile', return_value=True) -def test_string_pattern_called(isfile, Popen, systemd_notify): +def test_string_pattern_called(isfile, Popen): ''' ''' process_mock = mock.Mock() @@ -82,10 +37,9 @@ def test_string_pattern_called(isfile, Popen, systemd_notify): Popen.assert_any_call(['grep', '-F', '-v', '-f', ANY], stdin=ANY, stdout=ANY) -@patch('insights.client.insights_spec.systemd_notify') @patch('insights.client.insights_spec.Popen') @patch('insights.client.insights_spec.os.path.isfile', return_value=True) -def test_regex_pattern_called(isfile, Popen, systemd_notify): +def test_regex_pattern_called(isfile, Popen): ''' ''' process_mock = mock.Mock() diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py index 699308d49..a2e292f95 100644 --- a/insights/tests/client/test_utilities.py +++ b/insights/tests/client/test_utilities.py @@ -224,59 +224,55 @@ def test_read_pidfile_failure(): assert util.read_pidfile() is None -@patch('insights.client.utilities.Popen') +@patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') -def test_systemd_notify_no_socket(exists, Popen): +def test_systemd_notify_init_thread_no_socket(exists, thread): ''' Test this function when NOTIFY_SOCKET is undefined, i.e. 
when we run the client on demand and not via systemd job ''' exists.return_value = True - Popen.return_value.communicate.return_value = ('', '') - util.systemd_notify('420') - Popen.assert_not_called() + util.systemd_notify_init_thread() + thread.assert_not_called() @patch('insights.client.utilities.Popen') -@patch('insights.client.utilities.os.path.exists') -@patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'}) -def test_systemd_notify(exists, Popen): +def test_systemd_notify(Popen): ''' Test calling systemd-notify with a "valid" PID On RHEL 7, exists(/usr/bin/systemd-notify) == True ''' - exists.return_value = True Popen.return_value.communicate.return_value = ('', '') - util.systemd_notify('420') + util._systemd_notify('420') Popen.assert_called_once() -@patch('insights.client.utilities.Popen') +@patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') @patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'}) -def test_systemd_notify_failure_bad_pid(exists, Popen): +def test_systemd_notify_init_thread_failure_bad_pid(exists, thread): ''' - Test calling systemd-notify with an invalid PID + Test initializing systemd-notify loop with an invalid PID On RHEL 7, exists(/usr/bin/systemd-notify) == True ''' exists.return_value = True - util.systemd_notify(None) + util.systemd_notify_init_thread() exists.assert_not_called() - Popen.assert_not_called() + thread.assert_not_called() -@patch('insights.client.utilities.Popen') +@patch('insights.client.utilities.threading.Thread') @patch('insights.client.utilities.os.path.exists') @patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'}) -def test_systemd_notify_failure_rhel_6(exists, Popen): +def test_systemd_notify_init_thread_failure_rhel_6(exists, thread): ''' Test calling systemd-notify on RHEL 6 On RHEL 6, exists(/usr/bin/systemd-notify) == False ''' exists.return_value = False - util.systemd_notify('420') - Popen.assert_not_called() + util.systemd_notify_init_thread() + thread.assert_not_called() def test_get_tags(): From 87e536c994b6cf8b4c42b35a15a67c751bfeed97 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 22 Jun 2020 16:24:37 -0400 Subject: [PATCH 087/892] ignore failure to write .registered file if non-root (#2623) * ignore failure to write .registered file if non-root Signed-off-by: Jeremy Crafts * fix except block Signed-off-by: Jeremy Crafts * specify a permission denied failure in except block Signed-off-by: Jeremy Crafts --- insights/client/connection.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/insights/client/connection.py b/insights/client/connection.py index 44b7a73c8..e8d2cef63 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -12,6 +12,7 @@ import platform import xml.etree.ElementTree as ET import warnings +import errno # import io from tempfile import TemporaryFile # from datetime import datetime, timedelta @@ -841,7 +842,7 @@ def _legacy_upload_archive(self, data_collected, duration): logger.debug("Upload duration: %s", upload.elapsed) return upload - def upload_archive(self, data_collected, content_type, duration): + def upload_archive(self, data_collected, content_type, duration=None): """ Do an HTTPS Upload of the archive """ @@ -880,7 +881,14 @@ def upload_archive(self, data_collected, content_type, duration): # 202 from platform, no json response logger.debug(upload.text) # upload = registration on 
platform - write_registered_file() + try: + write_registered_file() + except OSError as e: + if e.errno == errno.EACCES and os.getuid() != 0: + # if permissions error as non-root, ignore + pass + else: + logger.error('Could not update local registration record: %s', str(e)) else: logger.debug( "Upload archive failed with status code %s", From 315aa99d707d4578086353b478465f25f2903bd1 Mon Sep 17 00:00:00 2001 From: Sachin Date: Tue, 23 Jun 2020 13:03:27 +0530 Subject: [PATCH 088/892] [rpm_V_packages] Fix first_file() should include a list (#2637) The PR#2616 missed specifying file as list(`[]`) which cause dependency error in the rule. Signed-off-by: Sachin Patil --- insights/specs/insights_archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 6a3611d25..b3dc74aac 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -248,7 +248,7 @@ class InsightsArchiveSpecs(Specs): root_crontab = simple_file("insights_commands/crontab_-l_-u_root") rndc_status = simple_file("insights_commands/rndc_status") route = simple_file("insights_commands/route_-n") - rpm_V_packages = first_file("insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo") + rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) sapcontrol_getsystemupdatelist = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sapcontrol_getsystemupdatelist") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") From 7bf5b40a98be021fd0ff7aec02cc529179cbb482 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Tue, 23 Jun 2020 10:08:28 -0400 Subject: [PATCH 089/892] client: return True if unregister with --force (#2638) Signed-off-by: Link Dupont --- insights/client/client.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/client/client.py b/insights/client/client.py index b06553fd3..d9af735f5 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -218,6 +218,7 @@ def __cleanup_local_files(): # Run connection test and exit if config.force: __cleanup_local_files() + return True return None if check['status']: From 2638fd6941711dc6db10e089d6cd7cd73dbc90a5 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 26 Jun 2020 01:11:12 +0800 Subject: [PATCH 090/892] Remove spec saphostctrl_listinstances (#2609) * Remove spec saphostctrl_listinstances Signed-off-by: Xiangce Liu * Use Sap combiner instead of parser Signed-off-by: Xiangce Liu * revert the update to parser Signed-off-by: Xiangce Liu --- insights/specs/default.py | 43 ++++++++------------------------------- 1 file changed, 9 insertions(+), 34 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 69c88e535..dc7909cbf 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -26,6 +26,7 @@ from insights.core.spec_factory import first_file, listdir from insights.parsers.mount import Mount, ProcMounts from insights.parsers.dnf_module import DnfModuleList +from insights.combiners.sap import Sap from insights.combiners.cloud_provider import 
CloudProvider from insights.combiners.satellite_version import SatelliteVersion from insights.combiners.services import Services @@ -862,46 +863,20 @@ def rhev_data_center(broker): rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) rsyslog_conf = simple_file("/etc/rsyslog.conf") samba = simple_file("/etc/samba/smb.conf") - saphostctrl_listinstances = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function ListInstances") - @datasource(saphostctrl_listinstances, hostname) - def sap_sid_nr(broker): - """ - Get the SID and Instance Number - - Typical output of saphostctrl_listinstances:: - # /usr/sap/hostctrl/exe/saphostctrl -function ListInstances - Inst Info : SR1 - 01 - liuxc-rhel7-hana-ent - 749, patch 418, changelist 1816226 - - Returns: - (list): List of tuple of SID and Instance Number. - - """ - insts = broker[DefaultSpecs.saphostctrl_listinstances].content - hn = broker[DefaultSpecs.hostname].content[0].split('.')[0].strip() - results = set() - for ins in insts: - ins_splits = ins.split(' - ') - # Local Instance - if ins_splits[2].strip() == hn: - # (sid, nr) - results.add((ins_splits[0].split()[-1].lower(), ins_splits[1].strip())) - return list(results) - - @datasource(sap_sid_nr) + @datasource(Sap) def sap_sid(broker): - """ - Get the SID - - Returns: - (list): List of SID. + sap = broker[Sap] + return [sap.sid(i).lower() for i in sap.local_instances] - """ - return list(set(sn[0] for sn in broker[DefaultSpecs.sap_sid_nr])) + @datasource(Sap) + def sap_sid_num(broker): + sap = broker[Sap] + return [(sap.sid(i).lower(), sap.number(i)) for i in sap.local_instances] sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) sap_host_profile = simple_file("/usr/sap/hostctrl/exe/host_profile") - sapcontrol_getsystemupdatelist = foreach_execute(sap_sid_nr, "/usr/bin/sudo -iu %sadm sapcontrol -nr %s -function GetSystemUpdateList", keep_rc=True) + sapcontrol_getsystemupdatelist = foreach_execute(sap_sid_num, "/usr/bin/sudo -iu %sadm sapcontrol -nr %s -function GetSystemUpdateList", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") From f259510cfeb115f34b536cea4367507cff0e58de Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 1 Jul 2020 11:14:15 -0500 Subject: [PATCH 091/892] Add datasource head to autology (#2644) * Add existing datasource head to autology catalog * Add description to catalog doc template for doc build Signed-off-by: Bob Fahr --- insights/util/autology/datasources.py | 20 +++++++++++++++++++- insights/util/specs_catalog.py | 1 + 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/insights/util/autology/datasources.py b/insights/util/autology/datasources.py index 26a65fd37..8b9264d29 100644 --- a/insights/util/autology/datasources.py +++ b/insights/util/autology/datasources.py @@ -39,6 +39,8 @@ """ str: Literal constant for a first_of Spec object """ COMMAND_WITH_ARGS_TYPE = 'command_with_args' """ str: Literal constant for a command_with_args Spec object """ +HEAD_TYPE = 'head' +""" str: Literal constant used for a head Spec object """ UNKNOWN_TYPE = 'unknown' """ str: Literal constant for a Spec object with unknown type """ ANONYMOUS_SPEC_NAME = 'anonymous' @@ -90,6 +92,11 @@ def 
is_command_with_args(m_obj): return isinstance(m_obj, insights.core.spec_factory.command_with_args) +def is_head(m_obj): + """ bool: True if object is a head object """ + return isinstance(m_obj, insights.core.spec_factory.head) + + def is_function(m_obj): """ bool: True if object is a function object """ return inspect.isfunction(m_obj) @@ -208,7 +215,7 @@ def from_object(cls, m_type, m_name=ANONYMOUS_SPEC_NAME): elif is_first_of(m_type): m_spec['type_name'] = FIRST_OF_TYPE deps = next((v for k, v in m_members if k == "deps"), None) - m_spec['deps'] = [Spec.from_object(d) for d in deps] + m_spec['deps'] = [cls.from_object(d) for d in deps] deps_repr = ', '.join(['{0}'.format(d) for d in m_spec['deps']]) m_spec['repr'] = 'first_of([{0}])'.format(deps_repr) @@ -228,6 +235,12 @@ def from_object(cls, m_type, m_name=ANONYMOUS_SPEC_NAME): m_spec['provider'] = cls.from_object(provider) m_spec['repr'] = 'foreach_collect("{path}", provider={provider})' + elif is_head(m_type): + m_spec['type_name'] = HEAD_TYPE + dep = next((v for k, v in m_members if k == "dep"), None) + m_spec['dep'] = cls.from_object(dep) + m_spec['repr'] = 'head({0})'.format(m_spec['dep']) + elif m_type is None: m_spec['type_name'] = NONE_TYPE m_spec['repr'] = 'NONE TYPE' @@ -301,6 +314,11 @@ def is_command_with_args(self): """ bool: True if this spec is a command_with_args """ return self.get('type_name', UNKNOWN_TYPE) == COMMAND_WITH_ARGS_TYPE + @property + def is_head(self): + """ bool: True if this spec is a head """ + return self.get('type_name', UNKNOWN_TYPE) == HEAD_TYPE + @property def is_function(self): """ bool: True if this spec is a function """ diff --git a/insights/util/specs_catalog.py b/insights/util/specs_catalog.py index e1fcda71b..c9475708a 100644 --- a/insights/util/specs_catalog.py +++ b/insights/util/specs_catalog.py @@ -46,6 +46,7 @@ each element in the provider into the path * *first_of* - collects the contents of datasource that returns data * *command_with_args* - collects the output of the command with each ``provider`` argument +* *head* - collects the contents of the first item in a list Some datasources are implemented as functions and each links to the details provided in the function specific documentation. Generally functions are used as a ``provider`` to other From 59f19c58c7a0c472eddb7830d112bdc6cbea34f1 Mon Sep 17 00:00:00 2001 From: Stephen Date: Wed, 1 Jul 2020 14:46:42 -0400 Subject: [PATCH 092/892] Silence user warning when importing pkg_resources (#2639) * Silence user warning when importing pkg_resources When importing this package, sys.path warnings are recorded on RHEL6. These are noise and can be safely ignored. They are currently causing cron emails to be sent every time insights runs on a RHEL6 box. RHCLOUD-7066 BZ 1847104 Signed-off-by: Stephen Adams * Better fix for UserWarning pkg_resources was imported above a line that already ignored warnings. I moved the import of it down into the function that used it to take advantage of the already implemented warning ignore line. 
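Roughly, the pattern is the following (an illustrative sketch, not the
exact client code; the local catch_warnings block here stands in for the
warnings filter that the module already has in effect at that point):

    import warnings

    def user_agent():
        with warnings.catch_warnings():
            # suppress pkg_resources' sys.path UserWarnings on import
            warnings.simplefilter("ignore", UserWarning)
            import pkg_resources  # deferred until first use
        pkg = pkg_resources.working_set.find(
            pkg_resources.Requirement.parse("insights-core"))
        return "insights-core/%s" % (pkg.version if pkg else "unknown")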
Signed-off-by: Stephen Adams
---
 insights/client/connection.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/insights/client/connection.py b/insights/client/connection.py
index e8d2cef63..18b6cd358 100644
--- a/insights/client/connection.py
+++ b/insights/client/connection.py
@@ -8,7 +8,6 @@
 import six
 import json
 import logging
-import pkg_resources
 import platform
 import xml.etree.ElementTree as ET
 import warnings
@@ -174,6 +173,7 @@ def user_agent(self):
         """
         Generates and returns a string suitable for use as a request user-agent
         """
+        import pkg_resources
         core_version = "insights-core"
         pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse(core_version))
         if pkg is not None:

From 7f96dddc1efbbf5a48c976bdfcd8defc84f330d0 Mon Sep 17 00:00:00 2001
From: Rohan Arora
Date: Thu, 2 Jul 2020 04:39:59 +0530
Subject: [PATCH 093/892] Fix first_file usage for lvmconfig spec (#2646)

Signed-off-by: Rohan Arora
---
 insights/specs/insights_archive.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index b3dc74aac..5cb5bb809 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -155,8 +155,8 @@ class InsightsArchiveSpecs(Specs):
     lsscsi = simple_file("insights_commands/lsscsi")
     lvdisplay = simple_file("insights_commands/lvdisplay")
     lvmconfig = first_file([
-        simple_file("insights_commands/lvmconfig_--type_full"),
-        simple_file("insights_commands/lvm_dumpconfig_--type_full"),
+        "insights_commands/lvmconfig_--type_full",
+        "insights_commands/lvm_dumpconfig_--type_full"
     ])
     lvs_noheadings = simple_file("insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0")
     lvs_noheadings_all = simple_file("insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_--config_global_locking_type_0_devices_filter_a")

From 21c41ae9ac6a8a517afbb4fb437f8468f6b24a59 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Mon, 6 Jul 2020 10:06:25 +0800
Subject: [PATCH 094/892] Support cib.xml in sosreport (#2640)

* Support cib.xml in sosreport

* The location of cib.xml in sosreport is at
  "sos_commands/pacemaker/crm_report//cib.xml"
* And there is only one cib.xml path under sos_commands
* Since the hostname is unknown, use a glob here and take the first
  match as the expected one

Signed-off-by: Huanhuan Li

* Move the cib changes to sos_archive

Signed-off-by: Huanhuan Li
---
 insights/parsers/tests/test_cib.py |  8 +++++++-
 insights/specs/sos_archive.py      | 10 +++++++++-
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/insights/parsers/tests/test_cib.py b/insights/parsers/tests/test_cib.py
index fbe897bf5..683c495f7 100644
--- a/insights/parsers/tests/test_cib.py
+++ b/insights/parsers/tests/test_cib.py
@@ -29,6 +29,12 @@
 
 
 def test_cib():
-    cib = CIB(context_wrap(CIB_CONFIG))
+    cib = CIB(context_wrap(CIB_CONFIG, path="/var/lib/pacemaker/cib/cib.xml"))
+    assert cib is not None
+    assert cib.nodes == ['foo', 'bar', 'baz']
+
+
+def test_cib_in_sosreport():
+    cib = CIB(context_wrap(CIB_CONFIG, path="sos_commands/pacemaker/crm_report/abc/cib.xml"))
     assert cib is not None
     assert cib.nodes == ['foo', 'bar', 'baz']

diff --git a/insights/specs/sos_archive.py
b/insights/specs/sos_archive.py index 6a945d48f..0f95061e3 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -1,7 +1,7 @@ from functools import partial from insights.specs import Specs from insights.core.context import SosArchiveContext -from insights.core.spec_factory import simple_file, first_of, first_file, glob_file +from insights.core.spec_factory import simple_file, first_of, first_file, glob_file, head first_file = partial(first_file, context=SosArchiveContext) glob_file = partial(glob_file, context=SosArchiveContext) @@ -25,6 +25,14 @@ class SosSpecs(Specs): ceph_report = simple_file("sos_commands/ceph/ceph_report") ceph_health_detail = simple_file("sos_commands/ceph/ceph_health_detail_--format_json-pretty") chkconfig = first_file(["sos_commands/startup/chkconfig_--list", "sos_commands/services/chkconfig_--list"]) + cib_xml = first_of( + [ + simple_file("/var/lib/pacemaker/cib/cib.xml"), + head( + glob_file("sos_commands/pacemaker/crm_report/*/cib.xml") + ) + ] + ) cpupower_frequency_info = simple_file("sos_commands/processor/cpupower_frequency-info") date = first_of([simple_file("sos_commands/general/date"), simple_file("sos_commands/date/date")]) df__al = first_file(["sos_commands/filesys/df_-al", "sos_commands/filesys/df_-al_-x_autofs"]) From 9f26f7b3555aefeda4007e30aa27a8842beb901b Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 7 Jul 2020 10:39:58 +0800 Subject: [PATCH 095/892] Raise SkipException when no packages are found in installed_rpms Signed-off-by: Xiangce Liu --- insights/parsers/installed_rpms.py | 6 ++++++ insights/parsers/tests/test_installed_rpms.py | 16 ++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index cd7010026..45f15cbf8 100644 --- a/insights/parsers/installed_rpms.py +++ b/insights/parsers/installed_rpms.py @@ -78,6 +78,7 @@ from ..util import rsplit from .. import parser, get_active_lines, CommandParser from .rpm_vercmp import rpm_version_compare +from insights.parsers import SkipException from insights.specs import Specs # This list of architectures is taken from PDC (Product Definition Center): @@ -215,6 +216,9 @@ class InstalledRpms(CommandParser, RpmList): """ A parser for working with data containing a list of installed RPM files on the system and related information. + + Raises: + SkipException: When no packages are found. 
""" def __init__(self, *args, **kwargs): self.errors = [] @@ -244,6 +248,8 @@ def parse_content(self, content): except Exception: # Both ways failed self.unparsed.append(line) + if not self.packages: + raise SkipException() # Don't want defaultdict's behavior after parsing is complete self.packages = dict(self.packages) diff --git a/insights/parsers/tests/test_installed_rpms.py b/insights/parsers/tests/test_installed_rpms.py index fb6643c11..53851cf14 100644 --- a/insights/parsers/tests/test_installed_rpms.py +++ b/insights/parsers/tests/test_installed_rpms.py @@ -1,5 +1,6 @@ import pytest from insights.parsers.installed_rpms import InstalledRpms, InstalledRpm, pad_version +from insights.parsers import SkipException from insights.tests import context_wrap @@ -80,6 +81,18 @@ yum-security-1.1.16-21.el5.noarch '''.strip() +ERROR_DB_NO_PKG = """ +error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library +error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal +error, run database recovery +error: cannot open Packages index using db5 - (-30973) +error: cannot open Packages database in /var/lib/rpm +error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library +error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal +error, run database recovery +error: cannot open Packages database in /var/lib/rpm +""".strip() + ORACLEASM_RPMS = ''' oracleasm-2.6.18-164.el5-2.0.5-1.el5.x86_64 oracleasmlib-2.0.4-1.el5.x86_64 @@ -198,6 +211,9 @@ def test_corrupt_db(): assert "yum-security" in rpms.packages assert rpms.corrupt is True + with pytest.raises(SkipException): + InstalledRpms(context_wrap(ERROR_DB_NO_PKG)) + def test_rpm_manifest(): rpms = InstalledRpms(context_wrap(RPM_MANIFEST)) From c0801429acaf2fb5efe160edede873865091a9dc Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 7 Jul 2020 10:40:54 +0800 Subject: [PATCH 096/892] Revert "Raise SkipException when no packages are found in installed_rpms" This reverts commit 9f26f7b3555aefeda4007e30aa27a8842beb901b. --- insights/parsers/installed_rpms.py | 6 ------ insights/parsers/tests/test_installed_rpms.py | 16 ---------------- 2 files changed, 22 deletions(-) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index 45f15cbf8..cd7010026 100644 --- a/insights/parsers/installed_rpms.py +++ b/insights/parsers/installed_rpms.py @@ -78,7 +78,6 @@ from ..util import rsplit from .. import parser, get_active_lines, CommandParser from .rpm_vercmp import rpm_version_compare -from insights.parsers import SkipException from insights.specs import Specs # This list of architectures is taken from PDC (Product Definition Center): @@ -216,9 +215,6 @@ class InstalledRpms(CommandParser, RpmList): """ A parser for working with data containing a list of installed RPM files on the system and related information. - - Raises: - SkipException: When no packages are found. 
""" def __init__(self, *args, **kwargs): self.errors = [] @@ -248,8 +244,6 @@ def parse_content(self, content): except Exception: # Both ways failed self.unparsed.append(line) - if not self.packages: - raise SkipException() # Don't want defaultdict's behavior after parsing is complete self.packages = dict(self.packages) diff --git a/insights/parsers/tests/test_installed_rpms.py b/insights/parsers/tests/test_installed_rpms.py index 53851cf14..fb6643c11 100644 --- a/insights/parsers/tests/test_installed_rpms.py +++ b/insights/parsers/tests/test_installed_rpms.py @@ -1,6 +1,5 @@ import pytest from insights.parsers.installed_rpms import InstalledRpms, InstalledRpm, pad_version -from insights.parsers import SkipException from insights.tests import context_wrap @@ -81,18 +80,6 @@ yum-security-1.1.16-21.el5.noarch '''.strip() -ERROR_DB_NO_PKG = """ -error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library -error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal -error, run database recovery -error: cannot open Packages index using db5 - (-30973) -error: cannot open Packages database in /var/lib/rpm -error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library -error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal -error, run database recovery -error: cannot open Packages database in /var/lib/rpm -""".strip() - ORACLEASM_RPMS = ''' oracleasm-2.6.18-164.el5-2.0.5-1.el5.x86_64 oracleasmlib-2.0.4-1.el5.x86_64 @@ -211,9 +198,6 @@ def test_corrupt_db(): assert "yum-security" in rpms.packages assert rpms.corrupt is True - with pytest.raises(SkipException): - InstalledRpms(context_wrap(ERROR_DB_NO_PKG)) - def test_rpm_manifest(): rpms = InstalledRpms(context_wrap(RPM_MANIFEST)) From 34055e57299e1887a2afc5a5a9c6f21afbf0cebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Cami?= Date: Wed, 8 Jul 2020 09:04:12 +0200 Subject: [PATCH 097/892] add /etc/ipa to certificates_enddate (#2649) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The FreeIPA CA certificate at /etc/ipa/ca.crt expiration date should be monitored too. 
Signed-off-by: François Cami --- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index dc7909cbf..e091606b2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -181,7 +181,7 @@ def is_ceph_monitor(broker): ceph_osd_tree = simple_command("/usr/bin/ceph osd tree -f json") ceph_s = simple_command("/usr/bin/ceph -s -f json") ceph_v = simple_command("/usr/bin/ceph -v") - certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;") + certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;") chkconfig = simple_command("/sbin/chkconfig --list") chrony_conf = simple_file("/etc/chrony.conf") chronyc_sources = simple_command("/usr/bin/chronyc sources") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 5cb5bb809..64e25782d 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -29,7 +29,7 @@ class InsightsArchiveSpecs(Specs): ceph_osd_tree = first_file(["insights_commands/ceph_osd_tree_-f_json-pretty", "insights_commands/ceph_osd_tree_-f_json"]) ceph_s = first_file(["insights_commands/ceph_-s_-f_json-pretty", "insights_commands/ceph_-s_-f_json"]) ceph_v = simple_file("insights_commands/ceph_-v") - certificates_enddate = simple_file("insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName") + certificates_enddate = first_file(["insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName", "insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName"]) chkconfig = simple_file("insights_commands/chkconfig_--list") chronyc_sources = simple_file("insights_commands/chronyc_sources") cpupower_frequency_info = simple_file("insights_commands/cpupower_-c_all_frequency-info") From c05b7d7d4d37fadc440caf312bc5611be0b4246e Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Wed, 8 Jul 2020 15:34:59 +0800 Subject: [PATCH 098/892] Add item journal_sosspec (#2653) * Add item journal_sosspec Signed-off-by: jiazhang * Fix typo Signed-off-by: jiazhang --- insights/specs/sos_archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 0f95061e3..38f81f6a1 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -74,7 +74,7 @@ class SosSpecs(Specs): ip_route_show_table_all = simple_file("sos_commands/networking/ip_route_show_table_all") ip_s_link = first_of([simple_file("sos_commands/networking/ip_-s_-d_link"), simple_file("sos_commands/networking/ip_-s_link"), simple_file("sos_commands/networking/ip_link")]) iptables = first_file(["/etc/sysconfig/iptables", "/etc/sysconfig/iptables.save"]) - journal_since_boot = first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot")]) + journal_since_boot = 
first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot"), simple_file("sos_commands/logs/journalctl_--all_--this-boot_--no-pager")]) locale = simple_file("sos_commands/i18n/locale") lsblk = first_file(["sos_commands/block/lsblk", "sos_commands/filesys/lsblk"]) ls_boot = simple_file("sos_commands/boot/ls_-lanR_.boot") From 78fcd1209519933a526b22cd11f996014eee378e Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 8 Jul 2020 15:12:42 -0400 Subject: [PATCH 099/892] move redaction to function that takes place after collection (#2642) * move redaction to function that takes place after collection Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 94 ++++++-- insights/client/insights_spec.py | 99 +-------- .../test__process_content_redaction.py | 92 ++++++++ .../client/data_collector/test_redact.py | 210 ++++++++++++++++++ insights/tests/client/test_insights_spec.py | 51 ----- .../tests/client/test_skip_commands_files.py | 3 +- 6 files changed, 397 insertions(+), 152 deletions(-) create mode 100644 insights/tests/client/data_collector/test__process_content_redaction.py create mode 100644 insights/tests/client/data_collector/test_redact.py delete mode 100644 insights/tests/client/test_insights_spec.py diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 0db10f24c..079112706 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -10,6 +10,7 @@ import glob import six import shlex +import re from itertools import chain from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile @@ -31,6 +32,41 @@ SOSCLEANER_LOGGER.setLevel(logging.ERROR) +def _process_content_redaction(filepath, exclude, regex=False): + ''' + Redact content from a file, based on + /etc/insights-client/.exp.sed and and the contents of "exclude" + + filepath file to modify + exclude list of strings to redact + regex whether exclude is a list of regular expressions + + Returns the file contents with the specified data removed + ''' + logger.debug('Processing %s...', filepath) + + # password removal + sedcmd = Popen(['sed', '-rf', constants.default_sed_file, filepath], stdout=PIPE) + # patterns removal + if exclude: + exclude_file = NamedTemporaryFile() + exclude_file.write("\n".join(exclude).encode('utf-8')) + exclude_file.flush() + if regex: + flag = '-E' + else: + flag = '-F' + grepcmd = Popen(['grep', '-v', flag, '-f', exclude_file.name], stdin=sedcmd.stdout, stdout=PIPE) + sedcmd.stdout.close() + stdout, stderr = grepcmd.communicate() + logger.debug('Process status: %s', grepcmd.returncode) + else: + stdout, stderr = sedcmd.communicate() + logger.debug('Process status: %s', sedcmd.returncode) + logger.debug('Process stderr: %s', stderr) + return stdout + + class DataCollector(object): ''' Run commands and collect files @@ -197,16 +233,6 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if rm_conf is None: rm_conf = {} logger.debug('Beginning to run collection spec...') - exclude = None - if rm_conf: - try: - exclude = rm_conf['patterns'] - # handle the None or empty case of the sub-object - if 'regex' in exclude and not exclude['regex']: - raise LookupError - logger.warn("WARNING: Skipping patterns defined in blacklist configuration") - except LookupError: - logger.debug('Patterns section of blacklist configuration is empty.') for c in conf['commands']: # remember hostname archive path @@ -222,7 +248,7 @@ 
def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if s['command'] in rm_commands: logger.warn("WARNING: Skipping command %s", s['command']) continue - cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint) + cmd_spec = InsightsCommand(self.config, s, self.mountpoint) self.archive.add_to_archive(cmd_spec) for f in conf['files']: rm_files = rm_conf.get('files', []) @@ -235,7 +261,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if s['file'] in rm_conf.get('files', []): logger.warn("WARNING: Skipping file %s", s['file']) else: - file_spec = InsightsFile(s, exclude, self.mountpoint) + file_spec = InsightsFile(s, self.mountpoint) self.archive.add_to_archive(file_spec) if 'globs' in conf: for g in conf['globs']: @@ -244,10 +270,12 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): if g['file'] in rm_conf.get('files', []): logger.warn("WARNING: Skipping file %s", g) else: - glob_spec = InsightsFile(g, exclude, self.mountpoint) + glob_spec = InsightsFile(g, self.mountpoint) self.archive.add_to_archive(glob_spec) logger.debug('Spec collection finished.') + self.redact(rm_conf) + # collect metadata logger.debug('Collecting metadata...') self._write_branch_info(branch_info) @@ -257,6 +285,46 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): self._write_blacklist_report(blacklist_report) logger.debug('Metadata collection finished.') + def redact(self, rm_conf): + ''' + Perform data redaction (password sed command and patterns), + write data to the archive in place + ''' + logger.debug('Running content redaction...') + + if not re.match(r'/var/tmp/.+/insights-.+', self.archive.archive_dir): + # sanity check to make sure we're only modifying + # our own stuff in temp + # we should never get here but just in case + raise RuntimeError('ERROR: invalid Insights archive temp path') + + if rm_conf is None: + rm_conf = {} + exclude = None + regex = False + if rm_conf: + try: + exclude = rm_conf['patterns'] + if isinstance(exclude, dict) and exclude['regex']: + # if "patterns" is a dict containing a non-empty "regex" list + logger.debug('Using regular expression matching for patterns.') + exclude = exclude['regex'] + regex = True + logger.warn("WARNING: Skipping patterns defined in blacklist configuration") + except LookupError: + # either "patterns" was undefined in rm conf, or + # "regex" was undefined in "patterns" + exclude = None + if not exclude: + logger.debug('Patterns section of blacklist configuration is empty.') + + for dirpath, dirnames, filenames in os.walk(self.archive.archive_dir): + for f in filenames: + fullpath = os.path.join(dirpath, f) + redacted_contents = _process_content_redaction(fullpath, exclude, regex) + with open(fullpath, 'wb') as dst: + dst.write(redacted_contents) + def done(self, conf, rm_conf): """ Do finalization stuff diff --git a/insights/client/insights_spec.py b/insights/client/insights_spec.py index da54e1312..1e2de0687 100644 --- a/insights/client/insights_spec.py +++ b/insights/client/insights_spec.py @@ -18,23 +18,8 @@ class InsightsSpec(object): ''' A spec loaded from the uploader.json ''' - def __init__(self, config, spec, exclude): + def __init__(self, config, spec): self.config = config - - # exclusions patterns for this spec - # if exclude is an array of strings, it's old style - # if it's an object or an array of dicts, it's new style - # use regex if it's defined - self.regex = False - self.exclude = None - if exclude and isinstance(exclude, dict): - 
if 'regex' in exclude and exclude['regex']: - logger.debug('Using regular expression matching in remove.conf.') - self.regex = True - self.exclude = exclude['regex'] - else: - self.exclude = exclude - # pattern for spec collection self.pattern = spec['pattern'] if spec['pattern'] else None @@ -42,8 +27,8 @@ class InsightsCommand(InsightsSpec): ''' A command spec ''' - def __init__(self, config, spec, exclude, mountpoint): - InsightsSpec.__init__(self, config, spec, exclude) + def __init__(self, config, spec, mountpoint): + super(InsightsCommand, self).__init__(config, spec) self.command = spec['command'].replace( '{CONTAINER_MOUNT_POINT}', mountpoint) self.archive_path = mangle.mangle_command(self.command) @@ -86,37 +71,10 @@ def get_output(self): else: raise err - dirty = False - - cmd = "sed -rf " + constants.default_sed_file - sedcmd = Popen(shlex.split(cmd), - stdin=proc0.stdout, - stdout=PIPE) - proc0.stdout.close() - proc0 = sedcmd - - if self.exclude is not None: - exclude_file = NamedTemporaryFile() - exclude_file.write("\n".join(self.exclude).encode('utf-8')) - exclude_file.flush() - if self.regex: - cmd = "grep -E -v -f %s" % exclude_file.name - else: - cmd = "grep -F -v -f %s" % exclude_file.name - proc1 = Popen(shlex.split(cmd), - stdin=proc0.stdout, - stdout=PIPE) - proc0.stdout.close() - stderr = None - if self.pattern is None or len(self.pattern) == 0: - stdout, stderr = proc1.communicate() - - # always log return codes for debug - logger.debug('Proc1 Status: %s', proc1.returncode) - logger.debug('Proc1 stderr: %s', stderr) - proc0 = proc1 + if proc0.returncode == 126 or proc0.returncode == 127: + stdout = "Could not find cmd: %s", self.command - dirty = True + dirty = False if self.pattern is not None and len(self.pattern): pattern_file = NamedTemporaryFile() @@ -139,11 +97,6 @@ def get_output(self): if not dirty: stdout, stderr = proc0.communicate() - # Required hack while we still pass shell=True to Popen; a Popen - # call with shell=False for a non-existant binary will raise OSError. 
- if proc0.returncode == 126 or proc0.returncode == 127: - stdout = "Could not find cmd: %s", self.command - logger.debug("Proc0 Status: %s", proc0.returncode) logger.debug("Proc0 stderr: %s", stderr) return stdout.decode('utf-8', 'ignore').strip() @@ -153,8 +106,8 @@ class InsightsFile(InsightsSpec): ''' A file spec ''' - def __init__(self, spec, exclude, mountpoint): - InsightsSpec.__init__(self, None, spec, exclude) + def __init__(self, spec, mountpoint): + super(InsightsFile, self).__init__(None, spec) # substitute mountpoint for collection self.real_path = os.path.join(mountpoint, spec['file'].lstrip('/')) self.archive_path = spec['file'] @@ -167,33 +120,11 @@ def get_output(self): logger.debug('File %s does not exist', self.real_path) return - cmd = [] - cmd.append('sed') - cmd.append('-rf') - cmd.append(constants.default_sed_file) - cmd.append(self.real_path) - sedcmd = Popen(cmd, - stdout=PIPE) - - if self.exclude is not None: - exclude_file = NamedTemporaryFile() - exclude_file.write("\n".join(self.exclude).encode('utf-8')) - exclude_file.flush() - - if self.regex: - cmd = "grep -E -v -f %s" % exclude_file.name - else: - cmd = "grep -F -v -f %s" % exclude_file.name - args = shlex.split(cmd) - proc = Popen(args, stdin=sedcmd.stdout, stdout=PIPE) - sedcmd.stdout.close() - stdin = proc.stdout - if self.pattern is None: - output = proc.communicate()[0] - else: - sedcmd = proc + sedcmd = Popen(['sed', '', self.real_path], stdout=PIPE) - if self.pattern is not None: + if self.pattern is None: + output = sedcmd.communicate()[0] + else: pattern_file = NamedTemporaryFile() pattern_file.write("\n".join(self.pattern).encode('utf-8')) pattern_file.flush() @@ -203,12 +134,6 @@ def get_output(self): proc1 = Popen(args, stdin=sedcmd.stdout, stdout=PIPE) sedcmd.stdout.close() - if self.exclude is not None: - stdin.close() - output = proc1.communicate()[0] - if self.pattern is None and self.exclude is None: - output = sedcmd.communicate()[0] - return output.decode('utf-8', 'ignore').strip() diff --git a/insights/tests/client/data_collector/test__process_content_redaction.py b/insights/tests/client/data_collector/test__process_content_redaction.py new file mode 100644 index 000000000..d30f0b20b --- /dev/null +++ b/insights/tests/client/data_collector/test__process_content_redaction.py @@ -0,0 +1,92 @@ +from insights.client.data_collector import _process_content_redaction +from insights.client.constants import InsightsConstants as constants +from mock.mock import patch, Mock, call +from tempfile import NamedTemporaryFile +from subprocess import PIPE + + +test_file_data = 'test\nabcd\n1234\npassword: p4ssw0rd\n' +test_file = NamedTemporaryFile() +test_file.write(test_file_data.encode('utf-8')) +test_file.flush() + + +@patch('insights.client.data_collector.Popen') +@patch('insights.client.data_collector.NamedTemporaryFile') +def test_subproc_calls_egrep(tmpfile, Popen): + ''' + Verify that the sed command to remove passwords is called + + Verify that egrep is called when patterns to exclude are + present and regex == True + ''' + Popen.return_value.communicate = Mock(return_value=('test', None)) + _process_content_redaction(test_file.name, ['test1', 'test2'], True) + tmpfile.assert_called_once() + tmpfile.return_value.write.assert_called_once_with('\n'.join(['test1', 'test2']).encode('utf-8')) + tmpfile.return_value.flush.assert_called_once() + Popen.assert_has_calls([ + call(['sed', '-rf', constants.default_sed_file, test_file.name], stdout=PIPE), + call(['grep', '-v', '-E', '-f', 
tmpfile.return_value.name], stdin=Popen.return_value.stdout, stdout=PIPE) + ]) + + +@patch('insights.client.data_collector.Popen') +@patch('insights.client.data_collector.NamedTemporaryFile') +def test_subproc_calls_fgrep(tmpfile, Popen): + ''' + Verify that the sed command to remove passwords is called + + Verify that fgrep is called when patterns to exclude are + present and regex == False + ''' + Popen.return_value.communicate = Mock(return_value=('test', None)) + _process_content_redaction(test_file.name, ['test1', 'test2'], False) + tmpfile.assert_called_once() + tmpfile.return_value.write.assert_called_once_with('\n'.join(['test1', 'test2']).encode('utf-8')) + tmpfile.return_value.flush.assert_called_once() + Popen.assert_has_calls([ + call(['sed', '-rf', constants.default_sed_file, test_file.name], stdout=PIPE), + call(['grep', '-v', '-F', '-f', tmpfile.return_value.name], stdin=Popen.return_value.stdout, stdout=PIPE) + ]) + + +@patch('insights.client.data_collector.Popen') +@patch('insights.client.data_collector.NamedTemporaryFile') +def test_nogrep(tmpfile, Popen): + ''' + Verify that grep is not called when no patterns to exclude + are present + ''' + Popen.return_value.communicate = Mock(return_value=('test', None)) + _process_content_redaction(test_file.name, None, False) + tmpfile.assert_not_called() + Popen.assert_called_once_with(['sed', '-rf', constants.default_sed_file, test_file.name], stdout=PIPE) + + +# mock the .exp.sed file for QE pipeline +mock_sed_file = NamedTemporaryFile() +mock_sed_file.write("s/(password[a-zA-Z0-9_]*)(\\s*\\:\\s*\\\"*\\s*|\\s*\\\"*\\s*=\\s*\\\"\\s*|\\s*=+\\s*|\\s*--md5+\\s*|\\s*)([a-zA-Z0-9_!@#$%^&*()+=/-]*)/\\1\\2********/\ns/(password[a-zA-Z0-9_]*)(\\s*\\*+\\s+)(.+)/\\1\\2********/".encode('utf-8')) +mock_sed_file.flush() + + +@patch('insights.client.data_collector.constants.default_sed_file', mock_sed_file.name) +def test_returnvalue(): + ''' + Verify that the returned data is what we expect to see + ''' + # no exclude + retval = _process_content_redaction(test_file.name, [], False) + assert retval == 'test\nabcd\n1234\npassword: ********\n'.encode('utf-8') + + # no exclude also works with None + retval = _process_content_redaction(test_file.name, None, False) + assert retval == 'test\nabcd\n1234\npassword: ********\n'.encode('utf-8') + + # exclude plainstrings + retval = _process_content_redaction(test_file.name, ['test', 'abc'], False) + assert retval == '1234\npassword: ********\n'.encode('utf-8') + + # exclude regex + retval = _process_content_redaction(test_file.name, ['[[:digit:]]+', 'a*(b|c)'], True) + assert retval == 'test\npassword: ********\n'.encode('utf-8') diff --git a/insights/tests/client/data_collector/test_redact.py b/insights/tests/client/data_collector/test_redact.py new file mode 100644 index 000000000..37b817c31 --- /dev/null +++ b/insights/tests/client/data_collector/test_redact.py @@ -0,0 +1,210 @@ +from insights.client.config import InsightsConfig +from insights.client.archive import InsightsArchive +from insights.client.data_collector import DataCollector +from mock.mock import patch +import pytest +import os +import six + +test_file_data = 'test\nabcd\n1234\npassword: p4ssw0rd\n' + + +@patch('insights.client.data_collector.os.walk') +# @patch('insights.client.data_collector._process_content_redaction') +def test_redact_call_walk(walk): + ''' + Verify that redact() calls os.walk and when an + an archive structure is present in /var/tmp/**/insights-* + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # 
TODO: uncomment this once dual collector logic is merged. + # archive dir must be created explicitly + # arch.create_archive_dir() + + dc = DataCollector(conf, arch) + rm_conf = {} + + dc.redact(rm_conf) + walk.assert_called_once_with(arch.archive_dir) + + +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_call_process_redaction(_process_content_redaction): + ''' + Verify that redact() calls _process_content_redaction + then writes the returned data back to the same file + + Also verifies that the "exclude" parameter is None and the + "regex" parameter is False in the _process_content_redaction + call when rm_conf is empty + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # TODO: uncomment this once dual collector logic is merged. + # archive dir must be created explicitly + # arch.create_archive_dir() + + # put something in the archive to redact + test_file = os.path.join(arch.archive_dir, 'test.file') + with open(test_file, 'w') as t: + t.write(test_file_data) + + dc = DataCollector(conf, arch) + rm_conf = {} + + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True) as mock_open: + dc.redact(rm_conf) + _process_content_redaction.assert_called_once_with(test_file, None, False) + mock_open.assert_called_once_with(test_file, 'wb') + mock_open.return_value.__enter__.return_value.write.assert_called_once_with(_process_content_redaction.return_value) + + +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_exclude_regex(_process_content_redaction): + ''' + Verify that the _process_content_redaction call is made with + exclude == list of strings and regex == True when a list of + regex strings is defined in rm_conf + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # TODO: uncomment this once dual collector logic is merged. + # archive dir must be created explicitly + # arch.create_archive_dir() + + # put something in the archive to redact + test_file = os.path.join(arch.archive_dir, 'test.file') + with open(test_file, 'w') as t: + t.write(test_file_data) + + dc = DataCollector(conf, arch) + rm_conf = {'patterns': {'regex': ['12.*4', '^abcd']}} + + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True): + dc.redact(rm_conf) + _process_content_redaction.assert_called_once_with(test_file, ['12.*4', '^abcd'], True) + + +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_exclude_no_regex(_process_content_redaction): + ''' + Verify that the _process_content_redaction call is made with + exclude == list of strings and regex == False when a list + of pattern strings is defined in rm_conf + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # TODO: uncomment this once dual collector logic is merged. 
+ # archive dir must be created explicitly + # arch.create_archive_dir() + + # put something in the archive to redact + test_file = os.path.join(arch.archive_dir, 'test.file') + with open(test_file, 'w') as t: + t.write(test_file_data) + + dc = DataCollector(conf, arch) + rm_conf = {'patterns': ['1234', 'abcd']} + + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True): + dc.redact(rm_conf) + _process_content_redaction.assert_called_once_with(test_file, ['1234', 'abcd'], False) + + +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_exclude_empty(_process_content_redaction): + ''' + Verify that the _process_content_redaction call is made with + exclude == [] and regex == False when the patterns key is + defined but value is an empty list + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # TODO: uncomment this once dual collector logic is merged. + # archive dir must be created explicitly + # arch.create_archive_dir() + + # put something in the archive to redact + test_file = os.path.join(arch.archive_dir, 'test.file') + with open(test_file, 'w') as t: + t.write(test_file_data) + + dc = DataCollector(conf, arch) + rm_conf = {'patterns': []} + + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True): + dc.redact(rm_conf) + _process_content_redaction.assert_called_once_with(test_file, [], False) + + +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_exclude_none(_process_content_redaction): + ''' + Verify that the _process_content_redaction call is made with + exclude == None and regex == False when the patterns key is + defined but value is an empty dict + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + # TODO: uncomment this once dual collector logic is merged. 
+ # archive dir must be created explicitly + # arch.create_archive_dir() + + # put something in the archive to redact + test_file = os.path.join(arch.archive_dir, 'test.file') + with open(test_file, 'w') as t: + t.write(test_file_data) + + dc = DataCollector(conf, arch) + rm_conf = {'patterns': {}} + + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True): + dc.redact(rm_conf) + _process_content_redaction.assert_called_once_with(test_file, None, False) + + +@patch('insights.client.data_collector.os.walk') +@patch('insights.client.data_collector._process_content_redaction') +def test_redact_bad_location(_process_content_redaction, walk): + ''' + Verify that redact() raises a RuntimeError + if the directory present in InsightsArchive is + in a location other than /var/tmp/**/insights-* + ''' + conf = InsightsConfig() + arch = InsightsArchive(conf) + + for bad_path in ['/', '/home', '/etc', '/var/log/', '/home/test', '/var/tmp/f22D1d/ins2ghts']: + arch.archive_dir = bad_path + dc = DataCollector(conf, arch) + rm_conf = {} + with pytest.raises(RuntimeError): + dc.redact(rm_conf) + walk.assert_not_called() + _process_content_redaction.assert_not_called() diff --git a/insights/tests/client/test_insights_spec.py b/insights/tests/client/test_insights_spec.py deleted file mode 100644 index 9059406f3..000000000 --- a/insights/tests/client/test_insights_spec.py +++ /dev/null @@ -1,51 +0,0 @@ -from insights.client.insights_spec import InsightsFile -from insights.client.insights_spec import InsightsSpec -from mock.mock import patch, MagicMock, ANY -import mock - - -def test_string_pattern_init(): - ''' - Assert spec is loaded in string mode when a list of strings is present - in the "patterns" section - (legacy remove conf + new style w/ list only) - ''' - spec = InsightsSpec(MagicMock(), {'command': '', 'pattern': [], 'symbolic_name': ''}, ['test']) - assert not spec.regex - - -def test_regex_pattern_init(): - ''' - Assert spec is loaded in regex mode when a dict is present with the "wegex" - key with a list of strings as its value in the "patterns" section - ''' - spec = InsightsSpec(MagicMock(), {'command': '', 'pattern': [], 'symbolic_name': ''}, {'regex': ['test']}) - assert spec.regex - - -@patch('insights.client.insights_spec.Popen') -@patch('insights.client.insights_spec.os.path.isfile', return_value=True) -def test_string_pattern_called(isfile, Popen): - ''' - ''' - process_mock = mock.Mock() - attrs = {'communicate.return_value': (b'output', b'error')} - process_mock.configure_mock(**attrs) - Popen.return_value = process_mock - fs = InsightsFile({'file': '', 'pattern': [], 'symbolic_name': ''}, ['test'], '/') - fs.get_output() - Popen.assert_any_call(['grep', '-F', '-v', '-f', ANY], stdin=ANY, stdout=ANY) - - -@patch('insights.client.insights_spec.Popen') -@patch('insights.client.insights_spec.os.path.isfile', return_value=True) -def test_regex_pattern_called(isfile, Popen): - ''' - ''' - process_mock = mock.Mock() - attrs = {'communicate.return_value': (b'output', b'error')} - process_mock.configure_mock(**attrs) - Popen.return_value = process_mock - fs = InsightsFile({'file': '', 'pattern': [], 'symbolic_name': ''}, {'regex': ['test']}, '/') - fs.get_output() - Popen.assert_any_call(['grep', '-E', '-v', '-f', ANY], stdin=ANY, stdout=ANY) diff --git a/insights/tests/client/test_skip_commands_files.py b/insights/tests/client/test_skip_commands_files.py index 1b8e9a499..d895f68c1 100644 --- 
a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -60,7 +60,8 @@ def test_omit_symbolic_name(InsightsCommand, InsightsFile, parse_file_spec): @patch("insights.client.data_collector.InsightsCommand") @patch("insights.client.data_collector.InsightsFile") @patch("insights.client.data_collector.InsightsArchive") -def test_symbolic_name_bc(InsightsArchive, InsightsFile, InsightsCommand): +@patch("insights.client.data_collector.DataCollector.redact") +def test_symbolic_name_bc(_, InsightsArchive, InsightsFile, InsightsCommand): """ WICKED EDGE CASE: in case uploader.json is old and doesn't have symbolic names, don't crash """ From 9842a2cf9cf5631c55122cc68081d0631bde8a52 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 9 Jul 2020 15:23:34 -0400 Subject: [PATCH 100/892] use regex because cloud.stage messes everything up (#2645) Signed-off-by: Jeremy Crafts Co-authored-by: Stephen --- insights/client/auto_config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/client/auto_config.py b/insights/client/auto_config.py index 19e740124..337b37622 100644 --- a/insights/client/auto_config.py +++ b/insights/client/auto_config.py @@ -5,6 +5,7 @@ import logging import os import requests +import re try: from urlparse import urlparse @@ -238,6 +239,6 @@ def try_auto_configuration(config): if config.auto_config and not config.offline: if not _try_satellite6_configuration(config): _try_satellite5_configuration(config) - if not config.legacy_upload and 'cloud.redhat.com' not in config.base_url: + if not config.legacy_upload and not re.match(r'(\w+\.)?cloud\.(\w+\.)?redhat\.com', config.base_url): config.base_url = config.base_url + '/platform' logger.debug('Updated base_url: %s', config.base_url) From 80a1ec7bcb14e12c9700875290224f91ae289126 Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 9 Jul 2020 15:37:25 -0400 Subject: [PATCH 101/892] Introduce egg router (#2634) * Add the module-router for client pre-release support We have a router service that allows for customers to register to use the pre-release version of the egg to try new features of the client Signed-off-by: Stephen Adams * Add logging to egg module router call Signed-off-by: Stephen Adams * Add schema check to egg_url fetch Signed-off-by: Stephen Adams * Update egg fetch to use connection object base_url Signed-off-by: Stephen Adams * Properly create url for legacy upload egg url fetch Signed-off-by: Stephen Adams * Fix bug in legacy_upload check for egg_url Signed-off-by: Stephen Adams --- insights/client/__init__.py | 22 ++++++++++++++++++++-- insights/client/constants.py | 1 + 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 3027eeb3c..d9f8b88f3 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -95,6 +95,23 @@ def branch_info(self): """ return client.get_branch_info(self.config, self.connection) + @_net + def get_egg_url(self): + """ + Get the proper url based on the configured egg release branch + """ + if self.config.legacy_upload: + url = self.connection.base_url + '/platform' + constants.module_router_path + else: + url = self.connection.base_url + constants.module_router_path + logger.log(NETWORK, "GET %s", url) + response = self.session.get(url, timeout=self.config.http_timeout) + if response.status_code == 200: + return response.json()["url"] + else: + logger.warning("Unable to fetch egg url. 
Defaulting to /release") + return '/release' + def fetch(self, force=False): """ returns (dict): {'core': path to new egg, None if no update, @@ -109,16 +126,17 @@ def fetch(self, force=False): logger.debug("Beginning core fetch.") # guess the URLs based on what legacy setting is + egg_release = self.get_egg_url() egg_url = self.config.egg_path egg_gpg_url = self.config.egg_gpg_path if egg_url is None: - egg_url = '/v1/static/core/insights-core.egg' + egg_url = '/v1/static{0}/insights-core.egg'.format(egg_release) # if self.config.legacy_upload: # egg_url = '/v1/static/core/insights-core.egg' # else: # egg_url = '/static/insights-core.egg' if egg_gpg_url is None: - egg_gpg_url = '/v1/static/core/insights-core.egg.asc' + egg_gpg_url = '/v1/static{0}/insights-core.egg.asc'.format(egg_release) # if self.config.legacy_upload: # egg_gpg_url = '/v1/static/core/insights-core.egg.asc' # else: diff --git a/insights/client/constants.py b/insights/client/constants.py index a39a14acd..f79bdf38a 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -40,6 +40,7 @@ class InsightsConstants(object): insights_core_last_stable_gpg_sig = os.path.join(insights_core_lib_dir, 'last_stable.egg.asc') insights_core_newest = os.path.join(insights_core_lib_dir, 'newest.egg') insights_core_gpg_sig_newest = os.path.join(insights_core_lib_dir, 'newest.egg.asc') + module_router_path = "/module-update-router/v1/channel?module=insights-core" sig_kill_ok = 100 sig_kill_bad = 101 cached_branch_info = os.path.join(default_conf_dir, '.branch_info') From 9200976efd84a6522432e021ad0f9b52e633db78 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 9 Jul 2020 15:55:46 -0400 Subject: [PATCH 102/892] restore help messages for certain options (#2658) Signed-off-by: Jeremy Crafts --- insights/client/config.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/insights/client/config.py b/insights/client/config.py index 09dfd143e..17806aeb6 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -376,23 +376,21 @@ 'payload': { 'default': None, 'opt': ['--payload'], - # 'help': 'Use Insights client to upload an archive', - 'help': argparse.SUPPRESS, + 'help': 'Use the Insights Client to upload an archive', 'action': 'store', 'group': 'platform' }, 'content_type': { 'default': None, 'opt': ['--content-type'], - # 'help': 'Content type of the archive specified with --payload', - 'help': argparse.SUPPRESS, + 'help': 'Content type of the archive specified with --payload', 'action': 'store', 'group': 'platform' }, 'diagnosis': { 'default': None, 'opt': ['--diagnosis'], - 'help': argparse.SUPPRESS, + 'help': 'Retrieve a diagnosis for this system', 'const': True, 'nargs': '?', 'group': 'platform' From 96568af7a1e07e77bf9cddad2b04df8eeacf5d4d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 10 Jul 2020 16:20:55 +0800 Subject: [PATCH 103/892] New parser for ls_tmp (#2659) * New parser for ls_tmp Signed-off-by: Xiangce Liu * Forget the filterable=True Signed-off-by: Xiangce Liu * Add docstring for filterable Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/ls_tmp.rst | 3 ++ insights/parsers/ls_tmp.py | 58 ++++++++++++++++++++++++++ insights/parsers/tests/test_ls_tmp.py | 48 +++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 112 insertions(+) create mode 100644 docs/shared_parsers_catalog/ls_tmp.rst create mode 100644 insights/parsers/ls_tmp.py create mode 100644 
insights/parsers/tests/test_ls_tmp.py

diff --git a/docs/shared_parsers_catalog/ls_tmp.rst b/docs/shared_parsers_catalog/ls_tmp.rst
new file mode 100644
index 000000000..8f597e2a8
--- /dev/null
+++ b/docs/shared_parsers_catalog/ls_tmp.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.ls_tmp
+    :members:
+    :show-inheritance:

diff --git a/insights/parsers/ls_tmp.py b/insights/parsers/ls_tmp.py
new file mode 100644
index 000000000..5ba3bfd91
--- /dev/null
+++ b/insights/parsers/ls_tmp.py
@@ -0,0 +1,58 @@
+"""
+LsTmp - Command ``ls -la /tmp``
+===============================
+
+The ``ls -la /tmp`` command provides information for the listing of the
+``/tmp`` directory.
+
+"""
+from insights import parser, FileListing, CommandParser
+from insights.specs import Specs
+
+
+@parser(Specs.ls_tmp)
+class LsTmp(CommandParser, FileListing):
+    """
+    Parses output of ``ls -la /tmp`` command. See ``FileListing`` class for
+    additional information.
+
+    To prevent the output content from being too large, filters should be
+    applied. Sample output of ``ls -la /tmp`` after filtering::
+
+        drwxrwxr-x. 2 whuser whuser 216 Jul 9 07:09 aws_sos
+        -rw-r--r--. 1 rauser rauser 1123 Jul 10 00:00 clean-old-archive.log
+        -rw-rw-r--. 1 whuser whuser 9620 Jul 9 07:09 daily-extraction-warehouse-run.log
+        -rw-rw-r--. 1 whuser whuser 11214 Jul 9 07:09 dask_master.log
+        -rw-rw-r--. 1 whuser whuser 27091 Jul 9 07:09 dask_worker.log
+        -rw-r--r--. 1 user10 user10 29 Jul 10 00:00 date.out
+        -rw-r--r--. 1 rauser rauser 325933528 Jul 10 00:18 delete-bad-pods.log
+        -rw-rw-r--. 1 whuser whuser 0 Jul 9 07:08 extraction_driver.log
+        drwxrwxrwt. 2 root root 6 Mar 27 2017 .font-unix
+        drwxrwxr-x. 3 user1 user1 17 Oct 28 2019 hadoop-user1
+        drwxr-xr-x. 2 user1 user1 32 Jul 4 18:29 hsperfdata_user1
+        drwxr-xr-x. 2 rauser rauser 6 Jul 10 00:00 hsperfdata_rauser
+        drwxr-xr-x. 2 root root 6 Jul 1 14:53 hsperfdata_root
+        drwxrwxrwt. 2 root root 6 Mar 27 2017 .ICE-unix
+        srw-rw----. 1 user10 user10 0 Jan 6 2020 lh_pair
+        srw-rw----. 1 user10 user10 0 Jan 28 11:24 lh_pair_ex
+
+
+    Examples:
+        >>> type(ls_tmp)
+        <class 'insights.parsers.ls_tmp.LsTmp'>
+        >>> len(ls_tmp.files_of("/tmp"))
+        9
+        >>> len(ls_tmp.dirs_of("/tmp"))
+        7
+        >>> "/tmp" in ls_tmp
+        True
+        >>> "aws_sos" in ls_tmp.dirs_of("/tmp")
+        True
+        >>> "aws_sos" not in ls_tmp.files_of("/tmp")
+        True
+        >>> "date.out" not in ls_tmp.dirs_of("/tmp")
+        True
+        >>> "date.out" in ls_tmp.files_of("/tmp")
+        True
+    """
+    pass

diff --git a/insights/parsers/tests/test_ls_tmp.py b/insights/parsers/tests/test_ls_tmp.py
new file mode 100644
index 000000000..7cd55d9bc
--- /dev/null
+++ b/insights/parsers/tests/test_ls_tmp.py
@@ -0,0 +1,48 @@
+import doctest
+from insights.parsers import ls_tmp
+from insights.parsers.ls_tmp import LsTmp
+from insights.tests import context_wrap
+
+LS_TMP = """
+drwxrwxr-x. 2 whuser whuser 216 Jul 9 07:09 aws_sos
+-rw-r--r--. 1 rauser rauser 1123 Jul 10 00:00 clean-old-archive.log
+-rw-rw-r--. 1 whuser whuser 9620 Jul 9 07:09 daily-extraction-warehouse-run.log
+-rw-rw-r--. 1 whuser whuser 11214 Jul 9 07:09 dask_master.log
+-rw-rw-r--. 1 whuser whuser 27091 Jul 9 07:09 dask_worker.log
+-rw-r--r--. 1 user10 user10 29 Jul 10 00:00 date.out
+-rw-r--r--. 1 rauser rauser 325933528 Jul 10 00:18 delete-bad-pods.log
+-rw-rw-r--. 1 whuser whuser 0 Jul 9 07:08 extraction_driver.log
+drwxrwxrwt. 2 root root 6 Mar 27 2017 .font-unix
+drwxrwxr-x. 3 user1 user1 17 Oct 28 2019 hadoop-user1
+drwxr-xr-x. 2 user1 user1 32 Jul 4 18:29 hsperfdata_user1
+drwxr-xr-x. 2 rauser rauser 6 Jul 10 00:00 hsperfdata_rauser
+drwxr-xr-x.
2 root root 6 Jul 1 14:53 hsperfdata_root +drwxrwxrwt. 2 root root 6 Mar 27 2017 .ICE-unix +srw-rw----. 1 user10 user10 0 Jan 6 2020 lh_pair +srw-rw----. 1 user10 user10 0 Jan 28 11:24 lh_pair_ex +""" + +path = 'insights_commands/ls_-la_.tmp' + + +def test_ls_tmp(): + ls_tmp = LsTmp(context_wrap(LS_TMP, path=path)) + assert len(ls_tmp.listing_of("/tmp")) == 16 + assert len(ls_tmp.dirs_of("/tmp")) == 7 + assert len(ls_tmp.files_of("/tmp")) == 9 + expected = sorted( + [ + 'clean-old-archive.log', 'daily-extraction-warehouse-run.log', + 'dask_master.log', 'dask_worker.log', 'date.out', + 'delete-bad-pods.log', 'extraction_driver.log', 'lh_pair', + 'lh_pair_ex' + ] + ) + actual = ls_tmp.listings.get("/tmp")['files'] + assert actual == expected + + +def test_doc_examples(): + env = {'ls_tmp': LsTmp(context_wrap(LS_TMP, path=path))} + failed, total = doctest.testmod(ls_tmp, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index cbff8d873..15fd9aad6 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -307,6 +307,7 @@ class Specs(SpecSet): ls_var_spool_postfix_maildrop = RegistryPoint() ls_var_tmp = RegistryPoint(filterable=True) ls_var_www = RegistryPoint() + ls_tmp = RegistryPoint(filterable=True) lsblk = RegistryPoint() lsblk_pairs = RegistryPoint() lscpu = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index e091606b2..91186e562 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -556,6 +556,7 @@ def semid(broker): ls_var_tmp = simple_command("/bin/ls -ln /var/tmp") ls_var_run = simple_command("/bin/ls -lnL /var/run") ls_var_www = simple_command("/bin/ls -la /dev/null /var/www") # https://github.com/RedHatInsights/insights-core/issues/827 + ls_tmp = simple_command("/bin/ls -la /tmp") lsblk = simple_command("/bin/lsblk") lsblk_pairs = simple_command("/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO") lscpu = simple_command("/usr/bin/lscpu") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 64e25782d..f6c4bfa2c 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -144,6 +144,7 @@ class InsightsArchiveSpecs(Specs): ls_var_tmp = simple_file("insights_commands/ls_-ln_.var.tmp") ls_var_run = simple_file("insights_commands/ls_-lnL_.var.run") ls_var_www = simple_file("insights_commands/ls_-la_.dev.null_.var.www") + ls_tmp = simple_file("insights_commands/ls_-la_.tmp") lsblk = simple_file("insights_commands/lsblk") lsblk_pairs = simple_file("insights_commands/lsblk_-P_-o_NAME_KNAME_MAJ_MIN_FSTYPE_MOUNTPOINT_LABEL_UUID_RA_RO_RM_MODEL_SIZE_STATE_OWNER_GROUP_MODE_ALIGNMENT_MIN-IO_OPT-IO_PHY-SEC_LOG-SEC_ROTA_SCHED_RQ-SIZE_TYPE_DISC-ALN_DISC-GRAN_DISC-MAX_DISC-ZERO") lscpu = simple_file("insights_commands/lscpu") From 65e8d22e7c78500b9e262f24948cacb1167db36d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Mon, 13 Jul 2020 11:06:17 +0800 Subject: [PATCH 104/892] Add vendor attribute to InstalledRpm (#2660) Signed-off-by: Xiangce Liu --- insights/parsers/installed_rpms.py | 3 +++ insights/parsers/tests/test_installed_rpms.py | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index cd7010026..fbdfa7546 100644 --- a/insights/parsers/installed_rpms.py +++ 
b/insights/parsers/installed_rpms.py @@ -363,6 +363,8 @@ def __init__(self, data): self.redhat_signed = None """bool: True when RPM package is signed by Red Hat, False when RPM package is not signed by Red Hat, None when no sufficient info to determine""" + self.vendor = None + """str: RPM package vendor.""" if isinstance(data, six.string_types): data = self._parse_package(data) @@ -370,6 +372,7 @@ def __init__(self, data): for k, v in data.items(): setattr(self, k, v) self.epoch = data['epoch'] if 'epoch' in data and data['epoch'] != '(none)' else '0' + self.vendor = data['vendor'] if 'vendor' in data and data['vendor'] != '(none)' else None _gpg_key_pos = data.get('sigpgp', data.get('rsaheader', data.get('pgpsig_short', data.get('pgpsig', data.get('vendor', ''))))) if _gpg_key_pos: self.redhat_signed = any(key in _gpg_key_pos for key in self.PRODUCT_SIGNING_KEYS) diff --git a/insights/parsers/tests/test_installed_rpms.py b/insights/parsers/tests/test_installed_rpms.py index fb6643c11..5cbe74972 100644 --- a/insights/parsers/tests/test_installed_rpms.py +++ b/insights/parsers/tests/test_installed_rpms.py @@ -61,6 +61,9 @@ {"name": "ca-certificates","version": "2015.2.6","epoch": "(none)","release": "70.1.el7_2","arch": "noarch","installtime": "Fri 24 Jun 2016 04:18:04 PM EDT","buildtime": "1453976868","rsaheader": "RSA/SHA256, Tue 02 Feb 2016 09:45:04 AM EST, Key ID 199e2f91fd431d51","dsaheader": "(none)","srpm": "ca-certificates-2015.2.6-70.1.el7_2.src.rpm"} {"name": "jline","version": "1.0","epoch": "(none)","release": "8.el7","arch": "noarch","installtime": "Thu 02 Jun 2016 05:10:32 PM EDT","buildtime": "1388212830","rsaheader": "RSA/SHA256, Tue 01 Apr 2014 02:54:16 PM EDT, Key ID 199e2f91fd431d51","dsaheader": "(none)","srpm": "jline-1.0-8.el7.src.rpm"} {"name": "libteam","version": "1.17","epoch": "(none)","release": "6.el7_2","arch": "x86_64","installtime": "Fri 24 Jun 2016 04:18:17 PM EDT","buildtime": "1454604485","rsaheader": "RSA/SHA256, Wed 17 Feb 2016 02:25:16 AM EST, Key ID 199e2f91fd431d51","dsaheader": "(none)","srpm": "libteam-1.17-6.el7_2.src.rpm"} +{"name": "crash","epoch":"(none)","version":"7.1.0","release":"8.el6","arch":"x86_64","installtime":"Fri Jul 13 06:53:28 2018","buildtime":"1524061059","vendor":"Red Hat, Inc.","buildhost":"x86-032.build.eng.bos.redhat.com","sigpgp":"RSA/8, Wed Apr 18 10:40:59 2018, Key ID 199e2f91fd431d51"} +{"name": "xorg-x11-drv-vmmouse","epoch":"(none)","version":"13.1.0","release":"1.el6","arch":"x86_64","installtime":"Thu Aug 4 12:23:32 2016","buildtime":"1447274489","vendor":"Red Hat, Inc.","buildhost":"x86-028.build.eng.bos.redhat.com","sigpgp":"RSA/8, Mon Apr 4 11:35:36 2016, Key ID 199e2f91fd431d51"} +{"name": "libnl","epoch":"(none)","version":"1.1.4","release":"2.el6","arch":"x86_64","installtime":"Mon Jun 16 13:21:21 2014","buildtime":"1378459378","vendor":"Red Hat, Inc.","buildhost":"x86-007.build.bos.redhat.com","sigpgp":"RSA/8, Mon Sep 23 07:25:47 2013, Key ID 199e2f91fd431d51"} '''.strip() RPMS_MULTIPLE = ''' @@ -178,6 +181,10 @@ def test_from_line(): assert rpms.get_max("yum").redhat_signed assert rpms.corrupt is False + assert rpms.newest('BESAgent').vendor == 'IBM Corp.' + assert rpms.newest('yum').vendor == 'Red Hat, Inc.' 
+ assert rpms.newest('kernel').vendor is None + def test_from_json(): rpms = InstalledRpms(context_wrap(RPMS_JSON)) @@ -187,6 +194,9 @@ def test_from_json(): assert rpms.get_max("util-linux").epoch == '0' assert rpms.get_max("jboss-servlet-3.0-api").redhat_signed + assert rpms.newest('libnl').vendor == 'Red Hat, Inc.' + assert rpms.newest('log4j').vendor is None + def test_garbage(): rpms = InstalledRpms(context_wrap(RPMS_PACKAGE_WITH_GARBAGE)) From 0fbd7ab3d957ca20f9d856e77522e373a8cfe3c9 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 14 Jul 2020 09:45:42 +0800 Subject: [PATCH 105/892] Add spec for sosreport (#2661) * Add spec for sosreport Signed-off-by: Huanhuan Li * Change simple_file to glob_file Signed-off-by: Huanhuan Li --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 38f81f6a1..8f4e92342 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -33,6 +33,7 @@ class SosSpecs(Specs): ) ] ) + corosync_cmapctl = glob_file("sos_commands/corosync/corosync-cmapctl*") cpupower_frequency_info = simple_file("sos_commands/processor/cpupower_frequency-info") date = first_of([simple_file("sos_commands/general/date"), simple_file("sos_commands/date/date")]) df__al = first_file(["sos_commands/filesys/df_-al", "sos_commands/filesys/df_-al_-x_autofs"]) From 5b6cd877fd546ce9f0f9c5cba703259067e265f6 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 15 Jul 2020 00:21:15 -0400 Subject: [PATCH 106/892] load filters if available (#2647) * load filters if available Signed-off-by: Jeremy Crafts * fall back to hostname if hostname -f fails Signed-off-by: Jeremy Crafts * catch any IOError when loading Signed-off-by: Jeremy Crafts --- insights/collect.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 38ddaf7b3..3d430eb0c 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -18,10 +18,10 @@ from datetime import datetime from insights import apply_configs, apply_default_enabled, dr -from insights.core import blacklist +from insights.core import blacklist, filters from insights.core.serde import Hydration from insights.util import fs -from insights.util.subproc import call +from insights.util.subproc import call, CalledProcessError SAFE_ENV = { "PATH": os.path.pathsep.join([ @@ -246,7 +246,20 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): to_persist = get_to_persist(client.get("persist", set())) - hostname = call("hostname -f", env=SAFE_ENV).strip() + try: + filters.load() + except IOError as e: + # could not load filters file + log.debug("No filters available: %s", str(e)) + except AttributeError as e: + # problem parsing the filters + log.debug("Could not parse filters: %s", str(e)) + + try: + hostname = call("hostname -f", env=SAFE_ENV).strip() + except CalledProcessError: + # problem calling hostname -f + hostname = call("hostname", env=SAFE_ENV).strip() suffix = datetime.utcnow().strftime("%Y%m%d%H%M%S") relative_path = "insights-%s-%s" % (hostname, suffix) tmp_path = tmp_path or tempfile.gettempdir() From aa408c923b642f695c329970801f70012443f1c3 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 15 Jul 2020 09:53:45 -0400 Subject: [PATCH 107/892] load client denylist in core collector (#2654) * load remove.conf in collect.py, disable parallel Signed-off-by: Jeremy Crafts * log when commands/files are 
skipped Signed-off-by: Jeremy Crafts --- insights/collect.py | 18 +++++++++++++++--- insights/core/spec_factory.py | 2 ++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 3d430eb0c..a088367f2 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -66,7 +66,7 @@ enabled: true run_strategy: - name: parallel + name: serial args: max_workers: null @@ -214,7 +214,7 @@ def get_pool(parallel, kwargs): yield None -def collect(manifest=default_manifest, tmp_path=None, compress=False): +def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=None): """ This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. @@ -228,7 +228,9 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz - + rm_conf (dict): Client-provided python dict containing keys + "commands", "files", and "keywords", to be injected + into the manifest blacklist. Returns: The full path to the created tar.gz or workspace. """ @@ -244,6 +246,16 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): apply_blacklist(client.get("blacklist", {})) + # insights-client + rm_conf = rm_conf or {} + apply_blacklist(rm_conf) + for component in rm_conf.get('components', []): + if not dr.get_component_by_name(component): + log.warning('WARNING: Unknown component in blacklist: %s' % component) + else: + dr.set_enabled(component, enabled=False) + log.warning('WARNING: Skipping component: %s', component) + to_persist = get_to_persist(client.get("persist", set())) try: diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py index f045c75e6..ee2ebf45f 100644 --- a/insights/core/spec_factory.py +++ b/insights/core/spec_factory.py @@ -175,6 +175,7 @@ def __init__(self, relative_path, root="/", ds=None, ctx=None): def validate(self): if not blacklist.allow_file("/" + self.relative_path): + log.warning("WARNING: Skipping file %s", "/" + self.relative_path) raise dr.SkipComponent() if not os.path.exists(self.path): @@ -313,6 +314,7 @@ def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, time def validate(self): if not blacklist.allow_command(self.cmd): + log.warning("WARNING: Skipping command %s", self.cmd) raise dr.SkipComponent() if not which(shlex.split(self.cmd)[0], env=self.create_env()): From b4dd37cf1af2dba57aa2a607bb002ee9097a238c Mon Sep 17 00:00:00 2001 From: Sachin Date: Thu, 16 Jul 2020 02:32:35 +0530 Subject: [PATCH 108/892] Add parser for command: engine-db-query --statement "QUERY" --json (#2643) Signed-off-by: Sachin Patil --- .../engine_db_query.rst | 3 + insights/parsers/engine_db_query.py | 50 +++++++++++++ .../parsers/tests/test_engine_db_query.py | 72 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 128 insertions(+) create mode 100644 docs/shared_parsers_catalog/engine_db_query.rst create mode 100644 insights/parsers/engine_db_query.py create mode 100644 insights/parsers/tests/test_engine_db_query.py diff --git a/docs/shared_parsers_catalog/engine_db_query.rst b/docs/shared_parsers_catalog/engine_db_query.rst new file mode 100644 index 000000000..91b13bbc5 --- /dev/null +++ b/docs/shared_parsers_catalog/engine_db_query.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.engine_db_query + :members: + :show-inheritance: diff --git a/insights/parsers/engine_db_query.py b/insights/parsers/engine_db_query.py new file mode 100644 index 000000000..c76959e6e --- /dev/null +++ b/insights/parsers/engine_db_query.py @@ -0,0 +1,50 @@ +""" +EngineDBQuery - command ``engine-db-query --statement "" --json`` +============================================================================ + +Parses the output of the command `engine-db-query` returned in JSON format. +""" +from insights.core import CommandParser, JSONParser +from insights.core.plugins import parser +from insights.specs import Specs + + +@parser(Specs.engine_db_query_vdsm_version) +class EngineDBQueryVDSMversion(CommandParser, JSONParser): + """ + Get the hostname & vdsm package version along with host info. + + Class for parsing the output of the command - ``engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;" --json``. + + Attributes: + data (dict): Host info. + + Sample output of this command is:: + + { + "id_host": "None", + "when": "2020-06-21 12:45:59", + "time": "0.00263094902039", + "name": "None", + "description": "None", + "type": "None", + "kb": "None", + "bugzilla": "None", + "file": "", + "path": "None", + "id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "hash": "d41d8cd98f00b204e9800998ecf8427e", + "result": [{"vds_name": "hosto", "rpm_version": "vdsm-4.30.40-1.el7ev"}] + } + + + Examples: + >>> output.get('id', None) == 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + True + >>> output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.30.40-1.el7ev'}] + True + """ + @property + def result(self): + """Get the value of 'result'.""" + return self.data.get('result', []) diff --git a/insights/parsers/tests/test_engine_db_query.py b/insights/parsers/tests/test_engine_db_query.py new file mode 100644 index 000000000..d6147ddc4 --- /dev/null +++ b/insights/parsers/tests/test_engine_db_query.py @@ -0,0 +1,72 @@ +import doctest +from insights.parsers import engine_db_query +from insights.tests import context_wrap + + +OUTPUT = """ +{ + "id_host": "None", + "when": "2020-06-21 12:45:59", + "time": "0.00263094902039", + "name": "None", + "description": "None", + "type": "None", + "kb": "None", + "bugzilla": "None", + "file": "", + "path": "None", + "id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "hash": "d41d8cd98f00b204e9800998ecf8427e", + "result": [ + { + "vds_name": "hosto", + "rpm_version": "vdsm-4.30.40-1.el7ev" + } + ] +} +""".strip() + +OUTPUT_2 = """ +{ + "id_host": "None", + "when": "2020-06-21 12:45:59", + "time": "0.00263094902039", + "name": "None", + "description": "None", + "type": "None", + "kb": "None", + "bugzilla": "None", + "file": "", + "path": "None", + "id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "hash": "d41d8cd98f00b204e9800998ecf8427e", + "result": [ + { + "vds_name": "hosto", + "rpm_version": "vdsm-4.40.20-33.git1b7dedcf3.fc30" + }, + { + "vds_name": "hosto2", + "rpm_version": "vdsm-4.40.13-38.gite9bae3c68.fc30" + } + ] +} +""".strip() + + +def test_edbq(): + output = engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT)) + assert output.get('id', None) == 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' + assert output.result[0].get('rpm_version') == 'vdsm-4.30.40-1.el7ev' + + # for multiple hosts + output = 
engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT_2)) + assert output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.40.20-33.git1b7dedcf3.fc30'}, {'vds_name': 'hosto2', 'rpm_version': 'vdsm-4.40.13-38.gite9bae3c68.fc30'}] + + +def test_doc_examples(): + env = { + 'output': engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT)) + } + failed, total = doctest.testmod(engine_db_query, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 15fd9aad6..57bfd448e 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -140,6 +140,7 @@ class Specs(SpecSet): du_dirs = RegistryPoint(multi_output=True) dumpe2fs_h = RegistryPoint(multi_output=True) engine_config_all = RegistryPoint() + engine_db_query_vdsm_version = RegistryPoint() engine_log = RegistryPoint(filterable=True) etc_journald_conf_d = RegistryPoint(multi_output=True) etc_journald_conf = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 91186e562..61d8da592 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -311,6 +311,7 @@ def dumpdev(broker): du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") dumpe2fs_h = foreach_execute(dumpdev, "/sbin/dumpe2fs -h %s") engine_config_all = simple_command("/usr/bin/engine-config --all") + engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;" --json') engine_log = simple_file("/var/log/ovirt-engine/engine.log") etc_journald_conf = simple_file(r"etc/systemd/journald.conf") etc_journald_conf_d = glob_file(r"etc/systemd/journald.conf.d/*.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f6c4bfa2c..147bafbce 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -57,6 +57,7 @@ class InsightsArchiveSpecs(Specs): docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") du_dirs = glob_file("insights_commands/du_-s_-k_*") engine_config_all = simple_file("insights_commands/engine-config_--all") + engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_-s_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") ethtool_S = glob_file("insights_commands/ethtool_-S_*") ethtool_T = glob_file("insights_commands/ethtool_-T_*") From 87579d952fec9b19220dc869509b4351b70305fb Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 16 Jul 2020 05:37:27 +0800 Subject: [PATCH 109/892] Update 'corrupt' and docstring of InstalledRpms for empty packages (#2650) * Raise SkipException when no packages are found in installed_rpms - The spec installed_rpms sometimes collects nothing but the following kind of error messages only: error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal error, run database recovery error: cannot open Packages database in /var/lib/rpm - The InstalledRpms should raise SkipException and skip it to avoid false positives when need to check packages not installed. 
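- A minimal sketch of the safe exclusion check (``rpms`` is assumed to be a
  parsed InstalledRpms instance; "bash" is only an example package):

      if rpms.packages and "bash" not in rpms:
          # rpm DB was readable and the package is genuinely absent
          pass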
Signed-off-by: Xiangce Liu * Raise the Exception when applying `in` - to keep the "corrupt" situation, in case it's useful for some rules Signed-off-by: Xiangce Liu * remove unused lines Signed-off-by: Xiangce Liu * remove one more unrelated line Signed-off-by: Xiangce Liu * Keep the package as empty dict instead of None when db corruption Signed-off-by: Xiangce Liu --- insights/parsers/installed_rpms.py | 39 +++++++++++++++---- insights/parsers/tests/test_installed_rpms.py | 19 +++++++++ 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index fbdfa7546..2b790a4c5 100644 --- a/insights/parsers/installed_rpms.py +++ b/insights/parsers/installed_rpms.py @@ -153,6 +153,14 @@ def __contains__(self, package_name): """ Checks if package name is in list of installed RPMs. + .. note:: + The :attr:`packages` could be empty, e.g. when rpm database corrupt. + When doing exclusion check, make sure the ``packages`` is NOT + empty, e.g.:: + + >>> if rpms.packages and "pkg_name" not in rpms: + >>> pass + Args: package_name (str): RPM package name such as 'bash' @@ -217,16 +225,26 @@ class InstalledRpms(CommandParser, RpmList): related information. """ def __init__(self, *args, **kwargs): - self.errors = [] + self.errors = list() """list: List of input lines that indicate an error acquiring the data on the client.""" - self.unparsed = [] + self.unparsed = list() """list: List of input lines that raised an exception during parsing.""" - self.packages = defaultdict(list) - """dict (InstalledRpm): Dictionary of RPMs keyed by package name.""" + self.packages = dict() + """ + dict (InstalledRpm): Dictionary of RPMs keyed by package name. + .. note:: + The ``packages`` could be empty, e.g. when rpm database corrupt. 
+ When doing exclusion check, make sure the ``packages`` is NOT + empty, e.g.:: + + >>> if rpms.packages and "pkg_name" not in rpms.packages: + >>> pass + """ super(InstalledRpms, self).__init__(*args, **kwargs) def parse_content(self, content): + packages = defaultdict(list) for line in get_active_lines(content, comment_char='COMMAND>'): if line.startswith('error:') or line.startswith('warning:'): self.errors.append(line) @@ -234,23 +252,28 @@ def parse_content(self, content): try: # Try to parse from JSON input rpm = InstalledRpm.from_json(line) - self.packages[rpm.name].append(rpm) + packages[rpm.name].append(rpm) except Exception: # If that fails, try to parse from line input if line.strip(): try: rpm = InstalledRpm.from_line(line) - self.packages[rpm.name].append(rpm) + packages[rpm.name].append(rpm) except Exception: # Both ways failed self.unparsed.append(line) # Don't want defaultdict's behavior after parsing is complete - self.packages = dict(self.packages) + self.packages = dict(packages) @property def corrupt(self): """bool: True if RPM database is corrupted, else False.""" - return any('rpmdbNextIterator' in s for s in self.errors) + _corrupts = [ + 'error: rpmdbNextIterator', + 'error: rpmdb: BDB0113', + 'error: db5 error', + ] + return any(c in s for s in self.errors for c in _corrupts) p = re.compile(r"(\d+|[a-z]+|\.|-|_)") diff --git a/insights/parsers/tests/test_installed_rpms.py b/insights/parsers/tests/test_installed_rpms.py index 5cbe74972..063c9bffd 100644 --- a/insights/parsers/tests/test_installed_rpms.py +++ b/insights/parsers/tests/test_installed_rpms.py @@ -83,6 +83,16 @@ yum-security-1.1.16-21.el5.noarch '''.strip() +ERROR_DB_NO_PKG = """ +error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library +error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal error, run database recovery +error: cannot open Packages index using db5 - (-30973) +error: cannot open Packages database in /var/lib/rpm +error: rpmdb: BDB0113 Thread/process 20263/140251984590912 failed: BDB1507 Thread died in Berkeley DB library +error: db5 error(-30973) from dbenv->failchk: BDB0087 DB_RUNRECOVERY: Fatal error, run database recovery +error: cannot open Packages database in /var/lib/rpm +""".strip() + ORACLEASM_RPMS = ''' oracleasm-2.6.18-164.el5-2.0.5-1.el5.x86_64 oracleasmlib-2.0.4-1.el5.x86_64 @@ -205,8 +215,17 @@ def test_garbage(): def test_corrupt_db(): rpms = InstalledRpms(context_wrap(ERROR_DB)) + assert rpms.corrupt is True assert "yum-security" in rpms.packages + assert "yum-security" in rpms + + rpms = InstalledRpms(context_wrap(ERROR_DB_NO_PKG)) assert rpms.corrupt is True + assert not rpms.packages + assert "kernel" not in rpms + assert "kernel" not in rpms.packages + assert rpms.newest("kernel") is None + assert rpms.oldest("kernel") is None def test_rpm_manifest(): From 816c9a63ae2814995c2823d9b36c4788e90fbf62 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 16 Jul 2020 10:24:15 -0400 Subject: [PATCH 110/892] use SIGTERM for rpm and yum commands (#2630) Signed-off-by: Jeremy Crafts --- insights/client/insights_spec.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/insights/client/insights_spec.py b/insights/client/insights_spec.py index 1e2de0687..d0b9037b9 100644 --- a/insights/client/insights_spec.py +++ b/insights/client/insights_spec.py @@ -47,8 +47,17 @@ def get_output(self): # all commands should timeout after a long interval so the client does not hang # prepend 
native nix 'timeout' implementation - timeout_command = 'timeout -s KILL %s %s' % ( - self.config.cmd_timeout, self.command) + + # use TERM for rpm/yum commands, KILL for everything else + if (self.command.startswith('/bin/rpm') or + self.command.startswith('yum') or + self.command.startswith('/usr/bin/yum')): + signal = 'TERM' + else: + signal = 'KILL' + + timeout_command = 'timeout -s %s %s %s' % ( + signal, self.config.cmd_timeout, self.command) # ensure consistent locale for collected command output cmd_env = {'LC_ALL': 'C', From 761c5320cbbc8c0d4d5d45c11cbe240f4fbaa0e3 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 16 Jul 2020 19:30:33 +0200 Subject: [PATCH 111/892] New 'lines' attribute in HaproxyCfg parser (#2664) * Add lines attribute to haproxy_cfg parser Signed-off-by: Jitka Obselkova * Add attributes section to docstring Signed-off-by: Jitka Obselkova --- insights/parsers/haproxy_cfg.py | 12 +++++++++--- insights/parsers/tests/test_haproxy_cfg.py | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/insights/parsers/haproxy_cfg.py b/insights/parsers/haproxy_cfg.py index 9f4284f66..1f8b007c5 100644 --- a/insights/parsers/haproxy_cfg.py +++ b/insights/parsers/haproxy_cfg.py @@ -26,6 +26,10 @@ }---> option: ["tcpka","tcplog"] option tcplog +Attributes: + data (dict): Dictionary of all parsed sections. + lines (list): List of all non-commented lines. + Examples: >>> cfg = shared[HaproxyCfg] >>> cfg.data['global'] @@ -37,7 +41,7 @@ True >>> 'user' in cfg.data.get('global') True - """ +""" from .. import Parser, parser from insights.specs import Specs @@ -46,10 +50,12 @@ def _parse_content(content): SECTION_NAMES = ("global", "defaults", "frontend", "backend", "listen") haproxy_dict = {} section_dict = {} + lines = [] for line in content: line = line.strip() if line.startswith("#") or line == "": continue + lines.append(line) values = line.split(None, 1) if values[0] in SECTION_NAMES: # new section like global:{} or listen mysql: {} @@ -70,11 +76,11 @@ def _parse_content(content): section_dict[attr_key].append(attr_value) else: section_dict[attr_key] = attr_value - return haproxy_dict + return haproxy_dict, lines @parser(Specs.haproxy_cfg) class HaproxyCfg(Parser): """Class to parse file ``haproxy.cfg``.""" def parse_content(self, content): - self.data = _parse_content(content) + self.data, self.lines = _parse_content(content) diff --git a/insights/parsers/tests/test_haproxy_cfg.py b/insights/parsers/tests/test_haproxy_cfg.py index 7d3b207c9..5ca6e7269 100644 --- a/insights/parsers/tests/test_haproxy_cfg.py +++ b/insights/parsers/tests/test_haproxy_cfg.py @@ -246,6 +246,7 @@ def test_haproxy_cls_1(): r = HaproxyCfg(context_wrap(haproxy_osp, osp=osp_c)) assert r.data.get("global").get("maxconn") == "10000" assert r.data.get("listen galera").get("mode") == "tcp" + assert r.lines[2] == "group haproxy" def test_haproxy_cls_2(): From 5bcdacc036b9ce64366d4658e4c9cdc13f1d9ed8 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Fri, 17 Jul 2020 11:01:15 -0500 Subject: [PATCH 112/892] Remove specs not used by any rules (#2663) * Remove specs not used by any rules * Prepare specs for core collection * Remove all default specs that there are not being used to reduce archive size requirements * This includes specs that are not currently being collected by the current client and specs that would not be evaluated by any rules if collected * Remove assocated archive specs * Add all file-like specs to sos_archive specs to allow 
collection Signed-off-by: Bob Fahr * Update docstrings in datasources Signed-off-by: Bob Fahr * Update specs catalog documentation Signed-off-by: Bob Fahr * Remove commented lines Signed-off-by: Bob Fahr --- insights/specs/__init__.py | 4 - insights/specs/default.py | 564 +++-------------------------- insights/specs/insights_archive.py | 93 +---- insights/specs/sos_archive.py | 95 ++++- insights/util/specs_catalog.py | 4 +- 5 files changed, 145 insertions(+), 615 deletions(-) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 57bfd448e..b78440eaf 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -211,7 +211,6 @@ class Specs(SpecSet): hponcfg_g = RegistryPoint() httpd_access_log = RegistryPoint(filterable=True) httpd_conf = RegistryPoint(multi_output=True) - httpd_conf_sos = RegistryPoint(multi_output=True) httpd_conf_scl_httpd24 = RegistryPoint(multi_output=True) httpd_conf_scl_jbcs_httpd24 = RegistryPoint(multi_output=True) httpd_error_log = RegistryPoint(filterable=True) @@ -220,7 +219,6 @@ class Specs(SpecSet): httpd_limits = RegistryPoint(multi_output=True) httpd_M = RegistryPoint(multi_output=True) httpd_on_nfs = RegistryPoint() - httpd_pid = RegistryPoint() httpd_ssl_access_log = RegistryPoint(filterable=True) httpd_ssl_error_log = RegistryPoint(filterable=True) httpd_V = RegistryPoint(multi_output=True) @@ -345,7 +343,6 @@ class Specs(SpecSet): modinfo = RegistryPoint(multi_output=True) modinfo_all = RegistryPoint() modprobe = RegistryPoint(multi_output=True) - module = RegistryPoint() mongod_conf = RegistryPoint(multi_output=True, filterable=True) mount = RegistryPoint() mounts = RegistryPoint() @@ -525,7 +522,6 @@ class Specs(SpecSet): route = RegistryPoint() rpm_V_packages = RegistryPoint() rsyslog_conf = RegistryPoint(filterable=True) - running_java = RegistryPoint() samba = RegistryPoint(filterable=True) sap_hdb_version = RegistryPoint(multi_output=True) sap_host_profile = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 61d8da592..2f680c99b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -12,25 +12,16 @@ import os import re -from insights.core.context import ClusterArchiveContext -from insights.core.context import DockerImageContext from insights.core.context import HostContext -from insights.core.context import HostArchiveContext -from insights.core.context import OpenShiftContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource -from insights.core.spec_factory import CommandOutputProvider, ContentException, DatasourceProvider, RawFileProvider -from insights.core.spec_factory import simple_file, simple_command, glob_file, command_with_args +from insights.core.spec_factory import RawFileProvider +from insights.core.spec_factory import simple_file, simple_command, glob_file from insights.core.spec_factory import first_of, foreach_collect, foreach_execute from insights.core.spec_factory import first_file, listdir -from insights.parsers.mount import Mount, ProcMounts -from insights.parsers.dnf_module import DnfModuleList -from insights.combiners.sap import Sap from insights.combiners.cloud_provider import CloudProvider -from insights.combiners.satellite_version import SatelliteVersion from insights.combiners.services import Services -from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.specs import Specs @@ -43,6 +34,7 @@ def get_owner(filename): + """ tuple: Return tuple containing uid and 
gid of file filename """ st = stat(filename) name = getpwuid(st.st_uid).pw_name group = getgrgid(st.st_gid).gr_name @@ -50,21 +42,32 @@ def get_owner(filename): def get_cmd_and_package_in_ps(broker, target_command): - ps = broker[DefaultSpecs.ps_auxww].content - ctx = broker[HostContext] - results = set() - for p in ps: - p_splits = p.split(None, 10) - cmd = p_splits[10].split()[0] if len(p_splits) == 11 else '' - which = ctx.shell_out("which {0}".format(cmd)) if target_command in os.path.basename(cmd) else None - resolved = ctx.shell_out("readlink -e {0}".format(which[0])) if which else None - pkg = ctx.shell_out("/bin/rpm -qf {0}".format(resolved[0])) if resolved else None - if cmd and pkg is not None: - results.add("{0} {1}".format(cmd, pkg[0])) - return results + """ + Search for command in ``ps auxww`` output and determine RPM providing binary + + Arguments: + broker(dict): Current state of specs collected by Insights + target_command(str): Command name to search for in ps output + + Returns: + set: Set including all RPMs that provide the target command + """ + ps = broker[DefaultSpecs.ps_auxww].content + ctx = broker[HostContext] + results = set() + for p in ps: + p_splits = p.split(None, 10) + cmd = p_splits[10].split()[0] if len(p_splits) == 11 else '' + which = ctx.shell_out("which {0}".format(cmd)) if target_command in os.path.basename(cmd) else None + resolved = ctx.shell_out("readlink -e {0}".format(which[0])) if which else None + pkg = ctx.shell_out("/bin/rpm -qf {0}".format(resolved[0])) if resolved else None + if cmd and pkg is not None: + results.add("{0} {1}".format(cmd, pkg[0])) + return results def _make_rpm_formatter(fmt=None): + """ function: Returns function that will format output of rpm query command """ if fmt is None: fmt = [ '"name":"%{NAME}"', @@ -96,12 +99,12 @@ class DefaultSpecs(Specs): auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") - autofs_conf = simple_file("/etc/autofs.conf") avc_hash_stats = simple_file("/sys/fs/selinux/avc/hash_stats") avc_cache_threshold = simple_file("/sys/fs/selinux/avc/cache_threshold") @datasource(CloudProvider) def is_aws(broker): + """ bool: Returns True if this node is identified as running in AWS """ cp = broker[CloudProvider] if cp and cp.cloud_provider == CloudProvider.AWS: return True @@ -112,6 +115,7 @@ def is_aws(broker): @datasource(CloudProvider) def is_azure(broker): + """ bool: Returns True if this node is identified as running in Azure """ cp = broker[CloudProvider] if cp and cp.cloud_provider == CloudProvider.AZURE: return True @@ -125,10 +129,7 @@ def is_azure(broker): boot_loader_entries = glob_file("/boot/loader/entries/*.conf") branch_info = simple_file("/branch_info", kind=RawFileProvider) brctl_show = simple_command("/usr/sbin/brctl show") - candlepin_log = simple_file("/var/log/candlepin/candlepin.log") - candlepin_error_log = simple_file("/var/log/candlepin/error.log") cgroups = simple_file("/proc/cgroups") - checkin_conf = simple_file("/etc/splice/checkin.conf") ps_alxwww = simple_command("/bin/ps alxwww") ps_aux = simple_command("/bin/ps aux") ps_auxcww = simple_command("/bin/ps auxcww") @@ -138,7 +139,13 @@ def is_azure(broker): @datasource(ps_auxww) def tomcat_base(broker): - """Path: Tomcat base path""" + """ + Function to search the output of ``ps auxww`` to find all running tomcat + processes and extract the base path where the process was started. 
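+
+    For a process command line containing ``-Dcatalina.base=/usr/share/tomcat``
+    (an illustrative path), the returned list would include
+    ``/usr/share/tomcat``.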
+ + Returns: + list: List of the paths to each running process + """ ps = broker[DefaultSpecs.ps_auxww].content results = [] findall = re.compile(r"\-Dcatalina\.base=(\S+)").findall @@ -150,21 +157,18 @@ def tomcat_base(broker): return list(set(results)) catalina_out = foreach_collect(tomcat_base, "%s/catalina.out") - catalina_server_log = foreach_collect(tomcat_base, "%s/catalina*.log") cciss = glob_file("/proc/driver/cciss/cciss*") cdc_wdm = simple_file("/sys/bus/usb/drivers/cdc_wdm/module/refcnt") - ceilometer_central_log = simple_file("/var/log/ceilometer/central.log") ceilometer_collector_log = first_file(["/var/log/containers/ceilometer/collector.log", "/var/log/ceilometer/collector.log"]) ceilometer_compute_log = first_file(["/var/log/containers/ceilometer/compute.log", "/var/log/ceilometer/compute.log"]) ceilometer_conf = first_file(["/var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf", "/etc/ceilometer/ceilometer.conf"]) - ceph_socket_files = listdir("/var/run/ceph/ceph-*.*.asok", context=HostContext) ceph_conf = first_file(["/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", "/etc/ceph/ceph.conf"]) - ceph_config_show = foreach_execute(ceph_socket_files, "/usr/bin/ceph daemon %s config show") ceph_df_detail = simple_command("/usr/bin/ceph df detail -f json") ceph_health_detail = simple_command("/usr/bin/ceph health detail -f json") @datasource(ps_auxww) def is_ceph_monitor(broker): + """ bool: Returns True if ceph monitor process ceph-mon is running on this node """ ps = broker[DefaultSpecs.ps_auxww].content findall = re.compile(r"ceph\-mon").findall if any(findall(p) for p in ps): @@ -174,9 +178,7 @@ def is_ceph_monitor(broker): ceph_insights = simple_command("/usr/bin/ceph insights", deps=[is_ceph_monitor]) ceph_log = glob_file(r"var/log/ceph/ceph.log*") ceph_osd_dump = simple_command("/usr/bin/ceph osd dump -f json") - ceph_osd_df = simple_command("/usr/bin/ceph osd df -f json") ceph_osd_ec_profile_ls = simple_command("/usr/bin/ceph osd erasure-code-profile ls") - ceph_osd_ec_profile_get = foreach_execute(ceph_osd_ec_profile_ls, "/usr/bin/ceph osd erasure-code-profile get %s -f json") ceph_osd_log = glob_file(r"var/log/ceph/ceph-osd*.log") ceph_osd_tree = simple_command("/usr/bin/ceph osd tree -f json") ceph_s = simple_command("/usr/bin/ceph -s -f json") @@ -193,33 +195,15 @@ def is_ceph_monitor(broker): cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") cmdline = simple_file("/proc/cmdline") - cni_podman_bridge_conf = simple_file("/etc/cni/net.d/87-podman-bridge.conflist") - cpe = simple_file("/etc/system-release-cpe") - # are these locations for different rhel versions? 
cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") - - @datasource([IsRhel7, IsRhel8]) - def corosync_cmapctl_cmd_list(broker): - if broker.get(IsRhel7): - return ["/usr/sbin/corosync-cmapctl", 'corosync-cmapctl -d runtime.schedmiss.timestamp', 'corosync-cmapctl -d runtime.schedmiss.delay'] - if broker.get(IsRhel8): - return ["/usr/sbin/corosync-cmapctl", '/usr/sbin/corosync-cmapctl -m stats', '/usr/sbin/corosync-cmapctl -C schedmiss'] - raise SkipComponent() - corosync_cmapctl = foreach_execute(corosync_cmapctl_cmd_list, "%s") corosync_conf = simple_file("/etc/corosync/corosync.conf") cpu_cores = glob_file("sys/devices/system/cpu/cpu[0-9]*/online") cpu_siblings = glob_file("sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list") cpu_smt_active = simple_file("sys/devices/system/cpu/smt/active") - cpu_smt_control = simple_file("sys/devices/system/cpu/smt/control") cpu_vulns = glob_file("sys/devices/system/cpu/vulnerabilities/*") - cpu_vulns_meltdown = simple_file("sys/devices/system/cpu/vulnerabilities/meltdown") - cpu_vulns_spectre_v1 = simple_file("sys/devices/system/cpu/vulnerabilities/spectre_v1") - cpu_vulns_spectre_v2 = simple_file("sys/devices/system/cpu/vulnerabilities/spectre_v2") - cpu_vulns_spec_store_bypass = simple_file("sys/devices/system/cpu/vulnerabilities/spec_store_bypass") cpuinfo = simple_file("/proc/cpuinfo") - cpuinfo_max_freq = simple_file("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq") cpupower_frequency_info = simple_command("/usr/bin/cpupower -c all frequency-info") cpuset_cpus = simple_file("/sys/fs/cgroup/cpuset/cpuset.cpus") cron_daily_rhsmd = simple_file("/etc/cron.daily/rhsmd") @@ -229,88 +213,23 @@ def corosync_cmapctl_cmd_list(broker): crypto_policies_bind = simple_file("/etc/crypto-policies/back-ends/bind.config") current_clocksource = simple_file("/sys/devices/system/clocksource/clocksource0/current_clocksource") date = simple_command("/bin/date") - date_iso = simple_command("/bin/date --iso-8601=seconds") date_utc = simple_command("/bin/date --utc") df__al = simple_command("/bin/df -al") df__alP = simple_command("/bin/df -alP") df__li = simple_command("/bin/df -li") - dig = simple_command("/usr/bin/dig +dnssec . DNSKEY") dig_dnssec = simple_command("/usr/bin/dig +dnssec . SOA") dig_edns = simple_command("/usr/bin/dig +edns=0 . SOA") dig_noedns = simple_command("/usr/bin/dig +noedns . 
SOA") - dirsrv = simple_file("/etc/sysconfig/dirsrv") - dirsrv_access = glob_file("var/log/dirsrv/*/access*") dirsrv_errors = glob_file("var/log/dirsrv/*/errors*") - display_java = simple_command("/usr/sbin/alternatives --display java") dmesg = simple_command("/bin/dmesg") dmesg_log = simple_file("/var/log/dmesg") dmidecode = simple_command("/usr/sbin/dmidecode") - dmsetup_info = simple_command("/usr/sbin/dmsetup info -C") - dnf_modules = glob_file("/etc/dnf/modules.d/*.module") - dnf_module_list = simple_command("/usr/bin/dnf -C --noplugins module list", deps=[IsRhel8]) - - @datasource(DnfModuleList) - def dnf_module_names(broker): - dml = broker[DnfModuleList] - if dml: - return (' ').join(dml) - raise SkipComponent() - - dnf_module_info = command_with_args("/usr/bin/dnf -C --noplugins module info %s", dnf_module_names, deps=[IsRhel8]) - dnsmasq_config = glob_file(["/etc/dnsmasq.conf", "/etc/dnsmasq.d/*.conf"]) docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") - - @datasource(docker_list_images) - def docker_image_ids(broker): - """Command: docker_image_ids""" - images = broker[DefaultSpecs.docker_list_images] - try: - result = set() - for l in images.content[1:]: - result.add(l.split(None)[3].strip()) - except: - raise ContentException("No docker images.") - if result: - return list(result) - raise ContentException("No docker images.") - - # TODO: This parsing is broken. - @datasource(docker_list_containers) - def docker_container_ids(broker): - """Command: docker_container_ids""" - containers = broker[DefaultSpecs.docker_list_containers] - try: - result = set() - for l in containers.content[1:]: - result.add(l.split(None)[3].strip()) - except: - raise ContentException("No docker containers.") - if result: - return list(result) - raise ContentException("No docker containers.") - - docker_host_machine_id = simple_file("/etc/redhat-access-insights/machine-id") - docker_image_inspect = foreach_execute(docker_image_ids, "/usr/bin/docker inspect %s") - docker_container_inspect = foreach_execute(docker_container_ids, "/usr/bin/docker inspect %s") - docker_network = simple_file("/etc/sysconfig/docker-network") - docker_storage = simple_file("/etc/sysconfig/docker-storage") docker_storage_setup = simple_file("/etc/sysconfig/docker-storage-setup") docker_sysconfig = simple_file("/etc/sysconfig/docker") - - @datasource(ProcMounts) - def dumpdev(broker): - mnt = broker[ProcMounts] - mounted_dev = [m.mounted_device for m in mnt if m.mount_type in ('ext2', 'ext3', 'ext4')] - if mounted_dev: - return mounted_dev - raise SkipComponent() - dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") - du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") - dumpe2fs_h = foreach_execute(dumpdev, "/sbin/dumpe2fs -h %s") - engine_config_all = simple_command("/usr/bin/engine-config --all") engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;" --json') engine_log = simple_file("/var/log/ovirt-engine/engine.log") etc_journald_conf = simple_file(r"etc/systemd/journald.conf") @@ -318,43 +237,24 @@ def dumpdev(broker): etc_machine_id = simple_file("/etc/machine-id") etcd_conf = simple_file("/etc/etcd/etcd.conf") ethernet_interfaces = 
listdir("/sys/class/net", context=HostContext) - dcbtool_gc_dcb = foreach_execute(ethernet_interfaces, "/sbin/dcbtool gc %s dcb") ethtool = foreach_execute(ethernet_interfaces, "/sbin/ethtool %s") ethtool_S = foreach_execute(ethernet_interfaces, "/sbin/ethtool -S %s") ethtool_T = foreach_execute(ethernet_interfaces, "/sbin/ethtool -T %s") - ethtool_a = foreach_execute(ethernet_interfaces, "/sbin/ethtool -a %s") - ethtool_c = foreach_execute(ethernet_interfaces, "/sbin/ethtool -c %s") ethtool_g = foreach_execute(ethernet_interfaces, "/sbin/ethtool -g %s") ethtool_i = foreach_execute(ethernet_interfaces, "/sbin/ethtool -i %s") ethtool_k = foreach_execute(ethernet_interfaces, "/sbin/ethtool -k %s") - exim_conf = simple_file("etc/exim.conf") facter = simple_command("/usr/bin/facter") fc_match = simple_command("/bin/fc-match -sv 'sans:regular:roman' family fontformat") fcoeadm_i = simple_command("/usr/sbin/fcoeadm -i") - fdisk_l = simple_command("/sbin/fdisk -l") findmnt_lo_propagation = simple_command("/bin/findmnt -lo+PROPAGATION") firewall_cmd_list_all_zones = simple_command("/usr/bin/firewall-cmd --list-all-zones") firewalld_conf = simple_file("/etc/firewalld/firewalld.conf") - foreman_production_log = simple_file("/var/log/foreman/production.log") - foreman_proxy_conf = simple_file("/etc/foreman-proxy/settings.yml") - foreman_proxy_log = simple_file("/var/log/foreman-proxy/proxy.log") - foreman_satellite_log = simple_file("/var/log/foreman-installer/satellite.log") - foreman_ssl_access_ssl_log = simple_file("var/log/httpd/foreman-ssl_access_ssl.log") - foreman_rake_db_migrate_status = simple_command('/usr/sbin/foreman-rake db:migrate:status') - foreman_tasks_config = first_file(["/etc/sysconfig/foreman-tasks", "/etc/sysconfig/dynflowd"]) - freeipa_healthcheck_log = simple_file("/var/log/ipa/healthcheck/healthcheck.log") fstab = simple_file("/etc/fstab") galera_cnf = first_file(["/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", "/etc/my.cnf.d/galera.cnf"]) getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") getenforce = simple_command("/usr/sbin/getenforce") getsebool = simple_command("/usr/sbin/getsebool -a") - glance_api_conf = first_file(["/var/lib/config-data/puppet-generated/glance_api/etc/glance/glance-api.conf", "/etc/glance/glance-api.conf"]) - glance_api_log = first_file(["/var/log/containers/glance/api.log", "/var/log/glance/api.log"]) - glance_cache_conf = first_file(["/var/lib/config-data/puppet-generated/glance_api/etc/glance/glance-cache.conf", "/etc/glance/glance-cache.conf"]) - glance_registry_conf = simple_file("/etc/glance/glance-registry.conf") gluster_v_info = simple_command("/usr/sbin/gluster volume info") - gluster_v_status = simple_command("/usr/sbin/gluster volume status") - gluster_peer_status = simple_command("/usr/sbin/gluster peer status") gnocchi_conf = first_file(["/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", "/etc/gnocchi/gnocchi.conf"]) gnocchi_metricd_log = first_file(["/var/log/containers/gnocchi/gnocchi-metricd.log", "/var/log/gnocchi/metricd.log"]) grub_conf = simple_file("/boot/grub/grub.conf") @@ -365,20 +265,14 @@ def dumpdev(broker): grub2_efi_cfg = simple_file("boot/efi/EFI/redhat/grub.cfg") grubby_default_index = simple_command("/usr/sbin/grubby --default-index") # only RHEL7 and updwards grubby_default_kernel = simple_command("/sbin/grubby --default-kernel") - hammer_ping = simple_command("/usr/bin/hammer ping") hammer_task_list = simple_command("/usr/bin/hammer --config 
/root/.hammer/cli.modules.d/foreman.yml --output csv task list --search 'state=running AND ( label=Actions::Candlepin::ListenOnCandlepinEvents OR label=Actions::Katello::EventQueue::Monitor )'") haproxy_cfg = first_file(["/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg", "/etc/haproxy/haproxy.cfg"]) heat_api_log = first_file(["/var/log/containers/heat/heat_api.log", "/var/log/heat/heat-api.log", "/var/log/heat/heat_api.log"]) heat_conf = first_file(["/var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf", "/etc/heat/heat.conf"]) - heat_crontab = simple_command("/usr/bin/crontab -l -u heat") - heat_crontab_container = simple_command("docker exec heat_api_cron /usr/bin/crontab -l -u heat") - heat_engine_log = first_file(["/var/log/containers/heat/heat-engine.log", "/var/log/heat/heat-engine.log"]) hostname = simple_command("/bin/hostname -f") hostname_default = simple_command("/bin/hostname") hostname_short = simple_command("/bin/hostname -s") hosts = simple_file("/etc/hosts") - hponcfg_g = simple_command("/sbin/hponcfg -g") - httpd_access_log = simple_file("/var/log/httpd/access_log") httpd_conf = glob_file( [ "/etc/httpd/conf/httpd.conf", @@ -406,22 +300,17 @@ def dumpdev(broker): httpd_error_log = simple_file("var/log/httpd/error_log") httpd24_httpd_error_log = simple_file("/opt/rh/httpd24/root/etc/httpd/logs/error_log") jbcs_httpd24_httpd_error_log = simple_file("/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log") - httpd_pid = simple_command("/usr/bin/pgrep -o httpd") - httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") - - @datasource(SatelliteVersion) - def is_sat(broker): - sat = broker[SatelliteVersion] - if sat: - return True - raise SkipComponent() - - satellite_enabled_features = simple_command("/usr/bin/curl -sk https://localhost:9090/features --connect-timeout 5", deps=[is_sat]) virt_uuid_facts = simple_file("/etc/rhsm/facts/virt_uuid.facts") @datasource(ps_auxww) def httpd_cmd(broker): - """Command: httpd_command""" + """ + Function to search the output of ``ps auxww`` to find all running Apache + webserver processes and extract the binary path. 
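+
+    For example, if ``ps auxww`` lists a running ``/usr/sbin/httpd`` process
+    (an illustrative path), the returned list would include ``/usr/sbin/httpd``.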
+ + Returns: + list: List of the binary paths to each running process + """ ps = broker[DefaultSpecs.ps_auxww].content ps_httpds = set() for p in ps: @@ -437,39 +326,11 @@ def httpd_cmd(broker): # https://access.redhat.com/solutions/21680 return list(ps_httpds) - @datasource(Mount) - def httpd_on_nfs(broker): - import json - mnt = broker[Mount] - mps = mnt.search(mount_type='nfs4') - # get nfs 4.0 mount points - nfs_mounts = [m.mount_point for m in mps if m['mount_options'].get("vers") == "4.0"] - if nfs_mounts: - # get all httpd ps - httpd_pids = broker[HostContext].shell_out("pgrep httpd") - if httpd_pids: - open_nfs_files = 0 - lsof_cmds = ["lsof -p {}".format(pid) for pid in httpd_pids if pid] - # maybe there are thousands open files - httpd_open_files = broker[HostContext].shell_out(lsof_cmds) - for line in httpd_open_files: - items = line.split() - if len(items) > 8 and items[8].startswith(tuple(nfs_mounts)): - open_nfs_files += 1 - result_dict = {"http_ids": httpd_pids, "nfs_mounts": nfs_mounts, "open_nfs_files": open_nfs_files} - return DatasourceProvider(content=json.dumps(result_dict), relative_path="httpd_open_nfsV4_files") - raise SkipComponent() - httpd_M = foreach_execute(httpd_cmd, "%s -M") - httpd_ssl_access_log = simple_file("/var/log/httpd/ssl_access_log") - httpd_ssl_error_log = simple_file("/var/log/httpd/ssl_error_log") httpd_V = foreach_execute(httpd_cmd, "%s -V") ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*") ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*") - ifconfig = simple_command("/sbin/ifconfig -a") imagemagick_policy = glob_file(["/etc/ImageMagick/policy.xml", "/usr/lib*/ImageMagick-6.5.4/config/policy.xml"]) - init_ora = simple_file("${ORACLE_HOME}/dbs/init.ora") - initscript = glob_file(r"etc/rc.d/init.d/*") init_process_cgroup = simple_file("/proc/1/cgroup") interrupts = simple_file("/proc/interrupts") ip_addr = simple_command("/sbin/ip addr") @@ -480,40 +341,17 @@ def httpd_on_nfs(broker): ipcs_m = simple_command("/usr/bin/ipcs -m") ipcs_m_p = simple_command("/usr/bin/ipcs -m -p") ipcs_s = simple_command("/usr/bin/ipcs -s") - - @datasource(ipcs_s) - def semid(broker): - """Command: semids""" - source = broker[DefaultSpecs.ipcs_s].content - results = set() - for s in source: - s_splits = s.split() - # key semid owner perms nsems - # 0x00000000 65536 apache 600 1 - if len(s_splits) == 5 and s_splits[1].isdigit(): - results.add(s_splits[1]) - return results - - ipcs_s_i = foreach_execute(semid, "/usr/bin/ipcs -s -i %s") iptables = simple_command("/sbin/iptables-save") iptables_permanent = simple_file("etc/sysconfig/iptables") ip6tables = simple_command("/sbin/ip6tables-save") - ip6tables_permanent = simple_file("etc/sysconfig/ip6tables") ipv4_neigh = simple_command("/sbin/ip -4 neighbor show nud all") ipv6_neigh = simple_command("/sbin/ip -6 neighbor show nud all") ironic_inspector_log = simple_file("/var/log/ironic-inspector/ironic-inspector.log") - ironic_conf = first_file(["/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf", "/etc/ironic/ironic.conf"]) iscsiadm_m_session = simple_command("/usr/sbin/iscsiadm -m session") - katello_service_status = simple_command("/usr/bin/katello-service status") kdump_conf = simple_file("/etc/kdump.conf") - kerberos_kdc_log = simple_file("var/log/krb5kdc.log") kernel_config = glob_file("/boot/config-*") - kexec_crash_loaded = simple_file("/sys/kernel/kexec_crash_loaded") kexec_crash_size = simple_file("/sys/kernel/kexec_crash_size") - keystone_conf = 
first_file(["/var/lib/config-data/puppet-generated/keystone/etc/keystone/keystone.conf", "/etc/keystone/keystone.conf"]) keystone_crontab = simple_command("/usr/bin/crontab -l -u keystone") - keystone_crontab_container = simple_command("docker exec keystone_cron /usr/bin/crontab -l -u keystone") - keystone_log = first_file(["/var/log/containers/keystone/keystone.log", "/var/log/keystone/keystone.log"]) kpatch_list = simple_command("/usr/sbin/kpatch list") krb5 = glob_file([r"etc/krb5.conf", r"etc/krb5.conf.d/*"]) ksmstate = simple_file("/sys/kernel/mm/ksm/run") @@ -523,17 +361,13 @@ def semid(broker): libkeyutils = simple_command("/usr/bin/find -L /lib /lib64 -name 'libkeyutils.so*'") libkeyutils_objdumps = simple_command('/usr/bin/find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x "{}" \;') libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") - libvirtd_qemu_log = glob_file(r"/var/log/libvirt/qemu/*.log") limits_conf = glob_file(["/etc/security/limits.conf", "/etc/security/limits.d/*.conf"]) - locale = simple_command("/usr/bin/locale") localtime = simple_command("/usr/bin/file -L /etc/localtime") logrotate_conf = glob_file(["/etc/logrotate.conf", "/etc/logrotate.d/*"]) lpstat_p = simple_command("/usr/bin/lpstat -p") ls_boot = simple_command("/bin/ls -lanR /boot") ls_dev = simple_command("/bin/ls -lanR /dev") ls_disk = simple_command("/bin/ls -lanR /dev/disk") - ls_docker_volumes = simple_command("/bin/ls -lanR /var/lib/docker/volumes") - ls_edac_mc = simple_command("/bin/ls -lan /sys/devices/system/edac/mc") etc_and_sub_dirs = sorted(["/etc", "/etc/pki/tls/private", "/etc/pki/tls/certs", "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig", "/etc/cloud/cloud.cfg.d"]) @@ -545,8 +379,8 @@ def semid(broker): ls_run_systemd_generator = simple_command("/bin/ls -lan /run/systemd/generator") ls_R_var_lib_nova_instances = simple_command("/bin/ls -laR /var/lib/nova/instances") ls_sys_firmware = simple_command("/bin/ls -lanR /sys/firmware") + ls_tmp = simple_command("/bin/ls -la /tmp") ls_usr_lib64 = simple_command("/bin/ls -lan /usr/lib64") - ls_usr_sbin = simple_command("/bin/ls -ln /usr/sbin") ls_var_lib_mongodb = simple_command("/bin/ls -la /var/lib/mongodb") ls_var_lib_nova_instances = simple_command("/bin/ls -laRZ /var/lib/nova/instances") ls_var_log = simple_command("/bin/ls -la /var/log /var/log/audit") @@ -557,32 +391,18 @@ def semid(broker): ls_var_tmp = simple_command("/bin/ls -ln /var/tmp") ls_var_run = simple_command("/bin/ls -lnL /var/run") ls_var_www = simple_command("/bin/ls -la /dev/null /var/www") # https://github.com/RedHatInsights/insights-core/issues/827 - ls_tmp = simple_command("/bin/ls -la /tmp") lsblk = simple_command("/bin/lsblk") lsblk_pairs = simple_command("/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO") lscpu = simple_command("/usr/bin/lscpu") - lsinitrd = simple_command("/usr/bin/lsinitrd") - lsinitrd_lvm_conf = first_of([ - simple_command("/sbin/lsinitrd -f /etc/lvm/lvm.conf"), - simple_command("/usr/bin/lsinitrd -f /etc/lvm/lvm.conf") - ]) lsmod = simple_command("/sbin/lsmod") lsof = simple_command("/usr/sbin/lsof") lspci = simple_command("/sbin/lspci -k") lssap = simple_command("/usr/sap/hostctrl/exe/lssap") lsscsi = simple_command("/usr/bin/lsscsi") - lvdisplay = simple_command("/sbin/lvdisplay") lvm_conf = simple_file("/etc/lvm/lvm.conf") - lvmconfig = first_of([ - 
simple_command("/usr/sbin/lvmconfig --type full"), - simple_command("/usr/sbin/lvm dumpconfig --type full"), - ]) - lvs = None # simple_command('/sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"') lvs_noheadings = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"") - lvs_noheadings_all = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype --config='global{locking_type=0} devices{filter=[\"a|.*|\"]}'") mac_addresses = glob_file("/sys/class/net/*/address") machine_id = first_file(["etc/insights-client/machine-id", "etc/redhat-access-insights/machine-id", "etc/redhat_access_proactive/machine-id"]) - manila_conf = first_file(["/var/lib/config-data/puppet-generated/manila/etc/manila/manila.conf", "/etc/manila/manila.conf"]) mariadb_log = simple_file("/var/log/mariadb/mariadb.log") max_uid = simple_command("/bin/awk -F':' '{ if($3 > max) max = $3 } END { print max }' /etc/passwd") md5chk_files = foreach_execute( @@ -591,62 +411,32 @@ def semid(broker): mdstat = simple_file("/proc/mdstat") meminfo = first_file(["/proc/meminfo", "/meminfo"]) messages = simple_file("/var/log/messages") - metadata_json = simple_file("metadata.json", context=ClusterArchiveContext, kind=RawFileProvider) - mistral_executor_log = simple_file("/var/log/mistral/executor.log") - mlx4_port = glob_file("/sys/bus/pci/devices/*/mlx4_port[0-9]") modinfo_i40e = simple_command("/sbin/modinfo i40e") modinfo_igb = simple_command("/sbin/modinfo igb") modinfo_ixgbe = simple_command("/sbin/modinfo ixgbe") modinfo_veth = simple_command("/sbin/modinfo veth") modinfo_vmxnet3 = simple_command("/sbin/modinfo vmxnet3") - - @datasource(lsmod, context=HostContext) - def lsmod_only_names(broker): - lsmod = broker[DefaultSpecs.lsmod].content - # skip the title - return [line.split()[0] for line in lsmod[1:] if line.strip()] - - modinfo = foreach_execute(lsmod_only_names, "modinfo %s") - - @datasource(lsmod_only_names, context=HostContext) - def lsmod_all_names(broker): - mod_list = broker[DefaultSpecs.lsmod_only_names] - if mod_list: - return ' '.join(mod_list) - raise SkipComponent() - - modinfo_all = command_with_args("modinfo %s", lsmod_all_names) - modprobe = glob_file(["/etc/modprobe.conf", "/etc/modprobe.d/*.conf"]) - sysconfig_mongod = glob_file([ - "etc/sysconfig/mongod", - "etc/opt/rh/rh-mongodb26/sysconfig/mongod" - ]) mongod_conf = glob_file([ "/etc/mongod.conf", "/etc/mongodb.conf", "/etc/opt/rh/rh-mongodb26/mongod.conf" ]) mount = simple_command("/bin/mount") - mounts = simple_file("/proc/mounts") mssql_conf = simple_file("/var/opt/mssql/mssql.conf") multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;") multipath_conf = simple_file("/etc/multipath.conf") multipath_conf_initramfs = simple_command("/bin/lsinitrd -f /etc/multipath.conf") multipath__v4__ll = simple_command("/sbin/multipath -v4 -ll") - mysqladmin_status = simple_command("/bin/mysqladmin status") mysqladmin_vars = simple_command("/bin/mysqladmin variables") mysql_log = glob_file([ "/var/log/mysql/mysqld.log", "/var/log/mysql.log", "/var/opt/rh/rh-mysql*/log/mysql/mysqld.log" ]) - mysqld_pid = simple_command("/usr/bin/pgrep -n mysqld") - mysqld_limits = foreach_collect(mysqld_pid, 
"/proc/%s/limits") named_checkconf_p = simple_command("/usr/sbin/named-checkconf -p") namespace = simple_command("/bin/ls /var/run/netns") ndctl_list_Ni = simple_command("/usr/bin/ndctl list -Ni") - ip_netns_exec_namespace_lsof = foreach_execute(namespace, "/sbin/ip netns exec %s lsof -i") netconsole = simple_file("/etc/sysconfig/netconsole") netstat = simple_command("/bin/netstat -neopa") netstat_agn = simple_command("/bin/netstat -agn") @@ -659,10 +449,8 @@ def lsmod_all_names(broker): neutron_l3_agent_log = simple_file("/var/log/neutron/l3-agent.log") neutron_metadata_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini", "/etc/neutron/metadata_agent.ini"]) neutron_metadata_agent_log = first_file(["/var/log/containers/neutron/metadata-agent.log", "/var/log/neutron/metadata-agent.log"]) - neutron_ml2_conf = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/ml2_conf.ini", "/etc/neutron/plugins/ml2/ml2_conf.ini"]) neutron_ovs_agent_log = first_file(["/var/log/containers/neutron/openvswitch-agent.log", "/var/log/neutron/openvswitch-agent.log"]) neutron_plugin_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugin.ini", "/etc/neutron/plugin.ini"]) - neutron_server_log = first_file(["/var/log/containers/neutron/server.log", "/var/log/neutron/server.log"]) nfnetlink_queue = simple_file("/proc/net/netfilter/nfnetlink_queue") nfs_exports = simple_file("/etc/exports") nfs_exports_d = glob_file("/etc/exports.d/*.exports") @@ -681,98 +469,42 @@ def lsmod_all_names(broker): "/etc/nova/nova.conf" ]) nova_crontab = simple_command("/usr/bin/crontab -l -u nova") - nova_crontab_container = simple_command("docker exec nova_api_cron /usr/bin/crontab -l -u nova") nova_uid = simple_command("/usr/bin/id -u nova") - nova_migration_uid = simple_command("/usr/bin/id -u nova_migration") nscd_conf = simple_file("/etc/nscd.conf") nsswitch_conf = simple_file("/etc/nsswitch.conf") ntp_conf = simple_file("/etc/ntp.conf") ntpq_leap = simple_command("/usr/sbin/ntpq -c 'rv 0 leap'") - ntpq_pn = simple_command("/usr/sbin/ntpq -pn") ntptime = simple_command("/usr/sbin/ntptime") numa_cpus = glob_file("/sys/devices/system/node/node[0-9]*/cpulist") numeric_user_group_name = simple_command("/bin/grep -c '^[[:digit:]]' /etc/passwd /etc/group") nvme_core_io_timeout = simple_file("/sys/module/nvme_core/parameters/io_timeout") - oc_get_bc = simple_command("/usr/bin/oc get bc -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_build = simple_command("/usr/bin/oc get build -o yaml --all-namespaces", context=OpenShiftContext) oc_get_clusterrole_with_config = simple_command("/usr/bin/oc get clusterrole --config /etc/origin/master/admin.kubeconfig") oc_get_clusterrolebinding_with_config = simple_command("/usr/bin/oc get clusterrolebinding --config /etc/origin/master/admin.kubeconfig") - oc_get_dc = simple_command("/usr/bin/oc get dc -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_egressnetworkpolicy = simple_command("/usr/bin/oc get egressnetworkpolicy -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_endpoints = simple_command("/usr/bin/oc get endpoints -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_event = simple_command("/usr/bin/oc get event -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_node = simple_command("/usr/bin/oc get nodes -o yaml", context=OpenShiftContext) - oc_get_pod = simple_command("/usr/bin/oc get pod -o yaml --all-namespaces", 
context=OpenShiftContext) - oc_get_project = simple_command("/usr/bin/oc get project -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_pv = simple_command("/usr/bin/oc get pv -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_pvc = simple_command("/usr/bin/oc get pvc -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_rc = simple_command("/usr/bin/oc get rc -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_role = simple_command("/usr/bin/oc get role -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_rolebinding = simple_command("/usr/bin/oc get rolebinding -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_route = simple_command("/usr/bin/oc get route -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_service = simple_command("/usr/bin/oc get service -o yaml --all-namespaces", context=OpenShiftContext) - oc_get_configmap = simple_command("/usr/bin/oc get configmap -o yaml --all-namespaces", context=OpenShiftContext) - octavia_conf = simple_file("/var/lib/config-data/puppet-generated/octavia/etc/octavia/octavia.conf") odbc_ini = simple_file("/etc/odbc.ini") odbcinst_ini = simple_file("/etc/odbcinst.ini") - crt = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master -type f -path '*.crt'") - openshift_certificates = foreach_execute(crt, "/usr/bin/openssl x509 -noout -enddate -in %s") - openshift_fluentd_pid = simple_command("/usr/bin/pgrep -n fluentd") - openshift_fluentd_environ = foreach_collect(openshift_fluentd_pid, "/proc/%s/environ") openshift_hosts = simple_file("/root/.config/openshift/hosts") openshift_router_pid = simple_command("/usr/bin/pgrep -n openshift-route") openshift_router_environ = foreach_collect(openshift_router_pid, "/proc/%s/environ") openvswitch_other_config = simple_command("/usr/bin/ovs-vsctl -t 5 get Open_vSwitch . 
other_config") - openvswitch_server_log = simple_file('/var/log/openvswitch/ovsdb-server.log') - openvswitch_daemon_log = simple_file('/var/log/openvswitch/ovs-vswitchd.log') os_release = simple_file("etc/os-release") - osa_dispatcher_log = first_file([ - "/var/log/rhn/osa-dispatcher.log", - "/rhn-logs/rhn/osa-dispatcher.log" - ]) ose_master_config = simple_file("/etc/origin/master/master-config.yaml") ose_node_config = simple_file("/etc/origin/node/node-config.yaml") - ovirt_engine_confd = glob_file("/etc/ovirt-engine/engine.conf.d/*") ovirt_engine_server_log = simple_file("/var/log/ovirt-engine/server.log") ovirt_engine_ui_log = simple_file("/var/log/ovirt-engine/ui.log") - ovirt_engine_boot_log = simple_file("/var/log/ovirt-engine/boot.log") - ovirt_engine_console_log = simple_file("/var/log/ovirt-engine/console.log") - ovs_vsctl_list_br = simple_command("/usr/bin/ovs-vsctl list-br") - ovs_appctl_fdb_show_bridge = foreach_execute(ovs_vsctl_list_br, "/usr/bin/ovs-appctl fdb/show %s") - ovs_ofctl_dump_flows = foreach_execute(ovs_vsctl_list_br, "/usr/bin/ovs-ofctl dump-flows %s") ovs_vsctl_list_bridge = simple_command("/usr/bin/ovs-vsctl list bridge") ovs_vsctl_show = simple_command("/usr/bin/ovs-vsctl show") - ovs_vswitchd_pid = simple_command("/usr/bin/pgrep -o ovs-vswitchd") - ovs_vswitchd_limits = foreach_collect(ovs_vswitchd_pid, "/proc/%s/limits") pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") @datasource(Services, context=HostContext) def pcp_enabled(broker): + """ bool: Returns True if pmproxy service is on in services """ if not broker[Services].is_on("pmproxy"): raise SkipComponent("pmproxy not enabled") pcp_metrics = simple_command("/usr/bin/curl -s http://127.0.0.1:44322/metrics --connect-timeout 5", deps=[pcp_enabled]) - - @datasource(ps_auxww, context=HostContext) - def package_and_java(broker): - """Command: package_and_java""" - return get_cmd_and_package_in_ps(broker, 'java') - - package_provides_java = foreach_execute(package_and_java, "echo %s") - - @datasource(ps_auxww, context=HostContext) - def package_and_httpd(broker): - """Command: package_and_httpd""" - return get_cmd_and_package_in_ps(broker, 'httpd') - - package_provides_httpd = foreach_execute(package_and_httpd, "echo %s") - pam_conf = simple_file("/etc/pam.conf") - parted__l = simple_command("/sbin/parted -l -s") - partitions = simple_file("/proc/partitions") passenger_status = simple_command("/usr/bin/passenger-status") password_auth = simple_file("/etc/pam.d/password-auth") - pcs_config = simple_command("/usr/sbin/pcs config") pcs_quorum_status = simple_command("/usr/sbin/pcs quorum status") pcs_status = simple_command("/usr/sbin/pcs status") pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf") @@ -787,28 +519,19 @@ def package_and_httpd(broker): glob_file("/database/postgresql-*.log") ]) puppetserver_config = simple_file("/etc/sysconfig/puppetserver") - prev_uploader_log = simple_file("var/log/redhat-access-insights/redhat-access-insights.log.1") proc_netstat = simple_file("proc/net/netstat") proc_slabinfo = simple_file("proc/slabinfo") proc_snmp_ipv4 = simple_file("proc/net/snmp") proc_snmp_ipv6 = simple_file("proc/net/snmp6") proc_stat = simple_file("proc/stat") pulp_worker_defaults = simple_file("etc/default/pulp_workers") - pvs = simple_command('/sbin/pvs -a -v -o +pv_mda_free,pv_mda_size,pv_mda_count,pv_mda_used_count,pe_count 
--config="global{locking_type=0}"') pvs_noheadings = simple_command("/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"") - pvs_noheadings_all = simple_command("/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config='global{locking_type=0} devices{filter=[\"a|.*|\"]}'") qemu_conf = simple_file("/etc/libvirt/qemu.conf") qemu_xml = glob_file(r"/etc/libvirt/qemu/*.xml") qpid_stat_g = simple_command("/usr/bin/qpid-stat -g --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671") - qpid_stat_q = simple_command("/usr/bin/qpid-stat -q --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671") - qpid_stat_u = simple_command("/usr/bin/qpid-stat -u --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671") qpidd_conf = simple_file("/etc/qpid/qpidd.conf") rabbitmq_env = simple_file("/etc/rabbitmq/rabbitmq-env.conf") - rabbitmq_logs = glob_file("/var/log/rabbitmq/rabbit@*.log", ignore=".*rabbit@.*(? Date: Fri, 17 Jul 2020 12:48:44 -0400 Subject: [PATCH 113/892] include both classic and core collection (#2635) * include both classic and core collection create CoreCollector class move archive subdir initialization calls use "core_collect=T/F" config var to differentiate * select core by default if rpm is version 3.1.0 or above * set hostname path for soscleaner * pass denylist to core collect Signed-off-by: Jeremy Crafts --- insights/client/archive.py | 63 +++++-- insights/client/client.py | 11 +- insights/client/config.py | 29 ++++ insights/client/constants.py | 2 + insights/client/core_collector.py | 83 +++++++++ insights/client/data_collector.py | 3 + insights/client/phase/v1.py | 3 +- .../client/data_collector/test_redact.py | 71 +++++--- .../client/phase/test_LEGACY_post_update.py | 3 +- .../tests/client/phase/test_post_update.py | 3 +- insights/tests/client/phase/test_update.py | 34 ++++ insights/tests/client/test_archive.py | 157 ++++++++++++++++++ insights/tests/client/test_collect.py | 65 +++++++- insights/tests/client/test_config.py | 28 +++- .../tests/client/test_skip_commands_files.py | 4 + insights/util/canonical_facts.py | 4 + 16 files changed, 518 insertions(+), 45 deletions(-) create mode 100644 insights/client/core_collector.py create mode 100644 insights/tests/client/test_archive.py diff --git a/insights/client/archive.py b/insights/client/archive.py index d2b3f0449..c5698c2ca 100644 --- a/insights/client/archive.py +++ b/insights/client/archive.py @@ -22,45 +22,75 @@ class InsightsArchive(object): """ This class is an interface for adding command output and files to the insights archive + + Attributes: + config - an InsightsConfig object + tmp_dir - a temporary directory in /var/tmp + archive_dir - location to collect archive data inside tmp_dir + archive_tmp_dir - a temporary directory to write the final archive file + archive_name - filename of the archive and archive_dir + cmd_dir - insights_commands directory inside archive_dir + compressor - tar compression flag to use + tar_file - path of the final archive file """ def __init__(self, config): """ Initialize the Insights Archive - Create temp dir, archive dir, and command dir """ self.config = config + # input this to core collector as `tmp_path` self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/') + + # we don't really need this anymore... 
self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/') - name = determine_hostname() + self.archive_name = ("insights-%s-%s" % - (name, + (determine_hostname(), time.strftime("%Y%m%d%H%M%S"))) - self.archive_dir = self.create_archive_dir() - self.cmd_dir = self.create_command_dir() + + # lazy create these, only if needed when certain + # functions are called + # classic collection and compliance needs these + # core collection will set "archive_dir" on its own + self.archive_dir = None + self.cmd_dir = None + self.compressor = config.compressor self.tar_file = None atexit.register(self.cleanup_tmp) def create_archive_dir(self): """ - Create the archive dir + Create the archive directory if it is undefined or does not exist. """ + if self.archive_dir and os.path.exists(self.archive_dir): + # attr defined and exists. move along + return self.archive_dir + archive_dir = os.path.join(self.tmp_dir, self.archive_name) - os.makedirs(archive_dir, 0o700) - return archive_dir + if not os.path.exists(archive_dir): + logger.debug('Creating archive directory %s...', archive_dir) + os.makedirs(archive_dir, 0o700) + self.archive_dir = archive_dir + return self.archive_dir def create_command_dir(self): """ - Create the "sos_commands" dir + Create the "insights_commands" dir """ + self.create_archive_dir() cmd_dir = os.path.join(self.archive_dir, "insights_commands") - os.makedirs(cmd_dir, 0o700) - return cmd_dir + logger.debug('Creating command directory %s...', cmd_dir) + if not os.path.exists(cmd_dir): + os.makedirs(cmd_dir, 0o700) + self.cmd_dir = cmd_dir + return self.cmd_dir def get_full_archive_path(self, path): """ Returns the full archive path """ + self.create_archive_dir() return os.path.join(self.archive_dir, path.lstrip('/')) def _copy_file(self, path): @@ -97,6 +127,7 @@ def copy_dir(self, path): """ Recursively copy directory """ + self.create_archive_dir() for directory in path: if os.path.isdir(path): full_path = os.path.join(self.archive_dir, directory.lstrip('/')) @@ -141,15 +172,17 @@ def delete_tmp_dir(self): """ Delete the entire tmp dir """ - logger.debug("Deleting: " + self.tmp_dir) - shutil.rmtree(self.tmp_dir, True) + if self.tmp_dir: + logger.debug("Deleting: " + self.tmp_dir) + shutil.rmtree(self.tmp_dir, True) def delete_archive_dir(self): """ Delete the entire archive dir """ - logger.debug("Deleting: " + self.archive_dir) - shutil.rmtree(self.archive_dir, True) + if self.archive_dir: + logger.debug("Deleting: " + self.archive_dir) + shutil.rmtree(self.archive_dir, True) def delete_archive_file(self): """ diff --git a/insights/client/client.py b/insights/client/client.py index d9af735f5..7fd9bc59c 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -18,6 +18,7 @@ determine_hostname) from .collection_rules import InsightsUploadConf from .data_collector import DataCollector +from .core_collector import CoreCollector from .connection import InsightsConnection from .archive import InsightsArchive from .support import registration_check @@ -284,18 +285,20 @@ def collect(config, pconn): pc = InsightsUploadConf(config) output = None - collection_rules = pc.get_conf_file() rm_conf = pc.get_rm_conf() blacklist_report = pc.create_report() if rm_conf: logger.warn("WARNING: Excluding data from files") - # defaults - mp = None archive = InsightsArchive(config) msg_name = determine_hostname(config.display_name) - dc = DataCollector(config, archive, mountpoint=mp) + if config.core_collect: + collection_rules = None + dc = CoreCollector(config, archive) + else: + 
+        collection_rules = pc.get_conf_file()
+        dc = DataCollector(config, archive)
 
     logger.info('Starting to collect Insights data for %s', msg_name)
     dc.run_collection(collection_rules, rm_conf, branch_info, blacklist_report)
     output = dc.done(collection_rules, rm_conf)
diff --git a/insights/client/config.py b/insights/client/config.py
index 17806aeb6..2a91dd1eb 100644
--- a/insights/client/config.py
+++ b/insights/client/config.py
@@ -6,6 +6,8 @@
 import six
 import sys
 from six.moves import configparser as ConfigParser
+from distutils.version import LooseVersion
+from .utilities import get_version_info
 
 try:
     from .constants import InsightsConstants as constants
@@ -14,6 +16,24 @@
 
 logger = logging.getLogger(__name__)
+
+
+def _core_collect_default():
+    '''
+    Core collection should be disabled by default, unless
+    the RPM version is 3.1.0 or above
+    '''
+    rpm_version = get_version_info()['client_version']
+    if not rpm_version:
+        # problem getting the version, default to False
+        return False
+    if LooseVersion(rpm_version) < LooseVersion(constants.core_collect_rpm_version):
+        # rpm version is older than the core collection release
+        return False
+    else:
+        # rpm version is equal to or newer than the core collection release
+        return True
+
 
 DEFAULT_OPTS = {
     'analyze_container': {
         'default': False,
@@ -102,6 +122,9 @@
         'help': 'Pass a custom config file',
         'action': 'store'
     },
+    'core_collect': {
+        'default': False
+    },
     'egg_path': {
         # non-CLI
         'default': None
@@ -414,6 +437,12 @@ def __init__(self, *args, **kwargs):
         self._init_attrs = copy.copy(dir(self))
 
         self._update_dict(DEFAULT_KVS)
+
+        # initialize the real default for core_collect here
+        # instead of inside DEFAULT_KVS because calling
+        # this function at the module scope ignores unit test mocks
+        self.core_collect = _core_collect_default()
+
         if args:
             self._update_dict(args[0])
         self._update_dict(kwargs)
diff --git a/insights/client/constants.py b/insights/client/constants.py
index f79bdf38a..0cea1a8d6 100644
--- a/insights/client/constants.py
+++ b/insights/client/constants.py
@@ -46,3 +46,5 @@ class InsightsConstants(object):
     cached_branch_info = os.path.join(default_conf_dir, '.branch_info')
     pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid')
     valid_compressors = ("gz", "xz", "bz2", "none")
+    # RPM version in which core collection was released
+    core_collect_rpm_version = '3.1.0'
diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py
new file mode 100644
index 000000000..8be1aeb49
--- /dev/null
+++ b/insights/client/core_collector.py
@@ -0,0 +1,83 @@
+"""
+Collect all the interesting data for analysis - Core version
+"""
+from __future__ import absolute_import
+import os
+import six
+import logging
+from insights import collect
+
+from .constants import InsightsConstants as constants
+from .data_collector import DataCollector
+from .utilities import systemd_notify_init_thread
+
+APP_NAME = constants.app_name
+logger = logging.getLogger(__name__)
+
+
+class CoreCollector(DataCollector):
+    def __init__(self, *args, **kwargs):
+        super(CoreCollector, self).__init__(*args, **kwargs)
+
+    def run_collection(self, conf, rm_conf, branch_info, blacklist_report):
+        '''
+        Initialize core collection here and generate the
+        output directory with collected data.
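+
+        A rough sketch of the blacklist handling below (hypothetical
+        entries): remove.conf values are anchored before being handed
+        to insights-core, since core treats them as regexes, e.g.::
+
+            {'files': ['/etc/hosts']}      # becomes {'files': ['^/etc/hosts$']}
+            {'commands': ['/bin/ls']}      # becomes {'commands': ['^/bin/ls$']}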
+ ''' + # initialize systemd-notify thread + systemd_notify_init_thread() + + if rm_conf is None: + rm_conf = {} + + # add tokens to limit regex handling + # core parses blacklist for files and commands as regex + if 'files' in rm_conf: + for idx, f in enumerate(rm_conf['files']): + rm_conf['files'][idx] = '^' + f + '$' + + if 'commands' in rm_conf: + for idx, c in enumerate(rm_conf['commands']): + rm_conf['commands'][idx] = '^' + c + '$' + + logger.debug('Beginning to run collection...') + + # only load files, keywords, components into core + core_blacklist = { + 'commands': rm_conf.get('commands', []), + 'files': rm_conf.get('files', []), + 'components': rm_conf.get('components', []) + } + + collected_data_path = collect.collect(tmp_path=self.archive.tmp_dir, rm_conf=core_blacklist) + # update the archive dir with the reported data location from Insights Core + if not collected_data_path: + raise RuntimeError('Error running collection: no output path defined.') + self.archive.archive_dir = collected_data_path + self.archive.archive_name = os.path.basename(collected_data_path) + + if not six.PY3: + # collect.py returns a unicode string, and these must be bytestrings + # when we call the tar command in 2.6 + self.archive.archive_dir = self.archive.archive_dir.encode('utf-8') + self.archive.archive_name = self.archive.archive_name.encode('utf-8') + + # set hostname_path for soscleaner + if os.path.exists(os.path.join(self.archive.archive_dir, 'data', 'insights_commands', 'hostname_-f')): + self.hostname_path = 'data/insights_commands/hostname_-f' + else: + # fall back to hostname if hostname -f not available + self.hostname_path = 'data/insights_commands/hostname' + + logger.debug('Collection finished.') + + self.redact(rm_conf) + + # collect metadata + logger.debug('Collecting metadata...') + self._write_branch_info(branch_info) + self._write_display_name() + self._write_version_info() + self._write_tags() + self._write_blacklist_report(blacklist_report) + logger.debug('Metadata collection finished.') diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 079112706..cc75768eb 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -230,6 +230,9 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): # initialize systemd-notify thread systemd_notify_init_thread() + self.archive.create_archive_dir() + self.archive.create_command_dir() + if rm_conf is None: rm_conf = {} logger.debug('Beginning to run collection spec...') diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 5e322a706..9727ed3ef 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -117,7 +117,8 @@ def update(client, config): if config.payload: logger.debug('Uploading a payload. 
Bypassing rules update.') return - client.update_rules() + if not config.core_collect: + client.update_rules() @phase diff --git a/insights/tests/client/data_collector/test_redact.py b/insights/tests/client/data_collector/test_redact.py index 37b817c31..4bb6e8bba 100644 --- a/insights/tests/client/data_collector/test_redact.py +++ b/insights/tests/client/data_collector/test_redact.py @@ -1,7 +1,8 @@ from insights.client.config import InsightsConfig from insights.client.archive import InsightsArchive from insights.client.data_collector import DataCollector -from mock.mock import patch +from insights.client.core_collector import CoreCollector +from mock.mock import patch, Mock import pytest import os import six @@ -9,8 +10,50 @@ test_file_data = 'test\nabcd\n1234\npassword: p4ssw0rd\n' +@patch('insights.client.archive.InsightsArchive', Mock()) +@patch('insights.client.insights_spec.InsightsCommand', Mock()) +@patch('insights.client.insights_spec.InsightsFile', Mock()) +@patch('insights.client.data_collector.DataCollector._parse_command_spec', Mock()) +@patch('insights.client.data_collector.DataCollector._parse_file_spec', Mock()) +@patch('insights.client.data_collector.DataCollector._parse_glob_spec', Mock()) +@patch('insights.client.data_collector.DataCollector.redact') +def test_redact_called_classic(redact): + ''' + Verify that redact is always called during classic collection + ''' + conf = InsightsConfig() + upload_conf = {'commands': [], 'files': [], 'globs': []} + rm_conf = {'test': 'test'} + branch_info = {'test1': 'test2'} + blacklist_report = {'test3': 'test4'} + dc = DataCollector(conf) + dc.run_collection(upload_conf, rm_conf, branch_info, blacklist_report) + redact.assert_called_once_with(rm_conf) + + +@patch('insights.client.archive.InsightsArchive', Mock()) +@patch('insights.client.core_collector.CoreCollector._write_branch_info', Mock()) +@patch('insights.client.core_collector.CoreCollector._write_display_name', Mock()) +@patch('insights.client.core_collector.CoreCollector._write_version_info', Mock()) +@patch('insights.client.core_collector.CoreCollector._write_tags', Mock()) +@patch('insights.client.core_collector.CoreCollector._write_blacklist_report', Mock()) +@patch('insights.client.core_collector.collect.collect', Mock(return_value='/var/tmp/testarchive/insights-test')) +@patch('insights.client.core_collector.CoreCollector.redact') +def test_redact_called_core(redact): + ''' + Verify that redact is always called during core collection + ''' + conf = InsightsConfig(core_collect=True) + upload_conf = None + rm_conf = {'test': 'test'} + branch_info = {'test1': 'test2'} + blacklist_report = {'test3': 'test4'} + dc = CoreCollector(conf) + dc.run_collection(upload_conf, rm_conf, branch_info, blacklist_report) + redact.assert_called_once_with(rm_conf) + + @patch('insights.client.data_collector.os.walk') -# @patch('insights.client.data_collector._process_content_redaction') def test_redact_call_walk(walk): ''' Verify that redact() calls os.walk and when an @@ -18,9 +61,7 @@ def test_redact_call_walk(walk): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. - # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() dc = DataCollector(conf, arch) rm_conf = {} @@ -41,9 +82,7 @@ def test_redact_call_process_redaction(_process_content_redaction): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. 
- # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() # put something in the archive to redact test_file = os.path.join(arch.archive_dir, 'test.file') @@ -74,9 +113,7 @@ def test_redact_exclude_regex(_process_content_redaction): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. - # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() # put something in the archive to redact test_file = os.path.join(arch.archive_dir, 'test.file') @@ -105,9 +142,7 @@ def test_redact_exclude_no_regex(_process_content_redaction): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. - # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() # put something in the archive to redact test_file = os.path.join(arch.archive_dir, 'test.file') @@ -136,9 +171,7 @@ def test_redact_exclude_empty(_process_content_redaction): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. - # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() # put something in the archive to redact test_file = os.path.join(arch.archive_dir, 'test.file') @@ -167,9 +200,7 @@ def test_redact_exclude_none(_process_content_redaction): ''' conf = InsightsConfig() arch = InsightsArchive(conf) - # TODO: uncomment this once dual collector logic is merged. - # archive dir must be created explicitly - # arch.create_archive_dir() + arch.create_archive_dir() # put something in the archive to redact test_file = os.path.join(arch.archive_dir, 'test.file') diff --git a/insights/tests/client/phase/test_LEGACY_post_update.py b/insights/tests/client/phase/test_LEGACY_post_update.py index 34d394e0d..a18568332 100644 --- a/insights/tests/client/phase/test_LEGACY_post_update.py +++ b/insights/tests/client/phase/test_LEGACY_post_update.py @@ -17,7 +17,8 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.register": False, "return_value.load_all.return_value.diagnosis": None, "return_value.load_all.return_value.show_results": False, - "return_value.load_all.return_value.check_results": False}) + "return_value.load_all.return_value.check_results": False, + "return_value.load_all.return_value.core_collect": False}) return patcher(old_function) diff --git a/insights/tests/client/phase/test_post_update.py b/insights/tests/client/phase/test_post_update.py index 979c4a568..b22faed83 100644 --- a/insights/tests/client/phase/test_post_update.py +++ b/insights/tests/client/phase/test_post_update.py @@ -20,7 +20,8 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.reregister": False, "return_value.load_all.return_value.payload": None, "return_value.load_all.return_value.show_results": False, - "return_value.load_all.return_value.check_results": False}) + "return_value.load_all.return_value.check_results": False, + "return_value.load_all.return_value.core_collect": False}) return patcher(old_function) # DRY this at some point... 
for the love of god diff --git a/insights/tests/client/phase/test_update.py b/insights/tests/client/phase/test_update.py index b7b44c89c..45d5b52aa 100644 --- a/insights/tests/client/phase/test_update.py +++ b/insights/tests/client/phase/test_update.py @@ -12,6 +12,7 @@ def test_update_payload_on(insights_config, insights_client): Rules are not updated when a payload is uploaded """ insights_config.return_value.load_all.return_value.payload = True + insights_config.return_value.load_all.return_value.core_collect = False try: update() except SystemExit: @@ -27,6 +28,39 @@ def test_update_payload_off(insights_config, insights_client): Rules are updated in normal operation (no payload) """ insights_config.return_value.load_all.return_value.payload = False + insights_config.return_value.load_all.return_value.core_collect = False + try: + update() + except SystemExit: + pass + insights_client.return_value.update.assert_called_once() + insights_client.return_value.update_rules.assert_called_once() + + +@patch("insights.client.phase.v1.InsightsClient") +@patch("insights.client.phase.v1.InsightsConfig") +def test_update_core_collect_on(insights_config, insights_client): + """ + Rules are not updated when using core collection + """ + insights_config.return_value.load_all.return_value.payload = False + insights_config.return_value.load_all.return_value.core_collect = True + try: + update() + except SystemExit: + pass + insights_client.return_value.update.assert_called_once() + insights_client.return_value.update_rules.assert_not_called() + + +@patch("insights.client.phase.v1.InsightsClient") +@patch("insights.client.phase.v1.InsightsConfig") +def test_update_core_collect_off(insights_config, insights_client): + """ + Rules are updated when using classic collection + """ + insights_config.return_value.load_all.return_value.payload = False + insights_config.return_value.load_all.return_value.core_collect = False try: update() except SystemExit: diff --git a/insights/tests/client/test_archive.py b/insights/tests/client/test_archive.py new file mode 100644 index 000000000..247f5db8f --- /dev/null +++ b/insights/tests/client/test_archive.py @@ -0,0 +1,157 @@ +from insights.client.archive import InsightsArchive +from mock.mock import patch, Mock, call +from unittest import TestCase + +test_timestamp = '000000' +test_hostname = 'testhostname' +test_archive_name = 'insights-testhostname-000000' +test_archive_dir = '/var/tmp/test/insights-testhostname-000000' +test_cmd_dir = '/var/tmp/test/insights-testhostname-000000/insights_commands' + + +@patch('insights.client.archive.time.strftime', Mock(return_value=test_timestamp)) +@patch('insights.client.archive.determine_hostname', Mock(return_value=test_hostname)) +@patch('insights.client.archive.tempfile.mkdtemp') +@patch('insights.client.archive.atexit.register') +class TestInsightsArchive(TestCase): + + def test_init_archive(self, register, mkdtemp): + ''' + Verify archive is created with default parameters + ''' + config = Mock() + archive = InsightsArchive(config) + + assert archive.config == config + assert archive.tmp_dir + assert archive.archive_tmp_dir + assert archive.archive_dir is None + assert archive.cmd_dir is None + assert archive.compressor == config.compressor + assert archive.archive_name == test_archive_name + + mkdtemp.assert_has_calls([call(prefix='/var/tmp/'), + call(prefix='/var/tmp/')]) + register.assert_called_once() + + @patch('insights.client.archive.os.makedirs') + @patch('insights.client.archive.os.path.exists', 
Mock(return_value=False)) + def test_create_archive_dir_default(self, makedirs, _, __): + ''' + Verify archive_dir is created when it does not already exist + ''' + archive = InsightsArchive(Mock()) + # give this a discrete value so we can check the results + archive.tmp_dir = '/var/tmp/test' + result = archive.create_archive_dir() + makedirs.assert_called_once_with(test_archive_dir, 0o700) + # ensure the archive_dir is returned from the function + assert result == test_archive_dir + # ensure the class attr is set + assert archive.archive_dir == test_archive_dir + # ensure the retval and attr are the same + assert result == archive.archive_dir + + @patch('insights.client.archive.os.makedirs') + @patch('insights.client.archive.os.path.exists', return_value=False) + def test_create_archive_dir_defined_path_DNE(self, exists, makedirs, _, __): + ''' + Verify archive_dir is created when the attr is defined but + the path does not exist + ''' + archive = InsightsArchive(Mock()) + # give this a discrete value so we can check the results + archive.tmp_dir = '/var/tmp/test' + archive.archive_dir = test_archive_dir + result = archive.create_archive_dir() + exists.assert_has_calls([call(archive.archive_dir), + call(test_archive_dir)]) + makedirs.assert_called_once_with(test_archive_dir, 0o700) + # ensure the archive_dir is returned from the function + assert result == test_archive_dir + # ensure the class attr is set + assert archive.archive_dir == test_archive_dir + # ensure the retval and attr are the same + assert result == archive.archive_dir + + @patch('insights.client.archive.os.makedirs') + @patch('insights.client.archive.os.path.exists', return_value=True) + def test_create_archive_dir_undef_path_exists(self, exists, makedirs, _, __): + ''' + Verify archive_dir is not re-created when the attr is undefined but + the path exists + ''' + archive = InsightsArchive(Mock()) + # give this a discrete value so we can check the results + archive.tmp_dir = '/var/tmp/test' + result = archive.create_archive_dir() + makedirs.assert_not_called() + exists.assert_called_once_with(test_archive_dir) + # ensure the archive_dir is returned from the function + assert result == test_archive_dir + # ensure the class attr is set + assert archive.archive_dir == test_archive_dir + # ensure the retval and attr are the same + assert result == archive.archive_dir + + @patch('insights.client.archive.os.makedirs') + @patch('insights.client.archive.os.path.exists', return_value=True) + def test_create_archive_dir_defined_path_exists(self, exists, makedirs, _, __): + ''' + When archive_dir is defined and exists, simply return the + class attr and do not attempt to create it + ''' + archive = InsightsArchive(Mock()) + # give this a discrete value so we can check the results + archive.tmp_dir = '/var/tmp/test' + archive.archive_dir = test_archive_dir + result = archive.create_archive_dir() + makedirs.assert_not_called() + exists.assert_called_once_with(archive.archive_dir) + # ensure the archive_dir is returned from the function + assert result == test_archive_dir + # ensure the class attr is set + assert archive.archive_dir == test_archive_dir + # ensure the retval and attr are the same + assert result == archive.archive_dir + + @patch('insights.client.archive.InsightsArchive.create_archive_dir', return_value=test_archive_dir) + @patch('insights.client.archive.os.makedirs') + @patch('insights.client.archive.os.path.exists', return_value=False) + def test_create_command_dir(self, exists, makedirs, create_archive_dir, _, __): + 
''' + Verify insights_commands dir is created + ''' + archive = InsightsArchive(Mock()) + archive.archive_dir = test_archive_dir + result = archive.create_command_dir() + create_archive_dir.assert_called_once() + makedirs.assert_called_once_with(test_cmd_dir, 0o700) + # ensure the cmd_dir is returned from the function + assert result == test_cmd_dir + # ensure the class attr is set + assert archive.cmd_dir == test_cmd_dir + # ensure the retval and attr are the same + assert result == archive.cmd_dir + + @patch('insights.client.archive.InsightsArchive.create_archive_dir', return_value=test_archive_dir) + @patch('insights.client.archive.os.path.join', Mock()) + def test_get_full_archive_path(self, create_archive_dir, _, __): + ''' + Verify create_archive_dir is called when calling get_full_archive_path + ''' + archive = InsightsArchive(Mock()) + archive.get_full_archive_path('test') + create_archive_dir.assert_called_once() + + @patch('insights.client.archive.InsightsArchive.create_archive_dir', return_value=test_archive_dir) + @patch('insights.client.archive.os.path.join', Mock()) + @patch('insights.client.archive.os.path.isdir', Mock()) + @patch('insights.client.archive.shutil.copytree', Mock()) + def test_copy_dir(self, create_archive_dir, _, __): + ''' + Verify create_archive_dir is called when calling copy_dir + ''' + archive = InsightsArchive(Mock()) + archive.copy_dir('test') + create_archive_dir.assert_called_once() diff --git a/insights/tests/client/test_collect.py b/insights/tests/client/test_collect.py index 4c7052522..d02cc1af8 100644 --- a/insights/tests/client/test_collect.py +++ b/insights/tests/client/test_collect.py @@ -28,7 +28,8 @@ def collect_args(*insights_config_args, **insights_config_custom_kwargs): all_insights_config_kwargs = {"logging_file": "/tmp/insights.log", "remove_file": conf_remove_file, "redaction_file": conf_file_redaction_file, - "content_redaction_file": conf_file_content_redaction_file} + "content_redaction_file": conf_file_content_redaction_file, + "core_collect": False} all_insights_config_kwargs.update(insights_config_custom_kwargs) return InsightsConfig(*insights_config_args, **all_insights_config_kwargs), Mock() @@ -152,7 +153,6 @@ def decorator(old_function): @patch_data_collector() @patch_get_conf_file() -# @patch_get_conf_stdin() @patch_get_branch_info() def test_get_conf_file(get_branch_info, get_conf_file, data_collector): """ @@ -164,6 +164,19 @@ def test_get_conf_file(get_branch_info, get_conf_file, data_collector): get_conf_file.assert_called_once_with() +@patch("insights.client.client.CoreCollector") +@patch_get_conf_file() +@patch_get_branch_info() +def test_get_conf_not_called_core_collection(get_branch_info, get_conf_file, core_collector): + """ + Verify that uploader.json is not loaded when using core collection + """ + config, pconn = collect_args(core_collect=True) + collect(config, pconn) + + get_conf_file.assert_not_called() + + @patch_data_collector() @patch_get_rm_conf() @patch_get_conf_file() @@ -198,6 +211,54 @@ def test_data_collector_file(get_branch_info, get_conf_file, get_rm_conf, data_c data_collector.return_value.done.assert_called_once_with(collection_rules, rm_conf) +@patch("insights.client.client.InsightsUploadConf.create_report") +@patch("insights.client.client.CoreCollector") +@patch_get_rm_conf() +@patch_get_conf_file() +@patch_get_branch_info() +def test_core_collector_file(get_branch_info, get_conf_file, get_rm_conf, core_collector, create_report): + """ + CoreCollector is loaded with rm_conf and a None value for 
collection_rules + """ + config, pconn = collect_args(core_collect=True) + collect(config, pconn) + + collection_rules = None + rm_conf = get_rm_conf.return_value + branch_info = get_branch_info.return_value + blacklist_report = create_report.return_value + core_collector.return_value.run_collection.assert_called_once_with(collection_rules, rm_conf, branch_info, blacklist_report) + core_collector.return_value.done.assert_called_once_with(collection_rules, rm_conf) + + +@patch("insights.client.client.CoreCollector") +@patch("insights.client.client.DataCollector") +@patch("insights.client.client.InsightsUploadConf.create_report") +@patch_get_rm_conf() +@patch_get_conf_file() +@patch_get_branch_info() +def test_correct_collector_loaded(get_branch_info, get_conf_file, get_rm_conf, create_report, data_collector, core_collector): + ''' + Verify that core collection is loaded for core_collect=True, and that + classic collection is loaded for core_collect=False + ''' + config, pconn = collect_args(core_collect=False) + collect(config, pconn) + + data_collector.return_value.run_collection.assert_called() + core_collector.return_value.run_collection.assert_not_called() + + # clear calls to test opposite condition + data_collector.return_value.run_collection.reset_mock() + core_collector.return_value.run_collection.reset_mock() + + config.core_collect = True + collect(config, pconn) + + data_collector.return_value.run_collection.assert_not_called() + core_collector.return_value.run_collection.assert_called() + + @patch_data_collector() @patch_validate_gpg_sig(False) @patch_isfile(False) diff --git a/insights/tests/client/test_config.py b/insights/tests/client/test_config.py index a016bc011..6c7a233d5 100644 --- a/insights/tests/client/test_config.py +++ b/insights/tests/client/test_config.py @@ -2,7 +2,7 @@ import sys import os from io import TextIOWrapper, BytesIO -from insights.client.config import InsightsConfig, DEFAULT_OPTS +from insights.client.config import InsightsConfig, DEFAULT_OPTS, _core_collect_default from mock.mock import patch @@ -221,3 +221,29 @@ def test_output_file_guess_file_ext(): c.load_all() assert c.output_file == os.path.abspath('test-mno.tar') assert c.compressor == 'none' + + +@patch('insights.client.config.get_version_info') +def test_core_collect_default(get_version_info): + ''' + Verify that _core_collect_default() returns + the correct True/False value depending on + the conditions + ''' + # RPM version is older than 3.1.0 + get_version_info.return_value = {'client_version': '3.0.13-1'} + assert not _core_collect_default() + conf = InsightsConfig() + assert not conf.core_collect + + # RPM version is 3.1.0 + get_version_info.return_value = {'client_version': '3.1.0'} + assert _core_collect_default() + conf = InsightsConfig() + assert conf.core_collect + + # RPM version is newer than 3.1.0 + get_version_info.return_value = {'client_version': '3.1.1'} + assert _core_collect_default() + conf = InsightsConfig() + assert conf.core_collect diff --git a/insights/tests/client/test_skip_commands_files.py b/insights/tests/client/test_skip_commands_files.py index d895f68c1..0a3a47377 100644 --- a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -86,6 +86,8 @@ def test_dont_archive_when_command_not_found(write_data_to_file): If the command is not found do not archive it """ arch = InsightsArchive(InsightsConfig()) + arch.archive_dir = arch.create_archive_dir() + arch.cmd_dir = arch.create_command_dir() cmd = 
MagicMock(spec=InsightsCommand) cmd.get_output.return_value = 'timeout: failed to run command blah: No such file or directory' @@ -106,6 +108,8 @@ def test_dont_archive_when_missing_dep(write_data_to_file): If missing dependencies do not archive it """ arch = InsightsArchive(InsightsConfig()) + arch.archive_dir = arch.create_archive_dir() + arch.cmd_dir = arch.create_command_dir() cmd = MagicMock(spec=InsightsCommand) cmd.get_output.return_value = "Missing Dependencies:" diff --git a/insights/util/canonical_facts.py b/insights/util/canonical_facts.py index cdb763f2f..ebe376192 100644 --- a/insights/util/canonical_facts.py +++ b/insights/util/canonical_facts.py @@ -6,6 +6,7 @@ from insights.specs import Specs from insights.core import Parser from insights.core.plugins import parser +from insights.core.dr import set_enabled import uuid @@ -108,6 +109,9 @@ def canonical_facts( def get_canonical_facts(path=None): + set_enabled(canonical_facts, True) + set_enabled(SubscriptionManagerID, True) + set_enabled(IPs, True) br = run(canonical_facts, root=path) d = br[canonical_facts] del d["type"] From fde45f9bf27c5b1563de2122b9ba2060dc4f9b0a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Fri, 17 Jul 2020 13:03:37 -0400 Subject: [PATCH 114/892] map files and commands to core components (#2648) * map files and commands to core components when core_collect=True * include a copy of uploader.json for mapping Signed-off-by: Jeremy Crafts --- insights/client/collection_rules.py | 20 +- insights/client/map_components.py | 190 + .../collection_rules/test_map_components.py | 341 ++ insights/uploader_json_map.json | 3864 +++++++++++++++++ 4 files changed, 4408 insertions(+), 7 deletions(-) create mode 100644 insights/client/map_components.py create mode 100644 insights/tests/client/collection_rules/test_map_components.py create mode 100644 insights/uploader_json_map.json diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py index 5b80024ec..a24c6ebaf 100644 --- a/insights/client/collection_rules.py +++ b/insights/client/collection_rules.py @@ -16,6 +16,7 @@ from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile from .constants import InsightsConstants as constants +from .map_components import map_rm_conf_to_components APP_NAME = constants.app_name logger = logging.getLogger(__name__) @@ -336,9 +337,9 @@ def get_rm_conf_old(self): raise RuntimeError('ERROR: Unknown key in remove.conf: ' + item + '\nValid keys are ' + ', '.join(expected_keys) + '.') if six.PY3: - rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',') + rm_conf[item] = [v.strip() for v in value.strip().encode('utf-8').decode('unicode-escape').split(',')] else: - rm_conf[item] = value.strip().decode('string-escape').split(',') + rm_conf[item] = [v.strip() for v in value.strip().decode('string-escape').split(',')] self.rm_conf = rm_conf except ConfigParser.Error as e: # can't parse config file at all @@ -402,12 +403,17 @@ def get_rm_conf(self): if not redact_conf and not content_redact_conf: # no file-redaction.yaml or file-content-redaction.yaml defined, # try to use remove.conf - return self.get_rm_conf_old() + self.rm_conf = self.get_rm_conf_old() + if self.config.core_collect: + self.rm_conf = map_rm_conf_to_components(self.rm_conf) + return self.rm_conf # remove Nones, empty strings, and empty lists filtered_rm_conf = dict((k, v) for k, v in rm_conf.items() if v) self.rm_conf = filtered_rm_conf - return filtered_rm_conf + if self.config.core_collect: + 
self.rm_conf = map_rm_conf_to_components(self.rm_conf) + return self.rm_conf def get_tags_conf(self): ''' @@ -435,7 +441,7 @@ def validate(self): return None # Using print here as this could contain sensitive information print('Blacklist configuration parsed contents:') - print(success) + print(json.dumps(success, indent=4)) logger.info('Parsed successfully.') return True @@ -493,6 +499,6 @@ def length(lst): config = InsightsConfig().load_all() uploadconf = InsightsUploadConf(config) uploadconf.validate() - report = uploadconf.create_report() + # report = uploadconf.create_report() - print(report) + # print(report) diff --git a/insights/client/map_components.py b/insights/client/map_components.py new file mode 100644 index 000000000..9e3502c53 --- /dev/null +++ b/insights/client/map_components.py @@ -0,0 +1,190 @@ +from __future__ import absolute_import +import pkgutil +import insights +import json +import six +import logging +import textwrap + +from .constants import InsightsConstants as constants + +APP_NAME = constants.app_name +logger = logging.getLogger(__name__) + +uploader_json_file = pkgutil.get_data(insights.__name__, "uploader_json_map.json") +uploader_json = json.loads(uploader_json_file) + + +def map_rm_conf_to_components(rm_conf): + ''' + In order to maximize compatibility between "classic" remove.conf + configurations and core collection, do the following mapping + strategy: + 1. If remove.conf entry matches a symbolic name, disable the + corresponding core component. + 2. If remove.conf entry is a raw command or file, do a reverse + lookup on the symbolic name based on stored uploader.json data, + then continue as in step 1. + 3. If neither conditions 1 or 2 are matched it is either + a) a mistyped command/file, or + b) an arbitrary file. + For (a), classic remove.conf configs require an exact match to + uploader.json. We can carry that condition into our + compatibility with core. + For (b), classic collection had the ability to skip arbitrary + files based on filepaths in uploader.json post-expansion + (i.e. a specific repo file in /etc/yum.repos.d). + Core checks all files collected against the file + blacklist filters, so these files will be omitted + just by the nature of core collection. 
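+
+    A sketch of the intended result (hypothetical blacklist entries;
+    "ss_tupna" maps to the DefaultSpecs.ss component per the conversion
+    table in _get_component_by_symbolic_name below)::
+
+        {'commands': ['ss_tupna'], 'files': ['/etc/myapp/app.conf']}
+        # becomes
+        {'commands': [], 'files': ['/etc/myapp/app.conf'],
+         'components': ['insights.specs.default.DefaultSpecs.ss']}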
+ ''' + updated_commands = [] + updated_files = [] + updated_components = [] + + if not rm_conf: + return rm_conf + + logger.warning("If possible, commands and files specified in the blacklist configuration will be converted to Insights component specs that will be disabled as needed.") + + # save matches to a dict for informative logging + conversion_map = {} + longest_key_len = 0 + + for section in ['commands', 'files']: + if section not in rm_conf: + continue + for key in rm_conf[section]: + if section == 'commands': + symbolic_name = _search_uploader_json(['commands'], key) + elif section == 'files': + # match both files and globs to rm_conf files + symbolic_name = _search_uploader_json(['files', 'globs'], key) + + component = _get_component_by_symbolic_name(symbolic_name) + if component: + conversion_map[key] = component + if len(key) > longest_key_len: + longest_key_len = len(key) + updated_components.append(component) + else: + if section == 'commands': + updated_commands.append(key) + elif section == 'files': + updated_files.append(key) + + _log_conversion_table(conversion_map, longest_key_len) + + if 'components' in rm_conf: + # update components list if there already is one + original_comp_set = set(rm_conf['components']) + updated_comp_set = set(dict.fromkeys(updated_components)) + # avoid duplicates + rm_conf['components'] += list(updated_comp_set - original_comp_set) + else: + # otherwise create it + rm_conf['components'] = list(dict.fromkeys(updated_components)) + + rm_conf['commands'] = updated_commands + rm_conf['files'] = updated_files + + return rm_conf + + +def _search_uploader_json(headings, key): + ''' + Search an uploader.json block for a command/file from "name" + and return the symbolic name if it exists + + headings - list of headings to search inside uploader.json + key - raw command/file or symbolic name to search + conversion_map - list of names to found components for logging + longest_key_len - length of longest name for logging + ''' + for heading in headings: + # keys inside the dicts are the heading, but singular + singular = heading.rstrip('s') + + for spec in uploader_json[heading]: + if key == spec['symbolic_name'] or (key == spec[singular] and heading != 'globs'): + # matches to a symbolic name or raw command, cache the symbolic name + # only match symbolic name for globs + sname = spec['symbolic_name'] + if not six.PY3: + sname = sname.encode('utf-8') + return sname + # no match + return None + + +def _get_component_by_symbolic_name(sname): + # match a component to a symbolic name + # some symbolic names need to be renamed to fit specs + if sname is None: + # toss back bad input + return None + + spec_prefix = "insights.specs.default.DefaultSpecs." 
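+    # conversion table for symbolic names whose core spec is named
+    # differently, e.g. 'lspci_kernel' maps to DefaultSpecs.lspci;
+    # names mapped to None have no core equivalent, so the caller
+    # leaves them in the raw commands/files blacklist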
+ spec_conversion = { + 'getconf_pagesize': 'getconf_page_size', + 'lspci_kernel': 'lspci', + 'netstat__agn': 'netstat_agn', + 'rpm__V_packages': 'rpm_V_packages', + 'ss_tupna': 'ss', + + 'machine_id1': 'machine_id', + 'machine_id2': 'machine_id', + 'machine_id3': 'machine_id', + 'grub2_efi_grubenv': None, + 'grub2_grubenv': None, + 'limits_d': 'limits_conf', + 'modprobe_conf': 'modprobe', + 'modprobe_d': 'modprobe', + 'ps_auxwww': 'insights.specs.sos_archive.SosSpecs.ps_auxww', # special case + 'rh_mongodb26_conf': 'mongod_conf', + 'sysconfig_rh_mongodb26': 'sysconfig_mongod', + 'redhat_access_proactive_log': None, + + 'krb5_conf_d': 'krb5' + } + + if sname in spec_conversion: + if spec_conversion[sname] is None: + return None + if sname == 'ps_auxwww': + return spec_conversion[sname] + return spec_prefix + spec_conversion[sname] + return spec_prefix + sname + + +def _log_conversion_table(conversion_map, longest_key_len): + ''' + Handle wrapping & logging the conversions + ''' + max_log_len = 48 + + for n in conversion_map: + spec_name_no_prefix = conversion_map[n].rsplit('.', 1)[-1] + + # for specs exceeding a max length, wrap them past the first line + if longest_key_len > max_log_len: + log_len = max_log_len + else: + log_len = longest_key_len + + wrapped_spec = textwrap.wrap(n, max_log_len) + # log the conversion on the first line of the "wrap" + wrapped_spec[0] = '- {0:{1}} => {2}'.format(wrapped_spec[0], log_len, spec_name_no_prefix) + logger.warning('\n '.join(wrapped_spec)) + + +if __name__ == '__main__': + from .config import InsightsConfig + from .collection_rules import InsightsUploadConf + config = InsightsConfig(core_collect=True).load_all() + uploadconf = InsightsUploadConf(config) + # rm_conf = uploadconf.get_rm_conf() + # report = map_rm_conf_to_components(rm_conf) + # uploadconf.rm_conf = report + uploadconf.validate() + # print(report) diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py new file mode 100644 index 000000000..a4e34aefa --- /dev/null +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -0,0 +1,341 @@ +import pkgutil +import insights +import json + +# from insights.client.config import InsightsConfig +from insights.client.collection_rules import InsightsUploadConf +from mock.mock import patch, Mock +from insights.specs.default import DefaultSpecs +from insights.specs.sos_archive import SosSpecs +from insights.client.map_components import (map_rm_conf_to_components, + _search_uploader_json, + _get_component_by_symbolic_name) + +uploader_json_file = pkgutil.get_data(insights.__name__, "uploader_json_map.json") +uploader_json = json.loads(uploader_json_file) +default_specs = vars(DefaultSpecs).keys() +sos_specs = vars(SosSpecs).keys() + + +@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.map_rm_conf_to_components') +def test_called_when_core_collection_enabled(map_rm_conf_to_components): + ''' + Verify that the function is called from get_rm_conf when core_collect=True + ''' + upload_conf = InsightsUploadConf(Mock(core_collect=True)) + upload_conf.get_rm_conf() + map_rm_conf_to_components.assert_called_once_with({'test': 'test'}) + + +@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', 
Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.map_rm_conf_to_components') +def test_not_called_when_core_collection_disabled(map_rm_conf_to_components): + ''' + Verify that the function is not called from get_rm_conf when core_collect=False + ''' + upload_conf = InsightsUploadConf(Mock(core_collect=False)) + upload_conf.get_rm_conf() + map_rm_conf_to_components.assert_not_called() + + +def test_get_component_by_symbolic_name(): + ''' + Verify that all symbolic names in uploader.json can be mapped + to valid components as prescribed in the conversion function + ''' + # some specs have been removed for core release so because they either + # A) do not appear in uploader.json, or + # B) DO appear in uploader.json, but have no associated rules + # Filter out the (B) specs with this list + skipped_specs = [ + 'ceph_osd_df', + 'dmsetup_info', + 'du_dirs', + 'gluster_peer_status', + 'gluster_v_status', + 'heat_crontab', + 'httpd_on_nfs', + 'ls_edac_mc', + 'ls_usr_sbin', + 'lvmconfig', + 'saphostexec_status', + 'saphostexec_version', + 'nova_migration_uid', + 'ntpq_pn', + 'rabbitmq_queues', + 'rhev_data_center', + 'root_crontab', + 'subscription_manager_installed_product_ids', + 'yum_list_installed', + 'zdump_v', + 'cni_podman_bridge_conf', + 'cpu_smt_control', + 'cpu_vulns_meltdown', + 'cpu_vulns_spectre_v1', + 'cpu_vulns_spectre_v2', + 'cpu_vulns_spec_store_bypass', + 'dnf_modules', + 'docker_storage', + 'freeipa_healthcheck_log', + 'vmware_tools_conf', + 'ironic_conf', + 'octavia_conf', + 'partitions', + 'rhn_hibernate_conf', + 'rhn_search_daemon_log', + 'rhosp_release', + 'secure', + 'foreman_tasks_config', + 'ssh_foreman_config', + 'swift_conf', + 'sys_kernel_sched_features', + 'sysconfig_memcached', + 'sysconfig_mongod', + 'systemd_system_origin_accounting', + 'tuned_conf', + 'vdsm_conf', + 'vdsm_id', + 'neutron_ml2_conf', + 'sap_host_profile', + 'sched_rt_runtime_us', + 'libvirtd_qemu_log', + 'mlx4_port' + ] + + # first, make sure our list is proper and one of these + # are in the default specs + for s in skipped_specs: + assert s not in default_specs + + for category in ['commands', 'files', 'globs']: + for entry in uploader_json[category]: + full_component = _get_component_by_symbolic_name(entry['symbolic_name']) + + if full_component is None: + # this entry should not be in core, so assert that it's missing + assert entry['symbolic_name'] not in default_specs + continue + + module, shortname = full_component.rsplit('.', 1) + + # filter out specs without associated rules + if shortname in skipped_specs: + continue + + if module == "insights.specs.default.DefaultSpecs": + assert shortname in default_specs + elif module == "insights.specs.sos_archive.SosSpecs": + assert shortname in sos_specs + else: + # invalid module name + assert False + + +def test_search_uploader_json(): + ''' + Verify that all valid input from an uploader.json-based remove.conf + will return a symbolic name + ''' + for cmd in uploader_json['commands']: + assert _search_uploader_json(['commands'], cmd['command']) + assert _search_uploader_json(['commands'], cmd['symbolic_name']) + for fil in uploader_json['files']: + assert _search_uploader_json(['files', 'globs'], fil['file']) + assert _search_uploader_json(['files', 'globs'], fil['symbolic_name']) + for glb in uploader_json['globs']: + assert _search_uploader_json(['files', 'globs'], glb['symbolic_name']) + + +def 
test_search_uploader_json_invalid(): + ''' + Verify that invalid input will return None + ''' + assert _search_uploader_json(['commands'], 'random value') is None + assert _search_uploader_json(['files', 'globs'], 'random value') is None + + +def test_search_uploader_json_globs_symbolic_only(): + ''' + Verify that globs are matched by symbolic name only + ''' + for glb in uploader_json['globs']: + assert _search_uploader_json(['files', 'globs'], glb['glob']) is None + + +def test_map_rm_conf_to_components_sym_names(): + ''' + Verify that all symbolic names in uploader.json result as + components in the output + ''' + # commands + for cmd in uploader_json['commands']: + # run each possible command through the function + sym_name = cmd['symbolic_name'] + rm_conf = {'commands': [sym_name]} + # figure out the destination name should be + spec_name = _get_component_by_symbolic_name(sym_name) + new_rm_conf = map_rm_conf_to_components(rm_conf) + # commands should be empty, components should have 1 item + assert len(new_rm_conf['commands']) == 0 + assert len(new_rm_conf['components']) == 1 + assert new_rm_conf['components'][0] == spec_name + + # files + for fil in uploader_json['files']: + # run each possible file through the function + sym_name = fil['symbolic_name'] + rm_conf = {'files': [sym_name]} + # figure out the destination name should be + spec_name = _get_component_by_symbolic_name(sym_name) + new_rm_conf = map_rm_conf_to_components(rm_conf) + # files should be empty, components should have 1 item + # except for these which cannot be mapped to specs. + # in which case, components empty and these remain in files + if sym_name in ['grub2_efi_grubenv', + 'grub2_grubenv', + 'redhat_access_proactive_log']: + assert len(new_rm_conf['files']) == 1 + assert new_rm_conf['files'][0] == sym_name + assert len(new_rm_conf['components']) == 0 + else: + assert len(new_rm_conf['files']) == 0 + assert len(new_rm_conf['components']) == 1 + assert new_rm_conf['components'][0] == spec_name + + # globs + for glb in uploader_json['globs']: + # run each possible glob through the function + sym_name = glb['symbolic_name'] + rm_conf = {'files': [sym_name]} + # figure out the destination name should be + spec_name = _get_component_by_symbolic_name(sym_name) + new_rm_conf = map_rm_conf_to_components(rm_conf) + # files should be empty, components should have 1 item + assert len(new_rm_conf['files']) == 0 + assert len(new_rm_conf['components']) == 1 + assert new_rm_conf['components'][0] == spec_name + + +def test_map_rm_conf_to_components_raw_cmds_files(): + ''' + Verify that all raw files/commands in uploader.json result as + components in the output + ''' + # commands + for cmd in uploader_json['commands']: + # run each possible command through the function + rm_conf = {'commands': [cmd['command']]} + sym_name = cmd['symbolic_name'] + # figure out the destination name should be + spec_name = _get_component_by_symbolic_name(sym_name) + new_rm_conf = map_rm_conf_to_components(rm_conf) + # commands should be empty, components should have 1 item + assert len(new_rm_conf['commands']) == 0 + assert len(new_rm_conf['components']) == 1 + assert new_rm_conf['components'][0] == spec_name + + # files + for fil in uploader_json['files']: + # run each possible file through the function + rm_conf = {'files': [fil['file']]} + sym_name = fil['symbolic_name'] + # figure out the destination name should be + spec_name = _get_component_by_symbolic_name(sym_name) + new_rm_conf = map_rm_conf_to_components(rm_conf) + # files should be 
+        # files should be empty, components should have 1 item,
+        # except for these, which cannot be mapped to specs;
+        # in that case, components is empty and these remain in files
+        if fil['file'] in ['/boot/efi/EFI/redhat/grubenv',
+                           '/boot/grub2/grubenv',
+                           '/var/log/redhat_access_proactive/redhat_access_proactive.log']:
+            assert len(new_rm_conf['files']) == 1
+            assert new_rm_conf['files'][0] == fil['file']
+            assert len(new_rm_conf['components']) == 0
+        else:
+            assert len(new_rm_conf['files']) == 0
+            assert len(new_rm_conf['components']) == 1
+            assert new_rm_conf['components'][0] == spec_name
+
+
+def test_map_rm_conf_to_components_invalid():
+    '''
+    Verify that commands/files that do not match anything in
+    uploader.json are left unchanged rather than mapped to components
+    '''
+    rm_conf = {'commands': ['random', 'value'], 'files': ['other', 'invalid', 'data']}
+    new_rm_conf = map_rm_conf_to_components(rm_conf)
+    # rm_conf should be unchanged
+    assert len(new_rm_conf['commands']) == 2
+    assert len(new_rm_conf['files']) == 3
+    assert len(new_rm_conf['components']) == 0
+    assert new_rm_conf['commands'] == rm_conf['commands']
+    assert new_rm_conf['files'] == rm_conf['files']
+
+
+@patch('insights.client.map_components._search_uploader_json')
+def test_rm_conf_empty(_search_uploader_json):
+    '''
+    Verify the function returns rm_conf unchanged if called
+    with an empty dict or None
+    '''
+    rm_conf = {}
+    new_rm_conf = map_rm_conf_to_components(rm_conf)
+    _search_uploader_json.assert_not_called()
+    assert new_rm_conf == {}
+
+    rm_conf = None
+    new_rm_conf = map_rm_conf_to_components(rm_conf)
+    _search_uploader_json.assert_not_called()
+    assert new_rm_conf is None
+
+
+@patch('insights.client.map_components.logger.warning')
+def test_log_long_key(logger_warning):
+    '''
+    Verify the conversion table is logged with proper spacing and
+    wrapping, and that unconverted specs are not logged
+    '''
+    rm_conf = {'commands': ["/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;",
+                            "/usr/bin/md5sum /etc/pki/product/69.pem",
+                            "ss_tupna"],
+               'files': ["/etc/sysconfig/virt-who",
+                         "/etc/yum.repos.d/fedora-cisco-openh264.repo",
+                         "krb5_conf_d"]}
+    map_rm_conf_to_components(rm_conf)
+    logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n /etc/origin/master /etc/pki /etc/ipa -type f\n -exec /usr/bin/openssl x509 -noout -enddate -in\n '{}' \\; -exec echo 'FileName= {}' \\;")
+    logger_warning.assert_any_call("- /usr/bin/md5sum /etc/pki/product/69.pem => md5chk_files")
+    logger_warning.assert_any_call("- ss_tupna => ss")
+    logger_warning.assert_any_call("- /etc/sysconfig/virt-who => sysconfig_virt_who")
+    logger_warning.assert_any_call("- krb5_conf_d => krb5")
+
+
+@patch('insights.client.map_components.logger.warning')
+def test_log_short_key(logger_warning):
+    '''
+    Verify the conversion table is logged without wrapping or spacing
+    when the key is short
+    '''
+    rm_conf = {'commands': ["ss_tupna"]}
+    map_rm_conf_to_components(rm_conf)
+    logger_warning.assert_any_call("If possible, commands and files specified in the blacklist configuration will be converted to Insights component specs that will be disabled as needed.")
+
+
+def test_components_added():
+    '''
+    Verify that the resulting component list is
+    an aggregation of the current list and the conversion results
+    with no duplicates.
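+
+    For example, converting 'ss_tupna' while
+    'insights.specs.default.DefaultSpecs.ss' is already listed in
+    'components' must not append a second copy of the ss component,
+    as the assertions below illustrate.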
+ ''' + rm_conf = {'commands': ["ss_tupna", + "/usr/bin/md5sum /etc/pki/product/69.pem"], + 'components': ["insights.specs.default.DefaultSpecs.ss", + "insights.specs.default.DefaultSpecs.sysconfig_virt_who"]} + results = map_rm_conf_to_components(rm_conf) + + assert results == {'commands': [], + 'files': [], + 'components': ["insights.specs.default.DefaultSpecs.ss", + "insights.specs.default.DefaultSpecs.sysconfig_virt_who", + "insights.specs.default.DefaultSpecs.md5chk_files"]} diff --git a/insights/uploader_json_map.json b/insights/uploader_json_map.json new file mode 100644 index 000000000..fa7c7a088 --- /dev/null +++ b/insights/uploader_json_map.json @@ -0,0 +1,3864 @@ +{ + "commands": [ + { + "command": "/usr/bin/abrt status --bare=True", + "pattern": [], + "symbolic_name": "abrt_status_bare" + }, + { + "command": "python -m insights.tools.cat --no-header aws_instance_id_doc", + "pattern": [], + "symbolic_name": "aws_instance_id_doc" + }, + { + "command": "python -m insights.tools.cat --no-header aws_instance_id_pkcs7", + "pattern": [], + "symbolic_name": "aws_instance_id_pkcs7" + }, + { + "command": "python -m insights.tools.cat --no-header azure_instance_type", + "pattern": [], + "symbolic_name": "azure_instance_type" + }, + { + "command": "/sbin/auditctl -s", + "pattern": [], + "symbolic_name": "auditctl_status" + }, + { + "command": "/sbin/blkid -c /dev/null", + "pattern": [], + "symbolic_name": "blkid" + }, + { + "command": "/usr/sbin/brctl show", + "pattern": [], + "symbolic_name": "brctl_show" + }, + { + "command": "/usr/bin/ceph health detail -f json", + "pattern": [], + "symbolic_name": "ceph_health_detail" + }, + { + "command": "/usr/bin/ceph df detail -f json", + "pattern": [], + "symbolic_name": "ceph_df_detail" + }, + { + "command": "/usr/bin/ceph osd dump -f json", + "pattern": [], + "symbolic_name": "ceph_osd_dump" + }, + { + "command": "/usr/bin/ceph osd df -f json", + "pattern": [], + "symbolic_name": "ceph_osd_df" + }, + { + "command": "/usr/bin/ceph osd tree -f json", + "pattern": [], + "symbolic_name": "ceph_osd_tree" + }, + { + "command": "/usr/bin/ceph -s -f json", + "pattern": [], + "symbolic_name": "ceph_s" + }, + { + "command": "/usr/bin/ceph -v", + "pattern": [], + "symbolic_name": "ceph_v" + }, + { + "command": "python -m insights.tools.cat --no-header ceph_insights", + "pattern": [], + "symbolic_name": "ceph_insights" + }, + { + "command": "/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;", + "pattern": [], + "symbolic_name": "certificates_enddate" + }, + { + "command": "/sbin/chkconfig --list", + "pattern": [], + "symbolic_name": "chkconfig" + }, + { + "command": "/usr/bin/chronyc sources", + "pattern": [], + "symbolic_name": "chronyc_sources" + }, + { + "command": "/usr/bin/cpupower -c all frequency-info", + "pattern": [], + "symbolic_name": "cpupower_frequency_info" + }, + { + "command": "/bin/date", + "pattern": [], + "symbolic_name": "date" + }, + { + "command": "/bin/date --utc", + "pattern": [], + "symbolic_name": "date_utc" + }, + { + "command": "/bin/df -al", + "pattern": [], + "symbolic_name": "df__al" + }, + { + "command": "/bin/df -alP", + "pattern": [], + "symbolic_name": "df__alP" + }, + { + "command": "/bin/df -li", + "pattern": [], + "symbolic_name": "df__li" + }, + { + "command": "/usr/bin/dig +dnssec . SOA", + "pattern": [], + "symbolic_name": "dig_dnssec" + }, + { + "command": "/usr/bin/dig +edns=0 . 
SOA", + "pattern": [], + "symbolic_name": "dig_edns" + }, + { + "command": "/usr/bin/dig +noedns . SOA", + "pattern": [], + "symbolic_name": "dig_noedns" + }, + { + "command": "/bin/dmesg", + "pattern": [ + " is now offline", + "AMD Secure Memory Encryption (SME) active", + "Amazon EC2", + "Brought up ", + "CIFS VFS: protocol revalidation - security settings mismatch", + "CSUM", + "CVE-2017-1000364", + "CVE-2018-14634", + "Dazed and confused, but trying to continue", + "Device is ineligible for IOMMU domain attach due to platform RMRR requirement", + "Dropping TSO", + "EDAC ", + "EFI", + "Efi", + "Emulex OneConnect OCe10100, FCoE Initiator", + "FEATURE IBPB_SUPPORT", + "FEATURE SPEC_CTRL", + "Ignoring BGRT: failed to map image header memory", + "Ignoring BGRT: failed to map image memory", + "Kernel page table isolation", + "L1TF", + "L1Tf", + "Linux version", + "Machine Check Exception", + "Machine check events logged", + "NUMA: ", + "Node 0 CPUs: ", + "QLogic QLE2692 - QLogic 16Gb FC Dual-port HBA", + "SMBIOS ", + "SPLXMOD: SPLX 3.0: KHM loaded. Version [30118]", + "SPLXMOD: SPLX 3.0: KHM loaded. Version [30119]", + "TECH PREVIEW: NVMe over FC may not be fully supported.", + "Uhhuh. NMI received for unknown reason", + "VPD access disabled", + "WRITE SAME failed. Manually zeroing", + "Warning: QLogic ISP3XXX Network Driver - this hardware has not undergone testing by Red Hat and might not be certified", + "__cpufreq_add_dev", + "blocked FC remote port time out: removing target and saving binding", + "crashkernel reservation failed", + "crashkernel=auto resulted in zero bytes of reserved memory", + "e1000: E1000 MODULE IS NOT SUPPORTED", + "efi", + "fw=8.08.", + "ixgbevf: Unknown parameter `InterruptThrottleRate'", + "l1tf", + "mce: ", + "netconsole: network logging started", + "page allocation failure: order:", + "resetting", + "smpboot: CPU ", + "temperature above threshold", + "the DIE domain not a subset of the NUMA domain", + "tx hang", + "vmxnet3", + "vpd r/w failed", + "x86/pti" + ], + "symbolic_name": "dmesg" + }, + { + "command": "/usr/sbin/dmidecode", + "pattern": [], + "symbolic_name": "dmidecode" + }, + { + "command": "/usr/sbin/dmidecode -s system-uuid", + "pattern": [], + "symbolic_name": "bios_uuid" + }, + { + "command": "/usr/sbin/dmsetup info -C", + "pattern": [], + "symbolic_name": "dmsetup_info" + }, + { + "command": "/usr/bin/docker info", + "pattern": [], + "symbolic_name": "docker_info" + }, + { + "command": "/usr/bin/docker ps --all --no-trunc", + "pattern": [], + "symbolic_name": "docker_list_containers" + }, + { + "command": "/usr/bin/docker images --all --no-trunc --digests", + "pattern": [], + "symbolic_name": "docker_list_images" + }, + { + "command": "/bin/du -s -k /var/lib/candlepin/activemq-artemis", + "pattern": [], + "symbolic_name": "du_dirs" + }, + { + "command": "/sbin/ethtool", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool" + }, + { + "command": "/sbin/ethtool -S", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool_S" + }, + { + "command": "/sbin/ethtool -T", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool_T" + }, + { + "command": "/sbin/ethtool -g", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool_g" + }, + { + "command": "/sbin/ethtool -i", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool_i" + }, + { + "command": "/sbin/ethtool -k", + "pattern": [], + "pre_command": "iface", + "symbolic_name": "ethtool_k" + }, + { + "command": 
"/usr/bin/facter", + "pattern": [], + "symbolic_name": "facter" + }, + { + "command": "/usr/bin/qpid-stat -g --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671", + "pattern": [], + "symbolic_name": "qpid_stat_g" + }, + { + "command": "/bin/fc-match -sv 'sans:regular:roman' family fontformat", + "pattern": [], + "symbolic_name": "fc_match" + }, + { + "command": "/usr/sbin/fcoeadm -i", + "pattern": [], + "symbolic_name": "fcoeadm_i" + }, + { + "command": "/bin/findmnt -lo+PROPAGATION", + "pattern": [], + "symbolic_name": "findmnt_lo_propagation" + }, + { + "command": "/usr/bin/firewall-cmd --list-all-zones", + "pattern": [], + "symbolic_name": "firewall_cmd_list_all_zones" + }, + { + "command": "/usr/bin/getconf PAGE_SIZE", + "pattern": [], + "symbolic_name": "getconf_pagesize" + }, + { + "command": "/usr/sbin/getenforce", + "pattern": [], + "symbolic_name": "getenforce" + }, + { + "command": "/usr/sbin/getsebool -a", + "pattern": [], + "symbolic_name": "getsebool" + }, + { + "command": "/usr/sbin/gluster volume info", + "pattern": [], + "symbolic_name": "gluster_v_info" + }, + { + "command": "/usr/sbin/gluster peer status", + "pattern": [], + "symbolic_name": "gluster_peer_status" + }, + { + "command": "/usr/sbin/gluster volume status", + "pattern": [], + "symbolic_name": "gluster_v_status" + }, + { + "command": "/bin/ls -l /boot/grub/grub.conf", + "pattern": [], + "symbolic_name": "grub1_config_perms" + }, + { + "command": "/bin/ls -l /boot/grub2/grub.cfg", + "pattern": [], + "symbolic_name": "grub_config_perms" + }, + { + "command": "/usr/sbin/grubby --default-index", + "pattern": [], + "symbolic_name": "grubby_default_index" + }, + { + "command": "/sbin/grubby --default-kernel", + "pattern": [], + "symbolic_name": "grubby_default_kernel" + }, + { + "command": "/usr/bin/crontab -l -u heat", + "pattern": [ + "heat-manage" + ], + "symbolic_name": "heat_crontab" + }, + { + "command": "/bin/hostname", + "pattern": [], + "symbolic_name": "hostname_default" + }, + { + "command": "/bin/hostname -f", + "pattern": [], + "symbolic_name": "hostname" + }, + { + "command": "/bin/hostname -I", + "pattern": [], + "symbolic_name": "ip_addresses" + }, + { + "command": "/bin/hostname -s", + "pattern": [], + "symbolic_name": "hostname_short" + }, + { + "command": "/usr/sbin/httpd -V", + "pattern": [], + "symbolic_name": "httpd_V" + }, + { + "command": "python -m insights.tools.cat --no-header httpd_on_nfs", + "pattern": [], + "symbolic_name": "httpd_on_nfs" + }, + { + "command": "/usr/sbin/httpd -M", + "pattern": [], + "symbolic_name": "httpd_M" + }, + { + "command": "/bin/rpm -qa --root={CONTAINER_MOUNT_POINT} --qf='\\{\"name\":\"%{NAME}\",\"epoch\":\"%{EPOCH}\",\"version\":\"%{VERSION}\",\"release\":\"%{RELEASE}\",\"arch\":\"%{ARCH}\",\"installtime\":\"%{INSTALLTIME:date}\",\"buildtime\":\"%{BUILDTIME}\",\"vendor\":\"%{VENDOR}\",\"buildhost\":\"%{BUILDHOST}\",\"sigpgp\":\"%{SIGPGP:pgpsig}\"\\}\n'", + "pattern": [], + "symbolic_name": "installed_rpms", + "image": true + }, + { + "command": "/sbin/ip -s -d link", + "pattern": [], + "symbolic_name": "ip_s_link" + }, + { + "command": "/sbin/ip6tables-save", + "pattern": [], + "symbolic_name": "ip6tables" + }, + { + "command": "/sbin/ip addr", + "pattern": [], + "symbolic_name": "ip_addr" + }, + { + "command": "/sbin/ip route show table all", + "pattern": [], + "symbolic_name": "ip_route_show_table_all" + }, + { + "command": "/usr/bin/ipcs -m", + "pattern": [], + "symbolic_name": "ipcs_m" + }, + { + "command": "/usr/bin/ipcs 
-m -p", + "pattern": [], + "symbolic_name": "ipcs_m_p" + }, + { + "command": "/usr/bin/ipcs -s", + "pattern": [], + "symbolic_name": "ipcs_s" + }, + { + "command": "/sbin/iptables-save", + "pattern": [], + "symbolic_name": "iptables" + }, + { + "command": "/sbin/ip -4 neighbor show nud all", + "pattern": [], + "symbolic_name": "ipv4_neigh" + }, + { + "command": "/sbin/ip -6 neighbor show nud all", + "pattern": [], + "symbolic_name": "ipv6_neigh" + }, + { + "command": "/usr/sbin/iscsiadm -m session", + "pattern": [], + "symbolic_name": "iscsiadm_m_session" + }, + { + "command": "/usr/bin/crontab -l -u keystone", + "pattern": [ + "heat-manage", + "keystone-manage" + ], + "symbolic_name": "keystone_crontab" + }, + { + "command": "/usr/sbin/kpatch list", + "pattern": [], + "symbolic_name": "kpatch_list" + }, + { + "command": "/usr/bin/find -L /lib /lib64 -name 'libkeyutils.so*'", + "pattern": [], + "symbolic_name": "libkeyutils" + }, + { + "command": "/usr/bin/find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x \"{}\" \\;", + "pattern": [], + "symbolic_name": "libkeyutils_objdumps" + }, + { + "command": "/usr/bin/file -L /etc/localtime", + "pattern": [], + "symbolic_name": "localtime" + }, + { + "command": "/usr/bin/lpstat -p", + "pattern": [], + "symbolic_name": "lpstat_p" + }, + { + "command": "/bin/ls -lanR /boot", + "pattern": [], + "symbolic_name": "ls_boot" + }, + { + "command": "/bin/ls -lanR /dev", + "pattern": [], + "symbolic_name": "ls_dev" + }, + { + "command": "/bin/ls -lanR /dev/disk", + "pattern": [], + "symbolic_name": "ls_disk" + }, + { + "command": "/bin/ls -lan /sys/devices/system/edac/mc", + "pattern": [], + "symbolic_name": "ls_edac_mc" + }, + { + "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/sysconfig", + "pattern": [], + "symbolic_name": "ls_etc" + }, + { + "command": "/bin/ls -lanR /lib/firmware", + "pattern": [], + "symbolic_name": "ls_lib_firmware" + }, + { + "command": "/bin/ls -lanR /sys/firmware", + "pattern": [], + "symbolic_name": "ls_sys_firmware" + }, + { + "command": "/bin/ls -la /var/lib/mongodb", + "pattern": [], + "symbolic_name": "ls_var_lib_mongodb" + }, + { + "command": "/bin/ls -laR /var/lib/nova/instances", + "pattern": [], + "symbolic_name": "ls_R_var_lib_nova_instances" + }, + { + "command": "/bin/ls -laRZ /var/lib/nova/instances", + "pattern": [], + "symbolic_name": "ls_var_lib_nova_instances" + }, + { + "command": "/bin/ls -ld /var/opt/mssql", + "pattern": [], + "symbolic_name": "ls_var_opt_mssql" + }, + { + "command": "/bin/ls -lan /usr/lib64", + "pattern": [ + "total" + ], + "symbolic_name": "ls_usr_lib64" + }, + { + "command": "/bin/ls -ln /usr/sbin", + "pattern": [ + "total" + ], + "symbolic_name": "ls_usr_sbin" + }, + { + "command": "/bin/ls -la /var/log /var/log/audit", + "pattern": [], + "symbolic_name": "ls_var_log" + }, + { + "command": "/bin/ls -la /var/opt/mssql/log", + "pattern": [], + "symbolic_name": "ls_var_opt_mssql_log" + }, + { + "command": "/bin/ls -la /dev/null /var/www", + "pattern": [], + "symbolic_name": "ls_var_www" + }, + { + "command": "/bin/ls -lnL /var/run", + "pattern": [], + "symbolic_name": "ls_var_run" + }, + { + "command": "/bin/ls -ln /var/spool/postfix/maildrop", + "pattern": [], + "symbolic_name": "ls_var_spool_postfix_maildrop" + }, + { + "command": "/bin/ls -ln /var/spool/clientmqueue", + "pattern": [], + "symbolic_name": "ls_var_spool_clientmq" + }, + { + "command": "/bin/ls -l 
/var/lib/cni/networks/openshift-sdn", + "pattern": [], + "symbolic_name": "ls_ocp_cni_openshift_sdn" + }, + { + "command": "/bin/ls -l /var/lib/origin/openshift.local.volumes/pods", + "pattern": [], + "symbolic_name": "ls_origin_local_volumes_pods" + }, + { + "command": "/bin/ls -lan /", + "pattern": [], + "symbolic_name": "ls_osroot" + }, + { + "command": "/bin/ls -lan /run/systemd/generator", + "pattern": [], + "symbolic_name": "ls_run_systemd_generator" + }, + { + "command": "/bin/ls -ln /var/tmp", + "pattern": [ + "/var/tmp", + "foreman-ssh-cmd" + ], + "symbolic_name": "ls_var_tmp" + }, + { + "command": "/bin/lsblk", + "pattern": [], + "symbolic_name": "lsblk" + }, + { + "command": "/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO", + "pattern": [], + "symbolic_name": "lsblk_pairs" + }, + { + "command": "/usr/bin/lscpu", + "pattern": [], + "symbolic_name": "lscpu" + }, + { + "command": "/sbin/lsmod", + "pattern": [], + "symbolic_name": "lsmod" + }, + { + "command": "/usr/sbin/lsof", + "pattern": [ + "(deleted)", + "/var/log/journal", + "COMMAND", + "libcrypto", + "liblvm2cmd.so", + "libssl", + "libssl.so", + "lsnrctl", + "ovs-vswit", + "rgw_swift", + "tnslsnr" + ], + "symbolic_name": "lsof" + }, + { + "command": "/sbin/lspci -k", + "pattern": [], + "symbolic_name": "lspci" + }, + { + "command": "/sbin/lspci -k", + "pattern": [], + "symbolic_name": "lspci_kernel" + }, + { + "command": "/usr/sap/hostctrl/exe/lssap", + "pattern": [], + "symbolic_name": "lssap" + }, + { + "command": "/usr/sbin/lvmconfig --type full", + "pattern": [], + "symbolic_name": "lvmconfig" + }, + { + "command": "/usr/sbin/lvm dumpconfig --type full", + "pattern": [], + "symbolic_name": "lvmconfig" + }, + { + "command": "/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance", + "pattern": [ + "******", + "CreationClassName", + "FullQualifiedHostname", + "Hostname", + "InstanceName", + "SID", + "SapVersionInfo", + "SystemNumber" + ], + "symbolic_name": "saphostctl_getcimobject_sapinstance" + }, + { + "command": "/usr/sap/hostctrl/exe/saphostexec -status", + "pattern": [], + "symbolic_name": "saphostexec_status" + }, + { + "command": "/usr/sap/hostctrl/exe/saphostexec -version", + "pattern": [], + "symbolic_name": "saphostexec_version" + }, + { + "command": "/usr/bin/lsscsi", + "pattern": [], + "symbolic_name": "lsscsi" + }, + { + "command": "/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"", + "pattern": [], + "symbolic_name": "lvs_noheadings" + }, + { + "command": "/bin/awk -F':' '{ if($3 > max) max = $3 } END { print max }' /etc/passwd", + "pattern": [], + "symbolic_name": "max_uid" + }, + { + "command": "/usr/bin/md5sum /etc/pki/product/69.pem", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + { + "command": "/usr/bin/md5sum /etc/pki/product-default/69.pem", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + { + "command": "/usr/bin/md5sum /usr/lib/libsoftokn3.so", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + { + "command": "/usr/bin/md5sum /usr/lib64/libsoftokn3.so", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + { + "command": "/usr/bin/md5sum /usr/lib/libfreeblpriv3.so", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + 
{ + "command": "/usr/bin/md5sum /usr/lib64/libfreeblpriv3.so", + "pattern": [], + "symbolic_name": "md5chk_files" + }, + { + "command": "/bin/mount", + "pattern": [], + "symbolic_name": "mount" + }, + { + "command": "/sbin/modinfo i40e", + "pattern": [], + "symbolic_name": "modinfo_i40e" + }, + { + "command": "/sbin/modinfo igb", + "pattern": [], + "symbolic_name": "modinfo_igb" + }, + { + "command": "/sbin/modinfo ixgbe", + "pattern": [], + "symbolic_name": "modinfo_ixgbe" + }, + { + "command": "/sbin/modinfo veth", + "pattern": [], + "symbolic_name": "modinfo_veth" + }, + { + "command": "/sbin/modinfo vmxnet3", + "pattern": [], + "symbolic_name": "modinfo_vmxnet3" + }, + { + "command": "/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \\;", + "pattern": [], + "symbolic_name": "multicast_querier" + }, + { + "command": "/sbin/multipath -v4 -ll", + "pattern": [], + "symbolic_name": "multipath__v4__ll" + }, + { + "command": "/bin/lsinitrd -f /etc/multipath.conf", + "pattern": [], + "symbolic_name": "multipath_conf_initramfs" + }, + { + "command": "/bin/mysqladmin variables", + "pattern": [], + "symbolic_name": "mysqladmin_vars" + }, + { + "command": "/usr/sbin/named-checkconf -p", + "pattern": [ + "DISABLE-ALGORITHMS", + "DISABLE-DS-DIGESTS", + "DNSSEC-ENABLE", + "DSA", + "Disable-Algorithms", + "Disable-Ds-Digests", + "Dnssec-Enable", + "Dsa", + "ECCGOST", + "ECDSAP256SHA256", + "ECDSAP384SHA384", + "Eccgost", + "Ecdsap256Sha256", + "Ecdsap384Sha384", + "GOST", + "Gost", + "NSEC3DSA", + "NSEC3RSASHA1", + "Nsec3Dsa", + "Nsec3Rsasha1", + "RSAMD5", + "RSASHA1", + "RSASHA256", + "RSASHA512", + "Rsamd5", + "Rsasha1", + "Rsasha256", + "Rsasha512", + "SHA-1", + "SHA-256", + "SHA-384", + "SHA1", + "SHA256", + "SHA384", + "Sha-1", + "Sha-256", + "Sha-384", + "Sha1", + "Sha256", + "Sha384", + "disable-algorithms", + "disable-ds-digests", + "dnssec-enable", + "dsa", + "eccgost", + "ecdsap256sha256", + "ecdsap384sha384", + "gost", + "nsec3dsa", + "nsec3rsasha1", + "rsamd5", + "rsasha1", + "rsasha256", + "rsasha512", + "sha-1", + "sha-256", + "sha-384", + "sha1", + "sha256", + "sha384", + "}" + ], + "symbolic_name": "named_checkconf_p" + }, + { + "command": "/bin/ls /var/run/netns", + "pattern": [], + "symbolic_name": "namespace" + }, + { + "command": "/usr/bin/ndctl list -Ni", + "pattern": [], + "symbolic_name": "ndctl_list_Ni" + }, + { + "command": "/bin/netstat -neopa", + "pattern": [], + "symbolic_name": "netstat" + }, + { + "command": "/bin/netstat -i", + "pattern": [], + "symbolic_name": "netstat_i" + }, + { + "command": "/bin/netstat -s", + "pattern": [], + "symbolic_name": "netstat_s" + }, + { + "command": "/bin/netstat -agn", + "pattern": [], + "symbolic_name": "netstat__agn" + }, + { + "command": "/usr/bin/nmcli conn show", + "pattern": [], + "symbolic_name": "nmcli_conn_show" + }, + { + "command": "/usr/bin/nmcli dev show", + "pattern": [], + "symbolic_name": "nmcli_dev_show" + }, + { + "command": "/usr/bin/crontab -l -u nova", + "pattern": [], + "symbolic_name": "nova_crontab" + }, + { + "command": "/usr/bin/id -u nova", + "pattern": [], + "symbolic_name": "nova_uid" + }, + { + "command": "/usr/bin/id -u nova_migration", + "pattern": [], + "symbolic_name": "nova_migration_uid" + }, + { + "command": "/usr/bin/hammer --config /root/.hammer/cli.modules.d/foreman.yml --output csv task list --search 'state=running AND ( label=Actions::Candlepin::ListenOnCandlepinEvents OR label=Actions::Katello::EventQueue::Monitor )'", + "pattern": [], + "symbolic_name": 
"hammer_task_list" + }, + { + "command": "/usr/sbin/ntpq -c 'rv 0 leap'", + "pattern": [], + "symbolic_name": "ntpq_leap" + }, + { + "command": "/usr/sbin/ntpq -pn", + "pattern": [], + "symbolic_name": "ntpq_pn" + }, + { + "command": "/usr/sbin/ntptime", + "pattern": [], + "symbolic_name": "ntptime" + }, + { + "command": "/bin/grep -c '^[[:digit:]]' /etc/passwd /etc/group", + "pattern": [], + "symbolic_name": "numeric_user_group_name" + }, + { + "command": "/usr/bin/oc get clusterrole --config /etc/origin/master/admin.kubeconfig", + "pattern": [], + "symbolic_name": "oc_get_clusterrole_with_config" + }, + { + "command": "/usr/bin/oc get clusterrolebinding --config /etc/origin/master/admin.kubeconfig", + "pattern": [], + "symbolic_name": "oc_get_clusterrolebinding_with_config" + }, + { + "command": "/usr/bin/ovs-vsctl -t 5 get Open_vSwitch . other_config", + "pattern": [], + "symbolic_name": "openvswitch_other_config" + }, + { + "command": "/usr/bin/ovs-vsctl list bridge", + "pattern": [], + "symbolic_name": "ovs_vsctl_list_bridge" + }, + { + "command": "/usr/bin/ovs-vsctl show", + "pattern": [], + "symbolic_name": "ovs_vsctl_show" + }, + { + "command": "/usr/bin/passenger-status", + "pattern": [], + "symbolic_name": "passenger_status" + }, + { + "command": "/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f", + "pattern": [], + "symbolic_name": "pci_rport_target_disk_paths" + }, + { + "command": "/usr/sbin/pcs quorum status", + "pattern": [], + "symbolic_name": "pcs_quorum_status" + }, + { + "command": "/usr/sbin/pcs status", + "pattern": [], + "symbolic_name": "pcs_status" + }, + { + "command": "/bin/ps alxwww", + "pattern": [ + "/usr/bin/gnome-shell", + "/usr/bin/openshift start master", + "/usr/bin/openshift start node", + "COMMAND", + "bash", + "chronyd", + "corosync", + "docker", + "ntpd", + "openshift start master api", + "openshift start master controllers", + "openshift start node", + "ora", + "pacemakerd", + "pcsd", + "spausedd", + "tuned" + ], + "symbolic_name": "ps_alxwww" + }, + { + "command": "/bin/ps aux", + "pattern": [ + "/usr/bin/docker", + "/usr/bin/docker daemon", + "/usr/bin/docker-current", + "/usr/bin/docker-current daemon", + "/usr/bin/dockerd-current", + "/usr/bin/gnome-shell", + "/usr/bin/hyperkube kubelet", + "/usr/bin/openshift start master", + "/usr/bin/openshift start node", + "COMMAND", + "STAP/8.2", + "bash", + "ceph-osd", + "chronyd", + "corosync", + "docker", + "mysqld", + "ntpd", + "oc observe csr", + "openshift start master api", + "openshift start master controllers", + "openshift start node", + "ora", + "pacemakerd", + "pcsd", + "phc2sys", + "postgres", + "ptp4l", + "spausedd", + "tuned" + ], + "symbolic_name": "ps_aux" + }, + { + "command": "/bin/ps auxcww", + "pattern": [], + "symbolic_name": "ps_auxcww" + }, + { + "command": "/bin/ps auxww", + "pattern": [ + "/opt/perf/bin/midaemon", + "/sbin/multipathd", + "/usr/bin/gnome-shell", + "/usr/bin/openshift start master", + "/usr/bin/openshift start node", + "/usr/bin/teamd", + "/usr/sbin/fcoemon --syslog", + "COMMAND", + "bash", + "catalina.base", + "ceilometer-coll", + "chronyd", + "cmirrord", + "corosync", + "docker", + "elasticsearch", + "goferd", + "greenplum", + "httpd", + "iscsid", + "multipath", + "nfs-server", + "nfsd", + "nginx", + "nova-compute", + "ntpd", + "octavia-worker", + "openshift start master api", + "openshift start master controllers", + "openshift start node", + "ora", + "pacemakerd", + "pcsd", + "pkla-check-auth", + "postgres", + "smbd", + "spausedd", + 
"target_completi", + "taskomaticd", + "tgtd", + "tuned", + "virt-who" + ], + "symbolic_name": "ps_auxww" + }, + { + "command": "/bin/ps -ef", + "pattern": [ + "/usr/bin/gnome-shell", + "/usr/bin/openshift start master", + "/usr/bin/openshift start node", + "CMD", + "bash", + "chronyd", + "corosync", + "docker", + "neutron-ns-metadata-proxy", + "nginx: master process", + "nginx: worker process", + "ntpd", + "openshift start master api", + "openshift start master controllers", + "openshift start node", + "ora", + "pacemakerd", + "pcsd", + "spausedd", + "tuned" + ], + "symbolic_name": "ps_ef" + }, + { + "command": "/bin/ps -eo pid,ppid,comm", + "pattern": [], + "symbolic_name": "ps_eo" + }, + { + "command": "/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"", + "pattern": [], + "symbolic_name": "pvs_noheadings" + }, + { + "command": "/usr/sbin/rabbitmqctl list_queues name messages consumers auto_delete", + "pattern": [], + "symbolic_name": "rabbitmq_queues" + }, + { + "command": "/usr/sbin/rabbitmqctl report", + "pattern": [], + "symbolic_name": "rabbitmq_report" + }, + { + "command": "/usr/sbin/rabbitmqctl list_users", + "pattern": [], + "symbolic_name": "rabbitmq_users" + }, + { + "command": "/usr/bin/readlink -e /etc/mtab", + "pattern": [], + "symbolic_name": "readlink_e_etc_mtab" + }, + { + "command": "/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem", + "pattern": [], + "symbolic_name": "readlink_e_shift_cert_client" + }, + { + "command": "/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem", + "pattern": [], + "symbolic_name": "readlink_e_shift_cert_server" + }, + { + "command": "/usr/bin/rhn-schema-version", + "pattern": [], + "symbolic_name": "rhn_schema_version" + }, + { + "command": "python -m insights.tools.cat --no-header rhev_data_center", + "pattern": [], + "symbolic_name": "rhev_data_center" + }, + { + "command": "/usr/sbin/rndc status", + "pattern": [], + "symbolic_name": "rndc_status" + }, + { + "command": "/usr/bin/crontab -l -u root", + "pattern": [ + "heat-manage" + ], + "symbolic_name": "root_crontab" + }, + { + "command": "/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", + "pattern": [], + "symbolic_name": "rpm_V_packages" + }, + { + "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", + "pattern": [], + "symbolic_name": "satellite_mongodb_storage_engine" + }, + { + "command": "/usr/bin/sealert -l \"*\"", + "pattern": [], + "symbolic_name": "sealert" + }, + { + "command": "/usr/sbin/sestatus -b", + "pattern": [], + "symbolic_name": "sestatus" + }, + { + "command": "/usr/bin/smbstatus -p", + "pattern": [], + "symbolic_name": "smbstatus_p" + }, + { + "command": "/usr/bin/scl --list", + "pattern": [], + "symbolic_name": "software_collections_list" + }, + { + "command": "/usr/sbin/subscription-manager identity", + "pattern": [], + "symbolic_name": "subscription_manager_id" + }, + { + "command": "/usr/bin/find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \\;", + "pattern": [ + "ID:" + ], + "symbolic_name": "subscription_manager_installed_product_ids" + }, + { + "command": "/usr/sbin/ss -tupna", + "pattern": [], + "symbolic_name": "ss" + }, + { + "command": "/usr/sbin/ss -tupna", + "pattern": [], + "symbolic_name": "ss_tupna" + }, + { + "command": "/bin/ls -l /etc/ssh/sshd_config", + "pattern": [], + "symbolic_name": "sshd_config_perms" + }, + { + "command": 
"/sbin/sysctl -a", + "pattern": [], + "symbolic_name": "sysctl" + }, + { + "command": "/bin/systemctl cat rpcbind.socket", + "pattern": [], + "symbolic_name": "systemctl_cat_rpcbind_socket" + }, + { + "command": "/bin/systemctl show openstack-cinder-volume", + "pattern": [], + "symbolic_name": "systemctl_cinder_volume" + }, + { + "command": "/bin/systemctl list-unit-files", + "pattern": [], + "symbolic_name": "systemctl_list_unit_files" + }, + { + "command": "/bin/systemctl list-units", + "pattern": [], + "symbolic_name": "systemctl_list_units" + }, + { + "command": "/bin/systemctl show mariadb", + "pattern": [], + "symbolic_name": "systemctl_mariadb" + }, + { + "command": "/bin/systemctl show qpidd", + "pattern": [], + "symbolic_name": "systemctl_qpidd" + }, + { + "command": "/bin/systemctl show qdrouterd", + "pattern": [], + "symbolic_name": "systemctl_qdrouterd" + }, + { + "command": "/bin/systemctl show httpd", + "pattern": [], + "symbolic_name": "systemctl_httpd" + }, + { + "command": "/bin/systemctl show nginx", + "pattern": [], + "symbolic_name": "systemctl_nginx" + }, + { + "command": "/bin/systemctl show smart_proxy_dynflow_core", + "pattern": [], + "symbolic_name": "systemctl_smartpdc" + }, + { + "command": "/bin/systemctl show *.service", + "pattern": [], + "symbolic_name": "systemctl_show_all_services" + }, + { + "command": "/bin/systemctl show *.target", + "pattern": [], + "symbolic_name": "systemctl_show_target" + }, + { + "command": "/bin/systemd-analyze blame", + "pattern": [], + "symbolic_name": "systemd_analyze_blame" + }, + { + "command": "/usr/bin/systemctl cat docker.service", + "pattern": [], + "symbolic_name": "systemd_docker" + }, + { + "command": "/usr/bin/systemctl cat atomic-openshift-node.service", + "pattern": [], + "symbolic_name": "systemd_openshift_node" + }, + { + "command": "/bin/systool -b scsi -v", + "pattern": [], + "symbolic_name": "systool_b_scsi_v" + }, + { + "command": "/usr/bin/find /usr/share -maxdepth 1 -name 'tomcat*' -exec /bin/grep -R -s 'VirtualDirContext' --include '*.xml' '{}' +", + "pattern": [], + "symbolic_name": "tomcat_vdc_fallback" + }, + { + "command": "/usr/sbin/tuned-adm list", + "pattern": [], + "symbolic_name": "tuned_adm" + }, + { + "command": "/bin/uname -a", + "pattern": [], + "symbolic_name": "uname" + }, + { + "command": "/usr/bin/uptime", + "pattern": [], + "symbolic_name": "uptime" + }, + { + "command": "/usr/bin/vdo status", + "pattern": [], + "symbolic_name": "vdo_status" + }, + { + "command": "/sbin/vgdisplay -vv", + "pattern": [], + "symbolic_name": "vgdisplay" + }, + { + "command": "/sbin/vgs --nameprefixes --noheadings --separator='|' -a -o vg_all --config=\"global{locking_type=0}\"", + "pattern": [], + "symbolic_name": "vgs_noheadings" + }, + { + "command": "/usr/bin/virsh --readonly list --all", + "pattern": [], + "symbolic_name": "virsh_list_all" + }, + { + "command": "/usr/sbin/virt-what", + "pattern": [], + "symbolic_name": "virt_what" + }, + { + "command": "/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit", + "pattern": [], + "symbolic_name": "woopsie" + }, + { + "command": "yum -C --noplugins list available", + "pattern": [], + "symbolic_name": "yum_list_available" + }, + { + "command": "yum -C --noplugins list installed", + "pattern": [], + "symbolic_name": "yum_list_installed" + }, + { + "command": "/usr/bin/yum -C --noplugins repolist", + "pattern": [], + "symbolic_name": "yum_repolist" + }, + { + "command": "/usr/sbin/zdump -v /etc/localtime -c 2019,2039", + "pattern": [], 
+ "symbolic_name": "zdump_v" + } + ], + "files": [ + { + "file": "/root/.config/openshift/hosts", + "pattern": [ + "[", + "openshift_use_crio" + ], + "symbolic_name": "openshift_hosts" + }, + { + "file": "/etc/redhat-access-insights/machine-id", + "pattern": [], + "symbolic_name": "machine_id1" + }, + { + "file": "/etc/redhat_access_proactive/machine-id", + "pattern": [], + "symbolic_name": "machine_id2" + }, + { + "file": "/etc/machine-id", + "pattern": [], + "symbolic_name": "etc_machine_id" + }, + { + "file": "/proc/1/cgroup", + "pattern": [], + "symbolic_name": "init_process_cgroup" + }, + { + "file": "/etc/insights-client/machine-id", + "pattern": [], + "symbolic_name": "machine_id3" + }, + { + "file": "/var/log/audit/audit.log", + "pattern": [ + "comm=\"logrotate\" path=\"/var/log/candlepin", + "comm=\"virtlogd\" name=\"console.log\"", + "type=AVC" + ], + "symbolic_name": "audit_log" + }, + { + "file": "/etc/cloud/cloud.cfg.d/99-custom-networking.cfg", + "pattern": [], + "symbolic_name": "cloud_init_custom_network" + }, + { + "file": "/var/log/cloud-init.log", + "pattern": [ + "401", + "Attempting to load yaml from string of length 59 with allowed root types", + "Failed loading yaml blob. Invalid format at line 1 column 1", + "Network config is likely broken", + "No available network renderers found", + "Read 59 bytes from /etc/cloud/cloud.cfg.d/99-datasource.cfg", + "Unable to render networking", + "WARNING", + "bad status code", + "failed", + "http://169.254.169.254", + "sysconfig", + "url_helper.py" + ], + "symbolic_name": "cloud_init_log" + }, + { + "file": "/etc/audit/auditd.conf", + "pattern": [], + "symbolic_name": "auditd_conf" + }, + { + "file": "/sys/fs/selinux/avc/hash_stats", + "pattern": [], + "symbolic_name": "avc_hash_stats" + }, + { + "file": "/sys/fs/selinux/avc/cache_threshold", + "pattern": [], + "symbolic_name": "avc_cache_threshold" + }, + { + "file": "/proc/net/bonding/()*bond.*", + "pattern": [], + "symbolic_name": "bond" + }, + { + "file": "/var/log/tomcat/()*catalina\\.out", + "pattern": [ + "NoCobblerTokenException: We had an error trying to login." + ], + "symbolic_name": "catalina_out" + }, + { + "file": "/var/log/tomcat6/()*catalina\\.out", + "pattern": [ + "NoCobblerTokenException: We had an error trying to login." + ], + "symbolic_name": "catalina_out" + }, + { + "file": "/tomcat-logs/tomcat/()*catalina\\.out", + "pattern": [ + "NoCobblerTokenException: We had an error trying to login." + ], + "symbolic_name": "catalina_out" + }, + { + "file": "/tomcat-logs/tomcat6/()*catalina\\.out", + "pattern": [ + "NoCobblerTokenException: We had an error trying to login." 
+ ], + "symbolic_name": "catalina_out" + }, + { + "file": "/proc/driver/cciss/()*cciss.*", + "pattern": [], + "symbolic_name": "cciss" + }, + { + "file": "/sys/bus/usb/drivers/cdc_wdm/module/refcnt", + "pattern": [], + "symbolic_name": "cdc_wdm" + }, + { + "file": "/etc/ceilometer/ceilometer.conf", + "pattern": [], + "symbolic_name": "ceilometer_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf", + "pattern": [], + "symbolic_name": "ceilometer_conf" + }, + { + "file": "/var/log/ceilometer/compute.log", + "pattern": [ + "Cannot inspect data of" + ], + "symbolic_name": "ceilometer_compute_log" + }, + { + "file": "/var/log/containers/ceilometer/compute.log", + "pattern": [ + "Cannot inspect data of" + ], + "symbolic_name": "ceilometer_compute_log" + }, + { + "file": "/var/log/ceilometer/collector.log", + "pattern": [ + "DBDataError", + "ERROR", + "Out of range value for column", + "pymysql.err.DataError" + ], + "symbolic_name": "ceilometer_collector_log" + }, + { + "file": "/var/log/containers/ceilometer/collector.log", + "pattern": [ + "DBDataError", + "ERROR", + "Out of range value for column", + "pymysql.err.DataError" + ], + "symbolic_name": "ceilometer_collector_log" + }, + { + "file": "/etc/ceph/ceph.conf", + "pattern": [ + "[", + "rgw_swift_account_in_url" + ], + "symbolic_name": "ceph_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", + "pattern": [ + "[", + "rgw_swift_account_in_url" + ], + "symbolic_name": "ceph_conf" + }, + { + "file": "/var/log/ceph/()*ceph-osd.*\\.log$", + "pattern": [ + "common/Thread.cc" + ], + "symbolic_name": "ceph_osd_log" + }, + { + "file": "/proc/cgroups", + "pattern": [], + "symbolic_name": "cgroups" + }, + { + "file": "/etc/chrony.conf", + "pattern": [], + "symbolic_name": "chrony_conf" + }, + { + "file": "/var/lib/pacemaker/cib/cib.xml", + "pattern": [], + "symbolic_name": "cib_xml" + }, + { + "file": "/etc/cinder/cinder.conf", + "pattern": [], + "symbolic_name": "cinder_conf" + }, + { + "file": "/var/log/cinder/cinder-api.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "Too many connections" + ], + "symbolic_name": "cinder_api_log" + }, + { + "file": "/var/log/containers/cinder/cinder-api.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "Too many connections" + ], + "symbolic_name": "cinder_api_log" + }, + { + "file": "/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", + "pattern": [], + "symbolic_name": "cinder_conf" + }, + { + "file": "/var/log/cinder/volume.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Image cloning unsuccessful for image", + "Message: NFS file could not be discovered.", + "Timed out waiting for RPC response", + "[Errno 24] Too many open files" + ], + "symbolic_name": "cinder_volume_log" + }, + { + "file": "/etc/cluster/cluster.conf", + "pattern": [ + "clusternode name=" + ], + "symbolic_name": "cluster_conf" + }, + { + "file": "/proc/cmdline", + "pattern": [], + "symbolic_name": "cmdline" + }, + { + "file": "/etc/cni/net.d/87-podman-bridge.conflist", + "pattern": [], + "symbolic_name": "cni_podman_bridge_conf" + }, + { + "file": "/etc/corosync/corosync.conf", + "pattern": [], + "symbolic_name": "corosync_conf" + }, + { + "file": "/etc/cobbler/modules.conf", + "pattern": [], + "symbolic_name": "cobbler_modules_conf" + }, + { + "file": "/etc/cobbler/settings", + "pattern": [], 
+ "symbolic_name": "cobbler_settings" + }, + { + "file": "/etc/sysconfig/corosync", + "pattern": [], + "symbolic_name": "corosync" + }, + { + "file": "/proc/cpuinfo", + "pattern": [], + "symbolic_name": "cpuinfo" + }, + { + "file": "/sys/devices/system/clocksource/clocksource0/current_clocksource", + "pattern": [], + "symbolic_name": "current_clocksource" + }, + { + "file": "/sys/devices/system/cpu/smt/active", + "pattern": [], + "symbolic_name": "cpu_smt_active" + }, + { + "file": "/sys/devices/system/cpu/smt/control", + "pattern": [], + "symbolic_name": "cpu_smt_control" + }, + { + "file": "/sys/devices/system/cpu/vulnerabilities/meltdown", + "pattern": [], + "symbolic_name": "cpu_vulns_meltdown" + }, + { + "file": "/sys/devices/system/cpu/vulnerabilities/spectre_v1", + "pattern": [], + "symbolic_name": "cpu_vulns_spectre_v1" + }, + { + "file": "/sys/devices/system/cpu/vulnerabilities/spectre_v2", + "pattern": [], + "symbolic_name": "cpu_vulns_spectre_v2" + }, + { + "file": "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", + "pattern": [], + "symbolic_name": "cpu_vulns_spec_store_bypass" + }, + { + "file": "/sys/fs/cgroup/cpuset/cpuset.cpus", + "pattern": [], + "symbolic_name": "cpuset_cpus" + }, + { + "file": "/etc/cron.daily/rhsmd", + "pattern": [ + "if [ -n $config ]; then" + ], + "symbolic_name": "cron_daily_rhsmd" + }, + { + "file": "/etc/crypto-policies/config", + "pattern": [], + "symbolic_name": "crypto_policies_config" + }, + { + "file": "/etc/crypto-policies/state/current", + "pattern": [], + "symbolic_name": "crypto_policies_state_current" + }, + { + "file": "/etc/crypto-policies/back-ends/opensshserver.config", + "pattern": [], + "symbolic_name": "crypto_policies_opensshserver" + }, + { + "file": "/etc/crypto-policies/back-ends/bind.config", + "pattern": [], + "symbolic_name": "crypto_policies_bind" + }, + { + "file": "/sys/kernel/debug/x86/retp_enabled", + "pattern": [], + "symbolic_name": "x86_retp_enabled" + }, + { + "file": "/var/log/dirsrv/.*/()*(errors|errors\\.2.*)", + "pattern": [ + "DSRetroclPlugin - delete_changerecord: could not delete change record", + "We recommend to increase the entry cache size nsslapd-cachememsize" + ], + "symbolic_name": "dirsrv_errors" + }, + { + "file": "/var/log/dmesg", + "pattern": [ + "Amazon EC2", + "CVE-2017-1000364", + "CVE-2018-14634", + "EFI", + "Efi", + "FEATURE IBPB_SUPPORT", + "FEATURE SPEC_CTRL", + "Kernel page table isolation", + "L1TF", + "L1Tf", + "Linux version", + "__cpufreq_add_dev", + "efi", + "l1tf", + "x86/pti" + ], + "symbolic_name": "dmesg_log" + }, + { + "file": "/etc/dnf/modules.d/.*\\.module", + "pattern": [], + "symbolic_name": "dnf_modules" + }, + { + "file": "/etc/redhat-access-insights/machine-id", + "pattern": [], + "symbolic_name": "machine_id1" + }, + { + "file": "/etc/sysconfig/docker-storage-setup", + "pattern": [], + "symbolic_name": "docker_storage_setup" + }, + { + "file": "/etc/sysconfig/docker-storage", + "pattern": [], + "symbolic_name": "docker_storage" + }, + { + "file": "/etc/sysconfig/docker", + "pattern": [], + "symbolic_name": "docker_sysconfig" + }, + { + "file": "/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service", + "pattern": [], + "symbolic_name": "dracut_kdump_capture_service" + }, + { + "file": "/etc/etcd/etcd.conf", + "pattern": [ + "ETCD_DATA_DIR", + "[" + ], + "symbolic_name": "etcd_conf" + }, + { + "file": "/var/log/ovirt-engine/engine.log", + "pattern": [ + "Data Center", + "ERROR", + "INFO", + "Low disk space. 
Host", + "VDS_LOW_DISK_SPACE_ERROR", + "org.ovirt.engine.core.bll.storage.lease.AddVmLeaseCommand", + "org.ovirt.engine.core.bll.storage.lease.RemoveVmLeaseCommand", + "org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector", + "org.ovirt.engine.core.vdsbroker.vdsbroker.HotPlugLeaseVDSCommand", + "org.ovirt.engine.core.vdsbroker.vdsbroker.HotUnplugLeaseVDSCommand" + ], + "symbolic_name": "engine_log" + }, + { + "file": "/var/log/ovirt-engine/server.log", + "pattern": [ + "ERROR [org.jboss.as.controller.management-operation]", + "INFO [org.wildfly.extension.undertow", + "Operation (\"deploy\") failed", + "Registered web context: '/ovirt-engine/api' for server" + ], + "symbolic_name": "ovirt_engine_server_log" + }, + { + "file": "/var/log/ovirt-engine/ui.log", + "pattern": [ + "Uncaught exception: com.google.gwt.event.shared.UmbrellaException" + ], + "symbolic_name": "ovirt_engine_ui_log" + }, + { + "file": "/etc/systemd/()*journald\\.conf", + "pattern": [], + "symbolic_name": "etc_journald_conf" + }, + { + "file": "/etc/systemd/journald.conf.d/()*.+\\.conf", + "pattern": [], + "symbolic_name": "etc_journald_conf_d" + }, + { + "file": "/etc/firewalld/firewalld.conf", + "pattern": [ + "AllowZoneDrifting" + ], + "symbolic_name": "firewalld_conf" + }, + { + "file": "/var/log/ipa/healthcheck/healthcheck.log", + "pattern": [], + "symbolic_name": "freeipa_healthcheck_log" + }, + { + "file": "/etc/fstab", + "pattern": [], + "symbolic_name": "fstab" + }, + { + "file": "/etc/my.cnf.d/galera.cnf", + "pattern": [], + "symbolic_name": "galera_cnf" + }, + { + "file": "/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", + "pattern": [], + "symbolic_name": "galera_cnf" + }, + { + "file": "/boot/efi/EFI/redhat/grub.conf", + "pattern": [], + "symbolic_name": "grub_efi_conf" + }, + { + "file": "/boot/efi/EFI/redhat/grubenv", + "pattern": [], + "symbolic_name": "grub2_efi_grubenv" + }, + { + "file": "/boot/grub/grub.conf", + "pattern": [], + "symbolic_name": "grub_conf" + }, + { + "file": "/boot/efi/EFI/redhat/grub.cfg", + "pattern": [], + "symbolic_name": "grub2_efi_cfg" + }, + { + "file": "/boot/grub2/grub.cfg", + "pattern": [], + "symbolic_name": "grub2_cfg" + }, + { + "file": "/boot/grub2/grubenv", + "pattern": [], + "symbolic_name": "grub2_grubenv" + }, + { + "file": "/etc/haproxy/haproxy.cfg", + "pattern": [], + "symbolic_name": "haproxy_cfg" + }, + { + "file": "/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg", + "pattern": [], + "symbolic_name": "haproxy_cfg" + }, + { + "file": "/etc/heat/heat.conf", + "pattern": [], + "symbolic_name": "heat_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf", + "pattern": [], + "symbolic_name": "heat_conf" + }, + { + "file": "/var/log/heat/heat-api.log", + "pattern": [ + "Too many connections" + ], + "symbolic_name": "heat_api_log" + }, + { + "file": "/var/log/heat/heat_api.log", + "pattern": [ + "Too many connections" + ], + "symbolic_name": "heat_api_log" + }, + { + "file": "/var/log/containers/heat/heat_api.log", + "pattern": [ + "Too many connections" + ], + "symbolic_name": "heat_api_log" + }, + { + "file": "/etc/hosts", + "pattern": [], + "symbolic_name": "hosts" + }, + { + "file": "/var/log/httpd/error_log", + "pattern": [ + "(28)No space left on device: ", + "AH00485: scoreboard is full, not at MaxRequestWorkers", + "ModSecurity: collections_remove_stale: Failed deleting collection", + "Require ServerLimit > 0, setting to 1", + "Resource temporarily unavailable", + "The 
mpm module (prefork.c) is not supported by mod_http2", + "[crit] Memory allocation failed, aborting process", + "and would exceed the ServerLimit value of ", + "consider raising the MaxClients setting", + "consider raising the MaxRequestWorkers setting", + "exceed ServerLimit of", + "exceeds ServerLimit value of", + "exceeds compile time limit of", + "exceeds compile-time limit of", + "exit signal Segmentation fault", + "manager_handler CONFIG error: MEM: Can't update or insert node", + "manager_handler ENABLE-APP error: MEM: Can't update or insert context", + "manager_handler ENABLE-APP error: MEM: Can't update or insert host alias", + "not allowed, increasing to 1" + ], + "symbolic_name": "httpd_error_log" + }, + { + "file": "/opt/rh/httpd24/root/etc/httpd/logs/error_log", + "pattern": [ + "The mpm module (prefork.c) is not supported by mod_http2" + ], + "symbolic_name": "httpd24_httpd_error_log" + }, + { + "file": "/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log", + "pattern": [ + "The mpm module (prefork.c) is not supported by mod_http2" + ], + "symbolic_name": "jbcs_httpd24_httpd_error_log" + }, + { + "file": "/etc/sysconfig/network-scripts/()*ifcfg-.*", + "pattern": [], + "symbolic_name": "ifcfg" + }, + { + "file": "/etc/sysconfig/network-scripts/()*route-.*", + "pattern": [], + "symbolic_name": "ifcfg_static_route" + }, + { + "file": "/etc/ImageMagick/()*policy\\.xml", + "pattern": [ + "", + "" + ], + "symbolic_name": "imagemagick_policy" + }, + { + "file": "/usr/lib64/ImageMagick-6.5.4/config/()*policy\\.xml", + "pattern": [ + "", + "" + ], + "symbolic_name": "imagemagick_policy" + }, + { + "file": "/usr/lib/ImageMagick-6.5.4/config/()*policy\\.xml", + "pattern": [ + "", + "" + ], + "symbolic_name": "imagemagick_policy" + }, + { + "file": "/etc/vmware-tools/tools.conf", + "pattern": [], + "symbolic_name": "vmware_tools_conf" + }, + { + "file": "/proc/interrupts", + "pattern": [], + "symbolic_name": "interrupts" + }, + { + "file": "/var/log/ipaupgrade.log", + "pattern": [ + "wait_for_open_ports: localhost" + ], + "symbolic_name": "ipaupgrade_log" + }, + { + "file": "/etc/sysconfig/iptables", + "pattern": [], + "symbolic_name": "iptables_permanent" + }, + { + "file": "/etc/ironic/ironic.conf", + "pattern": [ + "[" + ], + "symbolic_name": "ironic_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf", + "pattern": [ + "[" + ], + "symbolic_name": "ironic_conf" + }, + { + "file": "/var/log/ironic-inspector/ironic-inspector.log", + "pattern": [ + "Certificate did not match expected hostname", + "ERROR requests.packages.urllib3.connection" + ], + "symbolic_name": "ironic_inspector_log" + }, + { + "file": "/etc/kdump.conf", + "pattern": [], + "symbolic_name": "kdump_conf" + }, + { + "file": "/sys/kernel/kexec_crash_size", + "pattern": [], + "symbolic_name": "kexec_crash_size" + }, + { + "file": "/etc/()*krb5\\.conf", + "pattern": [], + "symbolic_name": "krb5" + }, + { + "file": "/sys/kernel/mm/ksm/run", + "pattern": [], + "symbolic_name": "ksmstate" + }, + { + "file": "/var/log/libvirt/libvirtd.log", + "pattern": [ + "qemuMigrationSrcNBDStorageCopyBlockdev:", + "virDomainBlockCommit:", + "virDomainBlockCopy:", + "virDomainBlockPull:", + "virDomainSnapshotCreateXML:" + ], + "symbolic_name": "libvirtd_log" + }, + { + "file": "/etc/security/()*limits\\.conf", + "pattern": [], + "symbolic_name": "limits_conf" + }, + { + "file": "/etc/security/limits.d/()*.*\\.conf", + "pattern": [], + "symbolic_name": "limits_d" + }, + { + "file": "/etc/lvm/lvm.conf", 
+ "pattern": [ + "auto_activation_volume_list", + "filter", + "locking_type", + "thin_pool_autoextend", + "volume_list" + ], + "symbolic_name": "lvm_conf" + }, + { + "file": "/var/log/mariadb/mariadb.log", + "pattern": [ + "Duplicate entry", + "Too many open files", + "for key 'PRIMARY'" + ], + "symbolic_name": "mariadb_log" + }, + { + "file": "/proc/mdstat", + "pattern": [], + "symbolic_name": "mdstat" + }, + { + "file": "/proc/meminfo", + "pattern": [], + "symbolic_name": "meminfo" + }, + { + "file": "/var/log/messages", + "pattern": [ + " disconnect jid=", + "\"/var/lib/pgsql/data\" is missing or empty", + "(enic): transmit queue 0 timed out", + ", type vxfs) has no security xattr handler", + "- image is referenced in one or more repositories", + "/input/input", + "11000 E11000 duplicate key error index: pulp_database.repo_profile_applicability.$profile_hash_-1_repo_id_-1", + ": segfault at ", + ": session replaced: jid=", + "Abort command issued", + "Broken pipe", + "Buffer I/O error on device", + "Cannot assign requested address", + "Cannot assign requested address: AH00072", + "Connection amqps://subscription.rhn.redhat.com:5647 disconnected", + "Corosync main process was not scheduled (@", + "Could not set", + "DHCPv4 lease renewal requested", + "DMA Status error. Resetting chip", + "Detected Tx Unit Hang", + "Device is still in reset", + "Device offlined - not ready after error recovery", + "Error I40E_AQ_RC_EINVAL adding RX filters on PF, promiscuous mode forced on", + "Error deleting EBS Disk volume aws", + "Error running DeviceResume dm_task_run failed", + "Exception happened during processing of request from", + "Failed to bind socket: No such file or directory", + "Failed to extend thin", + "List /apis/image.openshift.io/v1/images", + "Loop callback failed with: Cannot allocate memory", + "MDC/MDIO access timeout", + "Medium access timeout failure. Offlining disk!", + "MountVolume.SetUp succeeded for volume", + "NETDEV WATCHDOG", + "Neighbour table overflow", + "NetworkManager state is now CONNECTED_SITE", + "Not scheduled for", + "Orphaned pod", + "Out of MCCQ wrbs", + "Out of memory: Kill process", + "Out of memory: kill process", + "PPM exceeds tolerance 500 PPM", + "ProcessExecutionError: Exit code: 1; Stdin: ; Stdout: ; Stderr: setting the network namespace", + "SCSI error: return code =", + "SDN initialization failed: Error: Existing service with IP: None is not part of service network", + "Scheduled import of stream", + "Steal time is >", + "TX driver issue detected, PF reset issued", + "TX stuck with port_enabled=1: resetting channels", + "This system does not support \"SSSE3\"", + "Throttling request took", + "TypeError: object of type 'NoneType' has no len()", + "Virtualization daemon", + "WRITE SAME failed. 
Manually zeroing", + "] trap divide error ", + "_NET_ACTIVE_WINDOW", + "as active slave; either", + "belongs to docker.service", + "callbacks suppressed", + "canceled DHCP transaction, DHCP client pid", + "chardev: opening backend \"socket\" failed", + "clearing Tx timestamp hang", + "dev_set_mac_address of dev", + "device-mapper: multipath: Failing path", + "does not seem to be present, delaying initialization", + "drivers/input/input-leds.c:115 input_leds_connect", + "enabling it in", + "end_request: I/O error, dev", + "error Error on attach: Node not found", + "evicted, waiting for pod to be cleaned up", + "eviction manager: eviction criteria not yet met for threshold", + "eviction manager: must evict pod(s) to reclaim nodefsInodes", + "eviction manager: observations: signal=allocatableNodeFs.available, available: -", + "ext4_ext_search_left", + "failed to modify QP to RTR: -22", + "failed while handling", + "failed with error -110", + "failed! ALB mode requires that the base driver support setting the hw address also when the network", + "failed: Connection amqps:", + "failed: Invalid argument", + "failed: rpc error: code = 2 desc = unable to inspect docker image", + "fiid_obj_get: 'present_countdown_value': data not available", + "firewalld - dynamic firewall daemon", + "fit failure summary on nodes : Insufficient pods", + "from image service failed: rpc error: code = Canceled desc = context canceled", + "host not found in upstream", + "http2: no cached connection was available", + "hv_netvsc vmbus_", + "hv_netvsc: probe of vmbus_", + "hw csum failure", + "in libnl.so.1", + "initiating reset due to tx timeout", + "invalid key/value pair in file /usr/lib/udev/rules.d/59-fc-wwpn-id.rules", + "ip_local_port_range: prefer different parity for start/end values", + "irq handler for vector (irq -1)", + "is beyond advertised capabilities", + "is down or the link is down", + "is greater than comparison timestamp", + "iscsiadm: iscsiadm: Could not log into all portals", + "kernel/softirq.c:159 local_bh_enable+", + "kernel: BUG: soft lockup", + "kernel: CIFS VFS: Unexpected SMB signature", + "kernel: INFO: task xfsaild/md", + "kernel: Memory cgroup out of memory: Kill process", + "kernel: TCP: out of memory -- consider tuning tcp_mem", + "kernel: bnx2fc: byte_count", + "kernel: kvm: disabled by bios", + "kernel: lockd: Unknown symbol register_inet6addr_notifier", + "kernel: lockd: Unknown symbol unregister_inet6addr_notifier", + "kernel: megasas: Found FW in FAULT state,will reset adapter.", + "kernel: nfs: server", + "kernel: possible SYN flooding on port", + "khash_super_prune_nolock", + "link status up for interface", + "megaraid_sas: FW detected to be in faultstate, restarting it", + "mode:0x20", + "multipathd.service operation timed out. 
Terminating", + "netlink_socket|ERR|fcntl: Too many open file", + "nfs_reap_expired_delegations", + "not responding, timed out", + "page allocation failure", + "per_source_limit from", + "platform microcode: firmware: requesting", + "reservation conflict", + "returned a bad sequence-id error", + "rhsmd: rhsmd process exceeded runtime and was killed", + "segfault at", + "server kernel: rhsmcertd-worke", + "shm_open failed, Permission denied", + "skb_copy", + "skb_over_panic", + "start request repeated too quickly for docker.service", + "state changed timeout -> done", + "swapper: page allocation failure", + "systemd: Unit ip6tables.service entered failed state", + "systemd: Unit iptables.service entered failed state", + "systemd[1]: Received SIGCHLD from PID", + "tg3_start_xmit", + "there is a meaningful conflict", + "timed out", + "timeout before we got a set response", + "timeout; kill it", + "timing out command, waited", + "transmit queue", + "udev: renamed network interface", + "unknown filesystem type 'binfmt_misc'", + "vdsm-tool: EnvironmentError: Failed to restore the persisted networks", + "watch chan error: etcdserver: mvcc: required revision has been compacted" + ], + "symbolic_name": "messages" + }, + { + "file": "/etc/()*modprobe\\.conf", + "pattern": [], + "symbolic_name": "modprobe_conf" + }, + { + "file": "/etc/modprobe.d/()*.*\\.conf", + "pattern": [], + "symbolic_name": "modprobe_d" + }, + { + "file": "/etc/()*mongod.conf", + "pattern": [ + "dbpath" + ], + "symbolic_name": "mongod_conf" + }, + { + "file": "/var/opt/mssql/mssql.conf", + "pattern": [], + "symbolic_name": "mssql_conf" + }, + { + "file": "/etc/multipath.conf", + "pattern": [], + "symbolic_name": "multipath_conf" + }, + { + "file": "/var/log/mysql/mysqld.log", + "pattern": [ + "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "Too many open files", + "[ERROR]" + ], + "symbolic_name": "mysql_log" + }, + { + "file": "/var/log/mysql.log", + "pattern": [ + "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "Too many open files", + "[ERROR]" + ], + "symbolic_name": "mysql_log" + }, + { + "file": "/etc/sysconfig/netconsole", + "pattern": [], + "symbolic_name": "netconsole" + }, + { + "file": "/etc/NetworkManager/dispatcher.d/.*-dhclient", + "pattern": [], + "symbolic_name": "networkmanager_dispatcher_d" + }, + { + "file": "/proc/net/netfilter/nfnetlink_queue", + "pattern": [], + "symbolic_name": "nfnetlink_queue" + }, + { + "file": "/etc/exports", + "pattern": [ + "*", + "no_root_squash" + ], + "symbolic_name": "nfs_exports" + }, + { + "file": "/etc/exports.d/()*.*\\.exports", + "pattern": [ + "*", + "no_root_squash" + ], + "symbolic_name": "nfs_exports_d" + }, + { + "file": "/var/log/nova/nova-api.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "Timed out waiting for a reply to message ID", + "Too many connections" + ], + "symbolic_name": "nova_api_log" + }, + { + "file": "/var/log/containers/nova/nova-api.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "Timed out waiting for a reply to message ID", + "Too many connections" + ], + "symbolic_name": "nova_api_log" + }, + { + "file": "/var/log/nova/nova-compute.log", + "pattern": [ + "/console.log: Permission denied", + ": No such device or address", + "Attempting claim on node", + "Claim successful on node", + "Could not open ", + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Detach 
volume", + "Disk of instance is too large", + "During sync_power_state the instance has a pending task", + "ERROR nova.virt.libvirt.driver", + "ERROR oslo_messaging.rpc.server Command: scp -r", + "ERROR oslo_messaging.rpc.server InvalidSharedStorage", + "ERROR oslo_messaging.rpc.server Stderr: u'Device crypt-dm-uuid-mpath", + "Find Multipath device file for volume WWN", + "FlavorDiskSmallerThanImage: Flavor's disk is too small for requested image.", + "INFO nova.compute.manager", + "INFO os_brick.initiator.linuxscsi", + "Instance shutdown by itself. Calling the stop API.", + "Live Migration failure: internal error: process exited while connecting to monitor", + "Live Migration failure: operation failed: Failed to connect to remote libvirt URI qemu+ssh", + "Migration pre-check error: Unable to migrate", + "No such device or address", + "Resuming guest", + "Stderr: u'blockdev: cannot open", + "Successfully plugged vif VIFBridge", + "Timed out waiting for RPC response", + "cmt=off: Property '.cmt' not found", + "does not match source", + "error: Failed to start domain", + "from mountpoint /dev", + "is not active", + "is not on shared storage", + "libvirt-guests.sh", + "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", + "libvirtError: Unable to delete file /var/lib/nova/instances/", + "unsupported configuration: Target device drive address", + "unsupported configuration: Target network card MTU", + "unsupported configuration: Unable to find security driver for model selinux" + ], + "symbolic_name": "nova_compute_log" + }, + { + "file": "/var/log/containers/nova/nova-compute.log", + "pattern": [ + "/console.log: Permission denied", + ": No such device or address", + "Attempting claim on node", + "Claim successful on node", + "Could not open ", + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Detach volume", + "Disk of instance is too large", + "During sync_power_state the instance has a pending task", + "ERROR nova.virt.libvirt.driver", + "ERROR oslo_messaging.rpc.server Command: scp -r", + "ERROR oslo_messaging.rpc.server InvalidSharedStorage", + "ERROR oslo_messaging.rpc.server Stderr: u'Device crypt-dm-uuid-mpath", + "Find Multipath device file for volume WWN", + "FlavorDiskSmallerThanImage: Flavor's disk is too small for requested image.", + "INFO nova.compute.manager", + "INFO os_brick.initiator.linuxscsi", + "Instance shutdown by itself. 
Calling the stop API.", + "Live Migration failure: internal error: process exited while connecting to monitor", + "Live Migration failure: operation failed: Failed to connect to remote libvirt URI qemu+ssh", + "Migration pre-check error: Unable to migrate", + "No such device or address", + "Resuming guest", + "Stderr: u'blockdev: cannot open", + "Successfully plugged vif VIFBridge", + "Timed out waiting for RPC response", + "cmt=off: Property '.cmt' not found", + "does not match source", + "error: Failed to start domain", + "from mountpoint /dev", + "is not active", + "is not on shared storage", + "libvirt-guests.sh", + "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", + "libvirtError: Unable to delete file /var/lib/nova/instances/", + "unsupported configuration: Target device drive address", + "unsupported configuration: Target network card MTU", + "unsupported configuration: Unable to find security driver for model selinux" + ], + "symbolic_name": "nova_compute_log" + }, + { + "file": "/etc/nova/nova.conf", + "pattern": [], + "symbolic_name": "nova_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf", + "pattern": [], + "symbolic_name": "nova_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf", + "pattern": [], + "symbolic_name": "nova_conf" + }, + { + "file": "/etc/nscd.conf", + "pattern": [ + "enable-cache" + ], + "symbolic_name": "nscd_conf" + }, + { + "file": "/etc/nsswitch.conf", + "pattern": [ + "HOSTS:", + "Hosts:", + "group", + "hosts:", + "passwd", + "shadow" + ], + "symbolic_name": "nsswitch_conf" + }, + { + "file": "/etc/ntp.conf", + "pattern": [], + "symbolic_name": "ntp_conf" + }, + { + "file": "/sys/module/nvme_core/parameters/io_timeout", + "pattern": [], + "symbolic_name": "nvme_core_io_timeout" + }, + { + "file": "/var/lib/config-data/puppet-generated/octavia/etc/octavia/octavia.conf", + "pattern": [ + "[", + "active_connection_max_retries", + "active_connection_rety_interval", + "admin_log_targets", + "administrative_log_facility", + "agent_request_read_timeout", + "agent_server_ca", + "agent_server_cert", + "agent_server_network_dir", + "agent_server_network_file", + "agent_tls_protocol", + "allow_pagination", + "allow_ping_health_monitors", + "allow_sorting", + "allow_tls_terminated_listeners", + "allow_vip_network_id", + "allow_vip_port_id", + "allow_vip_subnet_id", + "amp_active_retries", + "amp_active_wait_sec", + "amp_boot_network_list", + "amp_flavor_id", + "amp_image_id", + "amp_image_owner_id", + "amp_image_tag", + "amp_secgroup_list", + "amp_ssh_access_allowed", + "amphora_driver", + "amphora_expiry_age", + "amphora_udp_driver", + "anti_affinity_policy", + "api_base_uri", + "audit_map_file", + "auth_strategy", + "auth_type", + "availability_zone", + "base_cert_dir", + "base_path", + "bind_host", + "bind_ip", + "bind_port", + "build_active_retries", + "build_rate_limit", + "build_retry_interval", + "ca_certificates_file", + "cafile", + "cert_generator", + "cert_manager", + "cert_validity_time", + "cleanup_interval", + "client_ca", + "client_cert", + "compute_driver", + "connection_logging", + "connection_max_retries", + "connection_retry_interval", + "controller_ip_port_list", + "debug", + "default_health_monitor_quota", + "default_listener_quota", + "default_load_balancer_quota", + "default_member_quota", + "default_pool_quota", + "default_provider_driver", + "disable_local_log_storage", + "disable_revert", + "distributor_driver", + 
"driver", + "enable_anti_affinity", + "enable_proxy_headers_parsing", + "enabled", + "enabled_provider_agents", + "enabled_provider_drivers", + "endpoint_type", + "engine", + "failover_threads", + "forward_all_logs", + "get_socket_path", + "graceful_shutdown_timeout", + "haproxy_cmd", + "haproxy_stick_size", + "haproxy_template", + "health_check_interval", + "health_update_driver", + "health_update_threads", + "heartbeat_interval", + "heartbeat_timeout", + "ignore_req_list", + "insecure", + "lb_network_interface", + "load_balancer_expiry_age", + "loadbalancer_topology", + "log_dir", + "log_file", + "log_protocol", + "log_queue_size", + "log_retry_count", + "log_retry_interval", + "logging_template_override", + "max_process_warning_percent", + "max_retries", + "max_workers", + "memcached_servers", + "network_driver", + "octavia_plugins", + "pagination_max_limit", + "policy_file", + "port_detach_timeout", + "provider_agent_shutdown_timeout", + "random_amphora_name_length", + "region_name", + "respawn_count", + "respawn_interval", + "rest_request_conn_timeout", + "rest_request_read_timeout", + "retry_interval", + "rpc_thread_pool_size", + "server_ca", + "service_name", + "signing_digest", + "sock_rlimit", + "spare_amphora_pool_size", + "spare_check_interval", + "stats_max_processes", + "stats_request_timeout", + "stats_socket_path", + "stats_update_driver", + "stats_update_threads", + "status_max_processes", + "status_request_timeout", + "status_socket_path", + "status_update_threads", + "storage_path", + "tenant_log_targets", + "topic", + "topics", + "udp_connect_min_interval_health_monitor", + "use_oslo_messaging", + "use_upstart", + "user_data_config_drive", + "user_log_facility", + "user_log_format", + "volume_create_max_retries", + "volume_create_retry_interval", + "volume_create_timeout", + "volume_driver", + "volume_size", + "volume_type", + "vrrp_advert_int", + "vrrp_check_interval", + "vrrp_fail_count", + "vrrp_garp_refresh_count", + "vrrp_garp_refresh_interval", + "vrrp_success_count", + "workers" + ], + "symbolic_name": "octavia_conf" + }, + { + "file": "/etc/odbc.ini", + "pattern": [ + "DRIVER", + "Driver", + "NO_SSPS", + "No_ssps", + "SERVER", + "Server", + "[", + "driver", + "no_ssps", + "server" + ], + "symbolic_name": "odbc_ini" + }, + { + "file": "/etc/odbcinst.ini", + "pattern": [], + "symbolic_name": "odbcinst_ini" + }, + { + "file": "/etc/origin/master/master-config.yaml", + "pattern": [], + "symbolic_name": "ose_master_config" + }, + { + "file": "/etc/origin/node/node-config.yaml", + "pattern": [], + "symbolic_name": "ose_node_config" + }, + { + "file": "/var/log/pacemaker.log", + "pattern": [ + "pcmk_dbus_find_error" + ], + "symbolic_name": "pacemaker_log" + }, + { + "file": "/var/log/pacemaker/pacemaker.log", + "pattern": [ + "pcmk_dbus_find_error" + ], + "symbolic_name": "pacemaker_log" + }, + { + "file": "/proc/partitions", + "pattern": [], + "symbolic_name": "partitions" + }, + { + "file": "/etc/pam.d/password-auth", + "pattern": [], + "symbolic_name": "password_auth" + }, + { + "file": "/etc/yum/pluginconf.d/()*\\w+\\.conf", + "pattern": [], + "symbolic_name": "pluginconf_d" + }, + { + "file": "/var/lib/pgsql/data/postgresql.conf", + "pattern": [], + "symbolic_name": "postgresql_conf" + }, + { + "file": "/var/lib/pgsql/data/pg_log/()*postgresql-.+\\.log", + "pattern": [ + "FATAL", + "checkpoints are occurring too frequently", + "connection limit exceeded for non-superusers", + "database is not accepting commands to avoid wraparound data loss in database", + "must be 
vacuumed within", + "remaining connection slots are reserved for non-replication superuser connections" + ], + "symbolic_name": "postgresql_log" + }, + { + "file": "/proc/net/netstat", + "pattern": [], + "symbolic_name": "proc_netstat" + }, + { + "file": "/proc/net/snmp", + "pattern": [], + "symbolic_name": "proc_snmp_ipv4" + }, + { + "file": "/proc/net/snmp6", + "pattern": [], + "symbolic_name": "proc_snmp_ipv6" + }, + { + "file": "/proc/slabinfo", + "pattern": [], + "symbolic_name": "proc_slabinfo" + }, + { + "file": "/proc/stat", + "pattern": [], + "symbolic_name": "proc_stat" + }, + { + "file": "/sos_commands/process/ps_auxwww", + "pattern": [], + "symbolic_name": "ps_auxwww" + }, + { + "file": "/etc/default/pulp_workers", + "pattern": [], + "symbolic_name": "pulp_worker_defaults" + }, + { + "file": "/etc/sysconfig/puppetserver", + "pattern": [ + "JAVA_ARGS" + ], + "symbolic_name": "puppetserver_config" + }, + { + "file": "/etc/libvirt/qemu.conf", + "pattern": [], + "symbolic_name": "qemu_conf" + }, + { + "file": "/etc/libvirt/qemu/()*.+\\.xml", + "pattern": [], + "symbolic_name": "qemu_xml" + }, + { + "file": "/etc/rabbitmq/rabbitmq-env.conf", + "pattern": [], + "symbolic_name": "rabbitmq_env" + }, + { + "file": "/var/log/rabbitmq/startup_log", + "pattern": [ + "Event crashed log handler:" + ], + "symbolic_name": "rabbitmq_startup_log" + }, + { + "file": "/etc/rc.d/rc.local", + "pattern": [], + "symbolic_name": "rc_local" + }, + { + "file": "/etc/rdma/rdma.conf", + "pattern": [], + "symbolic_name": "rdma_conf" + }, + { + "file": "/etc/redhat-release", + "pattern": [], + "symbolic_name": "redhat_release" + }, + { + "file": "/etc/os-release", + "pattern": [], + "symbolic_name": "os_release" + }, + { + "file": "/etc/resolv.conf", + "pattern": [], + "symbolic_name": "resolv_conf" + }, + { + "file": "/etc/opt/rh/rh-mongodb26/()*mongod.conf", + "pattern": [ + "destination", + "syslog", + "systemLog" + ], + "symbolic_name": "rh_mongodb26_conf" + }, + { + "file": "/etc/sysconfig/rhn/()*rhn-entitlement-cert\\.xml.*", + "pattern": [], + "symbolic_name": "rhn_entitlement_cert_xml" + }, + { + "file": "/etc/rhn/rhn.conf", + "pattern": [], + "symbolic_name": "rhn_conf" + }, + { + "file": "/usr/share/rhn/config-defaults/rhn_hibernate.conf", + "pattern": [], + "symbolic_name": "rhn_hibernate_conf" + }, + { + "file": "/var/log/rhn/search/rhn_search_daemon.log", + "pattern": [ + "APPARENT DEADLOCK!" 
+ ], + "symbolic_name": "rhn_search_daemon_log" + }, + { + "file": "/var/log/rhn/rhn_taskomatic_daemon.log", + "pattern": [], + "symbolic_name": "rhn_taskomatic_daemon_log" + }, + { + "file": "/etc/rhosp-release", + "pattern": [], + "symbolic_name": "rhosp_release" + }, + { + "file": "/etc/rhsm/rhsm.conf", + "pattern": [], + "symbolic_name": "rhsm_conf" + }, + { + "file": "/var/lib/rhsm/cache/releasever.json", + "pattern": [], + "symbolic_name": "rhsm_releasever" + }, + { + "file": "/etc/qpid/qpidd.conf", + "pattern": [], + "symbolic_name": "qpidd_conf" + }, + { + "file": "/var/log/rhsm/rhsm.log", + "pattern": [ + "KeyError: 'config.network.dnsConfig.hostName'", + "Validation failed: Name is invalid", + "virt.host_type=hyperv, virt.uuid=Not Settable" + ], + "symbolic_name": "rhsm_log" + }, + { + "file": "/etc/rsyslog.conf", + "pattern": [ + "$ActionQueueFileName", + "imjournal", + "imtcp", + "regex" + ], + "symbolic_name": "rsyslog_conf" + }, + { + "file": "/etc/samba/smb.conf", + "pattern": [ + "GLOBAL", + "Global", + "KERBEROS METHOD", + "Kerberos Method", + "MAX SMBD PROCESSES", + "Max Smbd Processes", + "NT PIPE SUPPORT", + "Nt Pipe Support", + "PASSDB BACKEND", + "Passdb Backend", + "REALM", + "Realm", + "SECURITY", + "Security", + "[", + "comment", + "global", + "kerberos method", + "max smbd processes", + "nt pipe support", + "passdb backend", + "path", + "read only", + "realm", + "security", + "writable" + ], + "symbolic_name": "samba" + }, + { + "file": "/etc/redhat-access/redhat-access-insights.properties", + "pattern": [], + "symbolic_name": "sat5_insights_properties" + }, + { + "file": "/etc/foreman-installer/custom-hiera.yaml", + "pattern": [], + "symbolic_name": "satellite_custom_hiera" + }, + { + "file": "/usr/share/foreman/lib/satellite/version.rb", + "pattern": [], + "symbolic_name": "satellite_version_rb" + }, + { + "file": "/proc/scsi/scsi", + "pattern": [], + "symbolic_name": "scsi" + }, + { + "file": "/proc/net/sctp/assocs", + "pattern": [], + "symbolic_name": "sctp_asc" + }, + { + "file": "/proc/net/sctp/eps", + "pattern": [], + "symbolic_name": "sctp_eps" + }, + { + "file": "/proc/net/sctp/snmp", + "pattern": [], + "symbolic_name": "sctp_snmp" + }, + { + "file": "/var/log/secure", + "pattern": [ + "[CAUTION] This_is_the default_filter_string_for_all_large_files!" 
+ ], + "symbolic_name": "secure" + }, + { + "file": "/etc/selinux/config", + "pattern": [], + "symbolic_name": "selinux_config" + }, + { + "file": "/etc/sysconfig/foreman-tasks", + "pattern": [ + "EXECUTOR_MEMORY_LIMIT", + "EXECUTOR_MEMORY_MONITOR_DELAY", + "EXECUTOR_MEMORY_MONITOR_INTERVAL" + ], + "symbolic_name": "foreman_tasks_config" + }, + { + "file": "/etc/sysconfig/dynflowd", + "pattern": [ + "EXECUTOR_MEMORY_LIMIT", + "EXECUTOR_MEMORY_MONITOR_DELAY", + "EXECUTOR_MEMORY_MONITOR_INTERVAL" + ], + "symbolic_name": "foreman_tasks_config" + }, + { + "file": "/proc/net/softnet_stat", + "pattern": [], + "symbolic_name": "softnet_stat" + }, + { + "file": "/proc/net/sockstat", + "pattern": [], + "symbolic_name": "sockstat" + }, + { + "file": "/etc/ssh/sshd_config", + "pattern": [ + "ALLOWUSERS", + "AllowUsers", + "Allowusers", + "CHALLENGERESPONSEAUTHENTICATION", + "CIPHERS", + "CLIENTALIVECOUNTMAX", + "CLIENTALIVEINTERVAL", + "ChallengeResponseAuthentication", + "Challengeresponseauthentication", + "Ciphers", + "ClientAliveCountMax", + "ClientAliveInterval", + "Clientalivecountmax", + "Clientaliveinterval", + "DENYUSERS", + "DenyUsers", + "Denyusers", + "KBDINTERACTIVEAUTHENTICATION", + "KbdInteractiveAuthentication", + "Kbdinteractiveauthentication", + "LOGINGRACETIME", + "LoginGraceTime", + "Logingracetime", + "MACS", + "MACs", + "MAXAUTHTRIES", + "MAXSTARTUPS", + "Macs", + "Match", + "MaxAuthTries", + "MaxStartups", + "Maxauthtries", + "Maxstartups", + "PERMITEMPTYPASSWORDS", + "PERMITROOTLOGIN", + "PROTOCOL", + "PasswordAuthentication", + "PermitEmptyPasswords", + "PermitRootLogin", + "Permitemptypasswords", + "Permitrootlogin", + "Port", + "Protocol", + "USEPAM", + "UsePAM", + "UsePam", + "Usepam", + "allowusers", + "challengeresponseauthentication", + "ciphers", + "clientalivecountmax", + "clientaliveinterval", + "denyusers", + "kbdinteractiveauthentication", + "logingracetime", + "macs", + "maxauthtries", + "maxstartups", + "permitemptypasswords", + "permitrootlogin", + "protocol", + "usepam" + ], + "symbolic_name": "sshd_config" + }, + { + "file": "/etc/ssh/ssh_config", + "pattern": [ + "Host", + "ProxyCommand" + ], + "symbolic_name": "ssh_config" + }, + { + "file": "/usr/share/foreman/.ssh/ssh_config", + "pattern": [ + "[CAUTION] This_is_the default_filter_string_for_all_large_files!" 
+ ], + "symbolic_name": "ssh_foreman_config" + }, + { + "file": "/usr/share/foreman-proxy/.ssh/ssh_config", + "pattern": [ + "Host", + "ProxyCommand" + ], + "symbolic_name": "ssh_foreman_proxy_config" + }, + { + "file": "/etc/sssd/sssd.conf", + "pattern": [], + "symbolic_name": "sssd_config" + }, + { + "file": "/etc/swift/swift.conf", + "pattern": [], + "symbolic_name": "swift_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/swift.conf", + "pattern": [], + "symbolic_name": "swift_conf" + }, + { + "file": "/etc/swift/object-expirer.conf", + "pattern": [], + "symbolic_name": "swift_object_expirer_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/object-expirer.conf", + "pattern": [], + "symbolic_name": "swift_object_expirer_conf" + }, + { + "file": "/etc/swift/proxy-server.conf", + "pattern": [], + "symbolic_name": "swift_proxy_server_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/proxy-server.conf", + "pattern": [], + "symbolic_name": "swift_proxy_server_conf" + }, + { + "file": "/sys/kernel/debug/sched_features", + "pattern": [], + "symbolic_name": "sys_kernel_sched_features" + }, + { + "file": "/etc/sysconfig/kdump", + "pattern": [], + "symbolic_name": "sysconfig_kdump" + }, + { + "file": "/etc/sysconfig/libvirt-guests", + "pattern": [], + "symbolic_name": "sysconfig_libvirt_guests" + }, + { + "file": "/etc/sysconfig/memcached", + "pattern": [], + "symbolic_name": "sysconfig_memcached" + }, + { + "file": "/var/lib/config-data/memcached/etc/sysconfig/memcached", + "pattern": [], + "symbolic_name": "sysconfig_memcached" + }, + { + "file": "/etc/sysconfig/()*mongod", + "pattern": [], + "symbolic_name": "sysconfig_mongod" + }, + { + "file": "/etc/sysconfig/ntpd", + "pattern": [], + "symbolic_name": "sysconfig_ntpd" + }, + { + "file": "/etc/sysconfig/network", + "pattern": [], + "symbolic_name": "sysconfig_network" + }, + { + "file": "/etc/opt/rh/rh-mongodb26/sysconfig/()*mongod", + "pattern": [], + "symbolic_name": "sysconfig_rh_mongodb26" + }, + { + "file": "/etc/sysconfig/prelink", + "pattern": [], + "symbolic_name": "sysconfig_prelink" + }, + { + "file": "/etc/sysconfig/sshd", + "pattern": [], + "symbolic_name": "sysconfig_sshd" + }, + { + "file": "/etc/sysconfig/virt-who", + "pattern": [], + "symbolic_name": "sysconfig_virt_who" + }, + { + "file": "/etc/sysctl.conf", + "pattern": [], + "symbolic_name": "sysctl_conf" + }, + { + "file": "/etc/systemd/logind.conf", + "pattern": [], + "symbolic_name": "systemd_logind_conf" + }, + { + "file": "/etc/systemd/system.conf.d/origin-accounting.conf", + "pattern": [], + "symbolic_name": "systemd_system_origin_accounting" + }, + { + "file": "/etc/systemd/system.conf", + "pattern": [], + "symbolic_name": "systemd_system_conf" + }, + { + "file": "/etc/sysconfig/rhn/systemid", + "pattern": [], + "symbolic_name": "systemid" + }, + { + "file": "/sys/kernel/mm/transparent_hugepage/enabled", + "pattern": [], + "symbolic_name": "thp_enabled" + }, + { + "file": "/sys/kernel/mm/transparent_hugepage/use_zero_page", + "pattern": [], + "symbolic_name": "thp_use_zero_page" + }, + { + "file": "/etc/tmpfiles.d/()*.*\\.conf", + "pattern": [], + "symbolic_name": "tmpfilesd" + }, + { + "file": "/usr/lib/tmpfiles.d/()*.*\\.conf", + "pattern": [], + "symbolic_name": "tmpfilesd" + }, + { + "file": "/run/tmpfiles.d/()*.*\\.conf", + "pattern": [], + "symbolic_name": "tmpfilesd" + }, + { + "file": "/etc/tuned.conf", + "pattern": [], + "symbolic_name": "tuned_conf" + }, + { + "file": 
"/etc/sysconfig/rhn/up2date", + "pattern": [], + "symbolic_name": "up2date" + }, + { + "file": "/var/log/up2date", + "pattern": [ + "The certificate /usr/share/rhn/RHNS-CA-CERT is expired" + ], + "symbolic_name": "up2date_log" + }, + { + "file": "/usr/lib/systemd/journald.conf.d/()*.+\\.conf", + "pattern": [], + "symbolic_name": "usr_journald_conf_d" + }, + { + "file": "/etc/vdsm/vdsm.conf", + "pattern": [], + "symbolic_name": "vdsm_conf" + }, + { + "file": "/var/log/vdsm/vdsm.log", + "pattern": [ + "(mailbox-spm) [storage.Misc.excCmd] /usr/bin/taskset --cpu-list", + "Bad volume specification", + "Changed state to Down: 'NoneType' object has no attribute 'attrib'", + "Changed state to Down: internal error: Attempted double use of PCI slot", + "ERROR (mailbox-spm) [storage.MailBox.SpmMailMonitor]", + "INFO", + "RPC call Host.setupNetworks failed", + "Stopping connection", + "The name org.fedoraproject.FirewallD1 was not provided by any .service files", + "The vm start process failed", + "_report_inconsistency", + "lastCheck", + "libvirtError: internal error: failed to format device alias", + "looking for unfetched domain", + "storage.TaskManager.Task" + ], + "symbolic_name": "vdsm_log" + }, + { + "file": "/etc/vdsm/vdsm.id", + "pattern": [], + "symbolic_name": "vdsm_id" + }, + { + "file": "/etc/vdsm/logger.conf", + "pattern": [], + "symbolic_name": "vdsm_logger_conf" + }, + { + "file": "/etc/()*virt-who\\.conf", + "pattern": [ + "[", + "configs", + "debug", + "env", + "interval", + "log_", + "oneshot", + "owner", + "server", + "type" + ], + "symbolic_name": "virt_who_conf" + }, + { + "file": "/etc/virt-who.d/()*.*\\.conf", + "pattern": [ + "[", + "configs", + "debug", + "env", + "interval", + "log_", + "oneshot", + "owner", + "server", + "type" + ], + "symbolic_name": "virt_who_conf" + }, + { + "file": "/etc/libvirt/virtlogd.conf", + "pattern": [ + "max_size" + ], + "symbolic_name": "virtlogd_conf" + }, + { + "file": "/sys/kernel/mm/swap/vma_ra_enabled", + "pattern": [], + "symbolic_name": "vma_ra_enabled" + }, + { + "file": "/etc/pam.d/vsftpd", + "pattern": [], + "symbolic_name": "vsftpd" + }, + { + "file": "/etc/vsftpd/vsftpd.conf", + "pattern": [ + "LOCAL_ENABLE", + "Local_Enable", + "SSL_ENABLE", + "SSL_SSLV3", + "Ssl_Enable", + "Ssl_Sslv3", + "local_enable", + "ssl_enable", + "ssl_sslv3" + ], + "symbolic_name": "vsftpd_conf" + }, + { + "file": "/sys/kernel/debug/x86/ibpb_enabled", + "pattern": [], + "symbolic_name": "x86_ibpb_enabled" + }, + { + "file": "/sys/kernel/debug/x86/ibrs_enabled", + "pattern": [], + "symbolic_name": "x86_ibrs_enabled" + }, + { + "file": "/sys/kernel/debug/x86/pti_enabled", + "pattern": [], + "symbolic_name": "x86_pti_enabled" + }, + { + "file": "/etc/()*xinetd\\.conf", + "pattern": [], + "symbolic_name": "xinetd_conf" + }, + { + "file": "/etc/xinetd.d/()*.*", + "pattern": [], + "symbolic_name": "xinetd_conf" + }, + { + "file": "/etc/yum.conf", + "pattern": [], + "symbolic_name": "yum_conf" + }, + { + "file": "/etc/yum.repos.d/()*.*\\.repo", + "pattern": [], + "symbolic_name": "yum_repos_d" + }, + { + "file": "/var/log/redhat_access_proactive/redhat_access_proactive.log", + "pattern": [], + "symbolic_name": "redhat_access_proactive_log" + }, + { + "file": "/etc/logrotate.conf", + "pattern": [], + "symbolic_name": "logrotate_conf" + }, + { + "file": "/etc/logrotate.d/().*", + "pattern": [], + "symbolic_name": "logrotate_conf" + }, + { + "file": "/etc/rhsm/facts/virt_uuid.facts", + "pattern": [], + "symbolic_name": "virt_uuid_facts" + }, + { + "file": 
"/var/log/containers/gnocchi/gnocchi-metricd.log", + "pattern": [ + "ObjectNotFound: error opening pool 'metrics'" + ], + "symbolic_name": "gnocchi_metricd_log" + }, + { + "file": "/var/log/gnocchi/metricd.log", + "pattern": [ + "ObjectNotFound: error opening pool 'metrics'" + ], + "symbolic_name": "gnocchi_metricd_log" + }, + { + "file": "/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", + "pattern": [ + "[", + "ceph", + "ceph_pool", + "driver", + "storage" + ], + "symbolic_name": "gnocchi_conf" + }, + { + "file": "/etc/gnocchi/gnocchi.conf", + "pattern": [ + "[", + "ceph", + "ceph_pool", + "driver", + "storage" + ], + "symbolic_name": "gnocchi_conf" + }, + { + "file": "/etc/neutron/neutron.conf", + "pattern": [ + "[", + "debug", + "dhcp_agents_per_network" + ], + "symbolic_name": "neutron_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf", + "pattern": [ + "[", + "debug", + "dhcp_agents_per_network" + ], + "symbolic_name": "neutron_conf" + }, + { + "file": "/etc/neutron/dhcp_agent.ini", + "pattern": [ + "[", + "force_metadata" + ], + "symbolic_name": "neutron_dhcp_agent_ini" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini", + "pattern": [ + "[", + "force_metadata" + ], + "symbolic_name": "neutron_dhcp_agent_ini" + }, + { + "file": "/etc/neutron/plugin.ini", + "pattern": [], + "symbolic_name": "neutron_plugin_ini" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugin.ini", + "pattern": [], + "symbolic_name": "neutron_plugin_ini" + }, + { + "file": "/etc/zipl.conf", + "pattern": [], + "symbolic_name": "zipl_conf" + }, + { + "file": "/etc/smart_proxy_dynflow_core/settings.yml", + "pattern": [ + ":database:" + ], + "symbolic_name": "smartpdc_settings" + }, + { + "file": "/etc/neutron/l3_agent.ini", + "pattern": [ + "[", + "agent_mode" + ], + "symbolic_name": "neutron_l3_agent_ini" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/l3_agent.ini", + "pattern": [ + "[", + "agent_mode" + ], + "symbolic_name": "neutron_l3_agent_ini" + }, + { + "file": "/var/log/neutron/l3-agent.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Duplicate iptables rule detected", + "Error while deleting router", + "Stderr: Another app is currently holding the xtables lock", + "Timed out waiting for RPC response" + ], + "symbolic_name": "neutron_l3_agent_log" + }, + { + "file": "/etc/neutron/metadata_agent.ini", + "pattern": [ + "[", + "auth_url" + ], + "symbolic_name": "neutron_metadata_agent_ini" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini", + "pattern": [ + "[", + "auth_url" + ], + "symbolic_name": "neutron_metadata_agent_ini" + }, + { + "file": "/var/log/neutron/metadata-agent.log", + "pattern": [ + "Unauthorized: {\"error\": {\"message\": \"The resource could not be found.\", \"code\": 404, \"title\": \"Not Found\"}}" + ], + "symbolic_name": "neutron_metadata_agent_log" + }, + { + "file": "/var/log/containers/neutron/metadata-agent.log", + "pattern": [ + "Unauthorized: {\"error\": {\"message\": \"The resource could not be found.\", \"code\": 404, \"title\": \"Not Found\"}}" + ], + "symbolic_name": "neutron_metadata_agent_log" + }, + { + "file": "/etc/neutron/plugins/ml2/ml2_conf.ini", + "pattern": [ + "[" + ], + "symbolic_name": "neutron_ml2_conf" + }, + { + "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/ml2_conf.ini", + "pattern": [ + 
"[" + ], + "symbolic_name": "neutron_ml2_conf" + }, + { + "file": "/var/log/neutron/openvswitch-agent.log", + "pattern": [ + "Agent main thread died of an exception", + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp", + "u'device_owner': u'network:router_interface_distributed'" + ], + "symbolic_name": "neutron_ovs_agent_log" + }, + { + "file": "/var/log/containers/neutron/openvswitch-agent.log", + "pattern": [ + "Agent main thread died of an exception", + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Timed out waiting for RPC response", + "neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp", + "u'device_owner': u'network:router_interface_distributed'" + ], + "symbolic_name": "neutron_ovs_agent_log" + }, + { + "file": "/usr/libexec/setup-named-chroot.sh", + "pattern": [ + "/", + "ROOTDIR_MOUNT" + ], + "symbolic_name": "setup_named_chroot" + }, + { + "file": "/usr/sap/hostctrl/exe/host_profile", + "pattern": [ + "DIR_", + "SAPSYSTEM" + ], + "symbolic_name": "sap_host_profile" + }, + { + "file": "/proc/sys/kernel/sched_rt_runtime_us", + "pattern": [], + "symbolic_name": "sched_rt_runtime_us" + }, + { + "file": "/usr/lib/udev/rules.d/59-fc-wwpn-id.rules", + "pattern": [ + "ENV{FC_INITIATOR_WWPN}", + "ENV{FC_TARGET_LUN}", + "ENV{FC_TARGET_WWPN}" + ], + "symbolic_name": "udev_fc_wwpn_id_rules" + } + ], + "globs": [ + { + "glob": "/sys/devices/system/cpu/cpu[0-9]*/online", + "symbolic_name": "cpu_cores", + "pattern": [] + }, + { + "glob": "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list", + "symbolic_name": "cpu_siblings", + "pattern": [] + }, + { + "glob": "/sys/devices/system/cpu/vulnerabilities/*", + "symbolic_name": "cpu_vulns", + "pattern": [] + }, + { + "glob": "/sys/class/net/*/address", + "symbolic_name": "mac_addresses", + "pattern": [] + }, + { + "glob": "/etc/httpd/conf.d/*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf" + }, + { + "glob": "/etc/httpd/conf*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf" + }, + { + "glob": "/opt/rh/httpd24/root/etc/httpd/conf.d/*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf_scl_httpd24" + }, + { + "glob": "/opt/rh/httpd24/root/etc/httpd/conf*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf_scl_httpd24" + }, + { + "glob": "/opt/rh/jbcs-httpd24/root/etc/httpd/conf.d/*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf_scl_jbcs_httpd24" + }, + { + "glob": "/opt/rh/jbcs-httpd24/root/etc/httpd/conf*/*.conf", + "pattern": [], + "symbolic_name": "httpd_conf_scl_jbcs_httpd24" + }, + { + "glob": "/sys/class/net/bond[0-9]*/bonding/tlb_dynamic_lb", + "symbolic_name": "bond_dynamic_lb", + "pattern": [] + }, + { + "glob": "/boot/loader/entries/*.conf", + "symbolic_name": "boot_loader_entries", + "pattern": [] + }, + { + "glob": "/var/opt/amq-broker/*/etc/broker.xml", + "symbolic_name": "amq_broker", + "pattern": [] + }, + { + "glob": "/boot/config-*", + "symbolic_name": "kernel_config", + "pattern": [ + "CONFIG_BPF_SYSCALL", + "CONFIG_PREEMPT_RT_FULL", + "CONFIG_SMP" + ] + }, + { + "glob": "/etc/krb5.conf.d/*", + "symbolic_name": "krb5_conf_d", + "pattern": [] + }, + { + "glob": "/etc/nginx/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": "/etc/nginx/*.d/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": "/etc/opt/rh/rh-nginx*/nginx/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": 
"/etc/opt/rh/rh-nginx*/nginx/*.d/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": "/opt/rh/nginx*/root/etc/nginx/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": "/opt/rh/nginx*/root/etc/nginx/*.d/*.conf", + "pattern": [], + "symbolic_name": "nginx_conf" + }, + { + "glob": "/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us", + "symbolic_name": "kubepods_cpu_quota", + "pattern": [] + }, + { + "glob": "/var/log/ceph/ceph.log*", + "pattern": [ + "[WRN] slow request" + ], + "symbolic_name": "ceph_log" + }, + { + "glob": "/var/log/libvirt/qemu/*.log", + "pattern": [ + "[CAUTION] This_is_the default_filter_string_for_all_large_files!" + ], + "symbolic_name": "libvirtd_qemu_log" + }, + { + "glob": "/sys/bus/pci/devices/*/mlx4_port[0-9]", + "symbolic_name": "mlx4_port", + "pattern": [] + }, + { + "glob": "/var/opt/rh/rh-mysql*/log/mysql/mysqld.log", + "symbolic_name": "mysql_log", + "pattern": [ + "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "Too many open files", + "[ERROR]" + ] + }, + { + "glob": "/sys/devices/system/node/node[0-9]*/cpulist", + "symbolic_name": "numa_cpus", + "pattern": [] + }, + { + "glob": "/sys/class/scsi_host/host[0-9]*/fwrev", + "symbolic_name": "scsi_fwver", + "pattern": [] + }, + { + "glob": "/sys/class/scsi_host/host[0-9]*/eh_deadline", + "symbolic_name": "scsi_eh_deadline", + "pattern": [] + } + ], + "meta_specs": { + "analysis_target": { + "archive_file_name": "/insights_data/analysis_target" + }, + "branch_info": { + "archive_file_name": "/branch_info" + }, + "machine-id": { + "archive_file_name": "/insights_data/machine-id" + }, + "metadata.json": { + "archive_file_name": "metadata.json" + }, + "uploader_log": { + "archive_file_name": "/insights_data/insights_logs/insights.log" + } + }, + "pre_commands": { + "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" + }, + "version": "2020-07-16T14:49:33.251429" +} \ No newline at end of file From f5f51e15ed5a217967485fac041d725c2597a440 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Fri, 17 Jul 2020 13:11:26 -0400 Subject: [PATCH 115/892] add --list-specs option to refer to component docs (#2657) * add --list-specs option to refer to component docs Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 6 ++++++ insights/client/config.py | 6 ++++++ insights/client/phase/v1.py | 4 ++++ insights/tests/client/phase/test_LEGACY_post_update.py | 1 + insights/tests/client/phase/test_post_update.py | 1 + 5 files changed, 18 insertions(+) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index d9f8b88f3..1f2477d7e 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -648,6 +648,12 @@ def initialize_tags(self): tags["group"] = self.config.group write_tags(tags) + def list_specs(self): + logger.info("For a full list of insights-core datasources, please refer to https://insights-core.readthedocs.io/en/latest/specs_catalog.html") + logger.info("The items in General Datasources can be selected for omission by adding them to the 'components' section of file-redaction.yaml") + logger.info("When specifying these items in file-redaction.yaml, they must be prefixed with 'insights.specs.default.DefaultSpecs.', i.e. 'insights.specs.default.DefaultSpecs.httpd_V'") + logger.info("This information applies only to Insights Core collection. 
diff --git a/insights/client/config.py b/insights/client/config.py
index 2a91dd1eb..9c03620aa 100644
--- a/insights/client/config.py
+++ b/insights/client/config.py
@@ -193,6 +193,12 @@ def _core_collect_default():
         'action': 'store_true',
         'group': 'debug'
     },
+    'list_specs': {
+        'default': False,
+        'opt': ['--list-specs'],
+        'help': 'Show insights-client collection specs',
+        'action': 'store_true'
+    },
     'logging_file': {
         'default': constants.default_log_file,
         'opt': ['--logging-file'],
diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py
index 9727ed3ef..01ca16122 100644
--- a/insights/client/phase/v1.py
+++ b/insights/client/phase/v1.py
@@ -128,6 +128,10 @@ def post_update(client, config):
     logger.debug("CONFIG: %s", config)
     print_egg_versions()
 
+    if config.list_specs:
+        client.list_specs()
+        sys.exit(constants.sig_kill_ok)
+
     if config.show_results:
         try:
             client.show_results()
diff --git a/insights/tests/client/phase/test_LEGACY_post_update.py b/insights/tests/client/phase/test_LEGACY_post_update.py
index a18568332..6325b4090 100644
--- a/insights/tests/client/phase/test_LEGACY_post_update.py
+++ b/insights/tests/client/phase/test_LEGACY_post_update.py
@@ -16,6 +16,7 @@ def patch_insights_config(old_function):
         "return_value.load_all.return_value.display_name": False,
         "return_value.load_all.return_value.register": False,
         "return_value.load_all.return_value.diagnosis": None,
+        "return_value.load_all.return_value.list_specs": False,
         "return_value.load_all.return_value.show_results": False,
         "return_value.load_all.return_value.check_results": False,
         "return_value.load_all.return_value.core_collect": False})
diff --git a/insights/tests/client/phase/test_post_update.py b/insights/tests/client/phase/test_post_update.py
index b22faed83..110996c05 100644
--- a/insights/tests/client/phase/test_post_update.py
+++ b/insights/tests/client/phase/test_post_update.py
@@ -19,6 +19,7 @@ def patch_insights_config(old_function):
         "return_value.load_all.return_value.diagnosis": None,
         "return_value.load_all.return_value.reregister": False,
         "return_value.load_all.return_value.payload": None,
+        "return_value.load_all.return_value.list_specs": False,
         "return_value.load_all.return_value.show_results": False,
         "return_value.load_all.return_value.check_results": False,
         "return_value.load_all.return_value.core_collect": False})

From 6bff5d088a5dd3d63b326f7cafebbc774f683003 Mon Sep 17 00:00:00 2001
From: Chris Sams
Date: Mon, 20 Jul 2020 11:16:27 -0500
Subject: [PATCH 116/892] Apply default timeout to commands during collection.
 (#2669)

CommandOutputProvider gets the timeout value from the ExecutionContext
if one isn't supplied in the related datasource definition.

Signed-off-by: Christopher Sams
---
 insights/core/spec_factory.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py
index ee2ebf45f..a7d571678 100644
--- a/insights/core/spec_factory.py
+++ b/insights/core/spec_factory.py
@@ -381,7 +381,8 @@ def write(self, dst):
         args = self.create_args()
         fs.ensure_path(os.path.dirname(dst))
         if args:
-            p = Pipeline(*args, timeout=self.timeout, env=self.create_env())
+            timeout = self.timeout or self.ctx.timeout
+            p = Pipeline(*args, timeout=timeout, env=self.create_env())
             return p.write(dst, keep_rc=self.keep_rc)
 
     def __repr__(self):
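The single functional change in this patch is the fallback `timeout = self.timeout or self.ctx.timeout`. A self-contained sketch of that precedence rule; the class names here are hypothetical stand-ins, not the insights API:

    class Context(object):
        """Stand-in for an execution context carrying a default timeout."""
        def __init__(self, timeout=120):
            self.timeout = timeout

    class Provider(object):
        """A per-spec timeout, when given, overrides the context default."""
        def __init__(self, ctx, timeout=None):
            self.ctx = ctx
            self.timeout = timeout

        def effective_timeout(self):
            # Mirrors the diff: an explicit value wins, else the context's.
            return self.timeout or self.ctx.timeout

    ctx = Context(timeout=120)
    assert Provider(ctx).effective_timeout() == 120
    assert Provider(ctx, timeout=10).effective_timeout() == 10

One consequence of using `or` is that a timeout of 0 is treated as unset and falls back to the context default, which is presumably acceptable since a zero-second command timeout is not meaningful.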
From 7c1652ef42de8b17495671bd0b5fb3c79a3d26da Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Mon, 20 Jul 2020 13:27:15 -0400
Subject: [PATCH 117/892] load client config timeout in core (#2667)

Signed-off-by: Jeremy Crafts
---
 insights/client/core_collector.py | 2 +-
 insights/collect.py               | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py
index 8be1aeb49..ba8c09669 100644
--- a/insights/client/core_collector.py
+++ b/insights/client/core_collector.py
@@ -49,7 +49,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report):
             'components': rm_conf.get('components', [])
         }
 
-        collected_data_path = collect.collect(tmp_path=self.archive.tmp_dir, rm_conf=core_blacklist)
+        collected_data_path = collect.collect(tmp_path=self.archive.tmp_dir, rm_conf=core_blacklist, client_timeout=self.config.cmd_timeout)
         # update the archive dir with the reported data location from Insights Core
         if not collected_data_path:
             raise RuntimeError('Error running collection: no output path defined.')
diff --git a/insights/collect.py b/insights/collect.py
index a088367f2..b7a636386 100755
--- a/insights/collect.py
+++ b/insights/collect.py
@@ -214,7 +214,7 @@ def get_pool(parallel, kwargs):
         yield None
 
 
-def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=None):
+def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=None, client_timeout=None):
     """
     This is the collection entry point. It accepts a manifest, a temporary
     directory in which to store output, and a boolean for optional compression.
@@ -231,6 +231,7 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=No
         rm_conf (dict): Client-provided python dict containing keys
             "commands", "files", and "keywords", to be injected
             into the manifest blacklist.
+        client_timeout (int): Client-provided command timeout value
 
     Returns:
         The full path to the created tar.gz or workspace.
     """
@@ -247,6 +248,11 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=No
     apply_blacklist(client.get("blacklist", {}))
 
     # insights-client
+    if client_timeout:
+        try:
+            client['context']['args']['timeout'] = client_timeout
+        except LookupError:
+            log.warning('Could not set timeout option.')
     rm_conf = rm_conf or {}
     apply_blacklist(rm_conf)
     for component in rm_conf.get('components', []):
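The collect.py hunk injects the client's cmd_timeout into the parsed manifest before the execution context is built, and the LookupError guard keeps collection alive if the manifest lacks the expected nesting. A standalone sketch of that guarded update; the manifest dict here is a hypothetical stand-in for the parsed YAML:

    def set_client_timeout(client, client_timeout):
        # Mirrors the guarded nested write added to collect() above: a
        # missing 'context' or 'args' key raises KeyError, a subclass of
        # LookupError, and collection continues with default timeouts.
        if client_timeout:
            try:
                client['context']['args']['timeout'] = client_timeout
            except LookupError:
                print('Could not set timeout option.')

    client = {'context': {'args': {}}}
    set_client_timeout(client, 30)
    assert client['context']['args']['timeout'] == 30

    set_client_timeout({}, 30)  # malformed manifest: warns, does not raise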
""" @@ -247,6 +248,11 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=No apply_blacklist(client.get("blacklist", {})) # insights-client + if client_timeout: + try: + client['context']['args']['timeout'] = client_timeout + except LookupError: + log.warning('Could not set timeout option.') rm_conf = rm_conf or {} apply_blacklist(rm_conf) for component in rm_conf.get('components', []): From 8ef1a6efd744c59472aab654989c464b67d3b812 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Tue, 21 Jul 2020 02:54:22 +0200 Subject: [PATCH 118/892] Fix small errors in docstring in file_permissions.py (#2666) Signed-off-by: Jitka Obselkova --- insights/util/file_permissions.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/insights/util/file_permissions.py b/insights/util/file_permissions.py index 2dbc428e9..26fbc2590 100644 --- a/insights/util/file_permissions.py +++ b/insights/util/file_permissions.py @@ -217,7 +217,7 @@ def only_root_can_read(self, root_group_can_read=True): read bits for root user/group are not checked because root can read/write anything regardless of the read/write permissions. - When called with ``root_root_group_can_read`` = ``True``: + When called with ``root_group_can_read`` = ``True``: * owner must be root * and 'others' permissions must not contain read @@ -245,7 +245,7 @@ def only_root_can_read(self, root_group_can_read=True): -??r??-?? nonroot root --------- nonroot nonroot - When called with ``root_root_group_can_read`` = ``False``: + When called with ``root_group_can_read`` = ``False``: * owner must be root * and 'group' and 'others' permissions must not contain read @@ -297,7 +297,7 @@ def only_root_can_write(self, root_group_can_write=True): write bits for root user/group are not checked because root can read/write anything regardless of the read/write permissions. - When called with ``root_root_group_can_write`` = ``True``: + When called with ``root_group_can_write`` = ``True``: * owner must be root * and 'others' permissions must not contain write @@ -324,7 +324,7 @@ def only_root_can_write(self, root_group_can_write=True): ?-??w??-? 
From a44cfefdb4082660eff79b8821133c978d7833d4 Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Tue, 21 Jul 2020 13:09:35 -0400
Subject: [PATCH 119/892] change to RuntimeError so it is caught by top level
 except (#2670)

* change to RuntimeError so it is caught by top level except

Signed-off-by: Jeremy Crafts

* fix unit test

Signed-off-by: Jeremy Crafts

* fix unit tests part 2

Signed-off-by: Jeremy Crafts
---
 insights/client/collection_rules.py                          | 2 +-
 insights/tests/client/collection_rules/test_get_conf_file.py | 2 +-
 insights/tests/client/test_collect.py                        | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py
index a24c6ebaf..eec5b3ea8 100644
--- a/insights/client/collection_rules.py
+++ b/insights/client/collection_rules.py
@@ -275,7 +275,7 @@ def get_conf_file(self):
             logger.debug(json.dumps(conf))
             return conf
 
-        raise ValueError("ERROR: Unable to download conf or read it from disk!")
+        raise RuntimeError("ERROR: Unable to download conf or read it from disk!")
 
     def get_conf_update(self):
         """
diff --git a/insights/tests/client/collection_rules/test_get_conf_file.py b/insights/tests/client/collection_rules/test_get_conf_file.py
index 074377c4b..228e2ef7b 100644
--- a/insights/tests/client/collection_rules/test_get_conf_file.py
+++ b/insights/tests/client/collection_rules/test_get_conf_file.py
@@ -120,5 +120,5 @@ def test_no_file_error(try_disk):
     """
     upload_conf = insights_upload_conf()
 
-    with raises(ValueError):
+    with raises(RuntimeError):
         upload_conf.get_conf_file()
diff --git a/insights/tests/client/test_collect.py b/insights/tests/client/test_collect.py
index d02cc1af8..d790d37f1 100644
--- a/insights/tests/client/test_collect.py
+++ b/insights/tests/client/test_collect.py
@@ -303,7 +303,7 @@ def test_file_signature_invalid(get_branch_info, validate_gpg_sig, data_collecto
     """
     config, pconn = collect_args()
     with patch_temp_conf_file():
-        with raises(ValueError):
+        with raises(RuntimeError):
             collect(config, pconn)
     validate_gpg_sig.assert_called()
 
@@ -370,7 +370,7 @@ def test_file_no_data(get_branch_info, try_disk, data_collector):
     Configuration from file is loaded from the "uploader.json" key.
     """
     config, pconn = collect_args()
-    with raises(ValueError):
+    with raises(RuntimeError):
        collect(config, pconn)
     data_collector.return_value.run_collection.assert_not_called()
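The rationale in the subject line is purely about the exception hierarchy: the client's top-level handler catches RuntimeError, so a ValueError escaping get_conf_file() would surface as a raw traceback instead. A minimal sketch of that interaction; the handler shape here is hypothetical, not the actual client code:

    def get_conf_file():
        # Same message as the patched code; only the exception class changed.
        raise RuntimeError("ERROR: Unable to download conf or read it from disk!")

    try:
        get_conf_file()
    except RuntimeError as e:
        # With the old ValueError this handler would be skipped and the
        # failure would escape as an unhandled traceback.
        print("collection failed: %s" % e)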
""" config, pconn = collect_args() - with raises(ValueError): + with raises(RuntimeError): collect(config, pconn) data_collector.return_value.run_collection.assert_not_called() From 6803b93b2a10117a910db9cd40da8503a5c2f639 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 23 Jul 2020 03:15:29 +0800 Subject: [PATCH 120/892] New parsers for SAP trace files (#2636) * New parsers for SAP trace files Signed-off-by: Xiangce Liu * fix the typo in insights_archive: glob_files Signed-off-by: Xiangce Liu * Add docstring for the re-implemented get_after Signed-off-by: Xiangce Liu * a bit more update to the docstring Signed-off-by: Xiangce Liu * add interfaces: sid and instance, and test Signed-off-by: Xiangce Liu * Remove the default filters Signed-off-by: Xiangce Liu * Remove the unused import Signed-off-by: Xiangce Liu * Add docstring for SAP datasources Signed-off-by: Xiangce Liu * Remove unrelated spec and refact the doc of sap_sid_name Signed-off-by: Xiangce Liu * Add the missed import of Sap Signed-off-by: Xiangce Liu --- .../sap_dev_trace_files.rst | 3 + insights/parsers/sap_dev_trace_files.py | 215 ++++++++++++++++++ .../parsers/tests/test_sap_dev_trace_files.py | 82 +++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 10 + insights/specs/insights_archive.py | 2 + 6 files changed, 314 insertions(+) create mode 100644 docs/shared_parsers_catalog/sap_dev_trace_files.rst create mode 100644 insights/parsers/sap_dev_trace_files.py create mode 100644 insights/parsers/tests/test_sap_dev_trace_files.py diff --git a/docs/shared_parsers_catalog/sap_dev_trace_files.rst b/docs/shared_parsers_catalog/sap_dev_trace_files.rst new file mode 100644 index 000000000..cebe9f08c --- /dev/null +++ b/docs/shared_parsers_catalog/sap_dev_trace_files.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sap_dev_trace_files + :members: + :show-inheritance: diff --git a/insights/parsers/sap_dev_trace_files.py b/insights/parsers/sap_dev_trace_files.py new file mode 100644 index 000000000..7308ab139 --- /dev/null +++ b/insights/parsers/sap_dev_trace_files.py @@ -0,0 +1,215 @@ +""" +SAP Trace Files - Files ``/usr/sap/SID/SNAME/work/dev_*`` +========================================================== + +Parsers included in this module are: + +SapDevDisp - Files ``/usr/sap/SID/SNAME/work/dev_disp`` +------------------------------------------------------- + +SapDevRd - Files ``/usr/sap/SID/SNAME/work/dev_rd`` +--------------------------------------------------- +""" +from insights import parser, LogFileOutput +from insights.specs import Specs +from insights.parsers import ParseException + + +class SapDevTraceFile(LogFileOutput): + """ + The Base class for parsing the SAP trace files. + """ + + def get_after(self, *args, **kwargs): + """ + .. warning:: + The ``get_after`` function is not supported by this Parser because + of the structure of the SAP trace files are totally different with + the log files expected by the base class``LogFileOutput``. + + Raises: + ParseException: Always raises ParseException. + """ + raise ParseException("get_after() is not supported by this Parser.") + + @property + def sid(self): + """ + The SID of this trace file. + """ + return self.file_path.lstrip('/').split('/')[2] + + @property + def instance(self): + """ + The instance name of this trace file. 
+ """ + return self.file_path.lstrip('/').split('/')[3] + + +@parser(Specs.sap_dev_disp) +class SapDevDisp(SapDevTraceFile): + """ + This class reads the SAP trace files ``/usr/sap/SID/SNAME/work/dev_disp`` + + Sample content of the file:: + + --------------------------------------------------- + trc file: "dev_disp", trc level: 1, release: "745" + --------------------------------------------------- + + Sun Aug 18 17:17:45 2019 + TRACE FILE TRUNCATED ( pid = 16955 ) + + sysno 00 + sid RH1 + systemid 390 (AMD/Intel x86_64 with Linux) + relno 7450 + patchlevel 0 + patchno 100 + intno 20151301 + make multithreaded, Unicode, 64 bit, optimized + profile /usr/sap/RH1/SYS/profile/RH1_D00_vm37-39 + pid 16955 + + Sun Aug 18 17:17:45 2019 + kernel runs with dp version 3000(ext=117000) (@(#) DPLIB-INT-VERSION-0+3000-UC) + length of sys_adm_ext is 500 bytes + + Scheduler info + -------------- + WP info + #dia = 10 + #btc = 3 + #standby = 0 + #max = 21 + General Scheduler info + preemptionInfo.isActive = true + preemptionInfo.timeslice = 500 + preemptionInfo.checkLoad = true + Prio Class High + maxRuntime[RQ_Q_PRIO_HIGH] = 600 sec + maxRuntimeHalf[RQ_Q_PRIO_HIGH] = 300 sec + Running requests[RQ_Q_PRIO_NORMAL] = 0 + Running requests[RQ_Q_PRIO_LOW] = 0 + + *** WARNING => DpHdlDeadWp: wp_adm slot for W7 has no pid [dpxxwp.c 1353] + DpSkipSnapshot: last snapshot created at Sun Aug 18 17:15:25 2019, skip new snapshot + *** WARNING => DpHdlDeadWp: wp_adm slot for W8 has no pid [dpxxwp.c 1353] + DpSkipSnapshot: last snapshot created at Sun Aug 18 17:15:25 2019, skip new snapshot + *** WARNING => DpHdlDeadWp: wp_adm slot for W9 has no pid [dpxxwp.c 1353] + + Sun Aug 18 17:17:45 2019 + DpSkipSnapshot: last snapshot created at Sun Aug 18 17:17:45 2019, skip new snapshot + DpCheckSapcontrolProcess: sapcontrol with pid 1479 terminated + *** WARNING => DpRequestProcessingCheck: potential request processing problem detected (14. check) [dpxxwp.c 4633] + + Examples: + >>> type(dev_disp) + + >>> dev_disp.file_path == '/usr/sap/RH1/D00/work/dev_disp' + True + >>> dev_disp.sid == 'RH1' + True + >>> dev_disp.instance == 'D00' + True + >>> len(dev_disp.get("WARNING")) + 4 + """ + pass + + +@parser(Specs.sap_dev_rd) +class SapDevRd(SapDevTraceFile): + """ + This class reads the SAP trace files ``/usr/sap/SID/SNAME/work/dev_rd`` + + Sample content of the file:: + + --------------------------------------------------- + trc file: "dev_rd", trc level: 1, release: "745" + --------------------------------------------------- + systemid 390 (AMD/Intel x86_64 with Linux) + relno 7450 + patchlevel 0 + patchno 100 + intno 20151301 + make multithreaded, Unicode, 64 bit, optimized + pid 16982 + + Thu Jul 18 02:59:37 2019 + gateway (version=745.2015.12.21 (with SSL support)) + Bind service (socket) to port + GwPrintMyHostAddr: my host addresses are : + * + * SWITCH TRC-LEVEL from 1 to 1 + * + ***LOG S00=> GwInitReader, gateway started ( 16982) [gwxxrd.c 1820] + systemid 390 (AMD/Intel x86_64 with Linux) + relno 7450 + patchlevel 0 + patchno 100 + intno 20151301 + make multithreaded, Unicode, 64 bit, optimized + pid 16982 + + Thu Jul 18 02:59:37 2019 + gateway (version=745.2015.12.21 (with SSL support)) + gw/reg_no_conn_info = 1 + * SWITCH TRC-RESOLUTION from 1 to 1 + gw/sim_mode : set to 0 + gw/logging : ACTION=Ss LOGFILE=gw_log-%y-%m-%d SWITCHTF=day MAXSIZEKB=100 + NI buffering enabled + CCMS: initialize CCMS Monitoring for ABAP instance with J2EE addin. 
+
+
+@parser(Specs.sap_dev_rd)
+class SapDevRd(SapDevTraceFile):
+    """
+    This class reads the SAP trace files ``/usr/sap/SID/SNAME/work/dev_rd``
+
+    Sample content of the file::
+
+        ---------------------------------------------------
+        trc file: "dev_rd", trc level: 1, release: "745"
+        ---------------------------------------------------
+        systemid   390 (AMD/Intel x86_64 with Linux)
+        relno      7450
+        patchlevel 0
+        patchno    100
+        intno      20151301
+        make       multithreaded, Unicode, 64 bit, optimized
+        pid        16982
+
+        Thu Jul 18 02:59:37 2019
+        gateway (version=745.2015.12.21 (with SSL support))
+        Bind service (socket) to port
+        GwPrintMyHostAddr: my host addresses are :
+        *
+        * SWITCH TRC-LEVEL from 1 to 1
+        *
+        ***LOG S00=> GwInitReader, gateway started ( 16982) [gwxxrd.c 1820]
+        systemid   390 (AMD/Intel x86_64 with Linux)
+        relno      7450
+        patchlevel 0
+        patchno    100
+        intno      20151301
+        make       multithreaded, Unicode, 64 bit, optimized
+        pid        16982
+
+        Thu Jul 18 02:59:37 2019
+        gateway (version=745.2015.12.21 (with SSL support))
+        gw/reg_no_conn_info = 1
+        * SWITCH TRC-RESOLUTION from 1 to 1
+        gw/sim_mode : set to 0
+        gw/logging : ACTION=Ss LOGFILE=gw_log-%y-%m-%d SWITCHTF=day MAXSIZEKB=100
+        NI buffering enabled
+        CCMS: initialize CCMS Monitoring for ABAP instance with J2EE addin.
+
+        Thu Jul 18 02:59:38 2019
+        CCMS: Initialized monitoring segment of size 60000000.
+        CCMS: Initialized CCMS Headers in the shared monitoring segment.
+        CCMS: Checking Downtime Configuration of Monitoring Segment.
+
+        Thu Jul 18 02:59:39 2019
+        CCMS: AlMsUpload called by wp 1002.
+        CCMS: AlMsUpload successful for /usr/sap/RH1/D00/log/ALMTTREE (542 MTEs).
+
+        Thu Jul 18 02:59:40 2019
+        GwIInitSecInfo: secinfo version = 2
+        GwIRegInitRegInfo: reg_info file /usr/sap/RH1/D00/data/reginfo not found
+
+        ********** SERVER SNAPSHOT 2727 - begin **********
+
+        -----------------------
+        --- SYS TABLE DUMP ----
+        -----------------------
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+        | Indx | State | Hostname             | Addr            | Port / TP  | Type  | Last-Request             |
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+        | 4    | CONNE | vm37-39              | 127.0.0.1       | IGS.RH1    | REGIS | Mon Aug 19 05:15:15 2019 |
+        | 3    | CONNE | vm37-39              | 127.0.0.1       | IGS.RH1    | REGIS | Mon Aug 19 05:15:15 2019 |
+        | 2    | CONNE | vm37-39              | 127.0.0.1       | IGS.RH1    | REGIS | Mon Aug 19 05:15:15 2019 |
+        | 1    | CONNE | vm37-39              | 127.0.0.1       | IGS.RH1    | REGIS | Mon Aug 19 05:15:15 2019 |
+        | 0    | CONNE | vm37-39.gsslab.pek2. | 10.72.37.39     | sapgw00    | LOCAL | Wed Aug  7 22:58:50 2019 |
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+        -----------------------
+        --- GWTBL TABLE DUMP --
+        -----------------------
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+        | Indx | State | Hostname             | Addr            | Port       | Type  | Last-Request             |
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+        | 0    | CONNE | vm37-39.gsslab.pek2. | 10.72.37.39     | sapgw00    | LOCAL | Wed Aug  7 22:58:50 2019 |
+        +------+-------+----------------------+-----------------+------------+-------+--------------------------+
+
+        ********** SERVER SNAPSHOT 2727 - end ************
+
+    Examples:
+        >>> type(dev_rd)
+        <class 'insights.parsers.sap_dev_trace_files.SapDevRd'>
+        >>> dev_rd.file_path == '/usr/sap/RH2/D03/work/dev_rd'
+        True
+        >>> dev_rd.sid == 'RH2'
+        True
+        >>> dev_rd.instance == 'D03'
+        True
+        >>> len(dev_rd.get("CCMS:"))
+        6
+    """
+    pass
diff --git a/insights/parsers/tests/test_sap_dev_trace_files.py b/insights/parsers/tests/test_sap_dev_trace_files.py
new file mode 100644
index 000000000..f02a65b38
--- /dev/null
+++ b/insights/parsers/tests/test_sap_dev_trace_files.py
@@ -0,0 +1,82 @@
+import pytest
+import doctest
+from insights.parsers import sap_dev_trace_files, ParseException
+from insights.parsers.sap_dev_trace_files import SapDevDisp, SapDevRd
+from insights.tests import context_wrap
+
+SAP_DEV_DISP = """
+---------------------------------------------------
+trc file: "dev_disp", trc level: 1, release: "745"
+---------------------------------------------------
+
+*** WARNING => DpHdlDeadWp: wp_adm slot for W7 has no pid [dpxxwp.c 1353]
+DpSkipSnapshot: last snapshot created at Sun Aug 18 17:15:25 2019, skip new snapshot
+*** WARNING => DpHdlDeadWp: wp_adm slot for W8 has no pid [dpxxwp.c 1353]
+DpSkipSnapshot: last snapshot created at Sun Aug 18 17:15:25 2019, skip new snapshot
+*** WARNING => DpHdlDeadWp: wp_adm slot for W9 has no pid [dpxxwp.c 1353]
+
+Sun Aug 18 17:17:45 2019
+DpSkipSnapshot: last snapshot created at Sun Aug 18 17:17:45 2019, skip new snapshot
+DpCheckSapcontrolProcess: sapcontrol with pid 1479 terminated
+*** WARNING => DpRequestProcessingCheck: potential request processing problem detected (14. check) [dpxxwp.c 4633]
+""".strip()
+
+SAP_DEV_RD = """
+---------------------------------------------------
+trc file: "dev_rd", trc level: 1, release: "745"
+---------------------------------------------------
+Thu Jul 18 02:59:37 2019
+gateway (version=745.2015.12.21 (with SSL support))
+gw/reg_no_conn_info = 1
+* SWITCH TRC-RESOLUTION from 1 to 1
+gw/sim_mode : set to 0
+gw/logging : ACTION=Ss LOGFILE=gw_log-%y-%m-%d SWITCHTF=day MAXSIZEKB=100
+NI buffering enabled
+CCMS: initialize CCMS Monitoring for ABAP instance with J2EE addin.
+
+Thu Jul 18 02:59:38 2019
+CCMS: Initialized monitoring segment of size 60000000.
+CCMS: Initialized CCMS Headers in the shared monitoring segment.
+CCMS: Checking Downtime Configuration of Monitoring Segment.
+
+Thu Jul 18 02:59:39 2019
+CCMS: AlMsUpload called by wp 1002.
+CCMS: AlMsUpload successful for /usr/sap/RH1/D00/log/ALMTTREE (542 MTEs).
+ +Thu Jul 18 02:59:40 2019 +GwIInitSecInfo: secinfo version = 2 +GwIRegInitRegInfo: reg_info file /usr/sap/RH1/D00/data/reginfo not found +""".strip() + + +SapDevDisp.keep_scan('warning_lines', "WARNING") +SapDevRd.keep_scan('ccms', "CCMS:") +DISP_PATH = '/usr/sap/RH1/D00/work/dev_disp' +RD_PATH = '/usr/sap/RH2/D03/work/dev_rd' + + +def test_dev_disp(): + dev_disp = SapDevDisp(context_wrap(SAP_DEV_DISP, path=DISP_PATH)) + assert len(dev_disp.warning_lines) == len(dev_disp.get("WARNING")) + assert dev_disp.sid == DISP_PATH.split('/')[3] + assert dev_disp.instance == DISP_PATH.split('/')[4] + with pytest.raises(ParseException): + dev_disp.get_after() + + +def test_dev_rd(): + dev_rd = SapDevRd(context_wrap(SAP_DEV_RD, path=RD_PATH)) + assert len(dev_rd.ccms) == len(dev_rd.get("CCMS:")) + assert dev_rd.sid == RD_PATH.split('/')[3] + assert dev_rd.instance == RD_PATH.split('/')[4] + with pytest.raises(ParseException): + dev_rd.get_after() + + +def test_dev_docs(): + env = { + "dev_disp": SapDevDisp(context_wrap(SAP_DEV_DISP, path=DISP_PATH)), + "dev_rd": SapDevRd(context_wrap(SAP_DEV_RD, path=RD_PATH)) + } + failed, total = doctest.testmod(sap_dev_trace_files, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index b78440eaf..8ccbbda41 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -523,6 +523,8 @@ class Specs(SpecSet): rpm_V_packages = RegistryPoint() rsyslog_conf = RegistryPoint(filterable=True) samba = RegistryPoint(filterable=True) + sap_dev_disp = RegistryPoint(multi_output=True, filterable=True) + sap_dev_rd = RegistryPoint(multi_output=True, filterable=True) sap_hdb_version = RegistryPoint(multi_output=True) sap_host_profile = RegistryPoint(filterable=True) sapcontrol_getsystemupdatelist = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 2f680c99b..c54eede3f 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -22,6 +22,7 @@ from insights.core.spec_factory import first_file, listdir from insights.combiners.cloud_provider import CloudProvider from insights.combiners.services import Services +from insights.combiners.sap import Sap from insights.specs import Specs @@ -554,6 +555,15 @@ def pcp_enabled(broker): rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) rsyslog_conf = simple_file("/etc/rsyslog.conf") samba = simple_file("/etc/samba/smb.conf") + + @datasource(Sap) + def sap_sid_name(broker): + """(list): Returns the list of (SAP SID, SAP InstanceName) """ + sap = broker[Sap] + return [(sap.sid(i), i) for i in sap.local_instances] + + sap_dev_disp = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_disp") + sap_dev_rd = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_rd") saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f35ae6aad..f977f3d3d 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -178,6 +178,8 @@ class InsightsArchiveSpecs(Specs): rhev_data_center = 
simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center") rndc_status = simple_file("insights_commands/rndc_status") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) + sap_dev_disp = glob_file("/usr/sap/*/*/work/dev_disp") + sap_dev_rd = glob_file("/usr/sap/*/*/work/dev_rd") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") sealert = simple_file('insights_commands/sealert_-l') From 467d268a07857f08fff7b3996386448fa6c77c67 Mon Sep 17 00:00:00 2001 From: Akshay Ghodake Date: Thu, 23 Jul 2020 00:56:30 +0530 Subject: [PATCH 121/892] [Spec-Update] pcs_status spac updated to first_file. (#2674) Signed-off-by: Akshay Ghodake --- insights/specs/sos_archive.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 5f9aeb89a..2d23aba89 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -161,7 +161,7 @@ class SosSpecs(Specs): partitions = simple_file("/proc/partitions") pcs_config = simple_file("sos_commands/pacemaker/pcs_config") pcs_quorum_status = simple_file("sos_commands/pacemaker/pcs_quorum_status") - pcs_status = simple_file("sos_commands/pacemaker/pcs_status") + pcs_status = first_file(["sos_commands/pacemaker/pcs_status", "/sos_commands/pacemaker/pcs_status_--full"]) podman_image_inspect = glob_file("sos_commands/podman/podman_inspect_*") podman_list_containers = first_file(["sos_commands/podman/podman_ps_-a", "sos_commands/podman/podman_ps"]) podman_list_images = simple_file("sos_commands/podman/podman_images") From 5421b3ebfaf0ba100bee799251faf8b60c0b7b26 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Thu, 23 Jul 2020 19:07:01 +0530 Subject: [PATCH 122/892] Add Candlepin Log (#2671) * Add candlepin_log to default specs Signed-off-by: Rohan Arora * Add time_format to CandlepinLog Signed-off-by: Rohan Arora * Fix pytest for Python 2 Signed-off-by: Rohan Arora --- insights/parsers/foreman_log.py | 63 ++++++++++++++++++---- insights/parsers/tests/test_foreman_log.py | 14 ++++- insights/specs/default.py | 1 + 3 files changed, 66 insertions(+), 12 deletions(-) diff --git a/insights/parsers/foreman_log.py b/insights/parsers/foreman_log.py index ad04fc413..61ed4328e 100644 --- a/insights/parsers/foreman_log.py +++ b/insights/parsers/foreman_log.py @@ -26,6 +26,7 @@ ---------------------------------------------------------------- """ +from datetime import datetime from .. import LogFileOutput, parser from insights.specs import Specs @@ -54,8 +55,50 @@ class ProductionLog(LogFileOutput): @parser(Specs.candlepin_log) class CandlepinLog(LogFileOutput): - """Class for parsing ``candlepin/candlepin.log`` file.""" - pass + """Class for parsing ``candlepin/candlepin.log`` file. 
+ + Sample input:: + + 2016-09-09 13:45:52,650 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b + 2016-09-09 13:45:52,784 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=example_org] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=134 + 2016-09-09 13:45:52,947 [req=909ca4c5-f24e-4212-8f23-cc754d06ac57, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b/content_overrides + 2016-09-09 13:45:52,976 [req=909ca4c5-f24e-4212-8f23-cc754d06ac57, org=] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=29 + 2016-09-09 13:45:53,072 [req=49becd26-5dfe-4d2f-8667-470519230d88, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b/release + 2016-09-09 13:45:53,115 [req=49becd26-5dfe-4d2f-8667-470519230d88, org=example_org] INFO org.candlepin.common.filter.LoggingFilter - Response: status=200, content-type="application/json", time=43 + + Each line is parsed into a dictionary with the following keys: + + * **raw_message(str)** - complete log line + * **message(str)** - the body of the log + * **timestamp(datetime)** - date and time of log as datetime object + + Examples: + >>> cp_log_lines = cp_log.get('candlepin/consumers') + >>> len(cp_log_lines) + 3 + >>> cp_log_lines[0].get('raw_message') + '2016-09-09 13:45:52,650 [req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b' + >>> cp_log_lines[0].get('message') + '[req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc, org=] INFO org.candlepin.common.filter.LoggingFilter - Request: verb=GET, uri=/candlepin/consumers/f7677b4b-c470-4626-86a4-2fdf2546af4b' + >>> cp_log_lines[0].get('timestamp') + datetime.datetime(2016, 9, 9, 13, 45, 52, 650000) + """ + + time_format = '%Y-%m-%d %H:%M:%S,%f' + + def _parse_line(self, line): + # line format from /var/lib/tomcat/webapps/candlepin/WEB-INF/classes/logback.xml + # %d{ISO8601} [thread=%thread] [%X{requestType}=%X{requestUuid}, org=%X{org}, csid=%X{csid}] %-5p %c - %m%n + # http://logback.qos.ch/manual/layouts.html + msg_info = {'raw_message': line} + line_split = line.split(None, 2) + if len(line_split) > 2: + try: + msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format) + msg_info['message'] = line_split[2] + except ValueError: + pass + return msg_info @parser(Specs.candlepin_error_log) @@ -71,17 +114,15 @@ class CandlepinErrorLog(LogFileOutput): 2016-09-07 15:20:33,796 [=, org=] WARN org.apache.qpid.transport.network.security.ssl.SSLUtil - Exception received while trying to verify hostname 2016-09-07 15:27:34,367 [=, org=] WARN org.apache.qpid.transport.network.security.ssl.SSLUtil - Exception received while trying to verify hostname 2016-09-07 16:49:24,650 [=, org=] WARN org.apache.qpid.transport.network.security.ssl.SSLUtil - Exception received while trying to verify hostname - 2016-09-07 18:07:53,688 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.sync.Importer - Conflicts occurred during import that were + 2016-09-07 18:07:53,688 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.sync.Importer - Conflicts 
occurred during import that were not overridden: 2016-09-07 18:07:53,690 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.sync.Importer - [DISTRIBUTOR_CONFLICT] - 2016-09-07 18:07:53,711 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.resource.OwnerResource - Recording import failure - org.candlepin.sync.ImportConflictException: Owner has already imported from another subscription management application. + 2016-09-07 18:07:53,711 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.resource.OwnerResource - Recording import failure org.candlepin.sync.ImportConflictException: Owner has already imported from another subscription management application. Examples: - >>> candlepin_log = shared[Candlepin_Error_Log] >>> candlepin_log.get('req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28')[0]['raw_message'] - '2016-09-07 18:07:53,688 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.sync.Importer - Conflicts occurred during import that were' - - >>> candlepin_log.get_after(datetime(2016, 9, 7, 16, 0, 0)[0]['raw_message'] + '2016-09-07 18:07:53,688 [req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28, org=org_ray] ERROR org.candlepin.sync.Importer - Conflicts occurred during import that were not overridden:' + >>> from datetime import datetime + >>> list(candlepin_log.get_after(datetime(2016, 9, 7, 16, 0, 0)))[0]['raw_message'] '2016-09-07 16:49:24,650 [=, org=] WARN org.apache.qpid.transport.network.security.ssl.SSLUtil - Exception received while trying to verify hostname' """ pass @@ -102,7 +143,7 @@ class ForemanSSLAccessLog(LogFileOutput): Examples: - >>> foreman_ssl_acess_log = shared[ForemanSSLAccessLog] - >>> foreman_ssl_acess_log.get('req=d9dc3cfd-abf7-485e-b1eb-e1e28e4b0f28') + >>> foreman_ssl_acess_log.get('consumers/385e688f-43ad-41b2-9fc7-593942ddec78')[0]['raw_message'] + '10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52 -0400] "GET /rhsm/consumers/385e688f-43ad-41b2-9fc7-593942ddec78 HTTP/1.1" 200 10736 "-" "-"' """ time_format = '%d/%b/%Y:%H:%M:%S' diff --git a/insights/parsers/tests/test_foreman_log.py b/insights/parsers/tests/test_foreman_log.py index cd40b4d1e..4291ae968 100644 --- a/insights/parsers/tests/test_foreman_log.py +++ b/insights/parsers/tests/test_foreman_log.py @@ -1,9 +1,11 @@ from insights.tests import context_wrap +from insights.parsers import foreman_log from insights.parsers.foreman_log import SatelliteLog, ProductionLog from insights.parsers.foreman_log import CandlepinLog, ProxyLog from insights.parsers.foreman_log import CandlepinErrorLog from insights.parsers.foreman_log import ForemanSSLAccessLog from datetime import datetime +import doctest PRODUCTION_LOG = """ @@ -194,7 +196,9 @@ def test_candlepin_log(): cp_log = CandlepinLog(context_wrap(CANDLEPIN_LOG)) assert "req=49becd26-5dfe-4d2f-8667-470519230d88" in cp_log assert len(cp_log.get("req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc")) == 2 - assert len(list(cp_log.get_after(datetime(2016, 9, 9, 13, 45, 53)))) == 2 + # https://github.com/RedHatInsights/insights-core/pull/2641 + # assert len(list(cp_log.get_after(datetime(2016, 9, 9, 13, 45, 53)))) == 2 + assert cp_log.get("req=bd5a4284-d280-4fc5-a3d5-fc976b7aa5cc")[0]['timestamp'] == datetime(2016, 9, 9, 13, 45, 52, 650000) def test_satellite_log(): @@ -218,3 +222,11 @@ def test_foreman_ssl_access_ssl_log(): assert len(foreman_ssl_access_log.get("GET /rhsm/consumers")) == 5 assert len(foreman_ssl_access_log.get("385e688f-43ad-41b2-9fc7-593942ddec78")) == 3 assert 
len(list(foreman_ssl_access_log.get_after(datetime(2017, 3, 27, 13, 34, 0)))) == 7 + + +def test_doc(): + failed_count, tests = doctest.testmod(foreman_log, + globs={"cp_log": CandlepinLog(context_wrap(CANDLEPIN_LOG)), + "candlepin_log": CandlepinErrorLog(context_wrap(CANDLEPIN_ERROR_LOG)), + "foreman_ssl_acess_log": ForemanSSLAccessLog(context_wrap(FOREMAN_SSL_ACCESS_SSL_LOG))}) + assert failed_count == 0 diff --git a/insights/specs/default.py b/insights/specs/default.py index c54eede3f..e126c5563 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -130,6 +130,7 @@ def is_azure(broker): boot_loader_entries = glob_file("/boot/loader/entries/*.conf") branch_info = simple_file("/branch_info", kind=RawFileProvider) brctl_show = simple_command("/usr/sbin/brctl show") + candlepin_log = simple_file("/var/log/candlepin/candlepin.log") cgroups = simple_file("/proc/cgroups") ps_alxwww = simple_command("/bin/ps alxwww") ps_aux = simple_command("/bin/ps aux") From 7d59743d3df2a42a50c0a78f60610de0514cfad5 Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Thu, 23 Jul 2020 19:22:42 +0530 Subject: [PATCH 123/892] Upstart parser (#2662) * Local changes Signed-off-by: vishawanathjadhav * Added parser to parse the upstart managed daemon Signed-off-by: vishawanathjadhav * Updated as per review comments and fixed the typos Signed-off-by: vishawanathjadhav Fixed as per review comments. Fixed typos. * Updated doc strings Signed-off-by: vishawanathjadhav * Removed methods from attributes section and added actual attributes Signed-off-by: vishawanathjadhav Removed methods from the attributes section and added attributes to the attributes section * Updated class name Signed-off-by: vishawanathjadhav * Added cmd path Signed-off-by: vishawanathjadhav * Removed some added specs Signed-off-by: vishawanathjadhav Added the signer --- docs/shared_parsers_catalog/upstart.rst | 3 + insights/parsers/tests/test_upstart.py | 79 ++++++++++++++ insights/parsers/upstart.py | 135 ++++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 220 insertions(+) create mode 100644 docs/shared_parsers_catalog/upstart.rst create mode 100644 insights/parsers/tests/test_upstart.py create mode 100644 insights/parsers/upstart.py diff --git a/docs/shared_parsers_catalog/upstart.rst b/docs/shared_parsers_catalog/upstart.rst new file mode 100644 index 000000000..07a7de5ae --- /dev/null +++ b/docs/shared_parsers_catalog/upstart.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.upstart + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_upstart.py b/insights/parsers/tests/test_upstart.py new file mode 100644 index 000000000..af2dff730 --- /dev/null +++ b/insights/parsers/tests/test_upstart.py @@ -0,0 +1,79 @@ +import doctest +import pytest +from insights.parsers import upstart, SkipException +from insights.parsers.upstart import UpstartInitctlList +from insights.tests import context_wrap + + +INITCTL_LIST = """ +rc stop/waiting +vmware-tools start/running +tty (/dev/tty3) start/running, process 9499 +tty (/dev/tty2) start/running, process 9495 +tty (/dev/tty1) start/running, process 9493 +tty (/dev/tty6) start/running, process 9507 +tty (/dev/tty5) start/running, process 9505 +tty (/dev/ttyS0) start/running, process 9509 +tty (/dev/tty4) stop/waiting +plymouth-shutdown stop/waiting +control-alt-delete stop/waiting +rcS-emergency stop/waiting +readahead-collector stop/waiting +kexec-disable stop/waiting +quit-plymouth stop/waiting +rcS stop/waiting +prefdm stop/waiting +init-system-dbus stop/waiting +ck-log-system-restart stop/waiting +readahead stop/waiting +ck-log-system-start stop/waiting +splash-manager stop/waiting +start-ttys stop/waiting +readahead-disable-services stop/waiting +ck-log-system-stop stop/waiting +rcS-sulogin stop/waiting +serial stop/waiting +""".strip() + +INITCTL_LIST_2 = """ +rc stop/waiting +vmware-tools start/running +/dev/tty3 +tty (/dev/tty2) start/running, process 9495 +tty (/dev/tty1) start/running, process 9493 +tty (/dev/tty6) start/running, process 9507 +tty (/dev/tty5) start/running, process 9505 +tty (/dev/ttyS0) start/running, process 9509 +tty (/dev/tty4) stop/waiting +plymouth-shutdown stop/waiting +control-alt-delete stop/waiting +""".strip() + + +def test_upstart(): + upstart_obj = UpstartInitctlList(context_wrap(INITCTL_LIST)) + assert upstart_obj.upstart_managed('vmware-tools') == 'vmware-tools start/running' + assert upstart_obj.daemon_status('vmware-tools') == 'start/running' + assert upstart_obj.dev_status('/dev/tty6') == 'start/running' + assert upstart_obj.dev_status('/dev/tts') is None + assert upstart_obj.upstart_managed('RCX') is None + assert upstart_obj.tty['/dev/tty6']['status'] == 'start/running' + assert upstart_obj.tty['/dev/tty6']['process'] == '9507' + assert upstart_obj.tty['/dev/tty4']['status'] == 'stop/waiting' + assert upstart_obj.upstart_managed('/dev/tty6') == 'tty (/dev/tty6) start/running, process 9507' + upstart_obj = UpstartInitctlList(context_wrap(INITCTL_LIST_2)) + assert upstart_obj.dev_status('/dev/tty3') is None + + +def test_execp_upstart(): + with pytest.raises(SkipException) as exc: + UpstartInitctlList(context_wrap('')) + assert 'No Contents' in str(exc.value) + + +def test_upstart_doc_examples(): + env = { + 'upstart_obj': UpstartInitctlList(context_wrap(INITCTL_LIST)) + } + failed, total = doctest.testmod(upstart, globs=env) + assert failed == 0 diff --git a/insights/parsers/upstart.py b/insights/parsers/upstart.py new file mode 100644 index 000000000..f6cbca494 --- /dev/null +++ b/insights/parsers/upstart.py @@ -0,0 +1,135 @@ +""" +UpstartInitctlList - Command ``initctl --system list`` +====================================================== + +Parser to parse the output of ``initctl --system list`` command. 
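+
+A short sketch of how a rule might consume this parser; the daemon name comes
+from the sample data below, while the rule and the response key are
+illustrative assumptions::
+
+    from insights import rule, make_fail
+    from insights.parsers.upstart import UpstartInitctlList
+
+    @rule(UpstartInitctlList)
+    def report(initctl):
+        if initctl.daemon_status('vmware-tools') != 'start/running':
+            return make_fail('VMWARE_TOOLS_NOT_RUNNING')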
+
+"""
+
+from insights import parser, CommandParser
+from insights.parsers import SkipException
+from insights.specs import Specs
+
+
+@parser(Specs.initctl_lst)
+class UpstartInitctlList(CommandParser):
+    """
+    Class to parse the output of the initctl command. It allows a
+    system administrator to communicate and interact with the
+    Upstart init(8) daemon and list the services managed by
+    Upstart init.
+
+    Sample output::
+
+        rc stop/waiting
+        vmware-tools start/running
+        tty (/dev/tty3) start/running, process 9499
+        tty (/dev/tty2) start/running, process 9495
+        tty (/dev/tty1) start/running, process 9493
+        tty (/dev/tty6) start/running, process 9507
+        tty (/dev/tty5) start/running, process 9505
+        tty (/dev/tty4) stop/waiting
+        tty (/dev/ttyS0) start/running, process 9509
+        plymouth-shutdown stop/waiting
+        control-alt-delete stop/waiting
+        rcS-emergency stop/waiting
+        readahead-collector stop/waiting
+        kexec-disable stop/waiting
+        quit-plymouth stop/waiting
+        rcS stop/waiting
+        prefdm stop/waiting
+        init-system-dbus stop/waiting
+        ck-log-system-restart stop/waiting
+        readahead stop/waiting
+        ck-log-system-start stop/waiting
+        splash-manager stop/waiting
+        start-ttys stop/waiting
+        readahead-disable-services stop/waiting
+        ck-log-system-stop stop/waiting
+        rcS-sulogin stop/waiting
+        serial stop/waiting
+
+    Raises:
+        SkipException: When there is no content to parse.
+
+    Attributes:
+        data(list): Daemon details are stored as a list of str.
+        tty(dict): Device details are stored with status and process id.
+        daemon_proc(dict): Daemon statuses are stored by `daemon` name and `status`.
+
+    Examples:
+        >>> type(upstart_obj)
+        <class 'insights.parsers.upstart.UpstartInitctlList'>
+        >>> upstart_obj.upstart_managed('vmware-tools')
+        'vmware-tools start/running'
+        >>> upstart_obj.daemon_status('vmware-tools')
+        'start/running'
+        >>> upstart_obj.daemon_status('start-ttys')
+        'stop/waiting'
+        >>> upstart_obj.dev_status('/dev/tty4')
+        'stop/waiting'
+        >>> upstart_obj.upstart_managed('/dev/tty3')
+        'tty (/dev/tty3) start/running, process 9499'
+        >>> upstart_obj.daemon_proc['quit-plymouth']
+        'stop/waiting'
+        >>> upstart_obj.tty['/dev/tty4']['status']
+        'stop/waiting'
+        >>> sorted(upstart_obj.data)[0]
+        'ck-log-system-restart stop/waiting'
+    """
+
+    def parse_content(self, content):
+        self.data = []
+        self.tty = {}
+        self.daemon_proc = {}
+        if not content:
+            raise SkipException("No Contents")
+        for line in content:
+            self.data.append(line)
+            if 'dev/tty' in line:
+                line_s = line.split()
+                if len(line_s) > 1:
+                    dev = line_s[1].replace('(', '').replace(')', '')
+                    self.tty[dev] = {}
+                    self.tty[dev]['status'] = line_s[2].replace(',', '')
+                    if len(line_s) > 4 and 'process' in line:
+                        self.tty[dev]['process'] = str(line_s[4])
+            else:
+                line_s = line.split()
+                proc = line_s[0]
+                status = line_s[1]
+                self.daemon_proc[proc] = status
+
+    def upstart_managed(self, daemon):
+        """
+        Arguments:
+            daemon(str): The daemon name
+
+        Returns:
+            (str): This method returns the status line of the daemon service if it is managed by upstart, else it returns `None`.
+        """
+        for line in self.data:
+            if daemon in line:
+                return line
+        return None
+
+    def daemon_status(self, daemon):
+        """
+        Arguments:
+            daemon(str): The daemon name
+
+        Returns:
+            (str): This method returns the status of the process, `start/running` or `stop/waiting`, if it is managed by upstart, else it returns `None`.
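+
+        For example, with the sample output shown above::
+
+            >>> upstart_obj.daemon_status('rc')
+            'stop/waiting'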
+ """ + return self.daemon_proc.get(daemon, None) + + def dev_status(self, dev): + """ + Arguments: + dev(str): The tty device name + + Returns: + (str): This method will return the status of the tty device `start/running` or `stop/waiting`, along with `process-ID` if it is managed by upstart else it will return `None`. + """ + if dev and dev in self.tty.keys(): + return self.tty[dev].get('status', None) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 8ccbbda41..fe872a823 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -227,6 +227,7 @@ class Specs(SpecSet): ifcfg_static_route = RegistryPoint(multi_output=True) ifconfig = RegistryPoint() imagemagick_policy = RegistryPoint(multi_output=True, filterable=True) + initctl_lst = RegistryPoint() init_ora = RegistryPoint() initscript = RegistryPoint(multi_output=True) init_process_cgroup = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index e126c5563..711773373 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -333,6 +333,7 @@ def httpd_cmd(broker): ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*") ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*") imagemagick_policy = glob_file(["/etc/ImageMagick/policy.xml", "/usr/lib*/ImageMagick-6.5.4/config/policy.xml"]) + initctl_lst = simple_command("/sbin/initctl --system list") init_process_cgroup = simple_file("/proc/1/cgroup") interrupts = simple_file("/proc/interrupts") ip_addr = simple_command("/sbin/ip addr") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f977f3d3d..f31d4c534 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -71,6 +71,7 @@ class InsightsArchiveSpecs(Specs): httpd_M = glob_file("insights_commands/*httpd*_-M") httpd_on_nfs = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_httpd_on_nfs") httpd_V = glob_file("insights_commands/*httpd*_-V") + initctl_lst = simple_file("insights_commands/initctl_--system_list") ip6tables = simple_file("insights_commands/ip6tables-save") ip_addr = simple_file("insights_commands/ip_addr") ip_addresses = simple_file("insights_commands/hostname_-I") From cd1f9f88290e0b2f27dd677b4a6a6ad170d1b123 Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Mon, 27 Jul 2020 08:42:00 +0530 Subject: [PATCH 124/892] Added specs for insights archive (#2677) * Added specs for insights archive Signed-off-by: vishawanathjadhav * Arranged alphabatic order Signed-off-by: vishawanathjadhav --- insights/specs/default.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 711773373..55c16d767 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -334,6 +334,7 @@ def httpd_cmd(broker): ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*") imagemagick_policy = glob_file(["/etc/ImageMagick/policy.xml", "/usr/lib*/ImageMagick-6.5.4/config/policy.xml"]) initctl_lst = simple_command("/sbin/initctl --system list") + initscript = glob_file("/etc/rc.d/init.d/*") init_process_cgroup = simple_file("/proc/1/cgroup") interrupts = simple_file("/proc/interrupts") ip_addr = simple_command("/sbin/ip addr") From eca27378dc7e65c65c339f654be31abca399e98b Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Wed, 29 Jul 2020 03:45:01 +0800 Subject: [PATCH 125/892] Add Parser for file /etc/dnf/dnf.conf (#2625) * Add 
Parser for file /etc/dnf/dnf.conf Signed-off-by: shlao * Add filter Signed-off-by: shlao * Updated insights/specs/default.py Signed-off-by: shlao --- docs/shared_parsers_catalog/dnf_conf.rst | 3 + insights/parsers/dnf_conf.py | 41 ++++++++++++++ insights/parsers/tests/test_dnf_conf.py | 72 ++++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 118 insertions(+) create mode 100644 docs/shared_parsers_catalog/dnf_conf.rst create mode 100644 insights/parsers/dnf_conf.py create mode 100644 insights/parsers/tests/test_dnf_conf.py diff --git a/docs/shared_parsers_catalog/dnf_conf.rst b/docs/shared_parsers_catalog/dnf_conf.rst new file mode 100644 index 000000000..e58f77f7d --- /dev/null +++ b/docs/shared_parsers_catalog/dnf_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dnf_conf + :members: + :show-inheritance: diff --git a/insights/parsers/dnf_conf.py b/insights/parsers/dnf_conf.py new file mode 100644 index 000000000..dcc3333f4 --- /dev/null +++ b/insights/parsers/dnf_conf.py @@ -0,0 +1,41 @@ +""" +DnfConf - file ``/etc/dnf/dnf.conf`` +==================================== + +This module provides parsing for the ``/etc/dnf/dnf.conf`` file. +The ``DnfConf`` class parses the information in the file +``/etc/dnf/dnf.conf``. See the ``YumConf`` class for more +information on attributes and methods. + +Sample file content:: + + [main] + gpgcheck=1 + installonly_limit=3 + clean_requirements_on_remove=True + best=False + skip_if_unavailable=True + +Examples: + >>> 'main' in dconf + True + >>> 'rhel-7-server-rpms' in dconf + False + >>> dconf.has_option('main', 'gpgcheck') + True + >>> dconf.has_option('main', 'foo') + False +""" + +from insights import parser +from insights.specs import Specs +from insights.core.filters import add_filter +from insights.parsers.yum_conf import YumConf + +add_filter(Specs.dnf_conf, '[') + + +@parser(Specs.dnf_conf) +class DnfConf(YumConf): + """Parse contents of file ``/etc/dnf/dnf.conf``.""" + pass diff --git a/insights/parsers/tests/test_dnf_conf.py b/insights/parsers/tests/test_dnf_conf.py new file mode 100644 index 000000000..8c3d872df --- /dev/null +++ b/insights/parsers/tests/test_dnf_conf.py @@ -0,0 +1,72 @@ +import doctest + +from insights.tests import context_wrap +from insights.parsers import dnf_conf +from insights.parsers.dnf_conf import DnfConf + + +DNF_CONF = """ +[main] +gpgcheck=1 +installonly_limit=3 +clean_requirements_on_remove=True +best=False +skip_if_unavailable=True + +[rhel-7-server-rhn-tools-beta-debug-rpms] +metadata_expire = 86400 +sslclientcert = /etc/pki/entitlement/1234.pem +baseurl = https://cdn.redhat.com/content/beta/rhel/server/7/$basearch/rhn-tools/debug +ui_repoid_vars = basearch +sslverify = 1 +name = RHN Tools for Red Hat Enterprise Linux 7 Server Beta (Debug RPMs) +sslclientkey = /etc/pki/entitlement/1234-key.pem +gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +enabled = 0 +sslcacert = /etc/rhsm/ca/redhat-uep.pem +gpgcheck = 1 + +[bad-repo] +gpgkey = +""" + + +CONF_PATH = '/etc/dnf/dnf.conf' + + +def test_doc_examples(): + env = { + 'dconf': DnfConf(context_wrap(DNF_CONF, path=CONF_PATH)), + } + failed, total = doctest.testmod(dnf_conf, globs=env) + assert failed == 0 + + +def test_get_dnf_conf(): + dnf_conf = DnfConf(context_wrap(DNF_CONF, path=CONF_PATH)) + + assert dnf_conf.items('main') == { + 'gpgcheck': '1', + 'installonly_limit': '3', + 'clean_requirements_on_remove': 'True', + 'best': 'False', + 
'skip_if_unavailable': 'True' + } + + assert dnf_conf.items('rhel-7-server-rhn-tools-beta-debug-rpms') == { + u'ui_repoid_vars': u'basearch', + u'sslverify': u'1', + u'name': u'RHN Tools for Red Hat Enterprise Linux 7 Server Beta (Debug RPMs)', + u'sslclientkey': u'/etc/pki/entitlement/1234-key.pem', + u'enabled': u'0', + u'gpgkey': [u'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta', + u'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release'], + u'sslclientcert': u'/etc/pki/entitlement/1234.pem', + u'baseurl': [u'https://cdn.redhat.com/content/beta/rhel/server/7/$basearch/rhn-tools/debug'], + u'sslcacert': u'/etc/rhsm/ca/redhat-uep.pem', + u'gpgcheck': u'1', + u'metadata_expire': u'86400' + } + + assert dnf_conf.file_name == 'dnf.conf' + assert dnf_conf.file_path == '/etc/dnf/dnf.conf' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index fe872a823..8c31a89c7 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -122,6 +122,7 @@ class Specs(SpecSet): dmesg_log = RegistryPoint(filterable=True) dmidecode = RegistryPoint() dmsetup_info = RegistryPoint() + dnf_conf = RegistryPoint(filterable=True) dnf_modules = RegistryPoint() dnf_module_list = RegistryPoint() dnf_module_info = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 55c16d767..56564f650 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -226,6 +226,7 @@ def is_ceph_monitor(broker): dmesg = simple_command("/bin/dmesg") dmesg_log = simple_file("/var/log/dmesg") dmidecode = simple_command("/usr/sbin/dmidecode") + dnf_conf = simple_file("/etc/dnf/dnf.conf") docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") From 71d171fd2db2426574b92e0111efa51e3917a979 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 29 Jul 2020 10:28:22 -0500 Subject: [PATCH 126/892] Add filters to neutron conf parser (#2684) * Add additional filters that may not be added by rules to capture some additional data Signed-off-by: Bob Fahr --- insights/parsers/neutron_conf.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/insights/parsers/neutron_conf.py b/insights/parsers/neutron_conf.py index 3071ac38d..55d403243 100644 --- a/insights/parsers/neutron_conf.py +++ b/insights/parsers/neutron_conf.py @@ -30,6 +30,18 @@ add_filter(Specs.neutron_conf, ["["]) +ADDITIONAL_FILTERS = [ + "service_plugins", + "allow_automatic_dhcp_failover", + "rpc_workers", + "api_workers", + "ipam_driver", + "agent_down_time", + "agent_report_interval", + "router_distributed" +] +add_filter(Specs.neutron_conf, ADDITIONAL_FILTERS) + @parser(Specs.neutron_conf) class NeutronConf(IniConfigFile): From 11a737c3bc4b5bd77369f943e34ac6f37833b712 Mon Sep 17 00:00:00 2001 From: Arie Bregman Date: Wed, 29 Jul 2020 23:34:15 +0300 Subject: [PATCH 127/892] Add parser for neutron sriov agent file (#2655) Add parser to provide the content of neutron sriov agent configuration file. Tests included as well. 
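
The parser is an ``IniConfigFile``, so consumers can read values through the
usual interface, for example (``conf`` stands for a parsed
``NeutronSriovAgent`` instance; option names are taken from the test data in
this patch):

    conf.get('sriov_nic', 'physical_device_mappings')
    conf.has_option('securitygroup', 'firewall_driver')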
Signed-off-by: abregman --- .../neutron_sriov_agent.rst | 3 ++ insights/parsers/neutron_sriov_agent.py | 45 +++++++++++++++++++ .../parsers/tests/test_neutron_sriov_agent.py | 34 ++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 3 ++ 5 files changed, 86 insertions(+) create mode 100644 docs/shared_parsers_catalog/neutron_sriov_agent.rst create mode 100644 insights/parsers/neutron_sriov_agent.py create mode 100644 insights/parsers/tests/test_neutron_sriov_agent.py diff --git a/docs/shared_parsers_catalog/neutron_sriov_agent.rst b/docs/shared_parsers_catalog/neutron_sriov_agent.rst new file mode 100644 index 000000000..bb2de76e8 --- /dev/null +++ b/docs/shared_parsers_catalog/neutron_sriov_agent.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.neutron_sriov_agent + :members: + :show-inheritance: diff --git a/insights/parsers/neutron_sriov_agent.py b/insights/parsers/neutron_sriov_agent.py new file mode 100644 index 000000000..635e1c7e7 --- /dev/null +++ b/insights/parsers/neutron_sriov_agent.py @@ -0,0 +1,45 @@ +""" +NeutronSriovAgent - file ``/etc/neutron/plugins/ml2/sriov_agent.ini`` +===================================================================== + +This class provides parsing for the files: + ``/etc/neutron/plugins/ml2/sriov_agent.ini`` + ``/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/sriov_agent.ini`` + +Sample input data is in the format:: + + [DEFAULT] + debug = false + + [sriov_nic] + physical_device_mappings=datacentre:enp2s0f6 + + [agent] + polling_interval=2 + + [securitygroup] + firewall_driver=noop + report_interval = 60 + + [keystone_authtoken] + auth_port = 35357 + +See the ``IniConfigFile`` class for examples. +""" +from .. import IniConfigFile, parser, add_filter +from insights.specs import Specs + +FILTERS = [ + "debug", + "[", + "physical_device_mappings", + "exclude_devices", + "extensions" +] +add_filter(Specs.neutron_sriov_agent, FILTERS) + + +@parser(Specs.neutron_sriov_agent) +class NeutronSriovAgent(IniConfigFile): + """Class to parse file ``sriov_agent.ini``.""" + pass diff --git a/insights/parsers/tests/test_neutron_sriov_agent.py b/insights/parsers/tests/test_neutron_sriov_agent.py new file mode 100644 index 000000000..ea1ed94ab --- /dev/null +++ b/insights/parsers/tests/test_neutron_sriov_agent.py @@ -0,0 +1,34 @@ +from insights.parsers.neutron_sriov_agent import NeutronSriovAgent +from insights.tests import context_wrap + +NEUTRON_SRIOV_AGENT_CONF = """ +[DEFAULT] +debug = false +verbose = false + +[sriov_nic] +physical_device_mappings=datacentre:enp2s0f6 + +[agent] +polling_interval=2 + +[securitygroup] +firewall_driver=noop +report_interval = 60 + +[keystone_authtoken] +""" + + +def test_neutron_sriov_agent(): + n_sriov_agent = NeutronSriovAgent(context_wrap(NEUTRON_SRIOV_AGENT_CONF)) + assert n_sriov_agent is not None + assert list(n_sriov_agent.sections()) == [ + 'sriov_nic', 'agent', 'securitygroup', 'keystone_authtoken'] + assert n_sriov_agent.defaults() == { + 'debug': 'false', + 'verbose': 'false'} + assert n_sriov_agent.get('sriov_nic', 'physical_device_mappings') == 'datacentre:enp2s0f6' + assert n_sriov_agent.has_option('securitygroup', 'firewall_driver') + assert not n_sriov_agent.has_option('yabba', 'dabba_do') + assert n_sriov_agent.get('DEFAULT', 'debug') == 'false' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 8c31a89c7..bcd461740 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -367,6 +367,7 @@ class Specs(SpecSet): netstat_s = 
RegistryPoint() networkmanager_dispatcher_d = RegistryPoint(multi_output=True) neutron_conf = RegistryPoint(filterable=True) + neutron_sriov_agent = RegistryPoint(filterable=True) neutron_dhcp_agent_ini = RegistryPoint(filterable=True) neutron_l3_agent_ini = RegistryPoint(filterable=True) neutron_l3_agent_log = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 56564f650..406eceb63 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -449,6 +449,9 @@ def httpd_cmd(broker): netstat_s = simple_command("/bin/netstat -s") networkmanager_dispatcher_d = glob_file("/etc/NetworkManager/dispatcher.d/*-dhclient") neutron_conf = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf", "/etc/neutron/neutron.conf"]) + neutron_sriov_agent = first_file([ + "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/sriov_agent.ini", + "/etc/neutron/plugins/ml2/sriov_agent.ini"]) neutron_dhcp_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini", "/etc/neutron/dhcp_agent.ini"]) neutron_l3_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/l3_agent.ini", "/etc/neutron/l3_agent.ini"]) neutron_l3_agent_log = simple_file("/var/log/neutron/l3-agent.log") From cb198b3cf5da4b60eb951fe56bd79b46e236efe8 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 30 Jul 2020 11:30:31 +0800 Subject: [PATCH 128/892] Enhance parser yum_log to handle erased package with version (#2688) * Enhance yumlog Signed-off-by: jiazhang * Fix condition Signed-off-by: jiazhang --- insights/parsers/tests/test_yumlog.py | 14 ++++++++++++++ insights/parsers/yumlog.py | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/insights/parsers/tests/test_yumlog.py b/insights/parsers/tests/test_yumlog.py index 751cac276..0ed3563a7 100644 --- a/insights/parsers/tests/test_yumlog.py +++ b/insights/parsers/tests/test_yumlog.py @@ -15,6 +15,17 @@ Jan 24 00:24:11 Updated: glibc-devel-2.12-1.149.el6_6.4.i686 """.strip() +OKAY2 = """ +May 19 23:29:12 Installed: cyrus-sasl-md5-2.1.26-23.el7.x86_64 +May 19 23:29:14 Installed: 389-ds-base-1.3.8.4-23.el7_6.x86_64 +Jul 21 09:09:39 Updated: httpd-tools-2.4.6-93.el7.x86_64 +Jul 21 09:09:39 Installed: mailcap-2.1.41-2.el7.noarch +Jul 26 23:24:36 Erased: systemd-219-46.el7.x86_64 +Jul 28 23:24:36 Erased: systemd-219-67.el7.x86_64 +Jul 21 09:09:40 Installed: httpd-2.4.6-93.el7.x86_64 +Jul 20 09:09:40 Installed: httpd-2.4.6-97.el7.x86_64 +""".strip() + ERROR = """ May 23 18:06:24 Installed: wget-1.14-10.el7_0.1.x86_64 Jan 24 00:24:00 Updated: glibc-2.12-1.149.el6_6.4.x86_64 @@ -79,3 +90,6 @@ def test_erased(): yl = YumLog(context_wrap(OKAY)) assert any(e.pkg.name == "redhat-access-insights-batch" for e in yl) is True assert any(e.pkg.name == "katello-agent" for e in yl) is True + + yl2 = YumLog(context_wrap(OKAY2)) + assert any(e.pkg.name == "systemd" for e in yl2) is True diff --git a/insights/parsers/yumlog.py b/insights/parsers/yumlog.py index faf88b3c5..4808fa5c6 100644 --- a/insights/parsers/yumlog.py +++ b/insights/parsers/yumlog.py @@ -117,7 +117,7 @@ def parse_content(self, content): timestamp = ' '.join([month, day, time]) state = state.rstrip(':') pkg = pkg.split(':')[-1].strip() - if state == self.ERASED: + if state == self.ERASED and "." 
not in pkg:
             pkg = InstalledRpm({'name': pkg})
         else:
             pkg = InstalledRpm.from_package(pkg)

From 9f5b4bfb2a61d7c2418bad0da12f339dec236e71 Mon Sep 17 00:00:00 2001
From: Sheng Lao <39508521+shlao@users.noreply.github.com>
Date: Thu, 30 Jul 2020 21:37:40 +0800
Subject: [PATCH 129/892] Add parser NamedConf (#2680)

* Add parser NamedConf

Signed-off-by: shlao

* Modify the docsting

Signed-off-by: shlao

* Add more tests

Signed-off-by: shlao
---
 docs/shared_parsers_catalog/named_conf.rst |   3 +
 insights/parsers/named_conf.py             |  46 +++++
 insights/parsers/tests/test_named_conf.py  | 220 +++++++++++++++++++++
 insights/specs/__init__.py                 |   1 +
 insights/specs/default.py                  |   1 +
 5 files changed, 271 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/named_conf.rst
 create mode 100644 insights/parsers/named_conf.py
 create mode 100644 insights/parsers/tests/test_named_conf.py

diff --git a/docs/shared_parsers_catalog/named_conf.rst b/docs/shared_parsers_catalog/named_conf.rst
new file mode 100644
index 000000000..9db147736
--- /dev/null
+++ b/docs/shared_parsers_catalog/named_conf.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.named_conf
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/named_conf.py b/insights/parsers/named_conf.py
new file mode 100644
index 000000000..d7a30a2ce
--- /dev/null
+++ b/insights/parsers/named_conf.py
@@ -0,0 +1,46 @@
+"""
+NamedConf parser - file ``/etc/named.conf``
+===========================================
+
+NamedConf parses the ``named`` configuration file.
+``named`` is the name server daemon used by BIND.
+"""
+
+from insights.specs import Specs
+from insights.core.plugins import parser
+from insights.parsers import SkipException
+from insights.parsers.named_checkconf import NamedCheckconf
+
+
+@parser(Specs.named_conf)
+class NamedConf(NamedCheckconf):
+    """
+    Class for parsing the file ``/etc/named.conf``. We use the class ``NamedCheckconf`` to parse most
+    of the named.conf configuration and the class ``NamedConf`` to parse the `include` directives.
+
+    .. note::
+        Please refer to the super-class :py:class:`insights.parsers.named_checkconf.NamedCheckconf`
+        for more usage information.
+
+    Attributes:
+        includes (list): List of files in 'include' section.
+
+    Raises:
+        SkipException: When content is empty or cannot be parsed.
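+
+    A rule built on this parser could, for instance, key off the presence of a
+    particular include file (the path below is the one from the example data)::
+
+        if '/etc/crypto-policies/back-ends/bind.config' in named_conf.includes:
+            # react to crypto-policy controlled BIND configuration
+            ...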
+ + Examples: + >>> named_conf.includes + ['/etc/crypto-policies/back-ends/bind.config'] + """ + + def parse_content(self, content): + includes = [] + super(NamedConf, self).parse_content(content) + + try: + for line in [l for l in content if l.strip().startswith('include ') and ';' in l]: + includes.append(line.split(';')[0].replace('"', '').split()[1]) + except IndexError: + raise SkipException("Syntax error of include directive") + + self.includes = includes diff --git a/insights/parsers/tests/test_named_conf.py b/insights/parsers/tests/test_named_conf.py new file mode 100644 index 000000000..543d293be --- /dev/null +++ b/insights/parsers/tests/test_named_conf.py @@ -0,0 +1,220 @@ +import doctest +import pytest + +from insights.parsers import named_conf, SkipException +from insights.parsers.named_conf import NamedConf +from insights.tests import context_wrap + + +CONFIG_NORMAL_SECTIONS = """ +logging { + channel "default_debug" { + file "data/named.run"; + severity dynamic; + }; +}; +options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + pid-file "/run/named/named.pid"; + recursing-file "/var/named/data/named.recursing"; + secroots-file "/var/named/data/named.secroots"; + session-keyfile "/run/named/session.key"; + statistics-file "/var/named/data/named_stats.txt"; + disable-algorithms "." { + "RSAMD5"; + "DSA"; + }; + disable-ds-digests "." { + "GOST"; + }; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; + + include "/etc/crypto-policies/back-ends/bind.config"; +}; +""" + +CONFIG_INVALID_SECTIONS = """ +logging { + channel "default_debug" { + file "data/named.run"; + severity dynamic; + }; +}; +options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + pid-file "/run/named/named.pid"; + recursing-file "/var/named/data/named.recursing"; + secroots-file "/var/named/data/named.secroots"; + session-keyfile "/run/named/session.key"; + statistics-file "/var/named/data/named_stats.txt"; + disable-algorithms "." { + "RSAMD5"; + "DSA"; + }; + disable-ds-digests "." { + "GOST"; + }; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; + + include ""; +}; +""" + +CONFIG_COMPLEX_SECTIONS = """ +include "/tmp/test-unix"; # Unix style + +logging { + channel "default_debug" { + file "data/named.run"; + severity dynamic; + }; +}; +options { + directory "/var/named"; + dump-file "/var/named/data/cache_dump.db"; + listen-on port 53 { + 127.0.0.1/32; + }; + listen-on-v6 port 53 { + ::1/128; + }; + managed-keys-directory "/var/named/dynamic"; + memstatistics-file "/var/named/data/named_mem_stats.txt"; + pid-file "/run/named/named.pid"; + recursing-file "/var/named/data/named.recursing"; + secroots-file "/var/named/data/named.secroots"; + session-keyfile "/run/named/session.key"; + statistics-file "/var/named/data/named_stats.txt"; + disable-algorithms "." { + "RSAMD5"; + "DSA"; + }; + disable-ds-digests "." 
{ + "GOST"; + }; + dnssec-enable yes; + dnssec-validation yes; + recursion yes; + allow-query { + "localhost"; + }; + + include "/etc/crypto-policies/back-ends/bind.config"; +}; +managed-keys { + "." initial-key 257 3 8 "AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF + FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX + bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD + X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz + W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS + Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq + QxA+Uk1ihz0="; + "." initial-key 257 3 8 "AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3 + +/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kv + ArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF + 0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+e + oZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfd + RUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwN + R1AkUTV74bU="; +}; +zone "." IN { + type hint; + file "named.ca"; +}; +zone "localhost.localdomain" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; +}; +zone "localhost" IN { + type master; + file "named.localhost"; + allow-update { + "none"; + }; +}; +zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; +}; +zone "1.0.0.127.in-addr.arpa" IN { + type master; + file "named.loopback"; + allow-update { + "none"; + }; +}; +zone "0.in-addr.arpa" IN { + type master; + file "named.empty"; + allow-update { + "none"; + }; + +include "/etc/crypto-policies/back-ends/bind.config-c"; /* c style */ +}; + + include "/etc/crypto-policies/back-ends/bind.config-c-plus"; // C++ style +include "/etc/crypto-policies/back-ends/bind.config"; // the sname line +""" + + +def test_config_no_data(): + with pytest.raises(SkipException): + NamedConf(context_wrap("")) + + +def test_config_invalid_data(): + with pytest.raises(SkipException): + NamedConf(context_wrap(CONFIG_INVALID_SECTIONS)) + + +def test_config_include_sections(): + include_sections = NamedConf(context_wrap(CONFIG_COMPLEX_SECTIONS)) + assert len(include_sections.includes) == 5 + assert include_sections.includes[0] == '/tmp/test-unix' + assert include_sections.includes[1] == '/etc/crypto-policies/back-ends/bind.config' + assert include_sections.includes[2] == '/etc/crypto-policies/back-ends/bind.config-c' + assert include_sections.includes[3] == '/etc/crypto-policies/back-ends/bind.config-c-plus' + assert include_sections.includes[4] == '/etc/crypto-policies/back-ends/bind.config' + + +def test_doc_examples(): + env = { + "named_conf": NamedConf(context_wrap(CONFIG_NORMAL_SECTIONS)), + } + failed, total = doctest.testmod(named_conf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index bcd461740..e210d3214 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -358,6 +358,7 @@ class Specs(SpecSet): mysql_log = RegistryPoint(multi_output=True, filterable=True) mysqld_limits = RegistryPoint() named_checkconf_p = RegistryPoint(filterable=True) + named_conf = RegistryPoint(filterable=True) namespace = RegistryPoint() ndctl_list_Ni = RegistryPoint() netconsole = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 406eceb63..0551e5e32 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -440,6 +440,7 @@ def httpd_cmd(broker): 
"/var/opt/rh/rh-mysql*/log/mysql/mysqld.log" ]) named_checkconf_p = simple_command("/usr/sbin/named-checkconf -p") + named_conf = simple_file("/etc/named.conf") namespace = simple_command("/bin/ls /var/run/netns") ndctl_list_Ni = simple_command("/usr/bin/ndctl list -Ni") netconsole = simple_file("/etc/sysconfig/netconsole") From 6c19a4bfd5bb00050d02af7ae09476163e6f44d2 Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Thu, 30 Jul 2020 21:51:50 +0800 Subject: [PATCH 130/892] According to the spec: lvs_noheadings, The docstring for class Lvs needs to be updated (#2681) Signed-off-by: shlao --- insights/parsers/lvm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/insights/parsers/lvm.py b/insights/parsers/lvm.py index c3e01ca3a..dcd518b9b 100644 --- a/insights/parsers/lvm.py +++ b/insights/parsers/lvm.py @@ -19,8 +19,8 @@ VgsHeadings - command ``vgs -v -o +vg_mda_count,vg_mda_free,vg_mda_size,vg_mda_used_count,vg_tags --config="global{locking_type=0}"`` ------------------------------------------------------------------------------------------------------------------------------------- -Lvs - command ``/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_all`` ------------------------------------------------------------------------------------- +Lvs - command ``/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config="global{locking_type=0}"`` +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- LvsHeadings - command ``/sbin/lvs -a -o +lv_tags,devices --config="global{locking_type=0}"`` -------------------------------------------------------------------------------------------- @@ -443,7 +443,7 @@ def parse_content(self, content): @parser(Specs.lvs_noheadings) class Lvs(Lvm): """ - Parse the output of the `/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_all` command. + Parse the output of the `/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config="global{locking_type=0}"` command. 
Parse each line in the output of lvs based on the lvs datasource in `insights/specs/`: From 09004921816e289fd468ca0eb2a61b92ad753d73 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 30 Jul 2020 09:11:51 -0500 Subject: [PATCH 131/892] Restore ethtool -c specs for core collection (#2686) Signed-off-by: Bob Fahr --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 2 files changed, 2 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 0551e5e32..f4982e2c3 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -243,6 +243,7 @@ def is_ceph_monitor(broker): ethtool = foreach_execute(ethernet_interfaces, "/sbin/ethtool %s") ethtool_S = foreach_execute(ethernet_interfaces, "/sbin/ethtool -S %s") ethtool_T = foreach_execute(ethernet_interfaces, "/sbin/ethtool -T %s") + ethtool_c = foreach_execute(ethernet_interfaces, "/sbin/ethtool -c %s") ethtool_g = foreach_execute(ethernet_interfaces, "/sbin/ethtool -g %s") ethtool_i = foreach_execute(ethernet_interfaces, "/sbin/ethtool -i %s") ethtool_k = foreach_execute(ethernet_interfaces, "/sbin/ethtool -k %s") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f31d4c534..5037f605a 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -47,6 +47,7 @@ class InsightsArchiveSpecs(Specs): ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") ethtool_S = glob_file("insights_commands/ethtool_-S_*") ethtool_T = glob_file("insights_commands/ethtool_-T_*") + ethtool_c = glob_file("insights_commands/ethtool_-c_*") ethtool_g = glob_file("insights_commands/ethtool_-g_*") ethtool_i = glob_file("insights_commands/ethtool_-i_*") ethtool_k = glob_file("insights_commands/ethtool_-k_*") From edac809978c7c6bf28ff56495571ecda86ec78cf Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Fri, 31 Jul 2020 01:24:07 +0800 Subject: [PATCH 132/892] Add EtcSshConfigD Parser (#2682) Signed-off-by: shlao --- insights/parsers/ssh_client_config.py | 93 +++++++++++++++++++ .../parsers/tests/test_ssh_client_config.py | 26 ++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 4 files changed, 121 insertions(+) diff --git a/insights/parsers/ssh_client_config.py b/insights/parsers/ssh_client_config.py index 2e418bf93..ec360feac 100644 --- a/insights/parsers/ssh_client_config.py +++ b/insights/parsers/ssh_client_config.py @@ -9,6 +9,9 @@ EtcSshConfig - file ``/etc/ssh/ssh_config`` ------------------------------------------- +EtcSshConfigD - files ``/etc/ssh/ssh_config.d/*`` +------------------------------------------------- + ForemanSshConfig - file ``/usr/share/foreman/.ssh/ssh_config`` -------------------------------------------------------------- @@ -26,6 +29,51 @@ class SshClientConfig(Parser): """ Base class for ssh client configuration file. + Sample output:: + + # This is the ssh client system-wide configuration file. See + # ssh_config(5) for more information. This file provides defaults for + # users, and the values can be changed in per-user configuration files + # or on the command line. + + ProxyCommand ssh -q -W %h:%p gateway.example.com + + Host * + GSSAPIAuthentication yes + # If this option is set to yes then remote X11 clients will have full access + # to the original X11 display. As virtually no X11 client supports the untrusted + # mode correctly we set this to yes. 
From edac809978c7c6bf28ff56495571ecda86ec78cf Mon Sep 17 00:00:00 2001
From: Sheng Lao <39508521+shlao@users.noreply.github.com>
Date: Fri, 31 Jul 2020 01:24:07 +0800
Subject: [PATCH 132/892] Add EtcSshConfigD Parser (#2682)

Signed-off-by: shlao
---
 insights/parsers/ssh_client_config.py        | 93 +++++++++++++++++++
 .../parsers/tests/test_ssh_client_config.py  | 26 ++++++
 insights/specs/__init__.py                   |  1 +
 insights/specs/default.py                    |  1 +
 4 files changed, 121 insertions(+)

diff --git a/insights/parsers/ssh_client_config.py b/insights/parsers/ssh_client_config.py
index 2e418bf93..ec360feac 100644
--- a/insights/parsers/ssh_client_config.py
+++ b/insights/parsers/ssh_client_config.py
@@ -9,6 +9,9 @@
 EtcSshConfig - file ``/etc/ssh/ssh_config``
 -------------------------------------------
 
+EtcSshConfigD - files ``/etc/ssh/ssh_config.d/*``
+-------------------------------------------------
+
 ForemanSshConfig - file ``/usr/share/foreman/.ssh/ssh_config``
 --------------------------------------------------------------
 
@@ -26,6 +29,51 @@ class SshClientConfig(Parser):
     """
     Base class for ssh client configuration file.
 
+    Sample output::
+
+        # This is the ssh client system-wide configuration file. See
+        # ssh_config(5) for more information. This file provides defaults for
+        # users, and the values can be changed in per-user configuration files
+        # or on the command line.
+
+        ProxyCommand ssh -q -W %h:%p gateway.example.com
+
+        Host *
+            GSSAPIAuthentication yes
+        # If this option is set to yes then remote X11 clients will have full access
+        # to the original X11 display. As virtually no X11 client supports the untrusted
+        # mode correctly we set this to yes.
+            ForwardX11Trusted yes
+        # Send locale-related environment variables
+            SendEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+            SendEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+            SendEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+            SendEnv XMODIFIERS
+
+        Host proxytest
+            HostName 192.168.122.2
+
+    Attributes:
+
+        global_lines (list): The list of site-wide configuration, as
+            namedtuple('KeyValue', ['keyword', 'value', 'line']).
+        host_lines (dict): The dict of all host-specific definitions, as
+            {'Host_name': [namedtuple('KeyValue', ['keyword', 'value', 'line'])]}
+
+    Examples:
+        >>> len(sshconfig.global_lines)
+        1
+        >>> sshconfig.global_lines[0].keyword
+        'ProxyCommand'
+        >>> sshconfig.global_lines[0].value
+        'ssh -q -W %h:%p gateway.example.com'
+        >>> 'Host_*' in sshconfig.host_lines
+        True
+        >>> sshconfig.host_lines['Host_proxytest'][0].keyword
+        'HostName'
+        >>> sshconfig.host_lines['Host_proxytest'][0].value
+        '192.168.122.2'
+
     Raises:
         SkipException: When input content is empty. Not found any parse results.
     """
@@ -124,6 +172,51 @@ class EtcSshConfig(SshClientConfig):
     pass
 
 
+@parser(Specs.ssh_config_d)
+class EtcSshConfigD(SshClientConfig):
+    """
+    This Parser reads the files ``/etc/ssh/ssh_config.d/*``
+
+    Sample output::
+
+        # The options here are in the "Match final block" to be applied as the last
+        # options and could be potentially overwritten by the user configuration
+        Match final all
+            # Follow system-wide Crypto Policy, if defined:
+            Include /etc/crypto-policies/back-ends/openssh.config
+
+            GSSAPIAuthentication yes
+
+        # If this option is set to yes then remote X11 clients will have full access
+        # to the original X11 display. As virtually no X11 client supports the untrusted
+        # mode correctly we set this to yes.
+            ForwardX11Trusted yes
+
+        # Send locale-related environment variables
+            SendEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+            SendEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+            SendEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+            SendEnv XMODIFIERS
+
+        # Uncomment this if you want to use .local domain
+        # Host *.local
+
+    Attributes:
+
+        global_lines (list): The list of site-wide configuration, as
+            namedtuple('KeyValue', ['keyword', 'value', 'line']).
+        host_lines (dict): The dict of all host-specific definitions, as
+            {'Host_name': [namedtuple('KeyValue', ['keyword', 'value', 'line'])]}
+
+    Examples:
+        >>> etcsshconfigd.global_lines[1].keyword
+        'Include'
+        >>> etcsshconfigd.global_lines[1].value
+        '/etc/crypto-policies/back-ends/openssh.config'
+    """
+    pass
+
+
 @parser(Specs.ssh_foreman_config)
 class ForemanSshConfig(SshClientConfig):
     """
diff --git a/insights/parsers/tests/test_ssh_client_config.py b/insights/parsers/tests/test_ssh_client_config.py
index c021242e8..2e1d799d6 100644
--- a/insights/parsers/tests/test_ssh_client_config.py
+++ b/insights/parsers/tests/test_ssh_client_config.py
@@ -28,6 +28,30 @@
     HostName 192.168.122.2
 """
 
+SSH_CONFIG_D_INPUT = """
+# The options here are in the "Match final block" to be applied as the last
+# options and could be potentially overwritten by the user configuration
+Match final all
+    # Follow system-wide Crypto Policy, if defined:
+    Include /etc/crypto-policies/back-ends/openssh.config
+
+    GSSAPIAuthentication yes
+
+# If this option is set to yes then remote X11 clients will have full access
+# to the original X11 display. As virtually no X11 client supports the untrusted
+# mode correctly we set this to yes.
+    ForwardX11Trusted yes
+
+# Send locale-related environment variables
+    SendEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
+    SendEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
+    SendEnv LC_IDENTIFICATION LC_ALL LANGUAGE
+    SendEnv XMODIFIERS
+
+# Uncomment this if you want to use .local domain
+# Host *.local
+"""
+
 SSH_CONFIG_INPUT_EMPTY = """
 # ProxyCommand ssh -q -W %h:%p gateway.example.com
 # RekeyLimit 1G 1h
@@ -62,7 +86,9 @@ def test_ssh_config_AB():
 
 def test_ssh_client_config_docs():
     env = {
+        'sshconfig': scc.SshClientConfig(context_wrap(SSH_CONFIG_INPUT)),
         'etcsshconfig': scc.EtcSshConfig(context_wrap(SSH_CONFIG_INPUT)),
+        'etcsshconfigd': scc.EtcSshConfigD(context_wrap(SSH_CONFIG_D_INPUT)),
         'foremansshconfig': scc.ForemanSshConfig(context_wrap(SSH_CONFIG_INPUT)),
         'foreman_proxy_ssh_config': scc.ForemanProxySshConfig(context_wrap(SSH_CONFIG_INPUT))
     }
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index e210d3214..339185dde 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -560,6 +560,7 @@ class Specs(SpecSet):
     softnet_stat = RegistryPoint()
     software_collections_list = RegistryPoint()
     spfile_ora = RegistryPoint(multi_output=True)
+    ssh_config_d = RegistryPoint(multi_output=True, filterable=True)
     ssh_config = RegistryPoint(filterable=True)
     ssh_foreman_config = RegistryPoint(filterable=True)
     ssh_foreman_proxy_config = RegistryPoint(filterable=True)
diff --git a/insights/specs/default.py b/insights/specs/default.py
index f4982e2c3..3352e07e1 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -595,6 +595,7 @@ def sap_sid_name(broker):
     software_collections_list = simple_command('/usr/bin/scl --list')
     ss = simple_command("/usr/sbin/ss -tupna")
     ssh_config = simple_file("/etc/ssh/ssh_config")
+    ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*")
    ssh_foreman_proxy_config = simple_file("/usr/share/foreman-proxy/.ssh/ssh_config")
     sshd_config = simple_file("/etc/ssh/sshd_config")
     sshd_config_perms = simple_command("/bin/ls -l /etc/ssh/sshd_config")
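The new EtcSshConfigD parser reuses the SshClientConfig base class, whose whole job is to split directives into site-wide settings and per-``Host`` blocks. A simplified standalone sketch of that split (not the insights-core implementation, which also records line numbers and raises SkipException on empty input):

    def split_ssh_config(lines):
        # Directives before any "Host" keyword are global; the rest are
        # grouped under "Host_<pattern>" keys, mirroring host_lines above.
        global_lines, host_lines, current = [], {}, None
        for line in lines:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            keyword, _, value = line.partition(' ')
            if keyword == 'Host':
                current = 'Host_' + value.strip()
                host_lines[current] = []
            elif current is None:
                global_lines.append((keyword, value.strip()))
            else:
                host_lines[current].append((keyword, value.strip()))
        return global_lines, host_lines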
+""" +import re + +from insights import parser, CommandParser +from insights.parsers import SkipException +from insights.specs import Specs + + +@parser(Specs.lsvmbus) +class LsvmBus(CommandParser): + """Parse the output of ``lsvmbus -vv`` as list. + + Typical output:: + + VMBUS ID 18: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0001-0000-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0001-0000-3130-444531303244 + Rel_ID=18, target_cpu=0 + VMBUS ID 26: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0002-0000-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0002-0000-3130-444531303244 + Rel_ID=26, target_cpu=0 + VMBUS ID 73: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0003-0001-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0003-0001-3130-444531303244 + Rel_ID=73, target_cpu=0 + VMBUS ID 74: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0004-0001-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0004-0001-3130-444531303244 + Rel_ID=74, target_cpu=0 + + Examples: + + >>> assert len(lsvmbus.devices) == 4 + >>> assert lsvmbus.devices[0].get('vmbus_id', None) == '18' + >>> assert lsvmbus.devices[0].get('device_id', None) == '47505500-0001-0000-3130-444531303244' + >>> assert lsvmbus.devices[0].get('rel_id', None) == '18' + >>> assert lsvmbus.devices[0].get('sysfs_path', None) == '/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531303244' + >>> assert lsvmbus.devices[0].get('target_cpu', None) == '0' + + Attributes: + devices (list): List of ``dict`` for each device. For example:: + + [ + { + 'vmbus_id': '18', + 'class_id': '44c4f61d-4444-4400-9d52-802e27ede19f', + 'type': 'PCI Express pass-through', + 'device_id': '47505500-0001-0000-3130-444531303244', + 'sysfs_path': '/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531303244', + 'rel_id': '18', + 'target_cpu': '0' + }, + {...} + ] + + """ + def parse_content(self, content): + if not content: + raise SkipException('No content.') + self.devices = [] + parts = zip(*(iter(content),) * 4) + patrn_vmbusid = re.compile(r"VMBUS ID (\d+)") + patrn_type = re.compile("- (.*)") + patrn_classid = re.compile("Class_ID = {(.*)}") + patrn_deviceid = re.compile("Device_ID = {(.*)}") + patrn_sysfspath = re.compile("Sysfs path: (.*)") + patrn_relid = re.compile(r"Rel_ID=(\d+)") + patrn_targetcpu = re.compile(r"target_cpu=(\d+)") + for part in parts: + self.devices.append( + { + 'vmbus_id': patrn_vmbusid.search(part[0]).groups()[0], + 'class_id': patrn_classid.search(part[0]).groups()[0], + 'type': patrn_type.search(part[0]).groups()[0], + 'device_id': patrn_deviceid.search(part[1]).groups()[0], + 'sysfs_path': patrn_sysfspath.search(part[2]).groups()[0], + 'rel_id': patrn_relid.search(part[3]).groups()[0], + 'target_cpu': patrn_targetcpu.search(part[3]).groups()[0], + } + ) diff --git a/insights/parsers/tests/test_lsvmbus.py b/insights/parsers/tests/test_lsvmbus.py new file mode 100644 index 000000000..9056dfb28 --- /dev/null +++ b/insights/parsers/tests/test_lsvmbus.py @@ -0,0 +1,45 @@ +import doctest +import pytest + +from insights.parsers import lsvmbus +from insights.parsers import SkipException +from insights.tests import context_wrap + + +OUTPUT = """ +VMBUS ID 18: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = 
{47505500-0001-0000-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0001-0000-3130-444531303244 + Rel_ID=18, target_cpu=0 +VMBUS ID 26: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0002-0000-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0002-0000-3130-444531303244 + Rel_ID=26, target_cpu=0 +VMBUS ID 73: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0003-0001-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0003-0001-3130-444531303244 + Rel_ID=73, target_cpu=0 +VMBUS ID 74: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through + Device_ID = {47505500-0004-0001-3130-444531303244} + Sysfs path: /sys/bus/vmbus/devices/47505500-0004-0001-3130-444531303244 + Rel_ID=74, target_cpu=0 +""".strip() + + +def test_lsvmbus(): + output = lsvmbus.LsvmBus(context_wrap(OUTPUT)) + assert len(output.devices) == 4 + assert output.devices[0].get('vmbus_id', None) == '18' + assert output.devices[0].get('device_id', None) == '47505500-0001-0000-3130-444531303244' + assert output.devices[0].get('rel_id', None) == '18' + assert output.devices[0].get('sysfs_path', None) == '/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531303244' + assert output.devices[0].get('target_cpu', None) == '0' + + with pytest.raises(SkipException): + assert lsvmbus.LsvmBus(context_wrap("")) is None + + +def test_docs(): + env = {'lsvmbus': lsvmbus.LsvmBus(context_wrap(OUTPUT))} + failed, total = doctest.testmod(lsvmbus, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 339185dde..dfdafcf2a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -319,6 +319,7 @@ class Specs(SpecSet): lspci = RegistryPoint() lssap = RegistryPoint() lsscsi = RegistryPoint() + lsvmbus = RegistryPoint() lvdisplay = RegistryPoint() lvm_conf = RegistryPoint(filterable=True) lvmconfig = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 3352e07e1..3a625d04b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -405,6 +405,7 @@ def httpd_cmd(broker): lspci = simple_command("/sbin/lspci -k") lssap = simple_command("/usr/sap/hostctrl/exe/lssap") lsscsi = simple_command("/usr/bin/lsscsi") + lsvmbus = simple_command("/usr/sbin/lsvmbus -vv") lvm_conf = simple_file("/etc/lvm/lvm.conf") lvs_noheadings = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"") mac_addresses = glob_file("/sys/class/net/*/address") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 5037f605a..46e7b89ca 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -122,6 +122,7 @@ class InsightsArchiveSpecs(Specs): lspci = simple_file("insights_commands/lspci_-k") lssap = simple_file("insights_commands/usr.sap.hostctrl.exe.lssap") lsscsi = simple_file("insights_commands/lsscsi") + lsvmbus = simple_file("insights_commands/lsvmbus_-vv") lvmconfig = first_file([ "insights_commands/lvmconfig_--type_full", "insights_commands/lvm_dumpconfig_--type_full" From 459196a9b48d42c2e986123807cccd595896a859 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Mon, 3 Aug 2020 18:45:09 +0530 Subject: [PATCH 134/892] Correct glob for yum_repos_d (#2689) 
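LsvmBus leans on the fact that ``lsvmbus -vv`` prints exactly four lines per device, so ``zip(*(iter(content),) * 4)`` is enough to chunk the flat line list into per-device groups before the regexes run. A standalone illustration of that grouping idiom (the strings are abbreviated):

    content = [
        'VMBUS ID 18: Class_ID = {...} - PCI Express pass-through',
        '      Device_ID = {...}',
        '      Sysfs path: /sys/bus/vmbus/devices/...',
        '      Rel_ID=18, target_cpu=0',
    ]
    # zip consumes the *same* iterator four times, so each resulting
    # tuple holds one device's four consecutive lines
    parts = list(zip(*(iter(content),) * 4))
    assert len(parts) == 1 and len(parts[0]) == 4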
From 459196a9b48d42c2e986123807cccd595896a859 Mon Sep 17 00:00:00 2001
From: Rohan Arora
Date: Mon, 3 Aug 2020 18:45:09 +0530
Subject: [PATCH 134/892] Correct glob for yum_repos_d (#2689)
 (#2690)

Signed-off-by: Rohan Arora
---
 insights/specs/default.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index 3a625d04b..74dc7836e 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -668,7 +668,7 @@ def sap_sid_name(broker):
     yum_conf = simple_file("/etc/yum.conf")
     yum_list_available = simple_command("yum -C --noplugins list available")
     yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist")
-    yum_repos_d = glob_file("/etc/yum.repos.d/*")
+    yum_repos_d = glob_file("/etc/yum.repos.d/*.repo")
     zipl_conf = simple_file("/etc/zipl.conf")
     rpm_format = format_rpm()
     installed_rpms = simple_command("/bin/rpm -qa --qf '%s'" % rpm_format, context=HostContext)

From c57b035c686b4e5b5e843fd7317699c9848a6983 Mon Sep 17 00:00:00 2001
From: Bob Fahr
Date: Mon, 3 Aug 2020 15:35:53 -0500
Subject: [PATCH 135/892] Remove initscript spec due to performance issues
 (#2693)

* Removing this spec because it can collect too much data and cause
  failures in collection and processing of the archive
* Leaving it in sosreport and insights archives in case it has already
  been collected

Signed-off-by: Bob Fahr
---
 insights/specs/default.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index 74dc7836e..17518eb4a 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -336,7 +336,6 @@ def httpd_cmd(broker):
     ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*")
     imagemagick_policy = glob_file(["/etc/ImageMagick/policy.xml", "/usr/lib*/ImageMagick-6.5.4/config/policy.xml"])
     initctl_lst = simple_command("/sbin/initctl --system list")
-    initscript = glob_file("/etc/rc.d/init.d/*")
     init_process_cgroup = simple_file("/proc/1/cgroup")
     interrupts = simple_file("/proc/interrupts")
     ip_addr = simple_command("/sbin/ip addr")
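A note on the glob correction above: ``/etc/yum.repos.d/*`` also matches editor backups and ``.rpmsave``/``.rpmnew`` leftovers that yum itself never reads, while ``*.repo`` collects only real repo definitions. A quick illustration with the standard library:

    from fnmatch import fnmatch

    files = ['rhel.repo', 'rhel.repo.rpmsave', 'README']
    print([f for f in files if fnmatch(f, '*.repo')])  # ['rhel.repo']
    print([f for f in files if fnmatch(f, '*')])       # matches everything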
From 398f4bf21f8995e033b764c132a6e91b7875051b Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Tue, 4 Aug 2020 17:20:49 -0400
Subject: [PATCH 136/892] remove invalid tests, mock some filesystem writes
 (#2699)

* remove invalid tests, mock some filesystem writes

Signed-off-by: Jeremy Crafts

* flake

Signed-off-by: Jeremy Crafts
---
 insights/tests/client/test_client.py    |  7 +++++--
 insights/tests/client/test_utilities.py | 25 +------------------------
 2 files changed, 6 insertions(+), 26 deletions(-)

diff --git a/insights/tests/client/test_client.py b/insights/tests/client/test_client.py
index 316610037..75157735c 100644
--- a/insights/tests/client/test_client.py
+++ b/insights/tests/client/test_client.py
@@ -386,7 +386,9 @@ def test_unregister_upload(handle_unregistration):
 
 
 @patch('insights.client.os.path.exists', return_value=True)
+@patch('insights.client.connection.InsightsConnection.upload_archive', Mock(return_value=Mock(status_code=200)))
 @patch('insights.client.client._legacy_upload')
+@patch('insights.client.client.write_to_disk', Mock())
 def test_legacy_upload(_legacy_upload, path_exists):
     '''
     _legacy_upload called when legacy upload
@@ -398,9 +400,10 @@ def test_legacy_upload(_legacy_upload, path_exists):
 
 
 @patch('insights.client.os.path.exists', return_value=True)
-@patch('insights.client.connection.InsightsConnection.upload_archive', return_value=Mock(status_code=200))
+@patch('insights.client.connection.InsightsConnection.upload_archive', Mock(return_value=Mock(status_code=200)))
 @patch('insights.client.client._legacy_upload')
-def test_platform_upload(_legacy_upload, _, path_exists):
+@patch('insights.client.client.write_to_disk', Mock())
+def test_platform_upload(_legacy_upload, path_exists):
     '''
     _legacy_upload not called when platform upload
     '''
diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py
index a2e292f95..856d5374a 100644
--- a/insights/tests/client/test_utilities.py
+++ b/insights/tests/client/test_utilities.py
@@ -3,11 +3,9 @@
 import uuid
 import insights.client.utilities as util
 from insights.client.constants import InsightsConstants as constants
-from insights.client.config import InsightsConfig
 import re
 import mock
 import six
-import pytest
 from mock.mock import patch
 
 
@@ -119,29 +117,7 @@ def test_get_version_info_no_version(wrapper_constants):
     assert version_info == {'core_version': '1-1', 'client_version': None}
 
 
-def test_validate_remove_file_bad_perms():
-    tf = '/tmp/remove.cfg'
-    with open(tf, 'wb') as f:
-        f.write(remove_file_content)
-
-    conf = InsightsConfig(remove_file=tf, redaction_file=None, content_redaction_file=None, validate=True)
-    with pytest.raises(RuntimeError):
-        os.chmod(tf, 0o644)
-        util.validate_remove_file(conf)
-    os.chmod(tf, 0o600)
-    assert util.validate_remove_file(conf) is not False
-    os.remove(tf)
-
-
-def test_validate_remove_file_good_perms():
-    tf = '/tmp/remove.cfg'
-    with open(tf, 'wb') as f:
-        f.write(remove_file_content)
-
-    # TODO: DRY
-
 @patch('insights.client.utilities.constants.registered_files',
        ['/tmp/insights-client.registered',
         '/tmp/redhat-access-insights.registered'])
@@ -248,6 +224,7 @@ def test_systemd_notify(Popen):
     Popen.assert_called_once()
 
 
+@patch('insights.client.utilities.read_pidfile', mock.Mock(return_value=None))
 @patch('insights.client.utilities.threading.Thread')
 @patch('insights.client.utilities.os.path.exists')
 @patch.dict('insights.client.utilities.os.environ', {'NOTIFY_SOCKET': '/tmp/test.sock'})

From 7cda1a5da54b0944380a3b027c895d1141074fd4 Mon Sep 17 00:00:00 2001
From: Link Dupont
Date: Wed, 5 Aug 2020 09:36:08 -0400
Subject: [PATCH 137/892] client: write lastupload during platform upload
 (#2692)

Signed-off-by: Link Dupont

Co-authored-by: Jeremy Crafts
---
 insights/client/client.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/client/client.py b/insights/client/client.py
index 7fd9bc59c..59292283d 100644
--- a/insights/client/client.py
+++ b/insights/client/client.py
@@ -363,6 +363,7 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None):
         upload = pconn.upload_archive(tar_file, content_type, collection_duration)
 
         if upload.status_code in (200, 202):
+            write_to_disk(constants.lastupload_file)
             msg_name = determine_hostname(config.display_name)
             logger.info("Successfully uploaded report for %s.", msg_name)
             if config.register:
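The lastupload marker written above is just a timestamp file touched once the server answers 200 or 202. A minimal sketch of the same idea outside the client (the path mirrors constants.lastupload_file but is illustrative here):

    import os

    def mark_last_upload(status_code, path='/etc/insights-client/.lastupload'):
        # Touch the marker only for accepted uploads, as upload() now does.
        if status_code in (200, 202):
            with open(path, 'a'):
                os.utime(path, None)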
From c8bb39b945f90bfd97f91029fd26d38fe83f01f3 Mon Sep 17 00:00:00 2001
From: Arie Bregman
Date: Wed, 5 Aug 2020 19:54:18 +0300
Subject: [PATCH 138/892] Add designate parser (#2687)

Including tests and docs.

Signed-off-by: abregman
---
 .../shared_parsers_catalog/designate_conf.rst |  3 ++
 insights/parsers/designate_conf.py            | 47 +++++++++++++++++++
 insights/parsers/tests/test_designate_conf.py | 31 ++++++++++++
 insights/specs/__init__.py                    |  1 +
 insights/specs/default.py                     |  2 +
 5 files changed, 84 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/designate_conf.rst
 create mode 100644 insights/parsers/designate_conf.py
 create mode 100644 insights/parsers/tests/test_designate_conf.py

diff --git a/docs/shared_parsers_catalog/designate_conf.rst b/docs/shared_parsers_catalog/designate_conf.rst
new file mode 100644
index 000000000..3a7fbd732
--- /dev/null
+++ b/docs/shared_parsers_catalog/designate_conf.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.designate_conf
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/designate_conf.py b/insights/parsers/designate_conf.py
new file mode 100644
index 000000000..7739fc50f
--- /dev/null
+++ b/insights/parsers/designate_conf.py
@@ -0,0 +1,47 @@
+"""
+DesignateConf - file ``/etc/designate/designate.conf``
+======================================================
+
+This class provides parsing for the files:
+    ``/var/lib/config-data/puppet-generated/designate/etc/designate/designate.conf``
+    ``/etc/designate/designate.conf``
+
+Sample input data is in the format::
+
+    [DEFAULT]
+    state_path=/var/lib/designate
+    root_helper=sudo designate-rootwrap /etc/designate/rootwrap.conf
+    debug=True
+    log_dir=/var/log/designate
+
+    [keystone_authtoken]
+    www_authenticate_uri=http://localhost:5000
+    project_name=service
+    project_domain_name=Default
+
+    [oslo_messaging_notifications]
+    driver=messagingv2
+
+See the ``IniConfigFile`` class for examples.
+"""
+from .. import IniConfigFile, parser, add_filter
+from insights.specs import Specs
+
+ADDITIONAL_FILTERS = [
+    "[",
+    "state_path",
+    "root_helper",
+    "debug",
+    "log_dir",
+    "www_authenticate_uri",
+    "project_name",
+    "project_domain_name",
+    "driver"
+]
+add_filter(Specs.designate_conf, ADDITIONAL_FILTERS)
+
+
+@parser(Specs.designate_conf)
+class DesignateConf(IniConfigFile):
+    """Class to parse file ``designate.conf``."""
+    pass
diff --git a/insights/parsers/tests/test_designate_conf.py b/insights/parsers/tests/test_designate_conf.py
new file mode 100644
index 000000000..4a7798a10
--- /dev/null
+++ b/insights/parsers/tests/test_designate_conf.py
@@ -0,0 +1,31 @@
+from insights.parsers.designate_conf import DesignateConf
+from insights.tests import context_wrap
+
+DESIGNATE_CONF = """
+[DEFAULT]
+state_path=/var/lib/designate
+debug=True
+log_dir=/var/log/designate
+
+[keystone_authtoken]
+www_authenticate_uri=http://localhost:5000
+project_name=service
+project_domain_name=Default
+
+[oslo_messaging_notifications]
+driver=messagingv2
+"""
+
+
+def test_designate_conf():
+    dconf = DesignateConf(context_wrap(DESIGNATE_CONF))
+    assert dconf is not None
+    assert list(dconf.sections()) == ['keystone_authtoken', 'oslo_messaging_notifications']
+    assert dconf.defaults() == {
+        'debug': 'True',
+        'state_path': '/var/lib/designate',
+        'log_dir': '/var/log/designate'}
+    assert dconf.get('keystone_authtoken', 'project_name') == 'service'
+    assert dconf.has_option('oslo_messaging_notifications', 'driver')
+    assert not dconf.has_option('yabba', 'dabba_do')
+    assert dconf.get('DEFAULT', 'debug') == 'True'
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index dfdafcf2a..1a03e4846 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -105,6 +105,7 @@ class Specs(SpecSet):
     date_iso = RegistryPoint()
     date = RegistryPoint()
     date_utc = RegistryPoint()
+    designate_conf = RegistryPoint(filterable=True)
     dcbtool_gc_dcb = RegistryPoint(multi_output=True)
     df__alP = RegistryPoint()
     df__al = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 17518eb4a..c4478efd3 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -216,6 +216,8 @@ def is_ceph_monitor(broker):
     current_clocksource = simple_file("/sys/devices/system/clocksource/clocksource0/current_clocksource")
     date = simple_command("/bin/date")
     date_utc = simple_command("/bin/date --utc")
+    designate_conf = first_file(["/var/lib/config-data/puppet-generated/designate/etc/designate/designate.conf",
+                                 "/etc/designate/designate.conf"])
     df__al = simple_command("/bin/df -al")
     df__alP = simple_command("/bin/df -alP")
     df__li = simple_command("/bin/df -li")

From 3c648dc45cc4dcab1d3e114dec8f3d8e9fed6ba5 Mon Sep 17 00:00:00 2001
From: Xiaoxue Wang
Date: Thu, 6 Aug 2020 02:47:54 +0800
Subject: [PATCH 139/892] Add spec /proc/mounts into default.py (#2691)

Signed-off-by: XiaoXue Wang
---
 insights/specs/default.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index c4478efd3..f943ca846 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -431,6 +431,7 @@ def httpd_cmd(broker):
         "/etc/opt/rh/rh-mongodb26/mongod.conf"
     ])
     mount = simple_command("/bin/mount")
+    mounts = simple_file("/proc/mounts")
     mssql_conf = simple_file("/var/opt/mssql/mssql.conf")
     multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;")
     multipath_conf = simple_file("/etc/multipath.conf")

From 3713f3ac1157f7ef2d3db6fa2cb1e7e9ce8d3940 Mon Sep 17 00:00:00 2001
From: wushiqinlou
Date: Thu, 6 Aug 2020 04:34:45 +0800
Subject: [PATCH 140/892] Add item yum_log (#2696)

Signed-off-by: jiazhang
---
 insights/specs/default.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index f943ca846..1cc8defc0 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -669,6 +669,7 @@ def sap_sid_name(broker):
     xinetd_conf = glob_file(["/etc/xinetd.conf", "/etc/xinetd.d/*"])
     yum_conf = simple_file("/etc/yum.conf")
     yum_list_available = simple_command("yum -C --noplugins list available")
+    yum_log = simple_file("/var/log/yum.log")
     yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist")
     yum_repos_d = glob_file("/etc/yum.repos.d/*.repo")
     zipl_conf = simple_file("/etc/zipl.conf")
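The designate parser above is almost entirely inherited behavior: designate.conf is plain INI, so IniConfigFile supplies sections(), defaults(), get() and has_option(), and the parser itself only registers filters. A stand-in sketch of the access pattern the tests rely on, using the standard library's ConfigParser in place of the Insights base class (Python 3 syntax):

    from configparser import ConfigParser

    # ConfigParser here stands in for insights' IniConfigFile base class.
    cp = ConfigParser()
    cp.read_string(
        "[DEFAULT]\ndebug=True\n"
        "[keystone_authtoken]\nproject_name=service\n"
    )
    assert cp.get('keystone_authtoken', 'project_name') == 'service'
    assert cp.get('DEFAULT', 'debug') == 'True'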
From 8430755e3d754764ea99550e83ada9f71c4b4d02 Mon Sep 17 00:00:00 2001
From: Stephen
Date: Thu, 6 Aug 2020 11:12:33 -0400
Subject: [PATCH 141/892] Client invocation (#2675)

* Add client invocation to user agent

Signed-off-by: Stephen Adams

* flake8

Signed-off-by: Stephen Adams
---
 insights/client/__init__.py                      |  5 ++++-
 insights/client/connection.py                    |  9 ++++++++-
 insights/client/constants.py                     |  1 +
 insights/client/phase/v1.py                      |  1 +
 insights/client/utilities.py                     | 15 +++++++++++++++
 insights/tests/client/init/test_write_pidfile.py |  7 +++++--
 6 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/insights/client/__init__.py b/insights/client/__init__.py
index 1f2477d7e..301a138de 100644
--- a/insights/client/__init__.py
+++ b/insights/client/__init__.py
@@ -20,7 +20,8 @@
     generate_machine_id,
     get_tags,
     write_tags,
-    migrate_tags)
+    migrate_tags,
+    get_parent_process)
 
 NETWORK = constants.custom_network_log_level
 logger = logging.getLogger(__name__)
@@ -57,6 +58,8 @@ def __init__(self, config=None, setup_logging=True, **kwargs):
         else:
             # write PID to file in case we need to ping systemd
             write_to_disk(constants.pidfile, content=str(os.getpid()))
+            # write PPID to file so that we can grab the client execution method
+            write_to_disk(constants.ppidfile, content=get_parent_process())
         # setup insights connection placeholder
         # used for requests
         self.session = None
diff --git a/insights/client/connection.py b/insights/client/connection.py
index 18b6cd358..9654f9a13 100644
--- a/insights/client/connection.py
+++ b/insights/client/connection.py
@@ -186,6 +186,12 @@ def user_agent(self):
         if pkg is not None:
             client_version = "%s/%s" % (pkg.project_name, pkg.version)
 
+        if os.path.isfile(constants.ppidfile):
+            with open(constants.ppidfile, 'r') as f:
+                parent_process = f.read()
+        else:
+            parent_process = "unknown"
+
         requests_version = None
         pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse("requests"))
         if pkg is not None:
@@ -216,9 +222,10 @@ def user_agent(self):
             logger.warning("Failed to detect OS version: %s", e)
         kernel_version = "%s %s" % (platform.system(), platform.release())
 
-        ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version})".format(
+        ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version}); {parent_process}".format(
             client_version=client_version,
             core_version=core_version,
+            parent_process=parent_process,
             python_version=python_version,
             os_family=os_family,
             os_release=os_release,
diff --git a/insights/client/constants.py b/insights/client/constants.py
index 0cea1a8d6..c36a5fcb6 100644
--- a/insights/client/constants.py
+++ b/insights/client/constants.py
@@ -45,6 +45,7 @@ class InsightsConstants(object):
     sig_kill_bad = 101
     cached_branch_info = os.path.join(default_conf_dir, '.branch_info')
     pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid')
+    ppidfile = os.path.join(os.sep, 'tmp', 'insights-client.ppid')
     valid_compressors = ("gz", "xz", "bz2", "none")
     # RPM version in which core collection was released
    core_collect_rpm_version = '3.1.0'
diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py
index 01ca16122..044f9781b 100644
--- a/insights/client/phase/v1.py
+++ b/insights/client/phase/v1.py
@@ -264,6 +264,7 @@ def post_update(client, config):
 def collect_and_output(client, config):
     # last phase, delete PID file on exit
     atexit.register(write_to_disk, constants.pidfile, delete=True)
+    atexit.register(write_to_disk, constants.ppidfile, delete=True)
     # --compliance was called
     if config.compliance:
         config.payload, config.content_type = ComplianceClient(config).oscap_scan()
diff --git a/insights/client/utilities.py b/insights/client/utilities.py
index 88053d410..d0524ba37 100644
--- a/insights/client/utilities.py
+++ b/insights/client/utilities.py
@@ -396,3 +396,18 @@ def migrate_tags():
             os.rename(tags_conf, tags_yaml)
         except OSError as e:
             logger.error(e)
+
+
+def get_parent_process():
+    '''
+    Get parent process of the client
+
+    Returns: string
+    '''
+    ppid = os.getppid()
+    output = run_command_get_output('cat /proc/%s/status' % ppid)
+    if output['status'] == 0:
+        name = output['output'].splitlines()[0].split('\t')[1]
+        return name
+    else:
+        return "unknown"
diff --git a/insights/tests/client/init/test_write_pidfile.py b/insights/tests/client/init/test_write_pidfile.py
index 6fdbe1c1d..cee3d04ad 100644
--- a/insights/tests/client/init/test_write_pidfile.py
+++ b/insights/tests/client/init/test_write_pidfile.py
@@ -5,14 +5,17 @@
 
 @patch("insights.client.write_to_disk")
 @patch("insights.client.os.getpid")
-def test_write_pidfile(getpid, write_to_disk):
+@patch("insights.client.utilities.get_parent_process")
+def test_write_pidfile(get_parent_process, getpid, write_to_disk):
     '''
     Test writing of the pidfile when InsightsClient
     is called initially (when setup_logging=False)
     '''
     InsightsClient(setup_logging=False)
     getpid.assert_called_once()
-    write_to_disk.assert_called_with(InsightsConstants.pidfile, content=str(getpid.return_value))
+    calls = [write_to_disk(InsightsConstants.pidfile, content=str(getpid.return_value)),
+             write_to_disk(InsightsConstants.ppidfile, content=get_parent_process.return_value)]
+    write_to_disk.has_calls(calls)
 
 
 @patch("insights.client.write_to_disk")
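get_parent_process() above shells out to ``cat`` and takes the second tab-separated field of the first line of /proc/&lt;ppid&gt;/status, which is the ``Name:`` field. The same lookup works without a subprocess; a sketch:

    import os

    def parent_process_name():
        # First line of /proc/<pid>/status looks like "Name:\tbash".
        try:
            with open('/proc/%d/status' % os.getppid()) as f:
                return f.readline().split('\t', 1)[1].strip()
        except (IOError, IndexError):
            return 'unknown'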
From 8190de0cd550001e5c099805d59d2a7bf4c56f9f Mon Sep 17 00:00:00 2001
From: Yadnesh Kulkarni
Date: Thu, 6 Aug 2020 20:45:57 +0530
Subject: [PATCH 142/892] Add parsers SendQSocketBuffer and RecvQSocketBuffer
 to parse files `/proc/sys/net/ipv4/tcp_wmem` (#2614) and
 `/proc/sys/net/ipv4/tcp_rmem` respectively.

Fixes #2557

Signed-off-by: Yadnesh Kulkarni
---
 .../sendq_recvq_socket_buffer.rst             |  3 +
 insights/parsers/sendq_recvq_socket_buffer.py | 96 +++++++++++++++++++
 .../tests/test_sendq_recvq_socket_buffer.py   | 58 +++++++++++
 insights/specs/__init__.py                    |  2 +
 insights/specs/sos_archive.py                 |  2 +
 5 files changed, 161 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst
 create mode 100644 insights/parsers/sendq_recvq_socket_buffer.py
 create mode 100644 insights/parsers/tests/test_sendq_recvq_socket_buffer.py

diff --git a/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst b/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst
new file mode 100644
index 000000000..a64c33cc0
--- /dev/null
+++ b/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.sendq_recvq_socket_buffer
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/insights/parsers/sendq_recvq_socket_buffer.py b/insights/parsers/sendq_recvq_socket_buffer.py
new file mode 100644
index 000000000..ca980f764
--- /dev/null
+++ b/insights/parsers/sendq_recvq_socket_buffer.py
@@ -0,0 +1,96 @@
+"""
+SendQSocketBuffer - file ``/proc/sys/net/ipv4/tcp_wmem``
+--------------------------------------------------------
+RecvQSocketBuffer - file ``/proc/sys/net/ipv4/tcp_rmem``
+--------------------------------------------------------
+"""
+from insights.parsers import ParseException
+from insights import parser, Parser
+from insights.specs import Specs
+
+
+class SocketBuffer(Parser):
+    """ Base class for SendQSocketBuffer & RecvQSocketBuffer
+    """
+    def parse_content(self, content):
+        if not content:
+            raise ParseException("Empty content")
+        buffer_values = content[-1].split()
+        self.raw = " ".join(buffer_values)
+        self.minimum, self.default, self.maximum = [int(value) for value in buffer_values]
+
+    def __repr__(self):
+        return "<raw: {r}, minimum: {min}, default: {dft}, maximum: {max}>".format(
+            r=self.raw,
+            min=self.minimum,
+            dft=self.default,
+            max=self.maximum
+        )
+
+
+@parser(Specs.sendq_socket_buffer)
+class SendQSocketBuffer(SocketBuffer):
+    """Parse the file ``/proc/sys/net/ipv4/tcp_wmem``
+
+    Parameter ipv4/tcp_wmem is the amount of memory in bytes write (transmit)
+    buffer per open socket. This is a vector of 3 integers: [min, default, max].
+    These parameters are used by TCP to regulate send buffer sizes.
+    TCP dynamically adjusts the size of the send buffer from the default values
+    listed below, in the range of these values, depending on memory available.
+
+    Read more on http://man7.org/linux/man-pages/man7/tcp.7.html
+
+    Sample input::
+        4096 16384 4194304
+
+    Examples:
+        >>> sendq_buffer_values.raw
+        '4096 16384 4194304'
+        >>> sendq_buffer_values.minimum
+        4096
+        >>> sendq_buffer_values.default
+        16384
+        >>> sendq_buffer_values.maximum
+        4194304
+
+    Attributes:
+        raw: The raw content of send buffer sizes from tcp_wmem
+        minimum: Minimum size of the send buffer used by each TCP socket
+        default: The default size of the send buffer for a TCP socket
+        maximum: The maximum size of the send buffer used by each TCP socket
+    """
+    pass
+
+
+@parser(Specs.recvq_socket_buffer)
+class RecvQSocketBuffer(SocketBuffer):
+    """Parse the file ``/proc/sys/net/ipv4/tcp_rmem``
+
+    Parameter ipv4/tcp_rmem is the amount of memory in bytes for read (receive)
+    buffer per open socket. This is a vector of 3 integers: [min, default, max].
+    These parameters are used by TCP to regulate receive buffer sizes.
+    TCP dynamically adjusts the size of the receive buffer from the defaults
+    listed below, in the range of these values, depending on memory available
+    in the system.
+
+    Read more on http://man7.org/linux/man-pages/man7/tcp.7.html
+
+    Sample input::
+        4096 87380 6291456
+
+    Examples:
+        >>> recvq_buffer_values.raw
+        '4096 87380 6291456'
+        >>> recvq_buffer_values.minimum
+        4096
+        >>> recvq_buffer_values.default
+        87380
+        >>> recvq_buffer_values.maximum
+        6291456
+
+    Attributes:
+        raw: The raw content of receive buffer sizes from tcp_rmem
+        minimum: Minimum size of the receive buffer used by each TCP socket
+        default: The default size of the receive buffer for a TCP socket
+        maximum: The maximum size of the receive buffer used by each TCP socket
+    """
+    pass
diff --git a/insights/parsers/tests/test_sendq_recvq_socket_buffer.py b/insights/parsers/tests/test_sendq_recvq_socket_buffer.py
new file mode 100644
index 000000000..4ef088852
--- /dev/null
+++ b/insights/parsers/tests/test_sendq_recvq_socket_buffer.py
@@ -0,0 +1,58 @@
+import doctest
+import pytest
+from insights.parsers import ParseException
+
+from insights.tests import context_wrap
+from insights.parsers import sendq_recvq_socket_buffer
+from insights.parsers.sendq_recvq_socket_buffer import SendQSocketBuffer, RecvQSocketBuffer
+
+SENDQ_SOCKET_BUFFER = """
+4096 16384 4194304
+""".strip()
+
+EMPTY_SENDQ_SOCKET_BUFFER = """
+""".strip()
+
+RECVQ_SOCKET_BUFFER = """
+4096 87380 6291456
+""".strip()
+
+EMPTY_RECVQ_SOCKET_BUFFER = """
+""".strip()
+
+
+def test_empty_sendq_socket_buffer():
+    with pytest.raises(ParseException) as exc:
+        SendQSocketBuffer(context_wrap(EMPTY_SENDQ_SOCKET_BUFFER))
+    assert str(exc.value) == "Empty content"
+
+
+def test_sendq_socket_buffer():
+    sendq_buffer = SendQSocketBuffer(context_wrap(SENDQ_SOCKET_BUFFER))
+    assert sendq_buffer.minimum == 4096
+    assert sendq_buffer.default == 16384
+    assert sendq_buffer.maximum == 4194304
+    assert sendq_buffer.raw == '4096 16384 4194304'
+
+
+def test_empty_recvq_socket_buffer():
+    with pytest.raises(ParseException) as exc:
+        RecvQSocketBuffer(context_wrap(EMPTY_RECVQ_SOCKET_BUFFER))
+    assert str(exc.value) == "Empty content"
+
+
+def test_recvq_socket_buffer():
+    recvq_buffer = RecvQSocketBuffer(context_wrap(RECVQ_SOCKET_BUFFER))
+    assert recvq_buffer.minimum == 4096
+    assert recvq_buffer.default == 87380
+    assert recvq_buffer.maximum == 6291456
+    assert recvq_buffer.raw == '4096 87380 6291456'
+
+
+def test_doc():
+    env = {
+        'sendq_buffer_values': SendQSocketBuffer(context_wrap(SENDQ_SOCKET_BUFFER)),
+        'recvq_buffer_values': RecvQSocketBuffer(context_wrap(RECVQ_SOCKET_BUFFER)),
+    }
+    failures, tests = doctest.testmod(sendq_recvq_socket_buffer, globs=env)
+    assert failures == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 1a03e4846..dca9fb1b3 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -676,3 +676,5 @@ class Specs(SpecSet):
     yum_repos_d = RegistryPoint(multi_output=True)
     zdump_v = RegistryPoint()
     zipl_conf = RegistryPoint()
+    sendq_socket_buffer = RegistryPoint()
+    recvq_socket_buffer = RegistryPoint()
diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index 2d23aba89..c97533665 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -262,3 +262,5 @@ class SosSpecs(Specs):
     xfs_info = glob_file("sos_commands/xfs/xfs_info*")
     yum_log = simple_file("/var/log/yum.log")
     yum_repolist = simple_file("sos_commands/yum/yum_-C_repolist")
+    sendq_socket_buffer = simple_file("proc/sys/net/ipv4/tcp_wmem")
+    recvq_socket_buffer = simple_file("proc/sys/net/ipv4/tcp_rmem")

From 9dbe732b74fcede09022414ac3b9aa45ca597f6d Mon Sep 17 00:00:00 2001
From: Akshay Ghodake
Date: Wed, 12 Aug 2020 11:49:51 +0530
Subject: [PATCH 143/892] Added spec for subscription_manager_id in
 sos_archive.py (#2710)

The Spec for subscription_manager_id added in sos_archive.py to parse
the associated file:
- sos_commands/subscription_manager/subscription-manager_identity.

Signed-off-by: Akshay Ghodake
---
 insights/specs/sos_archive.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index c97533665..171c331f3 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -220,6 +220,7 @@ class SosSpecs(Specs):
     sssd_logs = glob_file("var/log/sssd/*.log")
     samba_logs = glob_file("var/log/samba/log.*")
     ssh_foreman_config = simple_file("/usr/share/foreman/.ssh/ssh_config")
+    subscription_manager_id = simple_file("/sos_commands/subscription_manager/subscription-manager_identity")
     subscription_manager_list_consumed = first_file([
         'sos_commands/yum/subscription-manager_list_--consumed',
         'sos_commands/subscription_manager/subscription-manager_list_--consumed',

From 273d35c5e3f6179fc0e594ae4919480b306386b1 Mon Sep 17 00:00:00 2001
From: Sheng Lao <39508521+shlao@users.noreply.github.com>
Date: Wed, 12 Aug 2020 20:51:07 +0800
Subject: [PATCH 144/892] Fixed the pattern for ssh_config_d (#2711)

Signed-off-by: shlao
---
 insights/specs/default.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index 1cc8defc0..50a91587d 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -598,7 +598,7 @@ def sap_sid_name(broker):
     software_collections_list = simple_command('/usr/bin/scl --list')
     ss = simple_command("/usr/sbin/ss -tupna")
     ssh_config = simple_file("/etc/ssh/ssh_config")
-    ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*")
+    ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*.conf")
     ssh_foreman_proxy_config = simple_file("/usr/share/foreman-proxy/.ssh/ssh_config")
     sshd_config = simple_file("/etc/ssh/sshd_config")
     sshd_config_perms = simple_command("/bin/ls -l /etc/ssh/sshd_config")
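The SendQ/RecvQ parsers added above reduce each file to the three-integer vector the kernel documents for tcp_wmem/tcp_rmem: minimum, default, maximum. The parse itself is a one-liner; a standalone sketch:

    def parse_socket_buffer(line):
        # tcp_wmem / tcp_rmem hold exactly three integers: min default max
        minimum, default, maximum = (int(v) for v in line.split())
        return {'minimum': minimum, 'default': default, 'maximum': maximum}

    assert parse_socket_buffer('4096 87380 6291456') == {
        'minimum': 4096, 'default': 87380, 'maximum': 6291456}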
From 82c4d574ea81dfd4760a45305afc5ef57d701df8 Mon Sep 17 00:00:00 2001
From: Andrew Kofink
Date: Wed, 12 Aug 2020 13:42:24 -0400
Subject: [PATCH 145/892] Only scan against internal, non-canonical profiles
 (#2694)

Related to https://github.com/RedHatInsights/compliance-backend/pull/496

Signed-off-by: Andrew Kofink

Co-authored-by: Jeremy Crafts
---
 insights/client/apps/compliance/__init__.py   | 2 +-
 insights/tests/client/apps/test_compliance.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py
index 1881c180c..5c5a70a60 100644
--- a/insights/client/apps/compliance/__init__.py
+++ b/insights/client/apps/compliance/__init__.py
@@ -70,7 +70,7 @@ def download_tailoring_file(self, policy):
     # We need to update compliance-backend to fix this
     def get_policies(self):
         response = self.conn.session.get("https://{0}/compliance/profiles".format(self.config.base_url),
-                                         params={'search': 'system_names={0}'.format(self.hostname)})
+                                         params={'search': 'system_names={0} external=false canonical=false'.format(self.hostname)})
         logger.debug("Content of the response: {0} - {1}".format(response, response.json()))
 
         if response.status_code == 200:
diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py
index bb76fdb8f..24719207f 100644
--- a/insights/tests/client/apps/test_compliance.py
+++ b/insights/tests/client/apps/test_compliance.py
@@ -51,7 +51,7 @@ def test_get_policies(config):
     compliance_client.hostname = 'foo'
     compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]})))
     assert compliance_client.get_policies() == [{'attributes': 'data'}]
-    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'})
+    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'})
 
 
 @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None)
@@ -60,7 +60,7 @@ def test_get_policies_no_policies(config):
     compliance_client.hostname = 'foo'
     compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': []})))
     assert compliance_client.get_policies() == []
-    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'})
+    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'})
 
 
 @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None)
@@ -69,7 +69,7 @@ def test_get_policies_error(config):
     compliance_client.hostname = 'foo'
     compliance_client.conn.session.get = Mock(return_value=Mock(status_code=500))
     assert compliance_client.get_policies() == []
-    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo'})
+    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'})
 
 
 @patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None))
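The compliance change above only alters the ``search`` query string sent to the profiles endpoint; the three space-separated conditions are interpreted server-side by compliance-backend. A sketch of how the final URL looks once encoded (no request is made; the host name is illustrative):

    try:
        from urllib.parse import urlencode   # Python 3
    except ImportError:
        from urllib import urlencode         # Python 2

    params = {'search': 'system_names={0} external=false canonical=false'.format('myhost')}
    print('https://example.com/compliance/profiles?' + urlencode(params))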
From e37cf57e4aeef44f8427bb090d4f6b37fc290e15 Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Wed, 19 Aug 2020 09:51:09 -0400
Subject: [PATCH 146/892] update uploader.json map (#2706)

Signed-off-by: Jeremy Crafts
---
 insights/uploader_json_map.json | 131 ++++++++++++++++++++++++++++++--
 1 file changed, 124 insertions(+), 7 deletions(-)

diff --git a/insights/uploader_json_map.json b/insights/uploader_json_map.json
index fa7c7a088..a9219bbc5 100644
--- a/insights/uploader_json_map.json
+++ b/insights/uploader_json_map.json
@@ -169,6 +169,7 @@
                 "SMBIOS ",
                 "SPLXMOD: SPLX 3.0: KHM loaded. Version [30118]",
                 "SPLXMOD: SPLX 3.0: KHM loaded. Version [30119]",
+                "Secure boot enabled",
                 "TECH PREVIEW: NVMe over FC may not be fully supported.",
                 "Uhhuh. NMI received for unknown reason",
                 "VPD access disabled",
@@ -232,12 +233,23 @@
            "pattern": [],
            "symbolic_name": "du_dirs"
        },
+        {
+            "command": "/bin/engine-db-query --statement \"SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;\" --json",
+            "pattern": [],
+            "symbolic_name": "engine_db_query_vdsm_version"
+        },
        {
            "command": "/sbin/ethtool",
            "pattern": [],
            "pre_command": "iface",
            "symbolic_name": "ethtool"
        },
+        {
+            "command": "/sbin/ethtool -c",
+            "pattern": [],
+            "pre_command": "iface",
+            "symbolic_name": "ethtool_c"
+        },
        {
            "command": "/sbin/ethtool -S",
            "pattern": [],
@@ -396,6 +408,11 @@
            "symbolic_name": "installed_rpms",
            "image": true
        },
+        {
+            "command": "/sbin/initctl --system list",
+            "pattern": [],
+            "symbolic_name": "initctl_lst"
+        },
        {
            "command": "/sbin/ip -s -d link",
            "pattern": [],
@@ -542,6 +559,8 @@
        {
            "command": "/bin/ls -lan /usr/lib64",
            "pattern": [
+                "liblber",
+                "libldap",
                "total"
            ],
            "symbolic_name": "ls_usr_lib64"
        },
@@ -611,6 +630,14 @@
            ],
            "symbolic_name": "ls_var_tmp"
        },
+        {
+            "command": "/bin/ls -la /tmp",
+            "pattern": [
+                ".sap",
+                ".trex"
+            ],
+            "symbolic_name": "ls_tmp"
+        },
        {
            "command": "/bin/lsblk",
            "pattern": [],
@@ -702,6 +729,11 @@
            "pattern": [],
            "symbolic_name": "lsscsi"
        },
+        {
+            "command": "/usr/sbin/lsvmbus -vv",
+            "pattern": [],
+            "symbolic_name": "lsvmbus"
+        },
        {
            "command": "/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"",
            "pattern": [],
@@ -993,13 +1025,17 @@
                "COMMAND",
                "bash",
                "chronyd",
+                "clvmd",
                "corosync",
+                "crmd",
+                "dlm_controld",
                "docker",
                "ntpd",
                "openshift start master api",
                "openshift start master controllers",
                "openshift start node",
                "ora",
+                "pacemaker-controld",
                "pacemakerd",
                "pcsd",
                "spausedd",
@@ -1024,7 +1060,10 @@
                "bash",
                "ceph-osd",
                "chronyd",
+                "clvmd",
                "corosync",
+                "crmd",
+                "dlm_controld",
                "docker",
                "mysqld",
                "ntpd",
                "openshift start master api",
                "openshift start master controllers",
                "openshift start node",
                "ora",
+                "pacemaker-controld",
                "pacemakerd",
                "pcsd",
                "phc2sys",
@@ -1063,8 +1103,11 @@
                "catalina.base",
                "ceilometer-coll",
                "chronyd",
+                "clvmd",
                "cmirrord",
                "corosync",
+                "crmd",
+                "dlm_controld",
                "docker",
                "elasticsearch",
                "goferd",
                "openshift start master api",
                "openshift start master controllers",
                "openshift start node",
                "ora",
+                "pacemaker-controld",
                "pacemakerd",
                "pcsd",
                "pkla-check-auth",
@@ -1105,7 +1149,10 @@
                "CMD",
                "bash",
                "chronyd",
+                "clvmd",
                "corosync",
+                "crmd",
+                "dlm_controld",
                "docker",
                "neutron-ns-metadata-proxy",
                "nginx: master process",
                "openshift start master api",
                "openshift start master controllers",
                "openshift start node",
                "ora",
+                "pacemaker-controld",
                "pacemakerd",
                "pcsd",
                "spausedd",
@@ -1484,6 +1532,11 @@
            "pattern": [],
            "symbolic_name": "bond"
        },
+        {
+            "file": "/var/log/candlepin/candlepin.log",
+            "pattern": [],
+            "symbolic_name": "candlepin_log"
+        },
        {
            "file": "/var/log/tomcat/()*catalina\\.out",
            "pattern": [
@@ -1779,13 +1832,22 @@
                "L1TF",
                "L1Tf",
                "Linux version",
+                "Secure boot enabled",
                "__cpufreq_add_dev",
                "efi",
+                "hv_vmbus: probe failed for device",
                "l1tf",
                "x86/pti"
            ],
            "symbolic_name": "dmesg_log"
        },
+        {
+            "file": "/etc/dnf/dnf.conf",
+            "pattern": [
+                "["
+            ],
+            "symbolic_name": "dnf_conf"
+        },
        {
            "file": "/etc/dnf/modules.d/.*\\.module",
            "pattern": [],
@@ -2165,6 +2227,7 @@
            "file": "/var/log/messages",
            "pattern": [
                " disconnect jid=",
+                " invoked oom-killer: ",
                "\"/var/lib/pgsql/data\" is missing or empty",
                "(enic): transmit queue 0 timed out",
                ", type vxfs) has no security xattr handler",
@@ -2204,9 +2267,9 @@
                "Orphaned pod",
                "Out of MCCQ wrbs",
                "Out of memory: Kill process",
-                "Out of memory: kill process",
                "PPM exceeds tolerance 500 PPM",
                "ProcessExecutionError: Exit code: 1; Stdin: ; Stdout: ; Stderr: setting the network namespace",
+                "Result of start operation for clvmd ",
                "SCSI error: return code =",
                "SDN initialization failed: Error: Existing service with IP: None is not part of service network",
                "Scheduled import of stream",
@@ -2254,6 +2317,7 @@
                "hv_netvsc vmbus_",
                "hv_netvsc: probe of vmbus_",
                "hw csum failure",
+                "ill process ",
                "in libnl.so.1",
                "initiating reset due to tx timeout",
                "invalid key/value pair in file /usr/lib/udev/rules.d/59-fc-wwpn-id.rules",
@@ -2267,13 +2331,14 @@
                "kernel: BUG: soft lockup",
                "kernel: CIFS VFS: Unexpected SMB signature",
                "kernel: INFO: task xfsaild/md",
+                "kernel: Linux version",
                "kernel: Memory cgroup out of memory: Kill process",
                "kernel: TCP: out of memory -- consider tuning tcp_mem",
                "kernel: bnx2fc: byte_count",
                "kernel: kvm: disabled by bios",
                "kernel: lockd: Unknown symbol register_inet6addr_notifier",
                "kernel: lockd: Unknown symbol unregister_inet6addr_notifier",
-                "kernel: megasas: Found FW in FAULT state,will reset adapter.",
+                "kernel: megasas: Found FW in FAULT state, will reset adapter.",
                "kernel: nfs: server",
                "kernel: possible SYN flooding on port",
                "khash_super_prune_nolock",
@@ -2295,6 +2360,7 @@
                "shm_open failed, Permission denied",
                "skb_copy",
                "skb_over_panic",
+                "socket error sending to node",
                "start request repeated too quickly for docker.service",
                "state changed timeout -> done",
                "swapper: page allocation failure",
@@ -2310,6 +2376,7 @@
                "transmit queue",
                "udev: renamed network interface",
                "unknown filesystem type 'binfmt_misc'",
+                "ut of memory: ",
                "vdsm-tool: EnvironmentError: Failed to restore the persisted networks",
                "watch chan error: etcdserver: mvcc: required revision has been compacted"
            ],
            "symbolic_name": "messages"
@@ -2332,6 +2399,11 @@
            ],
            "symbolic_name": "mongod_conf"
        },
+        {
+            "file": "/proc/mounts",
+            "pattern": [],
+            "symbolic_name": "mounts"
+        },
        {
            "file": "/var/opt/mssql/mssql.conf",
            "pattern": [],
@@ -3058,6 +3130,7 @@
                "ALLOWUSERS",
                "AllowUsers",
                "Allowusers",
+                "AuthorizedKeysFile",
                "CHALLENGERESPONSEAUTHENTICATION",
                "CIPHERS",
                "CLIENTALIVECOUNTMAX",
@@ -3124,6 +3197,7 @@
        {
            "file": "/etc/ssh/ssh_config",
            "pattern": [
                "Host",
+                "Include",
                "ProxyCommand"
            ],
            "symbolic_name": "ssh_config"
@@ -3441,9 +3515,9 @@
            "symbolic_name": "yum_conf"
        },
        {
-            "file": "/etc/yum.repos.d/()*.*\\.repo",
+            "file": "/var/log/yum.log",
            "pattern": [],
-            "symbolic_name": "yum_repos_d"
+            "symbolic_name": "yum_log"
        },
        {
            "file": "/var/log/redhat_access_proactive/redhat_access_proactive.log",
            "pattern": [],
@@ -3505,8 +3579,16 @@
        {
            "file": "/etc/neutron/neutron.conf",
            "pattern": [
                "[",
+                "agent_down_time",
+                "agent_report_interval",
+                "allow_automatic_dhcp_failover",
+                "api_workers",
                "debug",
-                "dhcp_agents_per_network"
+                "dhcp_agents_per_network",
+                "ipam_driver",
+                "router_distributed",
+                "rpc_workers",
+                "service_plugins"
            ],
            "symbolic_name": "neutron_conf"
        },
@@ -3514,8 +3596,16 @@
        {
            "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf",
            "pattern": [
                "[",
+                "agent_down_time",
+                "agent_report_interval",
+                "allow_automatic_dhcp_failover",
+                "api_workers",
                "debug",
-                "dhcp_agents_per_network"
+                "dhcp_agents_per_network",
+                "ipam_driver",
+                "router_distributed",
+                "rpc_workers",
+                "service_plugins"
            ],
            "symbolic_name": "neutron_conf"
        },
@@ -3545,6 +3635,28 @@
            "pattern": [],
            "symbolic_name": "neutron_plugin_ini"
        },
+        {
+            "file": "/etc/neutron/plugins/ml2/sriov_agent.ini",
+            "pattern": [
+                "[",
+                "debug",
+                "exclude_devices",
+                "extensions",
+                "physical_device_mappings"
+            ],
+            "symbolic_name": "neutron_sriov_agent"
+        },
+        {
+            "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/sriov_agent.ini",
+            "pattern": [
+                "[",
+                "debug",
+                "exclude_devices",
+                "extensions",
+                "physical_device_mappings"
+            ],
+            "symbolic_name": "neutron_sriov_agent"
+        },
        {
            "file": "/etc/zipl.conf",
            "pattern": [],
            "symbolic_name": "zipl_conf"
@@ -3838,6 +3950,11 @@
            "glob": "/sys/class/scsi_host/host[0-9]*/eh_deadline",
            "symbolic_name": "scsi_eh_deadline",
            "pattern": []
+        },
+        {
+            "glob": "/etc/yum.repos.d/*.repo",
+            "symbolic_name": "yum_repos_d",
+            "pattern": []
        }
    ],
    "meta_specs": {
@@ -3860,5 +3977,5 @@
    "pre_commands": {
        "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"
    },
-    "version": "2020-07-16T14:49:33.251429"
+    "version": "2020-08-06T14:32:35.711025"
 }
\ No newline at end of file
From 7e664d84f2205644f7ab8f2dfcc4e8296eaaec46 Mon Sep 17 00:00:00 2001
From: Chris Sams
Date: Wed, 19 Aug 2020 16:59:31 -0500
Subject: [PATCH 147/892] Update yum.repos.d parser to be less tolerant.
 (#2713)

* Update yum.repos.d parser to be less tolerant.

Signed-off-by: Christopher Sams

* Move grammar to module level.

It's static, so there's no need to reconstruct it on every parse.

Signed-off-by: Christopher Sams
---
 insights/parsers/tests/test_yum_repos_d.py |  1 -
 insights/parsers/yum_repos_d.py            | 59 ++++++++++++++--------
 2 files changed, 38 insertions(+), 22 deletions(-)

diff --git a/insights/parsers/tests/test_yum_repos_d.py b/insights/parsers/tests/test_yum_repos_d.py
index e71a20868..097c94db0 100644
--- a/insights/parsers/tests/test_yum_repos_d.py
+++ b/insights/parsers/tests/test_yum_repos_d.py
@@ -16,7 +16,6 @@
 baseurl=ftp://ftp.redhat.com/pub/redhat/linux/beta/$releasever/en/os/SRPMS/,ftp://ftp2.redhat.com/pub/redhat/linux/beta/$releasever/en/os/SRPMS/
 enabled=0
 gpgcheck=1
-    0 # This should be ignored
 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta,file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
 '''
 
diff --git a/insights/parsers/yum_repos_d.py b/insights/parsers/yum_repos_d.py
index ac7382b52..4bb2b20db 100644
--- a/insights/parsers/yum_repos_d.py
+++ b/insights/parsers/yum_repos_d.py
@@ -1,5 +1,39 @@
-from .. import Parser, parser, get_active_lines, LegacyItemAccess
+import re
+import string
+
+from .. import Parser, parser, LegacyItemAccess
 from insights.specs import Specs
+from insights.parsr import (Char, EOF, HangingString, InSet, Many, OneLineComment, Opt,
+                            skip_none, String, WithIndent, WS)
+
+
+header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
+sep_chars = set(":=")
+key_chars = header_chars - sep_chars - set(" ")
+value_chars = set(string.printable) - set("\n\r")
+
+LeftEnd = WS >> Char("[") << WS
+RightEnd = WS >> Char("]") << WS
+Header = LeftEnd >> String(header_chars) << RightEnd
+Key = WS >> String(key_chars) << WS
+Sep = InSet(sep_chars)
+Value = WS >> HangingString(value_chars)
+KVPair = WithIndent(Key + Opt(Sep >> Value))
+Comment = WS >> (OneLineComment("#") | OneLineComment(";")).map(lambda x: None)
+
+Line = Comment | KVPair.map(tuple)
+Sect = (Header + Many(Line).map(skip_none).map(dict)).map(tuple)
+Doc = Many(Comment | Sect).map(skip_none).map(dict)
+Top = Doc << WS << EOF
+
+
+def parse_yum_repos(content):
+    doc = Top(content)
+    for k, v in doc.items():
+        for special in ("baseurl", "gpgkey"):
+            if special in v:
+                v[special] = [i.strip() for i in re.split(",| ", v[special])]
+    return doc
 
 
 @parser(Specs.yum_repos_d)
@@ -10,7 +44,7 @@ def get(self, key):
         return self.data.get(key)
 
     def parse_content(self, content):
-        '''
+        """
         Return an object contains a dict.
         {
             "rhel-source": {
@@ -33,25 +67,8 @@ def parse_content(self, content):
             gpgcheck=1
             gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
                    file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release_bak
-        '''
-        repos_dict = {}
-        section_dict = {}
-        key = None
-        for line in get_active_lines(content):
-            if line.startswith('['):
-                section_dict = {}
-                repos_dict[line[1:-1]] = section_dict
-            elif '=' in line:
-                key, value = [s.strip() for s in line.split("=", 1)]
-                if key in ('baseurl', 'gpgkey'):
-                    section_dict[key] = [v.strip() for v in value.split(",")]
-                else:
-                    section_dict[key] = value
-            else:
-                if key and isinstance(section_dict[key], list):
-                    section_dict[key].extend(v.strip() for v in line.split(","))
-                # Otherwise ignore line if no key or we don't store multiple values
-        self.data = repos_dict
+        """
+        self.data = parse_yum_repos("\n".join(content))
 
     def __iter__(self):
         for repo in self.data:
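Assuming the module-level grammar above is importable, the new parser is stricter: stray tokens such as the removed ``0`` test line now fail the parse instead of being silently skipped, while list-valued options are still split. A usage sketch:

    from insights.parsers.yum_repos_d import parse_yum_repos

    content = (
        "[rhel-source]\n"
        "name=Red Hat Enterprise Linux\n"
        "baseurl=ftp://ftp.redhat.com/a,ftp://ftp2.redhat.com/b\n"
        "enabled=0\n"
    )
    repos = parse_yum_repos(content)
    assert repos['rhel-source']['enabled'] == '0'
    # baseurl and gpgkey values are post-processed into lists
    assert repos['rhel-source']['baseurl'] == [
        'ftp://ftp.redhat.com/a', 'ftp://ftp2.redhat.com/b']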
Signed-off-by: Christopher Sams --- insights/parsr/__init__.py | 68 ++++++++++++++++---------------- insights/parsr/examples/arith.py | 8 ++-- 2 files changed, 39 insertions(+), 37 deletions(-) diff --git a/insights/parsr/__init__.py b/insights/parsr/__init__.py index bd84ae062..ecccf3790 100644 --- a/insights/parsr/__init__.py +++ b/insights/parsr/__init__.py @@ -120,16 +120,16 @@ def inner(self, pos, data, ctx): if self._debug: line = ctx.line(pos) + 1 col = ctx.col(pos) + 1 - log.debug("Trying {} at line {} col {}".format(self, line, col)) + log.debug("Trying {0} at line {1} col {2}".format(self, line, col)) try: res = func(self, pos, data, ctx) if self._debug: - log.debug("Result: {}".format(res[1])) + log.debug("Result: {0}".format(res[1])) return res except: if self._debug: ps = "-> ".join([str(p) for p in ctx.parser_stack]) - log.debug("Failed: {}".format(ps)) + log.debug("Failed: {0}".format(ps)) raise finally: ctx.parser_stack.pop() @@ -323,11 +323,13 @@ def __call__(self, data, src=None, Ctx=Context): lineno = ctx.line(ctx.pos) + 1 colno = ctx.col(ctx.pos) + 1 - msg = "At line {} column {}:" + msg = "At line {0} column {1}:" print(msg.format(lineno, colno, ctx.lines), file=err) for parsers, msg in ctx.errors: - ps = "-> ".join([str(p) for p in parsers]) - print("{}: Got {!r}. {}".format(ps, data[ctx.pos], msg), file=err) + names = " -> ".join([p.name for p in parsers if p.name]) + v = data[ctx.pos] or "EOF" + print(names, file=err) + print(" {0} Got {1!r}.".format(msg, v), file=err) err.seek(0) raise Exception(err.read()) @@ -363,12 +365,14 @@ def __init__(self, char): def process(self, pos, data, ctx): if data[pos] == self.char: return (pos + 1, self.char) - msg = "Expected {!r}.".format(self.char) + msg = "Expected {0}.".format(self.char) ctx.set(pos, msg) raise Exception(msg) def __repr__(self): - return "Char({!r})".format(self.char) + if self.name is None: + return "Char({0})".format(self.char) + return self.name class InSet(Parser): @@ -395,13 +399,13 @@ def process(self, pos, data, ctx): c = data[pos] if c in self.values: return (pos + 1, c) - msg = "Expected {}.".format(self) + msg = "Expected {0}.".format(self) ctx.set(pos, msg) raise Exception(msg) def __repr__(self): - if not self.name: - return "InSet({!r})".format(sorted(self.values)) + if self.name is None: + return "InSet({0!r})".format(sorted(self.values)) return super(InSet, self).__repr__() @@ -441,7 +445,7 @@ def process(self, pos, data, ctx): break p = data[pos] if len(results) < self.min_length: - msg = "Expected {} of {}.".format(self.min_length, sorted(self.chars)) + msg = "Expected {0} of {1}.".format(self.min_length, sorted(self.chars)) ctx.set(old, msg) raise Exception(msg) return pos, "".join(results) @@ -488,6 +492,7 @@ def __init__(self, chars, value=_NULL, ignore_case=False): self.chars = chars if not ignore_case else chars.lower() self.value = value self.ignore_case = ignore_case + self.name = "Literal{0!r}".format(self.chars) def process(self, pos, data, ctx): old = pos @@ -496,7 +501,7 @@ def process(self, pos, data, ctx): if data[pos] == c: pos += 1 else: - msg = "Expected {!r}.".format(self.chars) + msg = "Expected {0!r}.".format(self.chars) ctx.set(old, msg) raise Exception(msg) return pos, (self.chars if self.value is self._NULL else self.value) @@ -507,7 +512,7 @@ def process(self, pos, data, ctx): result.append(data[pos]) pos += 1 else: - msg = "Expected case insensitive {!r}.".format(self.chars) + msg = "Expected case insensitive {0!r}.".format(self.chars) ctx.set(old, msg) raise 
Exception(msg) return pos, ("".join(result) if self.value is self._NULL else self.value) @@ -689,7 +694,7 @@ def process(self, pos, data, ctx): break if len(results) < self.lower: child = self.children[0] - msg = "Expected at least {} of {}.".format(self.lower, child) + msg = "Expected at least {0} of {1}.".format(self.lower, child) ctx.set(orig, msg) raise Exception() @@ -697,7 +702,7 @@ def process(self, pos, data, ctx): def __repr__(self): if not self.name: - return "Many({}, lower={})".format(self.children[0], self.lower) + return "Many({0}, lower={1})".format(self.children[0], self.lower) return super(Many, self).__repr__() @@ -801,7 +806,7 @@ def process(self, pos, data, ctx): except Exception: return new, res else: - msg = "{} can't follow {}".format(right, left) + msg = "{0} can't follow {1}".format(right, left) ctx.set(new, msg) raise Exception() @@ -916,7 +921,7 @@ def process(self, pos, data, ctx): def __repr__(self): if not self.name: - return "Map({}({}))".format(self.func.__name__, self.children[0]) + return "Map({0}({1}))".format(self.func.__name__, self.children[0]) return super(Map, self).__repr__() @@ -1155,7 +1160,7 @@ def process(self, pos, data, ctx): e = expect.lower() if r != e: - msg = "Expected {!r}. Got {!r}.".format(expect, res) + msg = "Expected {0!r}. Got {1!r}.".format(expect, res) ctx.set(pos, msg) raise Exception(msg) return pos, res @@ -1167,13 +1172,10 @@ def _make_number(sign, int_part, frac_part): def skip_none(x): - """ - Filters ``None`` values from a list. Often used with map. - """ return [i for i in x if i is not None] -EOF = EOF() +EOF = EOF() % "EOF" EOL = InSet("\n\r") % "EOL" LineEnd = Wrapper(EOL | EOF) % "LineEnd" EQ = Char("=") @@ -1189,15 +1191,15 @@ def skip_none(x): Colon = Char(":") SemiColon = Char(";") Comma = Char(",") -AnyChar = AnyChar() -NonZeroDigit = InSet(set(string.digits) - set("0")) -Digit = InSet(string.digits) % "Digit" -Digits = String(string.digits) % "Digits" -Letter = InSet(string.ascii_letters) -Letters = String(string.ascii_letters) -WSChar = InSet(set(string.whitespace) - set("\n\r")) % "Whitespace w/o EOL" -WS = Many(InSet(string.whitespace) % "WS") % "Whitespace" -Number = (Lift(_make_number) * Opt(Char("-"), "") * Digits * Opt(Char(".") + Digits)) % "Number" +AnyChar = AnyChar() % "any character" +NonZeroDigit = InSet(set(string.digits) - set("0")) % "non zero digit" +Digit = InSet(string.digits) % "digit" +Digits = String(string.digits) % "digits" +Letter = InSet(string.ascii_letters) % "ASCII letter" +Letters = String(string.ascii_letters) % "ASCII letters" +WSChar = InSet(set(string.whitespace) - set("\n\r")) % "whitespace w/o EOL" +WS = Many(InSet(string.whitespace) % "any whitespace") +Number = (Lift(_make_number) * Opt(Char("-"), "") * Digits * Opt(Char(".") + Digits)) % "number" SingleQuotedString = Char("'") >> String(set(string.printable) - set("'"), "'") << Char("'") DoubleQuotedString = Char('"') >> String(set(string.printable) - set('"'), '"') << Char('"') -QuotedString = Wrapper(DoubleQuotedString | SingleQuotedString) % "Quoted String" +QuotedString = Wrapper(DoubleQuotedString | SingleQuotedString) % "quoted string" diff --git a/insights/parsr/examples/arith.py b/insights/parsr/examples/arith.py index 667db0260..68123c0dd 100644 --- a/insights/parsr/examples/arith.py +++ b/insights/parsr/examples/arith.py @@ -7,7 +7,7 @@ def evaluate(e): - return Top(e)[0] + return Top(e) def op(args): @@ -40,11 +40,11 @@ def op(args): # at the end. 
# We have to declare expr before its definition since it's used recursively. -expr = Forward() % "expr forward" +expr = Forward() # A factor is a simple number or a subexpression between parentheses -factor = WS >> (Number % "Number" | (LeftParen >> expr << RightParen)) << WS +factor = (WS >> (Number | (LeftParen >> expr << RightParen)) << WS) % "factor" # A term handles strings of multiplication and division. As written, it would # convert "1 + 2 - 3 + 4" into [1, [['+', 2], ['-', 3], ['+', 4]]]. The first @@ -59,4 +59,4 @@ def op(args): expr <= (term + Many(LowOps + term)).map(op) % "expr" # Top returns [result, None] on success and raises an Exception on failure. -Top = (expr + EOF) % "Top" +Top = expr << EOF From 59548d0f006a2891db5e9cda7dd9a1e5a38e1076 Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Thu, 20 Aug 2020 06:11:12 +0530 Subject: [PATCH 149/892] Added specs and test cases for "ls -ln /etc/rc.d/init.d/" (#2700) * Added specs and test cases for "ls -ln /etc/rc.d/init.d/" Signed-off-by: vishawanathjadhav * Updated specs Signed-off-by: vishawanathjadhav --- insights/parsers/tests/test_ls_etc.py | 180 ++++++++++++++++++++++++++ insights/specs/default.py | 4 +- insights/specs/insights_archive.py | 2 +- 3 files changed, 183 insertions(+), 3 deletions(-) diff --git a/insights/parsers/tests/test_ls_etc.py b/insights/parsers/tests/test_ls_etc.py index 2aa2b0c14..0dd8854b3 100644 --- a/insights/parsers/tests/test_ls_etc.py +++ b/insights/parsers/tests/test_ls_etc.py @@ -5,6 +5,183 @@ LS_ETC = """ +/etc/: +total 388 +drwxr-xr-x. 46 1000 1000 4096 Jun 11 21:01 . +drwx------. 15 1000 1000 4096 Aug 5 13:45 .. +drwxr-xr-x. 3 1000 1000 4096 Nov 12 2015 acpi +drwxr-xr-x. 2 1000 1000 4096 Apr 19 2017 alsa +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:56 alternatives +-rw-r--r--. 1 1000 1000 148 Jan 12 2016 asound.conf +drwxr-x---. 2 1000 1000 4096 Jun 11 20:31 audit +-rw-r--r--. 1 1000 1000 13641 Mar 13 2019 autofs.conf +-rw-------. 1 1000 1000 232 Mar 13 2019 autofs_ldap_auth.conf +-rw-r--r--. 1 1000 1000 667 Mar 13 2019 auto.master +-rw-r--r--. 1 1000 1000 524 Mar 13 2019 auto.misc +-rwxr-xr-x. 1 1000 1000 1260 Mar 13 2019 auto.net +-rwxr-xr-x. 1 1000 1000 687 Mar 13 2019 auto.smb +drwxr-xr-x. 2 1000 1000 4096 Mar 24 2019 cron.d +drwxr-xr-x. 2 1000 1000 4096 Oct 20 2019 cron.daily +-rw-------. 1 1000 1000 0 Jul 22 2016 cron.deny +drwxr-xr-x. 2 1000 1000 4096 Apr 19 2017 cron.hourly +drwxr-xr-x. 2 1000 1000 4096 Mar 5 2015 cron.monthly +-rw-r--r--. 1 1000 1000 457 Jun 3 2011 crontab +drwxr-xr-x. 2 1000 1000 4096 May 17 2019 cups +drwxr-xr-x. 3 1000 1000 4096 Aug 18 2019 dbus-1 +-rw-r--r--. 1 1000 1000 21214 Oct 22 2017 dnsmasq.conf +-rw-r--r--. 1 1000 1000 1331 Jun 11 19:33 fstab +drwxr-xr-x. 6 1000 1000 4096 Jul 8 2018 gdm +lrwxrwxrwx. 1 1000 1000 22 Jun 11 20:56 grub.conf -> ../boot/grub/grub.conf +-rw-r--r--. 1 1000 1000 9 Nov 2 2016 host.conf +-rw-r--r--. 1 1000 1000 1128 Mar 6 2019 hosts +-rw-r--r--. 1 1000 1000 158 Jan 12 2010 hosts.20150304.133833 +-rw-r--r--. 1 1000 1000 986 Mar 5 2015 hosts.20180326.154713.ihaider +-rw-r--r--. 1 1000 1000 370 Jan 12 2010 hosts.allow +-rw-------. 1 1000 1000 1073 Mar 6 2019 hosts.backup +-rw-r--r--. 1 1000 1000 460 Jan 12 2010 hosts.deny +-rw-r--r--. 1 1000 1000 4850 Feb 8 2017 idmapd.conf +drwxr-xr-x. 2 1000 1000 4096 Jun 11 19:32 init +-rw-r--r--. 1 1000 1000 0 Feb 12 2018 init.conf +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:55 init.d +-rw-r--r--. 1 1000 1000 884 Apr 27 2018 inittab +drwxr-xr-x. 2 1000 1000 4096 Jul 8 2018 iproute2 +-rw-r--r--. 
1 1000 1000 2380 Jan 9 2020 ipsec.conf +drwx------. 3 1000 1000 4096 Mar 20 15:12 ipsec.d +drwxr-xr-x. 2 1000 1000 4096 Jun 27 2018 iscsi +drwxr-xr-x. 2 1000 1000 4096 Mar 24 2019 java +-rw-r--r--. 1 1000 1000 8120 Jul 8 2018 kdump.conf +-rw-r--r--. 1 1000 1000 4752 Jun 11 21:00 krb5.conf +-rw-r--r--. 1 1000 1000 28 Jul 25 2013 ld.so.conf +drwxr-xr-x. 2 1000 1000 4096 Jun 11 20:55 ld.so.conf.d +-rw-r--r--. 1 1000 1000 3519 May 4 2010 localtime +-rw-r--r--. 1 1000 1000 215 Nov 19 2013 logrotate.conf +-rw-r--r--. 1 1000 1000 662 Aug 29 2007 logrotate.conf.20150304.134454 +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:56 logrotate.d +-rw-r--r--. 1 1000 1000 152 Mar 5 2015 lsb-release +drwxr-xr-x. 2 1000 1000 4096 Mar 5 2015 lsb-release.d +drwxr-xr-x. 6 1000 1000 4096 Jan 20 2018 lvm +drwxr-xr-x. 2 1000 1000 4096 Jun 11 19:32 modprobe.d +-rw-r--r--. 1 1000 1000 744 Sep 18 2014 multipath.conf +drwxr-xr-x. 2 1000 1000 4096 Sep 12 2018 NetworkManager +-rw-r--r--. 1 1000 1000 58 Apr 27 2018 networks +-rw-r--r--. 1 1000 1000 3605 Mar 3 00:32 nfsmount.conf +-rw-r--r--. 1 1000 1000 1724 Mar 5 2015 nsswitch.conf +drwxr-xr-x. 2 1000 1000 4096 Jan 27 2019 ntp +-rw-r--r--. 1 1000 1000 261 Mar 23 2016 ntp.conf +drwxr-xr-x. 3 1000 1000 4096 Apr 19 2017 openldap +drwxr-xr-x. 2 1000 1000 4096 Jun 11 21:00 pam.d +drwxr-xr-x. 4 1000 1000 4096 Apr 19 2017 pki +drwxr-xr-x. 2 1000 1000 4096 Apr 19 2017 postfix +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:56 ppp +drwxr-xr-x. 10 1000 1000 4096 Jul 8 2018 rc.d +-rw-r--r--. 1 1000 1000 56 Nov 22 2019 redhat-release +-rw-r--r--. 1 1000 1000 1484 Jul 22 2014 request-key.conf +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:55 request-key.d +-rw-r--r--. 1 1000 1000 71 Mar 5 2015 resolv.conf +drwxr-xr-x. 3 1000 1000 4096 Oct 20 2019 rhsm +-rw-r--r--. 1 1000 1000 2951 Apr 19 2017 rsyslog.conf +drwxr-xr-x. 2 1000 1000 4096 Jan 17 2020 samba +drwxr-xr-x. 4 1000 1000 4096 May 29 09:35 security +drwxr-xr-x. 3 1000 1000 4096 Dec 6 2017 selinux +drwxr-xr-x. 2 1000 1000 4096 Jan 17 2020 snmp +-rw-r--r--. 1 1000 1000 256 Oct 9 2019 sos.conf +drwxr-xr-x. 2 1000 1000 4096 Jun 11 21:01 ssh +drwxr-xr-x. 6 1000 1000 4096 Jun 11 20:55 sysconfig +-rw-r--r--. 1 1000 1000 1183 Apr 17 09:32 sysctl.conf +drwxr-xr-x. 3 1000 1000 4096 Jan 17 2020 udev +drwxr-xr-x. 2 1000 1000 4096 Aug 4 16:02 vmware-tools +drwxr-xr-x. 3 1000 1000 4096 Jul 8 2018 X11 +-rw-------. 1 1000 1000 1001 Dec 16 2015 xinetd.conf +drwxr-xr-x. 2 1000 1000 4096 Apr 19 2017 xinetd.d +drwxr-xr-x. 3 1000 1000 4096 Apr 16 2017 yum +-rw-r--r--. 1 1000 1000 813 Mar 15 2016 yum.conf +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:53 yum.repos.d + +/etc/rc.d/init.d: +total 460 +drwxr-xr-x. 2 1000 1000 4096 Apr 19 07:55 . +drwxr-xr-x. 10 1000 1000 4096 Jul 8 2018 .. +-rwxr-xr-x. 1 1000 1000 1287 Jan 24 2018 abrt-ccpp +-rwxr-xr-x. 1 1000 1000 1628 Jan 24 2018 abrtd +-rwxr-xr-x. 1 1000 1000 1641 Jan 24 2018 abrt-oops +-rwxr-xr-x. 1 1000 1000 1818 Nov 12 2015 acpid +-rwxr-xr-x. 1 1000 1000 2062 Oct 18 2016 atd +-rwxr-xr-x. 1 1000 1000 3580 Dec 22 2016 auditd +-rwxr-xr-x. 1 1000 1000 4040 Mar 13 2019 autofs +-rwxr-xr-x. 1 1000 1000 11695 Aug 16 2019 b9daemon +-rwxr-xr-x. 1 1000 1000 2105 Dec 4 2014 besclient +-r-xr-xr-x. 1 1000 1000 1362 Nov 2 2017 blk-availability +-rwxr--r--. 1 1000 1000 2155 Jan 29 2019 cbdaemon +-rwxr-xr-x. 1 1000 1000 2010 Mar 5 2015 centrifydc +-rwxr-xr-x. 1 1000 1000 11864 Jun 16 2015 cpuspeed +-rwxr-xr-x. 1 1000 1000 2826 Jul 22 2016 crond +-rwxr-xr-x. 1 1000 1000 3034 Feb 27 2019 cups +-rwxr-xr-x. 
1 1000 1000 1734 Sep 27 2017 dnsmasq +-rw-r--r--. 1 1000 1000 25592 Apr 27 2018 functions +-rwxr-xr-x. 1 1000 1000 1801 Jun 19 2014 haldaemon +-rwxr-xr-x. 1 1000 1000 5985 Apr 27 2018 halt +-rwxr-xr-x. 1 1000 1000 11244 Apr 16 2018 ip6tables +-rwxr-xr-x. 1 1000 1000 6548 Jan 9 2020 ipsec +-rwxr-xr-x. 1 1000 1000 11123 Apr 16 2018 iptables +-rwxr-xr-x. 1 1000 1000 1938 Feb 2 2018 irqbalance +-rwxr-xr-x. 1 1000 1000 4535 Jun 9 2017 iscsi +-rwxr-xr-x. 1 1000 1000 3990 Jun 9 2017 iscsid +-rwxr-xr-x. 1 1000 1000 21406 Apr 12 2018 kdump +-rwxr-xr-x. 1 1000 1000 652 Apr 27 2018 killall +-r-xr-xr-x. 1 1000 1000 2137 Nov 2 2017 lvm2-lvmetad +-r-xr-xr-x. 1 1000 1000 3045 Nov 2 2017 lvm2-monitor +-rwxr-xr-x. 1 1000 1000 2103 Oct 27 2015 mcelogd +-rwxr-xr-x. 1 1000 1000 2571 Jan 26 2017 mdmonitor +-rwxr-xr-x. 1 1000 1000 2200 Jul 8 2019 messagebus +-rwxr-xr-x. 1 1000 1000 2523 Sep 11 2018 multipathd +-r-x------. 1 1000 1000 22776 Mar 5 2015 netbackup +-rwxr-xr-x. 1 1000 1000 4334 Apr 27 2018 netconsole +-rwxr-xr-x. 1 1000 1000 5309 Apr 27 2018 netfs +-rwxr-xr-x. 1 1000 1000 6742 Apr 27 2018 network +-rwxr-xr-x. 1 1000 1000 2188 Jan 13 2017 NetworkManager +-rwxr-xr-x. 1 1000 1000 6889 Mar 3 00:32 nfs +-rwxr-xr-x. 1 1000 1000 3526 Mar 3 00:32 nfslock +-rwxr-xr-x. 1 1000 1000 3570 Nov 23 2016 nfs-rdma +-rwxr-xr-x. 1 1000 1000 1923 Dec 11 2018 ntpd +-rwxr-xr-x. 1 1000 1000 2043 Dec 11 2018 ntpdate +-rwxr-xr-x. 1 1000 1000 2023 Mar 17 2016 portreserve +-rwxr-xr-x. 1 1000 1000 3912 Oct 31 2016 postfix +-rwxr-xr-x. 1 1000 1000 1738 Mar 1 2016 pppoe-server +-rwxr-xr-x. 1 1000 1000 1556 Nov 2 2016 psacct +-rwxr-xr-x. 1 1000 1000 2034 Jan 7 2015 quota_nld +-rwx------. 1 1000 1000 2092 Feb 1 2018 rc.agent_user +-rwxr-xr-x. 1 1000 1000 1513 Dec 7 2016 rdisc +-rwxr-xr-x. 1 1000 1000 12856 Nov 23 2016 rdma +-rwxr-xr-x. 1 1000 1000 1822 Oct 25 2016 restorecond +-rwxr-xr-x. 1 1000 1000 2898 Mar 19 2010 rhnsd +-rwxr-xr-x. 1 1000 1000 1770 Jun 26 2019 rhsmcertd +-rwxr-xr-x. 1 1000 1000 1808 Sep 3 2015 rngd +-rwxr-xr-x. 1 1000 1000 2073 Feb 8 2018 rpcbind +-rwxr-xr-x. 1 1000 1000 2518 Mar 3 00:32 rpcgssd +-rwxr-xr-x. 1 1000 1000 2305 Mar 3 00:32 rpcidmapd +-rwxr-xr-x. 1 1000 1000 2464 Mar 3 00:32 rpcsvcgssd +-rwxr-xr-x. 1 1000 1000 2011 Dec 1 2017 rsyslog +-rwxr-xr-x. 1 1000 1000 1698 Oct 25 2016 sandbox +-rwxr-xr-x. 1 1000 1000 2056 Feb 27 2015 saslauthd +-rwxr--r--. 1 1000 1000 1642 Apr 10 2017 scx-cimd +-rwxr-xr-x. 1 1000 1000 647 Apr 27 2018 single +-rwxr-xr-x. 1 1000 1000 3002 Oct 21 2016 smartd +-rwxr-xr-x. 1 1000 1000 2162 Sep 26 2019 snmpd +-rwxr-xr-x. 1 1000 1000 1738 Sep 26 2019 snmptrapd +-rwxr-xr-x. 1 1000 1000 2472 Apr 2 2019 spice-vdagentd +-rwx------. 1 1000 1000 1028 Jun 23 2018 splunk +-rwxr-xr-x. 1 1000 1000 4621 Mar 20 2019 sshd +-rwxr-xr-x. 1 1000 1000 1144 Nov 13 2017 sysstat +-rwxr-xr-x. 1 1000 1000 2096 Mar 15 2018 tsys-notify +-rwxr-xr-x. 1 1000 1000 2265 Jun 23 2014 tsys-notify.20190401.214025. +-rwxr-xr-x. 1 1000 1000 2294 Oct 1 2019 udev-post +-rwxr-xr-x. 1 1000 1000 3176 Apr 5 2019 udsagent +lrwxrwxrwx. 1 1000 1000 30 Jun 11 19:32 vmware-tools -> ../../vmware-tools/services.sh +lrwxrwxrwx. 1 1000 1000 40 Mar 5 2015 vxpbx_exchanged -> ../../../opt/VRTSpbx/bin/vxpbx_exchanged +-rwxr-xr-x. 1 1000 1000 1598 Oct 9 2019 winbind +-rwxr-xr-x. 1 1000 1000 1914 Oct 18 2017 wpa_supplicant +-rwxr-xr-x. 1 1000 1000 3555 Dec 16 2015 xinetd + /etc/sysconfig: total 96 drwxr-xr-x. 7 0 0 4096 Jul 6 23:41 . 
@@ -27,7 +204,10 @@ def test_ls_etc(): list_etc = ls_etc.LsEtc(context_wrap(LS_ETC)) + assert "/etc/rc.d/init.d" in list_etc assert "/etc/sysconfig" in list_etc + assert 'vmware-tools' in list_etc.files_of("/etc/rc.d/init.d") + assert len(list_etc.files_of("/etc/rc.d/init.d")) == 80 assert len(list_etc.files_of("/etc/sysconfig")) == 3 assert list_etc.files_of("/etc/sysconfig") == ['ebtables-config', 'firewalld', 'grub'] assert list_etc.dirs_of("/etc/sysconfig") == ['.', '..', 'cbq', 'console'] diff --git a/insights/specs/default.py b/insights/specs/default.py index 50a91587d..b8e249ee5 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -377,8 +377,8 @@ def httpd_cmd(broker): ls_disk = simple_command("/bin/ls -lanR /dev/disk") etc_and_sub_dirs = sorted(["/etc", "/etc/pki/tls/private", "/etc/pki/tls/certs", "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig", - "/etc/cloud/cloud.cfg.d"]) - ls_etc = simple_command("ls -lan {0}".format(' '.join(etc_and_sub_dirs))) + "/etc/cloud/cloud.cfg.d", "/etc/rc.d/init.d"]) + ls_etc = simple_command("/bin/ls -lan {0}".format(' '.join(etc_and_sub_dirs))) ls_lib_firmware = simple_command("/bin/ls -lanR /lib/firmware") ls_ocp_cni_openshift_sdn = simple_command("/bin/ls -l /var/lib/cni/networks/openshift-sdn") ls_origin_local_volumes_pods = simple_command("/bin/ls -l /var/lib/origin/openshift.local.volumes/pods") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 46e7b89ca..0f120868a 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -94,7 +94,7 @@ class InsightsArchiveSpecs(Specs): ls_boot = simple_file("insights_commands/ls_-lanR_.boot") ls_dev = simple_file("insights_commands/ls_-lanR_.dev") ls_disk = simple_file("insights_commands/ls_-lanR_.dev.disk") - ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.sysconfig") + ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig") ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware") ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn") ls_origin_local_volumes_pods = simple_file("insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods") From 91c1b45c215f119e3215e69384bfbe859cfb4e4c Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Mon, 24 Aug 2020 14:56:22 -0400 Subject: [PATCH 150/892] Print deep-link to HBI record if one exists (#2719) * client: add show_inventory_deep_link method This 'show_inventory_deep_link' method will query inventory for the system record and use the inventory "id" field to format a deep-link URL into cloud.redhat.com's inventory page for the host. 
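As an illustration of that flow (not part of this commit): the deep link is just the inventory record's "id" interpolated into the cloud.redhat.com inventory URL, exactly the format string used in the diff below. The single-record list here is a made-up stand-in for what the inventory query returns.

    # Hypothetical inventory response: a list holding one host record.
    system = [{"id": "dcf2bd26-1111-2222-3333-eb00d4c1c28f"}]
    host_id = system[0]["id"]
    print("https://cloud.redhat.com/insights/inventory/{0}".format(host_id))
    # -> https://cloud.redhat.com/insights/inventory/dcf2bd26-1111-2222-3333-eb00d4c1c28f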
Signed-off-by: Link Dupont * client: update deep-link URL message Signed-off-by: Link Dupont Co-authored-by: Jeremy Crafts --- insights/client/__init__.py | 18 ++++++++++++++++++ insights/client/phase/v1.py | 1 + 2 files changed, 19 insertions(+) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 301a138de..d3461963c 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -547,6 +547,24 @@ def show_results(self): else: raise e + def show_inventory_deep_link(self): + """ + Show a deep link to this host inventory record + """ + system = self.connection._fetch_system_by_machine_id() + if system: + if len(system) == 1: + try: + id = system[0]["id"] + logger.info("View details about this system on cloud.redhat.com:") + logger.info( + "https://cloud.redhat.com/insights/inventory/{0}".format(id) + ) + except Exception as e: + logger.error( + "Error: malformed system record: {0}: {1}".format(system, e) + ) + def _copy_soscleaner_files(self, insights_archive): ''' Helper function to copy the .csv reports generated by SOScleaner diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 044f9781b..a031d8eb9 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -300,6 +300,7 @@ def collect_and_output(client, config): if resp: if config.to_json: print(json.dumps(resp)) + client.show_inventory_deep_link() client.delete_cached_branch_info() From 459fd3736ae65f9db418a417d90f6b7e7efa9cee Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 24 Aug 2020 15:50:37 -0400 Subject: [PATCH 151/892] Stage env autoconfig (#2701) * use staging url for autoconfig Signed-off-by: Jeremy Crafts * force cloud.redhat when using stage Signed-off-by: Jeremy Crafts * use cert.cloud.redhat.com Signed-off-by: Jeremy Crafts * force cert_verify=True for stage Signed-off-by: Jeremy Crafts * fix unit tests Signed-off-by: Jeremy Crafts * add tests for stage Signed-off-by: Jeremy Crafts * flake Signed-off-by: Jeremy Crafts --- insights/client/auto_config.py | 20 +++-- .../auto_config/test_autoconfig_urls.py | 73 +++++++++++++++++-- .../auto_config/test_branch_info_call.py | 4 +- 3 files changed, 81 insertions(+), 16 deletions(-) diff --git a/insights/client/auto_config.py b/insights/client/auto_config.py index 337b37622..46bac97d4 100644 --- a/insights/client/auto_config.py +++ b/insights/client/auto_config.py @@ -55,7 +55,7 @@ def verify_connectivity(config): return False -def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): +def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite, is_stage): """ Set config based on discovered data """ @@ -76,7 +76,10 @@ def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): logger.debug('Auto-configured base_url: %s', config.base_url) else: # connected directly to RHSM - config.base_url = hostname + '/r/insights' + if is_stage: + config.base_url = hostname + '/api' + else: + config.base_url = hostname + '/r/insights' logger.debug('Auto-configured base_url: %s', config.base_url) logger.debug('Not connected to Satellite, skipping branch_info') # direct connection to RHSM, skip verify_connectivity @@ -112,6 +115,7 @@ def _try_satellite6_configuration(config): key = open(rhsmCertificate.keypath(), 'r').read() rhsm = rhsmCertificate(key, cert) is_satellite = False + is_stage = False # This will throw an exception if we are not registered logger.debug('Checking if system is subscription-manager registered') @@ -152,9 +156,13 @@ def 
_try_satellite6_configuration(config): rhsm_hostname = 'cert-api.access.redhat.com' rhsm_ca = None elif _is_staging_rhsm(rhsm_hostname): - logger.debug('Connected to staging RHSM, using rhel-test') - rhsm_hostname = 'rhel-test.cloud.redhat.com' - rhsm_ca = False # NOT None + logger.debug('Connected to staging RHSM, using cert.cloud.stage.redhat.com') + rhsm_hostname = 'cert.cloud.stage.redhat.com' + # never use legacy upload for staging + config.legacy_upload = False + config.cert_verify = True + is_stage = True + rhsm_ca = None else: # Set the host path # 'rhsm_hostname' should really be named ~ 'rhsm_host_base_url' @@ -162,7 +170,7 @@ def _try_satellite6_configuration(config): is_satellite = True logger.debug("Trying to set auto_configuration") - set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite) + set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite, is_stage) return True except Exception as e: logger.debug(e) diff --git a/insights/tests/client/auto_config/test_autoconfig_urls.py b/insights/tests/client/auto_config/test_autoconfig_urls.py index 2c79460c4..d2ee6c32d 100644 --- a/insights/tests/client/auto_config/test_autoconfig_urls.py +++ b/insights/tests/client/auto_config/test_autoconfig_urls.py @@ -13,7 +13,7 @@ def test_rhsm_legacy_url(set_auto_configuration, initConfig): initConfig().get.side_effect = ['subscription.rhsm.redhat.com', '443', '', '', '', '', ''] config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) _try_satellite6_configuration(config) - set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False) + set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False, False) @patch("insights.client.auto_config.rhsmCertificate", Mock()) @@ -28,7 +28,41 @@ def test_rhsm_platform_url(set_auto_configuration, initConfig): config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) _try_satellite6_configuration(config) # set_auto_configuration.assert_called_with(config, 'cloud.redhat.com', None, None, False) - set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False) + set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False, False) + + +@patch("insights.client.auto_config.rhsmCertificate", Mock()) +@patch("insights.client.auto_config.open", Mock()) +@patch("insights.client.auto_config._importInitConfig") +@patch("insights.client.auto_config.set_auto_configuration") +def test_rhsm_stage_legacy_url(set_auto_configuration, initConfig): + ''' + Ensure the correct host URL is selected for auto_config on a legacy staging RHSM upload + + This will still force legacy_upload=False as there is no classic staging env, + so the result is the same as platform upload. 
+ + ''' + initConfig().get.side_effect = ['subscription.rhsm.stage.redhat.com', '443', '', '', '', '', ''] + config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) + _try_satellite6_configuration(config) + # config.legacy_upload is modified in the function + config.legacy_upload = False + set_auto_configuration.assert_called_with(config, 'cert.cloud.stage.redhat.com', None, None, False, True) + + +@patch("insights.client.auto_config.rhsmCertificate", Mock()) +@patch("insights.client.auto_config.open", Mock()) +@patch("insights.client.auto_config._importInitConfig") +@patch("insights.client.auto_config.set_auto_configuration") +def test_rhsm_stage_platform_url(set_auto_configuration, initConfig): + ''' + Ensure the correct host URL is selected for auto_config on a platform staging RHSM upload + ''' + initConfig().get.side_effect = ['subscription.rhsm.stage.redhat.com', '443', '', '', '', '', ''] + config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + _try_satellite6_configuration(config) + set_auto_configuration.assert_called_with(config, 'cert.cloud.stage.redhat.com', None, None, False, True) @patch("insights.client.auto_config.rhsmCertificate", Mock()) @@ -42,7 +76,7 @@ def test_sat_legacy_url(set_auto_configuration, initConfig): initConfig().get.side_effect = ['test.satellite.com', '443', '', '', '', '', 'test_cert'] config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) _try_satellite6_configuration(config) - set_auto_configuration.assert_called_with(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True) + set_auto_configuration.assert_called_with(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) @patch("insights.client.auto_config.rhsmCertificate", Mock()) @@ -56,7 +90,7 @@ def test_sat_platform_url(set_auto_configuration, initConfig): initConfig().get.side_effect = ['test.satellite.com', '443', '', '', '', '', 'test_cert'] config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) _try_satellite6_configuration(config) - set_auto_configuration.assert_called_with(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True) + set_auto_configuration.assert_called_with(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) @patch("insights.client.auto_config.verify_connectivity", Mock()) @@ -65,7 +99,7 @@ def test_rhsm_legacy_base_url_configured(): Ensure the correct base URL is assembled for a legacy RHSM upload ''' config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) - set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False) + set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False) assert config.base_url == 'cert-api.access.redhat.com/r/insights' @@ -78,7 +112,7 @@ def test_rhsm_platform_base_url_configured(): # set_auto_configuration(config, 'cloud.redhat.com', None, None, False) # assert config.base_url == 'cloud.redhat.com/api' # [CIRCUS MUSIC] - set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False) + set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False) # assert config.base_url == 'cert-api.access.redhat.com/r/insights/platform' assert config.base_url == 'cert-api.access.redhat.com/r/insights' @@ -89,7 +123,7 @@ def test_sat_legacy_base_url_configured(): Ensure the correct base URL is 
assembled for a legacy RHSM upload ''' config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) - set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True) + set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights' @@ -99,7 +133,7 @@ def test_sat_platform_base_url_configured(): Ensure the correct base URL is assembled for a platform RHSM upload ''' config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False, proxy=None) - set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True) + set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) # assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights/platform' assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights' @@ -129,3 +163,26 @@ def test_platform_path_added(): config = Mock(base_url='test.satellite.com:443/redhat_access/r/insights', auto_config=False, legacy_upload=False, offline=False) try_auto_configuration(config) assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights/platform' + + +@patch("insights.client.auto_config.verify_connectivity", Mock()) +def test_rhsm_stage_legacy_base_url_configured(): + ''' + Ensure the correct base URL is assembled for a legacy staging RHSM upload + + This will still force legacy_upload=False as there is no classic staging env, + so the result is the same as platform upload. + ''' + config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) + set_auto_configuration(config, 'cert.cloud.stage.redhat.com', None, None, False, True) + assert config.base_url == 'cert.cloud.stage.redhat.com/api' + + +@patch("insights.client.auto_config.verify_connectivity", Mock()) +def test_rhsm_stage_platform_base_url_configured(): + ''' + Ensure the correct base URL is assembled for a platform staging RHSM upload + ''' + config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False, proxy=None) + set_auto_configuration(config, 'cert.cloud.stage.redhat.com', None, None, False, True) + assert config.base_url == 'cert.cloud.stage.redhat.com/api' diff --git a/insights/tests/client/auto_config/test_branch_info_call.py b/insights/tests/client/auto_config/test_branch_info_call.py index a1ba4d48d..40a3edac5 100644 --- a/insights/tests/client/auto_config/test_branch_info_call.py +++ b/insights/tests/client/auto_config/test_branch_info_call.py @@ -8,7 +8,7 @@ def test_sat_branch_info_called(connection): When is_satellite is True, means we're on sat. get_branch_info should be called. ''' config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) - set_auto_configuration(config, 'test.com:443/redhat_access', 'some_cert', None, True) + set_auto_configuration(config, 'test.com:443/redhat_access', 'some_cert', None, True, False) connection.return_value.get_branch_info.assert_called_once() @@ -18,5 +18,5 @@ def test_rhsm_branch_info_not_called(connection): When is_satellite is False, means we're on direct RHSM. get_branch_info should not be called. 
''' config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) - set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False) + set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False) connection.return_value.get_branch_info.assert_not_called() From 9a88d1e81aaf02951f76f862f6f69c0759e3401d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 24 Aug 2020 17:18:48 -0400 Subject: [PATCH 152/892] make sure to add new arg for sat 5 (#2726) * make sure to add new arg for sat 5 Signed-off-by: Jeremy Crafts * fix unit test Signed-off-by: Jeremy Crafts --- insights/client/auto_config.py | 2 +- .../client/auto_config/test_try_satellite5_configuration.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/insights/client/auto_config.py b/insights/client/auto_config.py index 46bac97d4..12b044f8f 100644 --- a/insights/client/auto_config.py +++ b/insights/client/auto_config.py @@ -230,7 +230,7 @@ def _try_satellite5_configuration(config): else: proxy = proxy + proxy_host_port logger.debug("RHN Proxy: %s", proxy) - set_auto_configuration(config, hostname, rhn_ca, proxy, True) + set_auto_configuration(config, hostname, rhn_ca, proxy, True, False) else: logger.debug("Could not find hostname") return False diff --git a/insights/tests/client/auto_config/test_try_satellite5_configuration.py b/insights/tests/client/auto_config/test_try_satellite5_configuration.py index de2835619..dba06a034 100644 --- a/insights/tests/client/auto_config/test_try_satellite5_configuration.py +++ b/insights/tests/client/auto_config/test_try_satellite5_configuration.py @@ -18,4 +18,5 @@ def test_set_auto_configuration(isfile_mock, read_systemid_file_mock, open_mock, "some-hostname/redhat_access", "some-certificate", None, - True) + True, + False) From f8451afe4ad8ee6b424d0f72c211813ffbda7e2e Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Mon, 24 Aug 2020 18:37:06 -0400 Subject: [PATCH 153/892] config: sort "actions" into an arg group (#2725) Signed-off-by: Link Dupont Co-authored-by: Jeremy Crafts --- insights/client/config.py | 59 +++++++++++++++++++++------------------ 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/insights/client/config.py b/insights/client/config.py index 9c03620aa..753081533 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -94,7 +94,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--check-results'], 'help': "Check for insights results", - 'action': "store_true" + 'action': "store_true", + 'group': 'actions' }, 'cmd_timeout': { # non-CLI @@ -108,7 +109,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--compliance'], 'help': 'Scan the system using openscap and upload the report', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'compressor': { 'default': 'gz', @@ -140,7 +142,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--disable-schedule'], 'help': 'Disable automatic scheduling', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'display_name': { 'default': None, @@ -153,6 +156,7 @@ def _core_collect_default(): 'opt': ['--enable-schedule'], 'help': 'Enable automatic scheduling for collection to run', 'action': 'store_true', + 'group': 'actions' }, 'gpg': { 'default': True, @@ -197,7 +201,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--list-specs'], 'help': 'Show insights-client collection specs', - 'action': 'store_true' + 'action': 'store_true', + 'group': 
'actions' }, 'logging_file': { 'default': constants.default_log_file, @@ -271,7 +276,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--register'], 'help': 'Register system to the Red Hat Insights Service', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions', }, 'remove_file': { # non-CLI @@ -310,7 +316,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--show-results'], 'help': "Show insights about this host", - 'action': "store_true" + 'action': "store_true", + 'group': 'actions' }, 'silent': { 'default': False, @@ -339,7 +346,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--test-connection'], 'help': 'Test connectivity to Red Hat', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'debug' }, 'to_json': { 'default': False, @@ -351,7 +359,8 @@ def _core_collect_default(): 'default': False, 'opt': ['--unregister'], 'help': 'Unregister system from the Red Hat Insights Service', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'upload_url': { # non-CLI @@ -379,14 +388,14 @@ def _core_collect_default(): 'default': False, 'opt': ['--validate'], 'help': 'Validate remove.conf and tags.yaml', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'verbose': { 'default': False, 'opt': ['--verbose'], 'help': "DEBUG output to stdout", - 'action': "store_true", - 'group': 'debug' + 'action': "store_true" }, 'version': { 'default': False, @@ -394,9 +403,6 @@ def _core_collect_default(): 'help': "Display version", 'action': "store_true" }, - - # platform options - # hide help messages with SUPPRESS until we're ready to make them public 'legacy_upload': { # True: upload to insights classic API # False: upload to insights platform API @@ -407,14 +413,13 @@ def _core_collect_default(): 'opt': ['--payload'], 'help': 'Use the Insights Client to upload an archive', 'action': 'store', - 'group': 'platform' + 'group': 'actions' }, 'content_type': { 'default': None, 'opt': ['--content-type'], 'help': 'Content type of the archive specified with --payload', - 'action': 'store', - 'group': 'platform' + 'action': 'store' }, 'diagnosis': { 'default': None, @@ -422,7 +427,7 @@ def _core_collect_default(): 'help': 'Retrieve a diagnosis for this system', 'const': True, 'nargs': '?', - 'group': 'platform' + 'group': 'actions' } } @@ -546,23 +551,23 @@ def _load_command_line(self, conf_only=False): self._update_dict(self._cli_opts) return parser = argparse.ArgumentParser() - debug_grp = parser.add_argument_group('Debug options') - platf_grp = parser.add_argument_group('Platform options') + arg_groups = { + "actions": parser.add_argument_group("actions"), + "debug": parser.add_argument_group("optional debug arguments") + } cli_options = dict((k, v) for k, v in DEFAULT_OPTS.items() if ( 'opt' in v)) for _, o in cli_options.items(): - group = o.pop('group', None) - if group == 'debug': - g = debug_grp - elif group == 'platform': - g = platf_grp + group_name = o.pop('group', None) + if group_name is None: + group = parser else: - g = parser + group = arg_groups[group_name] optnames = o.pop('opt') # use argparse.SUPPRESS as CLI defaults so it won't parse # options that weren't specified o['default'] = argparse.SUPPRESS - g.add_argument(*optnames, **o) + group.add_argument(*optnames, **o) options = parser.parse_args() From c54c9d250a119f6e19d4b28e7d14cd415ba07dc6 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Tue, 25 Aug 2020 09:26:36 -0400 Subject: [PATCH 154/892] suppress help output for check-results (#2723) 
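Not part of the patch, but a small self-contained sketch of the argparse behavior this one-line change leans on: an option whose help text is set to argparse.SUPPRESS still parses normally, it is simply omitted from the --help listing. The option names are copied from the config for illustration only.

    import argparse

    parser = argparse.ArgumentParser(prog="demo")
    # Hidden: accepted on the command line, absent from usage/help output.
    parser.add_argument("--check-results", action="store_true",
                        help=argparse.SUPPRESS)
    parser.add_argument("--register", action="store_true",
                        help="Register system to the Red Hat Insights Service")

    args = parser.parse_args(["--check-results"])
    assert args.check_results is True
    parser.print_help()  # shows --register (and -h), but not --check-results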
Signed-off-by: Link Dupont --- insights/client/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/client/config.py b/insights/client/config.py index 753081533..459553062 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -93,7 +93,7 @@ def _core_collect_default(): 'check_results': { 'default': False, 'opt': ['--check-results'], - 'help': "Check for insights results", + 'help': argparse.SUPPRESS, 'action': "store_true", 'group': 'actions' }, From 5a26a1b3be39c1da724d994f2fb670fb56f56028 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 25 Aug 2020 11:17:55 -0400 Subject: [PATCH 155/892] update uploader.json map and unit test for new specs (#2720) Signed-off-by: Jeremy Crafts --- .../collection_rules/test_map_components.py | 4 ++- insights/uploader_json_map.json | 28 ++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index a4e34aefa..5c541e711 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -102,7 +102,9 @@ def test_get_component_by_symbolic_name(): 'sap_host_profile', 'sched_rt_runtime_us', 'libvirtd_qemu_log', - 'mlx4_port' + 'mlx4_port', + 'dm_mod_use_blk_mq', + 'scsi_mod_use_blk_mq' ] # first, make sure our list is proper and one of these diff --git a/insights/uploader_json_map.json b/insights/uploader_json_map.json index a9219bbc5..2b5fc722e 100644 --- a/insights/uploader_json_map.json +++ b/insights/uploader_json_map.json @@ -708,6 +708,8 @@ "FullQualifiedHostname", "Hostname", "InstanceName", + "LOG Q0I=> NiIRead: P=::; L=::: recv", + "LOG Q0I=> NiPConnect2: :: connect", "SID", "SapVersionInfo", "SystemNumber" @@ -1818,6 +1820,11 @@ ], "symbolic_name": "dirsrv_errors" }, + { + "file": "/sys/module/dm_mod/parameters/use_blk_mq", + "pattern": [], + "symbolic_name": "dm_mod_use_blk_mq" + }, { "file": "/var/log/dmesg", "pattern": [ @@ -3069,6 +3076,11 @@ "pattern": [], "symbolic_name": "scsi" }, + { + "file": "/sys/module/scsi_mod/parameters/use_blk_mq", + "pattern": [], + "symbolic_name": "scsi_mod_use_blk_mq" + }, { "file": "/proc/net/sctp/assocs", "pattern": [], @@ -3575,6 +3587,13 @@ ], "symbolic_name": "gnocchi_conf" }, + { + "file": "/etc/named.conf", + "pattern": [ + "include" + ], + "symbolic_name": "named_conf" + }, { "file": "/etc/neutron/neutron.conf", "pattern": [ @@ -3955,6 +3974,13 @@ "glob": "/etc/yum.repos.d/*.repo", "symbolic_name": "yum_repos_d", "pattern": [] + }, + { + "glob": "/etc/ssh/ssh_config.d/*.conf", + "symbolic_name": "ssh_config_d", + "pattern": [ + "Include" + ] } ], "meta_specs": { @@ -3977,5 +4003,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-08-06T14:32:35.711025" + "version": "2020-08-12T15:22:22.060797" } \ No newline at end of file From e20b0e7fd8efdf4ceda12c1ead4204cdca3f7561 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 26 Aug 2020 11:35:40 -0400 Subject: [PATCH 156/892] check value of system fetch response (#2728) Signed-off-by: Jeremy Crafts --- insights/client/connection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/insights/client/connection.py b/insights/client/connection.py index 9654f9a13..e09bc5545 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -748,6 +748,9 @@ def unregister(self): return 
self._legacy_unregister() results = self._fetch_system_by_machine_id() + if not results: + logger.info('This host could not be found.') + return False try: logger.debug("Unregistering host...") url = self.api_url + "/inventory/v1/hosts/" + results[0]['id'] From af42f79d8623e1232cac53c8ec2a45b5165bba12 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 26 Aug 2020 12:40:33 -0400 Subject: [PATCH 157/892] begin to update soscleaner to 0.4.4 (#2527) * begin to update soscleaner to 0.4.4 * 2.6 compatible imports of builtins * use ipaddress module from contrib and add compatibility chaos * remove unnecessary messaging Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 8 +- insights/contrib/soscleaner.py | 1840 +++++++++++++---- .../tests/client/data_collector/test_done.py | 8 + 3 files changed, 1425 insertions(+), 431 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index cc75768eb..264efd955 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -379,8 +379,14 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.files = [] self.quiet = True self.keyword_file = None + self.keywords_file = None self.keywords = None self.no_tar_file = config.output_dir + self.loglevel = 'INFO' + self.obfuscate_macs = False + self.networks = None + self.users = None + self.users_file = None if rm_conf: try: @@ -389,7 +395,7 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.keyword_file.write("\n".join(keywords).encode('utf-8')) self.keyword_file.flush() self.keyword_file.close() - self.keywords = [self.keyword_file.name] + self.keywords_file = [self.keyword_file.name] logger.debug("Attmpting keyword obfuscation") except LookupError: pass diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 0c3ecfb66..1c5b24cad 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -14,675 +14,1650 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# File Name : sos-gov.py +# File Name : soscleaner.py # Creation Date : 10-01-2013 # Created By : Jamie Duncan -# Last Modified : Sat 13 Sep 2014 10:51:54 PM EDT -# Purpose : an sosreport scrubber +# Purpose : an sosreport and data set obfuscation tool import os +import hashlib import re import errno +import stat import sys import uuid import shutil -import struct, socket import tempfile import logging import tarfile -import six +from insights.contrib.ipaddress import IPv4Network, IPv4Address, IPv6Network, IPv6Address -from insights.util import content_type +from random import randint +from six.moves import configparser +import subprocess +import six +if six.PY3: + from builtins import str + from builtins import range + from builtins import object +else: + from __builtin__ import str + from __builtin__ import range + from __builtin__ import object + + +class SOSCleaner(object): + """ + A class to parse through an sosreport or generic dataset to begin the + cleaning and obfuscation process required in many industries. + """ -class SOSCleaner: - ''' - A class to parse through an sosreport and begin the cleaning process required in many industries - Parameters: - debug - will generate add'l output to STDOUT. defaults to no - reporting - will post progress and overall statistics to STDOUT. 
defaults to yes - ''' def __init__(self, quiet=False): self.name = 'soscleaner' - self.version = '0.2.2' - self.loglevel = 'INFO' #this can be overridden by the command-line app self.quiet = quiet self.domain_count = 0 - self.domains = list() - self.keywords = list() + self.domains = ['redhat.com', 'localhost.localdomain'] + self.short_domains = ['localdomain', 'localhost'] self.domainname = None self.report_dir = '/tmp' + self.version = '0.4.4' + self.false_positives = [ + 'installed-debs', + 'installed_rpms', + 'sos_commands/dpkg', + 'sos_commands/rpm', + 'sos_commands/snappy/snap_list_--all', + 'sos_commands/snappy/snap_--version' + ] + self.loglevel = 'INFO' + self.net_db = list() # Network Information database + self.ip_db = list() + if six.PY3: + self.default_net = IPv4Network('128.0.0.0/8') + else: + self.default_net = IPv4Network(unicode('128.0.0.0/8')) + self.default_netmask = self.default_net.prefixlen + # we'll have to keep track of how many networks we have so we don't have to count them each time we need to create a new one. + self.net_count = 0 + self.net_metadata = dict() - # IP obfuscation information - self.ip_db = dict() #IP database - self.start_ip = '10.230.230.1' + self.net_metadata[self.default_net.network_address.compressed] = dict() + self.net_metadata[self.default_net.network_address.compressed]['host_count'] = 0 # Hostname obfuscation information - self.hn_db = dict() #hostname database + self.hn_db = dict() # hostname database self.hostname_count = 0 self.hostname = None - # Domainname obfuscation information - self.dn_db = dict() #domainname database - self.root_domain = 'example.com' #right now this needs to be a 2nd level domain, like foo.com, example.com, domain.org, etc. + self.mac_db = dict() # mac address database + self.mac_count = 0 - # self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment() - # self._start_logging(self.logfile) + # Domainname obfuscation information + self.dn_db = dict() # domainname database + # right now this needs to be a 2nd level domain + # examples: foo.com, example.com, domain.org + self.root_domain = 'obfuscateddomain.com' # Keyword obfuscation information - self.keywords = None - self.kw_db = dict() #keyword database + self.keywords_file = list() + self.keywords = list() + self.kw_db = dict() # keyword database self.kw_count = 0 + # obfuscating users from the last command, per rfe #79 + self.users_file = 'sos_commands/last/lastlog_-u_1000-60000' + self.user_db = dict() + self.user_count = 0 + self.config_file = '/etc/soscleaner.conf' + self._read_early_config_options() + self.obfuscate_macs = True # issue #98 + + def _check_uid(self): + """Ensures soscleaner is running as root. This isn't required for soscleaner, + but sosreports are run as root and root tends to own the files inside the + sosreport tarball + """ + + try: # pragma: no cover + if os.getuid() != 0: + self.logger.warning( + "soscleaner must be executed by the root user in the same manner as sosreport") + self.logger.warning("soscleaner cannot continue. Exiting...") + + sys.exit(8) + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "UID_ERROR - unable to run SOSCleaner - you do not appear to be the root user") + + def _read_early_config_options(self): + """Reads an optional configuration file to load often-used defaults for + domains, networks, keywords, etc. 
If a config file is present and command-line + parameters are passed in, they will be additive, with the config file being + read in first. + """ + + try: + config = configparser.ConfigParser() + if os.path.exists(self.config_file): + config.read(self.config_file) + + # load in default config values + if config.has_option('Default', 'loglevel'): + self.loglevel = config.get('Default', 'loglevel').upper() + if config.has_option('Default', 'root_domain'): + self.root_domain = config.get('Default', 'root_domain') + if config.has_option('Default', 'quiet'): + self.quiet = config.get('Default', 'quiet') + return True + + else: + return True + + except OSError as e: # pragma: no cover + pass + + def _read_later_config_options(self): + """Reads an optional configuration file to load often-used defaults for + domains, networks, keywords, etc. If a config file is present and command-line + parameters are passed in, they will be addadtive, with the config file being + read in first. + """ + + try: + config = configparser.ConfigParser() + if os.path.exists(self.config_file): + config.read(self.config_file) + self.logger.con_out( + "Loading config file for default values - %s", self.config_file) + if config.has_section('DomainConfig'): + domains = config.get('DomainConfig', 'domains').split(',') + for d in domains: + self.domains.append(d) + self.logger.con_out( + "Loading domains from config file - %s", d) + else: + self.logger.con_out( + "No config found - DomainConfig.domains") + else: + self.logger.con_out( + "No config file section found - DomainConfig") + + if config.has_section('KeywordConfig'): + if config.has_option('KeywordConfig', 'keywords'): + keywords = config.get('KeywordConfig', 'keywords') + kw = keywords.split(',') + for k in kw: + self.keywords.append(k.strip()) + else: + self.logger.con_out( + "No config found - KeywordConfig.keywords") + if config.has_option('KeywordConfig', 'keyword_files'): + keyword_files = config.get( + 'KeywordConfig', 'keyword_files').split(',') + for f in keyword_files: + self.keywords_file.append(f) + self.logger.con_out( + "Adding keyword file from config file - %s", f) + else: + self.logger.con_out( + "No config found - KeywordConfig.keyword_files") + + # load in networks + # we need them to be in a list so we can process + # them individually + # each network should be a CIDR notation + # string, eg 192.168.1.0/24 + if config.has_section('NetworkConfig'): + if config.has_option('NetworkConfig', 'networks'): + networks = config.get('NetworkConfig', 'networks') + networks = networks.split(',') + for network in networks: + self._ip4_add_network(network) + self.logger.con_out( + "Adding network from config file - %s", network) + else: + self.logger.con_out( + "No config found - NetworkConfig.networks") + + if config.has_section('MacConfig'): + if config.has_option('MacConfig', 'obfuscate_macs'): + self.obfuscate_macs = bool( + config.get('MacConfig', 'obfuscate_macs')) + + except Exception as e: # pragma: no cover + self.logger.exception(e) + self.logger.con_out( + "READ_CONFIG_OPTIONS_ERROR - Unable to load configs from file %s - Continuing without those values", self.config_file) + + def _extract_file_data(self, filename): + """Extracts data from a file and return the data""" + try: + fh = open(filename, 'r') + data = fh.readlines() + fh.close() + + return data + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception("FILE_OPEN_ERROR - unable to open %s", filename) + def _skip_file(self, d, files): - ''' - The function 
passed into shutil.copytree to ignore certain patterns and filetypes - Currently Skipped - Directories - handled by copytree - Symlinks - handled by copytree - Write-only files (stuff in /proc) + """The function passed into shutil.copytree to ignore certain + patterns and filetypes + Currently Skipped: + 1) Directories - handled by copytree + 2) Symlinks - handled by copytree + 3) Write-only files (stuff in /proc) Binaries (can't scan them) - ''' + Sockets and FIFO files. Scanning them locks up the copying. + """ + def confirm_text_file(filename): + """I know this is an epic hack, but I've seen a _ton_ + of inconsistency around different distribution's builds + of python-magic. Until it stabilizes, I'm just going to + hack around it. + """ + try: + command = "file %s" % filename + filetype = os.popen(command).read().strip( + '\n').split(':')[1].strip().lower() + if 'text' in filetype or 'json' in filetype: + return True + else: + return False + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "CONFIRM_TEXT_FILE_ERROR - Cannot confirm file type - %s", filename) + skip_list = [] for f in files: f_full = os.path.join(d, f) if not os.path.isdir(f_full): if not os.path.islink(f_full): - #mode = oct(os.stat(f_full).st_mode)[-3:] - # executing as root makes this first if clause useless. - # i thought i'd already removed it. - jduncan - #if mode == '200' or mode == '444' or mode == '400': - # skip_list.append(f) - mime_type = content_type.from_file(f_full) - if 'text' not in mime_type and 'json' not in mime_type: + mode = os.stat(f_full).st_mode + if stat.S_ISSOCK(mode) or stat.S_ISFIFO(mode): + skip_list.append(f) + if not confirm_text_file(f_full): # if it's not a text file skip_list.append(f) return skip_list def _start_logging(self, filename): - #will get the logging instance going - loglevel_config = 'logging.%s' % self.loglevel + """Creates the logging objects and starts a logging instance.""" + # will get the logging instance going + loglevel_config = '%s' % self.loglevel - #i'd like the stdout to be under another logging name than 'con_out' - console_log_level = 25 #between INFO and WARNING + # i'd like the stdout to be under another logging name than 'con_out' + console_log_level = 25 # between INFO and WARNING quiet = self.quiet logging.addLevelName(console_log_level, "CONSOLE") - def con_out(self, message, *args, **kws): + def con_out(self, message, *args, **kws): # pragma: no cover if not quiet: self._log(console_log_level, message, args, **kws) logging.Logger.con_out = con_out logging.basicConfig(filename=filename, - level=eval(loglevel_config), - format='%(asctime)s %(name)s %(levelname)s: %(message)s', - datefmt = '%m-%d %H:%M:%S' - ) - if not self.quiet: # pragma: no cover + level=logging.getLevelName(loglevel_config), + format='%(asctime)s %(name)s %(levelname)s: %(message)s', + datefmt='%m-%d %H:%M:%S' + ) + if not self.quiet: # pragma: no cover console = logging.StreamHandler(sys.stdout) - formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s', '%m-%d %H:%M:%S') + formatter = logging.Formatter( + '%(asctime)s %(name)s %(levelname)s: %(message)s', '%m-%d %H:%M:%S') console.setFormatter(formatter) console.setLevel(console_log_level) self.logger = logging.getLogger(__name__) if not self.quiet: - self.logger.addHandler(console) # pragma: no cover + self.logger.addHandler(console) # pragma: no cover self.logger.con_out("Log File Created at %s" % filename) def _prep_environment(self): - - #we set up our various needed 
     def _prep_environment(self):
-
-        #we set up our various needed directory structures, etc.
-        ran_uuid = str(uuid.uuid4().int)[:16] # 16 digit random string
-        origin_path = os.path.join(self.report_dir, "soscleaner-origin-%s" % ran_uuid) # the origin dir we'll copy the files into
-        dir_path = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) # the dir we will put our cleaned files into
-        session = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) # short-hand for the soscleaner session to create reports, etc.
-        logfile = os.path.join(self.report_dir, "%s.log" % session) # the primary logfile
+        """Creates the needed definitions to identify a unique
+        soscleaner run.
+        It creates a 16 character UUID, then uses that to
+        create an origin_path that defines where the temporary working
+        files are stored, a dir_path that is where the
+        obfuscated files are located, and a session value,
+        which is used in multiple locations to identify objects
+        for a given soscleaner run.
+        """
+
+        # we set up our various needed directory structures, etc.
+        # 16 digit random string
+        ran_uuid = str(uuid.uuid4().int)[:16]
+        # Gather data into its own soscleaner session directory
+        self.report_dir += '/' + 'soscleaner-' + ran_uuid
+        os.makedirs(self.report_dir, 0o700)
+        # the origin dir we'll copy the files from
+        origin_path = os.path.join(
+            self.report_dir, "soscleaner-origin-%s" % ran_uuid)
+        # the dir we will put our cleaned files into
+        dir_path = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid)
+        # short-hand for the soscleaner session to create reports, etc.
+        session = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid)
+        # the primary logfile
+        logfile = os.path.join(self.report_dir, "%s.log" % session)

         return origin_path, dir_path, session, logfile, ran_uuid

     def _extract_sosreport(self, path):
+        """Extracts an sosreport, accounting for all common compression algorithms
+        as well as working with uncompressed directories and single files.
+        """
+        def get_compression_sig(filename):
+            """I know this is an epic hack, but I've seen a _ton_ of inconsistency around different
+            distribution's builds of python-magic. Until it stabilizes, I'm just going to hack around it.
+            """
+            try:
+                command = "file %s" % filename
+                compression_type = os.popen(command).read().strip(
+                    '\n').split(':')[1].strip().lower()
+                return compression_type
+
+            except Exception as e:  # pragma: no cover
+                self.logger.exception(e)
+                raise Exception(
+                    "GET_COMPRESSION_SIG_ERROR: Unable to verify compression sig - %s", filename)

-        self.logger.con_out("Beginning SOSReport Extraction")
-        compression_sig = content_type.from_file(path).lower()
-        if 'directory' in compression_sig:
-            self.logger.info('%s appears to be a %s - continuing', path, compression_sig)
-            # Clear out origin_path as we don't have one
-            self.origin_path = None
-            return path
-
-        elif 'compressed data' in compression_sig:
-            if compression_sig == 'xz compressed data':
-                #This is a hack to account for the fact that the tarfile library doesn't
-                #handle lzma (XZ) compression until version 3.3 beta
+        try:
+            self.logger.con_out("Beginning SOSReport Extraction")
+            if os.path.isdir(path):
+                self.logger.info(
+                    '%s appears to be a directory, no extraction required - continuing', path)
+                # Clear out origin_path as we don't have one
+                self.origin_path = None
+                return path
+            else:
                 try:
-                    self.logger.info('Data Source Appears To Be LZMA Encrypted Data - decompressing into %s', self.origin_path)
-                    self.logger.info('LZMA Hack - Creating %s', self.origin_path)
-                    os.system('mkdir %s' % self.origin_path)
-                    os.system('tar -xJf %s -C %s' % (path, self.origin_path))
-                    return_path = os.path.join(self.origin_path, os.listdir(self.origin_path)[0])
+                    compression_sig = get_compression_sig(path)
+                    if compression_sig == 'xz compressed data':
+                        try:
+                            self.logger.info(
+                                'Data Source Appears To Be LZMA Compressed Data - decompressing into %s', self.origin_path)
+                            self.logger.info(
+                                'LZMA Hack - Creating %s', self.origin_path)
+                            os.makedirs(self.origin_path, 0o755)
+                            subprocess.Popen(
+                                ["tar", "-xJf", path, "-C", self.origin_path]).wait()
+
+                            return_path = os.path.join(
+                                self.origin_path, os.listdir(self.origin_path)[0])
+
+                            return return_path
+
+                        except Exception as e:  # pragma: no cover
+                            self.logger.exception(e)
+                            raise Exception(
+                                'DecompressionError, Unable to decompress LZMA compressed file %s', path)
+
+                    # the tarfile module handles other compression types,
+                    # so we can just use that
+                    else:
+                        p = tarfile.open(path, 'r')
+                        self.logger.info(
+                            'Data Source Appears To Be %s - decompressing into %s', compression_sig, self.origin_path)
-                    return return_path
+                        p.extractall(self.origin_path)
+                        return_path = os.path.join(
+                            self.origin_path, os.path.commonprefix(p.getnames()))

-                except Exception as e: # pragma: no cover
+                        return return_path
+
+                except Exception as e:  # pragma: no cover
                     self.logger.exception(e)
-                    raise Exception('DecompressionError, Unable to decrypt LZMA compressed file %s', path)
+                    raise Exception(
+                        "DeCompressionError: Unable to De-Compress %s into %s", path, self.origin_path)
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'CompressionError: Unable To Determine Compression Type')
+    ################################
+    #       User Functions         #
+    ################################
+
+    def _process_user_option(self, users):
+        """Adds users specified from the command line to the user_db object"""
+
+        try:
+            for username in users:
+                new_user = self._user2db(username)
+                self.logger.con_out(
+                    "Adding user from the command line - %s > %s", username, new_user)
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "PROCESS_USER_OPTION_ERROR: unable to add user to user database")
+
+    def _sub_username(self, line):
+        """Accepts a line from a file as input and replaces all occurrences of the users in the
+        user_db with the obfuscated values.
+        Returns the obfuscated line.
+        """
+
+        try:
+            if self.user_count > 0:  # we have obfuscated users to work with
+                for user, o_user in list(self.user_db.items()):
+                    line = re.sub(r'\b%s\b(?i)' % user, o_user, line)
+                    self.logger.debug(
+                        "Obfuscating User - %s > %s", user, o_user)
+
+            return line
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'SUB_USERNAME_ERROR: Unable to obfuscate usernames on line - %s', line)
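# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  _sub_username() builds patterns
# like r'\b%s\b(?i)'.  An inline (?i) flag that is not at the start of the
# pattern is deprecated since Python 3.6 and rejected in 3.11; the equivalent,
# future-proof spelling passes re.IGNORECASE and escapes the username:
# ---------------------------------------------------------------------------
import re

def sub_word(line, word, replacement):
    """Replace whole-word, case-insensitive occurrences of `word`."""
    return re.sub(r'\b%s\b' % re.escape(word), replacement, line,
                  flags=re.IGNORECASE)

assert sub_word("login as Admin", "admin", "obfuscateduser1") == "login as obfuscateduser1"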
+    def _create_random_username(self):
+        """Generates a random, unique obfuscated user ID and returns it"""
+
+        def _randomizer():
+            return "obfuscateduser%s" % randint(1, 1000000)
+
+        test_user = _randomizer()
+        if test_user in list(self.user_db.values()):
+            while test_user in list(self.user_db.values()):
+                self.logger.debug("Duplicate Obfuscated Username. Retrying - %s", test_user)
+                test_user = _randomizer()
+            if test_user not in list(self.user_db.values()):
+                return test_user
+        else:
+            return test_user
+
+    def _user2db(self, username):
+        """Takes a username and adds it to the user_db with an obfuscated partner.
+        If the user hasn't been encountered before, it will add it to the database
+        and return the obfuscated partner entry.
+        If the user is already in the database it will return the obfuscated username
+        """
+        try:
+            o_user = self.user_db.get(username)
+            if o_user is None:  # no match, so we need to add to the database
+                # new username, so we increment the counter to get the user's obfuscated name
+                self.user_count += 1
+                o_user = self._create_random_username()
+                self.logger.info(
+                    "Adding new obfuscated user: %s > %s", username, o_user)
+                self.user_db[username] = o_user
+
+            return o_user
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "USER_TO_DB_ERROR: unable to add user %s to database", username)
+
+    def _process_users_file(self):
+        """Uses the 'last' output from an sosreport and generates a list of usernames to obfuscate in log files, etc.
+        By default it looks for the last file from an sosreport. But it can process any line-delimited list of users
+        From RFE #79
+        """
+
+        # Users and entries that we don't want to obfuscate that could show up in lastlog
+        ignored_users = ('Username',
+                         'ubuntu'
+                         )
+
+        # we're not calling this function from an option on the cli, we're just running it as part of __init__
+
+        try:
+            users_file = os.path.join(self.dir_path, self.users_file)
+            # check to make sure users_file is there and we can access it
+            if os.path.exists(users_file):
+                self.logger.con_out(
+                    "Processing output from user file - %s", users_file)
+                data = self._extract_file_data(users_file)
+                sorted_users = list()
+
+                # first, we get out the unique user entries
+                for line in data:
+                    if len(line) > 1:  # there are some blank lines at the end of the last output
+                        sorted_users.append(line.split()[0])
+
+                # then we add them to the obfuscation database
+                for user in sorted_users:
+                    if user not in ignored_users:
+                        self.logger.con_out("Obfuscating user %s", user)
+                        self._user2db(user)
+
+                return True
             else:
-                p = tarfile.open(path, 'r')
+                self.logger.con_out(
+                    "Unable to locate user file - %s", users_file)
+                self.logger.con_out("Continuing without processing users file")

-                self.logger.info('Data Source Appears To Be %s - decompressing into %s', compression_sig, self.origin_path)
-                try:
-                    p.extractall(self.origin_path)
-                    return_path = os.path.join(self.origin_path, os.path.commonprefix(p.getnames()))
+                return False

-                    return return_path
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "PROCESS_USERS_FILE_ERROR: unable to add file - %s", self.users_file)

-                except Exception as e: # pragma: no cover
-                    self.logger.exception(e)
-                    raise Exception("DeCompressionError: Unable to De-Compress %s into %s", path, self.origin_path)
-        else: # pragma: no cover
-            raise Exception('CompressionError: Unable To Determine Compression Type')
+    ################################
+    #   IP Obfuscation Functions   #
+    ################################

     def _sub_ip(self, line):
-        '''
-        This will substitute an obfuscated IP for each instance of a given IP in a file
-        This is called in the self._clean_line function, along with user _sub_* functions to scrub a given
-        line in a file.
-        It scans a given line and if an IP exists, it obfuscates the IP using _ip2db and returns the altered line
-        '''
+        """Substitutes a found IP with its corresponding obfuscated partner.
+        This is called in the self._clean_line function, along with user _sub_*
+        functions to scrub a given line in a file. It scans a given line and if
+        an IP exists, it obfuscates the IP using _ip4_2_db and returns the altered
+        line
+        """
         try:
             pattern = r"(((\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[1-9]))(\.(\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[0-9])){3})"
             ips = [each[0] for each in re.findall(pattern, line)]
             if len(ips) > 0:
                 for ip in ips:
-                    new_ip = self._ip2db(ip)
+                    new_ip = self._ip4_2_db(ip)
                     self.logger.debug("Obfuscating IP - %s > %s", ip, new_ip)
                     line = line.replace(ip, new_ip)
             return line
-        except Exception as e: # pragma: no cover
+
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('SubIPError: Unable to Substitute IP Address - %s', ip)
+            raise Exception("SUB_IP_ERROR: Unable to obfuscate IP address")
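# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  A quick demonstration of the
# dotted-quad pattern used by _sub_ip() above: each octet alternative keeps
# the match inside 0-255, and group 0 of each findall() tuple is the full IP.
# ---------------------------------------------------------------------------
import re

IP_PATTERN = r"(((\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[1-9]))(\.(\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[0-9])){3})"

line = "eth0 has address 192.168.1.10 and peer 10.0.0.256"
ips = [match[0] for match in re.findall(IP_PATTERN, line)]
print(ips)  # ['192.168.1.10', '10.0.0.25'] - 256 is not a valid octet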
-    def _get_disclaimer(self): # pragma: no cover
-        #prints a disclaimer that this isn't an excuse for manual or any other sort of data verification
+    #############################
+    #   Formatting Functions    #
+    #############################

-        self.logger.con_out("%s version %s" % (self.name, self.version))
-        self.logger.warning("%s is a tool to help obfuscate sensitive information from an existing sosreport." % self.name)
-        self.logger.warning("Please review the content before passing it along to any third party.")
+    def _get_version(self):
+        """Prints out soscleaner version"""

-    def _create_ip_report(self):
-        '''
-        this will take the obfuscated ip and hostname databases and output csv files
-        '''
+        self.logger.con_out(
+            "SOSCleaner version: %s" % self.version)
+
+    def _get_disclaimer(self):
+        """Prints out a disclaimer at the beginning of each soscleaner run"""
+
+        self.logger.con_out(
+            "%s is a tool to help obfuscate sensitive information from an existing sosreport." % self.name)  # pragma: no cover
+        self.logger.con_out(
+            "Please review the content before passing it along to any third party.")  # pragma: no cover
+
+    ###########################
+    #   Reporting Functions   #
+    ###########################
+
+    def _create_mac_report(self):
+        """Creates a report of MAC addresses and their obfuscated counterparts"""
         try:
-            ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session)
-            self.logger.con_out('Creating IP Report - %s', ip_report_name)
-            ip_report = open(ip_report_name, 'wt')
-            ip_report.write('Obfuscated IP,Original IP\n')
-            for k,v in self.ip_db.items():
-                ip_report.write('%s,%s\n' %(self._int2ip(k),self._int2ip(v)))
-            ip_report.close()
-            self.logger.info('Completed IP Report')
+            mac_report_name = os.path.join(
+                self.report_dir, "%s-mac.csv" % self.session)
+            self.logger.con_out(
+                'Creating MAC address Report - %s', mac_report_name)
+            mac_report = open(mac_report_name, 'w')
+            mac_report.write('Original MAC Address,Obfuscated MAC Address\n')
+            if len(self.mac_db) > 0:
+                for k, v in list(self.mac_db.items()):
+                    mac_report.write('%s,%s\n' % (k, v))
+            else:
+                mac_report.write('None,None\n')
+            mac_report.close()
+            os.chmod(mac_report_name, 0o600)
+            self.logger.info('Completed MAC Address Report')

-            self.ip_report = ip_report_name
-        except Exception as e: # pragma: no cover
+            self.mac_report = mac_report_name
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'CREATE_MAC_REPORT_ERROR: Unable to create report - %s', mac_report_name)
+
+    def _create_kw_report(self):
+        """Creates a report of keywords and their obfuscated counterparts"""
+        try:
+            kw_report_name = os.path.join(
+                self.report_dir, "%s-keyword.csv" % self.session)
+            self.logger.con_out(
+                'Creating Keyword Report - %s', kw_report_name)
+            kw_report = open(kw_report_name, 'w')
+            kw_report.write('Original Keyword,Obfuscated Keyword\n')
+            if self.kw_count > 0:
+                for keyword, o_keyword in list(self.kw_db.items()):
+                    kw_report.write('%s,%s\n' % (keyword, o_keyword))
+            else:
+                kw_report.write('None,None\n')
+            kw_report.close()
+            os.chmod(kw_report_name, 0o600)
+            self.logger.info('Completed Keyword Report')
+
+            self.kw_report = kw_report_name
+
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('CreateReport Error: Error Creating IP Report')
+            raise Exception(
+                'CREATE_KW_REPORT_ERROR: unable to create report - %s', kw_report_name)
+
+    def _create_un_report(self):
+        """Creates a report of usernames and their obfuscated counterparts.
+        """
+        try:
+            un_report_name = os.path.join(
+                self.report_dir, "%s-username.csv" % self.session)
+            self.logger.con_out(
+                'Creating Username Report - %s', un_report_name)
+            un_report = open(un_report_name, 'w')
+            un_report.write('Original Username,Obfuscated Username\n')
+            for k, v in list(self.user_db.items()):
+                un_report.write('%s,%s\n' % (k, v))
+            un_report.close()
+            os.chmod(un_report_name, 0o600)
+
+            self.un_report = un_report_name
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'CREATE_USERNAME_REPORT_ERROR: Unable to create report - %s', un_report_name)

     def _create_hn_report(self):
+        """Creates a report of hostnames and their obfuscated counterparts"""
         try:
-            hn_report_name = os.path.join(self.report_dir, "%s-hostname.csv" % self.session)
-            self.logger.con_out('Creating Hostname Report - %s', hn_report_name)
-            hn_report = open(hn_report_name, 'wt')
-            hn_report.write('Obfuscated Hostname,Original Hostname\n')
+            hn_report_name = os.path.join(
+                self.report_dir, "%s-hostname.csv" % self.session)
+            self.logger.con_out(
+                'Creating Hostname Report - %s', hn_report_name)
+            hn_report = open(hn_report_name, 'w')
+            hn_report.write('Original Hostname,Obfuscated Hostname\n')
             if self.hostname_count > 0:
-                for k,v in self.hn_db.items():
-                    hn_report.write('%s,%s\n' %(k,v))
+                for k, v in list(self.hn_db.items()):
+                    hn_report.write('%s,%s\n' % (k, v))
             else:
                 hn_report.write('None,None\n')
+            os.chmod(hn_report_name, 0o600)
             hn_report.close()
             self.logger.info('Completed Hostname Report')

             self.hn_report = hn_report_name
-        except Exception as e: #pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('CreateReport Error: Error Creating Hostname Report')
+            raise Exception(
+                'CREATE_HN_REPORT_ERROR: Unable to create report - %s', hn_report_name)

     def _create_dn_report(self):
+        """Creates a report of domain names and their obfuscated counterparts"""
         try:
-            dn_report_name = os.path.join(self.report_dir, "%s-dn.csv" % self.session)
-            self.logger.con_out('Creating Domainname Report - %s', dn_report_name)
-            dn_report = open(dn_report_name, 'wt')
-            dn_report.write('Obfuscated Domain,Original Domain\n')
+            dn_report_name = os.path.join(
+                self.report_dir, "%s-dn.csv" % self.session)
+            self.logger.con_out(
+                'Creating Domainname Report - %s', dn_report_name)
+            dn_report = open(dn_report_name, 'w')
+            dn_report.write('Original Domain,Obfuscated Domain\n')
             if self.domain_count > 0:
-                for k,v in self.dn_db.items():
-                    dn_report.write('%s,%s\n' %(k,v))
+                for domain, o_domain in list(self.dn_db.items()):
+                    dn_report.write('%s,%s\n' % (domain, o_domain))
             else:
                 dn_report.write('None,None\n')
             dn_report.close()
+            os.chmod(dn_report_name, 0o600)
             self.logger.info('Completed Domainname Report')

             self.dn_report = dn_report_name
-        except Exception as e: # pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('CreateReport Error: Error Creating Domainname Report')
+            raise Exception(
+                'CREATE_DN_REPORT_ERROR: Unable to create report - %s', dn_report_name)

-    def _create_reports(self): # pragma: no cover
+    def _create_ip_report(self):
+        """Creates a report of IP addresses and their obfuscated counterparts"""
+        try:
+            ip_report_name = os.path.join(
+                self.report_dir, "%s-ip.csv" % self.session)
+            self.logger.con_out('Creating IP Report - %s', ip_report_name)
+            ip_report = open(ip_report_name, 'w')
+            ip_report.write('Original IP,Obfuscated IP\n')
+            for i in self.ip_db:
+                ip_report.write('%s,%s\n' % (i[0], i[1]))
+            ip_report.close()
+            os.chmod(ip_report_name, 0o600)
+            self.logger.info('Completed IP Report')
+
+            self.ip_report = ip_report_name

-        self._create_ip_report()
-        self._create_hn_report()
-        self._create_dn_report()
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'CREATE_IP_REPORT_ERROR: Unable to create report - %s', ip_report_name)
+
+    def _create_sos_report(self):
+        """Creates a report of original sosreport tarball and its obfuscated counterpart"""
+        try:
+            sos_report_name = os.path.join(
+                self.report_dir, "%s-sosreport.csv" % self.session)
+            self.logger.con_out('Creating sosreport Report - %s', sos_report_name)
+            sos_report = open(sos_report_name, 'w')
+            sos_report.write('Original Sosreport,Obfuscated Sosreport\n')
+            sos_report.write('%s,%s.tar.gz\n' % (self.sosreport_filename, self.session))
+            sos_report.close()
+            os.chmod(sos_report_name, 0o600)
+            self.logger.info('Completed Sosreport Report')
+
+            self.sos_report = sos_report_name
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'CREATE_SOS_REPORT_ERROR: Unable to create report - %s', sos_report_name)
+
+    def _create_reports(self):
+        """Creates the reports at the end of a soscleaner run"""
+
+        self._create_ip_report()  # pragma: no cover
+        self._create_hn_report()  # pragma: no cover
+        self._create_dn_report()  # pragma: no cover
+        # self._create_un_report()  # pragma: no cover
+        # self._create_mac_report()  # pragma: no cover
+        # self._create_kw_report()  # pragma: no cover
+        # self._create_sos_report()  # pragma: no cover
+        # os.chmod(self.logfile, 0o600)
+
+    #############################
+    #   MAC Address functions   #
+    #############################
+
+    def _sub_mac(self, line):
+        """Finds potential MAC addresses and obfuscates them in a single line."""
+        try:
+            pattern = re.compile(r'(?:[0-9a-fA-F]:?){12}')
+            macs = re.findall(pattern, line)
+            if len(macs) > 0:
+                for mac in macs:
+                    new_mac = self._mac2db(mac)
+                    self.logger.debug(
+                        "Obfuscating MAC address - %s > %s", mac, new_mac)
+                    line = line.replace(mac, new_mac)
+            return line
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception("SUB_MAC_ERROR: Unable to obfuscate MAC address")
+
+    def _mac2db(self, mac):
+        """Adds a MAC address to the MAC database and returns the obfuscated
+        entry, or returns the existing obfuscated MAC entry.
+        """
+        try:
+            o_mac = self.mac_db.get(mac)
+            if o_mac is None:  # no match: we have to add it to the db
+                # using this lambda to create a valid randomized mac address is
+                # documented at https://www.commandlinefu.com/commands/view/7245/generate-random-valid-mac-addresses
+                # many thanks for putting that little thought together
+                o_mac = ':'.join(['%02x' % x for x in [randint(0, 255) for x in list(range(6))]])
+                self.logger.debug(
+                    "Creating new obfuscated MAC address: %s > %s", mac, o_mac)
+                self.mac_db[mac] = o_mac
+
+            return o_mac
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "MAC2DB_ERROR: unable to add MAC to database - %s", mac)
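# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  Two quick demos of the MAC
# handling above.  Note that r'(?:[0-9a-fA-F]:?){12}' is deliberately loose:
# the colons are optional, so a bare 12-character hex string also matches.
# ---------------------------------------------------------------------------
import re
from random import randint

assert re.findall(r'(?:[0-9a-fA-F]:?){12}',
                  "link/ether 52:54:00:ab:cd:ef")[0] == "52:54:00:ab:cd:ef"

# the replacement generator used by _mac2db(): six random octets, hex-formatted
random_mac = ':'.join('%02x' % randint(0, 255) for _ in range(6))
print(random_mac)  # e.g. '3a:1f:09:c4:77:d2'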
+ """ + try: + o_mac = self.mac_db.get(mac) + if o_mac is None: # no match: we have to add it to the db + # using this lambda to create a valid randomized mac address is + # documented at https://www.commandlinefu.com/commands/view/7245/generate-random-valid-mac-addresses + # many thanks for putting that little thought together + o_mac = ':'.join(['%02x' % x for x in [randint(0, 255) for x in list(range(6))]]) + self.logger.debug( + "Creating new obfuscated MAC address: %s > %s", mac, o_mac) + self.mac_db[mac] = o_mac + + return o_mac + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "MAC2DB_ERROR: unable to add MAC to database - %s", mac) + + ########################### + # Hostname functions # + ########################### + + def _hn2db(self, host): + """Adds a hostname for a hostname for an included domain or return an existing entry. + It is called by _add_hostnames to verify if the domain is in an included + domain for obfuscation, and the entry to hn_db, and return the obfuscated value + """ + try: + o_host = self.hn_db.get(host) + if o_host is None: # no database match + split_host = host.split('.') + self.hostname_count += 1 # increment the counter to get the host ID number + if len(split_host) == 1: # we have a non-fqdn - typically the host short name + o_host = "obfuscatedhost%s" % self.hostname_count + self.hn_db[host] = o_host + elif len(split_host) == 2: # we have a root domain, a la example.com + o_host = self._dn2db(host) + else: # a 3rd level domain or higher + domain = '.'.join(split_host[1:]) + o_domain = self._dn2db(domain) + o_host = "host%s.%s" % (self.hostname_count, o_domain) + self.hn_db[host] = o_host + + if o_host is not None: + return o_host + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "HN2DB_ERROR: Unable to add hostname to database - %s", host) + + def _get_hostname(self, hostname='hostname'): + """Gets the hostname from an sosreport. Used at the beginning of an + SOSCleaner run to set self.hostname and self.domainname + """ + + try: + hostfile = os.path.join(self.dir_path, hostname) + fh = open(hostfile, 'r') + name_list = fh.readline().rstrip().split('.') + hostname = name_list[0] + if len(name_list) > 1: + domainname = '.'.join(name_list[1:len(name_list)]) + else: + domainname = None + + return hostname, domainname + + except IOError as e: # the 'hostname' file doesn't exist or isn't readable for some reason + if not self.quiet: # pragma: no cover + self.logger.exception(e) + + hostname = None + domainname = None + + return hostname, domainname + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + 'GET_HOSTNAME_ERROR: Cannot resolve hostname from %s') % hostfile + + def _validate_domainname(self, hostname): + """Takes a potential domain name and validates it against the domain database + (self.dn_db). It takes care to look for higher-level subdomains for the + domains entered at the beginning of the sosreport run. Logic behind this definition of a valid domain: + A domain can be a total of 253 characters, per RFC 1035, RFC 1123 and RFC 2181 + Each label can be a maximum of 63 characters + With 4th, 5th, 6th level domains being more the norm today, I wanted to take as + broad an interpretation of a domain as I could. 
+    def _get_hostname(self, hostname='hostname'):
+        """Gets the hostname from an sosreport. Used at the beginning of an
+        SOSCleaner run to set self.hostname and self.domainname
+        """
+
+        try:
+            hostfile = os.path.join(self.dir_path, hostname)
+            fh = open(hostfile, 'r')
+            name_list = fh.readline().rstrip().split('.')
+            hostname = name_list[0]
+            if len(name_list) > 1:
+                domainname = '.'.join(name_list[1:len(name_list)])
+            else:
+                domainname = None
+
+            return hostname, domainname
+
+        except IOError as e:  # the 'hostname' file doesn't exist or isn't readable for some reason
+            if not self.quiet:  # pragma: no cover
+                self.logger.exception(e)
+
+            hostname = None
+            domainname = None
+
+            return hostname, domainname
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                'GET_HOSTNAME_ERROR: Cannot resolve hostname from %s' % hostfile)
+
+    def _validate_domainname(self, hostname):
+        """Takes a potential domain name and validates it against the domain database
+        (self.dn_db). It takes care to look for higher-level subdomains for the
+        domains entered at the beginning of the soscleaner run. The logic behind this definition of a valid domain:
+        A domain can be a total of 253 characters, per RFC 1035, RFC 1123 and RFC 2181
+        Each label can be a maximum of 63 characters
+        With 4th, 5th, 6th level domains being more the norm today, I wanted to take as
+        broad an interpretation of a domain as I could. SO:
+        separated by a word boundary
+        the lower domains can be a max of 190 characters, not including dots
+        any valid domain character is allowed (alpha, digit, dash)
+        the top level domain can be up to 63 characters, and not contain numbers
+        With a 200 character limit to the lower domains, technically an 11th level domain
+        would not be obfuscated. As for right now, I'm OK with that. Please file an issue
+        in Github if you are not.
+        Summary:
+        A valid domain is defined as
+        <lower-level labels (alpha, digit, dash), up to ~200 characters>.<top-level domain, up to 63 letters>
+        """
+        def _eval_domains(root_domain):
+            """Looks for matches of higher-level domains against the existing
+            domains in self.dn_db. Returns True if it's a match, and False if
+            no match is found. This is used to determine if we should add a new
+            subdomain to self.dn_db.
+            """
+            for known_domain in list(self.dn_db.keys()):
+                if known_domain in root_domain:
+                    self.logger.debug(
+                        "evaluated domain found in database %s > %s", root_domain, known_domain)
+                    return True
+            return False
+
+        domainname = hostname.split('.')
+        domain_depth = len(domainname)
+        self.logger.debug("validating domain %s - depth: %s",
+                          hostname, domain_depth)
+        # The first clause checks for potential domains that are 3rd level
+        # domains or higher. If the base domain (everything except the
+        # first label) is already in the database, it adds the host. If
+        # the root domain is in the database, but this is a new higher-
+        # level domain, it adds the higher-level domain to the database
+        # before moving forward with obfuscating the full hostname.
+        found_domain = False
+        if domain_depth > 2:
+            # everything after the hostname is the domain we need to check
+            root_domain = '.'.join(domainname[1:domain_depth])
+            self.logger.debug("validating domain - %s", root_domain)
+            # We try a straight match first
+            o_domain = self._dn2db(root_domain)
+            if o_domain is not None:  # we got a straight match
+                found_domain = True
+            # If we don't get a straight match, then we look to see if
+            # it is a subdomain of an already obfuscated domain.
+            else:
+                add_domain = _eval_domains(root_domain)
+                if add_domain:
+                    self.logger.debug(
+                        "Found new subdomain of %s - %s", root_domain, domainname)
+                    found_domain = True
+                    o_domain = self._dn2db(root_domain, add_domain=True)
+
+        elif domain_depth == 2:
+            o_domain = self.dn_db.get(hostname)
+            if o_domain:
+                self.logger.debug(
+                    "Domain found in domain database - %s", domainname)
+                found_domain = True
+
+        return found_domain
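# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  The broad "potential hostname"
# pattern used by _sub_hostname() below, matching the definition in the
# _validate_domainname() docstring: up to 200 characters of label text, a
# dot, then an alphabetic TLD of up to 63 characters.
# ---------------------------------------------------------------------------
import re

HOST_PATTERN = r'\b[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}\b'

line = "Connecting to access.redhat.com from host01.internal.example.net:443"
print(re.findall(HOST_PATTERN, line))
# ['access.redhat.com', 'host01.internal.example.net']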
+ """ + # self.logger.debug("Processing Line - %s", line) + potential_hostnames = re.findall( + r'\b[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}\b', line) + try: + for hostname in potential_hostnames: + hostname = hostname.lower() + self.logger.debug( + "Verifying potential hostname - %s", hostname) + domain_found = self._validate_domainname(hostname) + + # If we have a potential match that is a host on a domain that + # we care about, we regex it out of the line. + if domain_found: + o_hostname = self._hn2db(hostname) + line = re.sub(r'\b%s\b(?i)' % hostname, o_hostname, line) + + # Now that the hard work is done, we account for the handful of + # single-word "short domains" that we care about. We start with + # the hostname. + if self.hostname is not None: + o_host = self._hn2db(self.hostname) + line = re.sub(r'\b%s\b(?i)' % self.hostname, o_host, line) + + # There are a handful of short domains that we want to obfuscate + # Things like 'localhost' and 'localdomain' + # They are kept in self.short_domains and added to the domain + # database. They won't match the potential_hostnames regex because + # they're only 1 word, so we handle them here. + for domain in self.short_domains: + o_host = self._hn2db(domain) + line = re.sub(r'\b%s\b(?i)' % domain, o_host, line) return line - except Exception as e: # pragma: no cover + + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception('SubHostnameError: Unable to Substitute Hostname/Domainname') + raise Exception( + "SUB_HOSTNAME_ERROR: Unable to obfuscate hostnames on line - %s", line) + + ############################ + # Filesystem functions # + ############################ + + def _clean_line(self, line, filename): + """Returns a line with obfuscations for all covered data types: + hostname, ip, user, keyword, and MAC address. The filename is passed in + so we can know whether or not to obfuscate IP addresses. IP obfuscation + is excluding in a few files where RPM version numbers cause false + positives and are known to not contain IP address information. 
+ """ + + try: + process_obfuscation = True + # We want to skip the files in self.false_positives for all + # obfuscation but keywords because they don't have any sensible + # info in them and they generate a lot of false positives that + # much up the obfuscation and confuse people when they're working + # with the files + # Issues #60 & #101 + for false_positive in self.false_positives: + if false_positive in filename: + process_obfuscation = False + new_line = self._sub_keywords(line) # Keyword Substitution + if self.obfuscate_macs is True: + new_line = self._sub_mac(new_line) # MAC address obfuscation + if process_obfuscation: + new_line = self._sub_hostname( + new_line) # Hostname substitution + new_line = self._sub_ip(new_line) # IP substitution + new_line = self._sub_username( + new_line) # Username substitution + + return new_line + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception("CLEAN_LINE_ERROR: Cannot Clean Line - %s" % line) + + def _clean_file(self, f): + """Takes a given file path, scrubs it, and saves a new copy of + the obfuscated file in the same location + """ + if os.path.exists(f) and not os.path.islink(f): + tmp_file = tempfile.TemporaryFile() + try: + data = self._extract_file_data(f) + if len(data) > 0: # if the file isn't empty: + for l in data: + # self.logger.debug("Obfuscating Line - %s", l) + new_l = self._clean_line(l, f) + if six.PY3: + tmp_file.write(new_l.encode('utf-8')) + else: + tmp_file.write(new_l) + + tmp_file.seek(0) + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "CLEAN_FILE_ERROR: Unable to obfuscate file - %s" % f) + + try: + if len(data) > 0: + new_fh = open(f, 'wb') + for line in tmp_file: + new_fh.write(line) + new_fh.close() + except OSError as e: + # If there's an IO error (disk is full) + if e.errno == errno.EIO: # pragma: no cover + self.logger.exception(e) + self.logger.con_out( + "CLEAN_FILE_ERROR: Not enough disk space to complete report obfusation") + self.logger.con_out( + "CLEAN_FILE_ERROR: Removing partially obfuscated report and other artifacts") + self.logger.con_out( + "CLEAN_FILE_ERROR: Please remedy the disk pressure and re-run soscleaner") + self._clean_up() + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception( + "CLEAN_FILE_ERROR: Unable to write obfuscated file - %s" % f) + + finally: + tmp_file.close() + + def _add_extra_files(self, files): + """Incorporates extra files are to be analyzed with an sosreport by + adding them to the origin path to be analyzed + """ + + try: + for f in files: + self.logger.con_out( + "adding additional file for analysis: %s" % f) + fname = os.path.basename(f) + f_new = os.path.join(self.dir_path, fname) + shutil.copyfile(f, f_new) + except IOError as e: + self.logger.con_out( + "ExtraFileError: %s is not readable or does not exist. 
+    def _add_extra_files(self, files):
+        """Incorporates extra files to be analyzed with an sosreport by
+        adding them to the origin path to be analyzed
+        """
+
+        try:
+            for f in files:
+                self.logger.con_out(
+                    "adding additional file for analysis: %s" % f)
+                fname = os.path.basename(f)
+                f_new = os.path.join(self.dir_path, fname)
+                shutil.copyfile(f, f_new)
+        except IOError as e:
+            self.logger.con_out(
+                "ExtraFileError: %s is not readable or does not exist. Skipping File" % f)
+            self.logger.exception(e)
+            pass
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "ADD_EXTRA_FILES_ERROR: Unable to process extra file - %s" % f)
+
+    def _walk_report(self, folder):
+        """Returns a dictionary of dictionaries in the format {directory_name:[file1,file2,filex]}"""
+
+        dir_list = {}
+        try:
+            for dirName, subdirList, fileList in os.walk(folder):
+                x = []
+                for fname in fileList:
+                    x.append(fname)
+                dir_list[dirName] = x
+
+            return dir_list
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "WALK_REPORT_ERROR: Unable to create file list in folder - %s", folder)
+
+    def _file_list(self, folder):
+        """returns a list of file names in an sosreport directory"""
+        try:
+            rtn = []
+            walk = self._walk_report(folder)
+            for key, val in list(walk.items()):
+                for v in val:
+                    x = os.path.join(key, v)
+                    rtn.append(x)
+
+            # a count of the files we'll have in the final cleaned sosreport
+            self.file_count = len(rtn)
+            return rtn
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "FILE_LIST_ERROR: Unable to create file list from directory - %s", folder)

     def _make_dest_env(self):
-        '''
-        This will create the folder in self.report_dir (defaults to /tmp) to store the sanitized files and populate it using shutil
-        These are the files that will be scrubbed
-        '''
+        """Creates the folder in self.report_dir (defaults to /tmp) to store
+        sanitized files and populates it using shutil. These are the files that
+        will be scrubbed.
+        """
         try:
-            shutil.copytree(self.report, self.dir_path, symlinks=True, ignore=self._skip_file)
+            shutil.copytree(self.report, self.dir_path,
+                            symlinks=True, ignore=self._skip_file)

-        except Exception as e: #pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception("DestinationEnvironment Error: Cannot Create Destination Environment")
+            raise Exception(
+                "MAKE_DESTINATION_ENV_ERROR: Cannot Create Destination Environment")

     def _create_archive(self):
-        '''This will create a tar.gz compressed archive of the scrubbed directory'''
+        """Creates a tar.gz compressed archive of the scrubbed directory"""
         try:
-            self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session)
-            self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path)
+            self.archive_path = os.path.join(
+                self.report_dir, "%s.tar.gz" % self.session)
+            self.logger.con_out(
+                'Creating SOSCleaner Archive - %s', self.archive_path)
             t = tarfile.open(self.archive_path, 'w:gz')
             for dirpath, dirnames, filenames in os.walk(self.dir_path):
                 for f in filenames:
                     f_full = os.path.join(dirpath, f)
-                    f_archive = f_full.replace(self.report_dir,'')
-                    self.logger.debug('adding %s to %s archive', f_archive, self.archive_path)
+                    f_archive = f_full.replace(self.report_dir, '')
+                    self.logger.debug('adding %s to %s archive',
+                                      f_archive, self.archive_path)
                     t.add(f_full, arcname=f_archive)
-        except Exception as e: #pragma: no cover
+            os.chmod(self.archive_path, 0o600)  # per #90
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('CreateArchiveError: Unable to create Archive')
+            raise Exception(
+                'CREATE_ARCHIVE_ERROR: Unable to create archive - %s', self.archive_path)

         self._clean_up()
         self.logger.info('Archiving Complete')
-        self.logger.con_out('SOSCleaner Complete')
         if not self.quiet:  # pragma: no cover
-            t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,''))
+            t.add(self.logfile, arcname=self.logfile.replace(self.report_dir, ''))
         t.close()

+    def soscleaner_checksum(self):
+        """Creates an MD5 checksum of the soscleaner tarball and writes it to a .md5 file alongside it."""
+        soscleaner_archive = self.session + ".tar.gz"
+        checksum = hashlib.md5(open(soscleaner_archive, 'rb').read()).hexdigest()
+
+        soscleaner_archive_hash = soscleaner_archive + ".md5"
+        fp = open(soscleaner_archive_hash, "w")
+        fp.write(checksum + "\n")
+        self.logger.con_out('md5 checksum is: %s' % checksum)
+        fp.close()
+
+    def finalmsg(self):
+        """Final message at the end of the soscleaner run"""
+        self.logger.con_out('SOSCleaner Complete')
+
     def _clean_up(self):
-        '''This will clean up origin directories, etc.'''
+        """Cleans up origin directories and other soscleaner processing artifacts"""
         self.logger.info('Beginning Clean Up Process')
         try:
             if self.origin_path:
-                self.logger.info('Removing Origin Directory - %s', self.origin_path)
+                self.logger.info(
+                    'Removing Origin Directory - %s', self.origin_path)
                 shutil.rmtree(self.origin_path)
             self.logger.info('Removing Working Directory - %s', self.dir_path)
             shutil.rmtree(self.dir_path)
             self.logger.info('Clean Up Process Complete')
-        except Exception as e: #pragma: no cover
+
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
+            raise Exception(
+                "CLEAN_UP_ERROR: Unable to complete clean up process")

-    def _process_hosts_file(self):
-        # this will process the hosts file more thoroughly to try and capture as many server short names/aliases as possible
-        # could lead to false positives if people use dumb things for server aliases, like 'file' or 'server' or other common terms
-        # this may be an option that can be enabled... --hosts or similar?
+    ########################
+    #   Domain Functions   #
+    ########################

+    def _dn2db(self, domain, add_domain=False):
+        """Adds a domain to dn_db and returns the obfuscated value."""
         try:
-            if os.path.isfile(os.path.join(self.dir_path, 'etc/hosts')):
-                with open(os.path.join(self.dir_path, 'etc/hosts')) as f:
-                    self.logger.con_out("Processing hosts file for better obfuscation coverage")
-                    data = f.readlines()
-                    for line in data:
-                        x = re.split('\ |\t', line.rstrip()) #chunk up the line, delimiting with spaces and tabs (both used in hosts files)
-                        # we run through the rest of the items in a given line, ignoring the IP to be picked up by the normal methods
-                        # skipping over the 'localhost' and 'localdomain' entries
-                        for item in x[1:len(x)]:
-                            if len(item) > 0:
-                                if all(['localhost' not in item, 'localdomain' not in item]):
-                                    new_host = self._hn2db(item)
-                                    self.logger.debug("Added to hostname database through hosts file processing - %s > %s", item, new_host)
-            else: # pragma: no cover
-                self.logger.con_out("Unable to Process Hosts File. Hosts File Processing Disabled")
+            o_domain = self.dn_db.get(domain)
+            if o_domain is None:
+                # Try converting it all to lowercase
+                if add_domain:
+                    self.domain_count += 1
+                    o_domain = "obfuscateddomain%s.com" % self.domain_count
+                    self.dn_db[domain] = o_domain
+                    self.logger.con_out(
+                        "Adding new obfuscated domain - %s > %s", domain, o_domain)
+
+            if o_domain:
+                return o_domain
+            else:
+                return None

-        except Exception as e: #pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
+            raise Exception(
+                "DN2DB_ERROR: Unable to retrieve obfuscated domain - %s", domain)

     def _domains2db(self):
-        #adds any additional domainnames to the domain database to be searched for
+        """Adds domains to the domain database"""
         try:
-            #we will add the root domain for an FQDN as well.
+            # First we'll grab the domain for the sosreport and obfuscate it to the base root_domain
+            # value, which defaults to "obfuscateddomain.com"
             if self.domainname is not None:
-                self.dn_db[self.root_domain] = self.domainname
-                self.logger.con_out("Obfuscated Domain Created - %s" % self.root_domain)
+                self._dn2db(self.domainname, add_domain=True)

-            split_root_d = self.root_domain.split('.')
+            for dom in self.domains:
+                self._dn2db(dom, add_domain=True)

-            for d in self.domains:
-                if d not in self.dn_db.values(): #no duplicates
-                    d_number = len(self.dn_db)
-                    o_domain = "%s%s.%s" % (split_root_d[0], d_number, split_root_d[1])
-                    self.dn_db[o_domain] = d
-                    self.logger.con_out("Obfuscated Domain Created - %s" % o_domain)
+            for dom in self.short_domains:
+                self._dn2db(dom, add_domain=True)

-            self.domain_count = len(self.dn_db)
             return True

-        except Exception as e: # pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
+            raise Exception("DOMAINS2DB_ERROR: Unable to process domains")
+
+    #########################
+    #   Keyword functions   #
+    #########################

     def _keywords2db(self):
-        #processes optional keywords to add to be obfuscated
+        """Adds keywords to the keyword database"""
         try:
-            if self.keywords: # value is set to None by default
-                k_count = 0
-                for f in self.keywords:
+            if len(self.keywords_file) > 0:
+                for f in self.keywords_file:
                     if os.path.isfile(f):
-                        with open(f, 'rt') as klist:
+                        with open(f, 'r') as klist:
                             for keyword in klist.readlines():
-                                o_kw = "keyword%s" % k_count
-                                self.kw_db[keyword.rstrip()] = o_kw
-                                self.logger.con_out("Added Obfuscated Keyword - %s", o_kw)
-                                k_count += 1
-                        self.logger.con_out("Added Keyword Contents from file - %s", f)
+                                keyword = keyword.rstrip()
+                                if len(keyword) > 1:
+                                    if self.kw_db.get(keyword) is None:  # no duplicates
+                                        o_kw = "obfuscatedkeyword%s" % self.kw_count
+                                        self.kw_db[keyword] = o_kw
+                                        self.logger.con_out(
+                                            "Added Obfuscated Keyword from Keywords File - %s > %s", keyword, o_kw)
+                                        self.kw_count += 1
+                                else:
+                                    self.logger.con_out(
+                                        "Unable to add Obfuscated Keyword - %s", keyword)
+                        self.logger.con_out(
+                            "Added Keyword Contents from file - %s", f)

                     else:
-                        self.logger.con_out("%s does not seem to be a file. Not adding any keywords from" % f)
+                        self.logger.con_out(
+                            "%s does not seem to be a file. Not adding any keywords from it" % f)

-                self.kw_count = k_count
+            if len(self.keywords) > 0:
+                for kw in self.keywords:
+                    if len(kw) > 1:  # no single digit keywords
+                        o_kw = "obfuscatedkeyword%s" % self.kw_count
+                        self.kw_db[kw] = o_kw
+                        self.logger.con_out(
+                            "Added obfuscated keyword - %s > %s", kw, o_kw)
+                        self.kw_count += 1

-        except Exception as e: # pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
+            raise Exception(
+                "KEYWORDS2DB_ERROR: Unable to process keyword - %s", keyword)

-    def _kw2db(self, keyword):
-        #returns the obfuscated value for a keyword
-
-        return self.kw_db[keyword]

     def _sub_keywords(self, line):
-        # this will substitute out any keyword entries on a given line
-        #try:
-        if self.kw_count > 0: # we have obfuscated keywords to work with
-            for k in self.kw_db.keys():
-                if k in line:
-                    line = line.replace(k, self._kw2db(k))
-                    self.logger.debug("Obfuscating Keyword - %s > %s", k, self._kw2db(k))
+        """Accepts a line from a file in an sosreport and obfuscates any known keyword entries on the line."""
+        try:
+            if self.kw_count > 0:  # we have obfuscated keywords to work with
+                for keyword, o_keyword in list(self.kw_db.items()):
+                    if keyword in line:
+                        # insights-client modification to match partial
+                        # words like old soscleaner. original expression
+                        # is r'\b%s\b'
+                        line = re.sub(r'%s' % keyword, o_keyword, line)
+                        self.logger.debug(
+                            "Obfuscating Keyword - %s > %s", keyword, o_keyword)

-        return line
+            return line

-        '''except Exception, e: # pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception('SubKeywordError: Unable to Substitute Keywords')'''
+            raise Exception(
+                'SUB_KEYWORDS_ERROR: Unable to obfuscate keywords on line - %s', line)

-    def _get_hostname(self, hostname='hostname'):
-        #gets the hostname and stores hostname/domainname so they can be filtered out later
+    #########################
+    #   Network Functions   #
+    #########################

+    def _process_route_file(self):
+        """Parses the output from the route command in an sosreport to populate
+        self.net_db with networks to obfuscate
+        """
         try:
-            hostfile = os.path.join(self.dir_path, hostname)
-            fh = open(hostfile, 'rt')
-            name_list = fh.readline().rstrip().split('.')
-            hostname = name_list[0]
-            if len(name_list) > 1:
-                domainname = '.'.join(name_list[1:len(name_list)])
+            route_path = os.path.join(self.dir_path, 'route')
+            if os.path.exists(route_path):
+                fh = open(route_path, 'r')
+                self.logger.info(
+                    "Found route file. Auto-adding routed networks.")
+                # skip the first 2 header lines and get down to the data
+                data = fh.readlines()[2:]
+                for line in data:
+                    x = line.split()
+                    if not x[0] == '0.0.0.0':  # skip the default gateway
+                        net_string = "%s/%s" % (x[0], x[2])
+                        self._ip4_add_network(net_string)
+                        self.logger.debug(
+                            "Network Added by Auto-Route Processing.")
+                fh.close()
             else:
-                domainname = None
-
-            return hostname, domainname
-
-        except IOError as e: #the 'hostname' file doesn't exist or isn't readable for some reason
-            self.logger.warning("Unable to determine system hostname!!!")
-            self.logger.warning("Automatic Hostname Data Obfuscation Will Not Occur!!!")
-            self.logger.warning("To Remedy This Situation please enable the 'general' plugin when running sosreport")
-            self.logger.warning("and/or be sure the 'hostname' symlink exists in the root directory of you sosreport")
-            if not self.quiet:
-                self.logger.exception(e)
+                self.logger.info(
+                    "No route file found. Unable to auto-add routed networks for this system.")
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "PROCESS_ROUTE_FILE_ERROR: Cannot process file - %s", route_path)

-            hostname = None
-            domainname = None
+    def _ip4_new_obfuscate_net(self, netmask):
+        """Returns a new IPv4 Network Object to be used as an obfuscated network."""
+        try:
+            # this is going to get hacky
+            # this will return an IPv4Address object that is 129.0.0.0
+            start_point = self.default_net.broadcast_address + 1
+            x = start_point.compressed.split('.')  # break it apart
+            # calculate the new first octet
+            new_octet = str(int(x[0]) + self.net_count)
+
+            self.net_count += 1
+            # a new string to create the new obfuscated network object
+            new_net_string = "%s.0.0.0/%s" % (new_octet, netmask)
+            if six.PY3:
+                retval = IPv4Network(new_net_string)
+            else:
+                retval = IPv4Network(unicode(new_net_string))

-            return hostname, domainname
+            return retval

-        except Exception as e: # pragma: no cover
-            self.logger.exception(e)
-            raise Exception('GetHostname Error: Cannot resolve hostname from %s') % hostfile
-
-    def _ip2int(self, ipstr):
-        #converts a dotted decimal IP address into an integer that can be incremented
-        integer = struct.unpack('!I', socket.inet_aton(ipstr))[0]
-
-        return integer
-
-    def _int2ip(self, num):
-        #converts an integer stored in the IP database into a dotted decimal IP
-        ip = socket.inet_ntoa(struct.pack('!I', num))
-
-        return ip
-
-    def _ip2db(self, ip):
-        '''
-        adds an IP address to the IP database and returns the obfuscated entry, or returns the
-        existing obfuscated IP entry
-        FORMAT:
-        {$obfuscated_ip: $original_ip,}
-        '''
-
-        ip_num = self._ip2int(ip)
-        ip_found = False
-        db = self.ip_db
-        for k,v in db.items():
-            if v == ip_num:
-                ret_ip = self._int2ip(k)
-                ip_found = True
-        if ip_found: #the entry already existed
-            return ret_ip
-        else: #the entry did not already exist
-            if len(self.ip_db) > 0:
-                new_ip = max(db.keys()) + 1
-            else:
-                new_ip = self._ip2int(self.start_ip)
-            db[new_ip] = ip_num
-
-            return self._int2ip(new_ip)
-
-    def _hn2db(self, hn):
-        '''
-        This will add a hostname for a hostname for an included domain or return an existing entry
-        '''
-        db = self.hn_db
-        hn_found = False
-        for k,v in db.items():
-            if v == hn: #the hostname is in the database
-                ret_hn = k
-                hn_found = True
-        if hn_found:
-            return ret_hn
-        else:
-            self.hostname_count += 1 #we have a new hostname, so we increment the counter to get the host ID number
-            o_domain = self.root_domain
-            for od,d in self.dn_db.items():
-                if d in hn:
-                    o_domain = od
-            new_hn = "host%s.%s" % (self.hostname_count, o_domain)
-            self.hn_db[new_hn] = hn

+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "IP4_NEW_OBFUSCATE_NET_ERROR: Unable to create new network - %s", new_net_string)
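# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  What _ip4_new_obfuscate_net()
# computes, assuming default_net is 128.0.0.0/8 (consistent with the
# "129.0.0.0" comment above): the first obfuscated network starts one address
# past the default network's broadcast address, and each later network bumps
# the first octet again.
# ---------------------------------------------------------------------------
from ipaddress import IPv4Network

default_net = IPv4Network('128.0.0.0/8')
net_count = 0
for netmask in ('8', '24'):
    start_point = default_net.broadcast_address + 1  # 129.0.0.0
    new_octet = str(int(start_point.compressed.split('.')[0]) + net_count)
    net_count += 1
    print(IPv4Network("%s.0.0.0/%s" % (new_octet, netmask)))
# 129.0.0.0/8
# 130.0.0.0/24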
+    def _ip4_parse_network(self, network):
+        """Takes the input values and returns usable objects from them.
+        Generates an IPv4Network object for the original network, and a string
+        value for the subnet mask that is used to create the obfuscated network
+        """
+        try:
+            if six.PY3:
+                net = IPv4Network(network)
             else:
-                net = IPv4Network(unicode(network))
+                net = IPv4Network(unicode(network))
+            subnet = str(net.prefixlen)

-            return new_hn
+            return net, subnet

-    def _walk_report(self, folder):
-        '''returns a dictonary of dictionaries in the format {directory_name:[file1,file2,filex]}'''
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "IP4_PARSE_NETWORK_ERROR: Unable to parse network - %s", network)

-        dir_list = {}
+    def _ip4_network_in_db(self, network):
+        """Returns True if a network already exists in self.net_db. Is used in
+        self._ip4_add_network to ensure we don't get duplicate network entries
+        """
         try:
-            for dirName, subdirList, fileList in os.walk(folder):
-                x = []
-                for fname in fileList:
-                    x.append(fname)
-                dir_list[dirName] = x
+            if any(network in x for x in self.net_db):
+                return True
+            return False

-            return dir_list
-        except Exception as e: # pragma: no cover
+        except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception("WalkReport Error: Unable to Walk Report")
-
-    def _file_list(self, folder):
-        '''returns a list of file names in an sosreport directory'''
-        rtn = []
-        walk = self._walk_report(folder)
-        for key,val in walk.items():
-            for v in val:
-                x=os.path.join(key,v)
-                rtn.append(x)
-
-        self.file_count = len(rtn) #a count of the files we'll have in the final cleaned sosreport, for reporting
-        return rtn
-
-    def _clean_line(self, l):
-        '''this will return a line with obfuscations for all possible variables, hostname, ip, etc.'''
-
-        new_line = self._sub_ip(l) # IP substitution
-        new_line = self._sub_hostname(new_line) # Hostname substitution
-        new_line = self._sub_keywords(new_line) # Keyword Substitution
-
-        return new_line
+            raise Exception(
+                "IP4_NETWORK_IN_DB_ERROR: Unable to test for network in network database - %s", network)
+
+    def _add_loopback_network(self):
+        """
+        Adds an entry into the needed databases to keep loopback addresses
+        somewhat sane. They will be obfuscated, but within the loopback numberspace.
+        So more of a shuffler than anything else.
+        """
+        try:
+            self.logger.info(
+                "Adding Entry to Network Metadata Database - 127.0.0.0")
+            self.net_metadata['127.0.0.0'] = dict()
+            self.net_metadata['127.0.0.0']['host_count'] = 0

-    def _clean_file(self, f):
-        '''this will take a given file path, scrub it accordingly, and save a new copy of the file
-        in the same location'''
-        if os.path.exists(f) and not os.path.islink(f):
-            tmp_file = tempfile.TemporaryFile(mode='w+b')
-            try:
-                fh = open(f, 'r')
-                data = fh.readlines()
-                fh.close()
-                if len(data) > 0: #if the file isn't empty:
-                    for l in data:
-                        new_l = self._clean_line(l)
-                        if six.PY3:
-                            tmp_file.write(new_l.encode('utf-8'))
-                        else:
-                            tmp_file.write(new_l)
+            if six.PY3:
+                lb_net = IPv4Network('127.0.0.0/8')
+            else:
+                lb_net = IPv4Network(unicode('127.0.0.0/8'))
+            loopback_entry = (lb_net, lb_net)
+            self.net_db.append(loopback_entry)
+            self.logger.con_out("Creating Loopback Network Entry")

-                    tmp_file.seek(0)
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "ADD_LOOPBACK_NETWORK_ERROR: Unable to create obfuscated loopback network")
+
+    def _ip4_add_network(self, network):
+        """Takes any networks specified via the command-line parameters as well
+        as the routes file (if present) and creates obfuscated networks for each
+        of them. This is called in self._process_route_file as well as in
+        self.clean_report
+        """
+        try:
+            net, netmask = self._ip4_parse_network(network)
+
+            # make sure we don't have duplicates
+            if not self._ip4_network_in_db(net):
+                new_net = self._ip4_new_obfuscate_net(
+                    netmask)  # the obfuscated network
+                new_entry = (net, new_net)
+
+                self.net_db.append(new_entry)
+                self.logger.con_out(
+                    "Created New Obfuscated Network - %s" % new_net.with_prefixlen)
+
+                self.net_metadata[new_net.network_address.compressed] = dict()
+                self.logger.info(
+                    "Adding Entry to Network Metadata Database - %s" % new_net.with_prefixlen)
+                self.net_metadata[new_net.network_address.compressed]['host_count'] = 0
+            else:
+                self.logger.info(
+                    "Network already exists in database. Not obfuscating. - %s" % network)

-            except Exception as e: # pragma: no cover
-                self.logger.exception(e)
-                raise Exception("CleanFile Error: Cannot Open File For Reading - %s" % f)
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "IP4_ADD_NETWORK_ERROR: Unable to add obfuscated network - %s", network)
+
+    def _ip4_find_network(self, ip):
+        """Takes an IP address and returns back the obfuscated network it belongs to
+        This is called by the _ip4_2_db function
+        The value returned is a string that is the network address for the given network - IPv4Network.network.compressed
+        This can be used to create a new obfuscated IP address for this value
+        """
+        try:
+            if six.PY3:
+                ip = IPv4Address(ip)  # re-cast as an IPv4 object
+            else:
+                ip = IPv4Address(unicode(ip))  # re-cast as an IPv4 object
+            network = self.default_net.network_address
+            for net in self.net_db:
+                if ip in net[0]:
+                    # we have a match! We'll return the proper obfuscated network
+                    network = net[1].network_address

-            try:
-                if len(data) > 0:
-                    new_fh = open(f, 'wb')
-                    for line in tmp_file:
-                        new_fh.write(line)
-                    new_fh.close()
-            except Exception as e: # pragma: no cover
-                self.logger.exception(e)
-                raise Exception("CleanFile Error: Cannot Write to New File - %s" % f)
+            return network

-            finally:
-                tmp_file.close()
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "IP4_FIND_NETWORK_ERROR: Unable to determine obfuscated network for IP address - %s", ip)
+
+    def _ip4_in_db(self, ip):
+        """Returns True if an IP is found in the obfuscation database, and
+        False otherwise. The ip parameter is an IPv4Address object. This function
+        is called from within _ip4_2_db
+        """
+        try:
+            if any(ip in x for x in self.ip_db):
+                return True
+            return False

-    def _add_extra_files(self, files):
-        '''if extra files are to be analyzed with an sosreport, this will add them to the origin path to be analyzed'''
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception(
+                "IP4_IN_DB_ERROR: Unable to verify if IP is in database - %s", ip)

+    def _ip4_2_db(self, orig_ip):
+        """Adds an IP address to the IP database and returns the obfuscated
+        entry, or returns the existing obfuscated IP entry.
+        """
         try:
-            for f in files:
-                self.logger.con_out("adding additional file for analysis: %s" % f)
-                fname = os.path.basename(f)
-                f_new = os.path.join(self.dir_path, fname)
-                shutil.copyfile(f,f_new)
-        except IOError as e:
-            self.logger.con_out("ExtraFileError: %s is not readable or does not exist. Skipping File" % f)
-            self.logger.exception(e)
-            pass
+            if self._ip4_in_db(orig_ip):  # the IP exists already in the database
+                # http://stackoverflow.com/a/18114565/263834
+                data = dict(self.ip_db)
+                # we'll pull the existing obfuscated IP from the database
+                obf_ip = data[orig_ip]
+
+                return obf_ip.compressed
+
+            else:  # it's a new IP, so we have to create a new obfuscated IP for the proper network and a new ip_db entry
+                # get the network information
+                net = self._ip4_find_network(orig_ip)
+                self.net_metadata[net.compressed]['host_count'] += 1
+                # take the network and increment the number of hosts to get to the next available IP
+                if six.PY3:
+                    obf_ip = IPv4Address(
+                        net) + self.net_metadata[net.compressed]['host_count']
+                else:
+                    obf_ip = IPv4Address(
+                        unicode(net)) + self.net_metadata[net.compressed]['host_count']
+                self.ip_db.append((orig_ip, obf_ip))
+
+                return obf_ip.compressed
+
         except Exception as e:  # pragma: no cover
             self.logger.exception(e)
-            raise Exception("ExtraFileError: Unable to Process Extra File - %s" % f)
+            raise Exception(
+                "IP4_2_DB_ERROR: unable to add IP to database - %s", orig_ip)
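# ---------------------------------------------------------------------------
# [Editor's sketch - not part of this patch.]  The _ip4_2_db() allocation in
# miniature: each obfuscated network hands out addresses sequentially by
# adding a per-network host counter to the network address.
# ---------------------------------------------------------------------------
from ipaddress import IPv4Address

net_metadata = {'129.0.0.0': {'host_count': 0}}
ip_db = []

def obfuscate_ip(orig_ip, obf_net='129.0.0.0'):
    existing = dict(ip_db)
    if orig_ip in existing:
        return existing[orig_ip].compressed
    net_metadata[obf_net]['host_count'] += 1
    obf_ip = IPv4Address(obf_net) + net_metadata[obf_net]['host_count']
    ip_db.append((orig_ip, obf_ip))
    return obf_ip.compressed

assert obfuscate_ip('192.168.1.10') == '129.0.0.1'
assert obfuscate_ip('192.168.1.11') == '129.0.0.2'
assert obfuscate_ip('192.168.1.10') == '129.0.0.1'  # stable mapping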
-        except OSError as e:    # pragma: no cover
+        except OSError as e:  # pragma: no cover
+            # If the file already exists
            if e.errno == errno.EEXIST:
                pass
+            # If there's an IO error (disk is full)
+            elif e.errno == errno.EIO:  # pragma: no cover
+                self.logger.exception(e)
+                self.logger.con_out(
+                    "CLEAN_FILE_ERROR: Not enough disk space to complete report obfuscation")
+                self.logger.con_out(
+                    "CLEAN_FILE_ERROR: Remove partially obfuscated report and other artifacts")
+                self.logger.con_out(
+                    "CLEAN_FILE_ERROR: Please remedy the disk pressure and re-run soscleaner")
+                self._clean_up()
            else:  # pragma: no cover
                self.logger.exception(e)
-                raise e
+                raise Exception(
+                    "CLEAN_FILES_ONLY_ERROR: Unable to clean file from dataset - OSError")

-        except Exception as e:    # pragma: no cover
+        except Exception as e:  # pragma: no cover
            self.logger.exception(e)
-            raise Exception("CleanFilesOnlyError: unable to process")
+            raise Exception(
+                "CLEAN_FILES_ONLY_ERROR: Unable to clean files from dataset")

-    def clean_report(self, options, sosreport): # pragma: no cover
-        '''this is the primary function, to put everything together and analyze an sosreport'''
+    def _process_report_dir(self, report_dir):  # pragma: no cover
+        """Overrides the default (/tmp) location for the soscleaner run"""
+        try:  # pragma: no cover
+            if os.path.isdir(report_dir):
+                self.report_dir = report_dir
+
+            return True

-        if options.report_dir: # override the default location for artifacts (/tmp)
-            if os.path.isdir(options.report_dir):
-                self.report_dir = options.report_dir

-        self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment()
-        self._start_logging(self.logfile)
-        self._get_disclaimer()
+        except Exception as e:
+            self.logger.exception(e)
+            raise Exception(
+                "PROCESS_REPORT_DIR_ERROR: Unable to set report output directory")
+
+    def _start_soscleaner(self):  # pragma: no cover
+        """Sets up the data structures and filesystem attributes to get soscleaner going properly"""
+        try:
+            self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment()
+            self._start_logging(self.logfile)
+            self._check_uid()  # make sure soscleaner is running as root
+            self._get_version()
+            self._get_disclaimer()
+        except Exception as e:
+            self.logger.exception(e)
+            raise Exception(
+                "START_SOSCLEANER_ERROR: Unable to create needed artifacts to run soscleaner")
+
+    def clean_report(self, options, sosreport):  # pragma: no cover
+        """The primary function, to put everything together and analyze an sosreport."""
+        if options.report_dir:
+            self._process_report_dir(options.report_dir)
+        self.loglevel = options.loglevel
+        self._start_soscleaner()
+        self._read_later_config_options()
+        if options.obfuscate_macs:
+            self.obfuscate_macs = options.obfuscate_macs
+        self._add_loopback_network()
+        if options.networks:  # we have defined networks
+            self.networks = options.networks
+            for network in options.networks:
+                self._ip4_add_network(network)
        if options.domains:
-            self.domains = options.domains
+            self.domains.extend(options.domains)
+        if options.keywords_file:
+            self.keywords_file = options.keywords_file
        if options.keywords:
            self.keywords = options.keywords
-        self._keywords2db()
+            self._keywords2db()
+        if options.users:  # users from the command line with the -u option
+            self._process_user_option(options.users)
+        if options.users_file:
+            self.users_file = options.users_file
        if not sosreport:
            if not options.files:
-                raise Exception("Error: You must supply either an sosreport and/or files to process")
-
-
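# A minimal, runnable sketch of the mapping technique the _ip4_* helpers
# above implement: each real network is paired with an obfuscated one, and
# a per-network host counter hands out stable fake addresses. The sample
# networks and variable names below are illustrative assumptions that
# mirror net_db/net_metadata/ip_db, not soscleaner's actual state.
from ipaddress import IPv4Address, IPv4Network

net_db = [(IPv4Network(u'10.0.0.0/24'), IPv4Network(u'128.0.0.0/24'))]  # (real, obfuscated)
net_metadata = {u'128.0.0.0': {'host_count': 0}}
ip_db = []  # (original, obfuscated) pairs already assigned


def obfuscate_ip(orig_ip):
    orig_ip = IPv4Address(orig_ip)
    for orig, obf in ip_db:           # reuse an existing mapping if present
        if orig == orig_ip:
            return obf.compressed
    for real_net, obf_net in net_db:  # otherwise find the matching network
        if orig_ip in real_net:
            key = obf_net.network_address.compressed
            net_metadata[key]['host_count'] += 1  # next free host number
            obf = obf_net.network_address + net_metadata[key]['host_count']
            ip_db.append((orig_ip, obf))
            return obf.compressed
    raise ValueError("no obfuscated network for %s" % orig_ip)

# obfuscate_ip(u'10.0.0.5') always returns '128.0.0.1' for this input,
# which is what keeps an obfuscated report internally consistent.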
self.logger.con_out("No sosreport supplied. Only processing specific files") + raise Exception( + "Error: You must supply either an sosreport and/or files to process") + self.logger.con_out( + "No sosreport supplied. Only processing specific files") + if not options.networks: + self.logger.con_out( + "No sosreport supplied and no networks specified. All IP addresses will be obfuscated into the same default subnet") self._clean_files_only(options.files) else: # we DO have an sosreport to analyze self.report = self._extract_sosreport(sosreport) self._make_dest_env() # create the working directory if options.hostname_path: - self.hostname, self.domainname = self._get_hostname(options.hostname_path) + self.hostname, self.domainname = self._get_hostname( + options.hostname_path) else: self.hostname, self.domainname = self._get_hostname() - + self._process_route_file() if options.files: self._add_extra_files(options.files) - if self.hostname: # if we have a hostname that's not a None type - self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later - - self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible + # we'll prime the hostname pump to clear out a ton of useless logic later + self.hn_db['host0'] = self.hostname self._domains2db() files = self._file_list(self.dir_path) - self.logger.con_out("IP Obfuscation Start Address - %s", self.start_ip) + self._process_users_file() + self.logger.con_out( + "IP Obfuscation Network Created - %s", self.default_net.compressed) self.logger.con_out("*** SOSCleaner Processing ***") self.logger.info("Working Directory - %s", self.dir_path) for f in files: @@ -690,8 +1665,10 @@ def clean_report(self, options, sosreport): # pragma: no cover self._clean_file(f) self.logger.con_out("*** SOSCleaner Statistics ***") self.logger.con_out("IP Addresses Obfuscated - %s", len(self.ip_db)) - self.logger.con_out("Hostnames Obfuscated - %s" , len(self.hn_db)) - self.logger.con_out("Domains Obfuscated - %s" , len(self.dn_db)) + self.logger.con_out("Hostnames Obfuscated - %s", len(self.hn_db)) + self.logger.con_out("Domains Obfuscated - %s", len(self.dn_db)) + self.logger.con_out("Users Obfuscated - %s", self.user_count) + self.logger.con_out("Keywords Obfuscated - %s", self.kw_count) self.logger.con_out("Total Files Analyzed - %s", self.file_count) self.logger.con_out("*** SOSCleaner Artifacts ***") self._create_reports() @@ -707,6 +1684,9 @@ def clean_report(self, options, sosreport): # pragma: no cover # end insights-client modifications self._create_archive() + self.soscleaner_checksum() + self.finalmsg() + return_data = [self.archive_path, self.logfile, self.ip_report] if self.hostname: diff --git a/insights/tests/client/data_collector/test_done.py b/insights/tests/client/data_collector/test_done.py index 2b2e677d8..54afb1d03 100644 --- a/insights/tests/client/data_collector/test_done.py +++ b/insights/tests/client/data_collector/test_done.py @@ -76,6 +76,10 @@ def test_soscleaner_additions(isdir_, clean_opts): for returning before creating the archive ''' clean_opts.hostname_path = 'test' + clean_opts.obfuscate_macs = False + clean_opts.networks = None + clean_opts.users = None + clean_opts.users_file = None # test that soscleaner returns as normal by default, # then that it returns None when no_tar_file is not None @@ -87,6 +91,7 @@ def test_soscleaner_additions(isdir_, clean_opts): s.file_count = Mock() s._prep_environment = Mock(return_value=(None, None, 
None, None, None)) s._start_logging = Mock() + s._check_uid = Mock() s._get_disclaimer = Mock() s._keywords2db = Mock() s._clean_files_only = Mock() @@ -98,6 +103,9 @@ def test_soscleaner_additions(isdir_, clean_opts): s._domains2db = Mock() s._file_list = Mock(return_value=[]) s._clean_file = Mock() + s._process_route_file = Mock() + s._process_users_file = Mock() + s.soscleaner_checksum = Mock() s._create_reports = Mock(side_effect=setattr(s, 'logfile', 'test')) s._create_reports = Mock(side_effect=setattr(s, 'ip_report', 'test')) s._create_archive = Mock(side_effect=setattr(s, 'archive_path', 'test')) From 0a5572f1ec909238cbfdf243af60b11e7e3f4163 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Wed, 26 Aug 2020 15:04:29 -0400 Subject: [PATCH 158/892] connection: pull insights-client version directly (#2724) * connection: pull insights-client version directly Signed-off-by: Link Dupont * connection: move import insights_client... ... into a try:except: block. This way it will fail gracefully during unit tests. Signed-off-by: Link Dupont Co-authored-by: Jeremy Crafts --- insights/client/connection.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/insights/client/connection.py b/insights/client/connection.py index e09bc5545..e888460c7 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -181,10 +181,11 @@ def user_agent(self): else: core_version = "Core %s" % package_info["VERSION"] - client_version = "insights-client" - pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse(client_version)) - if pkg is not None: - client_version = "%s/%s" % (pkg.project_name, pkg.version) + try: + from insights_client import constants as insights_client_constants + client_version = "insights-client/{0}".format(insights_client_constants.InsightsConstants.version) + except ImportError: + client_version = "insights-client" if os.path.isfile(constants.ppidfile): with open(constants.ppidfile, 'r') as f: From 152fc0b1f133e25260fb2c22c305567c7c0f9d9d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 26 Aug 2020 15:44:48 -0400 Subject: [PATCH 159/892] write selected egg release channel to the archive (#2683) * write selected egg release channel to the archive Signed-off-by: Jeremy Crafts * flake Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 8 ++ insights/client/constants.py | 1 + insights/client/core_collector.py | 1 + insights/client/data_collector.py | 17 +++++ .../data_collector/test_write_metadata.py | 73 +++++++++++++++++++ insights/tests/client/init/test_fetch.py | 40 +++++++++- 6 files changed, 139 insertions(+), 1 deletion(-) create mode 100644 insights/tests/client/data_collector/test_write_metadata.py diff --git a/insights/client/__init__.py b/insights/client/__init__.py index d3461963c..08630f5db 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -130,6 +130,14 @@ def fetch(self, force=False): # guess the URLs based on what legacy setting is egg_release = self.get_egg_url() + + try: + # write the release path to temp so we can collect it + # in the archive + write_to_disk(constants.egg_release_file, content=egg_release) + except (OSError, IOError) as e: + logger.debug('Could not write egg release file: %s', str(e)) + egg_url = self.config.egg_path egg_gpg_url = self.config.egg_gpg_path if egg_url is None: diff --git a/insights/client/constants.py b/insights/client/constants.py index c36a5fcb6..758a95bb9 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ 
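# The user_agent change above is an instance of a simple pattern: try the
# direct import for precise version data and degrade to a generic string
# when the package is absent (for example under unit tests). A hedged
# standalone sketch; insights_client is simply not importable here:
def client_version_string():
    try:
        from insights_client import constants as icc  # missing in test envs
        return "insights-client/{0}".format(icc.InsightsConstants.version)
    except ImportError:
        return "insights-client"  # graceful default keeps the header valid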
-45,6 +45,7 @@ class InsightsConstants(object): sig_kill_bad = 101 cached_branch_info = os.path.join(default_conf_dir, '.branch_info') pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid') + egg_release_file = os.path.join(os.sep, 'tmp', 'insights-client-egg-release') ppidfile = os.path.join(os.sep, 'tmp', 'insights-client.ppid') valid_compressors = ("gz", "xz", "bz2", "none") # RPM version in which core collection was released diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py index ba8c09669..ff9d32bd3 100644 --- a/insights/client/core_collector.py +++ b/insights/client/core_collector.py @@ -80,4 +80,5 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): self._write_version_info() self._write_tags() self._write_blacklist_report(blacklist_report) + self._write_egg_release() logger.debug('Metadata collection finished.') diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 264efd955..3596a62b6 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -126,6 +126,22 @@ def _write_blacklist_report(self, blacklist_report): self.archive.add_metadata_to_archive( json.dumps(blacklist_report), '/blacklist_report') + def _write_egg_release(self): + logger.debug("Writing egg release to archive...") + egg_release = '' + try: + with open(constants.egg_release_file) as fil: + egg_release = fil.read() + except IOError as e: + logger.debug('Could not read the egg release file :%s', str(e)) + try: + os.remove(constants.egg_release_file) + except OSError as e: + logger.debug('Could not remove the egg release file: %s', str(e)) + + self.archive.add_metadata_to_archive( + egg_release, '/egg_release') + def _run_pre_command(self, pre_cmd): ''' Run a pre command to get external args for a command @@ -286,6 +302,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): self._write_version_info() self._write_tags() self._write_blacklist_report(blacklist_report) + self._write_egg_release() logger.debug('Metadata collection finished.') def redact(self, rm_conf): diff --git a/insights/tests/client/data_collector/test_write_metadata.py b/insights/tests/client/data_collector/test_write_metadata.py new file mode 100644 index 000000000..d529e7d22 --- /dev/null +++ b/insights/tests/client/data_collector/test_write_metadata.py @@ -0,0 +1,73 @@ +import six +import mock +from insights.client.constants import InsightsConstants as constants +from insights.client.config import InsightsConfig +from insights.client.data_collector import DataCollector +from mock.mock import patch + + +@patch('insights.client.data_collector.os.remove') +@patch('insights.client.data_collector.InsightsArchive') +def test_egg_release_file_read_and_written(archive, remove): + ''' + Verify the egg release file is read from file and + written to the archive + ''' + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True) as mock_open: + mock_open.side_effect = [mock.mock_open(read_data='/testvalue').return_value] + c = InsightsConfig() + d = DataCollector(c) + d._write_egg_release() + remove.assert_called_once_with(constants.egg_release_file) + d.archive.add_metadata_to_archive.assert_called_once_with('/testvalue', '/egg_release') + + +@patch('insights.client.data_collector.os.remove') +@patch('insights.client.data_collector.InsightsArchive') +def test_egg_release_file_read_and_written_no_delete(archive, remove): + ''' + Verify the egg 
release file is read from file and + written to the archive, even if the file cannot be deleted + ''' + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + remove.side_effect = OSError('test') + + with patch(open_name, create=True) as mock_open: + mock_open.side_effect = [mock.mock_open(read_data='/testvalue').return_value] + c = InsightsConfig() + d = DataCollector(c) + d._write_egg_release() + remove.assert_called_once_with(constants.egg_release_file) + d.archive.add_metadata_to_archive.assert_called_once_with('/testvalue', '/egg_release') + + +@patch('insights.client.data_collector.os.remove') +@patch('insights.client.data_collector.InsightsArchive') +def test_egg_release_file_read_and_written_no_read(archive, remove): + ''' + Verify that when the egg release file cannot be read, + a blank string is written to the archive + ''' + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + remove.side_effect = OSError('test') + + with patch(open_name, create=True) as mock_open: + mock_open.side_effect = IOError('test') + c = InsightsConfig() + d = DataCollector(c) + d._write_egg_release() + remove.assert_called_once_with(constants.egg_release_file) + d.archive.add_metadata_to_archive.assert_called_once_with('', '/egg_release') diff --git a/insights/tests/client/init/test_fetch.py b/insights/tests/client/init/test_fetch.py index 4683a7965..b31e57c8d 100644 --- a/insights/tests/client/init/test_fetch.py +++ b/insights/tests/client/init/test_fetch.py @@ -1,6 +1,7 @@ from insights.client import InsightsClient from insights.client.config import InsightsConfig -from mock.mock import Mock +from insights.client.constants import InsightsConstants as constants +from mock.mock import Mock, patch from pytest import fixture from tempfile import NamedTemporaryFile @@ -42,3 +43,40 @@ def test_request_forced(insights_client): url = "{0}{1}".format(insights_client.connection.base_url, source_path) timeout = insights_client.config.http_timeout insights_client.session.get.assert_called_once_with(url, timeout=timeout) + + +@patch('insights.client.InsightsClient._fetch', Mock()) +@patch('insights.client.os.path', Mock()) +@patch('insights.client.tempfile', Mock()) +@patch('insights.client.InsightsClient.get_egg_url', return_value='/testvalue') +@patch('insights.client.write_to_disk') +def test_egg_release_written(write_to_disk, get_egg_url, insights_client): + ''' + Verify egg release file successfully written after request + ''' + insights_client.fetch(force=False) + write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + + +@patch('insights.client.InsightsClient._fetch') +@patch('insights.client.os.path', Mock()) +@patch('insights.client.tempfile', Mock()) +@patch('insights.client.InsightsClient.get_egg_url', return_value='/testvalue') +@patch('insights.client.write_to_disk') +def test_egg_release_error(write_to_disk, get_egg_url, _fetch, insights_client): + ''' + Verify OSError and IOError are caught and process continues on + ''' + write_to_disk.side_effect = OSError('test') + assert insights_client.fetch(force=False) + write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + assert _fetch.call_count == 2 + + write_to_disk.side_effect = None + write_to_disk.reset_mock() + _fetch.reset_mock() + + write_to_disk.side_effect = IOError('test') + assert insights_client.fetch(force=False) + write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + 
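# The tests above all lean on the same mock_open recipe; this standalone
# sketch shows the pattern in isolation. read_release is a made-up
# stand-in for the read step of _write_egg_release, not insights code.
import six
from mock import patch, mock_open


def read_release(path):
    try:
        with open(path) as f:
            return f.read()
    except IOError:
        return ''  # an unreadable file degrades to an empty string


open_name = 'builtins.open' if six.PY3 else '__builtin__.open'
with patch(open_name, mock_open(read_data='/testvalue'), create=True):
    assert read_release('/tmp/anything') == '/testvalue'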
assert _fetch.call_count == 2 From 01c5a0ccd5ac4b4bf5cbf815e61c58e0cebcd8a7 Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Wed, 26 Aug 2020 22:10:33 +0200 Subject: [PATCH 160/892] Spamassassin channels parser (#2703) * Add spamassassin_channels spec Signed-off-by: Stanislav Kontar * Add parser for spamassassin channels https://projects.engineering.redhat.com/browse/PSINSIGHTS-136 Signed-off-by: Stanislav Kontar * Improved tests and attribute handling Signed-off-by: Stanislav Kontar * Changed command to not require glob Signed-off-by: Stanislav Kontar --- .../spamassassin_channels.rst | 3 + insights/parsers/spamassassin_channels.py | 44 ++++++++++++ .../tests/test_spamassassin_channels.py | 69 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 119 insertions(+) create mode 100644 docs/shared_parsers_catalog/spamassassin_channels.rst create mode 100644 insights/parsers/spamassassin_channels.py create mode 100644 insights/parsers/tests/test_spamassassin_channels.py diff --git a/docs/shared_parsers_catalog/spamassassin_channels.rst b/docs/shared_parsers_catalog/spamassassin_channels.rst new file mode 100644 index 000000000..3b579b2af --- /dev/null +++ b/docs/shared_parsers_catalog/spamassassin_channels.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.spamassassin_channels + :members: + :show-inheritance: diff --git a/insights/parsers/spamassassin_channels.py b/insights/parsers/spamassassin_channels.py new file mode 100644 index 000000000..0dfd905a6 --- /dev/null +++ b/insights/parsers/spamassassin_channels.py @@ -0,0 +1,44 @@ +""" +SpamassassinChannels - command ``/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d`` +========================================================================================================= +""" +import re +from collections import OrderedDict + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.specs import Specs + + +@parser(Specs.spamassassin_channels) +class SpamassassinChannels(CommandParser): + """ + Class for parsing the ``/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d`` + command. 
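The parse_content that follows groups `grep -r` output by splitting each line on its first colon; the file paths on the left never contain a colon, while the values on the right may. The same technique in isolation, with made-up input lines, looks like this sketch:

import re
from collections import OrderedDict

lines = [
    "/etc/mail/spamassassin/channel.d/a.conf:CHANNELURL=one.example.org",
    "/etc/mail/spamassassin/channel.d/a.conf:  CHANNELURL=two.example.org",
    "/etc/mail/spamassassin/channel.d/b.conf:CHANNELURL=three.example.org",
]
channels = OrderedDict()
for line in lines:
    file_name, rest = line.split(":", 1)  # split on the first colon only
    value = re.sub(r'^\s*CHANNELURL=', '', rest).strip()
    channels.setdefault(file_name, []).append(value)
# channels now groups every CHANNELURL value under its configuration file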
+
+    Attributes:
+        channels (OrderedDict): channels grouped by configuration file
+
+    Sample output of this command is::
+
+        /etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought.rules.yerp.org
+        /etc/mail/spamassassin/channel.d/spamassassin-official.conf:CHANNELURL=updates.spamassassin.org
+
+    Examples:
+        >>> type(spamassassin_channels)
+        <class 'insights.parsers.spamassassin_channels.SpamassassinChannels'>
+        >>> spamassassin_channels.channels
+        OrderedDict([('/etc/mail/spamassassin/channel.d/sought.conf', ['sought.rules.yerp.org']), ('/etc/mail/spamassassin/channel.d/spamassassin-official.conf', ['updates.spamassassin.org'])])
+    """
+    def __init__(self, *args, **kwargs):
+        self.channels = OrderedDict()
+        super(SpamassassinChannels, self).__init__(*args, **kwargs)
+
+    def parse_content(self, content):
+
+        for line in content:
+            file_name, file_line = line.split(":", 1)
+            channel = re.sub('^\\s*CHANNELURL=', '', file_line).strip()
+            if file_name not in self.channels:
+                self.channels[file_name] = []
+            self.channels[file_name].append(channel)
diff --git a/insights/parsers/tests/test_spamassassin_channels.py b/insights/parsers/tests/test_spamassassin_channels.py
new file mode 100644
index 000000000..4775d39b3
--- /dev/null
+++ b/insights/parsers/tests/test_spamassassin_channels.py
@@ -0,0 +1,69 @@
+import doctest
+import pytest
+
+from insights.parsers import spamassassin_channels
+from insights.parsers.spamassassin_channels import SpamassassinChannels
+from insights.tests import context_wrap
+
+DEFAULT = """
+/etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought.rules.yerp.org
+/etc/mail/spamassassin/channel.d/spamassassin-official.conf:CHANNELURL=updates.spamassassin.org
+""".strip()
+
+EXPECTED_DEFAULT = {
+    "/etc/mail/spamassassin/channel.d/sought.conf": ["sought.rules.yerp.org"],
+    "/etc/mail/spamassassin/channel.d/spamassassin-official.conf": ["updates.spamassassin.org"],
+}
+
+SPACES = """
+/etc/mail/spamassassin/channel.d/sought.conf: CHANNELURL=sought.rules.yerp.org
+/etc/mail/spamassassin/channel.d/spamassassin-official.conf:CHANNELURL=updates.spamassassin.org
+""".strip()
+
+EXPECTED_SPACES = {
+    "/etc/mail/spamassassin/channel.d/sought.conf": ["sought.rules.yerp.org"],
+    "/etc/mail/spamassassin/channel.d/spamassassin-official.conf": ["updates.spamassassin.org"],
}
+
+TWO_IN_FILE = """
+/etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought.rules.yerp.org
+/etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought2.rules.yerp.org
+/etc/mail/spamassassin/channel.d/spamassassin-official.conf:CHANNELURL=updates.spamassassin.org
+""".strip()
+
+EXPECTED_TWO_IN_FILE = {
+    "/etc/mail/spamassassin/channel.d/sought.conf": ["sought.rules.yerp.org", "sought2.rules.yerp.org"],
+    "/etc/mail/spamassassin/channel.d/spamassassin-official.conf": ["updates.spamassassin.org"],
+}
+
+INVALID = """
+/etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought.rules.yerp.org
+/etc/mail/spamassassin/channel.d/sought.conf:CHANNELURL=sought2.rules.yerp.org
+garbage garbage
+""".strip()
+
+
+TEST_CASES = [
+    (DEFAULT, EXPECTED_DEFAULT),
+    (SPACES, EXPECTED_SPACES),
+    (TWO_IN_FILE, EXPECTED_TWO_IN_FILE),
+]
+
+
+@pytest.mark.parametrize("input, output", TEST_CASES)
+def test_spamassassin_channels(input, output):
+    test = SpamassassinChannels(context_wrap(input))
+    assert test.channels == output
+
+
+def test_exception():
+    with pytest.raises(Exception):
+        SpamassassinChannels(context_wrap(INVALID))
+
+
+def test_doc_examples():
+    env = {
+        "spamassassin_channels": SpamassassinChannels(context_wrap(DEFAULT)),
+    }
+    failed, total =
doctest.testmod(spamassassin_channels, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index dca9fb1b3..d6f3a808a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -561,6 +561,7 @@ class Specs(SpecSet): sockstat = RegistryPoint() softnet_stat = RegistryPoint() software_collections_list = RegistryPoint() + spamassassin_channels = RegistryPoint() spfile_ora = RegistryPoint(multi_output=True) ssh_config_d = RegistryPoint(multi_output=True, filterable=True) ssh_config = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index b8e249ee5..d28fe5858 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -596,6 +596,7 @@ def sap_sid_name(broker): sockstat = simple_file("/proc/net/sockstat") softnet_stat = simple_file("proc/net/softnet_stat") software_collections_list = simple_command('/usr/bin/scl --list') + spamassassin_channels = simple_command("/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d") ss = simple_command("/usr/sbin/ss -tupna") ssh_config = simple_file("/etc/ssh/ssh_config") ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 0f120868a..4be18a3b8 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -189,6 +189,7 @@ class InsightsArchiveSpecs(Specs): sestatus = simple_file("insights_commands/sestatus_-b") smbstatus_p = simple_file("insights_commands/smbstatus_-p") software_collections_list = simple_file('insights_commands/scl_--list') + spamassassin_channels = simple_file('insights_commands/grep_-r_s_CHANNELURL_.etc.mail.spamassassin.channel.d') ss = simple_file("insights_commands/ss_-tupna") sshd_config_perms = simple_file("insights_commands/ls_-l_.etc.ssh.sshd_config") subscription_manager_id = simple_file("insights_commands/subscription-manager_identity") From dc33049e7b474131cbff2f9c7d7cfa77978f0e88 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 27 Aug 2020 04:30:44 +0800 Subject: [PATCH 161/892] New parser AnsibleTowerLicenseType (#2695) * New parser AnsibleTowerLicense Signed-off-by: Xiangce Liu * Fix flake8 errors Signed-off-by: Xiangce Liu * Add spec to sos archive Signed-off-by: Xiangce Liu * Change spec to 'awx-manage check_license' Signed-off-by: Xiangce Liu * 100% coverage Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/awx_manage.rst | 3 ++ insights/parsers/awx_manage.py | 39 ++++++++++++++++ insights/parsers/tests/test_awx_manage.py | 54 ++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 99 insertions(+) create mode 100644 docs/shared_parsers_catalog/awx_manage.rst create mode 100644 insights/parsers/awx_manage.py create mode 100644 insights/parsers/tests/test_awx_manage.py diff --git a/docs/shared_parsers_catalog/awx_manage.rst b/docs/shared_parsers_catalog/awx_manage.rst new file mode 100644 index 000000000..afe92f997 --- /dev/null +++ b/docs/shared_parsers_catalog/awx_manage.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.awx_manage
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/awx_manage.py b/insights/parsers/awx_manage.py
new file mode 100644
index 000000000..562580e9d
--- /dev/null
+++ b/insights/parsers/awx_manage.py
@@ -0,0 +1,39 @@
+"""
+AwxManage - commands ``awx-manage``
+===================================
+
+Parsers contained in this module are:
+
+AnsibleTowerLicenseType - command ``awx-manage check_license``
+--------------------------------------------------------------
+"""
+
+from insights import JSONParser, parser, CommandParser
+from insights.parsers import SkipException, ParseException
+from insights.specs import Specs
+
+
+@parser(Specs.awx_manage_check_license)
+class AnsibleTowerLicenseType(CommandParser, JSONParser):
+    """
+    Parses the output of command ``awx-manage check_license``
+
+    Sample output of the command::
+
+        enterprise
+
+    Attributes:
+        type (str): The license type, e.g. "enterprise"
+
+    Examples:
+    >>> type(awx_license)
+    <class 'insights.parsers.awx_manage.AnsibleTowerLicenseType'>
+    >>> awx_license.type == "enterprise"
+    True
+    """
+    def parse_content(self, content):
+        if not content:
+            raise SkipException
+        if len(content) != 1:
+            raise ParseException("Invalid output: {0}".format(content))
+        self.type = content[0].strip()
diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py
new file mode 100644
index 000000000..c01fb1531
--- /dev/null
+++ b/insights/parsers/tests/test_awx_manage.py
@@ -0,0 +1,54 @@
+import doctest
+import pytest
+
+from insights.parsers import awx_manage, SkipException, ParseException
+from insights.core import ContentException
+from insights.parsers.awx_manage import AnsibleTowerLicenseType
+from insights.tests import context_wrap
+
+
+NO_LICENSE = """
+none
+""".strip()
+
+STD_LICENSE = """
+enterprise
+""".strip()
+
+NG_COMMAND_0 = ""
+
+NG_COMMAND_1 = """
+awx-manage: command not found
+""".strip()
+
+NG_COMMAND_2 = """
+Traceback (most recent call last):
+File "/bin/awx-manage", line 11, in <module>
+    load_entry_point('awx==3.6.4', 'console_scripts', 'awx-manage')()
+""".strip()
+
+
+def test_ansible_tower_license():
+    ret = AnsibleTowerLicenseType(context_wrap(NO_LICENSE))
+    assert ret.type == 'none'
+    ret = AnsibleTowerLicenseType(context_wrap(STD_LICENSE))
+    assert ret.type == 'enterprise'
+
+
+def test_ansible_tower_license_ab():
+    with pytest.raises(SkipException):
+        AnsibleTowerLicenseType(context_wrap(NG_COMMAND_0))
+
+    with pytest.raises(ContentException):
+        AnsibleTowerLicenseType(context_wrap(NG_COMMAND_1))
+
+    with pytest.raises(ParseException):
+        AnsibleTowerLicenseType(context_wrap(NG_COMMAND_2))
+
+
+def test_awx_manage_doc_examples():
+    env = {
+        'awx_license': AnsibleTowerLicenseType(context_wrap(STD_LICENSE)),
+    }
+    failed, total = doctest.testmod(awx_manage, globs=env)
+    assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index d6f3a808a..99ec4dc7c 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -27,6 +27,7 @@ class Specs(SpecSet):
    aws_instance_id_doc = RegistryPoint()
    aws_instance_id_pkcs7 = RegistryPoint()
    aws_instance_type = RegistryPoint()
+    awx_manage_check_license = RegistryPoint()
    azure_instance_type = RegistryPoint()
    bios_uuid = RegistryPoint()
    blkid = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index d28fe5858..7579e9af7 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -113,6 +113,7 @@ def is_aws(broker):
    aws_instance_id_doc = simple_command("/usr/bin/curl -s
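# parse_content above encodes a useful convention for single-line command
# output: empty content is skippable, and more than one line means the
# command itself failed (shell error, traceback). A standalone sketch of
# the same check, with plain exceptions standing in for insights' own:
def parse_single_line(content):
    if not content:
        raise LookupError("no content")                   # ~ SkipException
    if len(content) != 1:
        raise ValueError("Invalid output: %r" % content)  # ~ ParseException
    return content[0].strip()

# parse_single_line(["enterprise"]) -> "enterprise", while a traceback
# spanning several lines raises ValueError instead of being mis-parsed.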
http://169.254.169.254/latest/dynamic/instance-identity/document --connect-timeout 5", deps=[is_aws]) aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[is_aws]) + awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license") @datasource(CloudProvider) def is_azure(broker): diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 4be18a3b8..fef217774 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -14,6 +14,7 @@ class InsightsArchiveSpecs(Specs): auditctl_status = simple_file("insights_commands/auditctl_-s") aws_instance_id_doc = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc") aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7") + awx_manage_check_license = simple_file("insights_commands/awx-manage_check_license") azure_instance_type = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type") bios_uuid = simple_file("insights_commands/dmidecode_-s_system-uuid") blkid = simple_file("insights_commands/blkid_-c_.dev.null") From 0c1a3c89ccc1cf40a29a01c1789b44f8d411010a Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Thu, 27 Aug 2020 13:45:35 +0800 Subject: [PATCH 162/892] Update the spec of DmsetupInfo (#2716) * Update the spec of DmsetupInfo Signed-off-by: shlao * remove dmsetup_info spec from the test that checks for removed specs Signed-off-by: shlao --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 7579e9af7..be4137939 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -229,6 +229,7 @@ def is_ceph_monitor(broker): dmesg = simple_command("/bin/dmesg") dmesg_log = simple_file("/var/log/dmesg") dmidecode = simple_command("/usr/sbin/dmidecode") + dmsetup_info = simple_command("/usr/sbin/dmsetup info -C") dnf_conf = simple_file("/etc/dnf/dnf.conf") docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index fef217774..0edddd258 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -41,6 +41,7 @@ class InsightsArchiveSpecs(Specs): display_name = simple_file("display_name") dmesg = simple_file("insights_commands/dmesg") dmidecode = simple_file("insights_commands/dmidecode") + dmsetup_info = simple_file("insights_commands/dmsetup_info_-C") docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 5c541e711..b73a6a869 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -52,7 +52,6 @@ def test_get_component_by_symbolic_name(): # Filter out the (B) specs with this list skipped_specs 
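# The insights_commands/dmsetup_info_-C path above is the mangled form of
# the command string. A rough sketch of that mangling; the real client's
# rules handle more edge cases, so treat this as an approximation only:
import re


def mangle_command(command):
    mangled = re.sub(r'^/(usr/|)(s|)bin/', '', command)  # drop the binary path
    mangled = re.sub(r'[^\w\-\.\/]+', '_', mangled)      # specials become _
    return mangled.replace('/', '.')                     # argument slashes become .

# mangle_command("/usr/sbin/dmsetup info -C") -> "dmsetup_info_-C"
# mangle_command("/usr/bin/awx-manage check_license") -> "awx-manage_check_license"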
= [ 'ceph_osd_df', - 'dmsetup_info', 'du_dirs', 'gluster_peer_status', 'gluster_v_status', From ff7836b1ba4321de4d0c321b0a6f92ab8e7a19f7 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Thu, 27 Aug 2020 05:47:23 +0000 Subject: [PATCH 163/892] Parse foreman production log lines (#2730) Fix issue #2729 Parse each log line into raw_message, message, timestamp for ease of use. Signed-off-by: Akshay Gaikwad --- insights/parsers/foreman_log.py | 20 ++++++++++++++++++-- insights/parsers/tests/test_foreman_log.py | 4 ++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/insights/parsers/foreman_log.py b/insights/parsers/foreman_log.py index 61ed4328e..3117735c9 100644 --- a/insights/parsers/foreman_log.py +++ b/insights/parsers/foreman_log.py @@ -49,8 +49,24 @@ class SatelliteLog(LogFileOutput): @parser(Specs.foreman_production_log) class ProductionLog(LogFileOutput): - """Class for parsing ``foreman/production.log`` file.""" - pass + """Class for parsing ``foreman/production.log`` file. + + Each line is parsed into a dictionary with the following keys: + + * raw_message (str) - complete log line + * message (str) - the body of the log + * timestamp (datetime) - date and time of log as datetime object + """ + def _parse_line(self, line): + msg_info = {'raw_message': line} + line_split = line.split(None, 2) + if len(line_split) > 2: + try: + msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format) + msg_info['message'] = line_split[2] + except ValueError: + pass + return msg_info @parser(Specs.candlepin_log) diff --git a/insights/parsers/tests/test_foreman_log.py b/insights/parsers/tests/test_foreman_log.py index 4291ae968..9154c7a5d 100644 --- a/insights/parsers/tests/test_foreman_log.py +++ b/insights/parsers/tests/test_foreman_log.py @@ -176,6 +176,10 @@ def test_production_log(): fm_log = ProductionLog(context_wrap(PRODUCTION_LOG)) assert 2 == len(fm_log.get("Rendered text template")) + line_dict = fm_log.get("Katello::Api::V2::RepositoriesController#sync_complete")[0] + assert line_dict["message"] == \ + "[I] Processing by Katello::Api::V2::RepositoriesController#sync_complete as JSON" + assert line_dict["timestamp"] == datetime(2015, 11, 13, 3, 30, 7) assert "Expired 48 Reports" in fm_log assert fm_log.get("Completed 200 OK in 93")[0]['raw_message'] == \ "2015-11-13 09:41:58 [I] Completed 200 OK in 93ms (Views: 2.9ms | ActiveRecord: 0.3ms)" From e1e7ee0ce3a10f09ef243c2b7445268349a322fa Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 27 Aug 2020 14:18:21 -0400 Subject: [PATCH 164/892] add messaging about SOScleaner output formats (#2734) Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 3596a62b6..846b45e75 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -362,6 +362,7 @@ def done(self, conf, rm_conf): and archive files. """ if self.config.obfuscate: + logger.warn('WARNING: Some SOSCleaner obfuscation output formatting has changed. 
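# A self-contained sketch of the per-line parsing the ProductionLog change
# above performs: split off the first two whitespace-separated fields, try
# strptime, and fall back to just raw_message when a line has no timestamp.
from datetime import datetime


def parse_line(line, time_format='%Y-%m-%d %H:%M:%S'):
    msg_info = {'raw_message': line}
    parts = line.split(None, 2)
    if len(parts) > 2:
        try:
            msg_info['timestamp'] = datetime.strptime(' '.join(parts[:2]), time_format)
            msg_info['message'] = parts[2]
        except ValueError:
            pass  # keep only raw_message for unparseable lines
    return msg_info

# parse_line("2015-11-13 03:30:07 [I] Completed 200 OK")['timestamp']
# -> datetime(2015, 11, 13, 3, 30, 7)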
See https://access.redhat.com/articles/5355431 for more details.') if rm_conf and rm_conf.get('keywords'): logger.warn("WARNING: Skipping keywords defined in blacklist configuration") cleaner = SOSCleaner(quiet=True) From 1509b5383698b1c4ea9ce170510eff5256146622 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 27 Aug 2020 15:59:54 -0400 Subject: [PATCH 165/892] Update json map Signed-off-by: Stephen Adams --- insights/uploader_json_map.json | 35 +++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/insights/uploader_json_map.json b/insights/uploader_json_map.json index 2b5fc722e..9cb032d8c 100644 --- a/insights/uploader_json_map.json +++ b/insights/uploader_json_map.json @@ -15,6 +15,11 @@ "pattern": [], "symbolic_name": "aws_instance_id_pkcs7" }, + { + "command": "/usr/bin/awx-manage check_license", + "pattern": [], + "symbolic_name": "awx_manage_check_license" + }, { "command": "python -m insights.tools.cat --no-header azure_instance_type", "pattern": [], @@ -177,7 +182,6 @@ "Warning: QLogic ISP3XXX Network Driver - this hardware has not undergone testing by Red Hat and might not be certified", "__cpufreq_add_dev", "blocked FC remote port time out: removing target and saving binding", - "crashkernel reservation failed", "crashkernel=auto resulted in zero bytes of reserved memory", "e1000: E1000 MODULE IS NOT SUPPORTED", "efi", @@ -522,7 +526,7 @@ "symbolic_name": "ls_edac_mc" }, { - "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/sysconfig", + "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/sysconfig /etc/rc.d/init.d", "pattern": [], "symbolic_name": "ls_etc" }, @@ -708,8 +712,6 @@ "FullQualifiedHostname", "Hostname", "InstanceName", - "LOG Q0I=> NiIRead: P=::; L=::: recv", - "LOG Q0I=> NiPConnect2: :: connect", "SID", "SapVersionInfo", "SystemNumber" @@ -1106,7 +1108,6 @@ "ceilometer-coll", "chronyd", "clvmd", - "cmirrord", "corosync", "crmd", "dlm_controld", @@ -1264,6 +1265,11 @@ "pattern": [], "symbolic_name": "software_collections_list" }, + { + "command": "/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d", + "pattern": [], + "symbolic_name": "spamassassin_channels" + }, { "command": "/usr/sbin/subscription-manager identity", "pattern": [], @@ -2255,7 +2261,6 @@ "DMA Status error. 
Resetting chip", "Detected Tx Unit Hang", "Device is still in reset", - "Device offlined - not ready after error recovery", "Error I40E_AQ_RC_EINVAL adding RX filters on PF, promiscuous mode forced on", "Error deleting EBS Disk volume aws", "Error running DeviceResume dm_task_run failed", @@ -2282,7 +2287,6 @@ "Scheduled import of stream", "Steal time is >", "TX driver issue detected, PF reset issued", - "TX stuck with port_enabled=1: resetting channels", "This system does not support \"SSSE3\"", "Throttling request took", "TypeError: object of type 'NoneType' has no len()", @@ -3487,12 +3491,24 @@ "LOCAL_ENABLE", "Local_Enable", "SSL_ENABLE", + "SSL_SSLV2", "SSL_SSLV3", + "SSL_TLSV1", + "SSL_TLSV1_1", + "SSL_TLSV1_2", "Ssl_Enable", + "Ssl_Sslv2", "Ssl_Sslv3", + "Ssl_Tlsv1", + "Ssl_Tlsv1_1", + "Ssl_Tlsv1_2", "local_enable", "ssl_enable", - "ssl_sslv3" + "ssl_sslv2", + "ssl_sslv3", + "ssl_tlsv1", + "ssl_tlsv1_1", + "ssl_tlsv1_2" ], "symbolic_name": "vsftpd_conf" }, @@ -3708,7 +3724,6 @@ "file": "/var/log/neutron/l3-agent.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Duplicate iptables rule detected", "Error while deleting router", "Stderr: Another app is currently holding the xtables lock", "Timed out waiting for RPC response" @@ -4003,5 +4018,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-08-12T15:22:22.060797" + "version": "2020-08-27T15:58:30.933074" } \ No newline at end of file From ec06bf6af269e0e428084b9ea24ab071e609496f Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Wed, 2 Sep 2020 09:24:17 -0400 Subject: [PATCH 166/892] url_cache: Delete the cache key rather than the entire cache (#2738) Signed-off-by: Link Dupont --- insights/client/url_cache.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/client/url_cache.py b/insights/client/url_cache.py index f75f37065..78976bba0 100644 --- a/insights/client/url_cache.py +++ b/insights/client/url_cache.py @@ -17,6 +17,7 @@ class URLCache(object): URLCache is a simple pickle cache, intended to be used as an HTTP response cache. """ + def __init__(self, path=None): """ Initialize a URLCache, loading entries from @path, if provided. @@ -36,7 +37,7 @@ def get(self, url): try: item = self._cache[url] if item.cached_at + _KEEPTIME <= time.time(): - del (self._cache, url) + del self._cache[url] return None return self._cache[url] except KeyError: From 7f65690d0828b08db4a5c96384d64383f92d4058 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 2 Sep 2020 15:26:21 -0500 Subject: [PATCH 167/892] Reverse context definition order. Add core3_archive spec file. (#2739) Core3 archives were being processed as regular archives because the context detection tested contexts in reverse order of their definition, and HostArchiveContext was defined after SerializedArchiveContext (core3 archive). Regular specs wouldn't work for core3 archives, but the client puts some critical files like satellite branch_info directly into the archive root, so I've allowed regular specs to depend on and fire in the presence of SerializedArchiveContext. 
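The ordering rule the commit above depends on is easy to state in isolation: contexts register in definition order and identify() walks the registry in reverse, so the most recently defined matching context wins a tie. A minimal sketch, with placeholder class names standing in for HostArchiveContext and SerializedArchiveContext:

registry = []


class Context(object):
    marker = None


class HostArchive(Context):
    marker = "insights_commands"


class SerializedArchive(Context):
    marker = "insights_archive.txt"


registry.extend([HostArchive, SerializedArchive])  # definition order


def identify(files):
    for ctx in reversed(registry):  # later definitions are tried first
        if any(ctx.marker in f for f in files):
            return ctx


# Both markers are present here, and definition order breaks the tie:
assert identify(["a/insights_archive.txt", "a/insights_commands/date"]) is SerializedArchive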
Signed-off-by: Christopher Sams --- insights/__init__.py | 1 + insights/core/context.py | 10 ++++++---- insights/shell.py | 6 ++++++ insights/specs/core3_archive.py | 16 ++++++++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 insights/specs/core3_archive.py diff --git a/insights/__init__.py b/insights/__init__.py index 9e4fbb352..eaf6f6291 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -137,6 +137,7 @@ def _run(broker, graph=None, root=None, context=None, inventory=None): def load_default_plugins(): dr.load_components("insights.specs.default") dr.load_components("insights.specs.insights_archive") + dr.load_components("insights.specs.core3_archive") dr.load_components("insights.specs.sos_archive") dr.load_components("insights.specs.jdr_archive") diff --git a/insights/core/context.py b/insights/core/context.py index 0d309040e..3e73c4a43 100644 --- a/insights/core/context.py +++ b/insights/core/context.py @@ -136,6 +136,8 @@ def __init__(cls, name, bases, dct): return ExecutionContextMeta.registry.append(cls) + # Remember that contexts are tried *in reverse order* so that they + # may be overridden by just loading a plugin. @classmethod def identify(cls, files): for e in reversed(cls.registry): @@ -214,13 +216,13 @@ def __init__(self, root='/', timeout=30, all_files=None): @fs_root -class SerializedArchiveContext(ExecutionContext): - marker = "insights_archive.txt" +class HostArchiveContext(ExecutionContext): + marker = "insights_commands" @fs_root -class HostArchiveContext(ExecutionContext): - marker = "insights_commands" +class SerializedArchiveContext(ExecutionContext): + marker = "insights_archive.txt" @fs_root diff --git a/insights/shell.py b/insights/shell.py index dc9504e25..549b676d8 100644 --- a/insights/shell.py +++ b/insights/shell.py @@ -17,6 +17,8 @@ from pygments.console import ansiformat from traitlets.config.loader import Config +from insights.core.context import SerializedArchiveContext +from insights.core.serde import Hydration from insights.parsr.query import * # noqa from insights.parsr.query import eq, matches, make_child_query as q # noqa from insights.parsr.query.boolean import FALSE, TRUE @@ -59,6 +61,10 @@ def make_broker(ctx): broker = dr.Broker() broker[ctx.__class__] = ctx + if isinstance(ctx, SerializedArchiveContext): + h = Hydration(ctx.root) + broker = h.hydrate(broker=broker) + dr.run(datasources, broker=broker) del broker[ctx.__class__] diff --git a/insights/specs/core3_archive.py b/insights/specs/core3_archive.py new file mode 100644 index 000000000..0c3228b9a --- /dev/null +++ b/insights/specs/core3_archive.py @@ -0,0 +1,16 @@ +""" +This file holds specs that are in the new core3 archives but that don't have an entry +in the meta_data directory. This happens when the client itself collects the data +and puts it into the archive. 
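The module below relies on a small functools trick: binding the context with partial so that every spec declared through the alias is pinned to the serialized-archive context. A toy sketch, with a stand-in for the real simple_file factory:

from functools import partial


def simple_file(path, context=None):
    # stand-in for insights.core.spec_factory.simple_file
    return (path, context)


class FakeSerializedArchiveContext(object):
    pass


core3_simple_file = partial(simple_file, context=FakeSerializedArchiveContext)
assert core3_simple_file("/branch_info") == ("/branch_info", FakeSerializedArchiveContext)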
+"""
+from functools import partial
+
+from insights.core.context import SerializedArchiveContext
+from insights.specs import Specs
+from insights.core.spec_factory import simple_file
+
+simple_file = partial(simple_file, context=SerializedArchiveContext)
+
+
+class Core3Specs(Specs):
+    branch_info = simple_file("/branch_info")

From e7fd0594bb19929846caf9508992d157a3a8f718 Mon Sep 17 00:00:00 2001
From: Xiaoxue Wang
Date: Thu, 3 Sep 2020 22:31:28 +0800
Subject: [PATCH 168/892] Add parser and spec for
 /sys/module/{dm_mod,scsi_mod}/parameters/use_blk_mq (#2702)

* Add spec of /sys/module/{dm_mod,scsi_mod}/parameters/use_blk_mq

Signed-off-by: XiaoXue Wang

* Add parser for {dm_mod,scsi_mod}_use_blk_mq

Signed-off-by: XiaoXue Wang

* Fix doc error in sys_module

Signed-off-by: XiaoXue Wang

* Handle 1/0 as known file content

Signed-off-by: XiaoXue Wang

* Change the is_on behavior

Signed-off-by: XiaoXue Wang

* Use ValueError instead of ParseException

Signed-off-by: XiaoXue Wang

* Remove the newly added specs from the skipped_specs list

Signed-off-by: XiaoXue Wang
---
 docs/shared_parsers_catalog/sys_module.rst | 3 +
 insights/parsers/sys_module.py | 89 +++++++++++++++++++
 insights/parsers/tests/test_sys_module.py | 53 +++++++++++
 insights/specs/__init__.py | 2 +
 insights/specs/default.py | 2 +
 insights/specs/sos_archive.py | 2 +
 .../collection_rules/test_map_components.py | 4 +-
 7 files changed, 152 insertions(+), 3 deletions(-)
 create mode 100644 docs/shared_parsers_catalog/sys_module.rst
 create mode 100644 insights/parsers/sys_module.py
 create mode 100644 insights/parsers/tests/test_sys_module.py
diff --git a/docs/shared_parsers_catalog/sys_module.rst b/docs/shared_parsers_catalog/sys_module.rst
new file mode 100644
index 000000000..d24fd608b
--- /dev/null
+++ b/docs/shared_parsers_catalog/sys_module.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.sys_module
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/sys_module.py b/insights/parsers/sys_module.py
new file mode 100644
index 000000000..35a548b46
--- /dev/null
+++ b/insights/parsers/sys_module.py
@@ -0,0 +1,89 @@
+"""
+``/sys/module`` System Module Information
+=========================================
+
+Parsers for the system module information.
+
+Parsers included in this module are:
+
+DMModUseBlkMq - file ``/sys/module/dm_mod/parameters/use_blk_mq``
+-----------------------------------------------------------------
+
+SCSIModUseBlkMq - file ``/sys/module/scsi_mod/parameters/use_blk_mq``
+---------------------------------------------------------------------
+
+"""
+
+
+from insights import parser, Parser
+from insights.parsers import SkipException
+from insights.specs import Specs
+
+
+class XModUseBlkMq(Parser):
+    """
+    Parser for the file `/sys/module/{dm_mod,scsi_mod}/parameters/use_blk_mq`.
+    The file content shows whether the use_blk_mq parameter is on.
+
+    Sample Content::
+
+        Y
+
+    Raises:
+        SkipException: When there is nothing to parse.
+
+    Attributes:
+        val(str): Raw data of the content.
+    """
+
+    def parse_content(self, content):
+        if not content or len(content) != 1:
+            raise SkipException()
+        self.val = content[0].strip()
+
+    @property
+    def is_on(self):
+        """
+        Returns:
+            (bool): True for on, False for off.
+
+        Raises:
+            ValueError: When the value is unknown and is_on cannot be determined.
+ """ + if self.val in ['Y', '1']: + return True + elif self.val in ['N', '0']: + return False + else: + raise ValueError("Unexpected value {0}, please get raw data from attribute 'val' and tell is_on by yourself.".format(self.val)) + + +@parser(Specs.dm_mod_use_blk_mq) +class DMModUseBlkMq(XModUseBlkMq): + """ + This file `/sys/module/dm_mod/parameters/use_blk_mq` shows if use_blk_mq + parameter is on. + + Examples:: + + >>> dm_mod_use_blk_mq.val + 'Y' + >>> dm_mod_use_blk_mq.is_on + True + """ + pass + + +@parser(Specs.scsi_mod_use_blk_mq) +class SCSIModUseBlkMq(XModUseBlkMq): + """ + This file `/sys/module/scsi_mod/parameters/use_blk_mq` shows if use_blk_mq + parameter is on. + + Examples:: + + >>> scsi_mod_use_blk_mq.val + 'N' + >>> scsi_mod_use_blk_mq.is_on + False + """ + pass diff --git a/insights/parsers/tests/test_sys_module.py b/insights/parsers/tests/test_sys_module.py new file mode 100644 index 000000000..951c03839 --- /dev/null +++ b/insights/parsers/tests/test_sys_module.py @@ -0,0 +1,53 @@ +import doctest +import pytest +from insights.parsers import sys_module, SkipException +from insights.parsers.sys_module import DMModUseBlkMq, SCSIModUseBlkMq +from insights.tests import context_wrap + + +SCSI_DM_MOD_USE_BLK_MQ_Y = """ +Y +""".strip() +SCSI_DM_MOD_USE_BLK_MQ_N = """ +N +""".strip() +SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE = """ +unknow_case +""".strip() + +SCSI_DM_MOD_USE_BLK_MQ_EMPTY = """ + +""".strip() + + +def test_doc_examples(): + env = { + 'dm_mod_use_blk_mq': DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_Y)), + 'scsi_mod_use_blk_mq': SCSIModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_N)), + } + failed, total = doctest.testmod(sys_module, globs=env) + assert failed == 0 + + +def test_XModUseBlkMq(): + dm_mod_y = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_Y)) + assert dm_mod_y.is_on is True + assert dm_mod_y.val == 'Y' + + scsi_mod_n = SCSIModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_N)) + assert scsi_mod_n.is_on is False + assert scsi_mod_n.val == 'N' + + dm_mod_unknow = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE)) + assert dm_mod_unknow.val == 'unknow_case' + + +def test_class_exceptions(): + with pytest.raises(SkipException): + dm_mod = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_EMPTY)) + assert dm_mod is None + + with pytest.raises(ValueError) as e: + dm_mod_unknow = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE)) + dm_mod_unknow.is_on + assert "Unexpected value unknow_case, please get raw data from attribute 'val' and tell is_on by yourself." 
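# Outside of insights, the check these parsers implement is a few lines of
# stdlib code. The path layout and the Y/N/1/0 mapping follow the sysfs
# conventions exercised by the tests above; the function name is made up.
def use_blk_mq_enabled(module):
    path = "/sys/module/%s/parameters/use_blk_mq" % module
    with open(path) as f:
        val = f.read().strip()
    if val in ('Y', '1'):
        return True
    if val in ('N', '0'):
        return False
    raise ValueError("unexpected use_blk_mq value: %r" % val)

# use_blk_mq_enabled("scsi_mod") -> False on a host whose file reads 'N'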
in str(e) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 99ec4dc7c..3aa3f42ef 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -120,6 +120,7 @@ class Specs(SpecSet): dirsrv_errors = RegistryPoint(multi_output=True, filterable=True) display_java = RegistryPoint() display_name = RegistryPoint() + dm_mod_use_blk_mq = RegistryPoint() dmesg = RegistryPoint(filterable=True) dmesg_log = RegistryPoint(filterable=True) dmidecode = RegistryPoint() @@ -545,6 +546,7 @@ class Specs(SpecSet): scheduler = RegistryPoint(multi_output=True) sched_rt_runtime_us = RegistryPoint() scsi = RegistryPoint() + scsi_mod_use_blk_mq = RegistryPoint() sctp_asc = RegistryPoint() sctp_eps = RegistryPoint() sctp_snmp = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index be4137939..1617bd3ad 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -226,6 +226,7 @@ def is_ceph_monitor(broker): dig_edns = simple_command("/usr/bin/dig +edns=0 . SOA") dig_noedns = simple_command("/usr/bin/dig +noedns . SOA") dirsrv_errors = glob_file("var/log/dirsrv/*/errors*") + dm_mod_use_blk_mq = simple_file("/sys/module/dm_mod/parameters/use_blk_mq") dmesg = simple_command("/bin/dmesg") dmesg_log = simple_file("/var/log/dmesg") dmidecode = simple_command("/usr/sbin/dmidecode") @@ -586,6 +587,7 @@ def sap_sid_name(broker): scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') scsi_fwver = glob_file('/sys/class/scsi_host/host[0-9]*/fwrev') + scsi_mod_use_blk_mq = simple_file("/sys/module/scsi_mod/parameters/use_blk_mq") sctp_asc = simple_file('/proc/net/sctp/assocs') sctp_eps = simple_file('/proc/net/sctp/eps') sctp_snmp = simple_file('/proc/net/sctp/snmp') diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 171c331f3..0107721b6 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -60,6 +60,7 @@ class SosSpecs(Specs): docker_list_images = simple_file("sos_commands/docker/docker_images") docker_network = simple_file("/etc/sysconfig/docker-network") docker_storage = simple_file("/etc/sysconfig/docker-storage") + dm_mod_use_blk_mq = simple_file("/sys/module/dm_mod/parameters/use_blk_mq") dmesg = first_file(["sos_commands/kernel/dmesg", "sos_commands/general/dmesg", "var/log/dmesg"]) dmidecode = simple_file("sos_commands/hardware/dmidecode") dmsetup_info = simple_file("sos_commands/devicemapper/dmsetup_info_-c") @@ -215,6 +216,7 @@ class SosSpecs(Specs): sap_host_profile = simple_file("/usr/sap/hostctrl/exe/host_profile") sched_rt_runtime_us = simple_file("/proc/sys/kernel/sched_rt_runtime_us") scheduler = glob_file("/sys/block/*/queue/scheduler") + scsi_mod_use_blk_mq = simple_file("/sys/module/scsi_mod/parameters/use_blk_mq") secure = simple_file("/var/log/secure") sestatus = simple_file("sos_commands/selinux/sestatus_-b") sssd_logs = glob_file("var/log/sssd/*.log") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index b73a6a869..43bdb5066 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -101,9 +101,7 @@ def test_get_component_by_symbolic_name(): 'sap_host_profile', 'sched_rt_runtime_us', 'libvirtd_qemu_log', - 'mlx4_port', - 'dm_mod_use_blk_mq', - 'scsi_mod_use_blk_mq' + 'mlx4_port' ] # first, make sure our list is proper and one of 
these
From b023cc1325227a5b9b904c1e6a2404a63c911d35 Mon Sep 17 00:00:00 2001
From: Chris Sams
Date: Thu, 3 Sep 2020 09:58:08 -0500
Subject: [PATCH 169/892] Use builtins.open in python 3. (#2740)

We used codecs.open for py2 compat, and it's an order of magnitude
slower decoding unicode than vanilla open in py3. Use six to be
smarter.

Signed-off-by: Christopher Sams
---
 insights/core/spec_factory.py | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py
index a7d571678..5f558ec69 100644
--- a/insights/core/spec_factory.py
+++ b/insights/core/spec_factory.py
@@ -245,8 +245,12 @@ def load(self):
            rc, out = self.ctx.shell_out(args, keep_rc=True, env=SAFE_ENV)
            self.rc = rc
            return out
-        with codecs.open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
-            return [l.rstrip("\n") for l in f]
+        if six.PY3:
+            with open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
+                return [l.rstrip("\n") for l in f]
+        else:
+            with codecs.open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
+                return [l.rstrip("\n") for l in f]

    def _stream(self):
        """
@@ -263,8 +267,12 @@ def _stream(self):
                with streams.connect(*args, env=SAFE_ENV) as s:
                    yield s
            else:
-                with codecs.open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
-                    yield f
+                if six.PY3:
+                    with open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
+                        yield f
+                else:
+                    with codecs.open(self.path, "r", encoding="utf-8", errors="surrogateescape") as f:
+                        yield f
        except StopIteration:
            raise
        except Exception as ex:
From d23b71a5295180f3f4d5b70df1ca8d2c66682e9a Mon Sep 17 00:00:00 2001
From: Rohan Arora
Date: Thu, 3 Sep 2020 21:29:45 +0530
Subject: [PATCH 170/892] Fix %f regex for get_after (#2641)

* Fix %f regex for get_after

strptime supports microseconds with %f but not milliseconds. When logs
carry milliseconds, %f still converts them to a datetime object correctly
because zeros are padded to the right. When LogFileOutput uses %f in
time_format but timestamps have only 3-digit milliseconds, the get_after
function does not work correctly because it looks for a 6-digit number.
This change adjusts the regex to {1,6} digits so that get_after works.
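A short demonstration of the padding behaviour described above, runnable on a standard CPython: three millisecond digits parse with %f because they are padded to microseconds on the right, so only the search regex needed loosening from exactly six digits to one through six.

import re
from datetime import datetime

ts = datetime.strptime('2020-05-28 19:25:36,892', '%Y-%m-%d %H:%M:%S,%f')
assert ts.microsecond == 892000          # 892 ms, right-padded to microseconds

assert re.match(r'\d{1,6}$', '892')      # the new pattern matches 3 digits
assert not re.match(r'\d{6}$', '892')    # the old pattern required exactly 6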
Source: see the comments for %f in the following documentation: https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior Signed-off-by: Rohan Arora * Add test case Signed-off-by: Rohan Arora --- insights/core/__init__.py | 2 +- insights/tests/test_logfileoutput.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/insights/core/__init__.py b/insights/core/__init__.py index a11708f93..75be17222 100644 --- a/insights/core/__init__.py +++ b/insights/core/__init__.py @@ -1186,7 +1186,7 @@ def get_after(self, timestamp, s=None): 'p': r'\w{2}', # AM / PM 'M': r'([012345]\d)', # Minutes 'S': r'([012345]\d|60)', # Seconds, including leap second - 'f': r'\d{6}', # Microseconds + 'f': r'\d{1,6}', # Microseconds } # Construct the regex from the time string diff --git a/insights/tests/test_logfileoutput.py b/insights/tests/test_logfileoutput.py index a3c13ef6d..dd2ca68de 100644 --- a/insights/tests/test_logfileoutput.py +++ b/insights/tests/test_logfileoutput.py @@ -301,3 +301,22 @@ def test_logs_with_two_timestamps(): logerr = BadClassMariaDBLog(ctx) assert list(logerr.get_after(datetime(2017, 3, 27, 3, 39, 46))) is None assert 'get_after does not recognise time formats of type ' in str(exc) + + +MILLISECONDS_TOWER_LOG = """ +2020-05-28 19:25:36,892 WARNING django.request Not Found: /api/v1/me/ +2020-05-28 19:25:46,944 INFO awx.api.authentication User admin performed a GET to /api/v2/activity_stream/ through the API +2020-05-28 19:33:03,125 INFO awx.api.authentication User admin performed a GET to /api/v2/job_templates/7/survey_spec/ through the API +2020-05-28 19:33:03,413 INFO awx.api.authentication User admin performed a GET to /api/v2/projects/ through the API +""".strip() + + +class FakeTowerLog(LogFileOutput): + time_format = '%Y-%m-%d %H:%M:%S,%f' + + +def test_logs_with_milliseconds(): + ctx = context_wrap(MILLISECONDS_TOWER_LOG, path='/var/log/tower/tower.log') + log = FakeTowerLog(ctx) + assert len(log.lines) == 4 + assert len(list(log.get_after(datetime(2020, 5, 28, 19, 25, 46, 944)))) == 3 From 915b2d029836dc34f9a87a07890df3bf47e5050e Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 3 Sep 2020 12:57:23 -0400 Subject: [PATCH 171/892] move uploader_json_map file to client dir (#2741) Signed-off-by: Jeremy Crafts --- insights/client/map_components.py | 2 +- insights/{ => client}/uploader_json_map.json | 0 insights/tests/client/collection_rules/test_map_components.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename insights/{ => client}/uploader_json_map.json (100%) diff --git a/insights/client/map_components.py b/insights/client/map_components.py index 9e3502c53..fe90db25d 100644 --- a/insights/client/map_components.py +++ b/insights/client/map_components.py @@ -11,7 +11,7 @@ APP_NAME = constants.app_name logger = logging.getLogger(__name__) -uploader_json_file = pkgutil.get_data(insights.__name__, "uploader_json_map.json") +uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json") uploader_json = json.loads(uploader_json_file) diff --git a/insights/uploader_json_map.json b/insights/client/uploader_json_map.json similarity index 100% rename from insights/uploader_json_map.json rename to insights/client/uploader_json_map.json diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 43bdb5066..879fd45c3 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++
b/insights/tests/client/collection_rules/test_map_components.py @@ -11,7 +11,7 @@ _search_uploader_json, _get_component_by_symbolic_name) -uploader_json_file = pkgutil.get_data(insights.__name__, "uploader_json_map.json") +uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json") uploader_json = json.loads(uploader_json_file) default_specs = vars(DefaultSpecs).keys() sos_specs = vars(SosSpecs).keys() From 2469b41ccb34ac86b99441833ce886e5ba00b81a Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Fri, 4 Sep 2020 12:17:39 +0800 Subject: [PATCH 172/892] Add spec ipsec_conf (#2707) Signed-off-by: shlao --- docs/shared_parsers_catalog/ipsec_conf.rst | 3 + insights/parsers/ipsec_conf.py | 87 ++++++++++++++++++++++ insights/parsers/tests/test_ipsec_conf.py | 55 ++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 147 insertions(+) create mode 100644 docs/shared_parsers_catalog/ipsec_conf.rst create mode 100644 insights/parsers/ipsec_conf.py create mode 100644 insights/parsers/tests/test_ipsec_conf.py diff --git a/docs/shared_parsers_catalog/ipsec_conf.rst b/docs/shared_parsers_catalog/ipsec_conf.rst new file mode 100644 index 000000000..d2b6920d8 --- /dev/null +++ b/docs/shared_parsers_catalog/ipsec_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ipsec_conf + :members: + :show-inheritance: diff --git a/insights/parsers/ipsec_conf.py b/insights/parsers/ipsec_conf.py new file mode 100644 index 000000000..a061f788e --- /dev/null +++ b/insights/parsers/ipsec_conf.py @@ -0,0 +1,87 @@ +""" +IpsecConf parser - file ``/etc/ipsec.conf`` +=========================================== + +The IpsecConf parser reads the file /etc/ipsec.conf, which holds +the configuration and control information +for the Libreswan IPsec subsystem. +""" + +from collections import defaultdict + +from insights.specs import Specs +from insights.core import CommandParser +from .. import parser, get_active_lines +from insights.parsers import SkipException + + +@parser(Specs.ipsec_conf) +class IpsecConf(CommandParser, dict): + """ + Class for parsing the file ``/etc/ipsec.conf``, which holds the configuration + and control information for the Libreswan IPsec subsystem. + + Raises: + SkipException: When content is empty or cannot be parsed. + + Sample content of the file is:: + + # /etc/ipsec.conf - Libreswan IPsec configuration file + # + # see 'man ipsec.conf' and 'man pluto' for more information + # + # For example configurations and documentation, see https://libreswan.org/wiki/ + + config setup + # plutodebug="control parsing" + # plutodebug="all crypt" + plutodebug=none + # It seems that T-Mobile in the US and Rogers/Fido in Canada are + # using 25/8 as "private" address space on their wireless networks.
+ # This range has never been announced via BGP (at least up to 2015) + virtual_private=%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v4:25.0.0.0/8,%v4:100.64.0.0/10,%v6:fd00::/8,%v6:fe80::/10 + + # if it exists, include system wide crypto-policy defaults + include /etc/crypto-policies/back-ends/libreswan.config + + # It is best to add your IPsec connections as separate files in /etc/ipsec.d/ + include /etc/ipsec.d/*.conf + + Examples: + >>> ipsec_conf['config']['setup']['plutodebug'] == 'none' + True + >>> ipsec_conf['include'] + ['/etc/crypto-policies/back-ends/libreswan.config', '/etc/ipsec.d/*.conf'] + """ + + def parse_content(self, content): + if not content: + raise SkipException('No content.') + + ipsec_type, ipsec_name = "", "" + ipsec_sections = {} + + try: + for line in get_active_lines(content): + if line.startswith('include '): + include, path = [field.strip() for field in line.split()] + array = self.get('include', []) + array.append(path) + self['include'] = array + continue + + if line.startswith(('conn ', 'config ')): + ipsec_type, ipsec_name = [field.strip() for field in line.split()] + ipsec_sections = self.get(ipsec_type, defaultdict(dict)) + continue + + if '=' not in line or ipsec_type == "" or ipsec_name == "": + # skip the options that aren't within a section + continue + + key, value = [field.strip() for field in line.split('=')] + ipsec_sections[ipsec_name][key] = value + self[ipsec_type] = ipsec_sections + + except ValueError: + raise SkipException('Syntax error') diff --git a/insights/parsers/tests/test_ipsec_conf.py b/insights/parsers/tests/test_ipsec_conf.py new file mode 100644 index 000000000..bf6398cef --- /dev/null +++ b/insights/parsers/tests/test_ipsec_conf.py @@ -0,0 +1,55 @@ +import doctest +import pytest + +from insights.parsers import ipsec_conf, SkipException +from insights.parsers.ipsec_conf import IpsecConf +from insights.tests import context_wrap + + +IPSEC_NORMAL = """ +# /etc/ipsec.conf - Libreswan IPsec configuration file +# +# see 'man ipsec.conf' and 'man pluto' for more information +# +# For example configurations and documentation, see https://libreswan.org/wiki/ + +config setup + # plutodebug="control parsing" + # plutodebug="all crypt" + plutodebug=none + # It seems that T-Mobile in the US and Rogers/Fido in Canada are + # using 25/8 as "private" address space on their wireless networks.
+ # This range has never been announced via BGP (at least up to 2015) + virtual_private=%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v4:25.0.0.0/8,%v4:100.64.0.0/10,%v6:fd00::/8,%v6:fe80::/10 + +# if it exists, include system wide crypto-policy defaults +include /etc/crypto-policies/back-ends/libreswan.config + +# It is best to add your IPsec connections as separate files in /etc/ipsec.d/ +include /etc/ipsec.d/*.conf +""" + + +def test_config_no_data(): + with pytest.raises(SkipException): + IpsecConf(context_wrap("")) + + +def test_config_dnssec(): + conf = IpsecConf(context_wrap(IPSEC_NORMAL)) + assert len(conf.get('include')) == 2 + assert conf.get('include')[0] == '/etc/crypto-policies/back-ends/libreswan.config' + assert conf.get('include')[1] == '/etc/ipsec.d/*.conf' + assert conf.get('config') is not None + assert conf.get('config').get('setup') is not None + assert conf['config']['setup']['plutodebug'] == 'none' + assert conf['config']['setup']['virtual_private'] == '%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v4:25.0.0.0/8,%v4:100.64.0.0/10,%v6:fd00::/8,%v6:fe80::/10' + assert conf.get('conn') is None + + +def test_doc_examples(): + env = { + "ipsec_conf": IpsecConf(context_wrap(IPSEC_NORMAL)), + } + failed, total = doctest.testmod(ipsec_conf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 3aa3f42ef..d02899c1c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -249,6 +249,7 @@ class Specs(SpecSet): ip_netns_exec_namespace_lsof = RegistryPoint(multi_output=True, filterable=True) ip_route_show_table_all = RegistryPoint() ip_s_link = RegistryPoint() + ipsec_conf = RegistryPoint(filterable=True) iptables_permanent = RegistryPoint() iptables = RegistryPoint() ipv4_neigh = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 1617bd3ad..878417417 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -351,6 +351,7 @@ def httpd_cmd(broker): ipcs_m = simple_command("/usr/bin/ipcs -m") ipcs_m_p = simple_command("/usr/bin/ipcs -m -p") ipcs_s = simple_command("/usr/bin/ipcs -s") + ipsec_conf = simple_file("/etc/ipsec.conf") iptables = simple_command("/sbin/iptables-save") iptables_permanent = simple_file("etc/sysconfig/iptables") ip6tables = simple_command("/sbin/ip6tables-save") From bd170dd8bba2961a62d05f65862f5e1308042353 Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Fri, 4 Sep 2020 13:20:36 +0800 Subject: [PATCH 173/892] Add spec libssh_config (#2708) Signed-off-by: shlao --- docs/shared_parsers_catalog/libssh_config.rst | 3 + insights/parsers/libssh_config.py | 123 ++++++++++++++++++ insights/parsers/tests/test_libssh_config.py | 42 ++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 2 + 5 files changed, 172 insertions(+) create mode 100644 docs/shared_parsers_catalog/libssh_config.rst create mode 100644 insights/parsers/libssh_config.py create mode 100644 insights/parsers/tests/test_libssh_config.py diff --git a/docs/shared_parsers_catalog/libssh_config.rst b/docs/shared_parsers_catalog/libssh_config.rst new file mode 100644 index 000000000..e67a46220 --- /dev/null +++ b/docs/shared_parsers_catalog/libssh_config.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.libssh_config + :members: + :show-inheritance: diff --git a/insights/parsers/libssh_config.py b/insights/parsers/libssh_config.py new file mode 100644 index 000000000..a9cbd5a22 --- /dev/null +++ b/insights/parsers/libssh_config.py @@ -0,0 +1,123 @@ +""" +Libssh configuration files +========================== + +This module provides the SSH options stored in the libssh configuration files + +Parsers provided by this module are: + +LibsshClientConfig - file ``/etc/libssh/libssh_client.config`` +-------------------------------------------------------------- + +LibsshServerConfig - file ``/etc/libssh/libssh_server.config`` +-------------------------------------------------------------- +""" + +from insights.specs import Specs +from insights.core import CommandParser +from .. import parser, get_active_lines +from insights.parsers import SkipException + + +class LibsshConfig(CommandParser, dict): + """ + Base class for libssh configuration files. + + There are many options in the libssh configuration file and + all of them are stored in one of the following formats: + a) Key [space] Value + b) Key = Value + + These are the keywords used in the configuration file: + include + hostkey + listenaddress + port + loglevel + ciphers + macs + kexalgorithms + match + pubkeyacceptedkeytypes + hostkeyalgorithms + all + user + group + host + localaddress + localport + rdomain + address + + Sample output:: + + # Parse system-wide crypto configuration file + Include /etc/crypto-policies/back-ends/libssh.config + # Parse OpenSSH configuration file for consistency + Include /etc/ssh/sshd_config + + .. note:: + + If there are two or more lines that have the same key, then we + store the values in a list. + + Examples: + >>> 'Include' in config + True + >>> config['Include'] + ['/etc/crypto-policies/back-ends/libssh.config', '/etc/ssh/sshd_config'] + + Raises: + SkipException: When input content is empty or there is a syntax error. + """ + + def parse_content(self, content): + if not content: + raise SkipException('No content.') + + for line in get_active_lines(content): + delimiter = None + if ' ' in line: + delimiter = ' ' + + if '=' in line: + delimiter = '=' + + try: + k, v = [i.strip() for i in line.split(delimiter)] + if k not in self: + self[k] = v + else: + _v = self[k] + _v = [_v] if not isinstance(_v, list) else _v + if v not in _v: + _v.append(v) + self[k] = _v + except ValueError: + raise SkipException('Syntax error') + + +@parser(Specs.libssh_client_config) +class LibsshClientConfig(LibsshConfig): + """ + Parser for accessing the ``/etc/libssh/libssh_client.config`` file. + + .. note:: + + Please refer to the super-class :py:class:`insights.parsers.libssh_config:LibsshConfig` + for more usage information. + """ + pass + + +@parser(Specs.libssh_server_config) +class LibsshServerConfig(LibsshConfig): + """ + Parser for accessing the ``/etc/libssh/libssh_server.config`` file. + + .. note:: + + Please refer to the super-class :py:class:`insights.parsers.libssh_config:LibsshConfig` + for more usage information.
+ """ + pass diff --git a/insights/parsers/tests/test_libssh_config.py b/insights/parsers/tests/test_libssh_config.py new file mode 100644 index 000000000..8b977bd74 --- /dev/null +++ b/insights/parsers/tests/test_libssh_config.py @@ -0,0 +1,42 @@ +import doctest +import pytest + +from insights.tests import context_wrap +from insights.parsers import libssh_config, SkipException +from insights.parsers.libssh_config import LibsshConfig + +CLIENT_CONFIG = """ +# Parse system-wide crypto configuration file +Include /etc/crypto-policies/back-ends/libssh.config +# Parse OpenSSH configuration file for consistency +Include /etc/ssh/ssh_config +""".strip() + +SERVER_CONFIG = """ +# Parse system-wide crypto configuration file +Include /etc/crypto-policies/back-ends/libssh.config +# Parse OpenSSH configuration file for consistency +Include /etc/ssh/sshd_config +""".strip() + + +def test_config_no_data(): + with pytest.raises(SkipException): + LibsshConfig(context_wrap("")) + + +def test_constructor(): + result = LibsshConfig(context_wrap(CLIENT_CONFIG)) + + assert 'Include' in result + assert len(result['Include']) == 2 + assert result['Include'][0] == '/etc/crypto-policies/back-ends/libssh.config' + assert result['Include'][1] == '/etc/ssh/ssh_config' + + +def test_doc_examples(): + env = { + "config": LibsshConfig(context_wrap(SERVER_CONFIG)), + } + failed, total = doctest.testmod(libssh_config, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index d02899c1c..bdc7ba86f 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -280,6 +280,8 @@ class Specs(SpecSet): lastupload = RegistryPoint(multi_output=True) libkeyutils_objdumps = RegistryPoint() libkeyutils = RegistryPoint() + libssh_client_config = RegistryPoint(filterable=True) + libssh_server_config = RegistryPoint(filterable=True) libvirtd_log = RegistryPoint(filterable=True) libvirtd_qemu_log = RegistryPoint(multi_output=True, filterable=True) limits_conf = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 878417417..047f01a5e 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -371,6 +371,8 @@ def httpd_cmd(broker): lastupload = glob_file(last_upload_globs) libkeyutils = simple_command("/usr/bin/find -L /lib /lib64 -name 'libkeyutils.so*'") libkeyutils_objdumps = simple_command('/usr/bin/find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x "{}" \;') + libssh_client_config = simple_file("/etc/libssh/libssh_client.config") + libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") limits_conf = glob_file(["/etc/security/limits.conf", "/etc/security/limits.d/*.conf"]) localtime = simple_command("/usr/bin/file -L /etc/localtime") From 142e0cc91f7f20c5f464dc3eac700ae1998afba0 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 9 Sep 2020 10:27:26 +0530 Subject: [PATCH 174/892] new spec ls_edac_mc (#2752) * new spec ls_edac_mc Signed-off-by: rasrivas * removed the ls_edac_mc keyword from the test_map_components file as its needed in a rule Signed-off-by: rasrivas --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 047f01a5e..ee197c3e8 100644 --- 
a/insights/specs/default.py +++ b/insights/specs/default.py @@ -381,6 +381,7 @@ def httpd_cmd(broker): ls_boot = simple_command("/bin/ls -lanR /boot") ls_dev = simple_command("/bin/ls -lanR /dev") ls_disk = simple_command("/bin/ls -lanR /dev/disk") + ls_edac_mc = simple_command("/bin/ls -lan /sys/devices/system/edac/mc") etc_and_sub_dirs = sorted(["/etc", "/etc/pki/tls/private", "/etc/pki/tls/certs", "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig", "/etc/cloud/cloud.cfg.d", "/etc/rc.d/init.d"]) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 0edddd258..179980852 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -96,6 +96,7 @@ class InsightsArchiveSpecs(Specs): ls_boot = simple_file("insights_commands/ls_-lanR_.boot") ls_dev = simple_file("insights_commands/ls_-lanR_.dev") ls_disk = simple_file("insights_commands/ls_-lanR_.dev.disk") + ls_edac_mc = simple_file("insights_commands/ls_-lan_.sys.devices.system.edac.mc") ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig") ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware") ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 879fd45c3..b2cbfe5a7 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -57,7 +57,6 @@ def test_get_component_by_symbolic_name(): 'gluster_v_status', 'heat_crontab', 'httpd_on_nfs', - 'ls_edac_mc', 'ls_usr_sbin', 'lvmconfig', 'saphostexec_status', From 9ba45a83bce121c0161e244aafc59839389fbae3 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 9 Sep 2020 14:42:31 -0500 Subject: [PATCH 175/892] Catch when grep produces "Not a directory" on failure. (#2754) The CommandParser now watches for "not a directory" to catch command failures. 
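A minimal self-contained sketch of the kind of check this adds (illustrative only, not the actual implementation; the sample path comes from the new test case):

    # a single-line output matching a known failure string is treated as a
    # command failure rather than parsable content
    BAD_SINGLE_LINES = [
        "no such file or directory",
        "not a directory",
        "command not found",
    ]

    def looks_like_command_failure(content):
        return len(content) == 1 and any(
            bad in content[0].lower() for bad in BAD_SINGLE_LINES
        )

    assert looks_like_command_failure(
        ["/etc/mail/spamassassin/channel.d: Not a directory"]
    )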
Fixes 2753 Signed-off-by: Christopher Sams --- insights/core/__init__.py | 1 + insights/tests/test_commandparser.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/insights/core/__init__.py b/insights/core/__init__.py index 75be17222..61efe9219 100644 --- a/insights/core/__init__.py +++ b/insights/core/__init__.py @@ -527,6 +527,7 @@ class CommandParser(Parser): __bad_single_lines = [ "no such file or directory", + "not a directory", "command not found", "no module named", "no files found for", diff --git a/insights/tests/test_commandparser.py b/insights/tests/test_commandparser.py index d8fb6cbf5..d74354f55 100644 --- a/insights/tests/test_commandparser.py +++ b/insights/tests/test_commandparser.py @@ -6,6 +6,7 @@ CMF = "blah: Command not found" NO_FILES_FOUND = "No files found for docker.service" NO_SUCH_FILE = "/usr/bin/blah: No such file or directory" +NOT_A_DIRECTORY = "/etc/mail/spamassassin/channel.d: Not a directory" MULTI_LINE = """ blah: Command not found /usr/bin/blah: No such file or directory @@ -41,6 +42,12 @@ def test_no_such_file_or_directory(): assert "No such file or directory" in str(e.value) +def test_not_a_directory(): + with pytest.raises(ContentException) as e: + MockParser(context_wrap(NOT_A_DIRECTORY)) + assert "Not a directory" in str(e.value) + + def test_multi_line(): assert MULTI_LINE.split('\n') == MockParser(context_wrap(MULTI_LINE)).data From 88458a5bccc3ff7daf18757d91a53140102f1306 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 9 Sep 2020 15:37:02 -0500 Subject: [PATCH 176/892] The http_conf Name definition is too strict. (#2749) Allow digits after the first character. Fixes #2748 Signed-off-by: Christopher Sams --- insights/combiners/httpd_conf.py | 5 ++++- insights/combiners/tests/test_httpd_conf_tree.py | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/insights/combiners/httpd_conf.py b/insights/combiners/httpd_conf.py index a72e40f49..2ec8bef55 100644 --- a/insights/combiners/httpd_conf.py +++ b/insights/combiners/httpd_conf.py @@ -275,7 +275,10 @@ def __init__(self, ctx): Complex = Forward() Comment = (WS >> OneLineComment("#")).map(lambda x: None) - Name = String(string.ascii_letters + "_/") + First = InSet(string.ascii_letters + "_/") + Rest = String(string.ascii_letters + "_/" + string.digits) + Name = (First + Rest).map("".join) + Num = Number & (WSChar | LineEnd) StartName = WS >> PosMarker(StartTagName(Letters)) << WS diff --git a/insights/combiners/tests/test_httpd_conf_tree.py b/insights/combiners/tests/test_httpd_conf_tree.py index 5ff6d6671..6858bb46c 100644 --- a/insights/combiners/tests/test_httpd_conf_tree.py +++ b/insights/combiners/tests/test_httpd_conf_tree.py @@ -5,6 +5,10 @@ from insights.parsers import SkipException import pytest +HTTPD_CONF_MIXED_NAME = ''' +H2Push on +''' + HTTPD_CONF_MIXED = ''' JustFotTest_NoSec "/var/www/cgi" @@ -785,3 +789,9 @@ def test_recursive_includes(): httpd1 = _HttpdConf(context_wrap(MULTIPLE_INCLUDES, path='/etc/httpd/conf/httpd.conf')) httpd2 = _HttpdConf(context_wrap(MULTIPLE_INCLUDES, path='/etc/httpd/conf.d/05-foreman.d/hello.conf')) HttpdConfTree([httpd1, httpd2]) + + +def test_mixed_name(): + httpd1 = _HttpdConf(context_wrap(HTTPD_CONF_MIXED_NAME, path='/etc/httpd/conf/httpd.conf')) + result = HttpdConfTree([httpd1]) + assert len(result.doc["H2Push"]) == 1 From 8f2dccf53733da3c1cb6f1f64d318eb206b85506 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 10 Sep 2020 05:13:48 +0800 Subject: [PATCH 177/892] Remove 
blank lines in data for parse_fixed_table (#2744) * If we keep blank lines, it will generate a dict with empty values, * which is useless. Signed-off-by: Huanhuan Li --- insights/parsers/__init__.py | 21 +++++++++++-------- insights/parsers/tests/test_parsers_module.py | 12 +++++++++++ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/insights/parsers/__init__.py b/insights/parsers/__init__.py index c6a5f293d..728da2b48 100644 --- a/insights/parsers/__init__.py +++ b/insights/parsers/__init__.py @@ -289,8 +289,9 @@ def parse_fixed_table(table_lines, data in fixed positions in each remaining row of table data. Table columns must not contain spaces within the column name. Column headings are assumed to be left justified and the column data width is the width of the - heading label plus all whitespace to the right of the label. This function - will handle blank columns. + heading label plus all whitespace to the right of the label. This function will + remove all blank rows in data but it will handle blank columns if some of the + columns aren't empty. Arguments: table_lines (list): List of strings with the first line containing column @@ -330,6 +331,7 @@ def parse_fixed_table(table_lines, [{'Column1': 'data1', 'Column2': 'data 2', 'Column3': 'data 3'}, {'Column1': 'data4', 'Column2': 'data5', 'Column3': 'data6'}] """ + def calc_column_indices(line, headers): idx = [] for h in headers: @@ -355,13 +357,14 @@ def calc_column_indices(line, headers): table_data = [] for line in table_lines[first_line + 1:last_line]: - col_data = {} - for i, (s, e) in enumerate(idx_pairs): - val = line[s:e].strip() - if empty_exception and not val: - raise ParseException('Incorrect line: \'{0}\''.format(line)) - col_data[col_headers[i]] = val - table_data.append(col_data) + if line.strip(): + col_data = {} + for i, (s, e) in enumerate(idx_pairs): + val = line[s:e].strip() + if empty_exception and not val: + raise ParseException('Incorrect line: \'{0}\''.format(line)) + col_data[col_headers[i]] = val + table_data.append(col_data) return table_data diff --git a/insights/parsers/tests/test_parsers_module.py b/insights/parsers/tests/test_parsers_module.py index 7e4624067..108723cf9 100644 --- a/insights/parsers/tests/test_parsers_module.py +++ b/insights/parsers/tests/test_parsers_module.py @@ -224,6 +224,16 @@ def test_calc_offset(): Another trailing non-data line """.strip() +FIXED_CONTENT_5 = """ +Column1 Column 2 Column 3 + +data1 data 2 data 3 + +data 7 data 9 + +data10 +""".strip() + FIXED_CONTENT_DUP_HEADER_PREFIXES = """ NAMESPACE NAME LABELS @@ -288,6 +298,8 @@ def test_parse_fixed_table(): data = parse_fixed_table(FIXED_CONTENT_DUP_HEADER_PREFIXES.splitlines()) assert data[0] == {'NAMESPACE': 'default', 'NAME': 'foo', 'LABELS': 'app=superawesome'} + data = parse_fixed_table(FIXED_CONTENT_5.splitlines()) + assert len(data) == 3 def test_parse_fixed_table_empty_exception(): From 81a6a623a4ffd760b03a1c593143001c24e79183 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 9 Sep 2020 16:24:18 -0500 Subject: [PATCH 178/892] Remove defunct entry_point references. 
(#2743) Signed-off-by: Christopher Sams --- setup.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup.py b/setup.py index f93ef0132..43283af7b 100644 --- a/setup.py +++ b/setup.py @@ -20,8 +20,6 @@ 'insights-inspect = insights.tools.insights_inspect:main', 'insights-info = insights.tools.query:main', 'insights-ocpshell= insights.ocpshell:main', - 'gen_api = insights.tools.generate_api_config:main', - 'insights-perf = insights.tools.perf:main', 'client = insights.client:run', 'mangle = insights.util.mangle:main' ] From 211e7ccc781d30146c5bd7915dcc308e24dd78a2 Mon Sep 17 00:00:00 2001 From: Sachin Date: Thu, 10 Sep 2020 18:04:51 +0530 Subject: [PATCH 179/892] Remove the semicolon from query statement (#2751) Two major changes ----------------- 1. Remove the ";" from the DB query statement: On RHV Manager, the statement ending with ";" failed with the error below: ``` SELECT row_to_json(t) FROM (SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t Traceback (most recent call last): File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 337, in query_return_json query SyntaxError: syntax error at or near ";" LINE 5: ...ds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t ^ Traceback (most recent call last): File "/usr/bin/engine-db-query", line 281, in sys.exit(main()) File "/usr/bin/engine-db-query", line 273, in main knowledge_base=args.kb_url File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 213, in execute knowledge_base=knowledge_base File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 348, in query_return_json ret = cursor.fetchall() psycopg2.ProgrammingError: no results to fetch ``` After removing the ";", we get the expected output: ``` { "id_host": "None","when": "2020-09-07 14:44:21","time": "0.00279307365417", "name": "None", "description": "None", "type": "None", "kb": "None", "bugzilla": "None", "file": "", "path": "None", "id": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "hash": "d41d8cd98f00b204e9800998ecf8427e", "result": [[{"vds_name": "hosto", "rpm_version": "vdsm-4.30.40-1.el7ev"}]]} ``` 2. Fixed the data collection file name: The rule failed with the error below: ``` insights.core.plugins.ContentException: /home/psachin/insights-vm-9-32.example.com-20200826174644/insights_commands/engine-db-query_-s_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json does not exist. ``` Signed-off-by: Sachin Patil --- insights/parsers/engine_db_query.py | 9 ++++- .../parsers/tests/test_engine_db_query.py | 39 ++++++++++++++++++- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 2 +- 4 files changed, 48 insertions(+), 4 deletions(-) diff --git a/insights/parsers/engine_db_query.py b/insights/parsers/engine_db_query.py index c76959e6e..ff9ba6fe4 100644 --- a/insights/parsers/engine_db_query.py +++ b/insights/parsers/engine_db_query.py @@ -5,6 +5,7 @@ Parses the output of the command `engine-db-query` returned in JSON format. """ from insights.core import CommandParser, JSONParser +from insights.parsers import SkipException from insights.core.plugins import parser from insights.specs import Specs @@ -14,7 +15,7 @@ class EngineDBQueryVDSMversion(CommandParser, JSONParser): """ Get the hostname & vdsm package version along with host info.
- Class for parsing the output of the command - ``engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;" --json``. + Class for parsing the output of the command - ``engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id" --json``. Attributes: data (dict): Host info. @@ -44,6 +45,12 @@ class EngineDBQueryVDSMversion(CommandParser, JSONParser): >>> output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.30.40-1.el7ev'}] True """ + def parse_content(self, content): + if not content: + raise SkipException("Empty output.") + + super(EngineDBQueryVDSMversion, self).parse_content(content) + @property def result(self): """Get the value of 'result'.""" diff --git a/insights/parsers/tests/test_engine_db_query.py b/insights/parsers/tests/test_engine_db_query.py index d6147ddc4..031d13381 100644 --- a/insights/parsers/tests/test_engine_db_query.py +++ b/insights/parsers/tests/test_engine_db_query.py @@ -1,5 +1,6 @@ import doctest -from insights.parsers import engine_db_query +import pytest +from insights.parsers import engine_db_query, ParseException, SkipException from insights.tests import context_wrap @@ -53,6 +54,32 @@ } """.strip() +ERROR = """ + + SELECT + row_to_json(t) + FROM + (SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t + +Traceback (most recent call last): + File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 337, in query_return_json + query +SyntaxError: syntax error at or near ";" +LINE 5: ...ds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;) t + ^ + +Traceback (most recent call last): + File "/usr/bin/engine-db-query", line 281, in + sys.exit(main()) + File "/usr/bin/engine-db-query", line 273, in main + knowledge_base=args.kb_url + File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 213, in execute + knowledge_base=knowledge_base + File "/usr/lib/python2.7/site-packages/engine_db_query/__init__.py", line 348, in query_return_json + ret = cursor.fetchall() +psycopg2.ProgrammingError: no results to fetch +""".strip() + def test_edbq(): output = engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT)) @@ -63,6 +90,16 @@ def test_edbq(): output = engine_db_query.EngineDBQueryVDSMversion(context_wrap(OUTPUT_2)) assert output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.40.20-33.git1b7dedcf3.fc30'}, {'vds_name': 'hosto2', 'rpm_version': 'vdsm-4.40.13-38.gite9bae3c68.fc30'}] + # No content + with pytest.raises(SkipException) as e: + engine_db_query.EngineDBQueryVDSMversion(context_wrap("")) + assert "Empty output." in str(e) + + # Error + with pytest.raises(ParseException) as e: + engine_db_query.EngineDBQueryVDSMversion(context_wrap(ERROR)) + assert "couldn't parse json." 
in str(e) + def test_doc_examples(): env = { diff --git a/insights/specs/default.py b/insights/specs/default.py index ee197c3e8..fe7d34922 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -238,7 +238,7 @@ def is_ceph_monitor(broker): docker_storage_setup = simple_file("/etc/sysconfig/docker-storage-setup") docker_sysconfig = simple_file("/etc/sysconfig/docker") dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") - engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;" --json') + engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id" --json') engine_log = simple_file("/var/log/ovirt-engine/engine.log") etc_journald_conf = simple_file(r"etc/systemd/journald.conf") etc_journald_conf_d = glob_file(r"etc/systemd/journald.conf.d/*.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 179980852..69bfd6536 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -45,7 +45,7 @@ class InsightsArchiveSpecs(Specs): docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") - engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_-s_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") + engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") ethtool_S = glob_file("insights_commands/ethtool_-S_*") ethtool_T = glob_file("insights_commands/ethtool_-T_*") From d181610663458079e6ba4b5cc6b32efe90ff6b44 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 10 Sep 2020 13:51:13 -0400 Subject: [PATCH 180/892] do not redact client metadata files (#2735) * skip the client metadata files in soscleaner * skip mac obfuscation because it breaks machine id * only redact and obfuscate on /data dir for core collection * skip machine-ids in soscleaner and redaction Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 21 +++++++++++++- insights/contrib/soscleaner.py | 28 +++++++++++++++++-- .../tests/client/data_collector/test_done.py | 2 +- .../client/data_collector/test_redact.py | 20 +++++++++++++ 4 files changed, 66 insertions(+), 5 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 846b45e75..61aedf3d4 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -338,9 +338,26 @@ def redact(self, rm_conf): if not exclude: logger.debug('Patterns section of blacklist configuration is empty.') - for dirpath, dirnames, filenames in os.walk(self.archive.archive_dir): + # TODO: consider implementing redact() in CoreCollector class rather than + # special handling here + if self.config.core_collect: + # redact only from the 'data' directory + searchpath = os.path.join(self.archive.archive_dir, 'data') + if not os.path.isdir(searchpath): + # abort if the dir does not exist + # we should never 
get here but just in case + raise RuntimeError('ERROR: invalid Insights archive temp path') + else: + searchpath = self.archive.archive_dir + + for dirpath, dirnames, filenames in os.walk(searchpath): for f in filenames: fullpath = os.path.join(dirpath, f) + if (fullpath.endswith('etc/insights-client/machine-id') or + fullpath.endswith('etc/machine-id') or + fullpath.endswith('insights_commands/subscription-manager_identity')): + # do not redact the ID files + continue redacted_contents = _process_content_redaction(fullpath, exclude, regex) with open(fullpath, 'wb') as dst: dst.write(redacted_contents) @@ -405,6 +422,8 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.networks = None self.users = None self.users_file = None + self.obfuscate_macs = False + self.core_collect = config.core_collect if rm_conf: try: diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 1c5b24cad..14e357606 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -1014,8 +1014,6 @@ def _clean_line(self, line, filename): if false_positive in filename: process_obfuscation = False new_line = self._sub_keywords(line) # Keyword Substitution - if self.obfuscate_macs is True: - new_line = self._sub_mac(new_line) # MAC address obfuscation if process_obfuscation: new_line = self._sub_hostname( new_line) # Hostname substitution @@ -1654,13 +1652,37 @@ def clean_report(self, options, sosreport): # pragma: no cover self.hn_db['host0'] = self.hostname self._domains2db() - files = self._file_list(self.dir_path) + if options.core_collect: + # operate on the "data" directory when doing core collection + files = self._file_list(os.path.join(self.dir_path, 'data')) + else: + files = self._file_list(self.dir_path) self._process_users_file() self.logger.con_out( "IP Obfuscation Network Created - %s", self.default_net.compressed) self.logger.con_out("*** SOSCleaner Processing ***") self.logger.info("Working Directory - %s", self.dir_path) for f in files: + if options.core_collect: + # set a relative path of $ARCHIVEROOT/data for core collection + relative_path = os.path.relpath(f, start=os.path.join(self.dir_path, 'data')) + else: + # set a relative path of $ARCHIVEROOT for non core collection + relative_path = os.path.relpath(f, start=self.dir_path) + + # in addition to setting up that relative path, skip these + # files in the archive root for classic collection + if relative_path in ('display_name', + 'blacklist_report', + 'tags.json', + 'branch_info', + 'version_info', + 'egg_release'): + continue + # ALWAYS skip machine-id, subman id, and insights id + if relative_path in ('etc/machine-id', + 'etc/insights-client/machine-id'): + continue self.logger.debug("Cleaning %s", f) self._clean_file(f) self.logger.con_out("*** SOSCleaner Statistics ***") diff --git a/insights/tests/client/data_collector/test_done.py b/insights/tests/client/data_collector/test_done.py index 54afb1d03..65643301a 100644 --- a/insights/tests/client/data_collector/test_done.py +++ b/insights/tests/client/data_collector/test_done.py @@ -89,7 +89,7 @@ def test_soscleaner_additions(isdir_, clean_opts): s = SOSCleaner() s.logger = Mock() s.file_count = Mock() - s._prep_environment = Mock(return_value=(None, None, None, None, None)) + s._prep_environment = Mock(return_value=(None, '/var/tmp/test/socleaner-test', None, None, None)) s._start_logging = Mock() s._check_uid = Mock() s._get_disclaimer = Mock() diff --git a/insights/tests/client/data_collector/test_redact.py 
b/insights/tests/client/data_collector/test_redact.py index 4bb6e8bba..7779b4154 100644 --- a/insights/tests/client/data_collector/test_redact.py +++ b/insights/tests/client/data_collector/test_redact.py @@ -70,6 +70,26 @@ def test_redact_call_walk(walk): walk.assert_called_once_with(arch.archive_dir) +@patch('insights.client.data_collector.os.walk') +@patch('insights.client.data_collector.os.path.isdir', Mock(return_value=True)) +def test_redact_call_walk_core(walk): + ''' + Verify that redact() calls os.walk and when an + an archive structure is present in /var/tmp/**/insights-* + + With core collection, /data is added to the path + ''' + conf = InsightsConfig(core_collect=True) + arch = InsightsArchive(conf) + arch.create_archive_dir() + + dc = DataCollector(conf, arch) + rm_conf = {} + + dc.redact(rm_conf) + walk.assert_called_once_with(os.path.join(arch.archive_dir, 'data')) + + @patch('insights.client.data_collector._process_content_redaction') def test_redact_call_process_redaction(_process_content_redaction): ''' From d5b7d216bed7a65760d2e8592e0036cc73101af4 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 10 Sep 2020 14:10:00 -0400 Subject: [PATCH 181/892] Update uploader.json map Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 55 ++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 8 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 9cb032d8c..8aa9ad3ee 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -238,7 +238,7 @@ "symbolic_name": "du_dirs" }, { - "command": "/bin/engine-db-query --statement \"SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id;\" --json", + "command": "/bin/engine-db-query --statement \"SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id\" --json", "pattern": [], "symbolic_name": "engine_db_query_vdsm_version" }, @@ -526,7 +526,7 @@ "symbolic_name": "ls_edac_mc" }, { - "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/sysconfig /etc/rc.d/init.d", + "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/rc.d/init.d /etc/sysconfig", "pattern": [], "symbolic_name": "ls_etc" }, @@ -1034,6 +1034,9 @@ "crmd", "dlm_controld", "docker", + "heat-engine", + "nova-compute", + "nova-conductor", "ntpd", "openshift start master api", "openshift start master controllers", @@ -1069,7 +1072,10 @@ "crmd", "dlm_controld", "docker", + "heat-engine", "mysqld", + "nova-compute", + "nova-conductor", "ntpd", "oc observe csr", "openshift start master api", @@ -1115,6 +1121,7 @@ "elasticsearch", "goferd", "greenplum", + "heat-engine", "httpd", "iscsid", "multipath", @@ -1122,6 +1129,7 @@ "nfsd", "nginx", "nova-compute", + "nova-conductor", "ntpd", "octavia-worker", "openshift start master api", @@ -1157,9 +1165,12 @@ "crmd", "dlm_controld", "docker", + "heat-engine", "neutron-ns-metadata-proxy", "nginx: master process", "nginx: worker process", + "nova-compute", + "nova-conductor", "ntpd", "openshift start master api", "openshift start master controllers", @@ -1857,7 +1868,8 @@ { "file": "/etc/dnf/dnf.conf", "pattern": [ - "[" + "[", + "best" ], "symbolic_name": "dnf_conf" }, @@ -2138,6 +2150,15 @@ ], "symbolic_name": "ipaupgrade_log" }, + { + "file": "/etc/ipsec.conf", 
+ "pattern": [ + "config", + "include", + "plutodebug" + ], + "symbolic_name": "ipsec_conf" + }, { "file": "/etc/sysconfig/iptables", "pattern": [], @@ -2185,6 +2206,20 @@ "pattern": [], "symbolic_name": "ksmstate" }, + { + "file": "/etc/libssh/libssh_client.config", + "pattern": [ + "Include" + ], + "symbolic_name": "libssh_client_config" + }, + { + "file": "/etc/libssh/libssh_server.config", + "pattern": [ + "Include" + ], + "symbolic_name": "libssh_server_config" + }, { "file": "/var/log/libvirt/libvirtd.log", "pattern": [ @@ -2247,6 +2282,8 @@ "- image is referenced in one or more repositories", "/input/input", "11000 E11000 duplicate key error index: pulp_database.repo_profile_applicability.$profile_hash_-1_repo_id_-1", + "17763", + ": possible SYN flooding on port", ": segfault at ", ": session replaced: jid=", "Abort command issued", @@ -2267,6 +2304,7 @@ "Exception happened during processing of request from", "Failed to bind socket: No such file or directory", "Failed to extend thin", + "Hyper-V Host", "List /apis/image.openshift.io/v1/images", "Loop callback failed with: Cannot allocate memory", "MDC/MDIO access timeout", @@ -2351,7 +2389,6 @@ "kernel: lockd: Unknown symbol unregister_inet6addr_notifier", "kernel: megasas: Found FW in FAULT state, will reset adapter.", "kernel: nfs: server", - "kernel: possible SYN flooding on port", "khash_super_prune_nolock", "link status up for interface", "megaraid_sas: FW detected to be in faultstate, restarting it", @@ -2366,7 +2403,6 @@ "reservation conflict", "returned a bad sequence-id error", "rhsmd: rhsmd process exceeded runtime and was killed", - "segfault at", "server kernel: rhsmcertd-worke", "shm_open failed, Permission denied", "skb_copy", @@ -3606,7 +3642,9 @@ { "file": "/etc/named.conf", "pattern": [ - "include" + "include", + "{", + "}" ], "symbolic_name": "named_conf" }, @@ -3994,7 +4032,8 @@ "glob": "/etc/ssh/ssh_config.d/*.conf", "symbolic_name": "ssh_config_d", "pattern": [ - "Include" + "Include", + "SendEnv" ] } ], @@ -4018,5 +4057,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-08-27T15:58:30.933074" + "version": "2020-09-10T14:04:52.231084" } \ No newline at end of file From c249c6dee4f5aa421eaa51955c85d296246f1827 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 15 Sep 2020 11:11:31 -0500 Subject: [PATCH 182/892] Ensure evaluators are populated with system_id and branch_info. (#2757) This PR also adds a procedure for initializing brokers from an archive whose type is automatically detected. This new API should be used by service wrappers. 
Fixes #2756 Signed-off-by: Christopher Sams --- insights/__init__.py | 9 ++------- insights/core/evaluators.py | 10 +++++----- insights/core/hydration.py | 19 +++++++++++++++++-- insights/formats/__init__.py | 2 +- insights/formats/_json.py | 4 ++-- 5 files changed, 27 insertions(+), 17 deletions(-) diff --git a/insights/__init__.py b/insights/__init__.py index eaf6f6291..9896895e8 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -34,13 +34,12 @@ from .core import dr # noqa: F401 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext, ExecutionContext # noqa: F401 from .core.dr import SkipComponent # noqa: F401 -from .core.hydration import create_context +from .core.hydration import create_context, initialize_broker # noqa: F401 from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 from .core.plugins import datasource, condition, incident # noqa: F401 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 from .core.plugins import make_pass, make_fail, make_info # noqa: F401 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 -from .core.serde import Hydration from .formats import get_formatter from .parsers import get_active_lines # noqa: F401 from .util import defaults # noqa: F401 @@ -85,7 +84,7 @@ def add_status(name, nvr, commit=None): def process_dir(broker, root, graph, context, inventory=None): - ctx = create_context(root, context) + ctx, broker = initialize_broker(root, context=context, broker=broker) log.debug("Processing %s with %s" % (root, ctx)) if isinstance(ctx, ClusterArchiveContext): @@ -93,10 +92,6 @@ def process_dir(broker, root, graph, context, inventory=None): archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)] return process_cluster(graph, archives, broker=broker, inventory=inventory) - broker[ctx.__class__] = ctx - if isinstance(ctx, SerializedArchiveContext): - h = Hydration(ctx.root) - broker = h.hydrate(broker=broker) graph = dict((k, v) for k, v in graph.items() if k in dr.COMPONENTS[dr.GROUPS.single]) broker = dr.run(graph, broker=broker) return broker diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py index 358b47be4..1b59f1732 100644 --- a/insights/core/evaluators.py +++ b/insights/core/evaluators.py @@ -151,14 +151,14 @@ def __init__(self, broker=None, system_id=None, stream=sys.stdout, incremental=F def observer(self, comp, broker): super(InsightsEvaluator, self).observer(comp, broker) - if comp is Specs.machine_id and comp in broker: + if self.system_id is None and Specs.machine_id in broker: self.system_id = broker[Specs.machine_id].content[0].strip() - if comp is Specs.redhat_release and comp in broker: - self.release = broker[comp].content[0].strip() + if self.release is None and Specs.redhat_release in broker: + self.release = broker[Specs.redhat_release].content[0].strip() - if comp is BranchInfo and BranchInfo in broker: - self.branch_info = broker[comp].data + if not self.branch_info and BranchInfo in broker: + self.branch_info = broker[BranchInfo].data if comp is Specs.metadata_json and comp in broker: md = broker[comp] diff --git a/insights/core/hydration.py b/insights/core/hydration.py index da24043e6..2b873d60e 100644 --- a/insights/core/hydration.py +++ b/insights/core/hydration.py @@ -1,10 +1,12 @@ import logging import os -from insights.core import archives +from insights.core import archives, dr +from insights.core.serde import Hydration from 
insights.core.context import (ClusterArchiveContext, ExecutionContextMeta, - HostArchiveContext) + HostArchiveContext, + SerializedArchiveContext) log = logging.getLogger(__name__) @@ -54,3 +56,16 @@ def create_context(path, context=None): common_path, ctx = identify(all_files) context = context or ctx return context(common_path, all_files=all_files) + + +def initialize_broker(path, context=None, broker=None): + ctx = create_context(path, context=context) + broker = broker or dr.Broker() + if isinstance(ctx, ClusterArchiveContext): + return ctx, broker + + broker[ctx.__class__] = ctx + if isinstance(ctx, SerializedArchiveContext): + h = Hydration(ctx.root) + broker = h.hydrate(broker=broker) + return ctx, broker diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py index 825d91eca..78acdb0ee 100644 --- a/insights/formats/__init__.py +++ b/insights/formats/__init__.py @@ -89,7 +89,7 @@ def configure(p): def __init__(self, args=None): if args: - hn = "insights.combiners.hostname" + hn = "insights.combiners.hostname, insights.parsers.branch_info" args.plugins = ",".join([args.plugins, hn]) if args.plugins else hn if args.fail_only: print('Options conflict: -f and -F, drops -F', file=sys.stderr) diff --git a/insights/formats/_json.py b/insights/formats/_json.py index 59cc1c383..bdc8193bd 100644 --- a/insights/formats/_json.py +++ b/insights/formats/_json.py @@ -1,10 +1,10 @@ import json -from insights.core.evaluators import SingleEvaluator +from insights.core.evaluators import SingleEvaluator as Evaluator from insights.formats import EvaluatorFormatterAdapter -class JsonFormat(SingleEvaluator): +class JsonFormat(Evaluator): def postprocess(self): json.dump(self.get_response(), self.stream) From 626913a2a1886c1aa9e2a2db5927ece331100ab0 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 17 Sep 2020 13:59:17 -0400 Subject: [PATCH 183/892] Update uploader_json_map file Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 8aa9ad3ee..eae9cbed7 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1103,12 +1103,14 @@ "pattern": [ "/opt/perf/bin/midaemon", "/sbin/multipathd", + "/sbin/rngd", "/usr/bin/gnome-shell", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "/usr/bin/teamd", "/usr/sbin/fcoemon --syslog", "COMMAND", + "[rcu_sched]", "bash", "catalina.base", "ceilometer-coll", @@ -1141,6 +1143,8 @@ "pcsd", "pkla-check-auth", "postgres", + "rcu_gp_kthread", + "rcu_sched", "smbd", "spausedd", "target_completi", @@ -2334,6 +2338,7 @@ "_NET_ACTIVE_WINDOW", "as active slave; either", "belongs to docker.service", + "blocked for more than", "callbacks suppressed", "canceled DHCP transaction, DHCP client pid", "chardev: opening backend \"socket\" failed", @@ -3082,6 +3087,7 @@ "SECURITY", "Security", "[", + "]", "comment", "global", "kerberos method", @@ -3092,6 +3098,8 @@ "read only", "realm", "security", + "server max protocol", + "socket options", "writable" ], "symbolic_name": "samba" @@ -3221,7 +3229,6 @@ "PermitRootLogin", "Permitemptypasswords", "Permitrootlogin", - "Port", "Protocol", "USEPAM", "UsePAM", @@ -4057,5 +4064,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-09-10T14:04:52.231084" + "version": "2020-09-17T13:54:44.273820" } \ No newline at end of file 
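For reference, a minimal sketch of how this packaged map is read at runtime, mirroring the pkgutil call from patch 171 (printing the version key is purely illustrative):

    import json
    import pkgutil

    import insights

    # pkgutil resolves the resource relative to the installed insights package
    raw = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json")
    uploader_json = json.loads(raw)
    print(uploader_json["version"])  # e.g. "2020-09-17T13:54:44.273820"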
From 30295a4402901a0498b7b82b18b21f9d5fcbac67 Mon Sep 17 00:00:00 2001 From: Andrew Kofink Date: Mon, 21 Sep 2020 16:25:54 -0400 Subject: [PATCH 184/892] [RHICOMPL-737] Fail gracefully for missing profiles (#2760) Before, if a SCAP profile was missing on the client system, the --compliance run would halt and inform the user, even if there were more profiles assigned to the host that did exist. After this change, the user still gets an error message for a missing profile, but other existing profiles will still be scanned and uploaded as usual. Signed-off-by: Andrew Kofink --- insights/client/apps/compliance/__init__.py | 4 +++- insights/tests/client/apps/test_compliance.py | 14 ++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 5c5a70a60..f2acdbfd2 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -92,7 +92,7 @@ def find_scap_policy(self, profile_ref_id): rc, grep = call(grepcmd, keep_rc=True) if rc: logger.error('XML profile file not found matching ref_id {0}\n{1}\n'.format(profile_ref_id, grep)) - exit(constants.sig_kill_bad) + return None filenames = findall('/usr/share/xml/scap/.+xml', grep) if not filenames: logger.error('No XML profile files found matching ref_id {0}\n{1}\n'.format(profile_ref_id, ' '.join(filenames))) @@ -107,6 +107,8 @@ def build_oscap_command(self, profile_ref_id, policy_xml, output_path, tailoring return command def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path=None): + if policy_xml is None: + return logger.info('Running scan for {0}... this may take a while'.format(profile_ref_id)) env = os.environ.copy() env.update({'TZ': 'UTC'}) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 24719207f..1495f4795 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -99,8 +99,7 @@ def test_find_scap_policy(config, call): def test_find_scap_policy_not_found(config, call): compliance_client = ComplianceClient(config) compliance_client.profile_files = lambda: ['/something'] - with raises(SystemExit): - compliance_client.find_scap_policy('ref_id') + assert compliance_client.find_scap_policy('ref_id') is None @patch("insights.client.apps.compliance.call", return_value=(0, ''.encode('utf-8'))) @@ -132,6 +131,17 @@ def test_run_scan_fail(config, call): call.assert_called_with(("oscap xccdf eval --profile ref_id --results " + output_path + ' /nonexistent').encode(), keep_rc=True, env=env) +@patch("insights.client.apps.compliance.call", return_value=(0, ''.encode('utf-8'))) +@patch("insights.client.config.InsightsConfig") +def test_run_scan_missing_profile(config, call): + compliance_client = ComplianceClient(config) + output_path = '/tmp/oscap_results-ref_id.xml' + env = os.environ + env.update({'TZ': 'UTC'}) + assert compliance_client.run_scan('ref_id', None, output_path) is None + call.assert_not_called() + + @patch("insights.client.config.InsightsConfig") def test_tailored_file_is_not_downloaded_if_not_needed(config): compliance_client = ComplianceClient(config) From 40c3c29c5177bbcebf628bf4f3c602e854f64a02 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 22 Sep 2020 12:40:53 -0400 Subject: [PATCH 185/892] Rollback soscleaner (#2762) * Revert soscleaner updates * keep added allowlist of metadata Signed-off-by: Jeremy Crafts --- 
insights/client/data_collector.py | 15 +- insights/contrib/soscleaner.py | 1838 ++++------------- .../tests/client/data_collector/test_done.py | 8 - .../client/data_collector/test_redact.py | 1 - 4 files changed, 434 insertions(+), 1428 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 61aedf3d4..af88100cb 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -343,8 +343,9 @@ def redact(self, rm_conf): if self.config.core_collect: # redact only from the 'data' directory searchpath = os.path.join(self.archive.archive_dir, 'data') - if not os.path.isdir(searchpath): - # abort if the dir does not exist + if not (os.path.isdir(searchpath) and + re.match(r'/var/tmp/.+/insights-.+/data', searchpath)): + # abort if the dir does not exist and isn't the correct format # we should never get here but just in case raise RuntimeError('ERROR: invalid Insights archive temp path') else: @@ -379,7 +380,6 @@ def done(self, conf, rm_conf): and archive files. """ if self.config.obfuscate: - logger.warn('WARNING: Some SOSCleaner obfuscation output formatting has changed. See https://access.redhat.com/articles/5355431 for more details.') if rm_conf and rm_conf.get('keywords'): logger.warn("WARNING: Skipping keywords defined in blacklist configuration") cleaner = SOSCleaner(quiet=True) @@ -414,15 +414,8 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.files = [] self.quiet = True self.keyword_file = None - self.keywords_file = None self.keywords = None self.no_tar_file = config.output_dir - self.loglevel = 'INFO' - self.obfuscate_macs = False - self.networks = None - self.users = None - self.users_file = None - self.obfuscate_macs = False self.core_collect = config.core_collect if rm_conf: @@ -432,7 +425,7 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.keyword_file.write("\n".join(keywords).encode('utf-8')) self.keyword_file.flush() self.keyword_file.close() - self.keywords_file = [self.keyword_file.name] + self.keywords = [self.keyword_file.name] logger.debug("Attmpting keyword obfuscation") except LookupError: pass diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 14e357606..5613d753b 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -14,1642 +14,671 @@ # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# File Name : soscleaner.py +# File Name : sos-gov.py # Creation Date : 10-01-2013 # Created By : Jamie Duncan -# Purpose : an sosreport and data set obfuscation tool +# Last Modified : Sat 13 Sep 2014 10:51:54 PM EDT +# Purpose : an sosreport scrubber import os -import hashlib import re import errno -import stat import sys import uuid import shutil +import struct, socket import tempfile import logging import tarfile -from insights.contrib.ipaddress import IPv4Network, IPv4Address, IPv6Network, IPv6Address +import six -from random import randint -from six.moves import configparser -import subprocess +from insights.util import content_type -import six -if six.PY3: - from builtins import str - from builtins import range - from builtins import object -else: - from __builtin__ import str - from __builtin__ import range - from __builtin__ import object - - -class SOSCleaner(object): - """ - A class to parse through an sosreport or generic dataset to begin the - cleaning and obfuscation process required in many industries. 
- """ +class SOSCleaner: + ''' + A class to parse through an sosreport and begin the cleaning process required in many industries + Parameters: + debug - will generate add'l output to STDOUT. defaults to no + reporting - will post progress and overall statistics to STDOUT. defaults to yes + ''' def __init__(self, quiet=False): self.name = 'soscleaner' + self.version = '0.2.2' + self.loglevel = 'INFO' #this can be overridden by the command-line app self.quiet = quiet self.domain_count = 0 - self.domains = ['redhat.com', 'localhost.localdomain'] - self.short_domains = ['localdomain', 'localhost'] + self.domains = list() + self.keywords = list() self.domainname = None self.report_dir = '/tmp' - self.version = '0.4.4' - self.false_positives = [ - 'installed-debs', - 'installed_rpms', - 'sos_commands/dpkg', - 'sos_commands/rpm', - 'sos_commands/snappy/snap_list_--all', - 'sos_commands/snappy/snap_--version' - ] - self.loglevel = 'INFO' - self.net_db = list() # Network Information database - self.ip_db = list() - if six.PY3: - self.default_net = IPv4Network('128.0.0.0/8') - else: - self.default_net = IPv4Network(unicode('128.0.0.0/8')) - self.default_netmask = self.default_net.prefixlen - # we'll have to keep track of how many networks we have so we don't have to count them each time we need to create a new one. - self.net_count = 0 - self.net_metadata = dict() - self.net_metadata[self.default_net.network_address.compressed] = dict() - self.net_metadata[self.default_net.network_address.compressed]['host_count'] = 0 + # IP obfuscation information + self.ip_db = dict() #IP database + self.start_ip = '10.230.230.1' # Hostname obfuscation information - self.hn_db = dict() # hostname database + self.hn_db = dict() #hostname database self.hostname_count = 0 self.hostname = None - self.mac_db = dict() # mac address database - self.mac_count = 0 - # Domainname obfuscation information - self.dn_db = dict() # domainname database - # right now this needs to be a 2nd level domain - # examples: foo.com, example.com, domain.org - self.root_domain = 'obfuscateddomain.com' + self.dn_db = dict() #domainname database + self.root_domain = 'example.com' #right now this needs to be a 2nd level domain, like foo.com, example.com, domain.org, etc. + + # self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment() + # self._start_logging(self.logfile) # Keyword obfuscation information - self.keywords_file = list() - self.keywords = list() - self.kw_db = dict() # keyword database + self.keywords = None + self.kw_db = dict() #keyword database self.kw_count = 0 - # obfuscating users from the last command, per rfe #79 - self.users_file = 'sos_commands/last/lastlog_-u_1000-60000' - self.user_db = dict() - self.user_count = 0 - self.config_file = '/etc/soscleaner.conf' - self._read_early_config_options() - self.obfuscate_macs = True # issue #98 - - def _check_uid(self): - """Ensures soscleaner is running as root. This isn't required for soscleaner, - but sosreports are run as root and root tends to own the files inside the - sosreport tarball - """ - - try: # pragma: no cover - if os.getuid() != 0: - self.logger.warning( - "soscleaner must be executed by the root user in the same manner as sosreport") - self.logger.warning("soscleaner cannot continue. 
Exiting...") - - sys.exit(8) - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "UID_ERROR - unable to run SOSCleaner - you do not appear to be the root user") - - def _read_early_config_options(self): - """Reads an optional configuration file to load often-used defaults for - domains, networks, keywords, etc. If a config file is present and command-line - parameters are passed in, they will be additive, with the config file being - read in first. - """ - - try: - config = configparser.ConfigParser() - if os.path.exists(self.config_file): - config.read(self.config_file) - - # load in default config values - if config.has_option('Default', 'loglevel'): - self.loglevel = config.get('Default', 'loglevel').upper() - if config.has_option('Default', 'root_domain'): - self.root_domain = config.get('Default', 'root_domain') - if config.has_option('Default', 'quiet'): - self.quiet = config.get('Default', 'quiet') - return True - - else: - return True - - except OSError as e: # pragma: no cover - pass - - def _read_later_config_options(self): - """Reads an optional configuration file to load often-used defaults for - domains, networks, keywords, etc. If a config file is present and command-line - parameters are passed in, they will be addadtive, with the config file being - read in first. - """ - - try: - config = configparser.ConfigParser() - if os.path.exists(self.config_file): - config.read(self.config_file) - self.logger.con_out( - "Loading config file for default values - %s", self.config_file) - if config.has_section('DomainConfig'): - domains = config.get('DomainConfig', 'domains').split(',') - for d in domains: - self.domains.append(d) - self.logger.con_out( - "Loading domains from config file - %s", d) - else: - self.logger.con_out( - "No config found - DomainConfig.domains") - else: - self.logger.con_out( - "No config file section found - DomainConfig") - - if config.has_section('KeywordConfig'): - if config.has_option('KeywordConfig', 'keywords'): - keywords = config.get('KeywordConfig', 'keywords') - kw = keywords.split(',') - for k in kw: - self.keywords.append(k.strip()) - else: - self.logger.con_out( - "No config found - KeywordConfig.keywords") - if config.has_option('KeywordConfig', 'keyword_files'): - keyword_files = config.get( - 'KeywordConfig', 'keyword_files').split(',') - for f in keyword_files: - self.keywords_file.append(f) - self.logger.con_out( - "Adding keyword file from config file - %s", f) - else: - self.logger.con_out( - "No config found - KeywordConfig.keyword_files") - - # load in networks - # we need them to be in a list so we can process - # them individually - # each network should be a CIDR notation - # string, eg 192.168.1.0/24 - if config.has_section('NetworkConfig'): - if config.has_option('NetworkConfig', 'networks'): - networks = config.get('NetworkConfig', 'networks') - networks = networks.split(',') - for network in networks: - self._ip4_add_network(network) - self.logger.con_out( - "Adding network from config file - %s", network) - else: - self.logger.con_out( - "No config found - NetworkConfig.networks") - - if config.has_section('MacConfig'): - if config.has_option('MacConfig', 'obfuscate_macs'): - self.obfuscate_macs = bool( - config.get('MacConfig', 'obfuscate_macs')) - - except Exception as e: # pragma: no cover - self.logger.exception(e) - self.logger.con_out( - "READ_CONFIG_OPTIONS_ERROR - Unable to load configs from file %s - Continuing without those values", self.config_file) - - def 
_extract_file_data(self, filename): - """Extracts data from a file and return the data""" - try: - fh = open(filename, 'r') - data = fh.readlines() - fh.close() - - return data - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception("FILE_OPEN_ERROR - unable to open %s", filename) - def _skip_file(self, d, files): - """The function passed into shutil.copytree to ignore certain - patterns and filetypes - Currently Skipped: - 1) Directories - handled by copytree - 2) Symlinks - handled by copytree - 3) Write-only files (stuff in /proc) + ''' + The function passed into shutil.copytree to ignore certain patterns and filetypes + Currently Skipped + Directories - handled by copytree + Symlinks - handled by copytree + Write-only files (stuff in /proc) Binaries (can't scan them) - Sockets and FIFO files. Scanning them locks up the copying. - """ - def confirm_text_file(filename): - """I know this is an epic hack, but I've seen a _ton_ - of inconsistency around different distribution's builds - of python-magic. Until it stabilizes, I'm just going to - hack around it. - """ - try: - command = "file %s" % filename - filetype = os.popen(command).read().strip( - '\n').split(':')[1].strip().lower() - if 'text' in filetype or 'json' in filetype: - return True - else: - return False - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "CONFIRM_TEXT_FILE_ERROR - Cannot confirm file type - %s", filename) - + ''' skip_list = [] for f in files: f_full = os.path.join(d, f) if not os.path.isdir(f_full): if not os.path.islink(f_full): - mode = os.stat(f_full).st_mode - if stat.S_ISSOCK(mode) or stat.S_ISFIFO(mode): - skip_list.append(f) - if not confirm_text_file(f_full): # if it's not a text file + #mode = oct(os.stat(f_full).st_mode)[-3:] + # executing as root makes this first if clause useless. + # i thought i'd already removed it. 
- jduncan + #if mode == '200' or mode == '444' or mode == '400': + # skip_list.append(f) + mime_type = content_type.from_file(f_full) + if 'text' not in mime_type and 'json' not in mime_type: skip_list.append(f) return skip_list def _start_logging(self, filename): - """Creates the logging objects and starts a logging instance.""" - # will get the logging instance going - loglevel_config = '%s' % self.loglevel + #will get the logging instance going + loglevel_config = 'logging.%s' % self.loglevel - # i'd like the stdout to be under another logging name than 'con_out' - console_log_level = 25 # between INFO and WARNING + #i'd like the stdout to be under another logging name than 'con_out' + console_log_level = 25 #between INFO and WARNING quiet = self.quiet logging.addLevelName(console_log_level, "CONSOLE") - def con_out(self, message, *args, **kws): # pragma: no cover + def con_out(self, message, *args, **kws): if not quiet: self._log(console_log_level, message, args, **kws) logging.Logger.con_out = con_out logging.basicConfig(filename=filename, - level=logging.getLevelName(loglevel_config), - format='%(asctime)s %(name)s %(levelname)s: %(message)s', - datefmt='%m-%d %H:%M:%S' - ) - if not self.quiet: # pragma: no cover + level=eval(loglevel_config), + format='%(asctime)s %(name)s %(levelname)s: %(message)s', + datefmt = '%m-%d %H:%M:%S' + ) + if not self.quiet: # pragma: no cover console = logging.StreamHandler(sys.stdout) - formatter = logging.Formatter( - '%(asctime)s %(name)s %(levelname)s: %(message)s', '%m-%d %H:%M:%S') + formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s', '%m-%d %H:%M:%S') console.setFormatter(formatter) console.setLevel(console_log_level) self.logger = logging.getLogger(__name__) if not self.quiet: - self.logger.addHandler(console) # pragma: no cover + self.logger.addHandler(console) # pragma: no cover self.logger.con_out("Log File Created at %s" % filename) def _prep_environment(self): - """Creates the needed definitions to identify the unique - soscleaner runs - It creates a 16 character UUID, then uses that to - create an origin_path to define where the temporary working - files are stored, a dir_path that is where the - obfuscated files are located, and a session value, - which is used in multiple locations to identify objects - for a given soscleaner run - """ - - # we set up our various needed directory structures, etc. - # 16 digit random string - ran_uuid = str(uuid.uuid4().int)[:16] - # Gather data into its own soscleaner session directory - self.report_dir += '/' + 'soscleaner-' + ran_uuid - os.makedirs( self.report_dir, 0o700 ) - # the origin dir we'll copy the files from - origin_path = os.path.join( - self.report_dir, "soscleaner-origin-%s" % ran_uuid) - # the dir we will put our cleaned files into - dir_path = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) - # short-hand for the soscleaner session to create reports, etc. - session = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) - # the primary logfile - logfile = os.path.join(self.report_dir, "%s.log" % session) + + #we set up our various needed directory structures, etc. 
+ ran_uuid = str(uuid.uuid4().int)[:16] # 16 digit random string + origin_path = os.path.join(self.report_dir, "soscleaner-origin-%s" % ran_uuid) # the origin dir we'll copy the files into + dir_path = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) # the dir we will put our cleaned files into + session = os.path.join(self.report_dir, "soscleaner-%s" % ran_uuid) # short-hand for the soscleaner session to create reports, etc. + logfile = os.path.join(self.report_dir, "%s.log" % session) # the primary logfile return origin_path, dir_path, session, logfile, ran_uuid def _extract_sosreport(self, path): - """Extracts an sosreport, accounting for all common compression algorithms - as well as working with uncompressed directories and single files. - """ - def get_compression_sig(filename): - try: - """I know this is an epic hack, but I've seen a _ton_ of inconsistency around different - distribution's builds of python-magic. Until it stabilizes, I'm just going to hack around it. - """ - - command = "file %s" % filename - compression_type = os.popen(command).read().strip( - '\n').split(':')[1].strip().lower() - return compression_type - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "GET_COMPRESSION_SIG_ERROR: Unable to verify compression sig - %s", filename) - try: - self.logger.con_out("Beginning SOSReport Extraction") - if os.path.isdir(path): - self.logger.info( - '%s appears to be a directory, no extraction required - continuing', path) - # Clear out origin_path as we don't have one - self.origin_path = None - return path - else: + self.logger.con_out("Beginning SOSReport Extraction") + compression_sig = content_type.from_file(path).lower() + if 'directory' in compression_sig: + self.logger.info('%s appears to be a %s - continuing', path, compression_sig) + # Clear out origin_path as we don't have one + self.origin_path = None + return path + + elif 'compressed data' in compression_sig: + if compression_sig == 'xz compressed data': + #This is a hack to account for the fact that the tarfile library doesn't + #handle lzma (XZ) compression until version 3.3 beta try: - compression_sig = get_compression_sig(path) - if compression_sig == 'xz compressed data': - try: - self.logger.info( - 'Data Source Appears To Be LZMA Encrypted Data - decompressing into %s', self.origin_path) - self.logger.info( - 'LZMA Hack - Creating %s', self.origin_path) - os.makedirs( self.origin_path, 0o755 ) - subprocess.Popen( - ["tar", "-xJf", path, "-C", self.origin_path]).wait() - - return_path = os.path.join( - self.origin_path, os.listdir(self.origin_path)[0]) - - return return_path - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'DecompressionError, Unable to decrypt LZMA compressed file %s', path) - - # the tarfile module handles other compression types. 
- # so we can just use that - else: - p = tarfile.open(path, 'r') - self.logger.info( - 'Data Source Appears To Be %s - decompressing into %s', compression_sig, self.origin_path) + self.logger.info('Data Source Appears To Be LZMA Encrypted Data - decompressing into %s', self.origin_path) + self.logger.info('LZMA Hack - Creating %s', self.origin_path) + os.system('mkdir %s' % self.origin_path) + os.system('tar -xJf %s -C %s' % (path, self.origin_path)) + return_path = os.path.join(self.origin_path, os.listdir(self.origin_path)[0]) - p.extractall(self.origin_path) - return_path = os.path.join( - self.origin_path, os.path.commonprefix(p.getnames())) + return return_path - return return_path - - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "DeCompressionError: Unable to De-Compress %s into %s", path, self.origin_path) - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'CompressionError: Unable To Determine Compression Type') - - ################################ - # User Functions # - ################################ - - def _process_user_option(self, users): - """Adds users specified from the command line to the user_db object""" - - try: - for username in users: - new_user = self._user2db(username) - self.logger.con_out( - "Adding user from the command line - %s > %s", username, new_user) - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "PROCESS_USER_OPTION_ERROR: unable to add user to user database") - - def _sub_username(self, line): - """Accepts a line from a file as input and replaces all occurrences of the users in the - user_db with the obfuscated values. - Returns the obfuscated line. - """ - - try: - if self.user_count > 0: # we have obfuscated keywords to work with - for user, o_user in list(self.user_db.items()): - line = re.sub(r'\b%s\b(?i)' % user, o_user, line) - self.logger.debug( - "Obfuscating User - %s > %s", user, o_user) - - return line - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'SUB_USERNAME_ERROR: Unable to obfuscate usernames on line - %s', line) - - def _create_random_username(self): - """Generates a random, unique obfuscated user ID and returns it""" - - def _randomizer(): - return "obfuscateduser%s" % randint(1,1000000) - - test_user = _randomizer() - if test_user in list(self.user_db.values()): - while test_user in list(self.user_db.values()): - self.logger.debug("Duplicate Obfuscated Hostname. Retrying - %s", test_user) - test_user = _randomizer() - if test_user not in list(self.user_db.values()): - return test_user - else: - return test_user - - def _user2db(self, username): - """Takes a username and adds it to the user_db with an obfuscated partner. - If the user hasn't been encountered before, it will add it to the database - and return the obfuscated partner entry. 
- If the user is already in the database it will return the obfuscated username - """ - try: - o_user = self.user_db.get(username) - if o_user is None: # no match, so we need to add to the database - # new username, so we increment the counter to get the user's obfuscated name - self.user_count += 1 - o_user = self._create_random_username() - self.logger.info( - "Adding new obfuscated user: %s > %s", username, o_user) - self.user_db[username] = o_user - - return o_user - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "USER_TO_DB_ERROR: unable to add user %s to database", username) - - def _process_users_file(self): - """Uses the 'last' output from an sosreport and generate a list of usernames to obfuscate in log files, etc. - By default it looks for the last file from an sosreport. But it can process any line-delimited list of users - From RFE #79 - """ - - # Users and entries that we don't want to obfuscate that could show up in lastlog - ignored_users = ('Username', - 'ubuntu' - ) - - # we're not calling this function from an option on the cli, we're just running it as part of __init__ - - try: - users_file = os.path.join(self.dir_path, self.users_file) - # check to make sure users_file is there and we can access it - if os.path.exists(users_file): - self.logger.con_out( - "Processing output from user file - %s", users_file) - data = self._extract_file_data(users_file) - sorted_users = list() - - # first, we get out the unique user entries - for line in data: - if len(line) > 1: # there are some blank lines at the end of the last output - sorted_users.append(line.split()[0]) - - # then we add them to the obfuscation database - for user in sorted_users: - if user not in ignored_users: - self.logger.con_out("Obfuscating user %s", user) - self._user2db(user) - - return True + raise Exception('DecompressionError, Unable to decrypt LZMA compressed file %s', path) else: - self.logger.con_out( - "Unable to locate user file - %s", users_file) - self.logger.con_out("Continuing without processing users file") + p = tarfile.open(path, 'r') - return False + self.logger.info('Data Source Appears To Be %s - decompressing into %s', compression_sig, self.origin_path) + try: + p.extractall(self.origin_path) + return_path = os.path.join(self.origin_path, os.path.commonprefix(p.getnames())) - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "PROCESS_USERS_FILE_ERROR: unable to add file - %s", self.users_file) + return return_path - ################################ - # IP Obfuscation Functions # - ################################ + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception("DeCompressionError: Unable to De-Compress %s into %s", path, self.origin_path) + else: # pragma: no cover + raise Exception('CompressionError: Unable To Determine Compression Type') def _sub_ip(self, line): - """Substitutes a found IP with its corresponding obfuscated partner. - This is called in the self._clean_line function, along with user _sub_* - functions to scrub a given line in a file. It scans a given line and if - an IP exists, it obfuscates the IP using _ip4_2_db and returns the altered - line - """ + ''' + This will substitute an obfuscated IP for each instance of a given IP in a file + This is called in the self._clean_line function, along with user _sub_* functions to scrub a given + line in a file. 
+ It scans a given line and if an IP exists, it obfuscates the IP using _ip2db and returns the altered line + ''' try: pattern = r"(((\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[1-9]))(\.(\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[0-9])){3})" ips = [each[0] for each in re.findall(pattern, line)] if len(ips) > 0: for ip in ips: - new_ip = self._ip4_2_db(ip) + new_ip = self._ip2db(ip) self.logger.debug("Obfuscating IP - %s > %s", ip, new_ip) line = line.replace(ip, new_ip) return line - - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception("SUB_IP_ERROR: Unable to obfuscate IP address") - - ############################# - # Formatting Functions # - ############################# + raise Exception('SubIPError: Unable to Substitute IP Address - %s', ip) - def _get_version(self): - """Prints out soscleaner version""" + def _get_disclaimer(self): # pragma: no cover + #prints a disclaimer that this isn't an excuse for manual or any other sort of data verification - self.logger.con_out( - "SOSCleaner version: %s" % self.version) + self.logger.con_out("%s version %s" % (self.name, self.version)) + self.logger.warning("%s is a tool to help obfuscate sensitive information from an existing sosreport." % self.name) + self.logger.warning("Please review the content before passing it along to any third party.") - def _get_disclaimer(self): - """Prints out a disclaimer at the beginning of each soscleaner run""" - - self.logger.con_out( - "%s is a tool to help obfuscate sensitive information from an existing sosreport." % self.name) # pragma: no cover - self.logger.con_out( - "Please review the content before passing it along to any third party.") # pragma: no cover - - ########################### - # Reporting Functions # - ########################### - def _create_mac_report(self): - """Creates a report of MAC addresses and their obfuscated counterparts""" - try: - mac_report_name = os.path.join( - self.report_dir, "%s-mac.csv" % self.session) - self.logger.con_out( - 'Creating MAC address Report - %s', mac_report_name) - mac_report = open(mac_report_name, 'w') - mac_report.write('Original MAC Address,Obfuscated MAC Address\n') - if len(self.mac_db) > 0: - for k, v in list(self.mac_db.items()): - mac_report.write('%s,%s\n' % (k, v)) - else: - mac_report.write('None,None\n') - mac_report.close() - os.chmod(mac_report_name, 0o600) - self.logger.info('Completed MAC Address Report') - - self.mac_report = mac_report_name - - except Exception as e: # pragma no cover - self.logger.exception(e) - raise Exception( - 'CREATE_MAC_REPORT_ERROR: Unable to create report - %s', mac_report_name) - - def _create_kw_report(self): - """Creates a report of keywords and their obfuscated counterparts""" + def _create_ip_report(self): + ''' + this will take the obfuscated ip and hostname databases and output csv files + ''' try: - kw_report_name = os.path.join( - self.report_dir, "%s-keyword.csv" % self.session) - self.logger.con_out( - 'Creating keyword address Report - %s', kw_report_name) - kw_report = open(kw_report_name, 'w') - kw_report.write('Original Keyword,Obfuscated Keyword\n') - if self.kw_count > 0: - for keyword, o_keyword in list(self.kw_db.items()): - kw_report.write('%s,%s\n' % (keyword, o_keyword)) - else: - kw_report.write('None,None\n') - kw_report.close() - os.chmod(kw_report_name, 0o600) - self.logger.info('Completed Keyword Report') - - self.kw_report = kw_report_name - - except Exception as e: # pragma: no 
cover - self.logger.exception(e) - raise Exception( - 'CREATE_KW_REPORT_ERROR: unable to create report - $%s', kw_report_name) + ip_report_name = os.path.join(self.report_dir, "%s-ip.csv" % self.session) + self.logger.con_out('Creating IP Report - %s', ip_report_name) + ip_report = open(ip_report_name, 'wt') + ip_report.write('Obfuscated IP,Original IP\n') + for k,v in self.ip_db.items(): + ip_report.write('%s,%s\n' %(self._int2ip(k),self._int2ip(v))) + ip_report.close() + self.logger.info('Completed IP Report') - def _create_un_report(self): - """Creates a report of usernames and their obfuscated counterparts. - """ - try: - un_report_name = os.path.join( - self.report_dir, "%s-username.csv" % self.session) - self.logger.con_out( - 'Creating Username Report - %s', un_report_name) - un_report = open(un_report_name, 'w') - un_report.write('Original Username,Obfuscated Username\n') - for k, v in list(self.user_db.items()): - un_report.write('%s,%s\n' % (k, v)) - un_report.close() - os.chmod(un_report_name, 0o600) - - self.un_report = un_report_name - except Exception as e: # pragma: no cover + self.ip_report = ip_report_name + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - 'CREATE_USERNAME_REPORT_ERROR: Unable to create report - %s', un_report_name) + raise Exception('CreateReport Error: Error Creating IP Report') def _create_hn_report(self): - """Creates a report of hostnames and their obfuscated counterparts""" try: - hn_report_name = os.path.join( - self.report_dir, "%s-hostname.csv" % self.session) - self.logger.con_out( - 'Creating Hostname Report - %s', hn_report_name) - hn_report = open(hn_report_name, 'w') - hn_report.write('Original Hostname,Obfuscated Hostname\n') + hn_report_name = os.path.join(self.report_dir, "%s-hostname.csv" % self.session) + self.logger.con_out('Creating Hostname Report - %s', hn_report_name) + hn_report = open(hn_report_name, 'wt') + hn_report.write('Obfuscated Hostname,Original Hostname\n') if self.hostname_count > 0: - for k, v in list(self.hn_db.items()): - hn_report.write('%s,%s\n' % (k, v)) + for k,v in self.hn_db.items(): + hn_report.write('%s,%s\n' %(k,v)) else: hn_report.write('None,None\n') - os.chmod(hn_report_name, 0o600) hn_report.close() self.logger.info('Completed Hostname Report') self.hn_report = hn_report_name - except Exception as e: # pragma: no cover + except Exception as e: #pragma: no cover self.logger.exception(e) - raise Exception( - 'CREATE_HN_REPORT_ERROR: Unable to create report - %s', hn_report_name) + raise Exception('CreateReport Error: Error Creating Hostname Report') def _create_dn_report(self): - """Creates a report of domain names and their obfuscated conterparts""" try: - dn_report_name = os.path.join( - self.report_dir, "%s-dn.csv" % self.session) - self.logger.con_out( - 'Creating Domainname Report - %s', dn_report_name) - dn_report = open(dn_report_name, 'w') - dn_report.write('Original Domain,Obfuscated Domain\n') + dn_report_name = os.path.join(self.report_dir, "%s-dn.csv" % self.session) + self.logger.con_out('Creating Domainname Report - %s', dn_report_name) + dn_report = open(dn_report_name, 'wt') + dn_report.write('Obfuscated Domain,Original Domain\n') if self.domain_count > 0: - for domain, o_domain in list(self.dn_db.items()): - dn_report.write('%s,%s\n' % (domain, o_domain)) + for k,v in self.dn_db.items(): + dn_report.write('%s,%s\n' %(k,v)) else: dn_report.write('None,None\n') dn_report.close() - os.chmod(dn_report_name, 0o600) self.logger.info('Completed Domainname 
Report') self.dn_report = dn_report_name - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - 'CREATE_DN_REPORT_ERROR: Unable to create report - %s', dn_report_name) + raise Exception('CreateReport Error: Error Creating Domainname Report') - def _create_ip_report(self): - """Creates a report of IP addresses and their obfuscated counterparts""" - try: - ip_report_name = os.path.join( - self.report_dir, "%s-ip.csv" % self.session) - self.logger.con_out('Creating IP Report - %s', ip_report_name) - ip_report = open(ip_report_name, 'w') - ip_report.write('Original IP,Obfuscated IP\n') - for i in self.ip_db: - ip_report.write('%s,%s\n' % (i[0], i[1])) - ip_report.close() - os.chmod(ip_report_name, 0o600) - self.logger.info('Completed IP Report') - - self.ip_report = ip_report_name + def _create_reports(self): # pragma: no cover - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'CREATE_IP_REPORT_ERROR: Unable to create report - %s', ip_report_name) - - def _create_sos_report(self): - """Creates a report of original sosreport tarball and its obfuscated counterpart""" - try: - sos_report_name = os.path.join( - self.report_dir, "%s-sosreport.csv" % self.session) - self.logger.con_out('Creating sosreport Report - %s', sos_report_name) - sos_report = open(sos_report_name, 'w') - sos_report.write('Original Sosreport,Obfuscated Sosreport\n') - sos_report.write('%s,%s.tar.gz\n' % (self.sosreport_filename, self.session)) - sos_report.close() - os.chmod(sos_report_name, 0o600) - self.logger.info('Completed Sosreport Report') - - self.sos_report = sos_report_name - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'CREATE_SOS_REPORT_ERROR: Unable to create report - %s', sos_report_name) - - def _create_reports(self): - """Creates the reports at the end of an soscleaner run""" - - self._create_ip_report() # pragma: no cover - self._create_hn_report() # pragma: no cover - self._create_dn_report() # pragma: no cover - # self._create_un_report() # pragma: no cover - # self._create_mac_report() # pragma: no cover - # self._create_kw_report() # pragma: no cover - # self._create_sos_report() # pragma: no cover - # os.chmod(self.logfile, 0o600) - - ############################# - # MAC Address functions # - ############################# - - def _sub_mac(self, line): - """Finds potential MAC addresses and obfuscates them in a single line.""" - try: - pattern = re.compile(r'(?:[0-9a-fA-F]:?){12}') - macs = re.findall(pattern, line) - if len(macs) > 0: - for mac in macs: - new_mac = self._mac2db(mac) - self.logger.debug( - "Obfuscating MAC address - %s > %s", mac, new_mac) - line = line.replace(mac, new_mac) - return line - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception("SUB_MAC_ERROR: Unable to obfuscate MAC address") - - def _mac2db(self, mac): - """Adds an MAC address to the MAC database and returns the obfuscated - entry, or returns the existing obfuscated MAC entry. 
- """ - try: - o_mac = self.mac_db.get(mac) - if o_mac is None: # no match: we have to add it to the db - # using this lambda to create a valid randomized mac address is - # documented at https://www.commandlinefu.com/commands/view/7245/generate-random-valid-mac-addresses - # many thanks for putting that little thought together - o_mac = ':'.join(['%02x' % x for x in [randint(0, 255) for x in list(range(6))]]) - self.logger.debug( - "Creating new obfuscated MAC address: %s > %s", mac, o_mac) - self.mac_db[mac] = o_mac - - return o_mac - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "MAC2DB_ERROR: unable to add MAC to database - %s", mac) - - ########################### - # Hostname functions # - ########################### - - def _hn2db(self, host): - """Adds a hostname for a hostname for an included domain or return an existing entry. - It is called by _add_hostnames to verify if the domain is in an included - domain for obfuscation, and the entry to hn_db, and return the obfuscated value - """ - try: - o_host = self.hn_db.get(host) - if o_host is None: # no database match - split_host = host.split('.') - self.hostname_count += 1 # increment the counter to get the host ID number - if len(split_host) == 1: # we have a non-fqdn - typically the host short name - o_host = "obfuscatedhost%s" % self.hostname_count - self.hn_db[host] = o_host - elif len(split_host) == 2: # we have a root domain, a la example.com - o_host = self._dn2db(host) - else: # a 3rd level domain or higher - domain = '.'.join(split_host[1:]) - o_domain = self._dn2db(domain) - o_host = "host%s.%s" % (self.hostname_count, o_domain) - self.hn_db[host] = o_host - - if o_host is not None: - return o_host - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "HN2DB_ERROR: Unable to add hostname to database - %s", host) - - def _get_hostname(self, hostname='hostname'): - """Gets the hostname from an sosreport. Used at the beginning of an - SOSCleaner run to set self.hostname and self.domainname - """ - - try: - hostfile = os.path.join(self.dir_path, hostname) - fh = open(hostfile, 'r') - name_list = fh.readline().rstrip().split('.') - hostname = name_list[0] - if len(name_list) > 1: - domainname = '.'.join(name_list[1:len(name_list)]) - else: - domainname = None - - return hostname, domainname - - except IOError as e: # the 'hostname' file doesn't exist or isn't readable for some reason - if not self.quiet: # pragma: no cover - self.logger.exception(e) - - hostname = None - domainname = None - - return hostname, domainname - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - 'GET_HOSTNAME_ERROR: Cannot resolve hostname from %s') % hostfile - - def _validate_domainname(self, hostname): - """Takes a potential domain name and validates it against the domain database - (self.dn_db). It takes care to look for higher-level subdomains for the - domains entered at the beginning of the sosreport run. Logic behind this definition of a valid domain: - A domain can be a total of 253 characters, per RFC 1035, RFC 1123 and RFC 2181 - Each label can be a maximum of 63 characters - With 4th, 5th, 6th level domains being more the norm today, I wanted to take as - broad an interpretation of a domain as I could. 
SO: - separated by a word boundary - the lower domains can be a max of 190 characters, not including dots - any valid domain character is allowed (alpha, digit, dash) - the top level domain can be up to 63 characters, and not contain numbers - With a 200 character limit to the lower domains, technically an 11th level domain - would not be obfuscated. As for right now, I'm OK with that. Please file an issue - in Github if you are not. - Summary: - Valid domain is defined as - . - """ - def _eval_domains(root_domain): - """Looks for matches of higher-level domains against the existing - domains in self.dn_db. Returns True if it's a match, and false if - no match is found. This is used to determine if we should add a new - subdomain to self.dn_db. - """ - for known_domain in list(self.dn_db.keys()): - if known_domain in root_domain: - self.logger.debug( - "evaluated domain found in database %s > %s", root_domain, known_domain) - return True - return False - - domainname = hostname.split('.') - domain_depth = len(domainname) - self.logger.debug("validating domain %s - depth: %s", - hostname, domain_depth) - # The first clause checks for potential domains that are 3rd level - # domains or higher. If the base domain (everything except the - # first octet) is already in the database, it adds the host. If - # the root domain is in the database, but this is a new higher- - # level domain, it adds the higher-level domain to the database - # before moving forward with obfuscating the full hostname. - found_domain = False - if domain_depth > 2: - # everything after the hostname is the domain we need to check - root_domain = '.'.join(domainname[1:domain_depth]) - self.logger.debug("validating domain - %s", root_domain) - # We try a straigh match first - o_domain = self._dn2db(root_domain) - if o_domain is not None: # we got a straight match - found_domain = True - # If we don't get a straight match, then we look to see if - # it is a subdomain of an already obfuscated domain. - else: - add_domain = _eval_domains(root_domain) - if add_domain: - self.logger.debug( - "Found new subdomain of %s - %s", root_domain, domainname) - found_domain = True - o_domain = self._dn2db(root_domain, add_domain=True) - - elif domain_depth == 2: - o_domain = self.dn_db.get(hostname) - if o_domain: - self.logger.debug( - "Domain found in domain database - %s", domainname) - found_domain = True - - return found_domain + self._create_ip_report() + self._create_hn_report() + self._create_dn_report() def _sub_hostname(self, line): - """Replaces the exact hostname and all instances of the domain name with - their obfuscated alternatives. Also handles auto-creation of subdomains - for known domains. Example: if redhat.com is in the domain database, - access.redhat.com and registry.redhat.com will both be obfuscated as - unique domain entries. - """ - # self.logger.debug("Processing Line - %s", line) - potential_hostnames = re.findall( - r'\b[a-zA-Z0-9-\.]{1,200}\.[a-zA-Z]{1,63}\b', line) - try: - for hostname in potential_hostnames: - hostname = hostname.lower() - self.logger.debug( - "Verifying potential hostname - %s", hostname) - domain_found = self._validate_domainname(hostname) - - # If we have a potential match that is a host on a domain that - # we care about, we regex it out of the line. - if domain_found: - o_hostname = self._hn2db(hostname) - line = re.sub(r'\b%s\b(?i)' % hostname, o_hostname, line) - - # Now that the hard work is done, we account for the handful of - # single-word "short domains" that we care about. 
We start with - # the hostname. - if self.hostname is not None: - o_host = self._hn2db(self.hostname) - line = re.sub(r'\b%s\b(?i)' % self.hostname, o_host, line) - - # There are a handful of short domains that we want to obfuscate - # Things like 'localhost' and 'localdomain' - # They are kept in self.short_domains and added to the domain - # database. They won't match the potential_hostnames regex because - # they're only 1 word, so we handle them here. - for domain in self.short_domains: - o_host = self._hn2db(domain) - line = re.sub(r'\b%s\b(?i)' % domain, o_host, line) + ''' + This will replace the exact hostname and all instances of the domain name with the obfuscated alternatives. + Example: + ''' + try: + for od,d in self.dn_db.items(): + #regex = re.compile(r'\w*\.%s' % d) + regex = re.compile(r'(?![\W\-\:\ \.])[a-zA-Z0-9\-\_\.]*\.%s' % d) + hostnames = [each for each in regex.findall(line)] + if len(hostnames) > 0: + for hn in hostnames: + new_hn = self._hn2db(hn) + self.logger.debug("Obfuscating FQDN - %s > %s", hn, new_hn) + line = line.replace(hn, new_hn) + if self.hostname: + line = line.replace(self.hostname, self._hn2db(self.hostname)) #catch any non-fqdn instances of the system hostname return line - - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "SUB_HOSTNAME_ERROR: Unable to obfuscate hostnames on line - %s", line) - - ############################ - # Filesystem functions # - ############################ - - def _clean_line(self, line, filename): - """Returns a line with obfuscations for all covered data types: - hostname, ip, user, keyword, and MAC address. The filename is passed in - so we can know whether or not to obfuscate IP addresses. IP obfuscation - is excluding in a few files where RPM version numbers cause false - positives and are known to not contain IP address information. 
- """ - - try: - process_obfuscation = True - # We want to skip the files in self.false_positives for all - # obfuscation but keywords because they don't have any sensible - # info in them and they generate a lot of false positives that - # much up the obfuscation and confuse people when they're working - # with the files - # Issues #60 & #101 - for false_positive in self.false_positives: - if false_positive in filename: - process_obfuscation = False - new_line = self._sub_keywords(line) # Keyword Substitution - if process_obfuscation: - new_line = self._sub_hostname( - new_line) # Hostname substitution - new_line = self._sub_ip(new_line) # IP substitution - new_line = self._sub_username( - new_line) # Username substitution - - return new_line - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception("CLEAN_LINE_ERROR: Cannot Clean Line - %s" % line) - - def _clean_file(self, f): - """Takes a given file path, scrubs it, and saves a new copy of - the obfuscated file in the same location - """ - if os.path.exists(f) and not os.path.islink(f): - tmp_file = tempfile.TemporaryFile() - try: - data = self._extract_file_data(f) - if len(data) > 0: # if the file isn't empty: - for l in data: - # self.logger.debug("Obfuscating Line - %s", l) - new_l = self._clean_line(l, f) - if six.PY3: - tmp_file.write(new_l.encode('utf-8')) - else: - tmp_file.write(new_l) - - tmp_file.seek(0) - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "CLEAN_FILE_ERROR: Unable to obfuscate file - %s" % f) - - try: - if len(data) > 0: - new_fh = open(f, 'wb') - for line in tmp_file: - new_fh.write(line) - new_fh.close() - except OSError as e: - # If there's an IO error (disk is full) - if e.errno == errno.EIO: # pragma: no cover - self.logger.exception(e) - self.logger.con_out( - "CLEAN_FILE_ERROR: Not enough disk space to complete report obfusation") - self.logger.con_out( - "CLEAN_FILE_ERROR: Removing partially obfuscated report and other artifacts") - self.logger.con_out( - "CLEAN_FILE_ERROR: Please remedy the disk pressure and re-run soscleaner") - self._clean_up() - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "CLEAN_FILE_ERROR: Unable to write obfuscated file - %s" % f) - - finally: - tmp_file.close() - - def _add_extra_files(self, files): - """Incorporates extra files are to be analyzed with an sosreport by - adding them to the origin path to be analyzed - """ - - try: - for f in files: - self.logger.con_out( - "adding additional file for analysis: %s" % f) - fname = os.path.basename(f) - f_new = os.path.join(self.dir_path, fname) - shutil.copyfile(f, f_new) - except IOError as e: - self.logger.con_out( - "ExtraFileError: %s is not readable or does not exist. 
Skipping File" % f) - self.logger.exception(e) - pass - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "ADD_EXTRA_FILES_ERROR: Unable to process extra file - %s" % f) - - def _walk_report(self, folder): - """Returns a dictonary of dictionaries in the format {directory_name:[file1,file2,filex]}""" - - dir_list = {} - try: - for dirName, subdirList, fileList in os.walk(folder): - x = [] - for fname in fileList: - x.append(fname) - dir_list[dirName] = x - - return dir_list - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "WALK_REPORT_ERROR: Unable to create file list in folder - %s", folder) - - def _file_list(self, folder): - """returns a list of file names in an sosreport directory""" - try: - rtn = [] - walk = self._walk_report(folder) - for key, val in list(walk.items()): - for v in val: - x = os.path.join(key, v) - rtn.append(x) - - # a count of the files we'll have in the final cleaned sosreport - self.file_count = len(rtn) - return rtn - - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "FILE_LIST_ERROR: Unable to create file list from directory - %s", folder) + raise Exception('SubHostnameError: Unable to Substitute Hostname/Domainname') def _make_dest_env(self): - """Creates the folder in self.report_dir (defaults to /tmp) to store - sanitized files and populates it using shutil. These are the files that - will be scrubbed. - """ + ''' + This will create the folder in self.report_dir (defaults to /tmp) to store the sanitized files and populate it using shutil + These are the files that will be scrubbed + ''' try: - shutil.copytree(self.report, self.dir_path, - symlinks=True, ignore=self._skip_file) + shutil.copytree(self.report, self.dir_path, symlinks=True, ignore=self._skip_file) - except Exception as e: # pragma: no cover + except Exception as e: #pragma: no cover self.logger.exception(e) - raise Exception( - "MAKE_DESTINATION_ENV_ERROR: Cannot Create Destination Environment") + raise Exception("DestinationEnvironment Error: Cannot Create Destination Environment") def _create_archive(self): - """Creates a tar.gz compressed archive of the scrubbed directory""" + '''This will create a tar.gz compressed archive of the scrubbed directory''' try: - self.archive_path = os.path.join( - self.report_dir, "%s.tar.gz" % self.session) - self.logger.con_out( - 'Creating SOSCleaner Archive - %s', self.archive_path) + self.archive_path = os.path.join(self.report_dir, "%s.tar.gz" % self.session) + self.logger.con_out('Creating SOSCleaner Archive - %s', self.archive_path) t = tarfile.open(self.archive_path, 'w:gz') for dirpath, dirnames, filenames in os.walk(self.dir_path): for f in filenames: f_full = os.path.join(dirpath, f) - f_archive = f_full.replace(self.report_dir, '') - self.logger.debug('adding %s to %s archive', - f_archive, self.archive_path) + f_archive = f_full.replace(self.report_dir,'') + self.logger.debug('adding %s to %s archive', f_archive, self.archive_path) t.add(f_full, arcname=f_archive) - os.chmod(self.archive_path, 0o600) # per #90 - except Exception as e: # pragma: no cover + except Exception as e: #pragma: no cover self.logger.exception(e) - raise Exception( - 'CREATE_ARCHIVE_ERROR: Unable to create archive - %s', self.archive_path) + raise Exception('CreateArchiveError: Unable to create Archive') self._clean_up() self.logger.info('Archiving Complete') + self.logger.con_out('SOSCleaner Complete') if not self.quiet: # pragma: no cover - 
t.add(self.logfile, arcname=self.logfile.replace(self.report_dir, '')) + t.add(self.logfile, arcname=self.logfile.replace(self.report_dir,'')) t.close() - def soscleaner_checksum(self): - """check MD5 against soscleaner tarball""" - soscleaner_archive = self.session + ".tar.gz" - checksum = hashlib.md5(open(soscleaner_archive, 'rb').read()).hexdigest() - - soscleaner_archive_hash = soscleaner_archive + ".md5" - fp = open(soscleaner_archive_hash, "w") - fp.write(checksum + "\n") - self.logger.con_out('md5 checksum is: %s' % checksum) - fp.close() - - def finalmsg(self): - """Final message at the end of the soscleaner run""" - self.logger.con_out('SOSCleaner Complete') - def _clean_up(self): - """Cleans up origin directories and other soscleaner processing artifacts""" + '''This will clean up origin directories, etc.''' self.logger.info('Beginning Clean Up Process') try: if self.origin_path: - self.logger.info( - 'Removing Origin Directory - %s', self.origin_path) + self.logger.info('Removing Origin Directory - %s', self.origin_path) shutil.rmtree(self.origin_path) self.logger.info('Removing Working Directory - %s', self.dir_path) shutil.rmtree(self.dir_path) self.logger.info('Clean Up Process Complete') - - except Exception as e: # pragma: no cover + except Exception as e: #pragma: no cover self.logger.exception(e) - raise Exception( - "CLEAN_UP_ERROR: Unable to complete clean up process") - ######################## - # Domain Functions # - ######################## + def _process_hosts_file(self): + # this will process the hosts file more thoroughly to try and capture as many server short names/aliases as possible + # could lead to false positives if people use dumb things for server aliases, like 'file' or 'server' or other common terms + # this may be an option that can be enabled... --hosts or similar? - def _dn2db(self, domain, add_domain=False): - """Adds a domain to dn_db and returns the obfuscated value.""" try: - o_domain = self.dn_db.get(domain) - if o_domain is None: - # Try converting it all to lowercase - if add_domain: - self.domain_count += 1 - o_domain = "ofuscateddomain%s.com" % self.domain_count - self.dn_db[domain] = o_domain - self.logger.con_out( - "Adding new obfuscated domain - %s > %s", domain, o_domain) - - if o_domain: - return o_domain - else: - return None + if os.path.isfile(os.path.join(self.dir_path, 'etc/hosts')): + with open(os.path.join(self.dir_path, 'etc/hosts')) as f: + self.logger.con_out("Processing hosts file for better obfuscation coverage") + data = f.readlines() + for line in data: + x = re.split('\ |\t', line.rstrip()) #chunk up the line, delimiting with spaces and tabs (both used in hosts files) + # we run through the rest of the items in a given line, ignoring the IP to be picked up by the normal methods + # skipping over the 'localhost' and 'localdomain' entries + for item in x[1:len(x)]: + if len(item) > 0: + if all(['localhost' not in item, 'localdomain' not in item]): + new_host = self._hn2db(item) + self.logger.debug("Added to hostname database through hosts file processing - %s > %s", item, new_host) + else: # pragma: no cover + self.logger.con_out("Unable to Process Hosts File. 
Hosts File Processing Disabled") - except Exception as e: # pragma: no cover + except Exception as e: #pragma: no cover self.logger.exception(e) - raise Exception( - "DN2DB_ERROR: Unable to retrieve obfuscated domain - %s", domain) def _domains2db(self): - """Adds domains to the domain database""" + #adds any additional domainnames to the domain database to be searched for try: - # First we'll grab the domain for the sosreport and obfuscate it to the base root_domain - # value, which defaults to "obfuscateddomain.com" + #we will add the root domain for an FQDN as well. if self.domainname is not None: - self._dn2db(self.domainname, add_domain=True) + self.dn_db[self.root_domain] = self.domainname + self.logger.con_out("Obfuscated Domain Created - %s" % self.root_domain) - for dom in self.domains: - self._dn2db(dom, add_domain=True) + split_root_d = self.root_domain.split('.') - for dom in self.short_domains: - self._dn2db(dom, add_domain=True) + for d in self.domains: + if d not in self.dn_db.values(): #no duplicates + d_number = len(self.dn_db) + o_domain = "%s%s.%s" % (split_root_d[0], d_number, split_root_d[1]) + self.dn_db[o_domain] = d + self.logger.con_out("Obfuscated Domain Created - %s" % o_domain) + self.domain_count = len(self.dn_db) return True - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception("DOMAINS2DB_ERROR: Unable to process domains") - - ######################### - # Keyword functions # - ######################### def _keywords2db(self): - """Adds keywords to the keyword database""" + #processes optional keywords to add to be obfuscated try: - if len(self.keywords_file) > 0: - for f in self.keywords_file: + if self.keywords: # value is set to None by default + k_count = 0 + for f in self.keywords: if os.path.isfile(f): - with open(f, 'r') as klist: + with open(f, 'rt') as klist: for keyword in klist.readlines(): - keyword = keyword.rstrip() - if len(keyword) > 1: - if self.kw_db.get(keyword) is None: # no duplicates - o_kw = "obfuscatedkeyword%s" % self.kw_count - self.kw_db[keyword] = o_kw - self.logger.con_out( - "Added Obfuscated Keyword from Keywords File - %s > %s", keyword, o_kw) - self.kw_count += 1 - else: - self.logger.con_out( - "Unable to add Obfuscated Keyword - %s", keyword) - self.logger.con_out( - "Added Keyword Contents from file - %s", f) + o_kw = "keyword%s" % k_count + self.kw_db[keyword.rstrip()] = o_kw + self.logger.con_out("Added Obfuscated Keyword - %s", o_kw) + k_count += 1 + self.logger.con_out("Added Keyword Contents from file - %s", f) else: - self.logger.con_out( - "%s does not seem to be a file. Not adding any keywords from" % f) - if len(self.keywords) > 0: - for kw in self.keywords: - if len(kw) > 1: # no single digit keywords - o_kw = "obfuscatedkeyword%s" % self.kw_count - self.kw_db[kw] = o_kw - self.logger.con_out( - "Added obfuscated keyword - %s > %s", kw, o_kw) - self.kw_count += 1 - - except Exception as e: # pragma: no cover + self.logger.con_out("%s does not seem to be a file. 
Not adding any keywords from" % f) + + self.kw_count = k_count + + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "KEYWORDS2DB_ERROR: Unable to process keyword - %s", keyword) + + def _kw2db(self, keyword): + #returns the obfuscated value for a keyword + + return self.kw_db[keyword] def _sub_keywords(self, line): - """Accepts a line from a file in an sosreport and obfuscates any known keyword entries on the line.""" - try: - if self.kw_count > 0: # we have obfuscated keywords to work with - for keyword, o_keyword in list(self.kw_db.items()): - if keyword in line: - # insights-client modification to match partial - # words like old soscleaner. original expression - # is r'\b%s\b' - line = re.sub(r'%s' % keyword, o_keyword, line) - self.logger.debug( - "Obfuscating Keyword - %s > %s", keyword, o_keyword) + # this will substitute out any keyword entries on a given line + #try: + if self.kw_count > 0: # we have obfuscated keywords to work with + for k in self.kw_db.keys(): + if k in line: + line = line.replace(k, self._kw2db(k)) + self.logger.debug("Obfuscating Keyword - %s > %s", k, self._kw2db(k)) - return line + return line - except Exception as e: # pragma: no cover + '''except Exception, e: # pragma: no cover self.logger.exception(e) - raise Exception( - 'SUB_KEYWORDS_ERROR: Unable to obfuscate keywords on line - %s', line) + raise Exception('SubKeywordError: Unable to Substitute Keywords')''' - ######################### - # Network Functions # - ######################### + def _get_hostname(self, hostname='hostname'): + #gets the hostname and stores hostname/domainname so they can be filtered out later - def _process_route_file(self): - """Parses the output from the route command in an sosreport to populate - self.net_db with networks to obfuscate - """ try: - route_path = os.path.join(self.dir_path, 'route') - if os.path.exists(route_path): - fh = open(route_path, 'r') - self.logger.info( - "Found route file. Auto-adding routed networks.") - # skip the first 2 header lines and get down to the data - data = fh.readlines()[2:] - for line in data: - x = line.split() - if not x[0] == '0.0.0.0': # skip the default gateway - net_string = "%s/%s" % (x[0], x[2]) - self._ip4_add_network(net_string) - self.logger.debug( - "Network Added by Auto-Route Processing.") - fh.close() + hostfile = os.path.join(self.dir_path, hostname) + fh = open(hostfile, 'rt') + name_list = fh.readline().rstrip().split('.') + hostname = name_list[0] + if len(name_list) > 1: + domainname = '.'.join(name_list[1:len(name_list)]) else: - self.logger.info( - "No route file found. 
Unable to auto-add routed networks for this system.")
-        except Exception as e:  # pragma: no cover
-            self.logger.exception(e)
-            raise Exception(
-                "PROCESS_ROUTE_FILE_ERROR: Cannot process file - %s", route_path)
+                domainname = None
 
-    def _ip4_new_obfuscate_net(self, netmask):
-        """Returns a new IPv4 Network Object to be used as an obfuscated network."""
-        try:
-            # this is going to get hacky
-            # this will return an IPv4Address object that is 129.0.0.0
-            start_point = self.default_net.broadcast + 1
-            x = start_point.compressed.split('.')  # break it apart
-            # calculate the new first octet
-            new_octet = str(int(x[0]) + self.net_count)
-
-            self.net_count += 1
-            # a new string to create the new obfuscated network object
-            new_net_string = "%s.0.0.0/%s" % (new_octet, netmask)
-            if six.PY3:
-                retval = IPv4Network(new_net_string)
-            else:
-                retval = IPv4Network(unicode(new_net_string))
+            return hostname, domainname
 
-            return retval
+        except IOError as e:  # the 'hostname' file doesn't exist or isn't readable for some reason
+            self.logger.warning("Unable to determine system hostname!!!")
+            self.logger.warning("Automatic Hostname Data Obfuscation Will Not Occur!!!")
+            self.logger.warning("To Remedy This Situation please enable the 'general' plugin when running sosreport")
+            self.logger.warning("and/or be sure the 'hostname' symlink exists in the root directory of your sosreport")
+            if not self.quiet:
+                self.logger.exception(e)
 
-        except Exception as e:  # pragma: no cover
-            self.logger.exception(e)
-            raise Exception(
-                "IP4_NEW_OBFUSCATE_NET_ERROR: Unable to create new network - %s", new_net_string)
-
-    def _ip4_parse_network(self, network):
-        """Takes the input values and return useable objects from them.
-        Generates an IPv4Network object for the original network, and a string
-        value for the subnet mask that is used to create the obfuscated network
-        """
-        try:
-            if six.PY3:
-                net = IPv4Network(network)
+            hostname = None
+            domainname = None
+
+            return hostname, domainname
+
+        except Exception as e:  # pragma: no cover
+            self.logger.exception(e)
+            raise Exception('GetHostname Error: Cannot resolve hostname from %s' % hostfile)
+
+    def _ip2int(self, ipstr):
+        # converts a dotted decimal IP address into an integer that can be incremented
+        integer = struct.unpack('!I', socket.inet_aton(ipstr))[0]
+
+        return integer
+
+    def _int2ip(self, num):
+        # converts an integer stored in the IP database into a dotted decimal IP
+        ip = socket.inet_ntoa(struct.pack('!I', num))
+
+        return ip
+
+    def _ip2db(self, ip):
+        '''
+        adds an IP address to the IP database and returns the obfuscated entry, or returns the
+        existing obfuscated IP entry
+        FORMAT:
+        {$obfuscated_ip: $original_ip,}
+        '''
+
+        ip_num = self._ip2int(ip)
+        ip_found = False
+        db = self.ip_db
+        for k, v in db.items():
+            if v == ip_num:
+                ret_ip = self._int2ip(k)
+                ip_found = True
+        if ip_found:  # the entry already existed
+            return ret_ip
+        else:  # the entry did not already exist
+            if len(self.ip_db) > 0:
+                new_ip = max(db.keys()) + 1
             else:
-                net = IPv4Network(unicode(network))
-            subnet = str(net.prefixlen)
+                new_ip = self._ip2int(self.start_ip)
+            db[new_ip] = ip_num
+
+            return self._int2ip(new_ip)
+
+    def _hn2db(self, hn):
+        '''
+        This will add a hostname for an included domain or return an existing entry
+        '''
+        db = self.hn_db
+        hn_found = False
+        for k, v in db.items():
+            if v == hn:  # the hostname is in the database
+                ret_hn = k
+                hn_found = True
+        if hn_found:
+            return ret_hn
+        else:
+            self.hostname_count += 1  # we have a new hostname, so we
increment the counter to get the host ID number + o_domain = self.root_domain + for od,d in self.dn_db.items(): + if d in hn: + o_domain = od + new_hn = "host%s.%s" % (self.hostname_count, o_domain) + self.hn_db[new_hn] = hn - return net, subnet + return new_hn - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "IP4_PARSE_NETWORK_ERROR: Unable to parse network - %s", network) + def _walk_report(self, folder): + '''returns a dictonary of dictionaries in the format {directory_name:[file1,file2,filex]}''' - def _ip4_network_in_db(self, network): - """Returns True if a network already exists in self.net_db. Is used in - self._ip4_add_network to ensure we don't get duplicate network entries - """ + dir_list = {} try: - if any(network in x for x in self.net_db): - return True - return False + for dirName, subdirList, fileList in os.walk(folder): + x = [] + for fname in fileList: + x.append(fname) + dir_list[dirName] = x - except Exception as e: # pragma: no cover + return dir_list + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "IP4_NETWORK_IN_DB_ERROR: Unable to test for network in network database - %s", network) - - def _add_loopback_network(self): - """ - Adds an entry into the needed databases to keep loopback addresses - somewhat sane. They will be obfuscated, but within the loopback numberspace. - So more of a shuffler than anything else. - """ - try: - self.logger.info( - "Adding Entry to Network Metadata Database - 127.0.0.0") - self.net_metadata['127.0.0.0'] = dict() - self.net_metadata['127.0.0.0']['host_count'] = 0 + raise Exception("WalkReport Error: Unable to Walk Report") - if six.PY3: - lb_net = IPv4Network('127.0.0.0/8') - else: - lb_net = IPv4Network(unicode('127.0.0.0/8')) - loopback_entry = (lb_net, lb_net) - self.net_db.append(loopback_entry) - self.logger.con_out("Creating Loopback Network Entry") + def _file_list(self, folder): + '''returns a list of file names in an sosreport directory''' + rtn = [] + walk = self._walk_report(folder) + for key,val in walk.items(): + for v in val: + x=os.path.join(key,v) + rtn.append(x) - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "ADD_LOOPBACK_NETWORK_ERROR: Unable to create obfuscated loopback network") - - def _ip4_add_network(self, network): - """Takes any networks specified via the command-line parameters as well - as the routes file (if present) and creates obfuscated networks for each - of them. This is called in self._process_route_file as well as in - self.clean_report - """ - try: - net, netmask = self._ip4_parse_network(network) - - # make sure we don't have duplicates - if not self._ip4_network_in_db(net): - new_net = self._ip4_new_obfuscate_net( - netmask) # the obfuscated network - new_entry = (net, new_net) - - self.net_db.append(new_entry) - self.logger.con_out( - "Created New Obfuscated Network - %s" % new_net.with_prefixlen) - - self.net_metadata[new_net.network_address.compressed] = dict() - self.logger.info( - "Adding Entry to Network Metadata Database - %s" % new_net.with_prefixlen) - self.net_metadata[new_net.network_address.compressed]['host_count'] = 0 - else: - self.logger.info( - "Network already exists in database. Not obfuscating. 
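The _ip2int/_int2ip helpers above turn addresses into incrementable integers, which is what lets _ip2db hand out sequential obfuscated IPs. A runnable sketch (the start address is hypothetical; the real value comes from self.start_ip)::

    import socket
    import struct

    def ip2int(ipstr):
        return struct.unpack('!I', socket.inet_aton(ipstr))[0]

    def int2ip(num):
        return socket.inet_ntoa(struct.pack('!I', num))

    start_ip = '10.230.230.1'                           # hypothetical
    ip_db = {ip2int(start_ip): ip2int('192.168.1.10')}  # {obfuscated: original}
    new_ip = max(ip_db.keys()) + 1                      # next free obfuscated address
    assert int2ip(new_ip) == '10.230.230.2'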
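Likewise, a sketch of the host numbering in _hn2db above, assuming one hypothetical dn_db entry of the {obfuscated_domain: original_domain} form::

    dn_db = {'obfuscateddomain1.com': 'customer.example'}  # hypothetical
    root_domain = 'obfuscateddomain.com'
    hostname_count = 1                  # already incremented for the new host
    hn = 'db01.customer.example'
    o_domain = root_domain
    for od, d in dn_db.items():
        if d in hn:
            o_domain = od
    new_hn = "host%s.%s" % (hostname_count, o_domain)
    # new_hn == 'host1.obfuscateddomain1.com'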
- %s" % network) + self.file_count = len(rtn) #a count of the files we'll have in the final cleaned sosreport, for reporting + return rtn - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "IP4_ADD_NETWORK_ERROR: Unable to add obfuscated network - %s", network) - - def _ip4_find_network(self, ip): - """Takes an IP address and returns back the obfuscated network it belongs to - This is called by the _ip4_2_db function - The value returned is a string that is the network address for the given network - IPv4Network.network.compressed - This can be used to create a new obfuscated IP address for this value - """ - try: - if six.PY3: - ip = IPv4Address(ip) # re-cast as an IPv4 object - else: - ip = IPv4Address(unicode(ip)) # re-cast as an IPv4 object - network = self.default_net.network_address - for net in self.net_db: - if ip in net[0]: - # we have a match! We'll return the proper obfuscated network - network = net[1].network_address + def _clean_line(self, l): + '''this will return a line with obfuscations for all possible variables, hostname, ip, etc.''' - return network + new_line = self._sub_ip(l) # IP substitution + new_line = self._sub_hostname(new_line) # Hostname substitution + new_line = self._sub_keywords(new_line) # Keyword Substitution - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "IP4_FIND_NETWORK_ERROR: Unable to determin obfuscated network for IP address - %s", ip) - - def _ip4_in_db(self, ip): - """Returns True if an IP is found the the obfuscation database. Returns - False otherwise The ip parameter is an IPv4Address object This function - is called from within _ip4_2_db - """ - try: - if any(ip in x for x in self.ip_db): - return True - return False + return new_line - except Exception as e: # pragma: no cover - self.logger.exception(e) - raise Exception( - "IP4_IN_DB_ERROR: Unable to verify if IP is in database - %s", ip) + def _clean_file(self, f): + '''this will take a given file path, scrub it accordingly, and save a new copy of the file + in the same location''' + if os.path.exists(f) and not os.path.islink(f): + tmp_file = tempfile.TemporaryFile(mode='w+b') + try: + fh = open(f, 'r') + data = fh.readlines() + fh.close() + if len(data) > 0: #if the file isn't empty: + for l in data: + new_l = self._clean_line(l) + if six.PY3: + tmp_file.write(new_l.encode('utf-8')) + else: + tmp_file.write(new_l) - def _ip4_2_db(self, orig_ip): - """Adds an IP address to the IP database and returns the obfuscated - entry, or returns the existing obfuscated IP entry. 
- """ - try: - if self._ip4_in_db(orig_ip): # the IP exists already in the database - # http://stackoverflow.com/a/18114565/263834 - data = dict(self.ip_db) - # we'll pull the existing obfuscated IP from the database - obf_ip = data[orig_ip] - - return obf_ip.compressed - - else: # it's a new database, so we have to create a new obfuscated IP for the proper network and a new ip_db entry - # get the network information - net = self._ip4_find_network(orig_ip) - self.net_metadata[net.compressed]['host_count'] += 1 - # take the network and increment the number of hosts to get to the next available IP - if six.PY3: - obf_ip = IPv4Address( - net) + self.net_metadata[net.compressed]['host_count'] - else: - obf_ip = IPv4Address( - unicode(net)) + self.net_metadata[net.compressed]['host_count'] - self.ip_db.append((orig_ip, obf_ip)) - - return obf_ip.compressed + tmp_file.seek(0) + + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception("CleanFile Error: Cannot Open File For Reading - %s" % f) + try: + if len(data) > 0: + new_fh = open(f, 'wb') + for line in tmp_file: + new_fh.write(line) + new_fh.close() + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception("CleanFile Error: Cannot Write to New File - %s" % f) + + finally: + tmp_file.close() + + def _add_extra_files(self, files): + '''if extra files are to be analyzed with an sosreport, this will add them to the origin path to be analyzed''' + + try: + for f in files: + self.logger.con_out("adding additional file for analysis: %s" % f) + fname = os.path.basename(f) + f_new = os.path.join(self.dir_path, fname) + shutil.copyfile(f,f_new) + except IOError as e: + self.logger.con_out("ExtraFileError: %s is not readable or does not exist. Skipping File" % f) + self.logger.exception(e) + pass except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "IP4_2_DB_ERROR: unable to add IP to database - %s", orig_ip) + raise Exception("ExtraFileError: Unable to Process Extra File - %s" % f) def _clean_files_only(self, files): - """Processes one or more specific files, instead of a full sosreport.""" + ''' if a user only wants to process one or more specific files, instead of a full sosreport ''' try: if not (os.path.exists(self.origin_path)): - self.logger.info("Creating Origin Path - %s" % - self.origin_path) - # create the origin_path directory - os.makedirs(self.origin_path) + self.logger.info("Creating Origin Path - %s" % self.origin_path) + os.makedirs(self.origin_path) # create the origin_path directory if not (os.path.exists(self.dir_path)): - self.logger.info("Creating Directory Path - %s" % - self.dir_path) + self.logger.info("Creating Directory Path - %s" % self.dir_path) os.makedirs(self.dir_path) # create the dir_path directory self._add_extra_files(files) - except OSError as e: # pragma: no cover - # If the file already exists + except OSError as e: # pragma: no cover if e.errno == errno.EEXIST: pass - # If there's an IO error (disk is full) - elif e.errno == errno.EIO: # pragma: no cover - self.logger.exception(e) - self.logger.con_out( - "CLEAN_FILE_ERROR: Not enough disk space to complete report obfusation") - self.logger.con_out( - "CLEAN_FILE_ERROR: Remove partially obfuscated report and other artifacts") - self.logger.con_out( - "CLEAN_FILE_ERROR: Please remedy the disk pressure and re-run soscleaner") - self._clean_up() else: # pragma: no cover self.logger.exception(e) - raise Exception( - "CLEAN_FILES_ONLY_ERROR: Unable to clean file from 
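The cleaned content is staged in a temporary file and only written back over the original once every line has been obfuscated. A simplified, runnable sketch of that pattern, with clean_line standing in for self._clean_line::

    import tempfile

    def scrub_file(path, clean_line):
        with open(path, 'r') as fh:
            data = fh.readlines()
        if data:                                  # empty files are left untouched, as above
            with tempfile.TemporaryFile(mode='w+') as tmp:
                for line in data:
                    tmp.write(clean_line(line))
                tmp.seek(0)
                with open(path, 'w') as new_fh:   # overwrite the original in place
                    new_fh.write(tmp.read())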
dataset - OSError") + raise e - except Exception as e: # pragma: no cover + except Exception as e: # pragma: no cover self.logger.exception(e) - raise Exception( - "CLEAN_FILES_ONLY_ERROR: Unable toclean files from dataset") + raise Exception("CleanFilesOnlyError: unable to process") - def _process_report_dir(self, report_dir): # pragma: no cover - """Overrides the default (/tmp) location for the soscleaner run""" - try: # pragma: no cover - if os.path.isdir(report_dir): - self.report_dir = report_dir - - return True + def clean_report(self, options, sosreport): # pragma: no cover + '''this is the primary function, to put everything together and analyze an sosreport''' - except Exception as e: - self.logger.exception(e) - raise Exception( - "PROCESS_REPORT_DIR_ERROR: Unable to set report output directory") - - def _start_soscleaner(self): # pragma no cover - """Sets up the data structures and filesystem attributes to get soscleaner going properly""" - try: - self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment() - self._start_logging(self.logfile) - self._check_uid() # make sure it's soscleaner is running as root - self._get_version() - self._get_disclaimer() - except Exception as e: - self.logger.exception(e) - raise Exception( - "START_SOSCLEANER_ERROR: Unable to create needed artifacts to run soscleaner") - - def clean_report(self, options, sosreport): # pragma: no cover - """The primary function, to put everything together and analyze an sosreport.""" - if options.report_dir: - self._process_report_dir(options.report_dir) - self.loglevel = options.loglevel - self._start_soscleaner() - self._read_later_config_options() - if options.obfuscate_macs: - self.obfuscate_macs = options.obfuscate_macs - self._add_loopback_network() - if options.networks: # we have defined networks - self.networks = options.networks - for network in options.networks: - self._ip4_add_network(network) + if options.report_dir: # override the default location for artifacts (/tmp) + if os.path.isdir(options.report_dir): + self.report_dir = options.report_dir + self.origin_path, self.dir_path, self.session, self.logfile, self.uuid = self._prep_environment() + self._start_logging(self.logfile) + self._get_disclaimer() if options.domains: - self.domains.extend(options.domains) - if options.keywords_file: - self.keywords_file = options.keywords_file + self.domains = options.domains if options.keywords: self.keywords = options.keywords - self._keywords2db() - if options.users: # users from the command line with the -u option - self._process_user_option(options.users) - if options.users_file: - self.users_file = options.users_file + self._keywords2db() if not sosreport: if not options.files: - raise Exception( - "Error: You must supply either an sosreport and/or files to process") - self.logger.con_out( - "No sosreport supplied. Only processing specific files") - if not options.networks: - self.logger.con_out( - "No sosreport supplied and no networks specified. All IP addresses will be obfuscated into the same default subnet") + raise Exception("Error: You must supply either an sosreport and/or files to process") + + self.logger.con_out("No sosreport supplied. 
Only processing specific files") self._clean_files_only(options.files) else: # we DO have an sosreport to analyze self.report = self._extract_sosreport(sosreport) self._make_dest_env() # create the working directory if options.hostname_path: - self.hostname, self.domainname = self._get_hostname( - options.hostname_path) + self.hostname, self.domainname = self._get_hostname(options.hostname_path) else: self.hostname, self.domainname = self._get_hostname() - self._process_route_file() + if options.files: self._add_extra_files(options.files) + if self.hostname: # if we have a hostname that's not a None type - # we'll prime the hostname pump to clear out a ton of useless logic later - self.hn_db['host0'] = self.hostname + self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later + + self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible self._domains2db() if options.core_collect: @@ -1657,9 +686,7 @@ def clean_report(self, options, sosreport): # pragma: no cover files = self._file_list(os.path.join(self.dir_path, 'data')) else: files = self._file_list(self.dir_path) - self._process_users_file() - self.logger.con_out( - "IP Obfuscation Network Created - %s", self.default_net.compressed) + self.logger.con_out("IP Obfuscation Start Address - %s", self.start_ip) self.logger.con_out("*** SOSCleaner Processing ***") self.logger.info("Working Directory - %s", self.dir_path) for f in files: @@ -1687,10 +714,8 @@ def clean_report(self, options, sosreport): # pragma: no cover self._clean_file(f) self.logger.con_out("*** SOSCleaner Statistics ***") self.logger.con_out("IP Addresses Obfuscated - %s", len(self.ip_db)) - self.logger.con_out("Hostnames Obfuscated - %s", len(self.hn_db)) - self.logger.con_out("Domains Obfuscated - %s", len(self.dn_db)) - self.logger.con_out("Users Obfuscated - %s", self.user_count) - self.logger.con_out("Keywords Obfuscated - %s", self.kw_count) + self.logger.con_out("Hostnames Obfuscated - %s" , len(self.hn_db)) + self.logger.con_out("Domains Obfuscated - %s" , len(self.dn_db)) self.logger.con_out("Total Files Analyzed - %s", self.file_count) self.logger.con_out("*** SOSCleaner Artifacts ***") self._create_reports() @@ -1706,9 +731,6 @@ def clean_report(self, options, sosreport): # pragma: no cover # end insights-client modifications self._create_archive() - self.soscleaner_checksum() - self.finalmsg() - return_data = [self.archive_path, self.logfile, self.ip_report] if self.hostname: diff --git a/insights/tests/client/data_collector/test_done.py b/insights/tests/client/data_collector/test_done.py index 65643301a..1126cd717 100644 --- a/insights/tests/client/data_collector/test_done.py +++ b/insights/tests/client/data_collector/test_done.py @@ -76,10 +76,6 @@ def test_soscleaner_additions(isdir_, clean_opts): for returning before creating the archive ''' clean_opts.hostname_path = 'test' - clean_opts.obfuscate_macs = False - clean_opts.networks = None - clean_opts.users = None - clean_opts.users_file = None # test that soscleaner returns as normal by default, # then that it returns None when no_tar_file is not None @@ -91,7 +87,6 @@ def test_soscleaner_additions(isdir_, clean_opts): s.file_count = Mock() s._prep_environment = Mock(return_value=(None, '/var/tmp/test/socleaner-test', None, None, None)) s._start_logging = Mock() - s._check_uid = Mock() s._get_disclaimer = Mock() s._keywords2db = Mock() s._clean_files_only = Mock() @@ -103,9 +98,6 @@ def 
test_soscleaner_additions(isdir_, clean_opts):
     s._domains2db = Mock()
     s._file_list = Mock(return_value=[])
     s._clean_file = Mock()
-    s._process_route_file = Mock()
-    s._process_users_file = Mock()
-    s.soscleaner_checksum = Mock()
     s._create_reports = Mock(side_effect=setattr(s, 'logfile', 'test'))
     s._create_reports = Mock(side_effect=setattr(s, 'ip_report', 'test'))
     s._create_archive = Mock(side_effect=setattr(s, 'archive_path', 'test'))
diff --git a/insights/tests/client/data_collector/test_redact.py b/insights/tests/client/data_collector/test_redact.py
index 7779b4154..f5879580b 100644
--- a/insights/tests/client/data_collector/test_redact.py
+++ b/insights/tests/client/data_collector/test_redact.py
@@ -76,7 +76,6 @@ def test_redact_call_walk_core(walk):
     '''
     Verify that redact() calls os.walk when an archive structure is
     present in /var/tmp/**/insights-*
-    With core collection, /data is added to the path
     '''
     conf = InsightsConfig(core_collect=True)

From e0f8762e5356c3a851ff59a9f077d004ccbcf193 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 24 Sep 2020 02:53:44 +0800
Subject: [PATCH 186/892] Update the check of netweaver for Sap combiner
 (#2763)

Signed-off-by: Xiangce Liu
---
 insights/combiners/sap.py            | 28 ++++++++++++++++++++++------
 insights/combiners/tests/test_sap.py |  3 ++-
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py
index 3c51a3e7a..92ab6ec3d 100644
--- a/insights/combiners/sap.py
+++ b/insights/combiners/sap.py
@@ -20,6 +20,23 @@
     field_names=["name", "hostname", "sid", "type", "number", "fqdn", "version"])
 """namedtuple: Type for storing the SAP instance."""
 
+FUNC_TYPES = ('SMDA',)
+"""
+SMDA   : Solution Manager Diagnostics Agents
+"""
+NETW_TYPES = ('D', 'ASCS', 'DVEBMGS', 'J', 'SCS', 'ERS', 'W', 'G', 'JC')
+"""
+D      : NetWeaver (ABAP Dialog Instance)
+ASCS   : NetWeaver (ABAP Central Services)
+DVEBMGS: NetWeaver (Primary Application Server)
+J      : NetWeaver (Java App Server Instance)
+SCS    : NetWeaver (Java Central Services)
+ERS    : NetWeaver (Enqueue Replication Server)
+W      : NetWeaver (WebDispatcher)
+G      : NetWeaver (Gateway)
+JC     : NetWeaver (Java App Server Instance)
+"""
+
 
 @combiner(hostname, [SAPHostCtrlInstances, Lssap])
 class Sap(dict):
@@ -59,7 +76,6 @@ class Sap(dict):
             E.g.
HANA, NetWeaver, ASCS, or others local_instances (list): List of all SAP instances running on this host """ - FUNC_INSTS = ('SMDA',) """ tuple: Tuple of the prefix string of the functional SAP instances""" def __init__(self, hostname, insts, lssap): @@ -71,10 +87,10 @@ def __init__(self, hostname, insts, lssap): self.all_instances = [] self._types = set() if insts: + self._types = insts.types + self.all_instances = insts.instances for inst in insts.data: k = inst['InstanceName'] - self.all_instances.append(k) - self._types.add(inst['InstanceType']) self.local_instances.append(k) if hn == inst['Hostname'] else None data[k] = SAPInstances(k, inst['Hostname'], @@ -103,7 +119,7 @@ def __init__(self, hostname, insts, lssap): self.update(data) for i in self.all_instances: - (self.function_instances if i.startswith(self.FUNC_INSTS) else self.business_instances).append(i) + (self.function_instances if i.startswith(FUNC_TYPES) else self.business_instances).append(i) def version(self, instance): """str: Returns the version of the ``instance``.""" @@ -128,7 +144,7 @@ def number(self, instance): @property def is_netweaver(self): """bool: Is any SAP NetWeaver instance detected?""" - return 'D' in self._types + return any(_t in self._types for _t in NETW_TYPES) @property def is_hana(self): @@ -137,7 +153,7 @@ def is_hana(self): @property def is_ascs(self): - """bool: Is any SAP System Central Services instance detected?""" + """bool: Is any ABAP Central Services instance detected?""" return 'ASCS' in self._types @property diff --git a/insights/combiners/tests/test_sap.py b/insights/combiners/tests/test_sap.py index 24ba813b4..00ca53622 100644 --- a/insights/combiners/tests/test_sap.py +++ b/insights/combiners/tests/test_sap.py @@ -257,7 +257,8 @@ def test_lssap_ascs(): hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) sap = Sap(hn, None, lssap) assert sap['ASCS16'].sid == 'HA2' - assert sap.is_netweaver is False + # ASCS is also a kind of NetWeaver + assert sap.is_netweaver is True assert sap.is_hana is False assert sap.is_ascs is True From 9ea8e902f8122659a84b96f6e8bf27f809f0bf05 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 24 Sep 2020 03:44:12 +0800 Subject: [PATCH 187/892] Update the 'raw_line_key' of parse_delimited_table (#2766) - the raw line should not be stripped Signed-off-by: Xiangce Liu --- insights/parsers/__init__.py | 6 +++--- insights/parsers/tests/test_parsers_module.py | 15 +++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/insights/parsers/__init__.py b/insights/parsers/__init__.py index 728da2b48..cb899be32 100644 --- a/insights/parsers/__init__.py +++ b/insights/parsers/__init__.py @@ -445,15 +445,15 @@ def parse_delimited_table(table_lines, content = table_lines[first_line + 1:last_line] headings = [c.strip() if strip else c for c in header.split(header_delim)] r = [] - for row in content: - row = row.strip() + for line in content: + row = line.strip() if row: rowsplit = row.split(delim, max_splits) if strip: rowsplit = [i.strip() for i in rowsplit] o = dict(zip(headings, rowsplit)) if raw_line_key: - o[raw_line_key] = row + o[raw_line_key] = line r.append(o) return r diff --git a/insights/parsers/tests/test_parsers_module.py b/insights/parsers/tests/test_parsers_module.py index 108723cf9..3795dc162 100644 --- a/insights/parsers/tests/test_parsers_module.py +++ b/insights/parsers/tests/test_parsers_module.py @@ -386,6 +386,12 @@ def test_optlist_with_spaces(): (402 rows) """.strip() # Normally has a --- separator line, which is 
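The widened check means any instance whose type appears in NETW_TYPES now counts as NetWeaver, not only 'D'. A minimal sketch mirroring the updated test::

    NETW_TYPES = ('D', 'ASCS', 'DVEBMGS', 'J', 'SCS', 'ERS', 'W', 'G', 'JC')
    types = {'ASCS'}                    # instance types detected on the host
    is_netweaver = any(_t in types for _t in NETW_TYPES)
    # True - an ASCS-only system is now reported as NetWeaver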
ignored using get_active_lines +TABLE1 = """ +THIS IS A HEADER + this is some content_with_blank_prefix +This is more content +""".strip() + TABLE2 = [ "SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE", "HA2| 16| D16| lu0417|749, patch 10, changelist 1698137| /usr/sap/HA2/D16/exe", @@ -473,6 +479,15 @@ def test_parse_delimited_table(): "DIR_EXECUTABLE": "/usr/sap/HA2/D22/exe"}] assert expected == result + # Test raw_line_key + TABLE1_SP = TABLE1.splitlines() + result = parse_delimited_table(TABLE1_SP, raw_line_key='raw_line') + assert isinstance(result, list) + assert len(result) == 2 + assert isinstance(result[0], dict) + # Get the RAW line + assert result[0]['raw_line'] == TABLE1_SP[1] + DATA_LIST = [ {'name': 'test 1', 'role': 'server', 'memory_gb': 16, 'ssd': True}, From 640dd19510adfc92d39a41cc5d65104e3cd7d906 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 24 Sep 2020 21:15:44 +0800 Subject: [PATCH 188/892] Return the common path of marker which is close to root (#2765) * Sometimes, the dir named "sos_commands" appears more than once in sosreport. In this case, the common path should be the one which is the closest to the root. Signed-off-by: Huanhuan Li --- insights/core/context.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/insights/core/context.py b/insights/core/context.py index 3e73c4a43..6ee6295af 100644 --- a/insights/core/context.py +++ b/insights/core/context.py @@ -162,12 +162,22 @@ def handles(cls, files): sep = os.path.sep m = sep + cls.marker.lstrip(sep) + marker_root = set() for f in files: if m in f: i = f.find(m) if f.endswith(m) or f[i + len(m)] == sep: root = os.path.dirname(f[:i + 1]) - return root, cls + marker_root.add(root) + if len(marker_root) == 1: + return (marker_root.pop(), cls) + if len(marker_root) > 1: + # when more marker found, return the one which is closest to root + closest_root = marker_root.pop() + for left_one in marker_root: + if len(left_one) < len(closest_root): + closest_root = left_one + return (closest_root, cls) return (None, None) def check_output(self, cmd, timeout=None, keep_rc=False, env=None): From 3d9514152432f575974662c527553b1f5a139952 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 24 Sep 2020 21:46:12 +0800 Subject: [PATCH 189/892] New parser OpenVmToolsStatRawTextSession (#2768) * New parser OpenVmToolsStatRawTextSession Signed-off-by: Xiangce Liu * Remove the filterable=True Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/open_vm_tools.rst | 3 + insights/parsers/open_vm_tools.py | 66 +++++++++++++++++++ insights/parsers/tests/test_open_vm_tools.py | 41 ++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 113 insertions(+) create mode 100644 docs/shared_parsers_catalog/open_vm_tools.rst create mode 100644 insights/parsers/open_vm_tools.py create mode 100644 insights/parsers/tests/test_open_vm_tools.py diff --git a/docs/shared_parsers_catalog/open_vm_tools.rst b/docs/shared_parsers_catalog/open_vm_tools.rst new file mode 100644 index 000000000..ff951d21f --- /dev/null +++ b/docs/shared_parsers_catalog/open_vm_tools.rst @@ -0,0 +1,3 @@ +.. 
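With the parse_delimited_table change above, the value stored under raw_line_key is the original line rather than its stripped copy. A short sketch of the expected behaviour::

    from insights.parsers import parse_delimited_table

    lines = ['THIS IS A HEADER',
             ' this is some content_with_blank_prefix']
    result = parse_delimited_table(lines, raw_line_key='raw_line')
    # result[0]['raw_line'] keeps its leading blank:
    # ' this is some content_with_blank_prefix'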
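For the handles() change: when the marker directory occurs more than once in the archive, the candidate root closest to the filesystem root wins. On hypothetical paths, the selection loop is equivalent to a min() by path length::

    marker_root = {'/tmp/sosreport', '/tmp/sosreport/extracted'}  # hypothetical candidates
    closest_root = min(marker_root, key=len)
    # '/tmp/sosreport' - the root closest to /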
automodule:: insights.parsers.open_vm_tools + :members: + :show-inheritance: diff --git a/insights/parsers/open_vm_tools.py b/insights/parsers/open_vm_tools.py new file mode 100644 index 000000000..f3cee37d8 --- /dev/null +++ b/insights/parsers/open_vm_tools.py @@ -0,0 +1,66 @@ +""" +OpenVmTools - Commands ``open-vm-tools`` +======================================== + +Parsers that parse the output of command ``open-mv-tools`` are included in this +module: + +OpenVmToolsStatRawTextSession - Command ``vmware-toolbox-cmd stat raw text session`` +------------------------------------------------------------------------------------ + +""" + +from insights import parser, CommandParser +from insights.parsers import SkipException +from insights.specs import Specs + + +@parser(Specs.open_vm_tools_stat_raw_text_session) +class OpenVmToolsStatRawTextSession(CommandParser, dict): + """ + Class to parse the output of command ``vmware-toolbox-cmd stat raw text session`` + + Sample input:: + + session = 4004861987670969122 + uptime = 1036293956 + version = VMware ESXi 6.0.0 build-12345 + provider = + uuid.bios = 00 00 00 00 00 00 66 8e-00 00 00 00 51 1e 23 f3 + + Examples: + >>> type(ovmt) + + >>> ovmt['version'] == 'VMware ESXi 6.0.0 build-12345' + True + >>> ovmt.vmware_esxi_version == '6.0.0' + True + """ + + def parse_content(self, content): + if not content or 'must be run inside a virtual machine' in content[0]: + raise SkipException + + data = dict() + for line in content: + if '=' in line: + key, value = [i.strip() for i in line.split('=', 1)] + data[key] = value + + if not data: + raise SkipException + + self._vmware_esxi_version = None + version = data.get('version') + if "VMware ESXi" in version: + self._vmware_esxi_version = version.split()[2] + + self.update(data) + + @property + def vmware_esxi_version(self): + """ + Returns: + (str): The version of the VMware ESXi, e.g. '6.0.0'. None by default. + """ + return self._vmware_esxi_version diff --git a/insights/parsers/tests/test_open_vm_tools.py b/insights/parsers/tests/test_open_vm_tools.py new file mode 100644 index 000000000..19a9fef5f --- /dev/null +++ b/insights/parsers/tests/test_open_vm_tools.py @@ -0,0 +1,41 @@ +import pytest +import doctest +from insights.parsers import open_vm_tools, SkipException +from insights.parsers.open_vm_tools import OpenVmToolsStatRawTextSession +from insights.tests import context_wrap + +V_OUT1 = """ +vmware-toolbox-cmd must be run inside a virtual machine. 
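At its core the parser above is a first-'=' split, so values that themselves contain spaces (or further '=' characters) survive intact. A sketch of that parsing::

    content = ['version = VMware ESXi 6.0.0 build-12345']
    data = {}
    for line in content:
        if '=' in line:
            key, value = [i.strip() for i in line.split('=', 1)]
            data[key] = value
    # data['version'] == 'VMware ESXi 6.0.0 build-12345'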
+""".strip() + +V_OUT2 = """ +test +""".strip() + +V_OUT3 = """ +session = 4004861987670969122 +uptime = 1036293956 +version = VMware ESXi 6.0.0 build-12345 +provider = +uuid.bios = 00 00 00 00 00 00 66 8e-00 00 00 00 51 1e 23 f3 +""".strip() + + +def test_OpenVmToolsStatRawTextSession(): + with pytest.raises(SkipException): + OpenVmToolsStatRawTextSession(context_wrap(V_OUT1)) + + with pytest.raises(SkipException): + OpenVmToolsStatRawTextSession(context_wrap(V_OUT2)) + + o1 = OpenVmToolsStatRawTextSession(context_wrap(V_OUT3)) + assert o1['version'] == 'VMware ESXi 6.0.0 build-12345' + assert o1['provider'] == '' + + +def test_doc_examples(): + env = { + 'ovmt': OpenVmToolsStatRawTextSession(context_wrap(V_OUT3)), + } + failed, total = doctest.testmod(open_vm_tools, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index bdc7ba86f..23f9bd869 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -430,6 +430,7 @@ class Specs(SpecSet): octavia_conf = RegistryPoint(filterable=True) odbc_ini = RegistryPoint(filterable=True) odbcinst_ini = RegistryPoint() + open_vm_tools_stat_raw_text_session = RegistryPoint() openvswitch_other_config = RegistryPoint() openvswitch_server_log = RegistryPoint(filterable=True) openshift_certificates = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index fe7d34922..aa00e27b6 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -502,6 +502,7 @@ def httpd_cmd(broker): oc_get_clusterrolebinding_with_config = simple_command("/usr/bin/oc get clusterrolebinding --config /etc/origin/master/admin.kubeconfig") odbc_ini = simple_file("/etc/odbc.ini") odbcinst_ini = simple_file("/etc/odbcinst.ini") + open_vm_tools_stat_raw_text_session = simple_command("/usr/bin/vmware-toolbox-cmd stat raw text session") openshift_hosts = simple_file("/root/.config/openshift/hosts") openshift_router_pid = simple_command("/usr/bin/pgrep -n openshift-route") openshift_router_environ = foreach_collect(openshift_router_pid, "/proc/%s/environ") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 69bfd6536..cad4320e9 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -159,6 +159,7 @@ class InsightsArchiveSpecs(Specs): numeric_user_group_name = simple_file("insights_commands/grep_-c_digit_.etc.passwd_.etc.group") oc_get_clusterrole_with_config = simple_file("insights_commands/oc_get_clusterrole_--config_.etc.origin.master.admin.kubeconfig") oc_get_clusterrolebinding_with_config = simple_file("insights_commands/oc_get_clusterrolebinding_--config_.etc.origin.master.admin.kubeconfig") + open_vm_tools_stat_raw_text_session = simple_file("insights_commands/vmware-toolbox-cmd_stat_raw_text_session") openvswitch_other_config = simple_file("insights_commands/ovs-vsctl_-t_5_get_Open_vSwitch_._other_config") ovs_vsctl_list_bridge = simple_file("insights_commands/ovs-vsctl_list_bridge") ovs_vsctl_show = simple_file("insights_commands/ovs-vsctl_show") From fac2e09b04c10dd7a0250c8e096f996774b14ccf Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 24 Sep 2020 13:49:06 -0400 Subject: [PATCH 190/892] update uploader.json map Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index eae9cbed7..c64213fed 100644 --- 
a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -985,6 +985,11 @@ "pattern": [], "symbolic_name": "oc_get_clusterrolebinding_with_config" }, + { + "command": "/usr/bin/vmware-toolbox-cmd stat raw text session", + "pattern": [], + "symbolic_name": "open_vm_tools_stat_raw_text_session" + }, { "command": "/usr/bin/ovs-vsctl -t 5 get Open_vSwitch . other_config", "pattern": [], @@ -4065,4 +4070,4 @@ "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, "version": "2020-09-17T13:54:44.273820" -} \ No newline at end of file +} From 6026cb6021bb3496c1a599efd0876d39180d435f Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 29 Sep 2020 14:42:02 +0800 Subject: [PATCH 191/892] Remove the vmware_esxi_version attribute (#2774) - ESXi is only one type of VMware hypervisor arch Signed-off-by: Xiangce Liu --- insights/parsers/open_vm_tools.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/insights/parsers/open_vm_tools.py b/insights/parsers/open_vm_tools.py index f3cee37d8..4e9649d3c 100644 --- a/insights/parsers/open_vm_tools.py +++ b/insights/parsers/open_vm_tools.py @@ -33,8 +33,6 @@ class OpenVmToolsStatRawTextSession(CommandParser, dict): >>> ovmt['version'] == 'VMware ESXi 6.0.0 build-12345' True - >>> ovmt.vmware_esxi_version == '6.0.0' - True """ def parse_content(self, content): @@ -50,17 +48,4 @@ def parse_content(self, content): if not data: raise SkipException - self._vmware_esxi_version = None - version = data.get('version') - if "VMware ESXi" in version: - self._vmware_esxi_version = version.split()[2] - self.update(data) - - @property - def vmware_esxi_version(self): - """ - Returns: - (str): The version of the VMware ESXi, e.g. '6.0.0'. None by default. - """ - return self._vmware_esxi_version From 1331fd6950a83af1e8eba17e83b1fa105a90eb12 Mon Sep 17 00:00:00 2001 From: David Vallee Delisle Date: Wed, 30 Sep 2020 16:18:54 -0400 Subject: [PATCH 192/892] OpenStack logs are either containerized or not (#2737) We should look in both /var/log/service or /var/log/containers/service Signed-off-by: David Vallee Delisle --- insights/specs/default.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index aa00e27b6..f35bb006a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -193,7 +193,7 @@ def is_ceph_monitor(broker): cib_xml = simple_file("/var/lib/pacemaker/cib/cib.xml") cinder_api_log = first_file(["/var/log/containers/cinder/cinder-api.log", "/var/log/cinder/cinder-api.log"]) cinder_conf = first_file(["/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", "/etc/cinder/cinder.conf"]) - cinder_volume_log = simple_file("/var/log/cinder/volume.log") + cinder_volume_log = first_file(["/var/log/containers/cinder/volume.log", "/var/log/containers/cinder/cinder-volume.log", "/var/log/cinder/volume.log"]) cloud_init_custom_network = simple_file("/etc/cloud/cloud.cfg.d/99-custom-networking.cfg") cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") @@ -357,7 +357,7 @@ def httpd_cmd(broker): ip6tables = simple_command("/sbin/ip6tables-save") ipv4_neigh = simple_command("/sbin/ip -4 neighbor show nud all") ipv6_neigh = simple_command("/sbin/ip -6 neighbor show nud all") - ironic_inspector_log = simple_file("/var/log/ironic-inspector/ironic-inspector.log") + ironic_inspector_log = first_file(["/var/log/containers/ironic-inspector/ironic-inspector.log", 
"/var/log/ironic-inspector/ironic-inspector.log"]) iscsiadm_m_session = simple_command("/usr/sbin/iscsiadm -m session") kdump_conf = simple_file("/etc/kdump.conf") kernel_config = glob_file("/boot/config-*") @@ -466,7 +466,7 @@ def httpd_cmd(broker): "/etc/neutron/plugins/ml2/sriov_agent.ini"]) neutron_dhcp_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini", "/etc/neutron/dhcp_agent.ini"]) neutron_l3_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/l3_agent.ini", "/etc/neutron/l3_agent.ini"]) - neutron_l3_agent_log = simple_file("/var/log/neutron/l3-agent.log") + neutron_l3_agent_log = first_file(["/var/log/containers/neutron/l3-agent.log", "/var/log/neutron/l3-agent.log"]) neutron_metadata_agent_ini = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini", "/etc/neutron/metadata_agent.ini"]) neutron_metadata_agent_log = first_file(["/var/log/containers/neutron/metadata-agent.log", "/var/log/neutron/metadata-agent.log"]) neutron_ovs_agent_log = first_file(["/var/log/containers/neutron/openvswitch-agent.log", "/var/log/neutron/openvswitch-agent.log"]) From 73c7152508d05ef765c5d49af8b1417b7a909565 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 1 Oct 2020 04:34:08 +0800 Subject: [PATCH 193/892] Add more options to the "lvs" spec (#2773) * Add more options to the "lvs" spec Signed-off-by: Huanhuan Li * Move the new comment to the fist place Signed-off-by: Huanhuan Li --- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index f35bb006a..0b650d048 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -415,7 +415,7 @@ def httpd_cmd(broker): lsscsi = simple_command("/usr/bin/lsscsi") lsvmbus = simple_command("/usr/sbin/lsvmbus -vv") lvm_conf = simple_file("/etc/lvm/lvm.conf") - lvs_noheadings = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"") + lvs_noheadings = simple_command("/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor,lv_kernel_major,lv_kernel_minor --config=\"global{locking_type=0}\"") mac_addresses = glob_file("/sys/class/net/*/address") machine_id = first_file(["etc/insights-client/machine-id", "etc/redhat-access-insights/machine-id", "etc/redhat_access_proactive/machine-id"]) mariadb_log = simple_file("/var/log/mariadb/mariadb.log") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index cad4320e9..13ccbeb48 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -130,7 +130,12 @@ class InsightsArchiveSpecs(Specs): "insights_commands/lvmconfig_--type_full", "insights_commands/lvm_dumpconfig_--type_full" ]) - lvs_noheadings = simple_file("insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0") + lvs_noheadings = first_file( + [ + 
"insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_lv_kernel_major_lv_kernel_minor_--config_global_locking_type_0", + "insights_commands/lvs_--nameprefixes_--noheadings_--separator_-a_-o_lv_name_lv_size_lv_attr_mirror_log_vg_name_devices_region_size_data_percent_metadata_percent_segtype_seg_monitor_--config_global_locking_type_0" + ] + ) max_uid = simple_file("insights_commands/awk_-F_if_3_max_max_3_END_print_max_.etc.passwd") md5chk_files = glob_file("insights_commands/md5sum_*") mount = simple_file("insights_commands/mount") From 0758b1c7010b70b95e3bd7ba8f16a0a0ca34d017 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Wed, 30 Sep 2020 22:40:22 +0200 Subject: [PATCH 194/892] Add spec and parser for mokutil command (#2769) Signed-off-by: Jitka Obselkova --- .../mokutil_sbstate.rst | 3 ++ insights/parsers/mokutil_sbstate.py | 39 +++++++++++++++++++ .../parsers/tests/test_mokutil_sbstate.py | 38 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 83 insertions(+) create mode 100644 docs/shared_parsers_catalog/mokutil_sbstate.rst create mode 100644 insights/parsers/mokutil_sbstate.py create mode 100644 insights/parsers/tests/test_mokutil_sbstate.py diff --git a/docs/shared_parsers_catalog/mokutil_sbstate.rst b/docs/shared_parsers_catalog/mokutil_sbstate.rst new file mode 100644 index 000000000..52e837114 --- /dev/null +++ b/docs/shared_parsers_catalog/mokutil_sbstate.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.mokutil_sbstate + :members: + :show-inheritance: diff --git a/insights/parsers/mokutil_sbstate.py b/insights/parsers/mokutil_sbstate.py new file mode 100644 index 000000000..8be766d96 --- /dev/null +++ b/insights/parsers/mokutil_sbstate.py @@ -0,0 +1,39 @@ +""" +MokutilSbstate - command ``mokutil --sb-state`` +=============================================== +""" + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.specs import Specs + + +@parser(Specs.mokutil_sbstate) +class MokutilSbstate(CommandParser): + """ + Class for parsing the ``mokutil --sb-state`` command. + + Attributes: + secureboot_enabled (bool): True if SecureBoot is enabled, + False if SecureBoot is disabled, otherwise None. 
+ + Sample output of this command is:: + + SecureBoot enabled + + Examples: + + >>> type(mokutil) + + >>> mokutil.secureboot_enabled + True + """ + + def parse_content(self, content): + self.secureboot_enabled = None + + non_empty_lines = [line for line in content if line] # get rid of blank lines + if "SecureBoot enabled" in non_empty_lines[0]: + self.secureboot_enabled = True + elif "SecureBoot disabled" in non_empty_lines[0]: + self.secureboot_enabled = False diff --git a/insights/parsers/tests/test_mokutil_sbstate.py b/insights/parsers/tests/test_mokutil_sbstate.py new file mode 100644 index 000000000..109e2f4e9 --- /dev/null +++ b/insights/parsers/tests/test_mokutil_sbstate.py @@ -0,0 +1,38 @@ +import doctest +import pytest + +from insights.parsers import mokutil_sbstate +from insights.parsers.mokutil_sbstate import MokutilSbstate +from insights.tests import context_wrap + +SECUREBOOT_ENABLED = """ +SecureBoot enabled +""".strip() + +SECUREBOOT_DISABLED = """ +SecureBoot disabled +""".strip() + +NOT_SUPPORTED = """ +EFI variables are not supported on this system +""".strip() + +TEST_CASES = [ + (SECUREBOOT_ENABLED, True), + (SECUREBOOT_DISABLED, False), + (NOT_SUPPORTED, None) +] + + +@pytest.mark.parametrize("output, boolean", TEST_CASES) +def test_mokutil(output, boolean): + test = MokutilSbstate(context_wrap(output)) + assert test.secureboot_enabled == boolean + + +def test_doc_examples(): + env = { + "mokutil": MokutilSbstate(context_wrap(SECUREBOOT_ENABLED)), + } + failed, total = doctest.testmod(mokutil_sbstate, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 23f9bd869..f97edfeee 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -352,6 +352,7 @@ class Specs(SpecSet): modinfo = RegistryPoint(multi_output=True) modinfo_all = RegistryPoint() modprobe = RegistryPoint(multi_output=True) + mokutil_sbstate = RegistryPoint() mongod_conf = RegistryPoint(multi_output=True, filterable=True) mount = RegistryPoint() mounts = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 0b650d048..11c9fbe28 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -432,6 +432,7 @@ def httpd_cmd(broker): modinfo_veth = simple_command("/sbin/modinfo veth") modinfo_vmxnet3 = simple_command("/sbin/modinfo vmxnet3") modprobe = glob_file(["/etc/modprobe.conf", "/etc/modprobe.d/*.conf"]) + mokutil_sbstate = simple_command("/bin/mokutil --sb-state") mongod_conf = glob_file([ "/etc/mongod.conf", "/etc/mongodb.conf", diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 13ccbeb48..a696b5169 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -145,6 +145,7 @@ class InsightsArchiveSpecs(Specs): modinfo_ixgbe = simple_file("insights_commands/modinfo_ixgbe") modinfo_veth = simple_file("insights_commands/modinfo_veth") modinfo_vmxnet3 = simple_file("insights_commands/modinfo_vmxnet3") + mokutil_sbstate = simple_file("insights_commands/mokutil_--sb-state") multicast_querier = simple_file("insights_commands/find_.sys.devices.virtual.net._-name_multicast_querier_-print_-exec_cat") multipath_conf_initramfs = simple_file("insights_commands/lsinitrd_-f_.etc.multipath.conf") multipath__v4__ll = simple_file("insights_commands/multipath_-v4_-ll") From 1fb5a4cef735b44b76d2c08a753688e0ca81a8be Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Thu, 1 Oct 2020 02:21:52 +0530 Subject: [PATCH 195/892] Add RHEL 7.9 kernel 
to uname (#2776) Signed-off-by: Rohan Arora --- insights/parsers/uname.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py index b776f9911..c917bf036 100644 --- a/insights/parsers/uname.py +++ b/insights/parsers/uname.py @@ -100,6 +100,7 @@ "3.10.0-957": "7.6", "3.10.0-1062": "7.7", "3.10.0-1127": "7.8", + "3.10.0-1160": "7.9", "4.18.0-80": "8.0", "4.18.0-147": "8.1", "4.18.0-193": "8.2" From bc8573703692fe30e3fec81bafdcfd68c4f09d39 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 1 Oct 2020 16:01:04 +0000 Subject: [PATCH 196/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 55 ++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index c64213fed..defd6645e 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -739,7 +739,7 @@ "symbolic_name": "lsvmbus" }, { - "command": "/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor --config=\"global{locking_type=0}\"", + "command": "/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor,lv_kernel_major,lv_kernel_minor --config=\"global{locking_type=0}\"", "pattern": [], "symbolic_name": "lvs_noheadings" }, @@ -778,6 +778,11 @@ "pattern": [], "symbolic_name": "md5chk_files" }, + { + "command": "/bin/mokutil --sb-state", + "pattern": [], + "symbolic_name": "mokutil_sbstate" + }, { "command": "/bin/mount", "pattern": [], @@ -1713,6 +1718,28 @@ "pattern": [], "symbolic_name": "cinder_conf" }, + { + "file": "/var/log/containers/cinder/cinder-volume.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Image cloning unsuccessful for image", + "Message: NFS file could not be discovered.", + "Timed out waiting for RPC response", + "[Errno 24] Too many open files" + ], + "symbolic_name": "cinder_volume_log" + }, + { + "file": "/var/log/containers/cinder/volume.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Image cloning unsuccessful for image", + "Message: NFS file could not be discovered.", + "Timed out waiting for RPC response", + "[Errno 24] Too many open files" + ], + "symbolic_name": "cinder_volume_log" + }, { "file": "/var/log/cinder/volume.log", "pattern": [ @@ -2191,7 +2218,17 @@ "file": "/var/log/ironic-inspector/ironic-inspector.log", "pattern": [ "Certificate did not match expected hostname", - "ERROR requests.packages.urllib3.connection" + "ERROR requests.packages.urllib3.connection", + "Failed to set boot device to PXE" + ], + "symbolic_name": "ironic_inspector_log" + }, + { + "file": "/var/log/containers/ironic-inspector/ironic-inspector.log", + "pattern": [ + "Certificate did not match expected hostname", + "ERROR requests.packages.urllib3.connection", + "Failed to set boot device to PXE" ], "symbolic_name": "ironic_inspector_log" }, @@ -3780,6 +3817,16 @@ ], "symbolic_name": "neutron_l3_agent_log" }, + { + "file": "/var/log/containers/neutron/l3-agent.log", + "pattern": [ + "DEBUG oslo.messaging._drivers.impl_rabbit", + "Error while deleting router", + "Stderr: Another app is currently holding the xtables lock", + "Timed out waiting for RPC response" + ], + "symbolic_name": 
"neutron_l3_agent_log" + }, { "file": "/etc/neutron/metadata_agent.ini", "pattern": [ @@ -4069,5 +4116,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-09-17T13:54:44.273820" -} + "version": "2020-09-24T14:46:59.896476" +} \ No newline at end of file From 818f69da54568b965c5a705a9147d24a7d9e93e0 Mon Sep 17 00:00:00 2001 From: Martin Zibricky Date: Thu, 8 Oct 2020 14:33:04 +0200 Subject: [PATCH 197/892] Remove unneeded parsers libkeyutils and whoopsie (#2780) * Remove unneeded parsers libkeyutils and whoopsie Signed-off-by: Martin Zibricky * Remove whoopsie spec Signed-off-by: Martin Zibricky --- docs/shared_parsers_catalog/libkeyutils.rst | 3 - docs/shared_parsers_catalog/whoopsie.rst | 3 - insights/client/uploader_json_map.json | 15 - insights/parsers/libkeyutils.py | 116 --- insights/parsers/tests/test_libkeyutils.py | 843 -------------------- insights/parsers/tests/test_whoopsie.py | 45 -- insights/parsers/whoopsie.py | 44 - insights/specs/__init__.py | 3 - insights/specs/default.py | 3 - insights/specs/insights_archive.py | 3 - 10 files changed, 1078 deletions(-) delete mode 100644 docs/shared_parsers_catalog/libkeyutils.rst delete mode 100644 docs/shared_parsers_catalog/whoopsie.rst delete mode 100644 insights/parsers/libkeyutils.py delete mode 100644 insights/parsers/tests/test_libkeyutils.py delete mode 100644 insights/parsers/tests/test_whoopsie.py delete mode 100644 insights/parsers/whoopsie.py diff --git a/docs/shared_parsers_catalog/libkeyutils.rst b/docs/shared_parsers_catalog/libkeyutils.rst deleted file mode 100644 index a474ae805..000000000 --- a/docs/shared_parsers_catalog/libkeyutils.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.libkeyutils - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/whoopsie.rst b/docs/shared_parsers_catalog/whoopsie.rst deleted file mode 100644 index 6d0e7fc22..000000000 --- a/docs/shared_parsers_catalog/whoopsie.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. 
automodule:: insights.parsers.whoopsie - :members: - :show-inheritance: diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index defd6645e..bdbb08640 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -485,16 +485,6 @@ "pattern": [], "symbolic_name": "kpatch_list" }, - { - "command": "/usr/bin/find -L /lib /lib64 -name 'libkeyutils.so*'", - "pattern": [], - "symbolic_name": "libkeyutils" - }, - { - "command": "/usr/bin/find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x \"{}\" \\;", - "pattern": [], - "symbolic_name": "libkeyutils_objdumps" - }, { "command": "/usr/bin/file -L /etc/localtime", "pattern": [], @@ -1452,11 +1442,6 @@ "pattern": [], "symbolic_name": "virt_what" }, - { - "command": "/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit", - "pattern": [], - "symbolic_name": "woopsie" - }, { "command": "yum -C --noplugins list available", "pattern": [], diff --git a/insights/parsers/libkeyutils.py b/insights/parsers/libkeyutils.py deleted file mode 100644 index a685f681a..000000000 --- a/insights/parsers/libkeyutils.py +++ /dev/null @@ -1,116 +0,0 @@ -""" -Parsers for detection of Linux/Ebury 1.6 malware indicators -=========================================================== - -Libkeyutils - command ``find -L /lib /lib64 -name 'libkeyutils.so*'`` ---------------------------------------------------------------------- - -Parses output of command ``find -L /lib /lib64 -name 'libkeyutils.so*'`` to find all potentially -affected libraries. - -LibkeyutilsObjdumps - command ``find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x "{}" \;`` ------------------------------------------------------------------------------------------------------ - -Parses output of command ``find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x "{}" \;`` to -verify linked libraries. - -https://www.welivesecurity.com/2017/10/30/windigo-ebury-update-2/ -""" -import re - -from .. import parser, CommandParser -from ..specs import Specs - - -@parser(Specs.libkeyutils) -class Libkeyutils(CommandParser): - """ - This parser finds all 'libkeyutils.so*' libraries in either /lib or /lib64 directory and its - sub-directories. - - Output of Command:: - - /lib/libkeyutils.so.1 - /lib/tls/libkeyutils.so.1.6 - /lib64/libkeyutils.so - - Example:: - - >>> shared[Libkeyutils].libraries - ['/lib/libkeyutils.so.1', '/lib/tls/libkeyutils.so.1.6', '/lib64/libkeyutils.so'] - """ - def __init__(self, *args, **kwargs): - self.libraries = [] - """list: all 'libkeyutils.so*' libraries located in either /lib or /lib64 directory and its sub-directories.""" - super(Libkeyutils, self).__init__(*args, **kwargs) - - def parse_content(self, content): - self.libraries = list(content) - - -@parser(Specs.libkeyutils_objdumps) -class LibkeyutilsObjdumps(CommandParser): - """ - This parser goes through objdumps of all 'libkeyutils.so.1' libraries in either /lib or /lib64 - directory, and its sub-directories, to finds linked libraries. - - Output of Command:: - - /lib/libkeyutils.so.1: file format elf32-i386 - /lib/libkeyutils.so.1 - architecture: i386, flags 0x00000150: - HAS_SYMS, DYNAMIC, D_PAGED - start address 0x00000f80 - ... - - Dynamic Section: - NEEDED libdl.so.2 - NEEDED libc.so.6 - NEEDED libsbr.so - SONAME libkeyutils.so.1 - INIT 0x00000e54 - ... 
- - - /lib64/libkeyutils.so.1: file format elf64-x86-64 - /lib64/libkeyutils.so.1 - architecture: i386:x86-64, flags 0x00000150: - HAS_SYMS, DYNAMIC, D_PAGED - start address 0x00000000000014b0 - ... - - Dynamic Section: - NEEDED libdl.so.2 - NEEDED libsbr.so.6 - NEEDED libfake.so - SONAME libkeyutils.so.1 - INIT 0x0000000000001390 - ... - - Example:: - - >>> shared[LibkeyutilsObjdumps].linked_libraries - {'/lib/libkeyutils.so.1': ['libdl.so.2', 'libc.so.6', 'libsbr.so'], - '/lib64/libkeyutils.so.1': ['libdl.so.2', 'libsbr.so.6', 'libfake.so']} - """ - FILE_PATTERN = re.compile(r'(.*libkeyutils.so.1):\s*file format') - NEED_PATTERN = re.compile(r'NEEDED\s+(.*)\s*$') - - def __init__(self, *args, **kwargs): - self.linked_libraries = {} - """dict: found libraries and their linked libraries.""" - super(LibkeyutilsObjdumps, self).__init__(*args, **kwargs) - - def parse_content(self, content): - file_name = None - for line in content: - r = LibkeyutilsObjdumps.FILE_PATTERN.search(line) - if r: - file_name = r.group(1) - r = LibkeyutilsObjdumps.NEED_PATTERN.search(line) - if r and file_name: - library = r.group(1) - if file_name not in self.linked_libraries: - self.linked_libraries[file_name] = [library] - else: - self.linked_libraries[file_name].append(library) diff --git a/insights/parsers/tests/test_libkeyutils.py b/insights/parsers/tests/test_libkeyutils.py deleted file mode 100644 index e82cd53e7..000000000 --- a/insights/parsers/tests/test_libkeyutils.py +++ /dev/null @@ -1,843 +0,0 @@ -from insights.parsers.libkeyutils import Libkeyutils, LibkeyutilsObjdumps -from insights.tests import context_wrap - -SEARCH_NOT_FOUND = ''' -/lib/libkeyutils.so.1 -/lib/libkeyutils.so.1.6 -/lib/debug/usr/lib64/libkeyutils.so.1.5.debug -/lib/debug/usr/lib64/libkeyutils.so.1.debug -/lib/debug/usr/lib64/libkeyutils.so.debug -/lib64/libkeyutils.so.1 -/lib64/libkeyutils.so.1.6 -/lib64/libkeyutils.so -''' - -SEARCH_FOUND_1 = ''' -/lib/libkeyutils.so.1 -/lib/tls/libkeyutils.so.1.6 -/lib/debug/usr/lib64/libkeyutils.so.1.5.debug -/lib/debug/usr/lib64/libkeyutils.so.1.debug -/lib/debug/usr/lib64/libkeyutils.so.debug -/lib64/libkeyutils.so.1 -/lib64/libkeyutils.so.1.6 -/lib64/libkeyutils.so -''' - -SEARCH_FOUND_2 = ''' -/lib/libkeyutils.so.1 -/lib/tls/libkeyutils.so.1.6 -/lib/debug/usr/lib64/libkeyutils.so.1.5.debug -/lib/debug/usr/lib64/libkeyutils.so.1.debug -/lib/debug/usr/lib64/libkeyutils.so.debug -/lib64/tls/libkeyutils.so.1 -/lib64/libkeyutils.so.1.6 -/lib64/libkeyutils.so -''' - -DUMP_NOT_FOUND = ''' - -/lib/libkeyutils.so.1: file format elf32-i386 -/lib/libkeyutils.so.1 -architecture: i386, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 0x00000f80 - -Program Header: - LOAD off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**12 - filesz 0x000028b4 memsz 0x000028b4 flags r-x - LOAD off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**12 - filesz 0x00000194 memsz 0x00000198 flags rw- - DYNAMIC off 0x00002e78 vaddr 0x00003e78 paddr 0x00003e78 align 2**2 - filesz 0x000000f8 memsz 0x000000f8 flags rw- - NOTE off 0x00000114 vaddr 0x00000114 paddr 0x00000114 align 2**2 - filesz 0x00000024 memsz 0x00000024 flags r-- -EH_FRAME off 0x000021d8 vaddr 0x000021d8 paddr 0x000021d8 align 2**2 - filesz 0x00000134 memsz 0x00000134 flags r-- - STACK off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**4 - filesz 0x00000000 memsz 0x00000000 flags rw- - RELRO off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**0 - filesz 0x00000194 memsz 0x00000194 flags r-- - -Dynamic Section: - NEEDED 
libdl.so.2 - NEEDED libc.so.6 - SONAME libkeyutils.so.1 - INIT 0x00000e54 - FINI 0x00002134 - INIT_ARRAY 0x00003e6c - INIT_ARRAYSZ 0x00000004 - FINI_ARRAY 0x00003e70 - FINI_ARRAYSZ 0x00000004 - GNU_HASH 0x00000138 - STRTAB 0x000006f8 - SYMTAB 0x000002b8 - STRSZ 0x00000457 - SYMENT 0x00000010 - PLTGOT 0x00003f70 - REL 0x00000d34 - RELSZ 0x00000120 - RELENT 0x00000008 - VERDEF 0x00000bd8 - VERDEFNUM 0x00000007 - BIND_NOW 0x00000000 - FLAGS_1 0x00000001 - VERNEED 0x00000cc4 - VERNEEDNUM 0x00000001 - VERSYM 0x00000b50 - RELCOUNT 0x00000003 - -Version definitions: -1 0x01 0x02928561 libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 13 GLIBC_2.3.4 - 0x0d696917 0x00 12 GLIBC_2.7 - 0x09691f73 0x00 11 GLIBC_2.1.3 - 0x0d696914 0x00 10 GLIBC_2.4 - 0x0d696911 0x00 09 GLIBC_2.1 - 0x0d696910 0x00 08 GLIBC_2.0 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000114 00000114 00000114 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000138 00000138 00000138 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000440 000002b8 000002b8 000002b8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000457 000006f8 000006f8 000006f8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 00000088 00000b50 00000b50 00000b50 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 00000bd8 00000bd8 00000bd8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000070 00000cc4 00000cc4 00000cc4 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rel.dyn 00000120 00000d34 00000d34 00000d34 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 00000023 00000e54 00000e54 00000e54 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000e80 00000e80 00000e80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000e90 00000e90 00000e90 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 000011b4 00000f80 00000f80 00000f80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000014 00002134 00002134 00002134 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 00000090 00002148 00002148 00002148 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 00000134 000021d8 000021d8 000021d8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 000005a8 0000230c 0000230c 0000230c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000004 00003e6c 00003e6c 00002e6c 2**2 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000004 00003e70 00003e70 00002e70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000004 00003e74 00003e74 00002e74 2**2 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000000f8 00003e78 00003e78 00002e78 2**2 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000090 00003f70 00003f70 00002f70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000004 00004000 00004000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 00000000 00000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000424 00000000 00000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols - - - -/lib64/libkeyutils.so.1: file format elf64-x86-64 -/lib64/libkeyutils.so.1 -architecture: i386:x86-64, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 
0x00000000000014b0 - -Program Header: - LOAD off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**21 - filesz 0x000000000000290c memsz 0x000000000000290c flags r-x - LOAD off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**21 - filesz 0x0000000000000330 memsz 0x0000000000000338 flags rw- - DYNAMIC off 0x0000000000002ce8 vaddr 0x0000000000202ce8 paddr 0x0000000000202ce8 align 2**3 - filesz 0x00000000000001f0 memsz 0x00000000000001f0 flags rw- - NOTE off 0x00000000000001c8 vaddr 0x00000000000001c8 paddr 0x00000000000001c8 align 2**2 - filesz 0x0000000000000024 memsz 0x0000000000000024 flags r-- -EH_FRAME off 0x0000000000002250 vaddr 0x0000000000002250 paddr 0x0000000000002250 align 2**2 - filesz 0x000000000000012c memsz 0x000000000000012c flags r-- - STACK off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**4 - filesz 0x0000000000000000 memsz 0x0000000000000000 flags rw- - RELRO off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**0 - filesz 0x0000000000000330 memsz 0x0000000000000330 flags r-- - -Dynamic Section: - NEEDED libdl.so.2 - NEEDED libc.so.6 - SONAME libkeyutils.so.1 - INIT 0x0000000000001390 - FINI 0x000000000000219c - INIT_ARRAY 0x0000000000202cd0 - INIT_ARRAYSZ 0x0000000000000008 - FINI_ARRAY 0x0000000000202cd8 - FINI_ARRAYSZ 0x0000000000000008 - GNU_HASH 0x00000000000001f0 - STRTAB 0x00000000000009e8 - SYMTAB 0x0000000000000370 - STRSZ 0x0000000000000455 - SYMENT 0x0000000000000018 - PLTGOT 0x0000000000202ed8 - RELA 0x0000000000001018 - RELASZ 0x0000000000000378 - RELAENT 0x0000000000000018 - VERDEF 0x0000000000000ec8 - VERDEFNUM 0x0000000000000007 - BIND_NOW 0x0000000000000000 - FLAGS_1 0x0000000000000001 - VERNEED 0x0000000000000fb8 - VERNEEDNUM 0x0000000000000001 - VERSYM 0x0000000000000e3e - RELACOUNT 0x0000000000000003 - -Version definitions: -1 0x01 0x02928561 libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 12 GLIBC_2.3.4 - 0x0d696917 0x00 11 GLIBC_2.7 - 0x06969194 0x00 10 GLIBC_2.14 - 0x0d696914 0x00 09 GLIBC_2.4 - 0x09691a75 0x00 08 GLIBC_2.2.5 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000000000001c8 00000000000001c8 000001c8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000000000001f0 00000000000001f0 000001f0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000678 0000000000000370 0000000000000370 00000370 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000455 00000000000009e8 00000000000009e8 000009e8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 0000008a 0000000000000e3e 0000000000000e3e 00000e3e 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 0000000000000ec8 0000000000000ec8 00000ec8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000060 0000000000000fb8 0000000000000fb8 00000fb8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rela.dyn 00000378 0000000000001018 0000000000001018 00001018 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 00000017 0000000000001390 0000000000001390 00001390 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000000000013b0 00000000000013b0 000013b0 2**4 - 
CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000000000013c0 00000000000013c0 000013c0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 00000cec 00000000000014b0 00000000000014b0 000014b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000009 000000000000219c 000000000000219c 0000219c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 000000a0 00000000000021b0 00000000000021b0 000021b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 0000012c 0000000000002250 0000000000002250 00002250 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 0000058c 0000000000002380 0000000000002380 00002380 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000008 0000000000202cd0 0000000000202cd0 00002cd0 2**3 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000008 0000000000202cd8 0000000000202cd8 00002cd8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000008 0000000000202ce0 0000000000202ce0 00002ce0 2**3 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000001f0 0000000000202ce8 0000000000202ce8 00002ce8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000128 0000000000202ed8 0000000000202ed8 00002ed8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000008 0000000000203000 0000000000203000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 0000000000000000 0000000000000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000414 0000000000000000 0000000000000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols -''' # noqa - -DUMP_FOUND_1 = ''' - -/lib/libkeyutils.so.1: file format elf32-i386 -/lib/libkeyutils.so.1 -architecture: i386, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 0x00000f80 - -Program Header: - LOAD off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**12 - filesz 0x000028b4 memsz 0x000028b4 flags r-x - LOAD off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**12 - filesz 0x00000194 memsz 0x00000198 flags rw- - DYNAMIC off 0x00002e78 vaddr 0x00003e78 paddr 0x00003e78 align 2**2 - filesz 0x000000f8 memsz 0x000000f8 flags rw- - NOTE off 0x00000114 vaddr 0x00000114 paddr 0x00000114 align 2**2 - filesz 0x00000024 memsz 0x00000024 flags r-- -EH_FRAME off 0x000021d8 vaddr 0x000021d8 paddr 0x000021d8 align 2**2 - filesz 0x00000134 memsz 0x00000134 flags r-- - STACK off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**4 - filesz 0x00000000 memsz 0x00000000 flags rw- - RELRO off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**0 - filesz 0x00000194 memsz 0x00000194 flags r-- - -Dynamic Section: - NEEDED libdl.so.2 - NEEDED libc.so.6 - NEEDED libsbr.so - SONAME libkeyutils.so.1 - INIT 0x00000e54 - FINI 0x00002134 - INIT_ARRAY 0x00003e6c - INIT_ARRAYSZ 0x00000004 - FINI_ARRAY 0x00003e70 - FINI_ARRAYSZ 0x00000004 - GNU_HASH 0x00000138 - STRTAB 0x000006f8 - SYMTAB 0x000002b8 - STRSZ 0x00000457 - SYMENT 0x00000010 - PLTGOT 0x00003f70 - REL 0x00000d34 - RELSZ 0x00000120 - RELENT 0x00000008 - VERDEF 0x00000bd8 - VERDEFNUM 0x00000007 - BIND_NOW 0x00000000 - FLAGS_1 0x00000001 - VERNEED 0x00000cc4 - VERNEEDNUM 0x00000001 - VERSYM 0x00000b50 - RELCOUNT 0x00000003 - -Version definitions: -1 0x01 0x02928561 libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 13 GLIBC_2.3.4 - 0x0d696917 
0x00 12 GLIBC_2.7 - 0x09691f73 0x00 11 GLIBC_2.1.3 - 0x0d696914 0x00 10 GLIBC_2.4 - 0x0d696911 0x00 09 GLIBC_2.1 - 0x0d696910 0x00 08 GLIBC_2.0 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000114 00000114 00000114 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000138 00000138 00000138 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000440 000002b8 000002b8 000002b8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000457 000006f8 000006f8 000006f8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 00000088 00000b50 00000b50 00000b50 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 00000bd8 00000bd8 00000bd8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000070 00000cc4 00000cc4 00000cc4 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rel.dyn 00000120 00000d34 00000d34 00000d34 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 00000023 00000e54 00000e54 00000e54 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000e80 00000e80 00000e80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000e90 00000e90 00000e90 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 000011b4 00000f80 00000f80 00000f80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000014 00002134 00002134 00002134 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 00000090 00002148 00002148 00002148 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 00000134 000021d8 000021d8 000021d8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 000005a8 0000230c 0000230c 0000230c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000004 00003e6c 00003e6c 00002e6c 2**2 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000004 00003e70 00003e70 00002e70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000004 00003e74 00003e74 00002e74 2**2 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000000f8 00003e78 00003e78 00002e78 2**2 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000090 00003f70 00003f70 00002f70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000004 00004000 00004000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 00000000 00000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000424 00000000 00000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols - - - -/lib64/libkeyutils.so.1: file format elf64-x86-64 -/lib64/libkeyutils.so.1 -architecture: i386:x86-64, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 0x00000000000014b0 - -Program Header: - LOAD off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**21 - filesz 0x000000000000290c memsz 0x000000000000290c flags r-x - LOAD off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**21 - filesz 0x0000000000000330 memsz 0x0000000000000338 flags rw- - DYNAMIC off 0x0000000000002ce8 vaddr 0x0000000000202ce8 paddr 0x0000000000202ce8 align 2**3 - filesz 0x00000000000001f0 memsz 0x00000000000001f0 flags rw- - NOTE off 0x00000000000001c8 vaddr 0x00000000000001c8 paddr 0x00000000000001c8 align 2**2 - filesz 0x0000000000000024 memsz 0x0000000000000024 flags r-- -EH_FRAME off 0x0000000000002250 vaddr 0x0000000000002250 paddr 0x0000000000002250 align 2**2 - filesz 0x000000000000012c memsz 0x000000000000012c flags r-- - STACK off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**4 - filesz 0x0000000000000000 memsz 0x0000000000000000 flags rw- - RELRO 
off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**0 - filesz 0x0000000000000330 memsz 0x0000000000000330 flags r-- - -Dynamic Section: - NEEDED libdl.so.2 - NEEDED libc.so.6 - SONAME libkeyutils.so.1 - INIT 0x0000000000001390 - FINI 0x000000000000219c - INIT_ARRAY 0x0000000000202cd0 - INIT_ARRAYSZ 0x0000000000000008 - FINI_ARRAY 0x0000000000202cd8 - FINI_ARRAYSZ 0x0000000000000008 - GNU_HASH 0x00000000000001f0 - STRTAB 0x00000000000009e8 - SYMTAB 0x0000000000000370 - STRSZ 0x0000000000000455 - SYMENT 0x0000000000000018 - PLTGOT 0x0000000000202ed8 - RELA 0x0000000000001018 - RELASZ 0x0000000000000378 - RELAENT 0x0000000000000018 - VERDEF 0x0000000000000ec8 - VERDEFNUM 0x0000000000000007 - BIND_NOW 0x0000000000000000 - FLAGS_1 0x0000000000000001 - VERNEED 0x0000000000000fb8 - VERNEEDNUM 0x0000000000000001 - VERSYM 0x0000000000000e3e - RELACOUNT 0x0000000000000003 - -Version definitions: -1 0x01 0x02928561 libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 12 GLIBC_2.3.4 - 0x0d696917 0x00 11 GLIBC_2.7 - 0x06969194 0x00 10 GLIBC_2.14 - 0x0d696914 0x00 09 GLIBC_2.4 - 0x09691a75 0x00 08 GLIBC_2.2.5 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000000000001c8 00000000000001c8 000001c8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000000000001f0 00000000000001f0 000001f0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000678 0000000000000370 0000000000000370 00000370 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000455 00000000000009e8 00000000000009e8 000009e8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 0000008a 0000000000000e3e 0000000000000e3e 00000e3e 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 0000000000000ec8 0000000000000ec8 00000ec8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000060 0000000000000fb8 0000000000000fb8 00000fb8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rela.dyn 00000378 0000000000001018 0000000000001018 00001018 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 00000017 0000000000001390 0000000000001390 00001390 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000000000013b0 00000000000013b0 000013b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000000000013c0 00000000000013c0 000013c0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 00000cec 00000000000014b0 00000000000014b0 000014b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000009 000000000000219c 000000000000219c 0000219c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 000000a0 00000000000021b0 00000000000021b0 000021b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 0000012c 0000000000002250 0000000000002250 00002250 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 0000058c 0000000000002380 0000000000002380 00002380 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000008 0000000000202cd0 0000000000202cd0 00002cd0 2**3 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000008 0000000000202cd8 0000000000202cd8 00002cd8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000008 0000000000202ce0 0000000000202ce0 00002ce0 
2**3 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000001f0 0000000000202ce8 0000000000202ce8 00002ce8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000128 0000000000202ed8 0000000000202ed8 00002ed8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000008 0000000000203000 0000000000203000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 0000000000000000 0000000000000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000414 0000000000000000 0000000000000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols -''' # noqa - -DUMP_FOUND_2 = ''' - -/lib/libkeyutils.so.1: file format elf32-i386 -/lib/libkeyutils.so.1 -architecture: i386, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 0x00000f80 - -Program Header: - LOAD off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**12 - filesz 0x000028b4 memsz 0x000028b4 flags r-x - LOAD off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**12 - filesz 0x00000194 memsz 0x00000198 flags rw- - DYNAMIC off 0x00002e78 vaddr 0x00003e78 paddr 0x00003e78 align 2**2 - filesz 0x000000f8 memsz 0x000000f8 flags rw- - NOTE off 0x00000114 vaddr 0x00000114 paddr 0x00000114 align 2**2 - filesz 0x00000024 memsz 0x00000024 flags r-- -EH_FRAME off 0x000021d8 vaddr 0x000021d8 paddr 0x000021d8 align 2**2 - filesz 0x00000134 memsz 0x00000134 flags r-- - STACK off 0x00000000 vaddr 0x00000000 paddr 0x00000000 align 2**4 - filesz 0x00000000 memsz 0x00000000 flags rw- - RELRO off 0x00002e6c vaddr 0x00003e6c paddr 0x00003e6c align 2**0 - filesz 0x00000194 memsz 0x00000194 flags r-- - -Dynamic Section: - NEEDED libdl.so.2 - NEEDED libc.so.6 - NEEDED libsbr.so - SONAME libkeyutils.so.1 - INIT 0x00000e54 - FINI 0x00002134 - INIT_ARRAY 0x00003e6c - INIT_ARRAYSZ 0x00000004 - FINI_ARRAY 0x00003e70 - FINI_ARRAYSZ 0x00000004 - GNU_HASH 0x00000138 - STRTAB 0x000006f8 - SYMTAB 0x000002b8 - STRSZ 0x00000457 - SYMENT 0x00000010 - PLTGOT 0x00003f70 - REL 0x00000d34 - RELSZ 0x00000120 - RELENT 0x00000008 - VERDEF 0x00000bd8 - VERDEFNUM 0x00000007 - BIND_NOW 0x00000000 - FLAGS_1 0x00000001 - VERNEED 0x00000cc4 - VERNEEDNUM 0x00000001 - VERSYM 0x00000b50 - RELCOUNT 0x00000003 - -Version definitions: -1 0x01 0x02928561 libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 13 GLIBC_2.3.4 - 0x0d696917 0x00 12 GLIBC_2.7 - 0x09691f73 0x00 11 GLIBC_2.1.3 - 0x0d696914 0x00 10 GLIBC_2.4 - 0x0d696911 0x00 09 GLIBC_2.1 - 0x0d696910 0x00 08 GLIBC_2.0 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000114 00000114 00000114 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000138 00000138 00000138 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000440 000002b8 000002b8 000002b8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000457 000006f8 000006f8 000006f8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 00000088 00000b50 00000b50 00000b50 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 00000bd8 00000bd8 00000bd8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000070 00000cc4 00000cc4 00000cc4 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rel.dyn 00000120 00000d34 00000d34 00000d34 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 
00000023 00000e54 00000e54 00000e54 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000e80 00000e80 00000e80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000e90 00000e90 00000e90 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 000011b4 00000f80 00000f80 00000f80 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000014 00002134 00002134 00002134 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 00000090 00002148 00002148 00002148 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 00000134 000021d8 000021d8 000021d8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 000005a8 0000230c 0000230c 0000230c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000004 00003e6c 00003e6c 00002e6c 2**2 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000004 00003e70 00003e70 00002e70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000004 00003e74 00003e74 00002e74 2**2 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000000f8 00003e78 00003e78 00002e78 2**2 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000090 00003f70 00003f70 00002f70 2**2 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000004 00004000 00004000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 00000000 00000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000424 00000000 00000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols - - - -/lib64/libkeyutils.so.1: file format elf64-x86-64 -/lib64/libkeyutils.so.1 -architecture: i386:x86-64, flags 0x00000150: -HAS_SYMS, DYNAMIC, D_PAGED -start address 0x00000000000014b0 - -Program Header: - LOAD off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**21 - filesz 0x000000000000290c memsz 0x000000000000290c flags r-x - LOAD off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**21 - filesz 0x0000000000000330 memsz 0x0000000000000338 flags rw- - DYNAMIC off 0x0000000000002ce8 vaddr 0x0000000000202ce8 paddr 0x0000000000202ce8 align 2**3 - filesz 0x00000000000001f0 memsz 0x00000000000001f0 flags rw- - NOTE off 0x00000000000001c8 vaddr 0x00000000000001c8 paddr 0x00000000000001c8 align 2**2 - filesz 0x0000000000000024 memsz 0x0000000000000024 flags r-- -EH_FRAME off 0x0000000000002250 vaddr 0x0000000000002250 paddr 0x0000000000002250 align 2**2 - filesz 0x000000000000012c memsz 0x000000000000012c flags r-- - STACK off 0x0000000000000000 vaddr 0x0000000000000000 paddr 0x0000000000000000 align 2**4 - filesz 0x0000000000000000 memsz 0x0000000000000000 flags rw- - RELRO off 0x0000000000002cd0 vaddr 0x0000000000202cd0 paddr 0x0000000000202cd0 align 2**0 - filesz 0x0000000000000330 memsz 0x0000000000000330 flags r-- - -Dynamic Section: - NEEDED libdl.so.2 - NEEDED libsbr.so.6 - NEEDED libfake.so - SONAME libkeyutils.so.1 - INIT 0x0000000000001390 - FINI 0x000000000000219c - INIT_ARRAY 0x0000000000202cd0 - INIT_ARRAYSZ 0x0000000000000008 - FINI_ARRAY 0x0000000000202cd8 - FINI_ARRAYSZ 0x0000000000000008 - GNU_HASH 0x00000000000001f0 - STRTAB 0x00000000000009e8 - SYMTAB 0x0000000000000370 - STRSZ 0x0000000000000455 - SYMENT 0x0000000000000018 - PLTGOT 0x0000000000202ed8 - RELA 0x0000000000001018 - RELASZ 0x0000000000000378 - RELAENT 0x0000000000000018 - VERDEF 0x0000000000000ec8 - VERDEFNUM 0x0000000000000007 - BIND_NOW 0x0000000000000000 - FLAGS_1 0x0000000000000001 - VERNEED 0x0000000000000fb8 - VERNEEDNUM 0x0000000000000001 - VERSYM 0x0000000000000e3e - RELACOUNT 0x0000000000000003 - -Version definitions: -1 0x01 0x02928561 
libkeyutils.so.1 -2 0x00 0x0ae3c993 KEYUTILS_0.3 -3 0x00 0x0ae3ca90 KEYUTILS_1.0 - KEYUTILS_0.3 -4 0x00 0x0ae3ca93 KEYUTILS_1.3 - KEYUTILS_1.0 -5 0x00 0x0ae3ca94 KEYUTILS_1.4 - KEYUTILS_1.3 -6 0x00 0x0ae3ca95 KEYUTILS_1.5 - KEYUTILS_1.4 -7 0x00 0x0ae3ca96 KEYUTILS_1.6 - KEYUTILS_1.5 - -Version References: - required from libc.so.6: - 0x09691974 0x00 12 GLIBC_2.3.4 - 0x0d696917 0x00 11 GLIBC_2.7 - 0x06969194 0x00 10 GLIBC_2.14 - 0x0d696914 0x00 09 GLIBC_2.4 - 0x09691a75 0x00 08 GLIBC_2.2.5 - -Sections: -Idx Name Size VMA LMA File off Algn - 0 .note.gnu.build-id 00000024 00000000000001c8 00000000000001c8 000001c8 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 1 .gnu.hash 00000180 00000000000001f0 00000000000001f0 000001f0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 2 .dynsym 00000678 0000000000000370 0000000000000370 00000370 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 3 .dynstr 00000455 00000000000009e8 00000000000009e8 000009e8 2**0 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 4 .gnu.version 0000008a 0000000000000e3e 0000000000000e3e 00000e3e 2**1 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 5 .gnu.version_d 000000ec 0000000000000ec8 0000000000000ec8 00000ec8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 6 .gnu.version_r 00000060 0000000000000fb8 0000000000000fb8 00000fb8 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 7 .rela.dyn 00000378 0000000000001018 0000000000001018 00001018 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 8 .init 00000017 0000000000001390 0000000000001390 00001390 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 9 .plt 00000010 00000000000013b0 00000000000013b0 000013b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 10 .plt.got 000000f0 00000000000013c0 00000000000013c0 000013c0 2**3 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 11 .text 00000cec 00000000000014b0 00000000000014b0 000014b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 12 .fini 00000009 000000000000219c 000000000000219c 0000219c 2**2 - CONTENTS, ALLOC, LOAD, READONLY, CODE - 13 .rodata 000000a0 00000000000021b0 00000000000021b0 000021b0 2**4 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 14 .eh_frame_hdr 0000012c 0000000000002250 0000000000002250 00002250 2**2 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 15 .eh_frame 0000058c 0000000000002380 0000000000002380 00002380 2**3 - CONTENTS, ALLOC, LOAD, READONLY, DATA - 16 .init_array 00000008 0000000000202cd0 0000000000202cd0 00002cd0 2**3 - CONTENTS, ALLOC, LOAD, DATA - 17 .fini_array 00000008 0000000000202cd8 0000000000202cd8 00002cd8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 18 .data.rel.ro 00000008 0000000000202ce0 0000000000202ce0 00002ce0 2**3 - CONTENTS, ALLOC, LOAD, DATA - 19 .dynamic 000001f0 0000000000202ce8 0000000000202ce8 00002ce8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 20 .got 00000128 0000000000202ed8 0000000000202ed8 00002ed8 2**3 - CONTENTS, ALLOC, LOAD, DATA - 21 .bss 00000008 0000000000203000 0000000000203000 00003000 2**0 - ALLOC - 22 .gnu_debuglink 00000020 0000000000000000 0000000000000000 00003000 2**2 - CONTENTS, READONLY - 23 .gnu_debugdata 00000414 0000000000000000 0000000000000000 00003020 2**0 - CONTENTS, READONLY -SYMBOL TABLE: -no symbols -''' # noqa - - -def test_libkeyutils(): - libkeyutils_search = Libkeyutils(context_wrap(SEARCH_NOT_FOUND)) - assert libkeyutils_search.libraries == SEARCH_NOT_FOUND.strip().split('\n') - - libkeyutils_search = Libkeyutils(context_wrap(SEARCH_FOUND_1)) - assert libkeyutils_search.libraries == SEARCH_FOUND_1.strip().split('\n') - - libkeyutils_search = Libkeyutils(context_wrap(SEARCH_FOUND_2)) - assert 
libkeyutils_search.libraries == SEARCH_FOUND_2.strip().split('\n') - - -def test_libkeyutilsobjdumps(): - libkeyutils_dumps = LibkeyutilsObjdumps(context_wrap(DUMP_NOT_FOUND)) - assert len(libkeyutils_dumps.linked_libraries) == 2 - assert libkeyutils_dumps.linked_libraries == {'/lib/libkeyutils.so.1': - ['libdl.so.2', 'libc.so.6'], - '/lib64/libkeyutils.so.1': - ['libdl.so.2', 'libc.so.6'], - } - - libkeyutils_dumps = LibkeyutilsObjdumps(context_wrap(DUMP_FOUND_1)) - assert len(libkeyutils_dumps.linked_libraries) == 2 - assert libkeyutils_dumps.linked_libraries == {'/lib/libkeyutils.so.1': - ['libdl.so.2', 'libc.so.6', 'libsbr.so'], - '/lib64/libkeyutils.so.1': - ['libdl.so.2', 'libc.so.6'], - } - - libkeyutils_dumps = LibkeyutilsObjdumps(context_wrap(DUMP_FOUND_2)) - assert len(libkeyutils_dumps.linked_libraries) == 2 - assert libkeyutils_dumps.linked_libraries == {'/lib/libkeyutils.so.1': - ['libdl.so.2', 'libc.so.6', 'libsbr.so'], - '/lib64/libkeyutils.so.1': - ['libdl.so.2', 'libsbr.so.6', 'libfake.so'], - } diff --git a/insights/parsers/tests/test_whoopsie.py b/insights/parsers/tests/test_whoopsie.py deleted file mode 100644 index 7a2595d78..000000000 --- a/insights/parsers/tests/test_whoopsie.py +++ /dev/null @@ -1,45 +0,0 @@ -import doctest -import pytest - -from insights.parsers import whoopsie -from insights.parsers.whoopsie import Whoopsie -from insights.tests import context_wrap - -BOTH_MATCHED = """ -/var/crash/.reports-1000-user/whoopsie-report -""".strip() - -NOT_FIND_MATCHED = """ -/usr/bin/find: '/var/crash': No such file or directory -/var/tmp/.reports-1000-user/whoopsie-report -""".strip() - -BOTH_NOT_FIND = """ -/usr/bin/find: '/var/crash': No such file or directory -/usr/bin/find: '/var/tmp': No such file or directory -""".strip() - -BOTH_EMPTY = """ -""" - -TEST_CASES = [ - (BOTH_MATCHED, "1000", "/var/crash/.reports-1000-user/whoopsie-report"), - (NOT_FIND_MATCHED, "1000", "/var/tmp/.reports-1000-user/whoopsie-report"), - (BOTH_NOT_FIND, None, None), - (BOTH_EMPTY, None, None) -] - - -@pytest.mark.parametrize("output, uid, file", TEST_CASES) -def test_whoopsie(output, uid, file): - test = Whoopsie(context_wrap(output)) - assert test.uid == uid - assert test.file == file - - -def test_doc_examples(): - env = { - "whoopsie": Whoopsie(context_wrap(BOTH_MATCHED)), - } - failed, total = doctest.testmod(whoopsie, globs=env) - assert failed == 0 diff --git a/insights/parsers/whoopsie.py b/insights/parsers/whoopsie.py deleted file mode 100644 index d130d4687..000000000 --- a/insights/parsers/whoopsie.py +++ /dev/null @@ -1,44 +0,0 @@ -""" -Whoopsie - command ``/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit`` -========================================================================================================= -""" - -from insights.core import CommandParser -from insights.core.plugins import parser -from insights.specs import Specs -import re - -WHOOPSIE_RE = re.compile(r'.*.reports-(\d+)-.*/whoopsie-report') - - -@parser(Specs.woopsie) -class Whoopsie(CommandParser): - """ - Class for parsing the ``/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit`` - command. 
- - Attributes: - uid (string): uid parsed from the file path - file (string): the line parsed from the command output - - Sample output of this command is:: - - /var/crash/.reports-1000-user/whoopsie-report - - Examples: - >>> type(whoopsie) - - >>> whoopsie.uid - '1000' - >>> whoopsie.file - '/var/crash/.reports-1000-user/whoopsie-report' - """ - - def parse_content(self, content): - self.uid = None - self.file = None - - match_whoopsie = WHOOPSIE_RE.search('\n'.join(content)) - if match_whoopsie: - self.uid = match_whoopsie.group(1) - self.file = match_whoopsie.group(0) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index f97edfeee..fa2598379 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -278,8 +278,6 @@ class Specs(SpecSet): ksmstate = RegistryPoint() kubepods_cpu_quota = RegistryPoint(multi_output=True) lastupload = RegistryPoint(multi_output=True) - libkeyutils_objdumps = RegistryPoint() - libkeyutils = RegistryPoint() libssh_client_config = RegistryPoint(filterable=True) libssh_server_config = RegistryPoint(filterable=True) libvirtd_log = RegistryPoint(filterable=True) @@ -670,7 +668,6 @@ class Specs(SpecSet): vmware_tools_conf = RegistryPoint() vsftpd_conf = RegistryPoint(filterable=True) vsftpd = RegistryPoint() - woopsie = RegistryPoint() x86_pti_enabled = RegistryPoint() x86_ibpb_enabled = RegistryPoint() x86_ibrs_enabled = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 11c9fbe28..ae4cbf3dd 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -369,8 +369,6 @@ def httpd_cmd(broker): kubepods_cpu_quota = glob_file("/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us") last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) - libkeyutils = simple_command("/usr/bin/find -L /lib /lib64 -name 'libkeyutils.so*'") - libkeyutils_objdumps = simple_command('/usr/bin/find -L /lib /lib64 -name libkeyutils.so.1 -exec objdump -x "{}" \;') libssh_client_config = simple_file("/etc/libssh/libssh_client.config") libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") @@ -672,7 +670,6 @@ def sap_sid_name(broker): virtlogd_conf = simple_file("/etc/libvirt/virtlogd.conf") vsftpd = simple_file("/etc/pam.d/vsftpd") vsftpd_conf = simple_file("/etc/vsftpd/vsftpd.conf") - woopsie = simple_command(r"/usr/bin/find /var/crash /var/tmp -path '*.reports-*/whoopsie-report' -print -quit") x86_pti_enabled = simple_file("sys/kernel/debug/x86/pti_enabled") x86_ibpb_enabled = simple_file("sys/kernel/debug/x86/ibpb_enabled") x86_ibrs_enabled = simple_file("sys/kernel/debug/x86/ibrs_enabled") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index a696b5169..83cb51686 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -89,8 +89,6 @@ class InsightsArchiveSpecs(Specs): iscsiadm_m_session = simple_file("insights_commands/iscsiadm_-m_session") keystone_crontab = simple_file("insights_commands/crontab_-l_-u_keystone") kpatch_list = simple_file("insights_commands/kpatch_list") - libkeyutils = simple_file("insights_commands/find_-L_.lib_.lib64_-name_libkeyutils.so") - libkeyutils_objdumps = simple_file("insights_commands/find_-L_.lib_.lib64_-name_libkeyutils.so.1_-exec_objdump_-x") localtime = 
simple_file("insights_commands/file_-L_.etc.localtime") lpstat_p = simple_file("insights_commands/lpstat_-p") ls_boot = simple_file("insights_commands/ls_-lanR_.boot") @@ -230,6 +228,5 @@ class InsightsArchiveSpecs(Specs): vgs_noheadings = simple_file("insights_commands/vgs_--nameprefixes_--noheadings_--separator_-a_-o_vg_all_--config_global_locking_type_0") virsh_list_all = simple_file("insights_commands/virsh_--readonly_list_--all") virt_what = simple_file("insights_commands/virt-what") - woopsie = simple_file("insights_commands/find_.var.crash_.var.tmp_-path_.reports-_.whoopsie-report") yum_list_available = simple_file("insights_commands/yum_-C_--noplugins_list_available") yum_repolist = first_file(["insights_commands/yum_-C_--noplugins_repolist", "insights_commands/yum_-C_repolist"]) From ff6a97fe5d6b05745a75b8f64e16bf046810af3c Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Fri, 9 Oct 2020 18:14:29 -0400 Subject: [PATCH 198/892] fix hosts file parsing for core collection soscleaner (#2781) Signed-off-by: Jeremy Crafts --- insights/contrib/soscleaner.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 5613d753b..b1af621ec 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -347,14 +347,19 @@ def _clean_up(self): except Exception as e: #pragma: no cover self.logger.exception(e) - def _process_hosts_file(self): + def _process_hosts_file(self, options): # this will process the hosts file more thoroughly to try and capture as many server short names/aliases as possible # could lead to false positives if people use dumb things for server aliases, like 'file' or 'server' or other common terms # this may be an option that can be enabled... --hosts or similar? 
+ if options.core_collect: + hosts_file = os.path.join(self.dir_path, 'data', 'etc/hosts') + else: + hosts_file = os.path.join(self.dir_path, 'etc/hosts') + try: - if os.path.isfile(os.path.join(self.dir_path, 'etc/hosts')): - with open(os.path.join(self.dir_path, 'etc/hosts')) as f: + if os.path.isfile(hosts_file): + with open(hosts_file) as f: self.logger.con_out("Processing hosts file for better obfuscation coverage") data = f.readlines() for line in data: @@ -678,7 +683,7 @@ def clean_report(self, options, sosreport): # pragma: no cover if self.hostname: # if we have a hostname that's not a None type self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later - self._process_hosts_file() # we'll take a dig through the hosts file and make sure it is as scrubbed as possible + self._process_hosts_file(options) # we'll take a dig through the hosts file and make sure it is as scrubbed as possible self._domains2db() if options.core_collect: From bd041633d394d07f31ff3781889592f2a8ef221d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 12 Oct 2020 15:40:14 -0400 Subject: [PATCH 199/892] generate log and lib dirs [RHCLOUD-8677] (#2771) * generate log and lib dirs Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 17 ++++++++++++++++ insights/client/constants.py | 39 +++++++++++++++++++++++++++++++++--- insights/client/phase/v1.py | 4 ++-- 3 files changed, 55 insertions(+), 5 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 08630f5db..29d02211b 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -52,6 +52,7 @@ def __init__(self, config=None, setup_logging=True, **kwargs): # setup_logging is True when called from phase, but not from wrapper. 
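         # use this to do any common init (like auto_config)
+        # make sure the log and lib dirs exist before set_up_logging()
+        # tries to open the log file there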
         if setup_logging:
+            _init_client_config_dirs()
             self.set_up_logging()
             try_auto_configuration(self.config)
             self.initialize_tags()
@@ -693,3 +694,19 @@ def format_config(config):
             del config_copy["proxy"]
     finally:
         return json.dumps(config_copy, indent=4)
+
+
+def _init_client_config_dirs():
+    '''
+    Initialize log and lib dirs
+    TODO: init non-root config dirs
+    '''
+    for d in (constants.log_dir, constants.insights_core_lib_dir):
+        try:
+            os.makedirs(d)
+        except OSError as e:
+            if e.errno == errno.EEXIST:
+                # dir exists, this is OK
+                pass
+            else:
+                raise e
diff --git a/insights/client/constants.py b/insights/client/constants.py
index 758a95bb9..7e51c12d4 100644
--- a/insights/client/constants.py
+++ b/insights/client/constants.py
@@ -1,8 +1,41 @@
 import os
 
+_user_home = os.path.expanduser('~')
+_app_name = 'insights-client'
+_uid = os.getuid()
+_user_cache = os.getenv('XDG_CACHE_HOME', default=os.path.join(_user_home, '.cache'))
+
+
+def _log_dir():
+    '''
+    Get the insights-client log dir
+
+    Default: /var/log/insights-client
+    Non-root user: $XDG_CACHE_HOME/insights-client/log || $HOME/.cache/insights-client/log
+    '''
+    if _uid == 0:
+        insights_log_dir = os.path.join(os.sep, 'var', 'log', _app_name)
+    else:
+        insights_log_dir = os.path.join(_user_cache, _app_name, 'log')
+    return insights_log_dir
+
+
+def _lib_dir():
+    '''
+    Get the insights-client egg cache dir
+
+    Default: /var/lib/insights
+    Non-root user: $XDG_CACHE_HOME/insights-client/lib || $HOME/.cache/insights-client/lib
+    '''
+    if _uid == 0:
+        insights_lib_dir = os.path.join(os.sep, 'var', 'lib', 'insights')
+    else:
+        insights_lib_dir = os.path.join(_user_cache, _app_name, 'lib')
+    return insights_lib_dir
+
 
 class InsightsConstants(object):
-    app_name = 'insights-client'
+    app_name = _app_name
     auth_method = 'BASIC'
     package_path = os.path.dirname(
         os.path.dirname(os.path.abspath(__file__)))
@@ -11,7 +44,7 @@ class InsightsConstants(object):
     default_conf_dir = os.getenv('INSIGHTS_CONF_DIR', default='/etc/insights-client')
     default_conf_file = os.path.join(default_conf_dir, 'insights-client.conf')
     default_tags_file = os.path.join(default_conf_dir, 'tags.yaml')
-    log_dir = os.path.join(os.sep, 'var', 'log', app_name)
+    log_dir = _log_dir()
     simple_find_replace_dir = '/etc/redhat-access-insights'
     default_log_file = os.path.join(log_dir, app_name + '.log')
     default_payload_log = os.path.join(log_dir, app_name + '-payload.log')
@@ -34,7 +67,7 @@ class InsightsConstants(object):
     core_etag_file = os.path.join(default_conf_dir, '.insights-core.etag')
     core_gpg_sig_etag_file = os.path.join(default_conf_dir, '.insights-core-gpg-sig.etag')
     last_upload_results_file = os.path.join(default_conf_dir, '.last-upload.results')
-    insights_core_lib_dir = os.path.join('/', 'var', 'lib', 'insights')
+    insights_core_lib_dir = _lib_dir()
     insights_core_rpm = os.path.join(default_conf_dir, 'rpm.egg')
     insights_core_last_stable = os.path.join(insights_core_lib_dir, 'last_stable.egg')
     insights_core_last_stable_gpg_sig = os.path.join(insights_core_lib_dir, 'last_stable.egg.asc')
diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py
index a031d8eb9..cde361fe5 100644
--- a/insights/client/phase/v1.py
+++ b/insights/client/phase/v1.py
@@ -22,10 +22,10 @@ def phase(func):
     def _f():
         try:
             config = InsightsConfig().load_all()
-        except ValueError as e:
+            client = InsightsClient(config)
+        except (ValueError, OSError) as e:
             sys.stderr.write('ERROR: ' + str(e) + '\n')
             sys.exit(constants.sig_kill_bad)
-        client = 
InsightsClient(config) if config.debug: logger.info("Core path: %s", os.path.dirname(__file__)) try: From ca82b06bc10666ad326120ebe86f179fd144618e Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 14 Oct 2020 14:11:05 -0400 Subject: [PATCH 200/892] add collection stats file for data collection info (#2764) * add collection stats file for data collection info Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 21 +++++++++++++++++++ insights/client/insights_spec.py | 13 +++++++++++- .../tests/client/test_skip_commands_files.py | 3 ++- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index af88100cb..3e2a5bfd1 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -142,6 +142,11 @@ def _write_egg_release(self): self.archive.add_metadata_to_archive( egg_release, '/egg_release') + def _write_collection_stats(self, collection_stats): + logger.debug("Writing collection stats to archive...") + self.archive.add_metadata_to_archive( + json.dumps(collection_stats), '/collection_stats') + def _run_pre_command(self, pre_cmd): ''' Run a pre command to get external args for a command @@ -249,6 +254,8 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): self.archive.create_archive_dir() self.archive.create_command_dir() + collection_stats = {} + if rm_conf is None: rm_conf = {} logger.debug('Beginning to run collection spec...') @@ -269,6 +276,11 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): continue cmd_spec = InsightsCommand(self.config, s, self.mountpoint) self.archive.add_to_archive(cmd_spec) + collection_stats[s['command']] = { + 'return_code': cmd_spec.return_code, + 'exec_time': cmd_spec.exec_time, + 'output_size': cmd_spec.output_size + } for f in conf['files']: rm_files = rm_conf.get('files', []) if f['file'] in rm_files or f.get('symbolic_name') in rm_files: @@ -282,6 +294,10 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): else: file_spec = InsightsFile(s, self.mountpoint) self.archive.add_to_archive(file_spec) + collection_stats[s['file']] = { + 'exec_time': file_spec.exec_time, + 'output_size': file_spec.output_size + } if 'globs' in conf: for g in conf['globs']: glob_specs = self._parse_glob_spec(g) @@ -291,6 +307,10 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): else: glob_spec = InsightsFile(g, self.mountpoint) self.archive.add_to_archive(glob_spec) + collection_stats[g['file']] = { + 'exec_time': glob_spec.exec_time, + 'output_size': glob_spec.output_size + } logger.debug('Spec collection finished.') self.redact(rm_conf) @@ -303,6 +323,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): self._write_tags() self._write_blacklist_report(blacklist_report) self._write_egg_release() + self._write_collection_stats(collection_stats) logger.debug('Metadata collection finished.') def redact(self, rm_conf): diff --git a/insights/client/insights_spec.py b/insights/client/insights_spec.py index d0b9037b9..920c4d85d 100644 --- a/insights/client/insights_spec.py +++ b/insights/client/insights_spec.py @@ -4,6 +4,8 @@ import shlex import logging import six +import time +import sys from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile from insights.util import mangle @@ -21,6 +23,9 @@ class InsightsSpec(object): def __init__(self, config, spec): self.config = config self.pattern = spec['pattern'] if 
spec['pattern'] else None + self.return_code = None + self.exec_time = None + self.output_size = None class InsightsCommand(InsightsSpec): @@ -69,6 +74,7 @@ def get_output(self): if set.intersection(set(args), constants.command_blacklist): raise RuntimeError("Command Blacklist: " + self.command) + exec_start = time.time() try: logger.debug('Executing: %s', args) proc0 = Popen(args, shell=False, stdout=PIPE, stderr=STDOUT, @@ -108,6 +114,9 @@ def get_output(self): logger.debug("Proc0 Status: %s", proc0.returncode) logger.debug("Proc0 stderr: %s", stderr) + self.return_code = proc0.returncode + self.exec_time = time.time() - exec_start + self.output_size = sys.getsizeof(stdout) return stdout.decode('utf-8', 'ignore').strip() @@ -129,6 +138,7 @@ def get_output(self): logger.debug('File %s does not exist', self.real_path) return + exec_start = time.time() sedcmd = Popen(['sed', '', self.real_path], stdout=PIPE) if self.pattern is None: @@ -144,5 +154,6 @@ def get_output(self): sedcmd.stdout.close() output = proc1.communicate()[0] - + self.exec_time = time.time() - exec_start + self.output_size = sys.getsizeof(output) return output.decode('utf-8', 'ignore').strip() diff --git a/insights/tests/client/test_skip_commands_files.py b/insights/tests/client/test_skip_commands_files.py index 0a3a47377..b061b7757 100644 --- a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -61,6 +61,7 @@ def test_omit_symbolic_name(InsightsCommand, InsightsFile, parse_file_spec): @patch("insights.client.data_collector.InsightsFile") @patch("insights.client.data_collector.InsightsArchive") @patch("insights.client.data_collector.DataCollector.redact") +@patch("insights.client.data_collector.DataCollector._write_collection_stats", MagicMock()) def test_symbolic_name_bc(_, InsightsArchive, InsightsFile, InsightsCommand): """ WICKED EDGE CASE: in case uploader.json is old and doesn't have symbolic names, don't crash @@ -72,7 +73,7 @@ def test_symbolic_name_bc(_, InsightsArchive, InsightsFile, InsightsCommand): 'commands': [{"command": "/sbin/chkconfig --list", "pattern": []}], 'pre_commands': []} rm_conf = {'files': ["vsftpd"], "commands": ["chkconfig"]} - data_collector.run_collection(collection_rules, rm_conf, {}, '') + data_collector.run_collection(collection_rules, rm_conf, {}, {}) InsightsFile.assert_called_once() InsightsCommand.assert_called_once() InsightsArchive.return_value.add_to_archive.assert_has_calls( From 1afbf73e4fad2aa01fb70dcdcfb70cbe04d143b0 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 14 Oct 2020 14:30:02 -0400 Subject: [PATCH 201/892] update uploader.json map Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index bdbb08640..c141207c6 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -186,7 +186,6 @@ "e1000: E1000 MODULE IS NOT SUPPORTED", "efi", "fw=8.08.", - "ixgbevf: Unknown parameter `InterruptThrottleRate'", "l1tf", "mce: ", "netconsole: network logging started", @@ -1940,11 +1939,7 @@ "INFO", "Low disk space. 
Host", "VDS_LOW_DISK_SPACE_ERROR", - "org.ovirt.engine.core.bll.storage.lease.AddVmLeaseCommand", - "org.ovirt.engine.core.bll.storage.lease.RemoveVmLeaseCommand", - "org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector", - "org.ovirt.engine.core.vdsbroker.vdsbroker.HotPlugLeaseVDSCommand", - "org.ovirt.engine.core.vdsbroker.vdsbroker.HotUnplugLeaseVDSCommand" + "org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector" ], "symbolic_name": "engine_log" }, @@ -2203,8 +2198,7 @@ "file": "/var/log/ironic-inspector/ironic-inspector.log", "pattern": [ "Certificate did not match expected hostname", - "ERROR requests.packages.urllib3.connection", - "Failed to set boot device to PXE" + "ERROR requests.packages.urllib3.connection" ], "symbolic_name": "ironic_inspector_log" }, @@ -2212,8 +2206,7 @@ "file": "/var/log/containers/ironic-inspector/ironic-inspector.log", "pattern": [ "Certificate did not match expected hostname", - "ERROR requests.packages.urllib3.connection", - "Failed to set boot device to PXE" + "ERROR requests.packages.urllib3.connection" ], "symbolic_name": "ironic_inspector_log" }, @@ -2370,7 +2363,6 @@ "canceled DHCP transaction, DHCP client pid", "chardev: opening backend \"socket\" failed", "clearing Tx timestamp hang", - "dev_set_mac_address of dev", "device-mapper: multipath: Failing path", "does not seem to be present, delaying initialization", "drivers/input/input-leds.c:115 input_leds_connect", @@ -2382,10 +2374,8 @@ "eviction manager: must evict pod(s) to reclaim nodefsInodes", "eviction manager: observations: signal=allocatableNodeFs.available, available: -", "ext4_ext_search_left", - "failed to modify QP to RTR: -22", "failed while handling", "failed with error -110", - "failed! ALB mode requires that the base driver support setting the hw address also when the network", "failed: Connection amqps:", "failed: Invalid argument", "failed: rpc error: code = 2 desc = unable to inspect docker image", @@ -2600,7 +2590,6 @@ "libvirt-guests.sh", "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", "libvirtError: Unable to delete file /var/lib/nova/instances/", - "unsupported configuration: Target device drive address", "unsupported configuration: Target network card MTU", "unsupported configuration: Unable to find security driver for model selinux" ], @@ -2644,7 +2633,6 @@ "libvirt-guests.sh", "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", "libvirtError: Unable to delete file /var/lib/nova/instances/", - "unsupported configuration: Target device drive address", "unsupported configuration: Target network card MTU", "unsupported configuration: Unable to find security driver for model selinux" ], @@ -4101,5 +4089,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-09-24T14:46:59.896476" + "version": "2020-10-08T10:01:47.114185" } \ No newline at end of file From f569ddef246bc3ffbab91189732ed45944e876c0 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Thu, 15 Oct 2020 01:58:41 +0000 Subject: [PATCH 202/892] Add 'firewall_cmd_list_all_zones' spec for sos archive (#2783) Signed-off-by: Akshay Gaikwad --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 0107721b6..56571294b 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -76,6 +76,7 @@ class SosSpecs(Specs): ethtool_k = 
glob_file("sos_commands/networking/ethtool_-k_*") exim_conf = simple_file("etc/exim.conf") fdisk_l_sos = first_of([glob_file(r"sos_commands/filesys/fdisk_-l_*"), glob_file(r"sos_commands/block/fdisk_-l_*")]) + firewall_cmd_list_all_zones = simple_file("sos_commands/firewalld/firewall-cmd_--list-all-zones") foreman_production_log = first_of([simple_file("/var/log/foreman/production.log"), simple_file("sos_commands/foreman/foreman-debug/var/log/foreman/production.log")]) foreman_proxy_conf = first_of([simple_file("/etc/foreman-proxy/settings.yml"), simple_file("sos_commands/foreman/foreman-debug/etc/foreman-proxy/settings.yml")]) foreman_proxy_log = first_of([simple_file("/var/log/foreman-proxy/proxy.log"), simple_file("sos_commands/foreman/foreman-debug/var/log/foreman-proxy/proxy.log")]) From 85ac8fc9ba7eceb172c51be792a95dda8d4e59e8 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Mon, 19 Oct 2020 10:36:26 +0530 Subject: [PATCH 203/892] Add du_dirs Spec to contexts (#2785) * Add du_dirs Spec to contexts Signed-off-by: Rohan Arora * Remove from collection_rules tests Signed-off-by: Rohan Arora --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index ae4cbf3dd..0e3554aec 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -238,6 +238,7 @@ def is_ceph_monitor(broker): docker_storage_setup = simple_file("/etc/sysconfig/docker-storage-setup") docker_sysconfig = simple_file("/etc/sysconfig/docker") dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") + du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id" --json') engine_log = simple_file("/var/log/ovirt-engine/engine.log") etc_journald_conf = simple_file(r"etc/systemd/journald.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 83cb51686..cf7555ed5 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -45,6 +45,7 @@ class InsightsArchiveSpecs(Specs): docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") + du_dirs = glob_file("insights_commands/du_-s_-k_*") engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") ethtool_S = glob_file("insights_commands/ethtool_-S_*") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index b2cbfe5a7..56f3bcf5e 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -52,7 +52,6 @@ def test_get_component_by_symbolic_name(): # Filter out the (B) specs with this list skipped_specs = [ 'ceph_osd_df', - 'du_dirs', 'gluster_peer_status', 'gluster_v_status', 'heat_crontab', From 
99fc8b6a3a2671378bf902190bb4d55703c92967 Mon Sep 17 00:00:00 2001 From: Sachin Date: Mon, 19 Oct 2020 10:59:28 +0530 Subject: [PATCH 204/892] Add parser for /sys/module/vhost_net/parameters/experimental_zcopytx (#2778) Signed-off-by: Sachin Patil --- insights/parsers/sys_module.py | 28 +++++++++++++++++------ insights/parsers/tests/test_sys_module.py | 15 +++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 4 files changed, 37 insertions(+), 8 deletions(-) diff --git a/insights/parsers/sys_module.py b/insights/parsers/sys_module.py index 35a548b46..700ff4c07 100644 --- a/insights/parsers/sys_module.py +++ b/insights/parsers/sys_module.py @@ -7,13 +7,12 @@ Parsers included in this module are: DMModUseBlkMq - file ``/sys/module/dm_mod/parameters/use_blk_mq`` - +------------------------------------------------------------------ SCSIModUseBlkMq - file ``/sys/module/scsi_mod/parameters/use_blk_mq`` --------------------------------------------------------------------- - +VHostNetZeroCopyTx - file ``/sys/module/vhost_net/parameters/experimental_zcopytx`` +----------------------------------------------------------------------------------- """ - - from insights import parser, Parser from insights.parsers import SkipException from insights.specs import Specs @@ -51,10 +50,9 @@ def is_on(self): """ if self.val in ['Y', '1']: return True - elif self.val in ['N', '0']: + if self.val in ['N', '0']: return False - else: - raise ValueError("Unexpected value {0}, please get raw data from attribute 'val' and tell is_on by yourself.".format(self.val)) + raise ValueError("Unexpected value {0}, please get raw data from attribute 'val' and tell is_on by yourself.".format(self.val)) @parser(Specs.dm_mod_use_blk_mq) @@ -87,3 +85,19 @@ class SCSIModUseBlkMq(XModUseBlkMq): False """ pass + + +@parser(Specs.vhost_net_zero_copy_tx) +class VHostNetZeroCopyTx(XModUseBlkMq): + """This file `/sys/module/vhost_net/parameters/experimental_zcopytx` shows if + vhost_net's zero-copy tx parameter is enabled or not. 
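+
+    The inherited ``is_on`` property follows the usual module-parameter
+    convention: ``1``/``Y`` map to ``True`` and ``0``/``N`` to ``False``;
+    any other value raises ``ValueError``.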
+ + Examples:: + + >>> vhost_net_zero_copy_tx.val + '0' + >>> vhost_net_zero_copy_tx.is_on + False + + """ + pass diff --git a/insights/parsers/tests/test_sys_module.py b/insights/parsers/tests/test_sys_module.py index 951c03839..a4817430c 100644 --- a/insights/parsers/tests/test_sys_module.py +++ b/insights/parsers/tests/test_sys_module.py @@ -1,7 +1,7 @@ import doctest import pytest from insights.parsers import sys_module, SkipException -from insights.parsers.sys_module import DMModUseBlkMq, SCSIModUseBlkMq +from insights.parsers.sys_module import DMModUseBlkMq, SCSIModUseBlkMq, VHostNetZeroCopyTx from insights.tests import context_wrap @@ -19,11 +19,16 @@ """.strip() +ZERO_COPY = """ +0 +""".strip() + def test_doc_examples(): env = { 'dm_mod_use_blk_mq': DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_Y)), 'scsi_mod_use_blk_mq': SCSIModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_N)), + 'vhost_net_zero_copy_tx': VHostNetZeroCopyTx(context_wrap(ZERO_COPY)), } failed, total = doctest.testmod(sys_module, globs=env) assert failed == 0 @@ -41,6 +46,14 @@ def test_XModUseBlkMq(): dm_mod_unknow = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE)) assert dm_mod_unknow.val == 'unknow_case' + zero_copy_0 = VHostNetZeroCopyTx(context_wrap(ZERO_COPY)) + assert zero_copy_0.is_on is False + assert zero_copy_0.val == '0' + + zero_copy_1 = VHostNetZeroCopyTx(context_wrap(ZERO_COPY.replace('0', '1'))) + assert zero_copy_1.is_on is True + assert zero_copy_1.val == '1' + def test_class_exceptions(): with pytest.raises(SkipException): diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index fa2598379..ccecfd0d5 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -659,6 +659,7 @@ class Specs(SpecSet): vgs_noheadings = RegistryPoint() vgs_noheadings_all = RegistryPoint() vgs = RegistryPoint() + vhost_net_zero_copy_tx = RegistryPoint() virsh_list_all = RegistryPoint() virt_what = RegistryPoint() virt_who_conf = RegistryPoint(multi_output=True, filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 0e3554aec..708aa92fc 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -661,6 +661,7 @@ def sap_sid_name(broker): usr_journald_conf_d = glob_file(r"usr/lib/systemd/journald.conf.d/*.conf") # note that etc_journald.conf.d also exists vdo_status = simple_command("/usr/bin/vdo status") vgdisplay = simple_command("/sbin/vgdisplay") + vhost_net_zero_copy_tx = simple_file("/sys/module/vhost_net/parameters/experimental_zcopytx") vdsm_log = simple_file("var/log/vdsm/vdsm.log") vdsm_logger_conf = simple_file("etc/vdsm/logger.conf") vma_ra_enabled = simple_file("/sys/kernel/mm/swap/vma_ra_enabled") From 5031f878f5c4d7b73449ccf09f48c7289f39bb10 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Mon, 19 Oct 2020 00:33:07 -0500 Subject: [PATCH 205/892] Include rhsm_conf module in documentation (#2790) * Add rhsm_conf.rst to include parser in doc build * Fix docstring errors Signed-off-by: Bob Fahr --- docs/shared_parsers_catalog/rhsm_conf.rst | 3 +++ insights/parsers/rhsm_conf.py | 1 + 2 files changed, 4 insertions(+) create mode 100644 docs/shared_parsers_catalog/rhsm_conf.rst diff --git a/docs/shared_parsers_catalog/rhsm_conf.rst b/docs/shared_parsers_catalog/rhsm_conf.rst new file mode 100644 index 000000000..2e344968e --- /dev/null +++ b/docs/shared_parsers_catalog/rhsm_conf.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.rhsm_conf + :members: + :show-inheritance: diff --git a/insights/parsers/rhsm_conf.py b/insights/parsers/rhsm_conf.py index 0f6adad76..57f9b490b 100644 --- a/insights/parsers/rhsm_conf.py +++ b/insights/parsers/rhsm_conf.py @@ -3,6 +3,7 @@ ==================================== Typical content of "/etc/rhsm/rhsm.conf" is:: + [rhsm] # Content base URL: baseurl= https://cdn.redhat.com From 07861c5d8259a809f79dd4a33503616d06aae908 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Mon, 19 Oct 2020 11:55:58 -0400 Subject: [PATCH 206/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index c141207c6..755e38434 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1551,7 +1551,10 @@ }, { "file": "/var/log/candlepin/candlepin.log", - "pattern": [], + "pattern": [ + "Candlepin initializing context", + "No Dead Letter Address configured for queue event.org.candlepin.audit.DatabaseListener in AddressSettings" + ], "symbolic_name": "candlepin_log" }, { @@ -2316,6 +2319,9 @@ "Cannot assign requested address", "Cannot assign requested address: AH00072", "Connection amqps://subscription.rhn.redhat.com:5647 disconnected", + "Connection amqps://subscription.rhn.redhat.com:5647 timed out: Opening connection", + "Connection amqps://subscription.rhsm.redhat.com:5647 disconnected", + "Connection amqps://subscription.rhsm.redhat.com:5647 timed out: Opening connection", "Corosync main process was not scheduled (@", "Could not set", "DHCPv4 lease renewal requested", @@ -2398,7 +2404,6 @@ "is down or the link is down", "is greater than comparison timestamp", "iscsiadm: iscsiadm: Could not log into all portals", - "kernel/softirq.c:159 local_bh_enable+", "kernel: BUG: soft lockup", "kernel: CIFS VFS: Unexpected SMB signature", "kernel: INFO: task xfsaild/md", @@ -4089,5 +4094,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-10-08T10:01:47.114185" + "version": "2020-10-14T14:41:32.281672" } \ No newline at end of file From 7bead8627bb08358d311fe1870ca32af9538f5f3 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 21 Oct 2020 08:40:44 -0500 Subject: [PATCH 207/892] Add new IBM cloud provider, refactor combiner (#2789) * Add new IBM cloud provider, refactor combiner * Added new cloud provider for IBM cloud * Refactored combiner without changing public interface * Updated tests and docs Signed-off-by: Bob Fahr * Documentation updates Signed-off-by: Bob Fahr * Remove extra comments and fix sp err Signed-off-by: Bob Fahr --- insights/combiners/cloud_provider.py | 508 ++++++++++++------ .../combiners/tests/test_cloud_provider.py | 63 ++- 2 files changed, 379 insertions(+), 192 deletions(-) diff --git a/insights/combiners/cloud_provider.py b/insights/combiners/cloud_provider.py index f2c001171..4bdd1bd57 100644 --- a/insights/combiners/cloud_provider.py +++ b/insights/combiners/cloud_provider.py @@ -4,9 +4,10 @@ Combiner for Cloud information. 
It uses the results of the multiple parsers:
 
-* :class:`InstalledRpms`,
-* :class:`YumRepoList` and
-* :class:`DMIDecode` parsers
+* :py:class:`insights.parsers.installed_rpms.InstalledRpms`
+* :py:class:`insights.parsers.yum.YumRepoList`
+* :py:class:`insights.parsers.dmidecode.DMIDecode`
+* :py:class:`insights.parsers.rhsm_conf.RHSMConf`
 
 The combiner uses these parsers to determine the Cloud Provider based on a set
 of criteria that is unique to each cloud provider.
 
@@ -14,9 +15,9 @@
 Examples:
     >>> cp_aws.cloud_provider
     'aws'
-    >>> cp_aws.cp_bios_version == {'aws': '4.2.amazon', 'google': '', 'azure': '', 'alibaba': ''}
+    >>> cp_aws.cp_bios_version['aws'] == '4.2.amazon'
     True
-    >>> cp_aws.cp_rpms == {'aws': ['rh-amazon-rhui-client-2.2.124-1.el7'], 'google': [], 'azure': [], 'alibaba': []}
+    >>> cp_aws.cp_rpms['aws'] == ['rh-amazon-rhui-client-2.2.124-1.el7']
     True
     >>> cp_aws.cp_uuid['aws']
     'EC2F58AF-2DAD-C57E-88C0-A81CB6084290'
     >>> cp_aws.long_name
     'Amazon Web Services'
     >>> cp_azure.cloud_provider
     'azure'
-    >>> cp_azure.cp_yum == {'aws': [], 'google': [], 'azure': ['rhui-microsoft-azure-rhel7-2.2-74'], 'alibaba': []}
+    >>> cp_azure.cp_yum['azure'] == ['rhui-microsoft-azure-rhel7-2.2-74']
     True
     >>> cp_azure.cp_asset_tag['azure']
     '7783-7084-3265-9085-8269-3286-77'
     >>> cp_alibaba.cloud_provider
     'alibaba'
-    >>> cp_alibaba.cp_manufacturer == {'aws': '', 'google': '', 'azure': '', 'alibaba': 'Alibaba Cloud'}
+    >>> cp_alibaba.cp_manufacturer['alibaba'] == 'Alibaba Cloud'
+    True
+    >>> cp_ibm.cp_rhsm_server_hostname['ibm'] == 'host.networklayer.com'
     True
 """
 
@@ -39,214 +42,363 @@
 from insights.parsers.installed_rpms import InstalledRpms
 from insights.parsers.dmidecode import DMIDecode
 from insights.parsers.yum import YumRepoList
-from collections import namedtuple
-
+from insights.parsers.rhsm_conf import RHSMConf
 
-@combiner([InstalledRpms, DMIDecode, YumRepoList])
-class CloudProvider(object):
-    """
-    Combiner class to provide cloud vendor facts
-
-    Attributes:
-        cp_bios_vendor (dict): Dictionary containing a value , for each provider,
-            of Bios vendor used to determine cloud provider. Each providers value will be
-            empty if none found
-        cp_bios_version (dict): Dictionary containing a value, for each provider,
-            of Bios version used to determine cloud provider. Each providers value will be
-            empty if none found
-        cp_rpms (dict): Dictionary containing a list, for each provider, of rpm information
-            used to determine cloud provider. Each providers list will be empty if no matches
-            found
-        cp_yum (dict): Dictionary containing a list, for each provider, of yum repo information
-            used to determine cloud provider. Each providers list will be empty if no matches
-            found
-        cp_asset_tag (dict): Dictionary containing a value, for each provider, of rpm information
-            used to determine cloud provider. Each providers value will be empty if no matches
-            found
-        cp_uuid (dict): Dictionary containing a value, for each provider, of uuid information
-            used to determine cloud provider. Each providers value will be empty if no matches
-            are found
-        cp_manufacturer (dict): Dictionary containing a value, for each provider, of system
-            information used to determine cloud provider. Provider value will be empty if no
-            matches are found.
-        cloud_provider (str): String representing the cloud provider that was detected.
-            If none are detected then it will have the default value `None`.
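+# The provider classes later in this module all follow one pattern: subclass
+# CloudProviderInstance, set the relevant search strings in __init__ and
+# pre-compute the matching ``cp_*`` values. A minimal sketch of how a
+# hypothetical new provider would be wired in (names illustrative only, not
+# part of this change):
+#
+#     class FooCloudProvider(CloudProviderInstance):
+#         _NAME = 'foo'
+#         _LONG_NAME = 'Foo Cloud'
+#
+#         def __init__(self, *args, **kwargs):
+#             super(FooCloudProvider, self).__init__(*args, **kwargs)
+#             self.rpm = 'foo-rhui-client'
+#             self.cp_rpms = self._get_rpm_cp_info(self.rpm)
+#
+# Besides the subclass itself, a new provider must be added to
+# CloudProvider._CLOUD_PROVIDER_CLASSES and handled in _select_provider.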
+class CloudProviderInstance(object):
+    """
+    Class to represent a base cloud provider instance
+
+    Use this base class to derive new cloud provider classes. In each new cloud
+    provider class set the particular values that will be used to detect that
+    particular cloud provider.
+
+    Attributes:
+
+        rpm (str): RPM string in lowercase to use when searching for this cloud provider.
+        yum (str): Yum repo name string in lowercase to use when searching for this cloud provider.
+        bios_vendor_version (str): BIOS vendor version string in lowercase to use when searching
+            for this cloud provider.
+        manuf (str): Manufacturer string in lowercase to use when searching for this cloud provider.
+        asset_tag (str): Asset tag string in lowercase to use when searching for this
+            cloud provider.
+        uuid (str): UUID string in lowercase to use when searching for this cloud provider.
+        rhsm_hostname (str): Hostname string in lowercase to use when searching for this
+            cloud provider in ``rhsm.conf``.
+        cp_bios_vendor (str): BIOS vendor string value found in search for this cloud provider.
+        cp_bios_version (str): BIOS version string value found in search for this cloud provider.
+        cp_rpms (list): List of RPM string values found in search for this cloud provider.
+        cp_yum (list): List of Yum repo name string values found in search for this cloud provider.
+        cp_asset_tag (str): Asset tag string value found in search for this cloud provider.
+        cp_uuid (str): UUID string value found in search for this cloud provider.
+        cp_manufacturer (str): Manufacturer string value found in search for this cloud provider.
+        cp_rhsm_server_hostname (str): RHSM server hostname string value found in search for
+            this cloud provider.
- if any(value for value in self.cp_bios_vendor.values()): - return ( - self.__AWS.name if (self.cp_bios_vendor['aws'] and - self.__AWS.vv in self.cp_bios_vendor['aws'].lower()) - else self.__GOOGLE.name if (self.cp_bios_vendor['google'] and - self.__GOOGLE.vv in self.cp_bios_vendor['google'].lower()) - else self.__AZURE.name if (self.cp_bios_vendor['azure'] and self.__AZURE.vv in - self.cp_bios_vendor['azure'].lower()) - else None + """ + def __init__(self, rpms=None, dmidcd=None, yum_repos=None, rhsm_cfg=None): + self._rpms = rpms + self._dmidcd = dmidcd + self._yum_repos = yum_repos + self._rhsm_cfg = rhsm_cfg + self.rpm = '' + self.yum = '' + self.bios_vendor_version = '' + self.manuf = '' + self.asset_tag = '' + self.uuid = '' + self.rhsm_hostname = '' + self.cp_bios_vendor = '' + self.cp_bios_version = '' + self.cp_rpms = [] + self.cp_yum = [] + self.cp_asset_tag = '' + self.cp_uuid = '' + self.cp_manufacturer = '' + self.cp_rhsm_server_hostname = '' + + def _get_cp_bios_vendor(self, vendor_version): + """ str: Returns BIOS vendor string if it matches ``vendor_version`` """ + vendor = '' + if self._dmidcd and self._dmidcd.bios: + vendor = ( + self._dmidcd.bios.get('vendor') + if vendor_version and vendor_version in self._dmidcd.bios.get('vendor', '').lower() else '' ) - - if any(value for value in self.cp_bios_version.values()): - return ( - self.__AWS.name if (self.cp_bios_version['aws'] and - self.__AWS.vv in self.cp_bios_version['aws'].lower()) - else self.__GOOGLE.name if (self.cp_bios_version['google'] and - self.__GOOGLE.vv in self.cp_bios_version['google'].lower()) - else self.__AZURE.name if (self.cp_bios_version['azure'] and - self.__AZURE.vv in self.cp_bios_version['azure'].lower()) - else None + return vendor + + def _get_cp_bios_version(self, vendor_version): + """ str: Returns BIOS version string if it matches ``vendor_version`` """ + version = '' + if self._dmidcd and self._dmidcd.bios: + version = ( + self._dmidcd.bios.get('version') + if vendor_version and vendor_version in self._dmidcd.bios.get('version', '').lower() else '' ) - - if any(value for value in self.cp_rpms.values()): - return ( - self.__AWS.name if self.cp_rpms[CloudProvider.AWS] - else self.__GOOGLE.name if self.cp_rpms[CloudProvider.GOOGLE] - else self.__AZURE.name if self.cp_rpms[CloudProvider.AZURE] - else None + return version + + def _get_rpm_cp_info(self, rpm): + """ list: Returns list of RPMs matching ``rpm`` """ + found_rpms = [] + if self._rpms: + for key, val in self._rpms.packages.items(): + for v in val: + if rpm and rpm in v.package.lower(): + found_rpms.append(v.package) + return found_rpms + + def _get_cp_from_manuf(self, manuf): + """ str: Returns manufacturer string if it matches ``manuf`` """ + manufacturer = '' + if self._dmidcd and self._dmidcd.system_info: + manufacturer = ( + self._dmidcd.system_info.get('manufacturer') + if manuf == self._dmidcd.system_info.get('manufacturer', '').lower() else '' + ) + return manufacturer + + def _get_cp_from_yum(self, repo_name): + """ list: Returns list of Yum repos matching ``repo_name`` """ + found_repos = [] + if self._yum_repos and hasattr(self._yum_repos, 'data'): + found_repos = [ + repo.get('id').lower() + for repo in self._yum_repos.data + if repo_name and repo_name in repo.get('id', '').lower() + ] + return found_repos + + def _get_cp_from_rhsm_conf(self, rhsm_server_hostname): + """ str: Returns rhsm server hostname string if it matches ``rhsm_server_hostname`` """ + server_hostname = '' + if self._rhsm_cfg and 'server' in 
self._rhsm_cfg and 'hostname' in self._rhsm_cfg['server']:
+            hostname = self._rhsm_cfg.get('server', 'hostname')
+            if hostname and hostname.lower().strip().endswith(rhsm_server_hostname):
+                server_hostname = hostname
+        return server_hostname
+
+    def _get_cp_from_asset_tag(self, asset_tag):
+        """ str: Returns asset tag string if it matches ``asset_tag`` """
+        tag = ''
+        if self._dmidcd and hasattr(self._dmidcd, 'data'):
+            ch_info = self._dmidcd.data.get('chassis_information', [])
+            if ch_info:
+                tag = ch_info[0].get('asset_tag') if asset_tag and asset_tag == ch_info[0].get('asset_tag', '') else ''
+        return tag
+
+    def _get_cp_from_uuid(self, uuid):
+        """ str: Returns UUID string if it matches ``uuid`` """
+        found_uuid = ''
+        if self._dmidcd and self._dmidcd.system_info:
+            found_uuid = (
+                self._dmidcd.system_info.get('uuid')
+                if uuid and self._dmidcd.system_info.get('uuid', '').lower().strip().startswith(uuid) else ''
+            )
+        return found_uuid
+
+    @property
+    def name(self):
+        """ str: Short cloud provider class name or ID """
+        return self._NAME
 
-        if self.cp_yum[CloudProvider.AZURE]:
-            return CloudProvider.AZURE
+    @property
+    def long_name(self):
+        """ str: Long cloud provider name """
+        return self._LONG_NAME
 
-        if self.cp_asset_tag[CloudProvider.AZURE]:
-            return CloudProvider.AZURE
 
-        if self.cp_uuid[CloudProvider.AWS]:
-            return CloudProvider.AWS
+class GoogleCloudProvider(CloudProviderInstance):
+    """
+    Class to identify Google Cloud provider
 
-        if self.cp_manufacturer[CloudProvider.ALIBABA]:
-            return CloudProvider.ALIBABA
+    Google CP can be identified by RPM and BIOS vendor/version
+    """
+    _NAME = 'google'
+    _LONG_NAME = 'Google Cloud'
 
-    def _get_rpm_cp_info(self, rpms):
+    def __init__(self, *args, **kwargs):
+        super(GoogleCloudProvider, self).__init__(*args, **kwargs)
+        self.rpm = 'google-rhui-client'
+        self.bios_vendor_version = 'google'
+        self.cp_bios_vendor = self._get_cp_bios_vendor(self.bios_vendor_version)
+        self.cp_bios_version = self._get_cp_bios_version(self.bios_vendor_version)
+        self.cp_rpms = self._get_rpm_cp_info(self.rpm)
 
-        prov = self._provider_init_list()
-        if rpms:
-            for p in self.__PROVIDERS:
-                for key, val in rpms.packages.items():
-                    for v in val:
-                        prov[p.name].append(v.package) if p.rpm and p.rpm in v.package.lower() else prov
 
+class AlibabaCloudProvider(CloudProviderInstance):
+    """
+    Class to identify Alibaba Cloud provider
 
-        return prov
+    Alibaba CP can be identified by manufacturer
+    """
+    _NAME = 'alibaba'
+    _LONG_NAME = 'Alibaba Cloud'
 
-    def _get_cp_from_yum(self, yrl):
+    def __init__(self, *args, **kwargs):
+        super(AlibabaCloudProvider, self).__init__(*args, **kwargs)
+        self.manuf = 'alibaba cloud'
+        self.cp_manufacturer = self._get_cp_from_manuf(self.manuf)
 
-        prov = self._provider_init_list()
-        if yrl and hasattr(yrl, 'data'):
-            for p in self.__PROVIDERS:
-                for yval in yrl.data:
-                    prov[p.name].append(yval.get('id').lower()) \
-                        if p.yum and p.yum in yval.get('id').lower() \
-                        else prov
 
+class AmazonCloudProvider(CloudProviderInstance):
+    """
+    Class to identify Amazon Cloud provider
 
-        return prov
+    Amazon CP can be identified by RPM, BIOS vendor/version,
+    and system UUID
+    """
+    _NAME = 'aws'
+    _LONG_NAME = 'Amazon Web Services'
 
-    def _get_cp_from_asset_tag(self, dmidcd):
+    def __init__(self, *args, **kwargs):
+        super(AmazonCloudProvider, self).__init__(*args, **kwargs)
+        self.rpm = 'rh-amazon-rhui-client'
+        self.bios_vendor_version = 'amazon'
+        self.uuid = 'ec2'
+        self.cp_bios_vendor = self._get_cp_bios_vendor(self.bios_vendor_version)
+        self.cp_bios_version = self._get_cp_bios_version(self.bios_vendor_version)
+        self.cp_rpms = self._get_rpm_cp_info(self.rpm)
+        self.cp_uuid = self._get_cp_from_uuid(self.uuid)
 
-        prov = self._provider_init_str()
-        if dmidcd and hasattr(dmidcd, 'data'):
-            ch_info = dmidcd.data.get('chassis_information')
-            if ch_info:
-                asset_tag = ch_info[0].get('asset_tag')
-                prov['azure'] = asset_tag if asset_tag == '7783-7084-3265-9085-8269-3286-77' else ''
-        return prov
 
+class AzureCloudProvider(CloudProviderInstance):
+    """
+    Class to identify Azure Cloud provider
 
-    def _get_cp_bios_vendor(self, dmidcd):
+    Azure CP can be identified by RPM, Yum repo, and system asset tag
+    """
+    _NAME = 'azure'
+    _LONG_NAME = 'Microsoft Azure'
 
-        prov = self._provider_init_str()
+    def __init__(self, *args, **kwargs):
+        super(AzureCloudProvider, self).__init__(*args, **kwargs)
+        self.rpm = 'walinuxagent'
+        self.yum = 'rhui-microsoft-azure'
+        self.asset_tag = '7783-7084-3265-9085-8269-3286-77'
+        self.cp_asset_tag = self._get_cp_from_asset_tag(self.asset_tag)
+        self.cp_rpms = self._get_rpm_cp_info(self.rpm)
+        self.cp_yum = self._get_cp_from_yum(self.yum)
 
-        if dmidcd and dmidcd.bios:
-            for p in self.__PROVIDERS:
-                prov[p.name] = dmidcd.bios.get('vendor') if p.vv and p.vv in dmidcd.bios.get('vendor').lower() \
-                    else ''
-        return prov
 
-    def _get_cp_bios_version(self, dmidcd):
+class IBMCloudProvider(CloudProviderInstance):
+    """
+    Class to identify IBM Cloud provider
 
-        prov = self._provider_init_str()
+    IBM CP can be identified by rhsm.conf server hostname setting
+    """
+    _NAME = 'ibm'
+    _LONG_NAME = 'IBM Cloud'
 
-        if dmidcd and dmidcd.bios:
-            for p in self.__PROVIDERS:
-                prov[p.name] = dmidcd.bios.get('version') if p.vv and p.vv in dmidcd.bios.get('version').lower() \
-                    else ''
-        return prov
+    def __init__(self, *args, **kwargs):
+        super(IBMCloudProvider, self).__init__(*args, **kwargs)
+        self.rhsm_server_hostname = 'networklayer.com'
+        self.cp_rpms = self._get_rpm_cp_info(self.rpm)
+        self.cp_yum = self._get_cp_from_yum(self.yum)
+        self.cp_rhsm_server_hostname = self._get_cp_from_rhsm_conf(self.rhsm_server_hostname)
 
-    def _get_cp_from_uuid(self, dmidcd):
-        prov = self._provider_init_str()
 
+@combiner([InstalledRpms, DMIDecode, YumRepoList, RHSMConf])
+class CloudProvider(object):
+    """
+    Combiner class to provide cloud vendor facts
 
-        if dmidcd and dmidcd.bios:
-            prov['aws'] = dmidcd.system_info.get('uuid') if dmidcd.system_info.get('uuid').lower().startswith('ec2') \
-            else ''
-        return prov
 
-    def _get_cp_from_manuf(self, dmidcd):
 
-        prov = self._provider_init_str()
 
-        if dmidcd and dmidcd.system_info:
-            prov[CloudProvider.__ALIBABA.name] = (
-                dmidcd.system_info.get('manufacturer')
-                if dmidcd.system_info.get('manufacturer').lower() == CloudProvider.__ALIBABA.manuf
-                else ''
-            )
-        return prov
+    Attributes:
+        cp_bios_vendor (dict): Dictionary containing a value, for each provider,
+            of Bios vendor used to determine cloud provider. Each provider's value will be
+            empty if none found
+        cp_bios_version (dict): Dictionary containing a value, for each provider,
+            of Bios version used to determine cloud provider. Each provider's value will be
+            empty if none found
+        cp_rpms (dict): Dictionary containing a list, for each provider, of rpm information
+            used to determine cloud provider. Each provider's list will be empty if no matches
+            found
+        cp_yum (dict): Dictionary containing a list, for each provider, of yum repo information
+            used to determine cloud provider. Each provider's list will be empty if no matches
+            found
+        cp_asset_tag (dict): Dictionary containing a value, for each provider, of asset tag
+            information used to determine cloud provider. Each provider's value will be empty if
+            no matches found
+        cp_uuid (dict): Dictionary containing a value, for each provider, of uuid information
+            used to determine cloud provider. Each provider's value will be empty if no matches
+            are found
+        cp_manufacturer (dict): Dictionary containing a value, for each provider, of system
+            information used to determine cloud provider. Provider value will be empty if no
+            matches are found.
+        cp_rhsm_server_hostname (dict): Dictionary containing a value, for each provider,
+            of rhsm.conf server hostnames. Value will be empty if no matches are found.
+        cloud_provider (str): String representing the cloud provider that was detected.
+            If none are detected then it will have the default value `None`.
+    """
+    ALIBABA = AlibabaCloudProvider._NAME
+    """Alibaba Cloud Provider short name"""
+
+    AWS = AmazonCloudProvider._NAME
+    """AWS Cloud Provider short name"""
+
+    AZURE = AzureCloudProvider._NAME
+    """AZURE Cloud Provider short name"""
+
+    GOOGLE = GoogleCloudProvider._NAME
+    """GOOGLE Cloud Provider short name"""
+
+    IBM = IBMCloudProvider._NAME
+    """IBM Cloud Provider short name"""
+
+    # Add any new cloud provider classes to this list
+    _CLOUD_PROVIDER_CLASSES = [
+        GoogleCloudProvider,
+        AlibabaCloudProvider,
+        AmazonCloudProvider,
+        AzureCloudProvider,
+        IBMCloudProvider,
+    ]
+
+    def __init__(self, rpms, dmidcd, yrl, rhsm_cfg):
+        self._cp_objects = dict([
+            (cls._NAME, cls(rpms=rpms, dmidcd=dmidcd, yum_repos=yrl, rhsm_cfg=rhsm_cfg))
+            for cls in self._CLOUD_PROVIDER_CLASSES
+        ])
+        self.cp_bios_vendor = dict([(name, cp.cp_bios_vendor) for name, cp in self._cp_objects.items()])
+        self.cp_bios_version = dict([(name, cp.cp_bios_version) for name, cp in self._cp_objects.items()])
+        self.cp_rpms = dict([(name, cp.cp_rpms) for name, cp in self._cp_objects.items()])
+        self.cp_yum = dict([(name, cp.cp_yum) for name, cp in self._cp_objects.items()])
+        self.cp_asset_tag = dict([(name, cp.cp_asset_tag) for name, cp in self._cp_objects.items()])
+        self.cp_uuid = dict([(name, cp.cp_uuid) for name, cp in self._cp_objects.items()])
+        self.cp_manufacturer = dict([(name, cp.cp_manufacturer) for name, cp in self._cp_objects.items()])
+        self.cp_rhsm_server_hostname = dict([(name, cp.cp_rhsm_server_hostname) for name, cp in self._cp_objects.items()])
+        self.cloud_provider = self._select_provider()
 
     def _select_provider(self):
+        """
+        This method provides the logic to identify which cloud provider is present.
 
-        if any(value for value in self.cp_bios_vendor.values()):
-            return (
-                self.__AWS.name if (self.cp_bios_vendor['aws'] and
-                self.__AWS.vv in self.cp_bios_vendor['aws'].lower())
-                else self.__GOOGLE.name if (self.cp_bios_vendor['google'] and
-                self.__GOOGLE.vv in self.cp_bios_vendor['google'].lower())
-                else self.__AZURE.name if (self.cp_bios_vendor['azure'] and self.__AZURE.vv in
-                self.cp_bios_vendor['azure'].lower())
-                else None
-            )
+        If new data sources and/or cloud providers are added you must add logic here to
+        identify the new cloud provider.
 
-        if any(value for value in self.cp_bios_version.values()):
-            return (
-                self.__AWS.name if (self.cp_bios_version['aws'] and
-                self.__AWS.vv in self.cp_bios_version['aws'].lower())
-                else self.__GOOGLE.name if (self.cp_bios_version['google'] and
-                self.__GOOGLE.vv in self.cp_bios_version['google'].lower())
-                else self.__AZURE.name if (self.cp_bios_version['azure'] and
-                self.__AZURE.vv in self.cp_bios_version['azure'].lower())
-                else None
-            )
 
-        if any(value for value in self.cp_rpms.values()):
-            return (
-                self.__AWS.name if self.cp_rpms[CloudProvider.AWS]
-                else self.__GOOGLE.name if self.cp_rpms[CloudProvider.GOOGLE]
-                else self.__AZURE.name if self.cp_rpms[CloudProvider.AZURE]
-                else None
-            )
+        Returns:
+            str: Returns the name of the cloud provider, corresponds to ``name`` property
+                in cloud provider classes. 
If no cloud provider is identified, ``None`` is returned + """ + # Check bios vendor first + if self._cp_objects[self.AWS].cp_bios_vendor: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_bios_vendor: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_bios_vendor: + return self.AZURE + + # Specific vendor not detected, so check bios version + if self._cp_objects[self.AWS].cp_bios_version: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_bios_version: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_bios_version: + return self.AZURE + + # BIOS vendor and version not detected check for RPMs + if self._cp_objects[self.AWS].cp_rpms: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_rpms: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_rpms: + return self.AZURE + + # No luck, check for other attributes + if self._cp_objects[self.AZURE].cp_yum or self._cp_objects[self.AZURE].cp_asset_tag: + return self.AZURE + + if self._cp_objects[self.AWS].cp_uuid: + return self.AWS + + if self._cp_objects[self.ALIBABA].cp_manufacturer: + return self.ALIBABA + + if self._cp_objects[self.IBM].cp_rhsm_server_hostname: + return self.IBM + + return None @property def long_name(self): - """ - Return long name for the specific cloud provider. - """ - return self._long_name_mapping.get(self.cloud_provider) + """ str: Return long name for the specific cloud provider, or ``None`` if no cloud provider """ + return self._cp_objects[self.cloud_provider].long_name if self.cloud_provider is not None else None diff --git a/insights/combiners/tests/test_cloud_provider.py b/insights/combiners/tests/test_cloud_provider.py index 8a56289b0..de9e41848 100644 --- a/insights/combiners/tests/test_cloud_provider.py +++ b/insights/combiners/tests/test_cloud_provider.py @@ -3,6 +3,7 @@ from insights.combiners.cloud_provider import CloudProvider from insights.parsers.installed_rpms import InstalledRpms as IRPMS from insights.parsers.dmidecode import DMIDecode +from insights.parsers.rhsm_conf import RHSMConf from insights.parsers.yum import YumRepoList from insights.tests import context_wrap @@ -569,12 +570,31 @@ End Of Table """ # noqa: E101,W191 +IBM_RHSM_CONF = """ +# Red Hat Subscription Manager Configuration File: + +# Unified Entitlement Platform Configuration +[server] +# Server hostname: +hostname = host.networklayer.com + +# Server prefix: +prefix = /subscription + +# Server port: +port = 443 + +# Set to 1 to disable certificate validation: +insecure = 0 + +""".strip() + def test_rpm_google(): irpms = IRPMS(context_wrap(RPMS_GOOGLE)) dmi = DMIDecode(context_wrap(DMIDECODE)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.GOOGLE assert 'google-rhui-client-5.1.100-1.el7' in ret.cp_rpms.get(CloudProvider.GOOGLE) assert 'google-rhui-client-5.1.100-1.el6' in ret.cp_rpms.get(CloudProvider.GOOGLE) @@ -585,7 +605,7 @@ def test_rpm_aws(): irpms = IRPMS(context_wrap(RPMS_AWS)) dmi = DMIDecode(context_wrap(DMIDECODE)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AWS assert ret.cp_rpms.get(CloudProvider.AWS)[0] == 'rh-amazon-rhui-client-2.2.124-1.el7' assert ret.long_name == 'Amazon Web Services' @@ -595,7 +615,7 @@ def test_rpm_azure(): irpms = IRPMS(context_wrap(RPMS_AZURE)) dmi = 
DMIDecode(context_wrap(DMIDECODE_BARE_METAL)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AZURE assert ret.cp_rpms.get(CloudProvider.AZURE)[0] == 'WALinuxAgent-2.2.18-1.el7' assert ret.long_name == 'Microsoft Azure' @@ -605,7 +625,7 @@ def test__yum_azure(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AZURE assert 'rhui-microsoft-azure-rhel7-2.2-74' in ret.cp_yum.get(CloudProvider.AZURE) @@ -614,7 +634,7 @@ def test__bios_version_aws(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE_AWS)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AWS assert ret.cp_bios_version[CloudProvider.AWS] == '4.2.amazon' @@ -623,7 +643,7 @@ def test__bios_vendor_google(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE_GOOGLE)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.GOOGLE assert ret.cp_bios_vendor[CloudProvider.GOOGLE] == 'Google' @@ -632,7 +652,7 @@ def test__asset_tag_azure(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE_AZURE_ASSET_TAG)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AZURE assert ret.cp_asset_tag[CloudProvider.AZURE] == '7783-7084-3265-9085-8269-3286-77' @@ -641,7 +661,7 @@ def test__uuid(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE_AWS_UUID)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.AWS assert ret.cp_uuid[CloudProvider.AWS] == 'EC2F58AF-2DAD-C57E-88C0-A81CB6084290' @@ -650,17 +670,25 @@ def test_dmidecode_alibaba(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE_ALIBABA)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider == CloudProvider.ALIBABA assert ret.cp_manufacturer[CloudProvider.ALIBABA] == 'Alibaba Cloud' assert ret.long_name == 'Alibaba Cloud' +def test_rhsm_conf_ibm(): + rhsm_conf = RHSMConf(context_wrap(IBM_RHSM_CONF)) + ret = CloudProvider(None, None, None, rhsm_conf) + assert ret.cloud_provider == CloudProvider.IBM + assert ret.cp_rhsm_server_hostname[CloudProvider.IBM] == 'host.networklayer.com' + assert ret.long_name == 'IBM Cloud' + + def test_no_data(): irpms = IRPMS(context_wrap(RPMS)) dmi = DMIDecode(context_wrap(DMIDECODE)) yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) - ret = CloudProvider(irpms, dmi, yrl) + ret = CloudProvider(irpms, dmi, yrl, None) assert ret.cloud_provider is None assert ret.long_name is None @@ -669,22 +697,29 @@ def test_docs(): cp_aws = CloudProvider( IRPMS(context_wrap(RPMS_AWS)), DMIDecode(context_wrap(DMIDECODE_AWS)), - YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) + 
YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)), + None ) cp_azure = CloudProvider( IRPMS(context_wrap(RPMS_AZURE)), DMIDecode(context_wrap(DMIDECODE_AZURE_ASSET_TAG)), - YumRepoList(context_wrap(YUM_REPOLIST_AZURE)) + YumRepoList(context_wrap(YUM_REPOLIST_AZURE)), + None ) cp_alibaba = CloudProvider( IRPMS(context_wrap(RPMS)), DMIDecode(context_wrap(DMIDECODE_ALIBABA)), - YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)) + YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE)), + None + ) + cp_ibm = CloudProvider( + None, None, None, RHSMConf(context_wrap(IBM_RHSM_CONF)) ) env = { 'cp_aws': cp_aws, 'cp_azure': cp_azure, - 'cp_alibaba': cp_alibaba + 'cp_alibaba': cp_alibaba, + 'cp_ibm': cp_ibm } failed, total = doctest.testmod(cloud_provider, globs=env) assert failed == 0 From 905d32eea3c753546fb6105a5342e03ea84f7cd5 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 21 Oct 2020 08:51:36 -0500 Subject: [PATCH 208/892] Fix cmdline utils for core archives (#2787) * Fix cmdline utils for core archives * To properly hydrate core for core collected archives we need to use new hydration function to identify context and hydrate broker Signed-off-by: Bob Fahr * Fix cat utility Signed-off-by: Bob Fahr --- insights/tools/cat.py | 8 ++++---- insights/tools/insights_inspect.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/insights/tools/cat.py b/insights/tools/cat.py index e1f8ff8ae..d2cb48ae6 100755 --- a/insights/tools/cat.py +++ b/insights/tools/cat.py @@ -28,8 +28,9 @@ from contextlib import contextmanager -from insights import (apply_configs, create_context, dr, extract, HostContext, +from insights import (apply_configs, dr, extract, HostContext, load_default_plugins) +from insights.core.hydration import initialize_broker from insights.core.spec_factory import ContentProvider try: @@ -97,9 +98,8 @@ def create_broker(root=None): yield broker else: def from_dir(d): - broker = dr.Broker() - ctx = create_context(d, None) - broker[ctx.__class__] = ctx + # ctx is returned here, but its already in the broker so not needed + _, broker = initialize_broker(d) return broker if os.path.isdir(root): diff --git a/insights/tools/insights_inspect.py b/insights/tools/insights_inspect.py index b66ea628e..09ddaf6f0 100755 --- a/insights/tools/insights_inspect.py +++ b/insights/tools/insights_inspect.py @@ -98,9 +98,10 @@ from contextlib import contextmanager -from insights import (apply_configs, create_context, dr, extract, HostContext, +from insights import (apply_configs, dr, extract, HostContext, load_default_plugins) from insights.core import filters +from insights.core.hydration import initialize_broker from IPython import embed from IPython.terminal.embed import InteractiveShellEmbed @@ -162,9 +163,8 @@ def create_broker(root=None): yield broker else: def from_dir(d): - broker = dr.Broker() - ctx = create_context(d, None) - broker[ctx.__class__] = ctx + # ctx is returned here, but its already in the broker so not needed + _, broker = initialize_broker(d) return broker if os.path.isdir(root): From 6ec5a060a94874069fdd75398631a903cc0ebbb9 Mon Sep 17 00:00:00 2001 From: Glutexo Date: Wed, 21 Oct 2020 19:29:47 +0200 Subject: [PATCH 209/892] RHCLOUD-8745 Fix warning when skipping a glob (#2782) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix warning when skipping a glob When a file matched by a glob pattern gets skipped, a message with a whole dictionary is printed instead of just the file name. Fixed. 
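Before the fix the log line rendered the whole parsed glob spec; illustrated
here with the spec used by the tests below:

    WARNING: Skipping file {'glob': '/etc/yum.repos.d/*.repo', 'symbolic_name': 'yum_repos_d', 'pattern': [], 'file': '/etc/yum.repos.d/test.repo'}

After the fix only the matched path is printed:

    WARNING: Skipping file /etc/yum.repos.d/test.repo
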
The warning is now consistent with other skipped matches. Signed-off-by: Štěpán Tomsa * Test skipped glob warning Added a test for the warning emitted when a file matched by a glob is skipped. Originally a dictionary was output instead of the file name. Signed-off-by: Štěpán Tomsa * Test other skip warnings The skipped file warning was one of a few more that use the same logic. Added tests for the sibling calls so they are all covered. Signed-off-by: Štěpán Tomsa Co-authored-by: Jeremy Crafts --- insights/client/data_collector.py | 2 +- .../tests/client/test_skip_commands_files.py | 80 +++++++++++++++++++ 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 3e2a5bfd1..d40917cf7 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -303,7 +303,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): glob_specs = self._parse_glob_spec(g) for g in glob_specs: if g['file'] in rm_conf.get('files', []): - logger.warn("WARNING: Skipping file %s", g) + logger.warn("WARNING: Skipping file %s", g['file']) else: glob_spec = InsightsFile(g, self.mountpoint) self.archive.add_to_archive(glob_spec) diff --git a/insights/tests/client/test_skip_commands_files.py b/insights/tests/client/test_skip_commands_files.py index b061b7757..81ba11ce2 100644 --- a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -133,3 +133,83 @@ def test_omit_after_parse_command(InsightsCommand, run_pre_command): rm_conf = {'commands': ["/sbin/ethtool -i eth0"]} data_collector.run_collection(collection_rules, rm_conf, {}, '') InsightsCommand.assert_not_called() + + +@patch("insights.client.data_collector.DataCollector._parse_glob_spec", return_value=[{'glob': '/etc/yum.repos.d/*.repo', 'symbolic_name': 'yum_repos_d', 'pattern': [], 'file': '/etc/yum.repos.d/test.repo'}]) +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_globs(warn, parse_glob_spec): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [], 'files': [], 'globs': [{'glob': '/etc/yum.repos.d/*.repo', 'symbolic_name': 'yum_repos_d', 'pattern': []}]} + rm_conf = {'files': ["/etc/yum.repos.d/test.repo"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping file %s", "/etc/yum.repos.d/test.repo") + + +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_files_by_file(warn): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [], 'files': [{'file': '/etc/machine-id', 'pattern': [], 'symbolic_name': 'etc_machine_id'}], 'globs': []} + rm_conf = {'files': ["/etc/machine-id"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping file %s", "/etc/machine-id") + + +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_files_by_symbolic_name(warn): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [], 'files': [{'file': '/etc/machine-id', 'pattern': [], 'symbolic_name': 'etc_machine_id'}], 'globs': []} + rm_conf = {'files': ["etc_machine_id"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping file %s", "/etc/machine-id") + + 
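+# Several of the cases below patch the DataCollector._parse_*_spec helpers so
+# that wildcard and pre_command expansion resolves to fixed entries; the skip
+# logic can then be exercised deterministically, without touching the
+# filesystem or running any shell command.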
+@patch("insights.client.data_collector.DataCollector._parse_file_spec", return_value=[{'file': '/etc/sysconfig/network-scripts/ifcfg-enp0s3', 'pattern': [], 'symbolic_name': 'ifcfg'}]) +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_files_by_wildcard(warn, parse_file_spec): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [], 'files': [{'file': '/etc/sysconfig/network-scripts/()*ifcfg-.*', 'pattern': [], 'symbolic_name': 'ifcfg'}], 'globs': []} + rm_conf = {'files': ["/etc/sysconfig/network-scripts/ifcfg-enp0s3"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping file %s", "/etc/sysconfig/network-scripts/ifcfg-enp0s3") + + +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_commands_by_command(warn): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [{'command': '/bin/date', 'pattern': [], 'symbolic_name': 'date'}], 'files': [], 'globs': []} + rm_conf = {'commands': ["/bin/date"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping command %s", "/bin/date") + + +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_commands_by_symbolic_name(warn): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [{'command': '/bin/date', 'pattern': [], 'symbolic_name': 'date'}], 'files': [], 'globs': []} + rm_conf = {'commands': ["date"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping command %s", "/bin/date") + + +@patch("insights.client.data_collector.DataCollector._parse_command_spec", return_value=[{'command': '/sbin/ethtool enp0s3', 'pattern': [], 'pre_command': 'iface', 'symbolic_name': 'ethtool'}]) +@patch("insights.client.data_collector.logger.warn") +def test_run_collection_logs_skipped_commands_by_pre_command(warn, parse_command_spec): + c = InsightsConfig() + data_collector = DataCollector(c) + + collection_rules = {'commands': [{'command': '/sbin/ethtool', 'pattern': [], 'pre_command': 'iface', 'symbolic_name': 'ethtool'}], 'files': [], 'globs': [], 'pre_commands': {'iface': '/sbin/ip -o link | awk -F \': \' \'/.*link\\/ether/ {print $2}\''}} + rm_conf = {'commands': ["/sbin/ethtool enp0s3"]} + data_collector.run_collection(collection_rules, rm_conf, {}, '') + warn.assert_called_once_with("WARNING: Skipping command %s", "/sbin/ethtool enp0s3") From 0a27b47941c73fcdffdca2c89c4202e8f75148dc Mon Sep 17 00:00:00 2001 From: vishwanathjadhav Date: Thu, 22 Oct 2020 01:42:18 +0530 Subject: [PATCH 210/892] Added parser for abrt CCpp.conf (#2784) * Added parser for abrt CCpp.conf Signed-off-by: vishawanathjadhav * Fixed flake8 warnings Signed-off-by: vishawanathjadhav * Made a parser specs filterable Signed-off-by: vishawanathjadhav --- docs/shared_parsers_catalog/abrt_ccpp.rst | 3 + insights/parsers/abrt_ccpp.py | 92 +++++++++++++++++++++++ insights/parsers/tests/test_abrt_ccpp.py | 88 ++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 185 insertions(+) create mode 100644 docs/shared_parsers_catalog/abrt_ccpp.rst create mode 100644 insights/parsers/abrt_ccpp.py create mode 100644 insights/parsers/tests/test_abrt_ccpp.py diff --git a/docs/shared_parsers_catalog/abrt_ccpp.rst 
b/docs/shared_parsers_catalog/abrt_ccpp.rst
new file mode 100644
index 000000000..fc0dd570f
--- /dev/null
+++ b/docs/shared_parsers_catalog/abrt_ccpp.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.abrt_ccpp
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/abrt_ccpp.py b/insights/parsers/abrt_ccpp.py
new file mode 100644
index 000000000..769b7cae9
--- /dev/null
+++ b/insights/parsers/abrt_ccpp.py
@@ -0,0 +1,92 @@
+"""
+AbrtCCppConf - file "/etc/abrt/plugins/CCpp.conf"
+=================================================
+
+The AbrtCCppConf class parses the file ``/etc/abrt/plugins/CCpp.conf``.
+On success it behaves as a ``dict`` of the CCpp settings; when the file has
+no content a ``SkipException`` is raised.
+
+Sample Data::
+
+
+    # Configuration file for CCpp hook
+
+    # CCpp hook writes its template to the "/proc/sys/kernel/core_pattern" file
+    # and stores the original template in the "/var/run/abrt/saved_core_pattern"
+    # file. If you want CCpp hook to create a core dump file named according to
+    # the original template as well, set 'MakeCompatCore' to 'yes'.
+    # If the original template string starts with "|", the string "core" is used
+    # instead of the template.
+    # For more information about naming core dump files see 'man 5 core'.
+    MakeCompatCore = yes
+
+    # The option allows you to set limit for the core file size in MiB.
+    #
+    # This value is compared to value of the MaxCrashReportSize configuration
+    # option from (/etc/abrt.conf) and the lower value is used as the limit.
+    #
+    # If MaxCoreFileSize is 0 then the value of MaxCrashReportSize is the limit.
+    # If MaxCrashReportSize is 0 then the value of MaxCoreFileSize is the limit.
+    # If both values are 0 then the core file size is unlimited.
+    MaxCoreFileSize = 0
+
+    # Do you want a copy of crashed binary be saved?
+    # (useful, for example, when _deleted binary_ segfaults)
+    SaveBinaryImage = no
+
+    # When this option is set to 'yes', core backtrace is generated
+    # from the memory image of the crashing process. Only the crash
+    # thread is present in the backtrace.
+    CreateCoreBacktrace = yes
+
+    # Save full coredump? If set to 'no', coredump won't be saved
+    # and you won't be able to report the crash to Bugzilla. Only
+    # useful with CreateCoreBacktrace set to 'yes'. Please
+    # note that if this option is set to 'no' and MakeCompatCore
+    # is set to 'yes', the core is still written to the current
+    # directory.
+    SaveFullCore = yes
+
+    # Used for debugging the hook
+    #VerboseLog = 2
+
+    # Specify where you want to store debuginfos (default: /var/cache/abrt-di)
+    #
+    DebuginfoLocation = /var/cache/abrt-di
+
+    # ABRT will ignore crashes in executables whose absolute path matches one of
+    # specified patterns.
+    #
+    #IgnoredPaths =
+
+    # ABRT will process only crashes of either allowed users or users who are
+    # members of allowed group. If no allowed users nor allowed group are specified
+    # ABRT will process crashes of all users.
+    #
+    #AllowedUsers =
+    #AllowedGroups =
+
+
+Example:
+    >>> type(abrt_conf)
+    <class 'insights.parsers.abrt_ccpp.AbrtCCppConf'>
+    >>> abrt_conf.get('CreateCoreBacktrace')
+    'yes'
+
+"""
+
+
+from insights.specs import Specs
+from insights import Parser, parser
+from insights.parsers import split_kv_pairs
+from insights.parsers import SkipException
+
+
+@parser(Specs.abrt_ccpp_conf)
+class AbrtCCppConf(Parser, dict):
+    """
+    Class for parsing ``/etc/abrt/plugins/CCpp.conf``.
+ """ + def parse_content(self, content): + self.update(split_kv_pairs(content, use_partition=False)) + if not self: + raise SkipException("empty content") diff --git a/insights/parsers/tests/test_abrt_ccpp.py b/insights/parsers/tests/test_abrt_ccpp.py new file mode 100644 index 000000000..ba2f93310 --- /dev/null +++ b/insights/parsers/tests/test_abrt_ccpp.py @@ -0,0 +1,88 @@ +import doctest +import pytest +from insights.parsers import abrt_ccpp +from insights.parsers.abrt_ccpp import AbrtCCppConf +from insights.tests import context_wrap +from insights.parsers import SkipException + +ABRT_CONF_CONTENT = """ +# Configuration file for CCpp hook + +# CCpp hook writes its template to the "/proc/sys/kernel/core_pattern" file +# and stores the original template in the "/var/run/abrt/saved_core_pattern" +# file. If you want CCpp hook to create a core dump file named according to +# the original template as well, set 'MakeCompatCore' to 'yes'. +# If the original template string starts with "|", the string "core" is used +# instead of the template. +# For more information about naming core dump files see 'man 5 core'. +MakeCompatCore = yes + +# The option allows you to set limit for the core file size in MiB. +# +# This value is compared to value of the MaxCrashReportSize configuration +# option from (/etc/abrt.conf) and the lower value is used as the limit. +# +# If MaxCoreFileSize is 0 then the value of MaxCrashReportSize is the limit. +# If MaxCrashReportSize is 0 then the value of MaxCoreFileSize is the limit. +# If both values are 0 then the core file size is unlimited. +MaxCoreFileSize = 0 + +# Do you want a copy of crashed binary be saved? +# (useful, for example, when _deleted binary_ segfaults) +SaveBinaryImage = no + +# When this option is set to 'yes', core backtrace is generated +# from the memory image of the crashing process. Only the crash +# thread is present in the backtrace. +CreateCoreBacktrace = yes + +# Save full coredump? If set to 'no', coredump won't be saved +# and you won't be able to report the crash to Bugzilla. Only +# useful with CreateCoreBacktrace set to 'yes'. Please +# note that if this option is set to 'no' and MakeCompatCore +# is set to 'yes', the core is still written to the current +# directory. +SaveFullCore = yes + +# Used for debugging the hook +#VerboseLog = 2 + +# Specify where you want to store debuginfos (default: /var/cache/abrt-di) +# +DebuginfoLocation = /var/cache/abrt-di + +# ABRT will ignore crashes in executables whose absolute path matches one of +# specified patterns. +# +#IgnoredPaths = + +# ABRT will process only crashes of either allowed users or users who are +# members of allowed group. If no allowed users nor allowed group are specified +# ABRT will process crashes of all users. 
+#
+#AllowedUsers =
+#AllowedGroups =
+""".strip()
+
+ABRT_CONF_CONTENT_NO = """
+""".strip()
+
+
+def test_empty_content():
+    with pytest.raises(SkipException):
+        AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT_NO))
+
+
+def test_abrt_class():
+    abrt_obj = AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
+    assert abrt_obj.get('CreateCoreBacktrace', '').lower() == 'yes'
+    assert abrt_obj.get('DebuginfoLocation', '').lower() == '/var/cache/abrt-di'
+    assert abrt_obj.get('Debuginfo', '').lower() == ''
+
+
+def test_docs():
+    env = {
+        'abrt_conf': AbrtCCppConf(context_wrap(ABRT_CONF_CONTENT))
+    }
+    failed, total = doctest.testmod(abrt_ccpp, globs=env)
+    assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index ccecfd0d5..e6ab312ab 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -16,6 +16,7 @@ class Openshift(SpecSet):
 
 
 class Specs(SpecSet):
+    abrt_ccpp_conf = RegistryPoint(filterable=True)
     abrt_status_bare = RegistryPoint()
     amq_broker = RegistryPoint(multi_output=True)
     auditctl_status = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 708aa92fc..739c7ac63 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -95,6 +95,7 @@ def inner(idx=None):
 
 
 class DefaultSpecs(Specs):
+    abrt_ccpp_conf = simple_file("/etc/abrt/plugins/CCpp.conf")
     abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True")
     amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml")
     auditctl_status = simple_command("/sbin/auditctl -s")

From 3abf9221c6b6915ed53a3c5b06c0e0fa34f16dc1 Mon Sep 17 00:00:00 2001
From: Martin Zibricky
Date: Wed, 21 Oct 2020 22:42:30 +0200
Subject: [PATCH 211/892] Add php.ini parser (#2767)

* Add php.ini parser, initial code and tests

Signed-off-by: Martin Zibricky

* Add .ini grammar from iniparser example

Signed-off-by: Martin Zibricky

* Add handling of Python data types

Signed-off-by: Martin Zibricky

* Add parsing shortcuts for bytes

Signed-off-by: Martin Zibricky

* Cleanup php_ini parser

Signed-off-by: Martin Zibricky

* Lint and grammar fixes for php_ini parser

Signed-off-by: Martin Zibricky

* Make php_ini parser filterable

Signed-off-by: Martin Zibricky

* Remove filters from php_ini parser

Signed-off-by: Martin Zibricky

* Add filter [ to php_ini parser

Signed-off-by: Martin Zibricky
---
 docs/shared_parsers_catalog/php_ini.rst |   3 +
 insights/parsers/php_ini.py             | 159 ++++++++++++++++++
 insights/parsers/tests/test_php_ini.py  | 210 ++++++++++++++++++++++++
 insights/specs/__init__.py              |   1 +
 insights/specs/default.py               |   1 +
 5 files changed, 374 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/php_ini.rst
 create mode 100644 insights/parsers/php_ini.py
 create mode 100644 insights/parsers/tests/test_php_ini.py

diff --git a/docs/shared_parsers_catalog/php_ini.rst b/docs/shared_parsers_catalog/php_ini.rst
new file mode 100644
index 000000000..2e8bf4d82
--- /dev/null
+++ b/docs/shared_parsers_catalog/php_ini.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.php_ini
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/php_ini.py b/insights/parsers/php_ini.py
new file mode 100644
index 000000000..99a37a590
--- /dev/null
+++ b/insights/parsers/php_ini.py
@@ -0,0 +1,159 @@
+"""
+php_ini - file ``/etc/php.ini``
+===============================
+
+This module provides the ``PHPConf`` class parser for reading the
+options in the ``/etc/php.ini`` file.
+
+Typical content of ``/etc/php.ini`` file is::
+
+    [PHP]
+    engine = On
+    short_open_tag = Off
+    precision = 14
+    output_buffering = 4096
+    zlib.output_compression = Off
+    implicit_flush = Off
+    unserialize_callback_func =
+    serialize_precision = -1
+    disable_functions =
+    disable_classes =
+    zend.enable_gc = On
+    zend.exception_ignore_args = On
+    zend.exception_string_param_max_len = 0
+    expose_php = On
+    max_execution_time = 30
+    max_input_time = 60
+    memory_limit = 128M
+    error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
+    default_mimetype = "text/html"
+
+The class has one attribute ``data`` which is a nested ``dict`` representing sections
+of the input INI file. Each section is represented as a ``dict`` where keys are the
+option names and values are the option values.
+
+Example:
+    >>> php_conf["PHP"]["default_mimetype"].value
+    'text/html'
+    >>> php_conf.data['PHP']['default_mimetype']
+    'text/html'
+    >>> php_conf.data['Session']['session.cache_limiter']
+    'nocache'
+    >>> php_conf["PHP"]["max_execution_time"].value
+    30
+    >>> php_conf["PHP"]["engine"].value  # 'On' turns to 'True'
+    True
+    >>> php_conf["PHP"]["short_open_tag"].value  # 'Off' turns to 'False'
+    False
+    >>> php_conf['PHP']['precision'].value
+    14
+    >>> php_conf.get("PHP").get("memory_limit")  # '128M' is converted into bytes
+    134217728
+"""
+import string
+from insights import parser
+from insights.core import ConfigParser
+from insights.core.filters import add_filter
+from insights.parsr.query import eq
+from insights.parsr import (Char, EOF, HangingString, InSet,
+                            LeftBracket, Lift, LineEnd, Literal, RightBracket,
+                            Many, Number, OneLineComment, Opt, PosMarker,
+                            QuotedString, skip_none, String, WithIndent, WS, WSChar)
+from insights.parsr.query import Directive, Entry, Section
+from insights.parsers import ParseException, SkipException
+from insights.specs import Specs
+
+
+# Filter to ensure that the section headings will always be included.
+add_filter(Specs.php_ini, "[")
+
+
+@parser(Specs.php_ini, continue_on_error=False)
+class PHPConf(ConfigParser):
+    """
+    Class for php configuration file.
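+
+    Option values are converted while parsing: ``On``/``Off`` and
+    ``True``/``False`` become booleans, plain numbers become numeric values,
+    and shorthand byte sizes such as ``128M`` are expanded to bytes.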
+    """
+    def parse_doc(self, content):
+        try:
+            def to_directive(x):
+                name, rest = x
+                rest = [rest] if rest is not None else []
+                return Directive(name=name.value.strip(), attrs=rest, lineno=name.lineno, src=self)
+
+            def to_section(name, rest):
+                return Section(name=name.value.strip(), children=rest, lineno=name.lineno, src=self)
+
+            def apply_defaults(cfg):
+                if "DEFAULT" not in cfg:
+                    return cfg
+
+                defaults = cfg["DEFAULT"]
+                not_defaults = cfg[~eq("DEFAULT")]
+                for c in not_defaults:
+                    for d in defaults.grandchildren:
+                        if d.name not in c:
+                            c.children.append(d)
+
+                cfg.children = list(not_defaults)
+                return cfg
+
+            def make_bytes(number, char_multiple):
+                if char_multiple.lower() == 'k':
+                    return number * 2**10
+                if char_multiple.lower() == 'm':
+                    return number * 2**20
+                if char_multiple.lower() == 'g':
+                    return number * 2**30
+
+            content = "\n".join(content)
+
+            header_chars = (set(string.printable) - set(string.whitespace) - set("[]")) | set(" ")
+            sep_chars = set("=")
+            key_chars = header_chars - sep_chars
+            value_chars = set(string.printable) - set("\n\r")
+
+            On = Literal("on", True, ignore_case=True)
+            Off = Literal("off", False, ignore_case=True)
+            Tru = Literal("true", True, ignore_case=True)
+            Fals = Literal("false", False, ignore_case=True)
+            Boolean = ((On | Off | Tru | Fals) & (WSChar | LineEnd)) % "Boolean"
+            Num = Number & (WSChar | LineEnd)
+            QuoStr = QuotedString & (WSChar | LineEnd)
+            # Handle php.ini shorthand notation for memory limits: 1G, 8M, 50K
+            # https://www.php.net/manual/en/faq.using.php#faq.using.shorthandbytes
+            MemNum = (Lift(make_bytes) * Number * (Char('K') | Char('M') | Char('G'))) & (WSChar | LineEnd)
+
+            LeftEnd = (WS + LeftBracket + WS)
+            RightEnd = (WS + RightBracket + WS)
+            Header = (LeftEnd >> PosMarker(String(header_chars)) << RightEnd) % "Header"
+            Key = WS >> PosMarker(String(key_chars)) << WS
+            Sep = InSet(sep_chars, "Sep")
+            Value = WS >> (Boolean | MemNum | Num | QuoStr | HangingString(value_chars))
+            KVPair = WithIndent(Key + Opt(Sep >> Value)) % "KVPair"
+            Comment = (WS >> (OneLineComment(";")).map(lambda x: None))
+
+            Line = Comment | KVPair.map(to_directive)
+            Sect = Lift(to_section) * Header * Many(Line).map(skip_none)
+            Doc = Many(Comment | Sect).map(skip_none)
+            Top = Doc << WS << EOF
+
+            res = Entry(children=Top(content), src=self)
+            return apply_defaults(res)
+
+        except SkipException:
+            raise
+        except Exception:
+            raise ParseException("Could not parse content: '{0}'".
+                                 format(content))
+
+    def parse_content(self, content):
+        super(PHPConf, self).parse_content(content)
+        dict_all = {}
+        for section in self.doc:
+            section_dict = {}
+            option_names = set(o.name for o in section)
+            for name in option_names:
+                options = [str(o.value) for o in section[name]]
+                section_dict[name] = options[0] if len(options) == 1 else options
+            dict_all[section.name] = section_dict
+        self.data = dict_all
diff --git a/insights/parsers/tests/test_php_ini.py b/insights/parsers/tests/test_php_ini.py
new file mode 100644
index 000000000..f6fdb584e
--- /dev/null
+++ b/insights/parsers/tests/test_php_ini.py
@@ -0,0 +1,210 @@
+from insights.parsers.php_ini import PHPConf
+from insights.parsers import SkipException, ParseException
+from insights.tests import context_wrap
+import pytest
+
+# Latest production php.ini from php git repository. Comments stripped out.
+# https://git.php.net/?p=php-src.git;a=blob;f=php.ini-production;hb=HEAD +INI_DEFAULT = """ +[PHP] +engine = On +short_open_tag = Off +precision = 14 +output_buffering = 4096 +zlib.output_compression = Off +implicit_flush = Off +unserialize_callback_func = +serialize_precision = -1 +disable_functions = +; This is a comment. +disable_classes = +zend.enable_gc = On +zend.exception_ignore_args = On +zend.exception_string_param_max_len = 0 +expose_php = On +max_execution_time = 30 +max_input_time = 60 +memory_limit = 128M + +error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT + +display_errors = Off +display_startup_errors = Off +log_errors = On +log_errors_max_len = 1024 +ignore_repeated_errors = Off +ignore_repeated_source = Off +report_memleaks = On +variables_order = "GPCS" +request_order = "GP" +register_argc_argv = Off +auto_globals_jit = On + +post_max_size = 8M + +auto_prepend_file = +auto_append_file = +default_mimetype = "text/html" +default_charset = "UTF-8" +doc_root = +user_dir = +enable_dl = Off +file_uploads = On +upload_max_filesize = 2M +max_file_uploads = 20 +allow_url_fopen = On +allow_url_include = Off +default_socket_timeout = 60 + +[CLI Server] +cli_server.color = On + +[Date] + +[filter] + +[iconv] + +[imap] + +[intl] + +[sqlite3] + +[Pcre] + +[Pdo] + +[Pdo_mysql] +pdo_mysql.default_socket= + +[Phar] + +[mail function] +SMTP = localhost +smtp_port = 25 +mail.add_x_header = Off + +[ODBC] +odbc.allow_persistent = On +odbc.check_persistent = On +odbc.max_persistent = -1 +odbc.max_links = -1 +odbc.defaultlrl = 4096 +odbc.defaultbinmode = 1 + +[MySQLi] +mysqli.max_persistent = -1 +mysqli.allow_persistent = On +mysqli.max_links = -1 +mysqli.default_port = 3306 +mysqli.default_socket = +mysqli.default_host = +mysqli.default_user = +mysqli.default_pw = +mysqli.reconnect = Off + +[mysqlnd] +mysqlnd.collect_statistics = On +mysqlnd.collect_memory_statistics = Off + +[OCI8] + +[PostgreSQL] +pgsql.allow_persistent = On +pgsql.auto_reset_persistent = Off +pgsql.max_persistent = -1 +pgsql.max_links = -1 +pgsql.ignore_notice = 0 +pgsql.log_notice = 0 + +[bcmath] +bcmath.scale = 0 + +[browscap] + +[Session] +session.save_handler = files +session.use_strict_mode = 0 +session.use_cookies = 1 +session.use_only_cookies = 1 +session.name = PHPSESSID +session.auto_start = 0 +session.cookie_lifetime = 0 +session.cookie_path = / +session.cookie_domain = +session.cookie_httponly = +session.cookie_samesite = +session.serialize_handler = php +session.gc_probability = 1 +session.gc_divisor = 1000 +session.gc_maxlifetime = 1440 +session.referer_check = +session.cache_limiter = nocache +session.cache_expire = 180 +session.use_trans_sid = 0 +session.sid_length = 26 +session.trans_sid_tags = "a=href,area=href,frame=src,form=" +session.sid_bits_per_character = 5 + +[Assertion] +zend.assertions = -1 + +[COM] + +[mbstring] + +[gd] + +[exif] + +[Tidy] +tidy.clean_output = Off + +[soap] +soap.wsdl_cache_enabled=1 +soap.wsdl_cache_dir="/tmp" +soap.wsdl_cache_ttl=86400 +soap.wsdl_cache_limit = 5 + +[sysvshm] + +[ldap] +ldap.max_links = -1 + +[dba] + +[opcache] + +[curl] + +[openssl] + +[ffi] + +""".strip() + +INI_EMPTY = "" +INI_INVALID = "bla bla foo ha [] ^&*@#$%" + + +def test_php_conf_default(): + php_c = PHPConf(context_wrap(INI_DEFAULT)) + assert php_c['PHP']['default_mimetype'].value == 'text/html' + assert php_c.data['PHP']['default_mimetype'] == 'text/html' + assert php_c.data['Session']['session.cache_limiter'] == 'nocache' + assert php_c['PHP']['engine'].value is True + assert 
php_c['PHP']['precision'].value == 14 + assert php_c['PHP']['disable_classes'].value == '' + assert php_c['PHP']['memory_limit'].value == 128 * 2**20 # Conversion of 128M to bytes. + assert php_c['PHP']['post_max_size'].value == 8 * 2**20 # Conversion of 8M to bytes. + + +def test_php_conf_empty(): + with pytest.raises(SkipException): + PHPConf(context_wrap(INI_EMPTY)) + + +def test_php_conf_invalid(): + with pytest.raises(ParseException): + PHPConf(context_wrap(INI_INVALID)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index e6ab312ab..b79f56025 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -466,6 +466,7 @@ class Specs(SpecSet): pcs_config = RegistryPoint() pcs_quorum_status = RegistryPoint() pcs_status = RegistryPoint() + php_ini = RegistryPoint(filterable=True) pluginconf_d = RegistryPoint(multi_output=True) podman_container_inspect = RegistryPoint(multi_output=True) podman_image_inspect = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 739c7ac63..efb1de2d9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -529,6 +529,7 @@ def pcp_enabled(broker): password_auth = simple_file("/etc/pam.d/password-auth") pcs_quorum_status = simple_command("/usr/sbin/pcs quorum status") pcs_status = simple_command("/usr/sbin/pcs status") + php_ini = first_file(["/etc/opt/rh/php73/php.ini", "/etc/opt/rh/php72/php.ini", "/etc/php.ini"]) pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf") postgresql_conf = first_file([ "/var/lib/pgsql/data/postgresql.conf", From 1de67f7c92ab569a0741c3853b09c07378107456 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 22 Oct 2020 11:13:22 -0400 Subject: [PATCH 212/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 39 ++++++++++++++++++-------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 755e38434..f52f4e782 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1109,7 +1109,6 @@ "/usr/bin/teamd", "/usr/sbin/fcoemon --syslog", "COMMAND", - "[rcu_sched]", "bash", "catalina.base", "ceilometer-coll", @@ -1142,8 +1141,6 @@ "pcsd", "pkla-check-auth", "postgres", - "rcu_gp_kthread", - "rcu_sched", "smbd", "spausedd", "target_completi", @@ -2319,9 +2316,6 @@ "Cannot assign requested address", "Cannot assign requested address: AH00072", "Connection amqps://subscription.rhn.redhat.com:5647 disconnected", - "Connection amqps://subscription.rhn.redhat.com:5647 timed out: Opening connection", - "Connection amqps://subscription.rhsm.redhat.com:5647 disconnected", - "Connection amqps://subscription.rhsm.redhat.com:5647 timed out: Opening connection", "Corosync main process was not scheduled (@", "Could not set", "DHCPv4 lease renewal requested", @@ -2364,7 +2358,6 @@ "_NET_ACTIVE_WINDOW", "as active slave; either", "belongs to docker.service", - "blocked for more than", "callbacks suppressed", "canceled DHCP transaction, DHCP client pid", "chardev: opening backend \"socket\" failed", @@ -2445,13 +2438,11 @@ "there is a meaningful conflict", "timed out", "timeout before we got a set response", - "timeout; kill it", "timing out command, waited", "transmit queue", "udev: renamed network interface", "unknown filesystem type 'binfmt_misc'", "ut of memory: ", - "vdsm-tool: EnvironmentError: Failed to restore the persisted networks", "watch chan error: 
etcdserver: mvcc: required revision has been compacted" ], "symbolic_name": "messages" @@ -2903,6 +2894,27 @@ "pattern": [], "symbolic_name": "password_auth" }, + { + "file": "/etc/opt/rh/php73/php.ini", + "pattern": [ + "post_max_size" + ], + "symbolic_name": "php_ini" + }, + { + "file": "/etc/opt/rh/php72/php.ini", + "pattern": [ + "post_max_size" + ], + "symbolic_name": "php_ini" + }, + { + "file": "/etc/php.ini", + "pattern": [ + "post_max_size" + ], + "symbolic_name": "php_ini" + }, { "file": "/etc/yum/pluginconf.d/()*\\w+\\.conf", "pattern": [], @@ -3499,6 +3511,11 @@ "pattern": [], "symbolic_name": "vdsm_logger_conf" }, + { + "file": "/sys/module/vhost_net/parameters/experimental_zcopytx", + "pattern": [], + "symbolic_name": "vhost_net_zero_copy_tx" + }, { "file": "/etc/()*virt-who\\.conf", "pattern": [ @@ -4094,5 +4111,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-10-14T14:41:32.281672" -} \ No newline at end of file + "version": "2020-10-19T12:07:39.171878" +} From 2437a684677d2f6f445cca963710b8e2ab55bc27 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Fri, 23 Oct 2020 13:53:07 +0530 Subject: [PATCH 213/892] Cleanup temp dir created for egg download (#2721) (#2722) Signed-off-by: Rohan Arora Co-authored-by: Jeremy Crafts --- insights/client/__init__.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 29d02211b..0f8d92861 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -6,6 +6,7 @@ import shlex import shutil import sys +import atexit from subprocess import Popen, PIPE from requests import ConnectionError @@ -65,6 +66,7 @@ def __init__(self, config=None, setup_logging=True, **kwargs): # used for requests self.session = None self.connection = None + self.tmpdir = None def _net(func): def _init_connection(self, *args, **kwargs): @@ -121,10 +123,11 @@ def fetch(self, force=False): returns (dict): {'core': path to new egg, None if no update, 'gpg_sig': path to new sig, None if no update} """ - tmpdir = tempfile.mkdtemp() + self.tmpdir = tempfile.mkdtemp() + atexit.register(self.delete_tmpdir) fetch_results = { - 'core': os.path.join(tmpdir, 'insights-core.egg'), - 'gpg_sig': os.path.join(tmpdir, 'insights-core.egg.asc') + 'core': os.path.join(self.tmpdir, 'insights-core.egg'), + 'gpg_sig': os.path.join(self.tmpdir, 'insights-core.egg.asc') } logger.debug("Beginning core fetch.") @@ -372,6 +375,11 @@ def install(self, new_egg, new_egg_gpg_sig): logger.debug("The new Insights Core was installed successfully.") return {'success': True} + def delete_tmpdir(self): + if self.tmpdir: + logger.debug("Deleting temp directory %s." 
% (self.tmpdir)) + shutil.rmtree(self.tmpdir, True) + @_net def update_rules(self): """ From 8349f481a794f1395e33382a1115debeab5dfd91 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 26 Oct 2020 16:13:53 +0800 Subject: [PATCH 214/892] Save the raised exception (#2799) * Save the raised exception * Some gss rules have to check the raised exception to check specific issue when subscription-manager can't work Signed-off-by: Huanhuan Li * Check if there is traceback first Signed-off-by: Huanhuan Li * Add more info in the docstring Signed-off-by: Huanhuan Li --- insights/parsers/subscription_manager_list.py | 7 ++++++ .../tests/test_subscription_manager_list.py | 23 +++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/insights/parsers/subscription_manager_list.py b/insights/parsers/subscription_manager_list.py index 74f00efc3..23281d9a1 100644 --- a/insights/parsers/subscription_manager_list.py +++ b/insights/parsers/subscription_manager_list.py @@ -25,6 +25,10 @@ class SubscriptionManagerList(CommandParser): A general object for parsing the output of ``subscription-manager list``. This should be subclassed to read the specific output - e.g. ``--consumed`` or ``--installed``. + + Attributes: + records (list): A list of dict with the output info, it's empty when the ``error`` occurs + error (str): The raised exception when there is traceback """ def parse_content(self, content): self.records = [] @@ -41,6 +45,9 @@ def parse_content(self, content): # it starts a new record. for line in content: + if 'Traceback' in line: + self.error = content[-1] + break # Check for match of key/value line match = key_val_re.search(line) if match: diff --git a/insights/parsers/tests/test_subscription_manager_list.py b/insights/parsers/tests/test_subscription_manager_list.py index 741408135..de73e34d6 100644 --- a/insights/parsers/tests/test_subscription_manager_list.py +++ b/insights/parsers/tests/test_subscription_manager_list.py @@ -64,6 +64,21 @@ No installed products to list ''' +subscription_manager_list_errors = """ +Traceback (most recent call last): + File "/usr/sbin/subscription-manager", line 9, in + load_entry_point('subscription-manager==1.21.10', 'console_scripts', 'subscription-manager')() + File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 378, in load_entry_point + return get_distribution(dist).load_entry_point(group, name) + File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2566, in load_entry_point + return ep.load() + File "/usr/lib/python2.7/site-packages/pkg_resources.py", line 2260, in load + entry = __import__(self.module_name, globals(),globals(), ['__name__']) + File "/usr/lib64/python2.7/site-packages/subscription_manager/scripts/subscription_manager.py", line 29, in + if six.PY2: +AttributeError: 'module' object has no attribute 'PY2' +""" + def test_subscription_manager_list_exceptions(): sml = subscription_manager_list.SubscriptionManagerListConsumed( @@ -90,3 +105,11 @@ def test_subscription_manager_list_docs(): } failed, total = doctest.testmod(subscription_manager_list, globs=env) assert failed == 0 + + +def test_exception(): + sml = subscription_manager_list.SubscriptionManagerListConsumed( + context_wrap(subscription_manager_list_errors) + ) + assert not sml.records + assert "AttributeError: 'module' object has no attribute 'PY2'" in sml.error From 9e01c8a57596ea3cf66ce2e8c7fdce049c1692e5 Mon Sep 17 00:00:00 2001 From: Glutexo Date: Tue, 27 Oct 2020 14:00:00 +0100 Subject: [PATCH 215/892] 
RHCLOUD-9230 Delete pid files on exit (#2795)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Couple PID files write and delete

Originally, the PID files were written on the wrapper init call before the
phases run. Their removal was however scheduled in the collect and output
phase. That resulted in the files not being deleted if this phase is not run.

Moved the registration of the shutdown handler that deletes the file to the
init phase. Like this, whenever the file gets created, its deletion is
registered. They should no longer remain undeleted.

Signed-off-by: Štěpán Tomsa

* Fix PID file write tests

The tests verifying that the PID files are written were broken. They
succeeded even if the file is not written. It was because of incorrect
unittest.mock usage. Fixed.

Signed-off-by: Štěpán Tomsa

* Test delete PID files (not) being registered

Added new tests verifying that a function that deletes the PID files is
(not) registered when InsightsClient is initialized, depending on whether
it is called from a phase.

Not testing the actual filesystem delete as the actual write is untested
too.

Signed-off-by: Štěpán Tomsa

Co-authored-by: Jeremy Crafts
---
 insights/client/__init__.py                   | 23 +++++----
 insights/client/phase/v1.py                   |  6 +--
 .../tests/client/init/test_write_pidfile.py  | 49 +++++++++++++++----
 3 files changed, 54 insertions(+), 24 deletions(-)

diff --git a/insights/client/__init__.py b/insights/client/__init__.py
index 0f8d92861..292f8c6ae 100644
--- a/insights/client/__init__.py
+++ b/insights/client/__init__.py
@@ -30,7 +30,7 @@
 
 class InsightsClient(object):
 
-    def __init__(self, config=None, setup_logging=True, **kwargs):
+    def __init__(self, config=None, from_phase=True, **kwargs):
         """
             The Insights client interface
         """
@@ -50,18 +50,14 @@ def __init__(self, config=None, setup_logging=True, **kwargs):
             sys.exit(constants.sig_kill_bad)
         # END hack. in the future, just set self.config=config
 
-        # setup_logging is True when called from phase, but not from wrapper.
- # use this to do any common init (like auto_config) - if setup_logging: + if from_phase: _init_client_config_dirs() self.set_up_logging() try_auto_configuration(self.config) self.initialize_tags() - else: - # write PID to file in case we need to ping systemd - write_to_disk(constants.pidfile, content=str(os.getpid())) - # write PPID to file so that we can grab the client execution method - write_to_disk(constants.ppidfile, content=get_parent_process()) + else: # from wrapper + _write_pid_files() + # setup insights connection placeholder # used for requests self.session = None @@ -718,3 +714,12 @@ def _init_client_config_dirs(): pass else: raise e + + +def _write_pid_files(): + for file, content in ( + (constants.pidfile, str(os.getpid())), # PID in case we need to ping systemd + (constants.ppidfile, get_parent_process()) # PPID so that we can grab the client execution method + ): + write_to_disk(file, content=content) + atexit.register(write_to_disk, file, delete=True) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index cde361fe5..6e2cf6a18 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -4,13 +4,12 @@ import logging import os import sys -import atexit from insights.client import InsightsClient from insights.client.config import InsightsConfig from insights.client.constants import InsightsConstants as constants from insights.client.support import InsightsSupport -from insights.client.utilities import validate_remove_file, print_egg_versions, write_to_disk +from insights.client.utilities import validate_remove_file, print_egg_versions from insights.client.schedule import get_scheduler from insights.client.apps.compliance import ComplianceClient @@ -262,9 +261,6 @@ def post_update(client, config): @phase def collect_and_output(client, config): - # last phase, delete PID file on exit - atexit.register(write_to_disk, constants.pidfile, delete=True) - atexit.register(write_to_disk, constants.ppidfile, delete=True) # --compliance was called if config.compliance: config.payload, config.content_type = ComplianceClient(config).oscap_scan() diff --git a/insights/tests/client/init/test_write_pidfile.py b/insights/tests/client/init/test_write_pidfile.py index cee3d04ad..9f6bf2b16 100644 --- a/insights/tests/client/init/test_write_pidfile.py +++ b/insights/tests/client/init/test_write_pidfile.py @@ -1,30 +1,59 @@ from insights.client import InsightsClient from insights.client.constants import InsightsConstants +from mock.mock import call from mock.mock import patch +@patch("insights.client.atexit.register") @patch("insights.client.write_to_disk") @patch("insights.client.os.getpid") -@patch("insights.client.utilities.get_parent_process") -def test_write_pidfile(get_parent_process, getpid, write_to_disk): +@patch("insights.client.get_parent_process") +def test_write_pidfile(get_parent_process, getpid, write_to_disk, register): ''' Test writing of the pidfile when InsightsClient - is called initially (when setup_logging=False) + is called initially (when from_phase=False) ''' - InsightsClient(setup_logging=False) + InsightsClient(from_phase=False) getpid.assert_called_once() - calls = [write_to_disk(InsightsConstants.pidfile, content=str(getpid.return_value)), - write_to_disk(InsightsConstants.ppidfile, content=get_parent_process.return_value)] - write_to_disk.has_calls(calls) + write_to_disk.assert_has_calls(( + call(InsightsConstants.pidfile, content=str(getpid.return_value)), + call(InsightsConstants.ppidfile, 
content=get_parent_process.return_value) + )) +@patch("insights.client.atexit.register") @patch("insights.client.write_to_disk") +def test_atexit_delete_pidfile(write_to_disk, register): + ''' + Test delete of the pidfile is registered when InsightsClient + is called initially (when from_phase=False) + ''' + InsightsClient(from_phase=False) + register.assert_has_calls(( + call(write_to_disk, InsightsConstants.pidfile, delete=True), + call(write_to_disk, InsightsConstants.ppidfile, delete=True) + )) + + +@patch("insights.client.write_to_disk") +@patch("insights.client.get_parent_process") @patch("insights.client.os.getpid") -def test_write_pidfile_not_called(getpid, write_to_disk): +def test_write_pidfile_not_called(getpid, get_parent_process, write_to_disk): ''' Test that the pidfile is not written when - called from a phase (setup_logging=True) + called from a phase (from-phase=True) ''' - InsightsClient(setup_logging=True) + InsightsClient(from_phase=True) + get_parent_process.assert_not_called() getpid.assert_not_called() write_to_disk.assert_not_called() + + +@patch("insights.client.atexit.register") +def test_atexit_delete_pidfile_not_called(register): + ''' + Test that delete of the pidfile is not registered when + called from a phase (from_phase=True) + ''' + InsightsClient(from_phase=True) + register.assert_not_called() From 35e6f53416a17fb038861a369cb8331d21d6478c Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Wed, 28 Oct 2020 22:05:51 +0100 Subject: [PATCH 216/892] Added parser for testparm data for Samba configuration (#2786) * Added parser for testparm data for Samba configuration https://projects.engineering.redhat.com/browse/PSINSIGHTS-197 Signed-off-by: Stanislav Kontar * Removed unnecessary init and made server role mandatory Signed-off-by: Stanislav Kontar * Fix linter warnings Signed-off-by: Stanislav Kontar * Added another parser and explanation why all of them are needed Signed-off-by: Stanislav Kontar --- insights/parsers/samba.py | 72 ++++++++++++++--- insights/parsers/tests/test_samba.py | 114 ++++++++++++++++++--------- insights/specs/__init__.py | 2 + insights/specs/default.py | 2 + insights/specs/insights_archive.py | 2 + insights/specs/sos_archive.py | 1 + 6 files changed, 148 insertions(+), 45 deletions(-) diff --git a/insights/parsers/samba.py b/insights/parsers/samba.py index 7f048a63a..6417fe46c 100644 --- a/insights/parsers/samba.py +++ b/insights/parsers/samba.py @@ -2,17 +2,17 @@ SambaConfig - file ``/etc/samba/smb.conf`` ========================================== -This parser reads the SaMBa configuration file ``/etc/samba/smb.conf``, which +This parser reads the Samba configuration file ``/etc/samba/smb.conf``, which is in standard .ini format, with a couple of notable features: -* SaMBa ignores spaces at the start of options, which the ConfigParser class - normally does not. This spacing is stripped by this parser. -* SaMBa likewise ignores spaces in section heading names. -* SaMBa allows the same section to be defined multiple times, with the +* Samba ignores spaces at the start of options, which the ConfigParser class + normally does not. This spacing is stripped by this parser. +* Samba likewise ignores spaces in section heading names. +* Samba allows the same section to be defined multiple times, with the options therein being merged as if they were one section. -* SaMBa allows options to be declared before the first section marker. +* Samba allows options to be declared before the first section marker. 
This parser puts these options in a `global` section. -* SaMBa treats ';' as a comment prefix, similar to '#'. +* Samba treats ';' as a comment prefix, similar to '#'. Sample configuration file:: @@ -68,19 +68,30 @@ 50 """ +import re +from . import ParseException from .. import add_filter, IniConfigFile, parser from insights.specs import Specs add_filter(Specs.samba, ["["]) +add_filter(Specs.testparm_s, ["["]) +add_filter(Specs.testparm_s, ["Server role:"]) + +add_filter(Specs.testparm_v_s, ["["]) +add_filter(Specs.testparm_v_s, ["Server role:"]) + @parser(Specs.samba) class SambaConfig(IniConfigFile): """ - This parser reads the SaMBa configuration file ``/etc/samba/smb.conf``. - """ + This parser reads the Samba configuration file ``/etc/samba/smb.conf``. + Note: It is needed for better resolution descriptions when it is necessary to know what exactly + is in the configuration file. For generic tasks use ``SambaConfigs`` or ``SambaConfigsAll`` + instead. + """ def parse_content(self, content): # smb.conf is special from other ini files in the property that # whatever is before the first section (before the first section) @@ -108,3 +119,46 @@ def parse_content(self, content): # Merge same-named sections just as samba's `testparm` does. new_dict[new_key].update(old_section) self.data._sections = new_dict + + +@parser(Specs.testparm_s) +class SambaConfigs(SambaConfig): + """ + This parser reads the Samba configuration from command `testparm -s` which is more reliable + than parsing the config file, as it includes configuration in internal registry. It also + includes server role. + + Note: This is the most suitable parser when only user changes to the configuration are important + for the detection logic, i.e. misconfiguration. + + Attributes: + server_role (string): Server role as reported by the command. + """ + def parse_content(self, content): + # Parse server role + for line in content: + r = re.search(r"Server role:\s+(\S+)", line) + if r: + self.server_role = r.group(1) + break + else: + raise ParseException("Server role not found.") + + super(SambaConfigs, self).parse_content(content) + + +@parser(Specs.testparm_v_s) +class SambaConfigsAll(SambaConfigs): + """ + This parser reads the Samba configuration from command `testparm -v -s` which is more reliable + than parsing the config file, as it includes configuration in internal registry. It also + includes all default values and server role. + + Note: This parser is needed for cases when active value of specific option is needed for the + detection logic, irrespective of its origin from user changes or defaults, i.e. security + vulnerabilities. + + Attributes: + server_role (string): Server role as reported by the command. + """ + pass diff --git a/insights/parsers/tests/test_samba.py b/insights/parsers/tests/test_samba.py index 1f850e34c..210547557 100644 --- a/insights/parsers/tests/test_samba.py +++ b/insights/parsers/tests/test_samba.py @@ -1,4 +1,6 @@ -from insights.parsers import samba +import pytest + +from insights.parsers import samba, ParseException from insights.tests import context_wrap from doctest import testmod @@ -191,40 +193,80 @@ def test_documentation(): this another option should also be in global = 1 """ +# This is going to be filtered, so removing following lines: +# +# Load smb config files from /etc/samba/smb.conf +# Loaded services file OK. 
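+#
+# (The add_filter() calls registered in insights/parsers/samba.py only
+# guarantee that lines containing "[" or "Server role:" are collected, so
+# those two banner lines would not survive filtering on a real system.)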
+ +TESTPARM = """ +Server role: ROLE_STANDALONE + +# Global parameters +[global] + add user to group script = + afs token lifetime = 604800 + afs username map = + aio max threads = 100 + server schannel = Yes + +[homes] + browseable = No + +""" # noqa: E101,W191,W291 + def test_match(): - config = samba.SambaConfig(context_wrap(SAMBA_CONFIG)) - - assert config.get('global', 'this option should be in global') == 'yes' - assert config.get('global', 'this option should also be in global') == 'true' - assert config.get('global', 'this another option should also be in global') == '1' - assert config.get('global', 'workgroup') == 'MYGROUP' - assert config.get('global', 'workgroup') == 'MYGROUP' - assert config.get('global', 'server string') == 'Samba Server Version %v' - assert not config.has_option('global', 'netbios name') - assert config.get('global', 'log file') == '/var/log/samba/log.%m' - assert config.get('global', 'max log size') == '50' - - assert config.get('global', 'security') == 'user' - assert config.get('global', 'passdb backend') == 'tdbsam' - - assert config.get('global', 'load printers') == 'yes' - assert config.get('global', 'cups options') == 'raw' - - assert not config.has_option('global', 'printcap name') - - assert config.get('homes', 'comment') == 'Home Directories' - assert config.get('homes', 'browseable') == 'no' - assert config.get('homes', 'writable') == 'yes' - assert not config.has_option('homes', 'valid users') - - assert config.get('printers', 'comment') == 'All Printers' - assert config.get('printers', 'path') == '/var/spool/samba' - assert config.get('printers', 'browseable') == 'no' - assert config.get('printers', 'guest ok') == 'no' - assert config.get('printers', 'writable') == 'no' - assert config.get('printers', 'printable') == 'yes' - - assert 'netlogin' not in config - assert 'Profiles' not in config - assert 'public' not in config + for config in [samba.SambaConfig(context_wrap(SAMBA_CONFIG)), # noqa: E101 + samba.SambaConfigs(context_wrap("Server role: ROLE_STANDALONE\n\n" + + SAMBA_CONFIG)), + samba.SambaConfigsAll(context_wrap("Server role: ROLE_STANDALONE\n\n" + + SAMBA_CONFIG)), + ]: + assert config.get('global', 'this option should be in global') == 'yes' + assert config.get('global', 'this option should also be in global') == 'true' + assert config.get('global', 'this another option should also be in global') == '1' + assert config.get('global', 'workgroup') == 'MYGROUP' + assert config.get('global', 'workgroup') == 'MYGROUP' + assert config.get('global', 'server string') == 'Samba Server Version %v' + assert not config.has_option('global', 'netbios name') + assert config.get('global', 'log file') == '/var/log/samba/log.%m' + assert config.get('global', 'max log size') == '50' + + assert config.get('global', 'security') == 'user' + assert config.get('global', 'passdb backend') == 'tdbsam' + + assert config.get('global', 'load printers') == 'yes' + assert config.get('global', 'cups options') == 'raw' + + assert not config.has_option('global', 'printcap name') + + assert config.get('homes', 'comment') == 'Home Directories' + assert config.get('homes', 'browseable') == 'no' + assert config.get('homes', 'writable') == 'yes' + assert not config.has_option('homes', 'valid users') + + assert config.get('printers', 'comment') == 'All Printers' + assert config.get('printers', 'path') == '/var/spool/samba' + assert config.get('printers', 'browseable') == 'no' + assert config.get('printers', 'guest ok') == 'no' + assert config.get('printers', 'writable') == 
'no'
+        assert config.get('printers', 'printable') == 'yes'
+
+        assert 'netlogin' not in config
+        assert 'Profiles' not in config
+        assert 'public' not in config
+
+
+def test_server_role():
+    config = samba.SambaConfigs(context_wrap(TESTPARM))
+
+    assert config.get('global', 'server schannel') == 'Yes'
+    assert config.get('homes', 'browseable') == 'No'
+    assert config.get('global', 'afs username map') == ''
+    assert config.server_role == "ROLE_STANDALONE"
+
+
+def test_server_role_missing():
+    with pytest.raises(ParseException) as e:
+        samba.SambaConfigs(context_wrap(SAMBA_CONFIG))
+    assert str(e.value) == "Server role not found."
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index b79f56025..65a5fb20d 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -629,6 +629,8 @@ class Specs(SpecSet):
     systemd_system_origin_accounting = RegistryPoint()
     systemid = RegistryPoint()
     systool_b_scsi_v = RegistryPoint()
+    testparm_s = RegistryPoint(filterable=True)
+    testparm_v_s = RegistryPoint(filterable=True)
     tags = RegistryPoint()
     teamdctl_config_dump = RegistryPoint(multi_output=True)
     teamdctl_state_dump = RegistryPoint(multi_output=True)
diff --git a/insights/specs/default.py b/insights/specs/default.py
index efb1de2d9..c68635e08 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -649,6 +649,8 @@ def sap_sid_name(broker):
         simple_file("/conf/rhn/sysconfig/rhn/systemid")
     ])
     systool_b_scsi_v = simple_command("/bin/systool -b scsi -v")
+    testparm_s = simple_command("/usr/bin/testparm -s")
+    testparm_v_s = simple_command("/usr/bin/testparm -v -s")
     tags = simple_file("/tags.json", kind=RawFileProvider)
     thp_use_zero_page = simple_file("/sys/kernel/mm/transparent_hugepage/use_zero_page")
     thp_enabled = simple_file("/sys/kernel/mm/transparent_hugepage/enabled")
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index cf7555ed5..b492c565b 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -219,6 +219,8 @@ class InsightsArchiveSpecs(Specs):
     systemd_docker = first_file(["insights_commands/systemctl_cat_docker.service", "/usr/lib/systemd/system/docker.service"])
     systemd_openshift_node = first_file(["insights_commands/systemctl_cat_atomic-openshift-node.service", "/usr/lib/systemd/system/atomic-openshift-node.service"])
     systool_b_scsi_v = simple_file("insights_commands/systool_-b_scsi_-v")
+    testparm_s = simple_file("insights_commands/testparm_-s")
+    testparm_v_s = simple_file("insights_commands/testparm_-v_-s")
     tomcat_vdc_fallback = simple_file("insights_commands/find_.usr.share_-maxdepth_1_-name_tomcat_-exec_.bin.grep_-R_-s_VirtualDirContext_--include_.xml")
     tuned_adm = simple_file("insights_commands/tuned-adm_list")
     uname = simple_file("insights_commands/uname_-a")
diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index 56571294b..4af14cb7b 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -249,6 +249,7 @@ class SosSpecs(Specs):
     systemd_system_origin_accounting = simple_file("/etc/systemd/system.conf.d/origin-accounting.conf")
     teamdctl_config_dump = glob_file("sos_commands/teamd/teamdctl_*_config_dump")
     teamdctl_state_dump = glob_file("sos_commands/teamd/teamdctl_*_state_dump")
+    testparm_s = simple_file("sos_commands/samba/testparm_s")
     tomcat_web_xml = first_of([glob_file("/etc/tomcat*/web.xml"),
                                glob_file("/conf/tomcat/tomcat*/web.xml")])
     tuned_conf = simple_file("/etc/tuned.conf")

From ff43367da2eb82cca1e8d9dd8e4823bc0e487b48 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 29 Oct 2020 21:51:36 +0800
Subject: [PATCH 217/892] New parser for DotNetVersion (#2802)

* New parser for DotNetVersion

Signed-off-by: Xiangce Liu

* fix the typos of spec

Signed-off-by: Xiangce Liu
---
 docs/shared_parsers_catalog/dotnet.rst |  3 ++
 insights/parsers/dotnet.py             | 46 ++++++++++++++++++++
 insights/parsers/tests/test_dotnet.py  | 53 ++++++++++++++++++++++++
 insights/specs/__init__.py             |  1 +
 insights/specs/default.py              |  1 +
 insights/specs/insights_archive.py     |  1 +
 6 files changed, 105 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/dotnet.rst
 create mode 100644 insights/parsers/dotnet.py
 create mode 100644 insights/parsers/tests/test_dotnet.py

diff --git a/docs/shared_parsers_catalog/dotnet.rst b/docs/shared_parsers_catalog/dotnet.rst
new file mode 100644
index 000000000..775f5a914
--- /dev/null
+++ b/docs/shared_parsers_catalog/dotnet.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.dotnet
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/dotnet.py b/insights/parsers/dotnet.py
new file mode 100644
index 000000000..bbc91d10d
--- /dev/null
+++ b/insights/parsers/dotnet.py
@@ -0,0 +1,46 @@
+"""
+DotNet - Command ``/usr/bin/dotnet``
+====================================
+
+The parser for ``/usr/bin/dotnet --version`` is included in this module.
+
+"""
+
+from insights import parser, CommandParser
+from insights.parsers import SkipException, ParseException
+from insights.specs import Specs
+
+
+@parser(Specs.dotnet_version)
+class DotNetVersion(CommandParser):
+    """
+    Class for parsing the output of the ``/usr/bin/dotnet --version`` command.
+
+    Sample output::
+
+        3.1.108
+
+    Examples:
+        >>> dotnet_ver.major
+        3
+        >>> dotnet_ver.minor
+        1
+        >>> dotnet_ver.raw
+        '3.1.108'
+    """
+
+    def parse_content(self, content):
+        if not content or len(content) > 1:
+            raise SkipException
+
+        self.major = self.minor = None
+        self.raw = content[0].strip()
+
+        if ' ' not in self.raw and '.' in self.raw:
+            v_sp = [i.strip() for i in self.raw.split('.', 2)]
+            if len(v_sp) >= 2 and v_sp[0].isdigit() and v_sp[1].isdigit():
+                self.major = int(v_sp[0])
+                self.minor = int(v_sp[1])
+
+        if self.major is None:
+            raise ParseException("Unrecognized version: {0}".format(self.raw))
diff --git a/insights/parsers/tests/test_dotnet.py b/insights/parsers/tests/test_dotnet.py
new file mode 100644
index 000000000..0afefe1b9
--- /dev/null
+++ b/insights/parsers/tests/test_dotnet.py
@@ -0,0 +1,53 @@
+import doctest
+import pytest
+from insights.parsers import dotnet
+from insights.core.plugins import ContentException
+from insights.parsers import SkipException, ParseException
+from insights.parsers.dotnet import DotNetVersion
+from insights.tests import context_wrap
+
+dotnet_version_1 = "3.1.108"
+dotnet_version_2 = "2.1.518"
+dotnet_version_3 = """
+-bash: /usr/bin/dotnet: No such file or directory
+""".strip()
+dotnet_version_4 = "2."
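+# Note: dotnet_version_4 above is a truncated version string that exercises
+# the ParseException path, while dotnet_version_5 below is unexpected
+# multi-line output that exercises the SkipException path (see
+# test_dotnet_version_ab below).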
+dotnet_version_5 = """ +abc +-bash: /usr/bin/dotnet: No such file or directory +""" + + +def test_dotnet_version(): + ret = DotNetVersion(context_wrap(dotnet_version_1)) + assert ret.major == 3 + assert ret.minor == 1 + assert ret.raw == dotnet_version_1 + + ret = DotNetVersion(context_wrap(dotnet_version_2)) + assert ret.major == 2 + assert ret.minor == 1 + assert ret.raw == dotnet_version_2 + + +def test_dotnet_version_ab(): + with pytest.raises(ContentException): + ret = DotNetVersion(context_wrap(dotnet_version_3)) + assert ret is None + + with pytest.raises(ParseException) as pe: + ret = DotNetVersion(context_wrap(dotnet_version_4)) + assert ret is None + assert "Unrecognized version" in str(pe) + + with pytest.raises(SkipException): + ret = DotNetVersion(context_wrap(dotnet_version_5)) + assert ret is None + + +def test_doc_examples(): + env = { + 'dotnet_ver': DotNetVersion(context_wrap(dotnet_version_1)) + } + failed, total = doctest.testmod(dotnet, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 65a5fb20d..ae83a312f 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -141,6 +141,7 @@ class Specs(SpecSet): docker_storage = RegistryPoint() docker_storage_setup = RegistryPoint() docker_sysconfig = RegistryPoint() + dotnet_version = RegistryPoint() dracut_kdump_capture_service = RegistryPoint() du_dirs = RegistryPoint(multi_output=True) dumpe2fs_h = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index c68635e08..6c78136b5 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -238,6 +238,7 @@ def is_ceph_monitor(broker): docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") docker_storage_setup = simple_file("/etc/sysconfig/docker-storage-setup") docker_sysconfig = simple_file("/etc/sysconfig/docker") + dotnet_version = simple_command("/usr/bin/dotnet --version") dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id" --json') diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index b492c565b..f491a56ea 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -45,6 +45,7 @@ class InsightsArchiveSpecs(Specs): docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") + dotnet_version = simple_file("insights_commands/dotnet_--version") du_dirs = glob_file("insights_commands/du_-s_-k_*") engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") From 0e64a37aab84080eb5d6f583ebbc97a21d960b8a Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 29 Oct 2020 09:22:06 -0500 Subject: [PATCH 218/892] Readding spec for sap_hdb_version (#2805) * Spec was removed for core collection * It is now needed, parser is already in place Signed-off-by: Bob Fahr --- 
insights/specs/default.py | 6 ++++++ insights/specs/insights_archive.py | 1 + 2 files changed, 7 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 6c78136b5..ddb92b035 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -579,6 +579,11 @@ def pcp_enabled(broker): rsyslog_conf = simple_file("/etc/rsyslog.conf") samba = simple_file("/etc/samba/smb.conf") + @datasource(Sap) + def sap_sid(broker): + sap = broker[Sap] + return [sap.sid(i).lower() for i in sap.local_instances] + @datasource(Sap) def sap_sid_name(broker): """(list): Returns the list of (SAP SID, SAP InstanceName) """ @@ -587,6 +592,7 @@ def sap_sid_name(broker): sap_dev_disp = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_disp") sap_dev_rd = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_rd") + sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f491a56ea..d51d93b31 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -193,6 +193,7 @@ class InsightsArchiveSpecs(Specs): rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) sap_dev_disp = glob_file("/usr/sap/*/*/work/dev_disp") sap_dev_rd = glob_file("/usr/sap/*/*/work/dev_rd") + sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") sealert = simple_file('insights_commands/sealert_-l') From 98f1d54f1a53fa96c6a429b991fd1385fcb51506 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Thu, 29 Oct 2020 20:15:33 +0530 Subject: [PATCH 219/892] new parser networkmanager_config (#2794) * new parser networkmanager_config Signed-off-by: rasrivas * fixed the pipeline error Signed-off-by: rasrivas * updated the doc test Signed-off-by: rasrivas * added a new test case and also modified a test case Signed-off-by: rasrivas * modified the test case Signed-off-by: rasrivas --- .../networkmanager_config.rst | 3 + insights/parsers/networkmanager_config.py | 38 ++++++ .../tests/test_networkmanager_config.py | 125 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 168 insertions(+) create mode 100644 docs/shared_parsers_catalog/networkmanager_config.rst create mode 100644 insights/parsers/networkmanager_config.py create mode 100644 insights/parsers/tests/test_networkmanager_config.py diff --git a/docs/shared_parsers_catalog/networkmanager_config.rst b/docs/shared_parsers_catalog/networkmanager_config.rst new file mode 100644 index 000000000..cd85d4908 --- /dev/null +++ b/docs/shared_parsers_catalog/networkmanager_config.rst @@ -0,0 +1,3 
@@ +.. automodule:: insights.parsers.networkmanager_config + :members: + :show-inheritance: diff --git a/insights/parsers/networkmanager_config.py b/insights/parsers/networkmanager_config.py new file mode 100644 index 000000000..64db81e3d --- /dev/null +++ b/insights/parsers/networkmanager_config.py @@ -0,0 +1,38 @@ +""" +NetworkManagerConfig - file ``/etc/NetworkManager/NetworkManager.conf`` +======================================================================= + +The ``/etc/NetworkManager/NetworkManager.conf`` file is in a standard '.ini' format, +and this parser uses the IniConfigFile base class to read this. + +Given a file containing the following test data:: + + [main] + #plugins=ifcfg-rh,ibft + dhcp=dhclient + +Example: + >>> type(networkmanager_config_obj) + + >>> networkmanager_config_obj.get('main', 'dhcp') == 'dhclient' + True +""" + +from .. import parser, IniConfigFile +from insights.specs import Specs + + +@parser(Specs.networkmanager_conf) +class NetworkManagerConfig(IniConfigFile): + """ + A dict of the content of the ``NetworkManager.conf`` configuration file. + + Example selection of dictionary contents:: + + { + 'main': { + 'dhcp':'dhclient', + } + } + """ + pass diff --git a/insights/parsers/tests/test_networkmanager_config.py b/insights/parsers/tests/test_networkmanager_config.py new file mode 100644 index 000000000..042599855 --- /dev/null +++ b/insights/parsers/tests/test_networkmanager_config.py @@ -0,0 +1,125 @@ +from insights.parsers.networkmanager_config import NetworkManagerConfig +from insights.parsers import networkmanager_config +from insights.tests import context_wrap +import doctest + +NETWORKMANAGER_CONF = """ +# Configuration file for NetworkManager. +# +# See "man 5 NetworkManager.conf" for details. +# +# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/ +# can contain additional configuration snippets installed by packages. These files are +# read before NetworkManager.conf and have thus lowest priority. +# The directory /etc/NetworkManager/conf.d/ can contain additional configuration +# snippets. Those snippets are merged last and overwrite the settings from this main +# file. +# +# The files within one conf.d/ directory are read in asciibetical order. +# +# If /etc/NetworkManager/conf.d/ contains a file with the same name as +# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored. +# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can +# put an empty file to /etc with the same name. The same applies with respect +# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow +# /usr/lib and are themselves shadowed by files under /etc. +# +# If two files define the same key, the one that is read afterwards will overwrite +# the previous one. + +[main] +#plugins=ifcfg-rh,ibft +dhcp=dhclient + + +[logging] +# When debugging NetworkManager, enabling debug logging is of great help. +# +# Logfiles contain no passwords and little sensitive information. But please +# check before posting the file online. You can also personally hand over the +# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode. +# Please post full logfiles except minimal modifications of private data. 
+# +# You can also change the log-level at runtime via +# $ nmcli general logging level TRACE domains ALL +# However, usually it's cleaner to enable debug logging +# in the configuration and restart NetworkManager so that +# debug logging is enabled from the start. +# +# You will find the logfiles in syslog, for example via +# $ journalctl -u NetworkManager +# +# Note that debug logging of NetworkManager can be quite verbose. Some messages +# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst +# in man journald.conf). +# +#level=TRACE +#domains=ALL +""" + +NETWORKMANAGER_CONF_NOTMATCH = """ +# Configuration file for NetworkManager. +# +# See "man 5 NetworkManager.conf" for details. +# +# The directories /usr/lib/NetworkManager/conf.d/ and /var/run/NetworkManager/conf.d/ +# can contain additional configuration snippets installed by packages. These files are +# read before NetworkManager.conf and have thus lowest priority. +# The directory /etc/NetworkManager/conf.d/ can contain additional configuration +# snippets. Those snippets are merged last and overwrite the settings from this main +# file. +# +# The files within one conf.d/ directory are read in asciibetical order. +# +# If /etc/NetworkManager/conf.d/ contains a file with the same name as +# /usr/lib/NetworkManager/conf.d/, the latter file is shadowed and thus ignored. +# Hence, to disable loading a file from /usr/lib/NetworkManager/conf.d/ you can +# put an empty file to /etc with the same name. The same applies with respect +# to the directory /var/run/NetworkManager/conf.d where files in /var/run shadow +# /usr/lib and are themselves shadowed by files under /etc. +# +# If two files define the same key, the one that is read afterwards will overwrite +# the previous one. + +[logging] +# When debugging NetworkManager, enabling debug logging is of great help. +# +# Logfiles contain no passwords and little sensitive information. But please +# check before posting the file online. You can also personally hand over the +# logfile to a NM developer to treat it confidential. Meet us on #nm on freenode. +# Please post full logfiles except minimal modifications of private data. +# +# You can also change the log-level at runtime via +# $ nmcli general logging level TRACE domains ALL +# However, usually it's cleaner to enable debug logging +# in the configuration and restart NetworkManager so that +# debug logging is enabled from the start. +# +# You will find the logfiles in syslog, for example via +# $ journalctl -u NetworkManager +# +# Note that debug logging of NetworkManager can be quite verbose. Some messages +# might be rate-limited by the logging daemon (see RateLimitIntervalSec, RateLimitBurst +# in man journald.conf). 
+# +#level=TRACE +domains=ALL +""" + + +def test_networkmanager_config_match(): + result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF)) + assert result.get('main', 'dhcp') == 'dhclient' + + +def test_networkmanager_config_notmatch(): + result = NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF_NOTMATCH)) + assert result.has_option('main', 'dhcp') is False + + +def test_networkmanager_config_doc_examples(): + env = { + 'networkmanager_config_obj': NetworkManagerConfig(context_wrap(NETWORKMANAGER_CONF)), + } + failed, total = doctest.testmod(networkmanager_config, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ae83a312f..533208350 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -374,6 +374,7 @@ class Specs(SpecSet): netstat_i = RegistryPoint() netstat = RegistryPoint() netstat_s = RegistryPoint() + networkmanager_conf = RegistryPoint() networkmanager_dispatcher_d = RegistryPoint(multi_output=True) neutron_conf = RegistryPoint(filterable=True) neutron_sriov_agent = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index ddb92b035..04d893b54 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -461,6 +461,7 @@ def httpd_cmd(broker): netstat_agn = simple_command("/bin/netstat -agn") netstat_i = simple_command("/bin/netstat -i") netstat_s = simple_command("/bin/netstat -s") + networkmanager_conf = simple_file("/etc/NetworkManager/NetworkManager.conf") networkmanager_dispatcher_d = glob_file("/etc/NetworkManager/dispatcher.d/*-dhclient") neutron_conf = first_file(["/var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf", "/etc/neutron/neutron.conf"]) neutron_sriov_agent = first_file([ From 77ab8a3c3645339398b38d7ad1c5cbfb563734bb Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 29 Oct 2020 10:46:29 -0400 Subject: [PATCH 220/892] update developer notes for client directory --- insights/client/README.md | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/insights/client/README.md b/insights/client/README.md index 1165dd08b..86a2762ea 100644 --- a/insights/client/README.md +++ b/insights/client/README.md @@ -1,22 +1,7 @@ -## Developer Setup -Instructions are for RHSM-subscribed machines only. -1. Clone this repo and https://github.com/RedHatInsights/insights-client to the same directory. +## Insights Client (Core) Developer Notes -``` -$ git clone git@github.com:RedHatInsights/insights-client.git -$ git clone git@github.com:RedHatInsights/insights-core.git -``` -2. Build the egg and install the client. +* ### **See https://github.com/RedHatInsights/insights-client for build and usage instructions, and details about configuration and runtime.** -``` -$ cd insights-client -$ sh lay-the-eggs.sh -``` +* To rebuild the egg from source, run `./build_client_egg.sh` from the repo root. This will generate a file `insights.zip` that you can pass to `insights-client` with the `EGG` environment variable. -3. Run the client with the following options to disable GPG since this egg is unsigned. - -``` -$ sudo BYPASS_GPG=True EGG=/etc/insights-client/rpm.egg insights-client --no-gpg -``` - -4. Repeat steps 2 & 3 upon making code changes. The majority of the client code lives in this directory, `insights-core/insights/client`. +* The `uploader_json_map.json` file is **NOT** `uploader.json`. 
Its purpose is to serve as a compatibility layer between denylist configurations for classic collection and core collection. Changes to this file will not affect the commands or files that are collected. It is advised not to make changes to this file as it is copied from the production-ready uploader.json file at release time and not intended to be modified further. From 4031eb4fa80b3bec507c6901679bc6286ff6556d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 29 Oct 2020 13:43:30 -0400 Subject: [PATCH 221/892] add uploader json map to manifest for build (#2806) Signed-off-by: Jeremy Crafts --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index a1dde986d..74be9fcbd 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,4 +5,5 @@ include insights/COMMIT include insights/RELEASE include insights/filters.yaml include LICENSE +include insights/client/uploader_json_map.json graft insights/archive/repository/base_archives From 6e8115240d7261a102fa58ac86738915b2482635 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 29 Oct 2020 14:37:56 -0400 Subject: [PATCH 222/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 49 +++++++++++++++++++------- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index f52f4e782..ca2818396 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -231,6 +231,11 @@ "pattern": [], "symbolic_name": "docker_list_images" }, + { + "command": "/usr/bin/dotnet --version", + "pattern": [], + "symbolic_name": "dotnet_version" + }, { "command": "/bin/du -s -k /var/lib/candlepin/activemq-artemis", "pattern": [], @@ -1251,6 +1256,11 @@ "pattern": [], "symbolic_name": "rpm_V_packages" }, + { + "command": "python -m insights.tools.cat --no-header sap_hdb_version", + "pattern": [], + "symbolic_name": "sap_hdb_version" + }, { "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", "pattern": [], @@ -1393,6 +1403,16 @@ "pattern": [], "symbolic_name": "systool_b_scsi_v" }, + { + "command": "/usr/bin/testparm -s", + "pattern": [], + "symbolic_name": "testparm_s" + }, + { + "command": "/usr/bin/testparm -v -s", + "pattern": [], + "symbolic_name": "testparm_v_s" + }, { "command": "/usr/bin/find /usr/share -maxdepth 1 -name 'tomcat*' -exec /bin/grep -R -s 'VirtualDirContext' --include '*.xml' '{}' +", "pattern": [], @@ -1460,6 +1480,11 @@ } ], "files": [ + { + "file": "/etc/abrt/plugins/CCpp.conf", + "pattern": [], + "symbolic_name": "abrt_ccpp_conf" + }, { "file": "/root/.config/openshift/hosts", "pattern": [ @@ -2169,9 +2194,7 @@ { "file": "/etc/ipsec.conf", "pattern": [ - "config", - "include", - "plutodebug" + "include" ], "symbolic_name": "ipsec_conf" }, @@ -2313,6 +2336,7 @@ "Abort command issued", "Broken pipe", "Buffer I/O error on device", + "Cannot allocate memory", "Cannot assign requested address", "Cannot assign requested address: AH00072", "Connection amqps://subscription.rhn.redhat.com:5647 disconnected", @@ -2502,6 +2526,11 @@ "pattern": [], "symbolic_name": "netconsole" }, + { + "file": "/etc/NetworkManager/NetworkManager.conf", + "pattern": [], + "symbolic_name": "networkmanager_conf" + }, { "file": "/etc/NetworkManager/dispatcher.d/.*-dhclient", "pattern": [], @@ -2897,21 +2926,21 @@ { "file": "/etc/opt/rh/php73/php.ini", "pattern": [ - "post_max_size" + "[" ], "symbolic_name": "php_ini" }, { "file": 
"/etc/opt/rh/php72/php.ini", "pattern": [ - "post_max_size" + "[" ], "symbolic_name": "php_ini" }, { "file": "/etc/php.ini", "pattern": [ - "post_max_size" + "[" ], "symbolic_name": "php_ini" }, @@ -3484,20 +3513,16 @@ "file": "/var/log/vdsm/vdsm.log", "pattern": [ "(mailbox-spm) [storage.Misc.excCmd] /usr/bin/taskset --cpu-list", - "Bad volume specification", "Changed state to Down: 'NoneType' object has no attribute 'attrib'", "Changed state to Down: internal error: Attempted double use of PCI slot", "ERROR (mailbox-spm) [storage.MailBox.SpmMailMonitor]", - "INFO", "RPC call Host.setupNetworks failed", "Stopping connection", "The name org.fedoraproject.FirewallD1 was not provided by any .service files", "The vm start process failed", - "_report_inconsistency", "lastCheck", "libvirtError: internal error: failed to format device alias", - "looking for unfetched domain", - "storage.TaskManager.Task" + "looking for unfetched domain" ], "symbolic_name": "vdsm_log" }, @@ -4111,5 +4136,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-10-19T12:07:39.171878" + "version": "2020-10-22T11:31:29.359264" } From fcd1e7bde339f77edc9827c47c4a6bf29d5c7eed Mon Sep 17 00:00:00 2001 From: Stephen Date: Fri, 30 Oct 2020 12:06:44 -0400 Subject: [PATCH 223/892] Fix minor typo in registration log message (#2808) Without an extra space the log message looks wrong. `This machine has not been register.Use --register ...` Need a space after that `.` Signed-off-by: Stephen Adams --- insights/client/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/client/client.py b/insights/client/client.py index 59292283d..9d4c435f1 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -176,7 +176,7 @@ def _legacy_handle_registration(config, pconn): 're-register this machine.') else: # not yet registered - logger.info('This machine has not yet been registered.' + logger.info('This machine has not yet been registered. ' 'Use --register to register this machine.') return False From 746bcac029ca14fca874d0f61aa498ca7f64cc82 Mon Sep 17 00:00:00 2001 From: Stephen Date: Sun, 1 Nov 2020 20:14:41 -0500 Subject: [PATCH 224/892] Ensure hostname comparisons in SAP are accurate (#2809) There is a possibility that the FQDN of a system is used as the shortname in SAP configurations. We want to verify that the short hostname is used in these comparisons. 
Signed-off-by: Stephen Adams
---
 insights/combiners/sap.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py
index 92ab6ec3d..05b15e2e9 100644
--- a/insights/combiners/sap.py
+++ b/insights/combiners/sap.py
@@ -91,7 +91,7 @@ def __init__(self, hostname, insts, lssap):
         self.all_instances = insts.instances
         for inst in insts.data:
             k = inst['InstanceName']
-            self.local_instances.append(k) if hn == inst['Hostname'] else None
+            self.local_instances.append(k) if hn == inst['Hostname'].split('.')[0] else None
             data[k] = SAPInstances(k,
                                    inst['Hostname'],
                                    inst['SID'],
@@ -105,7 +105,7 @@ def __init__(self, hostname, insts, lssap):
             t = k.rstrip('1234567890')
             self.all_instances.append(k)
             self._types.add(t)
-            self.local_instances.append(k) if hn == inst['SAPLOCALHOST'] else None
+            self.local_instances.append(k) if hn == inst['SAPLOCALHOST'].split('.')[0] else None
             data[k] = SAPInstances(k,
                                    inst['SAPLOCALHOST'],
                                    inst['SID'],

From 249ca25ac61646e85417744c5d9630e6b8e05edb Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 4 Nov 2020 00:08:37 +0800
Subject: [PATCH 225/892] Support insights-cat sap_hdb_version temporarily
 (#2810)

Signed-off-by: Xiangce Liu
---
 insights/parsers/sap_hdb_version.py | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/insights/parsers/sap_hdb_version.py b/insights/parsers/sap_hdb_version.py
index bd0aabb26..01b718cef 100644
--- a/insights/parsers/sap_hdb_version.py
+++ b/insights/parsers/sap_hdb_version.py
@@ -6,13 +6,13 @@
 commands.
 """

-from .. import parser, CommandParser, LegacyItemAccess
+from insights import parser, CommandParser
 from insights.parsers import SkipException
 from insights.specs import Specs


 @parser(Specs.sap_hdb_version)
-class HDBVersion(CommandParser, LegacyItemAccess):
+class HDBVersion(CommandParser, dict):
     """
     Class for parsing the output of `HDB version` command.

@@ -59,17 +59,25 @@ class HDBVersion(CommandParser, LegacyItemAccess):
     """
     def parse_content(self, content):
+        _ignore_bad_lines = [
+            'HDB: Command not found',
+            'standard error',
+            'does not exist',
+        ]
         if len(content) <= 1:
             raise SkipException("Incorrect content.")
-        self.data = {}
+        data = {}
         self.sid = self.version = self.revision = None
         self.major = self.minor = self.patchlevel = None
         # get the "sid" from the file_name: "sudo_-iu_<sid>adm_HDB_version"
         if self.file_name and 'adm' in self.file_name:
             self.sid = [i for i in self.file_name.split('_') if i.endswith('adm')][0][:-3]
-        for line in content[1:]:
+        for line in content:
+            # Skip unexpected lines
+            if ':' not in line or any(i in line for i in _ignore_bad_lines):
+                continue
             key, val = [i.strip() for i in line.split(':', 1)]
-            self.data[key] = val
+            data[key] = val
             if key == 'version':
                 self.version = val
                 val_splits = val.split('.')
@@ -81,3 +89,9 @@ def parse_content(self, content):
                 self.patchlevel = val_splits[3]
         if not self.version:
             raise SkipException("Incorrect content.")
+
+        self.update(data)
+
+    @property
+    def data(self):
+        return self

From a8e84ac9e847f77184710e6fe160c41e80a1470a Mon Sep 17 00:00:00 2001
From: Jakub Svoboda
Date: Tue, 3 Nov 2020 18:02:06 +0000
Subject: [PATCH 226/892] Add a new parser and spec for postconf.
(#2796)

Signed-off-by: Jakub Svoboda
---
 docs/shared_parsers_catalog/postconf.rst |  3 ++
 insights/parsers/postconf.py             | 49 ++++++++++++++++++++
 insights/parsers/tests/test_postconf.py  | 53 ++++++++++++++++++++++++
 insights/specs/__init__.py               |  1 +
 insights/specs/default.py                |  1 +
 insights/specs/insights_archive.py       |  1 +
 6 files changed, 108 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/postconf.rst
 create mode 100644 insights/parsers/postconf.py
 create mode 100644 insights/parsers/tests/test_postconf.py

diff --git a/docs/shared_parsers_catalog/postconf.rst b/docs/shared_parsers_catalog/postconf.rst
new file mode 100644
index 000000000..0607ad58d
--- /dev/null
+++ b/docs/shared_parsers_catalog/postconf.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.postconf
+    :members:
+    :show-inheritance:
\ No newline at end of file
diff --git a/insights/parsers/postconf.py b/insights/parsers/postconf.py
new file mode 100644
index 000000000..2bcc5a623
--- /dev/null
+++ b/insights/parsers/postconf.py
@@ -0,0 +1,49 @@
+"""
+PostconfBuiltin - command ``postconf -C builtin``
+=================================================
+"""
+
+from insights.core import CommandParser
+from insights.core.plugins import parser
+from insights.specs import Specs
+from insights.parsers import SkipException
+
+
+@parser(Specs.postconf_builtin)
+class PostconfBuiltin(CommandParser, dict):
+    """
+    Class for parsing the ``postconf -C builtin`` command.
+    Sample input::
+
+        smtpd_tls_loglevel = 0
+        smtpd_tls_mandatory_ciphers = medium
+        smtpd_tls_mandatory_exclude_ciphers =
+        smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
+
+    Examples:
+        >>> type(postconf)
+        <class 'insights.parsers.postconf.PostconfBuiltin'>
+        >>> postconf['smtpd_tls_loglevel'] == '0'
+        True
+        >>> postconf['smtpd_tls_mandatory_ciphers'] == 'medium'
+        True
+        >>> postconf['smtpd_tls_mandatory_exclude_ciphers'] == ''
+        True
+        >>> postconf['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+        True
+    """
+
+    def parse_content(self, content):
+        if not content:
+            raise SkipException
+
+        data = dict()
+        for line in content:
+            if '=' in line:
+                key, value = [i.strip() for i in line.split('=', 1)]
+                data[key] = value
+
+        if not data:
+            raise SkipException
+
+        self.update(data)
diff --git a/insights/parsers/tests/test_postconf.py b/insights/parsers/tests/test_postconf.py
new file mode 100644
index 000000000..f6fc3538d
--- /dev/null
+++ b/insights/parsers/tests/test_postconf.py
@@ -0,0 +1,53 @@
+import pytest
+import doctest
+
+from insights.core import ContentException
+from insights.parsers import postconf, SkipException
+from insights.parsers.postconf import PostconfBuiltin
+from insights.tests import context_wrap
+
+V_OUT1 = """
+""".strip()
+
+V_OUT2 = """
+smtpd_tls_loglevel = 0
+smtpd_tls_mandatory_ciphers = medium
+smtpd_tls_mandatory_exclude_ciphers =
+smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
+""".strip()
+
+V_OUT3 = """
+command not found
+""".strip()
+
+
+def test_PostconfBuiltin():
+    with pytest.raises(SkipException):
+        PostconfBuiltin(context_wrap(V_OUT1))
+
+    with pytest.raises(ContentException):
+        PostconfBuiltin(context_wrap(V_OUT3))
+
+    p = PostconfBuiltin(context_wrap(V_OUT2))
+    assert p['smtpd_tls_loglevel'] == '0'
+    assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
+    assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
+    assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+
+
+def test_empty():
+    with pytest.raises(SkipException):
+        PostconfBuiltin(context_wrap(""))
+
+
+def test_invalid():
+    with pytest.raises(SkipException):
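+        # A bare token such as "asdf" contains no '=' separator, so nothing
+        # is parsed and parse_content raises SkipException rather than
+        # returning an empty mapping.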
PostconfBuiltin(context_wrap("asdf")) + + +def test_doc_examples(): + env = { + 'postconf': PostconfBuiltin(context_wrap(V_OUT2)), + } + failed, total = doctest.testmod(postconf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 533208350..b7e1e0ce4 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -474,6 +474,7 @@ class Specs(SpecSet): podman_image_inspect = RegistryPoint(multi_output=True) podman_list_containers = RegistryPoint() podman_list_images = RegistryPoint() + postconf_builtin = RegistryPoint(filterable=True) postgresql_conf = RegistryPoint() postgresql_log = RegistryPoint(multi_output=True, filterable=True) prev_uploader_log = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 04d893b54..dd8582ed4 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -533,6 +533,7 @@ def pcp_enabled(broker): pcs_status = simple_command("/usr/sbin/pcs status") php_ini = first_file(["/etc/opt/rh/php73/php.ini", "/etc/opt/rh/php72/php.ini", "/etc/php.ini"]) pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf") + postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postgresql_conf = first_file([ "/var/lib/pgsql/data/postgresql.conf", "/opt/rh/postgresql92/root/var/lib/pgsql/data/postgresql.conf", diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index d51d93b31..52eaca33e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -174,6 +174,7 @@ class InsightsArchiveSpecs(Specs): pcp_metrics = simple_file("insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5") pcs_quorum_status = simple_file("insights_commands/pcs_quorum_status") pcs_status = simple_file("insights_commands/pcs_status") + postconf_builtin = simple_file("insights_commands/postconf_-C_builtin") ps_alxwww = simple_file("insights_commands/ps_alxwww") ps_aux = simple_file("insights_commands/ps_aux") ps_auxcww = simple_file("insights_commands/ps_auxcww") From 333eb2b804c73e29cbad34e03ae07e67958d3cf0 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 4 Nov 2020 15:22:30 -0500 Subject: [PATCH 227/892] redact glob specs by symbolic name (#2804) * redact glob specs by symbolic name Signed-off-by: Jeremy Crafts --- insights/client/data_collector.py | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index d40917cf7..6b1226f58 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -260,12 +260,14 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): rm_conf = {} logger.debug('Beginning to run collection spec...') + rm_commands = rm_conf.get('commands', []) + rm_files = rm_conf.get('files', []) + for c in conf['commands']: # remember hostname archive path if c.get('symbolic_name') == 'hostname': self.hostname_path = os.path.join( 'insights_commands', mangle.mangle_command(c['command'])) - rm_commands = rm_conf.get('commands', []) if c['command'] in rm_commands or c.get('symbolic_name') in rm_commands: logger.warn("WARNING: Skipping command %s", c['command']) elif self.mountpoint == "/" or c.get("image"): @@ -282,7 +284,6 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): 'output_size': cmd_spec.output_size } for f in conf['files']: - rm_files = rm_conf.get('files', []) if f['file'] in rm_files or 
f.get('symbolic_name') in rm_files: logger.warn("WARNING: Skipping file %s", f['file']) else: @@ -300,17 +301,21 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): } if 'globs' in conf: for g in conf['globs']: - glob_specs = self._parse_glob_spec(g) - for g in glob_specs: - if g['file'] in rm_conf.get('files', []): - logger.warn("WARNING: Skipping file %s", g['file']) - else: - glob_spec = InsightsFile(g, self.mountpoint) - self.archive.add_to_archive(glob_spec) - collection_stats[g['file']] = { - 'exec_time': glob_spec.exec_time, - 'output_size': glob_spec.output_size - } + if g.get('symbolic_name') in rm_files: + # ignore glob via symbolic name + logger.warn("WARNING: Skipping file %s", g['glob']) + else: + glob_specs = self._parse_glob_spec(g) + for g in glob_specs: + if g['file'] in rm_files: + logger.warn("WARNING: Skipping file %s", g['file']) + else: + glob_spec = InsightsFile(g, self.mountpoint) + self.archive.add_to_archive(glob_spec) + collection_stats[g['file']] = { + 'exec_time': glob_spec.exec_time, + 'output_size': glob_spec.output_size + } logger.debug('Spec collection finished.') self.redact(rm_conf) From 100b18a5ad73e8cba11c32f200da1c966b210d85 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 5 Nov 2020 15:01:23 -0500 Subject: [PATCH 228/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 33 +++++++++++++++----------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index ca2818396..a4c912b0c 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -178,7 +178,6 @@ "TECH PREVIEW: NVMe over FC may not be fully supported.", "Uhhuh. NMI received for unknown reason", "VPD access disabled", - "WRITE SAME failed. Manually zeroing", "Warning: QLogic ISP3XXX Network Driver - this hardware has not undergone testing by Red Hat and might not be certified", "__cpufreq_add_dev", "blocked FC remote port time out: removing target and saving binding", @@ -1405,12 +1404,18 @@ }, { "command": "/usr/bin/testparm -s", - "pattern": [], + "pattern": [ + "Server role:", + "[" + ], "symbolic_name": "testparm_s" }, { "command": "/usr/bin/testparm -v -s", - "pattern": [], + "pattern": [ + "Server role:", + "[" + ], "symbolic_name": "testparm_v_s" }, { @@ -1482,7 +1487,9 @@ "files": [ { "file": "/etc/abrt/plugins/CCpp.conf", - "pattern": [], + "pattern": [ + "CreateCoreBacktrace" + ], "symbolic_name": "abrt_ccpp_conf" }, { @@ -1971,9 +1978,7 @@ { "file": "/var/log/ovirt-engine/server.log", "pattern": [ - "ERROR [org.jboss.as.controller.management-operation]", "INFO [org.wildfly.extension.undertow", - "Operation (\"deploy\") failed", "Registered web context: '/ovirt-engine/api' for server" ], "symbolic_name": "ovirt_engine_server_log" @@ -2350,7 +2355,6 @@ "Error deleting EBS Disk volume aws", "Error running DeviceResume dm_task_run failed", "Exception happened during processing of request from", - "Failed to bind socket: No such file or directory", "Failed to extend thin", "Hyper-V Host", "List /apis/image.openshift.io/v1/images", @@ -2377,14 +2381,12 @@ "Throttling request took", "TypeError: object of type 'NoneType' has no len()", "Virtualization daemon", - "WRITE SAME failed. 
Manually zeroing", "] trap divide error ", "_NET_ACTIVE_WINDOW", "as active slave; either", "belongs to docker.service", "callbacks suppressed", "canceled DHCP transaction, DHCP client pid", - "chardev: opening backend \"socket\" failed", "clearing Tx timestamp hang", "device-mapper: multipath: Failing path", "does not seem to be present, delaying initialization", @@ -2926,21 +2928,24 @@ { "file": "/etc/opt/rh/php73/php.ini", "pattern": [ - "[" + "[", + "post_max_size" ], "symbolic_name": "php_ini" }, { "file": "/etc/opt/rh/php72/php.ini", "pattern": [ - "[" + "[", + "post_max_size" ], "symbolic_name": "php_ini" }, { "file": "/etc/php.ini", "pattern": [ - "[" + "[", + "post_max_size" ], "symbolic_name": "php_ini" }, @@ -4136,5 +4141,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-10-22T11:31:29.359264" -} + "version": "2020-10-29T14:46:39.878759" +} \ No newline at end of file From 871e0c5062f5f374953b7f1c9df344bda87be9c6 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Fri, 6 Nov 2020 14:31:04 +0800 Subject: [PATCH 229/892] Add combiner for rsyslog_conf (#2121) * Add combiner for rsyslog_conf Signed-off-by: jiazhang * Fix up class Signed-off-by: jiazhang * Change self.data Signed-off-by: jiazhang * Remove self.config_items Signed-off-by: jiazhang * Use list as up class Signed-off-by: jiazhang * Add multi_output=True Signed-off-by: jiazhang * Add docstring Signed-off-by: jiazhang * Update parser rsyslog (#2732) Signed-off-by: jiazhang --- insights/combiners/rsyslog_confs.py | 43 ++++++ .../combiners/tests/test_rsyslog_confs.py | 138 ++++++++++++++++++ insights/parsers/rsyslog_conf.py | 110 +++++++------- insights/parsers/tests/test_rsyslog_conf.py | 78 +++++----- insights/specs/__init__.py | 2 +- insights/specs/default.py | 2 +- 6 files changed, 280 insertions(+), 93 deletions(-) create mode 100644 insights/combiners/rsyslog_confs.py create mode 100644 insights/combiners/tests/test_rsyslog_confs.py diff --git a/insights/combiners/rsyslog_confs.py b/insights/combiners/rsyslog_confs.py new file mode 100644 index 000000000..ae6b32709 --- /dev/null +++ b/insights/combiners/rsyslog_confs.py @@ -0,0 +1,43 @@ +""" +RsyslogConfAll - files ``/etc/rsyslog.conf`` and ``/etc/rsyslog.d/*.conf`` +========================================================================== + +Combiner for accessing all the rsyslog comfiguration files. There may be +multiple rsyslog configuration, and the main configuration file is +``/etc/rsyslog.conf``. This combiner will not check same option in multi +files, user needs to check this situation in plugin if it is necessary. + +""" +from insights.core.plugins import combiner +from insights.parsers.rsyslog_conf import RsyslogConf + + +@combiner(RsyslogConf) +class RsyslogAllConf(dict): + """ + Combiner for accessing all the rsyslog configuration files. + + Examples: + >>> type(confs) + + >>> len(confs) + 2 + >>> confs['/etc/rsyslog.conf'][0] + '$ModLoad imuxsock' + """ + def __init__(self, confs): + super(RsyslogAllConf, self).__init__() + data = {} + + # Combine rsyslog configuration files into one dict. Key is file name, value is content of configuration file. + for conf in confs: + if conf.file_path == "/etc/rsyslog.conf": + # Check if there is include option, if not, only parse /etc/rsyslog.conf even + # /etc/rsyslog.d/*.conf exist. 
+ if not any(["include(" in item or "$IncludeConfig" in item for item in conf]): + data.clear() + data[conf.file_path] = conf + break + data[conf.file_path] = conf + + self.update(data) diff --git a/insights/combiners/tests/test_rsyslog_confs.py b/insights/combiners/tests/test_rsyslog_confs.py new file mode 100644 index 000000000..f1e6f3110 --- /dev/null +++ b/insights/combiners/tests/test_rsyslog_confs.py @@ -0,0 +1,138 @@ +from insights.combiners.rsyslog_confs import RsyslogAllConf +from insights.combiners import rsyslog_confs +from insights.parsers.rsyslog_conf import RsyslogConf +from insights.tests import context_wrap +import doctest + + +RSYSLOG_CONF_MAIN_7 = r""" +$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) +$ModLoad imjournal # provides access to the systemd journal +$WorkDirectory /var/lib/rsyslog +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat +$IncludeConfig /etc/rsyslog.d/*.conf +$OmitLocalLogging on +$IMJournalStateFile imjournal.state +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +*.emerg :omusrmsg:* +uucp,news.crit /var/log/spooler +local7.* /var/log/boot.log +""".strip() + +RSYSLOG_CONF_MAIN_NO_INCLUDE_7 = r""" +$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) +$ModLoad imjournal # provides access to the systemd journal +$WorkDirectory /var/lib/rsyslog +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat +$OmitLocalLogging on +$IMJournalStateFile imjournal.state +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +*.emerg :omusrmsg:* +uucp,news.crit /var/log/spooler +local7.* /var/log/boot.log +""".strip() + +RSYSLOG_CONF_MAIN_CONF_DIR_7 = r""" +$WorkDirectory /tmp +*.info -/var/log/test.log +""".strip() + +RSYSLOG_CONF_MAIN_8 = r""" +module(load="imuxsock" # provides support for local system logging (e.g. via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="imjournal" # provides access to the systemd journal + StateFile="imjournal.state") # File to store the position in the journal +global(workDirectory="/var/lib/rsyslog") +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +include(file="/etc/rsyslog.d/*.conf" mode="optional") +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +""".strip() + +RSYSLOG_CONF_MAIN_NO_INCLUDE_8 = r""" +module(load="imuxsock" # provides support for local system logging (e.g. 
via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="imjournal" # provides access to the systemd journal + StateFile="imjournal.state") # File to store the position in the journal +global(workDirectory="/var/lib/rsyslog") +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +""".strip() + +RSYSLOG_CONF_MAIN_CONF_DIR_8 = r""" +*.info { + action( + type="omfile" + name="hehe" + file="/tmp/test") +} +""".strip() + + +def test_conf_7_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 2 + assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' + + +def test_conf_7_no_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_7, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 1 + assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' + + +def test_conf_8_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_8, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 2 + assert result['/etc/rsyslog.d/test.conf'] == ['*.info { action( type="omfile" name="hehe" file="/tmp/test") }'] + + +def test_conf_8_no_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_8, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 1 + assert result['/etc/rsyslog.conf'][2] == 'global(workDirectory="/var/lib/rsyslog")' + + +def test_rsyslog_confs_doc_examples(): + env = { + 'confs': RsyslogAllConf( + [ + RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path='/etc/rsyslog.conf')), + RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path='/etc/rsyslog.d/test.conf')) + ]) + } + failed, total = doctest.testmod(rsyslog_confs, globs=env) + assert failed == 0 diff --git a/insights/parsers/rsyslog_conf.py b/insights/parsers/rsyslog_conf.py index 09f17d497..d5bf96e6e 100644 --- a/insights/parsers/rsyslog_conf.py +++ b/insights/parsers/rsyslog_conf.py @@ -10,70 +10,78 @@ Due to high parsing complexity, this parser presents a simple line-based view of the file that meets the needs of the current rules. - -Example: - >>> content = ''' - ... :fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log - ... $ModLoad imtcp - ... $InputTCPServerRun 10514" - ... '''.strip() - >>> from insights.tests import context_wrap - >>> rsl = RsyslogConf(context_wrap(content)) - >>> len(rsl) - 3 - >>> len(list(rsl)) - 3 - >>> any('imtcp' in n for n in rsl) - True """ from .. 
import Parser, parser, get_active_lines - -import re from insights.specs import Specs +from insights.core.filters import add_filter + + +add_filter(Specs.rsyslog_conf, ["{", "}", "(", ")"]) @parser(Specs.rsyslog_conf) -class RsyslogConf(Parser): +class RsyslogConf(Parser, list): """ - Parses `/etc/rsyslog.conf` info simple lines. + Parses `/etc/rsyslog.conf` content. Skips lines that begin with hash ("#") or are only whitespace. Attributes: data (list): List of lines in the file that don't start with '#' and aren't whitespace. - config_items(dict): Configuration items opportunistically found in the - configuration file, with their values as given. + + Example: + >>> type(rsysconf) + + >>> len(rsysconf) + 13 + >>> rsysconf[2] + 'authpriv.* /var/log/secure' """ + def __init__(self, *args, **kwargs): + super(RsyslogConf, self).__init__(*args, **kwargs) def parse_content(self, content): - self.data = get_active_lines(content) - - self.config_items = {} - # Config items are e.g. "$Word value #optional comment" - config_re = re.compile(r'^\s*\$(?P\S+)\s+(?P.*?)(?:\s+#.*)?$') - for line in self.data: - lstrip = line.strip() - match = config_re.match(lstrip) - if match: - self.config_items[match.group('name')] = match.group('value') - - def config_val(self, item, default=None): - """ - Return the given configuration item, or the default if not defined. - - Parameters: - item(str): The configuration item name - default: The default if the item is not found (defaults to None) - - Returns: - The related value in the `config_items` dictionary. - """ - return self.config_items.get(item, default) - - def __len__(self): - return len(self.data) - - def __iter__(self): - for d in self.data: - yield d + data = [] + brace_flag = False + parenthesis_flag = False + parenthesis_string = "" + brace_string = "" + + for line in get_active_lines(content): + l_strip = line.strip() + # Combine multi lines in brace into one line + if brace_flag: + brace_string = brace_string + " " + l_strip + if "}" in l_strip: + data.append(brace_string) + brace_string = "" + brace_flag = False + continue + else: + if "{" in l_strip: + if "}" in l_strip: + data.append(l_strip) + else: + brace_flag = True + brace_string = l_strip + continue + # Combine multi lines in parenthesis and not in brace into one line + if parenthesis_flag: + parenthesis_string = parenthesis_string + " " + l_strip + if ")" in l_strip: + data.append(parenthesis_string) + parenthesis_string = "" + parenthesis_flag = False + continue + else: + if "(" in l_strip: + if ")" in l_strip: + data.append(l_strip) + else: + parenthesis_flag = True + parenthesis_string = l_strip + continue + else: + data.append(l_strip) + self.extend(data) diff --git a/insights/parsers/tests/test_rsyslog_conf.py b/insights/parsers/tests/test_rsyslog_conf.py index 5aee79871..a7ff1b129 100644 --- a/insights/parsers/tests/test_rsyslog_conf.py +++ b/insights/parsers/tests/test_rsyslog_conf.py @@ -1,51 +1,49 @@ +import doctest from insights.tests import context_wrap +from insights.parsers import rsyslog_conf from insights.parsers.rsyslog_conf import RsyslogConf -RSYSLOG_CONF_0 = """ -:fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log -$ModLoad imtcp -$InputTCPServerRun 10514 -$template SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n" -$WorkDirectory /var/opt/rsyslog # where to place spool files -""".strip() -RSYSLOG_CONF_1 = r""" +RSYSLOG_CONF = r""" # Provides TCP syslog reception -#$ModLoad imtcp.so 
-#$InputTCPServerRun 514 :msg, regex, "\/vob\/.*\.[cpp|c|java]" /var/log/appMessages +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +$ModLoad imtcp +$InputTCPServerRun 10514 +module(load="imuxsock" # provides support for local system logging (e.g. via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +include(file="/etc/rsyslog.d/*.conf" mode="optional") +global(workDirectory="/var/lib/rsyslog") +*.info { + action( + type="omfile" + name="hehe" + file="/tmp/testnimei") +} +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +include(file="/etc/rsyslog.d/*.conf" mode="optional") +cron.* /var/log/cron """.strip() -def test_rsyslog_conf_0(): - ctx = context_wrap(RSYSLOG_CONF_0) - m = RsyslogConf(ctx) - d = list(m) - assert len(m) == 5 - assert len(d) == 5 - # Test configuration item detection in dictionary - assert hasattr(m, 'config_items') - assert isinstance(m.config_items, dict) - assert 'ModLoad' in m.config_items - assert m.config_items['ModLoad'] == 'imtcp' - assert m.config_items['InputTCPServerRun'] == '10514' - assert m.config_items['template'] == 'SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n"' - assert 'ForwardSyslogHost' not in m.config_items - # configuration items should not include the comment. - assert 'WorkDirectory' in m.config_items - assert m.config_items['WorkDirectory'] == '/var/opt/rsyslog' - # Test configuration item accessor - assert hasattr(m, 'config_val') - assert m.config_val('ModLoad') == 'imtcp' - assert m.config_val('ForwardSyslogHost', 'syslog.example.com') == 'syslog.example.com' +def test_rsyslog_conf_1(): + rsyslogconf = RsyslogConf(context_wrap(RSYSLOG_CONF)) + assert len(rsyslogconf) == 13 + assert rsyslogconf[2] == "authpriv.* /var/log/secure" + assert rsyslogconf[9] == """*.info { action( type="omfile" name="hehe" file="/tmp/testnimei") }""" -def test_rsyslog_conf_1(): - ctx = context_wrap(RSYSLOG_CONF_1) - m = RsyslogConf(ctx) - d = list(m) - assert len(m) == 1 - assert len(d) == 1 - # Test that commented-out config items are not detected - assert 'ModLoad' not in m.config_items - assert 'InputTCPServerRun' not in m.config_items +def test_rsyslog_doc_examples(): + env = { + 'rsysconf': RsyslogConf(context_wrap(RSYSLOG_CONF)), + } + failed, total = doctest.testmod(rsyslog_conf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index b7e1e0ce4..ec6d0f4d6 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -537,7 +537,7 @@ class Specs(SpecSet): root_crontab = RegistryPoint() route = RegistryPoint() rpm_V_packages = RegistryPoint() - rsyslog_conf = RegistryPoint(filterable=True) + rsyslog_conf = RegistryPoint(filterable=True, multi_output=True) samba = RegistryPoint(filterable=True) sap_dev_disp = RegistryPoint(multi_output=True, filterable=True) sap_dev_rd = RegistryPoint(multi_output=True, filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index dd8582ed4..f386badcb 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -578,7 +578,7 @@ def pcp_enabled(broker): rhsm_releasever = 
simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) - rsyslog_conf = simple_file("/etc/rsyslog.conf") + rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/test.conf"]) samba = simple_file("/etc/samba/smb.conf") @datasource(Sap) From 5237e30b382078a5f92577304bd5a6a782855a69 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Fri, 6 Nov 2020 12:01:33 +0530 Subject: [PATCH 230/892] Add RHEL8.3 kernel to uname (#2817) Signed-off-by: Rohan Arora --- insights/parsers/uname.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py index c917bf036..aace685f0 100644 --- a/insights/parsers/uname.py +++ b/insights/parsers/uname.py @@ -103,7 +103,8 @@ "3.10.0-1160": "7.9", "4.18.0-80": "8.0", "4.18.0-147": "8.1", - "4.18.0-193": "8.2" + "4.18.0-193": "8.2", + "4.18.0-240": "8.3", } release_to_kernel_map = dict((v, k) for k, v in rhel_release_map.items()) From bd889d303f4874703155e90fa3d1afef18e5954b Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 6 Nov 2020 15:02:05 +0800 Subject: [PATCH 231/892] Revert "Add combiner for rsyslog_conf (#2121)" (#2818) This reverts commit 871e0c5062f5f374953b7f1c9df344bda87be9c6. --- insights/combiners/rsyslog_confs.py | 43 ------ .../combiners/tests/test_rsyslog_confs.py | 138 ------------------ insights/parsers/rsyslog_conf.py | 110 +++++++------- insights/parsers/tests/test_rsyslog_conf.py | 78 +++++----- insights/specs/__init__.py | 2 +- insights/specs/default.py | 2 +- 6 files changed, 93 insertions(+), 280 deletions(-) delete mode 100644 insights/combiners/rsyslog_confs.py delete mode 100644 insights/combiners/tests/test_rsyslog_confs.py diff --git a/insights/combiners/rsyslog_confs.py b/insights/combiners/rsyslog_confs.py deleted file mode 100644 index ae6b32709..000000000 --- a/insights/combiners/rsyslog_confs.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -RsyslogConfAll - files ``/etc/rsyslog.conf`` and ``/etc/rsyslog.d/*.conf`` -========================================================================== - -Combiner for accessing all the rsyslog comfiguration files. There may be -multiple rsyslog configuration, and the main configuration file is -``/etc/rsyslog.conf``. This combiner will not check same option in multi -files, user needs to check this situation in plugin if it is necessary. - -""" -from insights.core.plugins import combiner -from insights.parsers.rsyslog_conf import RsyslogConf - - -@combiner(RsyslogConf) -class RsyslogAllConf(dict): - """ - Combiner for accessing all the rsyslog configuration files. - - Examples: - >>> type(confs) - - >>> len(confs) - 2 - >>> confs['/etc/rsyslog.conf'][0] - '$ModLoad imuxsock' - """ - def __init__(self, confs): - super(RsyslogAllConf, self).__init__() - data = {} - - # Combine rsyslog configuration files into one dict. Key is file name, value is content of configuration file. - for conf in confs: - if conf.file_path == "/etc/rsyslog.conf": - # Check if there is include option, if not, only parse /etc/rsyslog.conf even - # /etc/rsyslog.d/*.conf exist. 
- if not any(["include(" in item or "$IncludeConfig" in item for item in conf]): - data.clear() - data[conf.file_path] = conf - break - data[conf.file_path] = conf - - self.update(data) diff --git a/insights/combiners/tests/test_rsyslog_confs.py b/insights/combiners/tests/test_rsyslog_confs.py deleted file mode 100644 index f1e6f3110..000000000 --- a/insights/combiners/tests/test_rsyslog_confs.py +++ /dev/null @@ -1,138 +0,0 @@ -from insights.combiners.rsyslog_confs import RsyslogAllConf -from insights.combiners import rsyslog_confs -from insights.parsers.rsyslog_conf import RsyslogConf -from insights.tests import context_wrap -import doctest - - -RSYSLOG_CONF_MAIN_7 = r""" -$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) -$ModLoad imjournal # provides access to the systemd journal -$WorkDirectory /var/lib/rsyslog -$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat -$IncludeConfig /etc/rsyslog.d/*.conf -$OmitLocalLogging on -$IMJournalStateFile imjournal.state -*.info;mail.none;authpriv.none;cron.none /var/log/messages -authpriv.* /var/log/secure -mail.* -/var/log/maillog -cron.* /var/log/cron -*.emerg :omusrmsg:* -uucp,news.crit /var/log/spooler -local7.* /var/log/boot.log -""".strip() - -RSYSLOG_CONF_MAIN_NO_INCLUDE_7 = r""" -$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) -$ModLoad imjournal # provides access to the systemd journal -$WorkDirectory /var/lib/rsyslog -$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat -$OmitLocalLogging on -$IMJournalStateFile imjournal.state -*.info;mail.none;authpriv.none;cron.none /var/log/messages -authpriv.* /var/log/secure -mail.* -/var/log/maillog -cron.* /var/log/cron -*.emerg :omusrmsg:* -uucp,news.crit /var/log/spooler -local7.* /var/log/boot.log -""".strip() - -RSYSLOG_CONF_MAIN_CONF_DIR_7 = r""" -$WorkDirectory /tmp -*.info -/var/log/test.log -""".strip() - -RSYSLOG_CONF_MAIN_8 = r""" -module(load="imuxsock" # provides support for local system logging (e.g. via logger command) - SysSock.Use="off") # Turn off message reception via local log socket; -module(load="imjournal" # provides access to the systemd journal - StateFile="imjournal.state") # File to store the position in the journal -global(workDirectory="/var/lib/rsyslog") -module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") -include(file="/etc/rsyslog.d/*.conf" mode="optional") -*.info;mail.none;authpriv.none;cron.none /var/log/messages -authpriv.* /var/log/secure -mail.* -/var/log/maillog -cron.* /var/log/cron -if $programname == 'prog1' then { - action(type="omfile" file="/var/log/prog1.log") - if $msg contains 'test' then - action(type="omfile" file="/var/log/prog1test.log") - else - action(type="omfile" file="/var/log/prog1notest.log") -} -""".strip() - -RSYSLOG_CONF_MAIN_NO_INCLUDE_8 = r""" -module(load="imuxsock" # provides support for local system logging (e.g. 
via logger command) - SysSock.Use="off") # Turn off message reception via local log socket; -module(load="imjournal" # provides access to the systemd journal - StateFile="imjournal.state") # File to store the position in the journal -global(workDirectory="/var/lib/rsyslog") -module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") -*.info;mail.none;authpriv.none;cron.none /var/log/messages -authpriv.* /var/log/secure -mail.* -/var/log/maillog -cron.* /var/log/cron -if $programname == 'prog1' then { - action(type="omfile" file="/var/log/prog1.log") - if $msg contains 'test' then - action(type="omfile" file="/var/log/prog1test.log") - else - action(type="omfile" file="/var/log/prog1notest.log") -} -""".strip() - -RSYSLOG_CONF_MAIN_CONF_DIR_8 = r""" -*.info { - action( - type="omfile" - name="hehe" - file="/tmp/test") -} -""".strip() - - -def test_conf_7_include_dir(): - rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path="/etc/rsyslog.conf")) - rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) - result = RsyslogAllConf([rsyslog1, rsyslog2]) - assert len(result) == 2 - assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' - - -def test_conf_7_no_include_dir(): - rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_7, path="/etc/rsyslog.conf")) - rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) - result = RsyslogAllConf([rsyslog1, rsyslog2]) - assert len(result) == 1 - assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' - - -def test_conf_8_include_dir(): - rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_8, path="/etc/rsyslog.conf")) - rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) - result = RsyslogAllConf([rsyslog1, rsyslog2]) - assert len(result) == 2 - assert result['/etc/rsyslog.d/test.conf'] == ['*.info { action( type="omfile" name="hehe" file="/tmp/test") }'] - - -def test_conf_8_no_include_dir(): - rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_8, path="/etc/rsyslog.conf")) - rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) - result = RsyslogAllConf([rsyslog1, rsyslog2]) - assert len(result) == 1 - assert result['/etc/rsyslog.conf'][2] == 'global(workDirectory="/var/lib/rsyslog")' - - -def test_rsyslog_confs_doc_examples(): - env = { - 'confs': RsyslogAllConf( - [ - RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path='/etc/rsyslog.conf')), - RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path='/etc/rsyslog.d/test.conf')) - ]) - } - failed, total = doctest.testmod(rsyslog_confs, globs=env) - assert failed == 0 diff --git a/insights/parsers/rsyslog_conf.py b/insights/parsers/rsyslog_conf.py index d5bf96e6e..09f17d497 100644 --- a/insights/parsers/rsyslog_conf.py +++ b/insights/parsers/rsyslog_conf.py @@ -10,78 +10,70 @@ Due to high parsing complexity, this parser presents a simple line-based view of the file that meets the needs of the current rules. + +Example: + >>> content = ''' + ... :fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log + ... $ModLoad imtcp + ... $InputTCPServerRun 10514" + ... '''.strip() + >>> from insights.tests import context_wrap + >>> rsl = RsyslogConf(context_wrap(content)) + >>> len(rsl) + 3 + >>> len(list(rsl)) + 3 + >>> any('imtcp' in n for n in rsl) + True """ from .. 
import Parser, parser, get_active_lines -from insights.specs import Specs -from insights.core.filters import add_filter - -add_filter(Specs.rsyslog_conf, ["{", "}", "(", ")"]) +import re +from insights.specs import Specs @parser(Specs.rsyslog_conf) -class RsyslogConf(Parser, list): +class RsyslogConf(Parser): """ - Parses `/etc/rsyslog.conf` content. + Parses `/etc/rsyslog.conf` info simple lines. Skips lines that begin with hash ("#") or are only whitespace. Attributes: data (list): List of lines in the file that don't start with '#' and aren't whitespace. - - Example: - >>> type(rsysconf) - - >>> len(rsysconf) - 13 - >>> rsysconf[2] - 'authpriv.* /var/log/secure' + config_items(dict): Configuration items opportunistically found in the + configuration file, with their values as given. """ - def __init__(self, *args, **kwargs): - super(RsyslogConf, self).__init__(*args, **kwargs) def parse_content(self, content): - data = [] - brace_flag = False - parenthesis_flag = False - parenthesis_string = "" - brace_string = "" - - for line in get_active_lines(content): - l_strip = line.strip() - # Combine multi lines in brace into one line - if brace_flag: - brace_string = brace_string + " " + l_strip - if "}" in l_strip: - data.append(brace_string) - brace_string = "" - brace_flag = False - continue - else: - if "{" in l_strip: - if "}" in l_strip: - data.append(l_strip) - else: - brace_flag = True - brace_string = l_strip - continue - # Combine multi lines in parenthesis and not in brace into one line - if parenthesis_flag: - parenthesis_string = parenthesis_string + " " + l_strip - if ")" in l_strip: - data.append(parenthesis_string) - parenthesis_string = "" - parenthesis_flag = False - continue - else: - if "(" in l_strip: - if ")" in l_strip: - data.append(l_strip) - else: - parenthesis_flag = True - parenthesis_string = l_strip - continue - else: - data.append(l_strip) - self.extend(data) + self.data = get_active_lines(content) + + self.config_items = {} + # Config items are e.g. "$Word value #optional comment" + config_re = re.compile(r'^\s*\$(?P\S+)\s+(?P.*?)(?:\s+#.*)?$') + for line in self.data: + lstrip = line.strip() + match = config_re.match(lstrip) + if match: + self.config_items[match.group('name')] = match.group('value') + + def config_val(self, item, default=None): + """ + Return the given configuration item, or the default if not defined. + + Parameters: + item(str): The configuration item name + default: The default if the item is not found (defaults to None) + + Returns: + The related value in the `config_items` dictionary. 
+ """ + return self.config_items.get(item, default) + + def __len__(self): + return len(self.data) + + def __iter__(self): + for d in self.data: + yield d diff --git a/insights/parsers/tests/test_rsyslog_conf.py b/insights/parsers/tests/test_rsyslog_conf.py index a7ff1b129..5aee79871 100644 --- a/insights/parsers/tests/test_rsyslog_conf.py +++ b/insights/parsers/tests/test_rsyslog_conf.py @@ -1,49 +1,51 @@ -import doctest from insights.tests import context_wrap -from insights.parsers import rsyslog_conf from insights.parsers.rsyslog_conf import RsyslogConf +RSYSLOG_CONF_0 = """ +:fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log +$ModLoad imtcp +$InputTCPServerRun 10514 +$template SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n" +$WorkDirectory /var/opt/rsyslog # where to place spool files +""".strip() -RSYSLOG_CONF = r""" +RSYSLOG_CONF_1 = r""" # Provides TCP syslog reception +#$ModLoad imtcp.so +#$InputTCPServerRun 514 :msg, regex, "\/vob\/.*\.[cpp|c|java]" /var/log/appMessages -*.info;mail.none;authpriv.none;cron.none /var/log/messages -authpriv.* /var/log/secure -$ModLoad imtcp -$InputTCPServerRun 10514 -module(load="imuxsock" # provides support for local system logging (e.g. via logger command) - SysSock.Use="off") # Turn off message reception via local log socket; -module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") -include(file="/etc/rsyslog.d/*.conf" mode="optional") -global(workDirectory="/var/lib/rsyslog") -*.info { - action( - type="omfile" - name="hehe" - file="/tmp/testnimei") -} -if $programname == 'prog1' then { - action(type="omfile" file="/var/log/prog1.log") - if $msg contains 'test' then - action(type="omfile" file="/var/log/prog1test.log") - else - action(type="omfile" file="/var/log/prog1notest.log") -} -include(file="/etc/rsyslog.d/*.conf" mode="optional") -cron.* /var/log/cron """.strip() -def test_rsyslog_conf_1(): - rsyslogconf = RsyslogConf(context_wrap(RSYSLOG_CONF)) - assert len(rsyslogconf) == 13 - assert rsyslogconf[2] == "authpriv.* /var/log/secure" - assert rsyslogconf[9] == """*.info { action( type="omfile" name="hehe" file="/tmp/testnimei") }""" +def test_rsyslog_conf_0(): + ctx = context_wrap(RSYSLOG_CONF_0) + m = RsyslogConf(ctx) + d = list(m) + assert len(m) == 5 + assert len(d) == 5 + # Test configuration item detection in dictionary + assert hasattr(m, 'config_items') + assert isinstance(m.config_items, dict) + assert 'ModLoad' in m.config_items + assert m.config_items['ModLoad'] == 'imtcp' + assert m.config_items['InputTCPServerRun'] == '10514' + assert m.config_items['template'] == 'SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n"' + assert 'ForwardSyslogHost' not in m.config_items + # configuration items should not include the comment. 
+ assert 'WorkDirectory' in m.config_items + assert m.config_items['WorkDirectory'] == '/var/opt/rsyslog' + # Test configuration item accessor + assert hasattr(m, 'config_val') + assert m.config_val('ModLoad') == 'imtcp' + assert m.config_val('ForwardSyslogHost', 'syslog.example.com') == 'syslog.example.com' -def test_rsyslog_doc_examples(): - env = { - 'rsysconf': RsyslogConf(context_wrap(RSYSLOG_CONF)), - } - failed, total = doctest.testmod(rsyslog_conf, globs=env) - assert failed == 0 +def test_rsyslog_conf_1(): + ctx = context_wrap(RSYSLOG_CONF_1) + m = RsyslogConf(ctx) + d = list(m) + assert len(m) == 1 + assert len(d) == 1 + # Test that commented-out config items are not detected + assert 'ModLoad' not in m.config_items + assert 'InputTCPServerRun' not in m.config_items diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ec6d0f4d6..b7e1e0ce4 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -537,7 +537,7 @@ class Specs(SpecSet): root_crontab = RegistryPoint() route = RegistryPoint() rpm_V_packages = RegistryPoint() - rsyslog_conf = RegistryPoint(filterable=True, multi_output=True) + rsyslog_conf = RegistryPoint(filterable=True) samba = RegistryPoint(filterable=True) sap_dev_disp = RegistryPoint(multi_output=True, filterable=True) sap_dev_rd = RegistryPoint(multi_output=True, filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index f386badcb..dd8582ed4 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -578,7 +578,7 @@ def pcp_enabled(broker): rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) - rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/test.conf"]) + rsyslog_conf = simple_file("/etc/rsyslog.conf") samba = simple_file("/etc/samba/smb.conf") @datasource(Sap) From 2388cc6f7c32838a449c7466cf015287f64b1e9d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Mon, 9 Nov 2020 10:03:34 +0800 Subject: [PATCH 232/892] Revert "Revert "Add combiner for rsyslog_conf (#2121)" (#2818)" (#2819) This reverts commit bd889d303f4874703155e90fa3d1afef18e5954b. --- insights/combiners/rsyslog_confs.py | 43 ++++++ .../combiners/tests/test_rsyslog_confs.py | 138 ++++++++++++++++++ insights/parsers/rsyslog_conf.py | 110 +++++++------- insights/parsers/tests/test_rsyslog_conf.py | 78 +++++----- insights/specs/__init__.py | 2 +- insights/specs/default.py | 2 +- 6 files changed, 280 insertions(+), 93 deletions(-) create mode 100644 insights/combiners/rsyslog_confs.py create mode 100644 insights/combiners/tests/test_rsyslog_confs.py diff --git a/insights/combiners/rsyslog_confs.py b/insights/combiners/rsyslog_confs.py new file mode 100644 index 000000000..ae6b32709 --- /dev/null +++ b/insights/combiners/rsyslog_confs.py @@ -0,0 +1,43 @@ +""" +RsyslogConfAll - files ``/etc/rsyslog.conf`` and ``/etc/rsyslog.d/*.conf`` +========================================================================== + +Combiner for accessing all the rsyslog comfiguration files. There may be +multiple rsyslog configuration, and the main configuration file is +``/etc/rsyslog.conf``. This combiner will not check same option in multi +files, user needs to check this situation in plugin if it is necessary. 
+ +""" +from insights.core.plugins import combiner +from insights.parsers.rsyslog_conf import RsyslogConf + + +@combiner(RsyslogConf) +class RsyslogAllConf(dict): + """ + Combiner for accessing all the rsyslog configuration files. + + Examples: + >>> type(confs) + + >>> len(confs) + 2 + >>> confs['/etc/rsyslog.conf'][0] + '$ModLoad imuxsock' + """ + def __init__(self, confs): + super(RsyslogAllConf, self).__init__() + data = {} + + # Combine rsyslog configuration files into one dict. Key is file name, value is content of configuration file. + for conf in confs: + if conf.file_path == "/etc/rsyslog.conf": + # Check if there is include option, if not, only parse /etc/rsyslog.conf even + # /etc/rsyslog.d/*.conf exist. + if not any(["include(" in item or "$IncludeConfig" in item for item in conf]): + data.clear() + data[conf.file_path] = conf + break + data[conf.file_path] = conf + + self.update(data) diff --git a/insights/combiners/tests/test_rsyslog_confs.py b/insights/combiners/tests/test_rsyslog_confs.py new file mode 100644 index 000000000..f1e6f3110 --- /dev/null +++ b/insights/combiners/tests/test_rsyslog_confs.py @@ -0,0 +1,138 @@ +from insights.combiners.rsyslog_confs import RsyslogAllConf +from insights.combiners import rsyslog_confs +from insights.parsers.rsyslog_conf import RsyslogConf +from insights.tests import context_wrap +import doctest + + +RSYSLOG_CONF_MAIN_7 = r""" +$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) +$ModLoad imjournal # provides access to the systemd journal +$WorkDirectory /var/lib/rsyslog +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat +$IncludeConfig /etc/rsyslog.d/*.conf +$OmitLocalLogging on +$IMJournalStateFile imjournal.state +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +*.emerg :omusrmsg:* +uucp,news.crit /var/log/spooler +local7.* /var/log/boot.log +""".strip() + +RSYSLOG_CONF_MAIN_NO_INCLUDE_7 = r""" +$ModLoad imuxsock # provides support for local system logging (e.g. via logger command) +$ModLoad imjournal # provides access to the systemd journal +$WorkDirectory /var/lib/rsyslog +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat +$OmitLocalLogging on +$IMJournalStateFile imjournal.state +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +*.emerg :omusrmsg:* +uucp,news.crit /var/log/spooler +local7.* /var/log/boot.log +""".strip() + +RSYSLOG_CONF_MAIN_CONF_DIR_7 = r""" +$WorkDirectory /tmp +*.info -/var/log/test.log +""".strip() + +RSYSLOG_CONF_MAIN_8 = r""" +module(load="imuxsock" # provides support for local system logging (e.g. 
via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="imjournal" # provides access to the systemd journal + StateFile="imjournal.state") # File to store the position in the journal +global(workDirectory="/var/lib/rsyslog") +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +include(file="/etc/rsyslog.d/*.conf" mode="optional") +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +""".strip() + +RSYSLOG_CONF_MAIN_NO_INCLUDE_8 = r""" +module(load="imuxsock" # provides support for local system logging (e.g. via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="imjournal" # provides access to the systemd journal + StateFile="imjournal.state") # File to store the position in the journal +global(workDirectory="/var/lib/rsyslog") +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +*.info;mail.none;authpriv.none;cron.none /var/log/messages +authpriv.* /var/log/secure +mail.* -/var/log/maillog +cron.* /var/log/cron +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +""".strip() + +RSYSLOG_CONF_MAIN_CONF_DIR_8 = r""" +*.info { + action( + type="omfile" + name="hehe" + file="/tmp/test") +} +""".strip() + + +def test_conf_7_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 2 + assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' + + +def test_conf_7_no_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_7, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 1 + assert result['/etc/rsyslog.conf'][0] == '$ModLoad imuxsock' + + +def test_conf_8_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_8, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 2 + assert result['/etc/rsyslog.d/test.conf'] == ['*.info { action( type="omfile" name="hehe" file="/tmp/test") }'] + + +def test_conf_8_no_include_dir(): + rsyslog1 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_NO_INCLUDE_8, path="/etc/rsyslog.conf")) + rsyslog2 = RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_8, path="/etc/rsyslog.d/test.conf")) + result = RsyslogAllConf([rsyslog1, rsyslog2]) + assert len(result) == 1 + assert result['/etc/rsyslog.conf'][2] == 'global(workDirectory="/var/lib/rsyslog")' + + +def test_rsyslog_confs_doc_examples(): + env = { + 'confs': RsyslogAllConf( + [ + RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_7, path='/etc/rsyslog.conf')), + RsyslogConf(context_wrap(RSYSLOG_CONF_MAIN_CONF_DIR_7, 
path='/etc/rsyslog.d/test.conf'))
+            ])
+    }
+    failed, total = doctest.testmod(rsyslog_confs, globs=env)
+    assert failed == 0
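
As a usage sketch only (not part of this patch): the combiner behaves like a
dict keyed by file path, so a hypothetical downstream rule needs nothing but
ordinary dict and list operations. The ``@rule``/``make_fail`` plumbing is
standard insights-core API; the response key below is made up::

    from insights.core.plugins import make_fail, rule
    from insights.combiners.rsyslog_confs import RsyslogAllConf

    @rule(RsyslogAllConf)
    def report(confs):
        # Each value is a parsed RsyslogConf, i.e. a list of active lines.
        for path, conf in confs.items():
            if any("imtcp" in line for line in conf):
                return make_fail("RSYSLOG_TCP_INPUT_ENABLED", file=path)
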
diff --git a/insights/parsers/rsyslog_conf.py b/insights/parsers/rsyslog_conf.py
index 09f17d497..d5bf96e6e 100644
--- a/insights/parsers/rsyslog_conf.py
+++ b/insights/parsers/rsyslog_conf.py
@@ -10,70 +10,78 @@
 Due to high parsing complexity, this parser presents a simple line-based
 view of the file that meets the needs of the current rules.
-
-Example:
-    >>> content = '''
-    ... :fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log
-    ... $ModLoad imtcp
-    ... $InputTCPServerRun 10514"
-    ... '''.strip()
-    >>> from insights.tests import context_wrap
-    >>> rsl = RsyslogConf(context_wrap(content))
-    >>> len(rsl)
-    3
-    >>> len(list(rsl))
-    3
-    >>> any('imtcp' in n for n in rsl)
-    True
 """
 from .. import Parser, parser, get_active_lines
-
-import re
 from insights.specs import Specs
+from insights.core.filters import add_filter
+
+
+add_filter(Specs.rsyslog_conf, ["{", "}", "(", ")"])
 
 
 @parser(Specs.rsyslog_conf)
-class RsyslogConf(Parser):
+class RsyslogConf(Parser, list):
     """
-    Parses `/etc/rsyslog.conf` info simple lines.
+    Parses `/etc/rsyslog.conf` content.
 
     Skips lines that begin with hash ("#") or are only whitespace.
 
     Attributes:
         data (list): List of lines in the file that don't start
            with '#' and aren't whitespace.
-        config_items(dict): Configuration items opportunistically found in the
-            configuration file, with their values as given.
+
+    Example:
+    >>> type(rsysconf)
+    <class 'insights.parsers.rsyslog_conf.RsyslogConf'>
+    >>> len(rsysconf)
+    13
+    >>> rsysconf[2]
+    'authpriv.* /var/log/secure'
     """
+    def __init__(self, *args, **kwargs):
+        super(RsyslogConf, self).__init__(*args, **kwargs)
 
     def parse_content(self, content):
-        self.data = get_active_lines(content)
-
-        self.config_items = {}
-        # Config items are e.g. "$Word value #optional comment"
-        config_re = re.compile(r'^\s*\$(?P<name>\S+)\s+(?P<value>.*?)(?:\s+#.*)?$')
-        for line in self.data:
-            lstrip = line.strip()
-            match = config_re.match(lstrip)
-            if match:
-                self.config_items[match.group('name')] = match.group('value')
-
-    def config_val(self, item, default=None):
-        """
-        Return the given configuration item, or the default if not defined.
-
-        Parameters:
-            item(str): The configuration item name
-            default: The default if the item is not found (defaults to None)
-
-        Returns:
-            The related value in the `config_items` dictionary.
-        """
-        return self.config_items.get(item, default)
-
-    def __len__(self):
-        return len(self.data)
-
-    def __iter__(self):
-        for d in self.data:
-            yield d
+        data = []
+        brace_flag = False
+        parenthesis_flag = False
+        parenthesis_string = ""
+        brace_string = ""
+
+        for line in get_active_lines(content):
+            l_strip = line.strip()
+            # Combine multi lines in brace into one line
+            if brace_flag:
+                brace_string = brace_string + " " + l_strip
+                if "}" in l_strip:
+                    data.append(brace_string)
+                    brace_string = ""
+                    brace_flag = False
+                continue
+            else:
+                if "{" in l_strip:
+                    if "}" in l_strip:
+                        data.append(l_strip)
+                    else:
+                        brace_flag = True
+                        brace_string = l_strip
+                    continue
+            # Combine multi lines in parenthesis and not in brace into one line
+            if parenthesis_flag:
+                parenthesis_string = parenthesis_string + " " + l_strip
+                if ")" in l_strip:
+                    data.append(parenthesis_string)
+                    parenthesis_string = ""
+                    parenthesis_flag = False
+                continue
+            else:
+                if "(" in l_strip:
+                    if ")" in l_strip:
+                        data.append(l_strip)
+                    else:
+                        parenthesis_flag = True
+                        parenthesis_string = l_strip
+                    continue
+                else:
+                    data.append(l_strip)
+        self.extend(data)
diff --git a/insights/parsers/tests/test_rsyslog_conf.py b/insights/parsers/tests/test_rsyslog_conf.py
index 5aee79871..a7ff1b129 100644
--- a/insights/parsers/tests/test_rsyslog_conf.py
+++ b/insights/parsers/tests/test_rsyslog_conf.py
@@ -1,51 +1,49 @@
+import doctest
 from insights.tests import context_wrap
+from insights.parsers import rsyslog_conf
 from insights.parsers.rsyslog_conf import RsyslogConf
 
-RSYSLOG_CONF_0 = """
-:fromhost-ip, regex, "10.0.0.[0-9]" /tmp/my_syslog.log
-$ModLoad imtcp
-$InputTCPServerRun 10514
-$template SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n"
-$WorkDirectory /var/opt/rsyslog # where to place spool files
-""".strip()
 
-RSYSLOG_CONF_1 = r"""
+RSYSLOG_CONF = r"""
 # Provides TCP syslog reception
-#$ModLoad imtcp.so
-#$InputTCPServerRun 514
 :msg, regex, "\/vob\/.*\.[cpp|c|java]" /var/log/appMessages
+*.info;mail.none;authpriv.none;cron.none /var/log/messages
+authpriv.* /var/log/secure
+$ModLoad imtcp
+$InputTCPServerRun 10514
+module(load="imuxsock" # provides support for local system logging (e.g.
via logger command) + SysSock.Use="off") # Turn off message reception via local log socket; +module(load="builtin:omfile" Template="RSYSLOG_TraditionalFileFormat") +include(file="/etc/rsyslog.d/*.conf" mode="optional") +global(workDirectory="/var/lib/rsyslog") +*.info { + action( + type="omfile" + name="hehe" + file="/tmp/testnimei") +} +if $programname == 'prog1' then { + action(type="omfile" file="/var/log/prog1.log") + if $msg contains 'test' then + action(type="omfile" file="/var/log/prog1test.log") + else + action(type="omfile" file="/var/log/prog1notest.log") +} +include(file="/etc/rsyslog.d/*.conf" mode="optional") +cron.* /var/log/cron """.strip() -def test_rsyslog_conf_0(): - ctx = context_wrap(RSYSLOG_CONF_0) - m = RsyslogConf(ctx) - d = list(m) - assert len(m) == 5 - assert len(d) == 5 - # Test configuration item detection in dictionary - assert hasattr(m, 'config_items') - assert isinstance(m.config_items, dict) - assert 'ModLoad' in m.config_items - assert m.config_items['ModLoad'] == 'imtcp' - assert m.config_items['InputTCPServerRun'] == '10514' - assert m.config_items['template'] == 'SpiceTmpl,"%TIMESTAMP%.%TIMESTAMP:::date-subseconds% %syslogtag% %syslogseverity-text%:%msg:::sp-if-no-1st-sp%%msg:::drop-last-lf%\\n"' - assert 'ForwardSyslogHost' not in m.config_items - # configuration items should not include the comment. - assert 'WorkDirectory' in m.config_items - assert m.config_items['WorkDirectory'] == '/var/opt/rsyslog' - # Test configuration item accessor - assert hasattr(m, 'config_val') - assert m.config_val('ModLoad') == 'imtcp' - assert m.config_val('ForwardSyslogHost', 'syslog.example.com') == 'syslog.example.com' +def test_rsyslog_conf_1(): + rsyslogconf = RsyslogConf(context_wrap(RSYSLOG_CONF)) + assert len(rsyslogconf) == 13 + assert rsyslogconf[2] == "authpriv.* /var/log/secure" + assert rsyslogconf[9] == """*.info { action( type="omfile" name="hehe" file="/tmp/testnimei") }""" -def test_rsyslog_conf_1(): - ctx = context_wrap(RSYSLOG_CONF_1) - m = RsyslogConf(ctx) - d = list(m) - assert len(m) == 1 - assert len(d) == 1 - # Test that commented-out config items are not detected - assert 'ModLoad' not in m.config_items - assert 'InputTCPServerRun' not in m.config_items +def test_rsyslog_doc_examples(): + env = { + 'rsysconf': RsyslogConf(context_wrap(RSYSLOG_CONF)), + } + failed, total = doctest.testmod(rsyslog_conf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index b7e1e0ce4..ec6d0f4d6 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -537,7 +537,7 @@ class Specs(SpecSet): root_crontab = RegistryPoint() route = RegistryPoint() rpm_V_packages = RegistryPoint() - rsyslog_conf = RegistryPoint(filterable=True) + rsyslog_conf = RegistryPoint(filterable=True, multi_output=True) samba = RegistryPoint(filterable=True) sap_dev_disp = RegistryPoint(multi_output=True, filterable=True) sap_dev_rd = RegistryPoint(multi_output=True, filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index dd8582ed4..f386badcb 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -578,7 +578,7 @@ def pcp_enabled(broker): rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) - rsyslog_conf = simple_file("/etc/rsyslog.conf") + rsyslog_conf = glob_file(["/etc/rsyslog.conf", 
"/etc/rsyslog.d/test.conf"]) samba = simple_file("/etc/samba/smb.conf") @datasource(Sap) From 9316aaf05d6f6e552b64120b0fa61866a34a2dff Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Mon, 9 Nov 2020 03:18:31 +0000 Subject: [PATCH 233/892] New parser for etc udev rules (#2811) New parser similar to the udev_rules but parse files under directory `/etc/udev/rules.d/` directory instead. Signed-off-by: Akshay Gaikwad --- .../shared_parsers_catalog/etc_udev_rules.rst | 3 + insights/parsers/etc_udev_rules.py | 47 +++++++++++ insights/parsers/tests/test_etc_udev_rules.py | 79 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 131 insertions(+) create mode 100644 docs/shared_parsers_catalog/etc_udev_rules.rst create mode 100644 insights/parsers/etc_udev_rules.py create mode 100644 insights/parsers/tests/test_etc_udev_rules.py diff --git a/docs/shared_parsers_catalog/etc_udev_rules.rst b/docs/shared_parsers_catalog/etc_udev_rules.rst new file mode 100644 index 000000000..c55e4a756 --- /dev/null +++ b/docs/shared_parsers_catalog/etc_udev_rules.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.etc_udev_rules + :members: + :show-inheritance: diff --git a/insights/parsers/etc_udev_rules.py b/insights/parsers/etc_udev_rules.py new file mode 100644 index 000000000..4710bf3d1 --- /dev/null +++ b/insights/parsers/etc_udev_rules.py @@ -0,0 +1,47 @@ +""" +EtcUdevRules - file ``/etc/udev/rules.d/`` +========================================== + +This module is similar to the :py:mod:`insights.parsers.udev_rules` +but parse .rules files under ``/etc/ude/rules.d/`` directory instead. + +The parsers included in this module are: + +UdevRules40Redhat - file ``/etc/udev/rules.d/40-redhat.rules`` +-------------------------------------------------------------- + +""" +from insights import parser +from insights.core import LogFileOutput +from insights.specs import Specs + + +@parser(Specs.etc_udev_40_redhat_rules) +class UdevRules40Redhat(LogFileOutput): + """ + Read the content of ``/etc/udev/rules.d/40-redhat.rules`` file. + + .. note:: + + The syntax of the `.rules` file is complex, and no rules require to + get the serialized parsed result currently. An only existing rule's + supposed to check the syntax of some specific line, so here the + :class:`insights.core.LogFileOutput` is the base class. 
+ + Sample input:: + + # do not edit this file, it will be overwritten on update + # CPU hotadd request + SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + + # Memory hotadd request + SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end" + PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + + LABEL="memory_hotplug_end" + + Examples: + >>> 'LABEL="memory_hotplug_end"' in udev_rules.lines + True + """ + pass diff --git a/insights/parsers/tests/test_etc_udev_rules.py b/insights/parsers/tests/test_etc_udev_rules.py new file mode 100644 index 000000000..8b5350797 --- /dev/null +++ b/insights/parsers/tests/test_etc_udev_rules.py @@ -0,0 +1,79 @@ +import doctest +from insights.parsers import etc_udev_rules +from insights.parsers.etc_udev_rules import UdevRules40Redhat +from insights.tests import context_wrap + +UDEV_RULES_CONTENT = """ +# do not edit this file, it will be overwritten on update + +# CPU hotadd request +SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + +# Memory hotadd request +SUBSYSTEM!="memory", GOTO="memory_hotplug_end" +ACTION!="add", GOTO="memory_hotplug_end" +PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + +ENV{.state}="online" +PROGRAM="/bin/systemd-detect-virt", RESULT=="none", ENV{.state}="online_movable" +ATTR{state}=="offline", ATTR{state}="$env{.state}" + +LABEL="memory_hotplug_end" + +# reload sysctl.conf / sysctl.conf.d settings when the bridge module is loaded +ACTION=="add", SUBSYSTEM=="module", KERNEL=="bridge", RUN+="/usr/lib/systemd/systemd-sysctl --prefix=/proc/sys/net/bridge" + +# load SCSI generic (sg) driver +SUBSYSTEM=="scsi", ENV{DEVTYPE}=="scsi_device", TEST!="[module/sg]", RUN+="/sbin/modprobe -bv sg" +SUBSYSTEM=="scsi", ENV{DEVTYPE}=="scsi_target", TEST!="[module/sg]", RUN+="/sbin/modprobe -bv sg" + +# Rule for prandom character device node permissions +KERNEL=="prandom", MODE="0644" + + +# Rules for creating the ID_PATH for SCSI devices based on the CCW bus +# using the form: ccw--zfcp-: +# +ACTION=="remove", GOTO="zfcp_scsi_device_end" + +# +# Set environment variable "ID_ZFCP_BUS" to "1" if the devices +# (both disk and partition) are SCSI devices based on FCP devices +# +KERNEL=="sd*", SUBSYSTEMS=="ccw", DRIVERS=="zfcp", ENV{.ID_ZFCP_BUS}="1" + +# For SCSI disks +KERNEL=="sd*[!0-9]", SUBSYSTEMS=="scsi", ENV{.ID_ZFCP_BUS}=="1", ENV{DEVTYPE}=="disk", SYMLINK+="disk/by-path/ccw-$attr{hba_id}-zfcp-$attr{wwpn}:$attr{fcp_lun}" + + +# For partitions on a SCSI disk +KERNEL=="sd*[0-9]", SUBSYSTEMS=="scsi", ENV{.ID_ZFCP_BUS}=="1", ENV{DEVTYPE}=="partition", SYMLINK+="disk/by-path/ccw-$attr{hba_id}-zfcp-$attr{wwpn}:$attr{fcp_lun}-part%n" + +LABEL="zfcp_scsi_device_end" +""".strip() + +SAMPLE_INPUT = """ +# do not edit this file, it will be overwritten on update +# CPU hotadd request +SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + +# Memory hotadd request +SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end" +PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + +LABEL="memory_hotplug_end" +""".strip() + + +def test_udev_rules(): + result = UdevRules40Redhat(context_wrap(UDEV_RULES_CONTENT)) + for line in ['SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"', + 'SUBSYSTEM!="memory", GOTO="memory_hotplug_end"', + 'ACTION!="add", GOTO="memory_hotplug_end"']: + assert line in result.lines + + +def test_documentation(): + env = {'udev_rules': 
UdevRules40Redhat(context_wrap(SAMPLE_INPUT))}
+    failed_count, tests = doctest.testmod(etc_udev_rules, globs=env)
+    assert failed_count == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index ec6d0f4d6..3a405a770 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -151,6 +151,7 @@ class Specs(SpecSet):
     etc_journald_conf_d = RegistryPoint(multi_output=True)
     etc_journald_conf = RegistryPoint()
     etc_machine_id = RegistryPoint()
+    etc_udev_40_redhat_rules = RegistryPoint(filterable=True)
     etcd_conf = RegistryPoint(filterable=True)
     ethernet_interfaces = RegistryPoint()
     ethtool_a = RegistryPoint(multi_output=True)
diff --git a/insights/specs/default.py b/insights/specs/default.py
index f386badcb..4caffde4e 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -246,6 +246,7 @@ def is_ceph_monitor(broker):
     etc_journald_conf = simple_file(r"etc/systemd/journald.conf")
     etc_journald_conf_d = glob_file(r"etc/systemd/journald.conf.d/*.conf")
     etc_machine_id = simple_file("/etc/machine-id")
+    etc_udev_40_redhat_rules = simple_file("/etc/udev/rules.d/40-redhat.rules")
     etcd_conf = simple_file("/etc/etcd/etcd.conf")
     ethernet_interfaces = listdir("/sys/class/net", context=HostContext)
     ethtool = foreach_execute(ethernet_interfaces, "/sbin/ethtool %s")

From d91d6d6301ea2f0c3e999b69218c0eb091cc64e3 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Mon, 9 Nov 2020 15:12:27 +0800
Subject: [PATCH 234/892] Update the YumRepoList.rhel_repos for RHEL8 satellite (#2820)

- In RHEL 8 the satellite repo id does not start with "rhel-", but it
  does contain '-rhel-' (e.g. 'satellite-tools-6.7-for-rhel-8-x86_64-rpms').
  Make sure `rhel_repos` returns all of the RHEL repos.

Signed-off-by: Xiangce Liu
---
 insights/parsers/tests/test_yum_repolist.py | 19 +++++++++++++++++++
 insights/parsers/yum.py                     |  2 +-
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/insights/parsers/tests/test_yum_repolist.py b/insights/parsers/tests/test_yum_repolist.py
index f2d32dd97..4a8d35666 100644
--- a/insights/parsers/tests/test_yum_repolist.py
+++ b/insights/parsers/tests/test_yum_repolist.py
@@ -99,6 +99,21 @@
 repolist: 12,768
 """.strip()
 
+YUM_REPOLIST_RHEL8 = """
+Updating Subscription Management repositories.
+Repository packages-microsoft-com-prod is listed more than once in the configuration +repo id repo name +fast-datapath-for-rhel-8-x86_64-rpms Fast Datapath for RHEL 8 x86_64 (RPMs) +packages-microsoft-com-mssql-server-2019 packages-microsoft-com-mssql-server-2019 +packages-microsoft-com-prod packages-microsoft-com-prod +rhel-8-for-x86_64-appstream-rpms Red Hat Enterprise Linux 8 for x86_64 - AppStream (RPMs) +rhel-8-for-x86_64-baseos-rpms Red Hat Enterprise Linux 8 for x86_64 - BaseOS (RPMs) +rhel-8-for-x86_64-highavailability-rpms Red Hat Enterprise Linux 8 for x86_64 - High Availability (RPMs) +rhel-8-for-x86_64-sap-solutions-rpms Red Hat Enterprise Linux 8 for x86_64 - SAP Solutions (RPMs) +rhel-8-for-x86_64-supplementary-rpms Red Hat Enterprise Linux 8 for x86_64 - Supplementary (RPMs) +satellite-tools-6.7-for-rhel-8-x86_64-rpms Red Hat Satellite Tools 6.7 for RHEL 8 x86_64 (RPMs) +""".strip() + YUM_REPOLIST_DOC_NO_REPONAME = """ Loaded plugins: package_upload, product-id, search-disabled-repos, security, subscription-manager repo id status @@ -158,6 +173,10 @@ def test_rhel_repos(): 'rhel-7-server-satellite-capsule-6.1-rpms', 'rhel-server-rhscl-7-rpms']) + repo_list = YumRepoList(context_wrap(YUM_REPOLIST_RHEL8)) + assert len(repo_list.rhel_repos) == 7 + assert 'satellite-tools-6.7-for-rhel-8-x86_64-rpms' in repo_list.rhel_repos + def test_rhel_repos_missing_status(): with pytest.raises(ParseException) as se: diff --git a/insights/parsers/yum.py b/insights/parsers/yum.py index 41103c40a..7053a979e 100644 --- a/insights/parsers/yum.py +++ b/insights/parsers/yum.py @@ -186,4 +186,4 @@ def rhel_repos(self): """ return [i.split('/')[0] for i in self.repos - if i.startswith('rhel')] + if i.startswith('rhel-') or '-rhel-' in i] From a7741a8e2e6c8b1e329a50868d48c377237d6a77 Mon Sep 17 00:00:00 2001 From: Stephen Date: Mon, 9 Nov 2020 11:57:20 -0500 Subject: [PATCH 235/892] Ensure branch_info is parsed as a raw file (#2816) Signed-off-by: Stephen Adams --- insights/specs/__init__.py | 2 +- insights/specs/core3_archive.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 3a405a770..eeeaca5d3 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -35,7 +35,7 @@ class Specs(SpecSet): bond = RegistryPoint(multi_output=True) bond_dynamic_lb = RegistryPoint(multi_output=True) boot_loader_entries = RegistryPoint(multi_output=True) - branch_info = RegistryPoint() + branch_info = RegistryPoint(raw=True) brctl_show = RegistryPoint() candlepin_error_log = RegistryPoint(filterable=True) candlepin_log = RegistryPoint(filterable=True) diff --git a/insights/specs/core3_archive.py b/insights/specs/core3_archive.py index 0c3228b9a..6c1599c13 100644 --- a/insights/specs/core3_archive.py +++ b/insights/specs/core3_archive.py @@ -7,10 +7,10 @@ from insights.core.context import SerializedArchiveContext from insights.specs import Specs -from insights.core.spec_factory import simple_file +from insights.core.spec_factory import RawFileProvider, simple_file simple_file = partial(simple_file, context=SerializedArchiveContext) class Core3Specs(Specs): - branch_info = simple_file("/branch_info") + branch_info = simple_file("/branch_info", kind=RawFileProvider) From e0f33adf19cf7f1179561f6fd63fbafce7e112e8 Mon Sep 17 00:00:00 2001 From: Paul Wayper Date: Tue, 10 Nov 2020 09:16:06 +1100 Subject: [PATCH 236/892] Changing to use 'df (?) 
-x autofs' for preference (#2822) Signed-off-by: Paul Wayper --- insights/specs/default.py | 6 +++--- insights/specs/insights_archive.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4caffde4e..710a95cfc 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -220,9 +220,9 @@ def is_ceph_monitor(broker): date_utc = simple_command("/bin/date --utc") designate_conf = first_file(["/var/lib/config-data/puppet-generated/designate/etc/designate/designate.conf", "/etc/designate/designate.conf"]) - df__al = simple_command("/bin/df -al") - df__alP = simple_command("/bin/df -alP") - df__li = simple_command("/bin/df -li") + df__al = simple_command("/bin/df -al -x autofs") + df__alP = simple_command("/bin/df -alP -x autofs") + df__li = simple_command("/bin/df -li -x autofs") dig_dnssec = simple_command("/usr/bin/dig +dnssec . SOA") dig_edns = simple_command("/usr/bin/dig +edns=0 . SOA") dig_noedns = simple_command("/usr/bin/dig +noedns . SOA") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 52eaca33e..fdc2629c0 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -32,9 +32,9 @@ class InsightsArchiveSpecs(Specs): cpupower_frequency_info = simple_file("insights_commands/cpupower_-c_all_frequency-info") date = simple_file("insights_commands/date") date_utc = simple_file("insights_commands/date_--utc") - df__al = simple_file("insights_commands/df_-al") - df__alP = simple_file("insights_commands/df_-alP") - df__li = simple_file("insights_commands/df_-li") + df__al = first_file(["insights_commands/df_-al_-x_autofs", "insights_commands/df_-al"]) + df__alP = first_file(["insights_commands/df_-alP_-x_autofs", "insights_commands/df_-alP"]) + df__li = first_file(["insights_commands/df_-li_-x_autofs", "insights_commands/df_-li"]) dig_dnssec = simple_file("insights_commands/dig_dnssec_._SOA") dig_edns = simple_file("insights_commands/dig_edns_0_._SOA") dig_noedns = simple_file("insights_commands/dig_noedns_._SOA") From 55455ccb79a42eecc6bc307a80cceb5d8c947332 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Tue, 10 Nov 2020 11:23:44 -0600 Subject: [PATCH 237/892] Add specs for must gather archive context (#2791) * Adding new module for must gather archives * Loading new module when running Signed-off-by: Bob Fahr --- insights/__init__.py | 1 + insights/specs/must_gather_archive.py | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 insights/specs/must_gather_archive.py diff --git a/insights/__init__.py b/insights/__init__.py index 9896895e8..87e7d8007 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -135,6 +135,7 @@ def load_default_plugins(): dr.load_components("insights.specs.core3_archive") dr.load_components("insights.specs.sos_archive") dr.load_components("insights.specs.jdr_archive") + dr.load_components("insights.specs.must_gather_archive") def load_packages(packages): diff --git a/insights/specs/must_gather_archive.py b/insights/specs/must_gather_archive.py new file mode 100644 index 000000000..b77682454 --- /dev/null +++ b/insights/specs/must_gather_archive.py @@ -0,0 +1,11 @@ +from insights.core.spec_factory import simple_file +from functools import partial +from insights.core.context import MustGatherContext +from insights.specs import Specs + +simple_file = partial(simple_file, context=MustGatherContext) + + +class MustGatherArchiveSpecs(Specs): + + ceph_health_detail = 
simple_file("ceph/namespaces/openshift-storage/must_gather_commands/json_output/ceph_health_detail_--format_json-pretty") From 80bb0c78337e554624c7291586d61e33a5dac9a7 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 10 Nov 2020 14:31:54 -0500 Subject: [PATCH 238/892] add --module option to run modules from cli (#2792) * add --module option to run modules from cli * do not show --module option in help * restrict modules to the apps package Signed-off-by: Jeremy Crafts --- insights/client/apps/playbook_verify/__init__.py | 0 insights/client/apps/playbook_verify/__main__.py | 1 + insights/client/config.py | 9 +++++++++ insights/client/phase/v1.py | 10 ++++++++++ insights/tests/client/phase/test_collect_and_upload.py | 1 + 5 files changed, 21 insertions(+) create mode 100644 insights/client/apps/playbook_verify/__init__.py create mode 100644 insights/client/apps/playbook_verify/__main__.py diff --git a/insights/client/apps/playbook_verify/__init__.py b/insights/client/apps/playbook_verify/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/client/apps/playbook_verify/__main__.py b/insights/client/apps/playbook_verify/__main__.py new file mode 100644 index 000000000..bd8478232 --- /dev/null +++ b/insights/client/apps/playbook_verify/__main__.py @@ -0,0 +1 @@ +print('We apologize for the inconvenience.') diff --git a/insights/client/config.py b/insights/client/config.py index 459553062..af0a187df 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -232,6 +232,13 @@ def _core_collect_default(): 'action': 'store_true', 'group': 'debug' }, + 'module': { + 'default': None, + 'opt': ['--module', '-m'], + 'help': 'Directly run a Python module within the insights-core package', + 'action': 'store', + 'help': argparse.SUPPRESS + }, 'obfuscate': { # non-CLI 'default': False @@ -706,6 +713,8 @@ def _validate_options(self): if self.obfuscate: if self._print_errors: sys.stdout.write('WARNING: SOSCleaner reports will be created alongside the output archive.\n') + if self.module and not self.module.startswith('insights.client.apps.'): + raise ValueError('You can only run modules within the namespace insights.client.apps.*') def _imply_options(self): ''' diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 6e2cf6a18..268098077 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -4,6 +4,7 @@ import logging import os import sys +import runpy from insights.client import InsightsClient from insights.client.config import InsightsConfig @@ -261,6 +262,15 @@ def post_update(client, config): @phase def collect_and_output(client, config): + # run a specified module + if config.module: + try: + runpy.run_module(config.module) + except ImportError as e: + logger.error(e) + sys.exit(constants.sig_kill_bad) + sys.exit(constants.sig_kill_ok) + # --compliance was called if config.compliance: config.payload, config.content_type = ComplianceClient(config).oscap_scan() diff --git a/insights/tests/client/phase/test_collect_and_upload.py b/insights/tests/client/phase/test_collect_and_upload.py index 6a7e3add1..6cce91705 100644 --- a/insights/tests/client/phase/test_collect_and_upload.py +++ b/insights/tests/client/phase/test_collect_and_upload.py @@ -22,6 +22,7 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.diagnosis": None, "return_value.load_all.return_value.payload": None, "return_value.load_all.return_value.compliance": False, + "return_value.load_all.return_value.module": False, 
"return_value.load_all.return_value.output_dir": None, "return_value.load_all.return_value.output_file": None}) return patcher(old_function) From 5aa9cf254918d402c152e913e4a3af9b67a6ac24 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 11 Nov 2020 07:21:24 -0600 Subject: [PATCH 239/892] Add line convenience method to parsr Result class (#2815) * Add line convenience method to parsr Result class * There were already a couple of convenience functions in Result so this just adds another * This fixes #2798 Signed-off-by: Bob Fahr * Update string_value to be consistent Signed-off-by: Bob Fahr --- insights/parsr/query/__init__.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/insights/parsr/query/__init__.py b/insights/parsr/query/__init__.py index 1a928e620..3a373a82d 100644 --- a/insights/parsr/query/__init__.py +++ b/insights/parsr/query/__init__.py @@ -292,6 +292,21 @@ def get_crumbs(self): """ return sorted(set(c.get_crumbs() for c in self.children)) + @property + def line(self): + """ + Returns the line of the child if only one child exists. This helps + queries behave more like dictionaries when you know only one result + should exist. + """ + if len(self.children) == 0: + return None + + if len(self.children) == 1: + return self.children[0].line + + raise Exception("More than one value to return.") + @property def string_value(self): """ @@ -299,8 +314,12 @@ def string_value(self): helps queries behave more like dictionaries when you know only one result should exist. """ + if len(self.children) == 0: + return None + if len(self.children) == 1: return self.children[0].string_value + raise Exception("More than one value to return.") @property From 7cdb6957a29623a83675d66e24e93828f86bb22b Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 12 Nov 2020 11:20:24 +0800 Subject: [PATCH 240/892] Enhance rsyslog spec (#2823) Signed-off-by: jiazhang --- insights/specs/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 710a95cfc..a1de7eae9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -579,7 +579,7 @@ def pcp_enabled(broker): rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) - rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/test.conf"]) + rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/*.conf"]) samba = simple_file("/etc/samba/smb.conf") @datasource(Sap) From 94e225ea620ee954842a296be55bc17b554c6c75 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 12 Nov 2020 11:13:15 -0500 Subject: [PATCH 241/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 37 +++++++++++++++++--------- 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index a4c912b0c..240c1b0db 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -111,17 +111,17 @@ "symbolic_name": "date_utc" }, { - "command": "/bin/df -al", + "command": "/bin/df -al -x autofs", "pattern": [], "symbolic_name": "df__al" }, { - "command": "/bin/df -alP", + "command": "/bin/df -alP -x autofs", "pattern": [], "symbolic_name": "df__alP" }, { - "command": "/bin/df -li", + "command": "/bin/df -li -x autofs", 
"pattern": [], "symbolic_name": "df__li" }, @@ -667,7 +667,6 @@ "libssl.so", "lsnrctl", "ovs-vswit", - "rgw_swift", "tnslsnr" ], "symbolic_name": "lsof" @@ -1148,7 +1147,6 @@ "smbd", "spausedd", "target_completi", - "taskomaticd", "tgtd", "tuned", "virt-who" @@ -1671,16 +1669,14 @@ { "file": "/etc/ceph/ceph.conf", "pattern": [ - "[", - "rgw_swift_account_in_url" + "[" ], "symbolic_name": "ceph_conf" }, { "file": "/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", "pattern": [ - "[", - "rgw_swift_account_in_url" + "[" ], "symbolic_name": "ceph_conf" }, @@ -2326,7 +2322,6 @@ { "file": "/var/log/messages", "pattern": [ - " disconnect jid=", " invoked oom-killer: ", "\"/var/lib/pgsql/data\" is missing or empty", "(enic): transmit queue 0 timed out", @@ -2337,7 +2332,6 @@ "17763", ": possible SYN flooding on port", ": segfault at ", - ": session replaced: jid=", "Abort command issued", "Broken pipe", "Buffer I/O error on device", @@ -2376,6 +2370,9 @@ "SDN initialization failed: Error: Existing service with IP: None is not part of service network", "Scheduled import of stream", "Steal time is >", + "TCP listen overflows", + "TCP request queue full SYN cookie replies", + "TCP request queue full drops", "TX driver issue detected, PF reset issued", "This system does not support \"SSSE3\"", "Throttling request took", @@ -4097,6 +4094,20 @@ "symbolic_name": "numa_cpus", "pattern": [] }, + { + "glob": "/etc/rsyslog.d/*.conf", + "pattern": [ + "$ActionQueueFileName", + "imjournal", + "imtcp", + "regex", + "[", + "]", + "{", + "}" + ], + "symbolic_name": "rsyslog_conf" + }, { "glob": "/sys/class/scsi_host/host[0-9]*/fwrev", "symbolic_name": "scsi_fwver", @@ -4141,5 +4152,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-10-29T14:46:39.878759" -} \ No newline at end of file + "version": "2020-11-05T15:21:17.758393" +} From 33cf8e9e8581f511e2002868107a8897a6ee6180 Mon Sep 17 00:00:00 2001 From: dehort Date: Tue, 17 Nov 2020 09:49:23 -0600 Subject: [PATCH 242/892] Ansible playbook verifier interface (#2814) * Defining an interface for verifying the signature of ansible playbooks Signed-off-by: Derek Horton * added additional Proof of concept material to original commit Signed-off-by: Derek Horton * Changed name of env variable Signed-off-by: Derek Horton * Add changes based on feedback and test playbook file Signed-off-by: Derek Horton * Add newline at the end of test_playbook.yml Signed-off-by: Derek Horton * Export the exception class Signed-off-by: Derek Horton * Fix flake8 issues Signed-off-by: Derek Horton * Take out the type annotations as python 2.x doesn't like them Signed-off-by: Derek Horton * Take out the type annotations as python 2.x doesn't like them Signed-off-by: Derek Horton * python 2 changes Signed-off-by: Derek Horton * Moving things around Signed-off-by: Derek Horton * lint Signed-off-by: Derek Horton Co-authored-by: Alec Cohan Co-authored-by: Jeremy Crafts --- insights/client/apps/ansible/__init__.py | 0 .../ansible/playbook_verifier/__init__.py | 37 +++++++++++++++++++ .../ansible/playbook_verifier/__main__.py | 25 +++++++++++++ .../client/apps/ansible/test_playbook.yml | 28 ++++++++++++++ 4 files changed, 90 insertions(+) create mode 100644 insights/client/apps/ansible/__init__.py create mode 100644 insights/client/apps/ansible/playbook_verifier/__init__.py create mode 100644 insights/client/apps/ansible/playbook_verifier/__main__.py create mode 100644 insights/client/apps/ansible/test_playbook.yml diff 
--git a/insights/client/apps/ansible/__init__.py b/insights/client/apps/ansible/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py new file mode 100644 index 000000000..d1f3e7826 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -0,0 +1,37 @@ +import os + +__all__ = ("verify", "PlaybookValidationError") + + +class PlaybookValidationError(Exception): + """ + Exception raised when playbook validation fails + + Attributes: + playbook -- stringified playbook yaml from stdin + message -- explanation of why validation failed + """ + + def __init__(self, message="PLAYBOOK VALIDATION FAILED"): + self.message = message + super().__init__(self.message) + + def __str__(self): + return self.message + + +def verify(unverified_playbook): + """ + Verify the signed playbook. + + Input: stringified "broken" unverified playbook + Output: stringified "verified" playbook + Error: exception + """ + # Skeleton implementation ... "bless" the incoming playbook + ERROR = os.getenv('ANSIBLE_PLAYBOOK_VERIFIER_THROW_ERROR') + if ERROR: + raise PlaybookValidationError() + + verified_playbook = unverified_playbook + return verified_playbook diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py new file mode 100644 index 000000000..83ad27223 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -0,0 +1,25 @@ +import sys + +from insights.client.apps.ansible.playbook_verifier import verify + + +def read_playbook(): + """ + Read in the stringified playbook yaml from stdin + """ + unverified_playbook = '' + for line in sys.stdin: + unverified_playbook += line + + return unverified_playbook + + +unverified_playbook = read_playbook() + +try: + verified_playbook = verify(unverified_playbook) +except Exception as e: + sys.stderr.write(e.message) + sys.exit(1) + +print(verified_playbook) diff --git a/insights/client/apps/ansible/test_playbook.yml b/insights/client/apps/ansible/test_playbook.yml new file mode 100644 index 000000000..e64975c9f --- /dev/null +++ b/insights/client/apps/ansible/test_playbook.yml @@ -0,0 +1,28 @@ +--- +# Red Hat Insights has recommended one or more actions for you, a system administrator, to review and if you +# deem appropriate, deploy on your systems running Red Hat software. Based on the analysis, we have automatically +# generated an Ansible Playbook for you. Please review and test the recommended actions and the Playbook as +# they may contain configuration changes, updates, reboots and/or other changes to your systems. Red Hat is not +# responsible for any adverse outcomes related to these recommendations or Playbooks. +# +# ping +# https://cloud.redhat.com/insights/remediations/44466a02-24a1-47b4-84cb-391aeff4523 +# Generated by Red Hat Insights on Thu, 29 Oct 2020 12:24:17 GMT +# Created by some-user + +# Fixes test:ping +# Identifier: (test:ping,fix) +# Version: unknown +- name: ping + hosts: "host1,host2" + tasks: + - ping: + +- name: run insights + hosts: "host1,host2" + become: True + gather_facts: False + tasks: + - name: run insights + command: insights-client + changed_when: false From 528df1653ad278b339115f46ad5a15e86efd3e5b Mon Sep 17 00:00:00 2001 From: Jakub Svoboda Date: Tue, 17 Nov 2020 18:32:12 +0000 Subject: [PATCH 243/892] Add a new parser and spec for doveconf. 
(#2797) Signed-off-by: Jakub Svoboda --- docs/shared_parsers_catalog/doveconf.rst | 3 + insights/parsers/doveconf.py | 137 +++++++++++++++ insights/parsers/tests/test_doveconf.py | 206 +++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 349 insertions(+) create mode 100644 docs/shared_parsers_catalog/doveconf.rst create mode 100644 insights/parsers/doveconf.py create mode 100644 insights/parsers/tests/test_doveconf.py diff --git a/docs/shared_parsers_catalog/doveconf.rst b/docs/shared_parsers_catalog/doveconf.rst new file mode 100644 index 000000000..2d9db0d77 --- /dev/null +++ b/docs/shared_parsers_catalog/doveconf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.doveconf + :members: + :show-inheritance: diff --git a/insights/parsers/doveconf.py b/insights/parsers/doveconf.py new file mode 100644 index 000000000..655b0d2b2 --- /dev/null +++ b/insights/parsers/doveconf.py @@ -0,0 +1,137 @@ +""" +Doveconf - command ``doveconf`` +=============================== +""" +import string + +from insights import parser, add_filter +from insights.combiners.nginx_conf import EmptyQuotedString +from insights.core import ConfigParser +from insights.parsr import (EOF, InSet, + Lift, Many, OneLineComment, PosMarker, + QuotedString, skip_none, String, WS, WSChar, LeftCurly, RightCurly, Forward, + EOL) +from insights.parsr.query import Directive, Entry, Section +from insights.specs import Specs + +add_filter(Specs.doveconf, [ + "{", + "}" +]) + + +@parser(Specs.doveconf, continue_on_error=False) +class Doveconf(ConfigParser): + """ + Class for parsing the ``doveconf`` command. + Sample input:: + + # 2.2.36 (1f10bfa63): /etc/dovecot/dovecot.conf + auth_anonymous_username = anonymous + auth_cache_negative_ttl = 1 hours + auth_policy_request_attributes = login=%{requested_username} pwhash=%{hashed_password} remote=%{rip} device_id=%{client_id} protocol=%s + auth_policy_server_api_header = + log_timestamp = "%b %d %H:%M:%S " + login_access_sockets = + namespace inbox { + disabled = no + location = + mailbox Drafts { + auto = no + driver = + special_use = \Drafts + } + mailbox Junk { + auto = no + } + order = 0 + prefix = + subscriptions = yes + } + passdb { + args = + auth_verbose = default + } + service aggregator { + chroot = . 
+ client_limit = 0 + fifo_listener replication-notify-fifo { + group = + mode = 0600 + } + group = + unix_listener replication-notify { + mode = 0600 + } + user = $default_internal_user + vsz_limit = 18446744073709551615 B + } + valid_chroot_dirs = + verbose_proctitle = no + + Example: + >>> doveconf['auth_anonymous_username'].value + 'anonymous' + >>> doveconf['auth_cache_negative_ttl'].value + '1 hours' + >>> doveconf['auth_cache_size'].value + '0' + >>> doveconf['auth_policy_request_attributes'].value + 'login=%{requested_username} pwhash=%{hashed_password} remote=%{rip} device_id=%{client_id} protocol=%s' + >>> doveconf['log_timestamp'].value + '"%b %d %H:%M:%S "' + >>> doveconf['namespace'][0].value + 'inbox' + >>> doveconf['namespace'][0]["mailbox"][1].value + 'Junk' + """ + def _parse_doc(self): + + def to_directive(name, attrs): + return Directive(name=name.value, attrs=attrs, lineno=name.lineno, src=self) + + def to_directive_noval(name, sep): + return Directive(name=name.value, attrs=[], lineno=name.lineno, src=self) + + def to_section(name, attrs, body): + return Section(name=name.value, attrs=attrs, children=body, lineno=name.lineno, src=self) + + sep_chars = set("=") + Sep = InSet(sep_chars, "Sep") + + name_chars = string.ascii_letters + "_/" + Name = Many(WSChar) >> PosMarker(String(name_chars) | EmptyQuotedString(name_chars)) << Many(WSChar) + + BareStringDir = String(set(string.printable) - set("\r\n")) + BareStringBlk = String(set(string.printable) - (set(string.whitespace) | set("{}'\""))) + BlockBeg = Many(WSChar) >> LeftCurly << WS + BlockEnd = Many(WSChar) >> RightCurly << (EOL | EOF) + + Comment = OneLineComment("#").map(lambda x: None) << (EOL | EOF) + + Stmt = Forward() + + _AttrDir = Many(WSChar) >> (BareStringDir | QuotedString) + AttrDir = Sep >> Many(_AttrDir) + AttrDirNoVal = Sep << Many(WSChar) + Dir = (Lift(to_directive) * Name * AttrDir) << (EOL | EOF) + DirNoVal = (Lift(to_directive_noval) * Name * AttrDirNoVal) << (EOL | EOF) + + _AttrBlk = Many(WSChar) >> (BareStringBlk | QuotedString) + AttrsBlk = Many(_AttrBlk) + BlockBody = BlockBeg >> Many(Stmt).map(skip_none) << BlockEnd + Block = (Lift(to_section) * Name * AttrsBlk * BlockBody) + + Stmt <= WS >> (Block | Dir | DirNoVal | Comment) << WS + + Doc = Many(Stmt).map(skip_none) + Top = Doc + EOF + + return Top + + def __init__(self, *args, **kwargs): + self.Top = self._parse_doc() + super(Doveconf, self).__init__(*args, **kwargs) + + def parse_doc(self, content): + return Entry(children=self.Top("\n".join(content))[0], src=self) diff --git a/insights/parsers/tests/test_doveconf.py b/insights/parsers/tests/test_doveconf.py new file mode 100644 index 000000000..cffa410dd --- /dev/null +++ b/insights/parsers/tests/test_doveconf.py @@ -0,0 +1,206 @@ +import doctest + +import pytest +from insights.parsers import SkipException +from insights.parsers import doveconf +from insights.parsers.doveconf import Doveconf +from insights.tests import context_wrap + +CONF = """ +# 2.2.36 (1f10bfa63): /etc/dovecot/dovecot.conf +# OS: Linux 4.18.0-193.el8.x86_64 x86_64 Red Hat Enterprise Linux release 8.2 (Ootpa) +# Hostname: localhost +# NOTE: Send doveconf -n output instead when asking for help. 
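
Because ``parse_doc`` folds every ``name { ... }`` block into a single
``Section`` node, nested settings stay reachable through chained lookups.
A hedged sketch against the docstring sample above, in the same query style
as the class examples::

    >>> doveconf['service'][0].value
    'aggregator'
    >>> doveconf['service'][0]['fifo_listener'][0]['mode'].value
    '0600'
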
+auth_anonymous_username = anonymous +auth_cache_negative_ttl = 1 hours +auth_cache_size = 0 +auth_policy_request_attributes = login=%{requested_username} pwhash=%{hashed_password} remote=%{rip} device_id=%{client_id} protocol=%s +auth_policy_server_api_header = +log_timestamp = "%b %d %H:%M:%S " +login_access_sockets = +login_greeting = Dovecot ready. +login_log_format = %$: %s +login_log_format_elements = user=<%u> method=%m rip=%r lip=%l mpid=%e %c session=<%{session}> +login_plugin_dir = /usr/lib64/dovecot/login +mail_log_prefix = "%s(%u): " +mdbox_rotate_size = 2 M +mmap_disable = no +namespace inbox { + disabled = no + hidden = no + ignore_on_failure = no + inbox = yes + list = yes + location = + mailbox Drafts { + auto = no + autoexpunge = 0 + autoexpunge_max_mails = 0 + comment = + driver = + special_use = \Drafts + } + mailbox Junk { + auto = no + autoexpunge = 0 + autoexpunge_max_mails = 0 + comment = + driver = + special_use = \Junk + } + mailbox Sent { + auto = no + autoexpunge = 0 + autoexpunge_max_mails = 0 + comment = + driver = + special_use = \Sent + } + mailbox "Sent Messages" { + auto = no + autoexpunge = 0 + autoexpunge_max_mails = 0 + comment = + driver = + special_use = \Sent + } + mailbox Trash { + auto = no + autoexpunge = 0 + autoexpunge_max_mails = 0 + comment = + driver = + special_use = \Trash + } + order = 0 + prefix = + separator = + subscriptions = yes + type = private +} +passdb { + args = + auth_verbose = default + default_fields = + deny = no + driver = pam + master = no + mechanisms = + name = + override_fields = + pass = no + result_failure = continue + result_internalfail = continue + result_success = return-ok + skip = never + username_filter = +} +service aggregator { + chroot = . + client_limit = 0 + drop_priv_before_exec = no + executable = aggregator + extra_groups = + fifo_listener replication-notify-fifo { + group = + mode = 0600 + user = + } + group = + idle_kill = 0 + privileged_group = + process_limit = 0 + process_min_avail = 0 + protocol = + service_count = 0 + type = + unix_listener replication-notify { + group = + mode = 0600 + user = + } + user = $default_internal_user + vsz_limit = 18446744073709551615 B +} +service anvil { + chroot = empty + client_limit = 0 + drop_priv_before_exec = no + executable = anvil + extra_groups = + group = + idle_kill = 4294967295 secs + privileged_group = + process_limit = 1 + process_min_avail = 1 + protocol = + service_count = 0 + type = anvil + unix_listener anvil-auth-penalty { + group = + mode = 0600 + user = + } + unix_listener anvil { + group = + mode = 0600 + user = + } + user = $default_internal_user + vsz_limit = 18446744073709551615 B +} +stats_user_min_time = 1 hours +submission_host = +syslog_facility = mail +userdb { + args = + auth_verbose = default + default_fields = + driver = passwd + name = + override_fields = + result_failure = continue + result_internalfail = continue + result_success = return-ok + skip = never +} +valid_chroot_dirs = +verbose_proctitle = no +verbose_ssl = no +version_ignore = no +""".strip() # noqa: W291 + +EMPTY = "" +INVALID = "b{{{la bla foo ha [] ^&*@#$%" + + +def test_doveconf(): + c = Doveconf(context_wrap(CONF)) + assert c['auth_anonymous_username'].value == 'anonymous' + assert c['auth_anonymous_username'].line == 'auth_anonymous_username = anonymous' + assert c['auth_cache_negative_ttl'].value == '1 hours' + assert c['auth_cache_size'].value == '0' + assert c['auth_policy_request_attributes'].value == 'login=%{requested_username} pwhash=%{hashed_password} 
remote=%{rip} device_id=%{client_id} protocol=%s' + assert c['auth_policy_server_api_header'].value is None + assert c['log_timestamp'].value == '"%b %d %H:%M:%S "' + assert c['namespace'][0].value == 'inbox' + assert c['namespace'][0]["mailbox"][3].value == 'Sent Messages' + assert c['namespace'][0]["mailbox"][3]["special_use"][0].value == '\\Sent' + + +def test_empty(): + with pytest.raises(SkipException): + Doveconf(context_wrap(EMPTY)) + + +def test_invalid(): + with pytest.raises(Exception): + Doveconf(context_wrap(INVALID)) + + +def test_doc_examples(): + env = { + 'doveconf': Doveconf(context_wrap(CONF)), + } + failed, total = doctest.testmod(doveconf, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index eeeaca5d3..c34dfafc3 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -142,6 +142,7 @@ class Specs(SpecSet): docker_storage_setup = RegistryPoint() docker_sysconfig = RegistryPoint() dotnet_version = RegistryPoint() + doveconf = RegistryPoint(filterable=True) dracut_kdump_capture_service = RegistryPoint() du_dirs = RegistryPoint(multi_output=True) dumpe2fs_h = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index a1de7eae9..52d3f0a27 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -236,6 +236,7 @@ def is_ceph_monitor(broker): docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") + doveconf = simple_command("/usr/bin/doveconf") docker_storage_setup = simple_file("/etc/sysconfig/docker-storage-setup") docker_sysconfig = simple_file("/etc/sysconfig/docker") dotnet_version = simple_command("/usr/bin/dotnet --version") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index fdc2629c0..ec6904397 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -46,6 +46,7 @@ class InsightsArchiveSpecs(Specs): docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") dotnet_version = simple_file("insights_commands/dotnet_--version") + doveconf = simple_file("insights_commands/doveconf") du_dirs = glob_file("insights_commands/du_-s_-k_*") engine_db_query_vdsm_version = simple_file("insights_commands/engine-db-query_--statement_SELECT_vs.vds_name_rpm_version_FROM_vds_dynamic_vd_vds_static_vs_WHERE_vd.vds_id_vs.vds_id_--json") ethtool = glob_file("insights_commands/ethtool_*", ignore="ethtool_-.*") From 3ebfaf1809443d5b43e4a8bde9b3f4ffb7a463aa Mon Sep 17 00:00:00 2001 From: Chenlizhong Date: Wed, 18 Nov 2020 10:36:12 +0800 Subject: [PATCH 244/892] Skip the insights-client self grep process (#2827) * Skip the insights-client self grep process Signed-off-by: Chen Lizhong * Update the skip string token Signed-off-by: Chen Lizhong --- insights/parsers/ps.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/insights/parsers/ps.py b/insights/parsers/ps.py index 77cd102bc..4c0740482 100644 --- a/insights/parsers/ps.py +++ b/insights/parsers/ps.py @@ -75,6 +75,9 @@ def parse_content(self, content): # The above list comprehension assures all rows have a command. 
for proc in self.data: cmd = proc[self.command_name] + # skip the insights-client self grep process + if cmd.startswith('grep -F'): + continue self.running.add(cmd) cmd_name = cmd if cmd.startswith('/'): From 7d4e124881a2b48a30f823a0146bc26ad7cdcad7 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Wed, 18 Nov 2020 18:32:05 -0600 Subject: [PATCH 245/892] Fix foreach_exec specs to use datasource as arg (#2829) * Foreach_execute specs take a datasource or list of datasources as the first argument Signed-off-by: Bob Fahr --- insights/specs/default.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 52d3f0a27..7d83f7306 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -241,7 +241,12 @@ def is_ceph_monitor(broker): docker_sysconfig = simple_file("/etc/sysconfig/docker") dotnet_version = simple_command("/usr/bin/dotnet --version") dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") - du_dirs = foreach_execute(['/var/lib/candlepin/activemq-artemis'], "/bin/du -s -k %s") + + @datasource() + def du_dirs_list(broker): + """ Provide a list of directorys for the ``du_dirs`` spec to scan """ + return ['/var/lib/candlepin/activemq-artemis'] + du_dirs = foreach_execute(du_dirs_list, "/bin/du -s -k %s") engine_db_query_vdsm_version = simple_command('engine-db-query --statement "SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id" --json') engine_log = simple_file("/var/log/ovirt-engine/engine.log") etc_journald_conf = simple_file(r"etc/systemd/journald.conf") @@ -423,9 +428,12 @@ def httpd_cmd(broker): machine_id = first_file(["etc/insights-client/machine-id", "etc/redhat-access-insights/machine-id", "etc/redhat_access_proactive/machine-id"]) mariadb_log = simple_file("/var/log/mariadb/mariadb.log") max_uid = simple_command("/bin/awk -F':' '{ if($3 > max) max = $3 } END { print max }' /etc/passwd") - md5chk_files = foreach_execute( - ["/etc/pki/product/69.pem", "/etc/pki/product-default/69.pem", "/usr/lib/libsoftokn3.so", "/usr/lib64/libsoftokn3.so", "/usr/lib/libfreeblpriv3.so", "/usr/lib64/libfreeblpriv3.so"], - "/usr/bin/md5sum %s") + + @datasource() + def md5chk_file_list(broker): + """ Provide a list of files to be processed by the ``md5chk_files`` spec """ + return ["/etc/pki/product/69.pem", "/etc/pki/product-default/69.pem", "/usr/lib/libsoftokn3.so", "/usr/lib64/libsoftokn3.so", "/usr/lib/libfreeblpriv3.so", "/usr/lib64/libfreeblpriv3.so"] + md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s") mdstat = simple_file("/proc/mdstat") meminfo = first_file(["/proc/meminfo", "/meminfo"]) messages = simple_file("/var/log/messages") From 2d8bea02ec7560ca81f46e3a1064488f204a879e Mon Sep 17 00:00:00 2001 From: Jan Dobes Date: Thu, 19 Nov 2020 15:32:15 +0100 Subject: [PATCH 246/892] collect missing DNF modules data (#2828) * initially added by #1656 and removed by #2663 * it's still needed for insights.parsers.dnf_modules used by RedHatInsights/insights-puptoo project Signed-off-by: Jan Dobes --- insights/specs/default.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 7d83f7306..bb7f9ce66 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -233,6 +233,7 @@ def is_ceph_monitor(broker): dmidecode = 
simple_command("/usr/sbin/dmidecode") dmsetup_info = simple_command("/usr/sbin/dmsetup info -C") dnf_conf = simple_file("/etc/dnf/dnf.conf") + dnf_modules = glob_file("/etc/dnf/modules.d/*.module") docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 56f3bcf5e..547c593d7 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -74,7 +74,6 @@ def test_get_component_by_symbolic_name(): 'cpu_vulns_spectre_v1', 'cpu_vulns_spectre_v2', 'cpu_vulns_spec_store_bypass', - 'dnf_modules', 'docker_storage', 'freeipa_healthcheck_log', 'vmware_tools_conf', From a6dcaf7293032482878b89e71f14e4be56ba2925 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 19 Nov 2020 08:52:09 -0600 Subject: [PATCH 247/892] Add sap combiner and deps to manifest for core collection (#2825) * This fixes an issue with collection of the sap_hdb_version spec in core collection Signed-off-by: Bob Fahr --- insights/collect.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/insights/collect.py b/insights/collect.py index b7a636386..f60578c95 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -103,6 +103,16 @@ - name: insights.combiners.hostname enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner + - name: insights.parsers.lssap + enabled: true + + - name: insights.parsers.saphostctrl + enabled: true + + - name: insights.combiners.sap + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true From 9ad3675991306263cd485cf1548d39dd544fd591 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 19 Nov 2020 09:24:41 -0600 Subject: [PATCH 248/892] Add components to manifest for core collection (#2826) * Adding new combiners and parsers they depend on to manifest so that those components will be loaded for core collection * If these components are not in the manifest they won't run, and the associated specs will not be collected due to dependency failures Signed-off-by: Bob Fahr --- insights/collect.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/insights/collect.py b/insights/collect.py index f60578c95..38a8c3b25 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -103,6 +103,32 @@ - name: insights.combiners.hostname enabled: true + # needed for the CloudProvider combiner + - name: insights.parsers.installed_rpms + enabled: true + + - name: insights.parsers.dmidecode + enabled: true + + - name: insights.parsers.yum + enabled: true + + - name: insights.parsers.rhsm_conf + enabled: true + + - name: insights.combiners.cloud_provider + enabled: true + + # needed for the Services combiner + - name: insights.parsers.chkconfig + enabled: true + + - name: insights.parsers.systemd.unitfiles + enabled: true + + - name: insights.combiners.services + enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner - name: insights.parsers.lssap enabled: true From 846d2e5e3cf29d7bf6b3bb5f8092a6e49de0ab08 Mon Sep 17 00:00:00 2001 From: Stephen Date: Fri, 20 Nov 2020 11:35:07 -0500 Subject: [PATCH 249/892] fix(specs): add spec for display_name to 
Signed-off-by: Jakub Svoboda
---
 insights/parsers/postconf.py            | 70 +++++++++++++++++++++----
 insights/parsers/tests/test_postconf.py | 31 ++++++++++-
 insights/specs/__init__.py              |  1 +
 insights/specs/default.py               |  1 +
 insights/specs/insights_archive.py      |  1 +
 5 files changed, 93 insertions(+), 11 deletions(-)

diff --git a/insights/parsers/postconf.py b/insights/parsers/postconf.py
index 2bcc5a623..794820ed3 100644
--- a/insights/parsers/postconf.py
+++ b/insights/parsers/postconf.py
@@ -1,4 +1,7 @@
 """
+Postconf - command ``postconf``
+===============================
+
 PostconfBuiltin - command ``postconf -C builtin``
 =================================================
 """
@@ -9,10 +12,9 @@
 from insights.parsers import SkipException


-@parser(Specs.postconf_builtin)
-class PostconfBuiltin(CommandParser, dict):
+class _Postconf(CommandParser, dict):
     """
-    Class for parsing the ``postconf -C builtin`` command.
+    Class for parsing the ``postconf`` command.

     Sample input::
         smtpd_tls_loglevel = 0
         smtpd_tls_mandatory_ciphers = medium
         smtpd_tls_mandatory_exclude_ciphers =
         smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1

     Examples:
-        >>> type(postconf)
-        <class 'insights.parsers.postconf.PostconfBuiltin'>
-        >>> postconf['smtpd_tls_loglevel'] == '0'
+        >>> type(_postconf)
+        <class 'insights.parsers.postconf._Postconf'>
+        >>> _postconf['smtpd_tls_loglevel'] == '0'
         True
-        >>> postconf['smtpd_tls_mandatory_ciphers'] == 'medium'
+        >>> _postconf['smtpd_tls_mandatory_ciphers'] == 'medium'
         True
-        >>> postconf['smtpd_tls_mandatory_exclude_ciphers'] == ''
+        >>> _postconf['smtpd_tls_mandatory_exclude_ciphers'] == ''
         True
-        >>> postconf['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+        >>> _postconf['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
         True
     """

@@ -47,3 +49,53 @@ def parse_content(self, content):
             raise SkipException

         self.update(data)
+
+
+@parser(Specs.postconf_builtin)
+class PostconfBuiltin(_Postconf):
+    """
+    Class for parsing the ``postconf -C builtin`` command.
+    Sample input::
+
+        smtpd_tls_loglevel = 0
+        smtpd_tls_mandatory_ciphers = medium
+        smtpd_tls_mandatory_exclude_ciphers =
+        smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
+
+    Examples:
+        >>> type(postconfb)
+        <class 'insights.parsers.postconf.PostconfBuiltin'>
+        >>> postconfb['smtpd_tls_loglevel'] == '0'
+        True
+        >>> postconfb['smtpd_tls_mandatory_ciphers'] == 'medium'
+        True
+        >>> postconfb['smtpd_tls_mandatory_exclude_ciphers'] == ''
+        True
+        >>> postconfb['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+        True
+    """
+
+
+@parser(Specs.postconf)
+class Postconf(_Postconf):
+    """
+    Class for parsing the ``postconf`` command.
+    Sample input::
+
+        smtpd_tls_loglevel = 0
+        smtpd_tls_mandatory_ciphers = medium
+        smtpd_tls_mandatory_exclude_ciphers =
+        smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
+
+    Examples:
+        >>> type(postconf)
+        <class 'insights.parsers.postconf.Postconf'>
+        >>> postconf['smtpd_tls_loglevel'] == '0'
+        True
+        >>> postconf['smtpd_tls_mandatory_ciphers'] == 'medium'
+        True
+        >>> postconf['smtpd_tls_mandatory_exclude_ciphers'] == ''
+        True
+        >>> postconf['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+        True
+    """
diff --git a/insights/parsers/tests/test_postconf.py b/insights/parsers/tests/test_postconf.py
index f6fc3538d..ab56a5e31 100644
--- a/insights/parsers/tests/test_postconf.py
+++ b/insights/parsers/tests/test_postconf.py
@@ -3,7 +3,7 @@
 from insights.core import ContentException
 from insights.parsers import postconf, SkipException
-from insights.parsers.postconf import PostconfBuiltin
+from insights.parsers.postconf import PostconfBuiltin, Postconf, _Postconf
 from insights.tests import context_wrap

 V_OUT1 = """
@@ -35,19 +35,46 @@ def test_PostconfBuiltin():
     assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'


+def test_Postconf():
+    with pytest.raises(SkipException):
+        Postconf(context_wrap(V_OUT1))
+
+    with pytest.raises(ContentException):
+        Postconf(context_wrap(V_OUT3))
+
+    p = Postconf(context_wrap(V_OUT2))
+    assert p['smtpd_tls_loglevel'] == '0'
+    assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
+    assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
+    assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
+
+
 def test_empty():
     with pytest.raises(SkipException):
         PostconfBuiltin(context_wrap(""))
+    with pytest.raises(SkipException):
+        Postconf(context_wrap(""))


 def test_invalid():
     with pytest.raises(SkipException):
         PostconfBuiltin(context_wrap("asdf"))
+    with pytest.raises(SkipException):
+        Postconf(context_wrap("asdf"))


 def test_doc_examples():
     env = {
-        'postconf': PostconfBuiltin(context_wrap(V_OUT2)),
+        'postconfb': PostconfBuiltin(context_wrap(V_OUT2)),
+        'postconf': Postconf(context_wrap(V_OUT2)),
+        '_postconf': _Postconf(context_wrap(V_OUT2)),
     }
     failed, total = doctest.testmod(postconf, globs=env)
     assert failed == 0
+
+    # TODO
+    # env = {
+    #     'postconf': Postconf(context_wrap(V_OUT2)),
+    # }
+    # failed, total = doctest.testmod(postconf, globs=env)
+    # assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index c34dfafc3..e0ae92571 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -477,6 +477,7 @@ class Specs(SpecSet):
     podman_list_containers = RegistryPoint()
     podman_list_images = RegistryPoint()
     postconf_builtin = RegistryPoint(filterable=True)
+    postconf = RegistryPoint(filterable=True)
     postgresql_conf = RegistryPoint()
     postgresql_log = RegistryPoint(multi_output=True, filterable=True)
     prev_uploader_log = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index bb7f9ce66..aed2f74f6 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -545,6 +545,7 @@ def pcp_enabled(broker):
     php_ini = first_file(["/etc/opt/rh/php73/php.ini", "/etc/opt/rh/php72/php.ini", "/etc/php.ini"])
     pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf")
     postconf_builtin = simple_command("/usr/sbin/postconf -C builtin")
+    postconf = simple_command("/usr/sbin/postconf")
     postgresql_conf = first_file([
         "/var/lib/pgsql/data/postgresql.conf",
         "/opt/rh/postgresql92/root/var/lib/pgsql/data/postgresql.conf",
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index ec6904397..c4431fc1f 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -176,6 +176,7 @@ class InsightsArchiveSpecs(Specs):
     pcs_quorum_status = simple_file("insights_commands/pcs_quorum_status")
     pcs_status = simple_file("insights_commands/pcs_status")
     postconf_builtin = simple_file("insights_commands/postconf_-C_builtin")
+    postconf = simple_file("insights_commands/postconf")
     ps_alxwww = simple_file("insights_commands/ps_alxwww")
     ps_aux = simple_file("insights_commands/ps_aux")
     ps_auxcww = simple_file("insights_commands/ps_auxcww")

From ece093339036724a75fb18a250c32c63d7c3d02f Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Wed, 2 Dec 2020 13:13:17 -0500
Subject: [PATCH 251/892] fix(playbook verify): remove placeholder module
 (#2844)

Signed-off-by: Jeremy Crafts
---
 insights/client/apps/playbook_verify/__init__.py | 0
 insights/client/apps/playbook_verify/__main__.py | 1 -
 2 files changed, 1 deletion(-)
 delete mode 100644 insights/client/apps/playbook_verify/__init__.py
 delete mode 100644 insights/client/apps/playbook_verify/__main__.py

diff --git a/insights/client/apps/playbook_verify/__init__.py b/insights/client/apps/playbook_verify/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/insights/client/apps/playbook_verify/__main__.py b/insights/client/apps/playbook_verify/__main__.py
deleted file mode 100644
index bd8478232..000000000
--- a/insights/client/apps/playbook_verify/__main__.py
+++ /dev/null
@@ -1 +0,0 @@
-print('We apologize for the inconvenience.')

From 6234b51cac83d6f63dec5b93feaebf413c407a56 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 3 Dec 2020 03:58:56 +0800
Subject: [PATCH 252/892] Remove the unused SAP specs (#2839)

Signed-off-by: Xiangce Liu
---
 insights/specs/default.py          | 8 --------
 insights/specs/insights_archive.py | 2 --
 2 files changed, 10 deletions(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index aed2f74f6..b45ced04b 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -598,14 +598,6 @@ def sap_sid(broker):
         sap = broker[Sap]
         return [sap.sid(i).lower() for i in sap.local_instances]

-    @datasource(Sap)
-    def sap_sid_name(broker):
-        """(list): Returns the list of (SAP SID, SAP InstanceName) """
-        sap = broker[Sap]
-        return [(sap.sid(i), i) for i in sap.local_instances]
-
-    sap_dev_disp = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_disp")
-    sap_dev_rd = foreach_collect(sap_sid_name, "/usr/sap/%s/%s/work/dev_rd")
     sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True)
     saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance")
     sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties")
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index c4431fc1f..5319b445c 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -194,8 +194,6 @@ class InsightsArchiveSpecs(Specs):
     rhev_data_center = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center")
     rndc_status = simple_file("insights_commands/rndc_status")
     rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"])
-    sap_dev_disp = glob_file("/usr/sap/*/*/work/dev_disp")
-    sap_dev_rd = glob_file("/usr/sap/*/*/work/dev_rd")
     sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version")
     saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance")
     satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine")

From b3fe2749c49c7c5cd894c19ebb69e83b743558c4 Mon Sep 17 00:00:00 2001
From: Link Dupont
Date: Thu, 3 Dec 2020 16:18:43 +0000
Subject: [PATCH 253/892] update uploader_json_map.json

Signed-off-by: Link Dupont
---
 insights/client/uploader_json_map.json | 42 ++++++++++++++++----------
 1 file changed, 26 insertions(+), 16 deletions(-)

diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json
index 240c1b0db..b44d5582c 100644
--- a/insights/client/uploader_json_map.json
+++ b/insights/client/uploader_json_map.json
@@ -622,8 +622,7 @@
     {
         "command": "/bin/ls -ln /var/tmp",
         "pattern": [
-            "/var/tmp",
-            "foreman-ssh-cmd"
+            "/var/tmp"
         ],
         "symbolic_name": "ls_var_tmp"
     },
@@ -1022,6 +1021,11 @@
         "pattern": [],
         "symbolic_name": "pcs_status"
     },
+    {
+        "command": "/usr/sbin/postconf -C builtin",
+        "pattern": [],
+        "symbolic_name": "postconf_builtin"
+    },
     {
         "command": "/bin/ps alxwww",
         "pattern": [
             "/usr/bin/openshift start master",
             "/usr/bin/openshift start node",
             "COMMAND",
+            "avahi",
             "bash",
             "chronyd",
             "clvmd",
     {
         "command": "/usr/bin/testparm -v -s",
         "pattern": [
             "Server role:",
-            "["
+            "[",
+            "server schannel"
         ],
         "symbolic_name": "testparm_v_s"
     },
         "pattern": [],
         "symbolic_name": "etc_machine_id"
     },
+    {
+        "file": "/etc/udev/rules.d/40-redhat.rules",
+        "pattern": [
+            "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\""
+        ],
+        "symbolic_name": "etc_udev_40_redhat_rules"
+    },
     {
         "file": "/proc/1/cgroup",
         "pattern": [],
     {
         "file": "/var/log/audit/audit.log",
         "pattern": [
-            "comm=\"logrotate\" path=\"/var/log/candlepin",
             "comm=\"virtlogd\" name=\"console.log\"",
             "type=AVC"
         ],
@@ -2330,7 +2342,6 @@
             "/input/input",
             "11000 E11000 duplicate key error index: pulp_database.repo_profile_applicability.$profile_hash_-1_repo_id_-1",
             "17763",
-            ": possible SYN flooding on port",
             ": segfault at ",
             "Abort command issued",
             "Broken pipe",
@@ -2376,7 +2387,6 @@
             "TX driver issue detected, PF reset issued",
             "This system does not support \"SSSE3\"",
             "Throttling request took",
-            "TypeError: object of type 'NoneType' has no len()",
             "Virtualization daemon",
             "] trap divide error ",
             "_NET_ACTIVE_WINDOW",
@@ -2398,7 +2408,6 @@
             "ext4_ext_search_left",
             "failed while handling",
             "failed with error -110",
-            "failed: Connection amqps:",
             "failed: Invalid argument",
             "failed: rpc error: code = 2 desc = unable to inspect docker image",
             "fiid_obj_get: 'present_countdown_value': data not available",
@@ -3126,9 +3135,13 @@
         "file": "/etc/rsyslog.conf",
         "pattern": [
             "$ActionQueueFileName",
+            "(",
+            ")",
             "imjournal",
             "imtcp",
-            "regex"
+            "regex",
+            "{",
+            "}"
         ],
         "symbolic_name": "rsyslog_conf"
     },
@@ -3318,9 +3331,7 @@
     {
         "file": "/etc/ssh/ssh_config",
         "pattern": [
-            "Host",
-            "Include",
-            "ProxyCommand"
+            "Include"
         ],
         "symbolic_name": "ssh_config"
     },
@@ -3523,7 +3534,6 @@
             "The name org.fedoraproject.FirewallD1 was not provided by any .service files",
             "The vm start process failed",
             "lastCheck",
-            "libvirtError: internal error: failed to format device alias",
             "looking for unfetched domain"
         ],
         "symbolic_name": "vdsm_log"
     },
@@ -4098,11 +4108,11 @@
         "glob": "/etc/rsyslog.d/*.conf",
         "pattern": [
             "$ActionQueueFileName",
+            "(",
+            ")",
             "imjournal",
             "imtcp",
             "regex",
-            "[",
-            "]",
             "{",
             "}"
         ],
@@ -4152,5 +4162,5 @@
     "pre_commands": {
         "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"
     },
-    "version": "2020-11-05T15:21:17.758393"
-}
+    "version": "2020-11-25T09:46:55.836781"
+}
\ No newline at end of file

From 4db6db6668bf84555a09a75b25f499b42b446b77 Mon Sep 17 00:00:00 2001
From: Glutexo
Date: Thu, 3 Dec 2020 20:38:19 +0100
Subject: [PATCH 254/892] Ultralight checkins (#2807)

* DRY Inventory URL to a property

  The Inventory API URL is used a few times in connection.py. DRIED this by
  storing it in a property derived from api_url. This is going to be used by
  the check-in action too.

  Signed-off-by: Štěpán Tomsa

* First working check-in prototype

  Added the new check-in method all the way down from the CLI argument to the
  connection object method. The check-in really happens with the machine ID,
  only printing out the result without any error check.

  Signed-off-by: Štěpán Tomsa

* DRY API request failure

  Extracted HTTP request error catching and logging. This will be used by the
  new checkin method.

  Signed-off-by: Štěpán Tomsa

* Correctly process responses

  Process the check-in responses and report success on success and failure on
  failure accordingly. Log some useful information along the way.

  Signed-off-by: Štěpán Tomsa

* Rename Machine ID to Insights ID

  Signed-off-by: Štěpán Tomsa

* Test the new check-in method

  Created tests for the new InsightsConnection.checkin method. It tests:
  - The API request URL, headers and body.
  - Fallback from Canonical Facts to Insights ID.
  - Raised exceptions and return values.

  Signed-off-by: Štěpán Tomsa

* Add tests for Inventory URL

  Created a new test suite for the InsightsConnection constructor. There, the
  Inventory URL is composed. Tested that this is done correctly.

  Signed-off-by: Štěpán Tomsa

* Add InsightsClient.checkin tests

  Added simple tests verifying that the Insights Client calls
  InsightsConnection.checkin.

  Signed-off-by: Štěpán Tomsa

* Do not check-in in offline mode

  Signed-off-by: Štěpán Tomsa

* Write tests for Insights Phase

  Signed-off-by: Štěpán Tomsa

* Remove non-ASCII character

  Signed-off-by: Štěpán Tomsa

* Lint

  Signed-off-by: Štěpán Tomsa

* Make check-in work with recent API changes

  The Inventory check-in API changed to be more restful.
  - POST is used to create a check-in.
  - 201 Created is returned on success.
  - 404 Not Found is returned if host not found.
  Changed the check-in code to reflect these changes. 400 Bad Request and
  200 OK are now considered unknown responses. Also fixed some test comments.

  Signed-off-by: Štěpán Tomsa

* Move the Canonical Facts to the root object

  Canonical facts are no longer posted as a separate property. They are
  first-class properties of the request. Patched Response.__str__ method to
  support results of get_canonical_facts. It removes the type key from the
  result and the string method does not expect that.

  Signed-off-by: Štěpán Tomsa

* Use Response in check-in tests

  get_canonical_facts does not return a pure dict, but a Response object
  instead. This makes an actual difference, because this object has its own
  string representation that used to fail before fixing. Replaced plain dicts
  in the check-in tests to use this type instead. Like that the tests pass
  only if the stringification works, which is what happens in the executive
  code.
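  A minimal sketch of the failure mode this covers (the value is
  illustrative)::

      >>> from insights.core.plugins import make_metadata
      >>> facts = make_metadata(insights_id="xxx")
      >>> del facts["type"]  # get_canonical_facts also strips the "type" key
      >>> str(facts)         # raised KeyError before the Response.__str__ fix
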
  Signed-off-by: Štěpán Tomsa

* Test Response stringification

  There was a bug in Response.__str__ that caused the canonical facts
  stringification to fail with KeyError. Added a test for the fix.

  Signed-off-by: Štěpán Tomsa

Co-authored-by: Jeremy Crafts
---
 insights/client/__init__.py                        |   8 +
 insights/client/config.py                          |   9 ++
 insights/client/connection.py                      |  76 +++++++--
 insights/client/phase/v1.py                        |   7 +
 insights/core/plugins.py                           |   3 +-
 .../tests/client/connection/test_checkin.py        | 145 ++++++++++++++++++
 insights/tests/client/connection/test_init.py      |  14 ++
 .../client/phase/test_pre_update_checkin.py        |  47 ++++++
 insights/tests/client/test_client.py               |  36 +++++
 insights/tests/core/test_plugins.py                |   7 +
 10 files changed, 336 insertions(+), 16 deletions(-)
 create mode 100644 insights/tests/client/connection/test_checkin.py
 create mode 100644 insights/tests/client/connection/test_init.py
 create mode 100644 insights/tests/client/phase/test_pre_update_checkin.py

diff --git a/insights/client/__init__.py b/insights/client/__init__.py
index 292f8c6ae..3df34962f 100644
--- a/insights/client/__init__.py
+++ b/insights/client/__init__.py
@@ -688,6 +688,14 @@ def list_specs(self):
         logger.info("When specifying these items in file-redaction.yaml, they must be prefixed with 'insights.specs.default.DefaultSpecs.', i.e. 'insights.specs.default.DefaultSpecs.httpd_V'")
         logger.info("This information applies only to Insights Core collection. To use Core collection, set core_collect=True in %s", self.config.conf)

+    @_net
+    def checkin(self):
+        if self.config.offline:
+            logger.error('Cannot check-in in offline mode.')
+            return None
+
+        return self.connection.checkin()
+

 def format_config(config):
     # Log config except the password
diff --git a/insights/client/config.py b/insights/client/config.py
index af0a187df..15206d439 100644
--- a/insights/client/config.py
+++ b/insights/client/config.py
@@ -97,6 +97,13 @@ def _core_collect_default():
         'action': "store_true",
         'group': 'actions'
     },
+    'checkin': {
+        'default': False,
+        'opt': ['--checkin'],
+        'help': 'Do a lightweight check-in instead of full upload',
+        'action': "store_true",
+        'group': 'actions'
+    },
     'cmd_timeout': {
         # non-CLI
         'default': constants.default_cmd_timeout
@@ -680,6 +687,8 @@ def _validate_options(self):
                 raise ValueError('Cannot check registration status in offline mode.')
             if self.test_connection:
                 raise ValueError('Cannot run connection test in offline mode.')
+            if self.checkin:
+                raise ValueError('Cannot check in in offline mode.')
         if self.output_dir and self.output_file:
             raise ValueError('Specify only one: --output-dir or --output-file.')
         if self.output_dir == '':
diff --git a/insights/client/connection.py b/insights/client/connection.py
index e888460c7..77ab84744 100644
--- a/insights/client/connection.py
+++ b/insights/client/connection.py
@@ -49,6 +49,8 @@
 URLLIB3_LOGGER = logging.getLogger('requests.packages.urllib3.connectionpool')
 URLLIB3_LOGGER.setLevel(logging.WARNING)

+REQUEST_FAILED_EXCEPTIONS = (requests.ConnectionError, requests.Timeout)
+
 # TODO: Document this, or turn it into a real option
 if os.environ.get('INSIGHTS_DEBUG_HTTP'):
     import httplib
@@ -60,6 +62,16 @@
     requests_log.propagate = True


+def _host_not_found():
+    raise Exception("Error: failed to find host with matching machine-id. Run insights-client --status to check registration status")
+
+
+def _api_request_failed(exception, message='The Insights API could not be reached.'):
+    logger.error(exception)
+    if message:
+        logger.error(message)
+
+
 class InsightsConnection(object):

     """
@@ -121,6 +133,8 @@ def __init__(self, config):
         # workaround for a workaround for a workaround
         base_url_base = self.base_url.split('/platform')[0]
         self.branch_info_url = base_url_base + '/v1/branch_info'
+        self.inventory_url = self.api_url + "/inventory/v1"
+
         self.authmethod = self.config.authmethod
         self.systemid = self.config.systemid or None
         self.get_proxies()
@@ -679,12 +693,11 @@ def _fetch_system_by_machine_id(self):
             if self.config.legacy_upload:
                 url = self.base_url + '/platform/inventory/v1/hosts?insights_id=' + machine_id
             else:
-                url = self.base_url + '/inventory/v1/hosts?insights_id=' + machine_id
+                url = self.inventory_url + '/hosts?insights_id=' + machine_id
             logger.log(NETWORK, "GET %s", url)
             res = self.session.get(url, timeout=self.config.http_timeout)
-        except (requests.ConnectionError, requests.Timeout) as e:
-            logger.error(e)
-            logger.error('The Insights API could not be reached.')
+        except REQUEST_FAILED_EXCEPTIONS as e:
+            _api_request_failed(e)
             return None
         try:
             if (self.handle_fail_rcs(res)):
@@ -754,7 +767,7 @@ def unregister(self):
             return False
         try:
             logger.debug("Unregistering host...")
-            url = self.api_url + "/inventory/v1/hosts/" + results[0]['id']
+            url = self.inventory_url + "/hosts/" + results[0]['id']
             logger.log(NETWORK, "DELETE %s", url)
             response = self.session.delete(url)
             response.raise_for_status()
@@ -940,8 +953,8 @@ def _legacy_set_display_name(self, display_name):
                 logger.error('Unable to set display name: %s %s',
                              res.status_code, res.text)
                 return False
-        except (requests.ConnectionError, requests.Timeout, ValueError) as e:
-            logger.error(e)
+        except REQUEST_FAILED_EXCEPTIONS + (ValueError,) as e:
+            _api_request_failed(e, None)
             # can't connect, run connection test
             return False
@@ -957,13 +970,12 @@ def set_display_name(self, display_name):
             return system
         inventory_id = system[0]['id']

-        req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id
+        req_url = self.inventory_url + '/hosts/' + inventory_id
         try:
             logger.log(NETWORK, "PATCH %s", req_url)
             res = self.session.patch(req_url, json={'display_name': display_name})
-        except (requests.ConnectionError, requests.Timeout) as e:
-            logger.error(e)
-            logger.error('The Insights API could not be reached.')
+        except REQUEST_FAILED_EXCEPTIONS as e:
+            _api_request_failed(e)
             return False
         if (self.handle_fail_rcs(res)):
             logger.error('Could not update display name.')
@@ -986,8 +998,7 @@ def get_diagnosis(self, remediation_id=None):
             logger.log(NETWORK, "GET %s", diag_url)
             res = self.session.get(diag_url, params=params, timeout=self.config.http_timeout)
         except (requests.ConnectionError, requests.Timeout) as e:
-            logger.error(e)
-            logger.error('The Insights API could not be reached.')
+            _api_request_failed(e)
             return False
         if (self.handle_fail_rcs(res)):
             logger.error('Unable to get diagnosis data: %s %s',
@@ -1030,14 +1041,14 @@ def get_advisor_report(self):
         '''
         Retrieve advisor report
         '''
-        url = self.base_url + "/inventory/v1/hosts?insights_id=%s" % generate_machine_id()
+        url = self.inventory_url + "/hosts?insights_id=%s" % generate_machine_id()
         content = self._get(url)
         if content is None:
             return None

         host_details = json.loads(content)
         if host_details["total"] < 1:
-            raise Exception("Error: failed to find host with matching machine-id. Run insights-client --status to check registration status")
+            _host_not_found()
         if host_details["total"] > 1:
             raise Exception("Error: multiple hosts detected (insights_id = %s)" % generate_machine_id())

@@ -1059,3 +1070,38 @@ def get_advisor_report(self):
             logger.debug("Wrote \"/var/lib/insights/insights-details.json\"")

         return json.loads(content)
+
+    def checkin(self):
+        '''
+        Sends an ultralight check-in request containing only the Canonical Facts.
+        '''
+        logger.info("Checking in...")
+
+        try:
+            canonical_facts = get_canonical_facts()
+        except Exception as e:
+            logger.debug('Error getting canonical facts: %s', e)
+            logger.debug('Falling back to only machine ID.')
+            insights_id = generate_machine_id()
+            canonical_facts = {"insights_id": str(insights_id)}
+
+        url = self.inventory_url + "/hosts/checkin"
+        logger.debug("Sending check-in request to %s with %s" % (url, canonical_facts))
+        try:
+            response = self.session.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(canonical_facts))
+            # Change to POST when the API is fixed.
+        except REQUEST_FAILED_EXCEPTIONS as exception:
+            _api_request_failed(exception)
+            return None
+        logger.debug("Check-in response status code %d" % response.status_code)
+
+        if response.status_code == requests.codes.CREATED:
+            # Remove OK when the API is fixed.
+            logger.info("Successfully checked in!")
+            return True
+        elif response.status_code == requests.codes.NOT_FOUND:
+            # Remove BAD_REQUEST when the API is fixed.
+            _host_not_found()
+        else:
+            logger.debug("Check-in response body %s" % response.text)
+            raise RuntimeError("Unknown check-in API response")
diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py
index 268098077..0e3d1c87e 100644
--- a/insights/client/phase/v1.py
+++ b/insights/client/phase/v1.py
@@ -110,6 +110,13 @@ def pre_update(client, config):
         print(json.dumps(resp))
         sys.exit(constants.sig_kill_ok)

+    if config.checkin:
+        checkin_success = client.checkin()
+        if checkin_success:
+            sys.exit(constants.sig_kill_ok)
+        else:
+            sys.exit(constants.sig_kill_bad)
+

 @phase
 def update(client, config):
diff --git a/insights/core/plugins.py b/insights/core/plugins.py
index 1096ce302..95ce144c0 100644
--- a/insights/core/plugins.py
+++ b/insights/core/plugins.py
@@ -471,7 +471,8 @@ def __str__(self):
         keys = sorted(self)
         if self.key_name in keys:
             keys.remove(self.key_name)
-        keys.remove("type")
+        if "type" in keys:
+            keys.remove("type")

         buf = StringIO()
         if not keys:
diff --git a/insights/tests/client/connection/test_checkin.py b/insights/tests/client/connection/test_checkin.py
new file mode 100644
index 000000000..7b399db0c
--- /dev/null
+++ b/insights/tests/client/connection/test_checkin.py
@@ -0,0 +1,145 @@
+from json import dumps
+from uuid import uuid4
+
+from mock.mock import Mock, patch
+from pytest import mark
+from pytest import raises
+from requests import ConnectionError
+from requests import Timeout
+from requests import codes
+
+from insights.client.connection import InsightsConnection
+from insights.core.plugins import make_metadata
+
+
+def _get_canonical_facts_response(canonical_facts):
+    d = make_metadata(**canonical_facts)
+    del d["type"]
+    return d
+
+
+@patch(
+    "insights.client.connection.get_canonical_facts",
+    return_value=_get_canonical_facts_response({"subscription_manager_id": str(uuid4())})
+)
+@patch(
+    "insights.client.connection.InsightsConnection._init_session",
+    **{"return_value.post.return_value.status_code": codes.CREATED}
+)
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_canonical_facts_request(get_proxies, init_session, get_canonical_facts):
+    """
+    A POST requests to the check-in endpoint is issued with correct headers and
+    body containing Canonical Facts.
+    """
+    config = Mock(base_url="www.example.com")
+
+    connection = InsightsConnection(config)
+    connection.checkin()
+
+    expected_url = connection.inventory_url + "/hosts/checkin"
+    expected_headers = {"Content-Type": "application/json"}
+    expected_data = get_canonical_facts.return_value
+    init_session.return_value.post.assert_called_once_with(
+        expected_url, headers=expected_headers, data=dumps(expected_data)
+    )
+
+
+@patch("insights.client.connection.generate_machine_id", return_value=str(uuid4()))
+@patch("insights.client.connection.get_canonical_facts", side_effect=RuntimeError())
+@patch(
+    "insights.client.connection.InsightsConnection._init_session",
+    **{"return_value.post.return_value.status_code": codes.CREATED}
+)
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_insights_id_request(get_proxies, init_session, get_canonical_facts, generate_machine_id):
+    """
+    A POST requests to the check-in endpoint is issued with correct headers and
+    body containing only an Insights ID if Canonical Facts collection fails.
+    """
+    config = Mock(base_url="www.example.com")
+
+    connection = InsightsConnection(config)
+    connection.checkin()
+
+    expected_url = connection.inventory_url + "/hosts/checkin"
+    expected_headers = {"Content-Type": "application/json"}
+    expected_data = {"insights_id": generate_machine_id.return_value}
+    init_session.return_value.post.assert_called_once_with(
+        expected_url, headers=expected_headers, data=dumps(expected_data)
+    )
+
+
+@mark.parametrize(("exception",), ((ConnectionError,), (Timeout,)))
+@patch(
+    "insights.client.connection.get_canonical_facts",
+    return_value=_get_canonical_facts_response({"subscription_manager_id": "notauuid"})
+)
+@patch("insights.client.connection.InsightsConnection._init_session")
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_request_http_failure(get_proxies, init_session, get_canonical_facts, exception):
+    """
+    If the checkin-request fails, None is returned.
+    """
+    init_session.return_value.post.side_effect = exception
+
+    config = Mock(base_url="www.example.com")
+
+    connection = InsightsConnection(config)
+    result = connection.checkin()
+    assert result is None
+
+
+@patch("insights.client.connection.get_canonical_facts", return_value={})
+@patch(
+    "insights.client.connection.InsightsConnection._init_session",
+    **{"return_value.post.side_effect": RuntimeError()}
+)
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_request_unknown_exception(get_proxies, init_session, get_canonical_facts):
+    """
+    If an unknown exception occurs, the call crashes.
+    """
+    config = Mock(base_url="www.example.com")
+    connection = InsightsConnection(config)
+
+    expected_exception = type(init_session.return_value.post.side_effect)
+    with raises(expected_exception):
+        connection.checkin()
+
+
+@patch("insights.client.connection.get_canonical_facts", return_value={})
+@patch(
+    "insights.client.connection.InsightsConnection._init_session",
+    **{"return_value.post.return_value.status_code": codes.CREATED}
+)
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_response_success(get_proxies, init_session, get_canonical_facts):
+    """
+    If a CREATED status code is received, the check-in was successful.
+    """
+    config = Mock(base_url="www.example.com")
+    connection = InsightsConnection(config)
+
+    result = connection.checkin()
+    assert result is True
+
+
+@mark.parametrize(
+    ("status_code",),
+    ((codes.OK,), (codes.BAD_REQUEST,), (codes.NOT_FOUND,), (codes.SERVER_ERROR,))
+)
+@patch("insights.client.connection.get_canonical_facts", return_value=_get_canonical_facts_response({}))
+@patch("insights.client.connection.InsightsConnection._init_session")
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_response_failure(get_proxies, init_session, get_canonical_facts, status_code):
+    """
+    If an unexpected status code is received, the check-in failed and an exception is raised.
+    """
+    init_session.return_value.post.return_value.status_code = status_code
+
+    config = Mock(base_url="www.example.com")
+    connection = InsightsConnection(config)
+
+    with raises(Exception):
+        connection.checkin()
diff --git a/insights/tests/client/connection/test_init.py b/insights/tests/client/connection/test_init.py
new file mode 100644
index 000000000..d88cf008f
--- /dev/null
+++ b/insights/tests/client/connection/test_init.py
@@ -0,0 +1,14 @@
+from mock.mock import Mock
+from mock.mock import patch
+from insights.client.connection import InsightsConnection
+
+
+@patch("insights.client.connection.InsightsConnection._init_session")
+@patch("insights.client.connection.InsightsConnection.get_proxies")
+def test_inventory_url(get_proxies, init_session):
+    """
+    Inventory URL is composed correctly.
+    """
+    config = Mock(base_url="www.example.com", insecure_connection=False)
+    connection = InsightsConnection(config)
+    assert connection.inventory_url == "https://www.example.com/inventory/v1"
diff --git a/insights/tests/client/phase/test_pre_update_checkin.py b/insights/tests/client/phase/test_pre_update_checkin.py
new file mode 100644
index 000000000..d07b7abc3
--- /dev/null
+++ b/insights/tests/client/phase/test_pre_update_checkin.py
@@ -0,0 +1,47 @@
+# -*- coding: UTF-8 -*-
+
+from insights.client.constants import InsightsConstants
+from insights.client.phase.v1 import pre_update
+from mock.mock import patch
+from pytest import raises
+
+
+def patch_insights_config(old_function):
+    patcher = patch("insights.client.phase.v1.InsightsConfig",
+                    **{"return_value.load_all.return_value.auto_config": False,
+                       "return_value.load_all.return_value.version": False,
+                       "return_value.load_all.return_value.validate": False,
+                       "return_value.load_all.return_value.enable_schedule": False,
+                       "return_value.load_all.return_value.disable_schedule": False,
+                       "return_value.load_all.return_value.analyze_container": False,
+                       "return_value.load_all.return_value.test_connection": False,
+                       "return_value.load_all.return_value.support": False,
+                       "return_value.load_all.return_value.diagnosis": False,
+                       "return_value.load_all.return_value.checkin": True})
+    return patcher(old_function)
+
+
+@patch("insights.client.phase.v1.InsightsClient", **{"return_value.checkin.return_value": True})
+@patch_insights_config
+def test_checkin_success(insights_config, insights_client):
+    """
+    A successful check-in causes the pre_update phase to exit with a success code.
+    """
+    with raises(SystemExit) as exc_info:
+        pre_update()
+
+    insights_client.return_value.checkin.assert_called_once_with()
+    assert exc_info.value.code == InsightsConstants.sig_kill_ok
+
+
+@patch("insights.client.phase.v1.InsightsClient", **{"return_value.checkin.return_value": False})
+@patch_insights_config
+def test_checkin_failure(insights_config, insights_client):
+    """
+    A failed check-in causes the pre_update phase to exit with an error code.
+    """
+    with raises(SystemExit) as exc_info:
+        pre_update()
+
+    insights_client.return_value.checkin.assert_called_once_with()
+    assert exc_info.value.code == InsightsConstants.sig_kill_bad
diff --git a/insights/tests/client/test_client.py b/insights/tests/client/test_client.py
index 75157735c..5e7ae9897 100644
--- a/insights/tests/client/test_client.py
+++ b/insights/tests/client/test_client.py
@@ -10,6 +10,8 @@
 from insights.client.constants import InsightsConstants as constants
 from insights.client.utilities import generate_machine_id
 from mock.mock import patch, Mock, call
+from pytest import mark
+from pytest import raises


 class FakeConnection(object):
@@ -618,3 +620,37 @@ def test_copy_to_output_file_obfuscate_on(shutil_, _copy_soscleaner_files):
     client.copy_to_output_file('test')
     shutil_.copyfile.assert_called_once()
     _copy_soscleaner_files.assert_called_once()
+
+
+@mark.parametrize(("result",), ((True,), (None,)))
+def test_checkin_result(result):
+    config = InsightsConfig()
+    client = InsightsClient(config)
+    client.connection = Mock(**{"checkin.return_value": result})
+    client.session = True
+
+    actual_result = client.checkin()
+    client.connection.checkin.assert_called_once_with()
+    assert actual_result is result
+
+
+def test_checkin_error():
+    config = InsightsConfig()
+    client = InsightsClient(config)
+    client.connection = Mock(**{"checkin.side_effect": Exception})
+    client.session = True
+
+    with raises(Exception):
+        client.checkin()
+
+    client.connection.checkin.assert_called_once_with()
+
+
+def test_checkin_offline():
+    config = InsightsConfig(offline=True)
+    client = InsightsClient(config)
+    client.connection = Mock()
+
+    result = client.checkin()
+    assert result is None
+    client.connection.checkin.assert_not_called()
diff --git a/insights/tests/core/test_plugins.py b/insights/tests/core/test_plugins.py
index 870f3f0d0..7c40af24f 100644
--- a/insights/tests/core/test_plugins.py
+++ b/insights/tests/core/test_plugins.py
@@ -62,3 +62,10 @@ def test_response_too_big():
         "error_key": "TESTING",
         "max_detail_length_error": len(json.dumps({"error_key": "TESTING", "type": "rule", "big": content}))
     }
+
+
+def test_str_without_type():
+    d = plugins.make_response("TESTING", foo="bar")
+    del d["type"]
+    str(d)
+    assert True

From 5da8a507d8ed65356b68c028328c6834b2fa34d5 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Tue, 8 Dec 2020 15:57:02 +0800
Subject: [PATCH 255/892] Skip the 'grep -F' in ps in the first pass of
 parsing (#2853)

* Skip the 'grep -F' in ps in the first pass of parsing

  Signed-off-by: Xiangce Liu

* add a blank space to the end of 'grep -F '

  Signed-off-by: Xiangce Liu
---
 insights/combiners/tests/test_ps.py | 22 ++++++++++++++++++++++
 insights/parsers/ps.py              |  6 ++----
 2 files changed, 24 insertions(+), 4 deletions(-)

diff --git a/insights/combiners/tests/test_ps.py b/insights/combiners/tests/test_ps.py
index e0bed9ea5..1d5e49b66 100644
--- a/insights/combiners/tests/test_ps.py
+++ b/insights/combiners/tests/test_ps.py
@@ -246,3 +246,25 @@ def test_docs():
     }
     failed, total = doctest.testmod(ps, globs=env)
     assert failed == 0
+
+
+PS_ALXWWW_W_GREP = """
+F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
+4 0 1 0 20 0 128292 6944 ep_pol Ss ? 0:02 /usr/lib/systemd/systemd --switched-root --system --deserialize 22
+1 0 2 0 20 0 0 0 kthrea S ? 0:00 [kthreadd]
+1 0 3 2 20 0 0 0 smpboo S ? 0:00 [ksoftirqd/0]
+5 0 4 2 20 0 0 0 worker S ? 0:00 [kworker/0:0]
+1 0 5 2 0 -20 0 0 worker S< ? 0:00 [kworker/0:0H]
+4 0 1585 1 20 0 39336 3872 ep_pol Ss ? 0:00 /usr/lib/systemd/systemd-journald
+5 0 2964 1 16 -4 55520 900 ep_pol S
Date: Tue, 8 Dec 2020 16:09:50 +0800
Subject: [PATCH 256/892] Skip None value when doing keyword_search (#2852)

Signed-off-by: Xiangce Liu
---
 insights/parsers/__init__.py                  |  6 +++---
 insights/parsers/tests/test_parsers_module.py | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/insights/parsers/__init__.py b/insights/parsers/__init__.py
index cb899be32..77aa03ed9 100644
--- a/insights/parsers/__init__.py
+++ b/insights/parsers/__init__.py
@@ -511,9 +511,9 @@ def keyword_search(rows, **kwargs):
     # __startswith
     matchers = {
         'default': lambda s, v: s == v,
-        'contains': lambda s, v: v in s,
-        'startswith': lambda s, v: s.startswith(v),
-        'lower_value': lambda s, v: s.lower() == v.lower(),
+        'contains': lambda s, v: s is not None and v in s,
+        'startswith': lambda s, v: s is not None and s.startswith(v),
+        'lower_value': lambda s, v: None not in (s, v) and s.lower() == v.lower(),
     }

     def key_match(row, key, value):
diff --git a/insights/parsers/tests/test_parsers_module.py b/insights/parsers/tests/test_parsers_module.py
index 3795dc162..750c68efe 100644
--- a/insights/parsers/tests/test_parsers_module.py
+++ b/insights/parsers/tests/test_parsers_module.py
@@ -620,6 +620,24 @@ def test_keyword_search():
         certificate__contains='encryption'
     ) == []

+PS_LIST = [
+    {'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'},
+    {'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'},
+    {'PID': '725', 'PPID': '2', 'COMMAND': 'xfsalloc', '_line': ' 725 2 xfsalloc'},
+    {'PID': '726', 'PPID': '2', 'COMMAND': None, '_line': ' 726 2 grep -F xx'},
+]
+
+
+def test_keyword_search_None():
+    # Normal search
+    assert keyword_search(PS_LIST, COMMAND__default=None)[0]['PID'] == '726'
+    assert keyword_search(PS_LIST, _line__contains='alloc')[0]['PID'] == '725'
+    assert keyword_search(PS_LIST, COMMAND__startswith='xfs')[0]['PID'] == '725'
+    assert len(keyword_search(PS_LIST, COMMAND__lower_value='KDMFLUSH')) == 2
+    # Check that searches for non-existing keys
+    assert keyword_search(PS_LIST, NONE__default=None) == []
+    assert keyword_search(PS_LIST, NONE__startswith='xfs') == []

 def test_parse_exception():
     with pytest.raises(ParseException) as e_info:

From 73a1ba609d65a780d7032d58126d20e2851c60d0 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 9 Dec 2020 15:26:40 +0800
Subject: [PATCH 257/892] Enhance the vendor of InstalledRpm (#2855)

* Enhance the vendor of InstalledRpm

  - The current `InstalledRpm.vendor` treats the `(none)` and `no vendor
    outputted` as the same

  Updated as:
  - set the value as `'(none)'` string, when the vendor is `(none)`
  - set the value as `None` (NoneType), when there is no `vendor` column/item.
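For example, with the updated logic (a minimal sketch; the package data is
illustrative)::

    >>> from insights.parsers.installed_rpms import InstalledRpm
    >>> InstalledRpm({'name': 'libnl', 'version': '1.1.4', 'release': '2.el6',
    ...               'arch': 'x86_64', 'vendor': '(none)'}).vendor
    '(none)'
    >>> InstalledRpm({'name': 'libnl', 'version': '1.1.4', 'release': '2.el6',
    ...               'arch': 'x86_64'}).vendor is None
    True
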
  Signed-off-by: Xiangce Liu

* Update the docstring of 'vendor' attr

  Signed-off-by: Xiangce Liu
---
 insights/parsers/installed_rpms.py            | 4 ++--
 insights/parsers/tests/test_installed_rpms.py | 5 +++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py
index 2b790a4c5..bb5956524 100644
--- a/insights/parsers/installed_rpms.py
+++ b/insights/parsers/installed_rpms.py
@@ -387,7 +387,7 @@ def __init__(self, data):
         """bool: True when RPM package is signed by Red Hat,
         False when RPM package is not signed by Red Hat, None
         when no sufficient info to determine"""
         self.vendor = None
-        """str: RPM package vendor."""
+        """str: RPM package vendor. `None` when no 'vendor' info"""

         if isinstance(data, six.string_types):
             data = self._parse_package(data)
@@ -395,7 +395,7 @@ def __init__(self, data):
         for k, v in data.items():
             setattr(self, k, v)
         self.epoch = data['epoch'] if 'epoch' in data and data['epoch'] != '(none)' else '0'
-        self.vendor = data['vendor'] if 'vendor' in data and data['vendor'] != '(none)' else None
+        self.vendor = data['vendor'] if 'vendor' in data else None
         _gpg_key_pos = data.get('sigpgp', data.get('rsaheader', data.get('pgpsig_short', data.get('pgpsig', data.get('vendor', '')))))
         if _gpg_key_pos:
             self.redhat_signed = any(key in _gpg_key_pos for key in self.PRODUCT_SIGNING_KEYS)
diff --git a/insights/parsers/tests/test_installed_rpms.py b/insights/parsers/tests/test_installed_rpms.py
index 063c9bffd..a6f0636cd 100644
--- a/insights/parsers/tests/test_installed_rpms.py
+++ b/insights/parsers/tests/test_installed_rpms.py
@@ -63,7 +63,7 @@
 {"name": "libteam","version": "1.17","epoch": "(none)","release": "6.el7_2","arch": "x86_64","installtime": "Fri 24 Jun 2016 04:18:17 PM EDT","buildtime": "1454604485","rsaheader": "RSA/SHA256, Wed 17 Feb 2016 02:25:16 AM EST, Key ID 199e2f91fd431d51","dsaheader": "(none)","srpm": "libteam-1.17-6.el7_2.src.rpm"}
 {"name": "crash","epoch":"(none)","version":"7.1.0","release":"8.el6","arch":"x86_64","installtime":"Fri Jul 13 06:53:28 2018","buildtime":"1524061059","vendor":"Red Hat, Inc.","buildhost":"x86-032.build.eng.bos.redhat.com","sigpgp":"RSA/8, Wed Apr 18 10:40:59 2018, Key ID 199e2f91fd431d51"}
 {"name": "xorg-x11-drv-vmmouse","epoch":"(none)","version":"13.1.0","release":"1.el6","arch":"x86_64","installtime":"Thu Aug  4 12:23:32 2016","buildtime":"1447274489","vendor":"Red Hat, Inc.","buildhost":"x86-028.build.eng.bos.redhat.com","sigpgp":"RSA/8, Mon Apr  4 11:35:36 2016, Key ID 199e2f91fd431d51"}
-{"name": "libnl","epoch":"(none)","version":"1.1.4","release":"2.el6","arch":"x86_64","installtime":"Mon Jun 16 13:21:21 2014","buildtime":"1378459378","vendor":"Red Hat, Inc.","buildhost":"x86-007.build.bos.redhat.com","sigpgp":"RSA/8, Mon Sep 23 07:25:47 2013, Key ID 199e2f91fd431d51"}
+{"name": "libnl","epoch":"(none)","version":"1.1.4","release":"2.el6","arch":"x86_64","installtime":"Mon Jun 16 13:21:21 2014","buildtime":"1378459378","vendor":"(none)","buildhost":"x86-007.build.bos.redhat.com","sigpgp":"RSA/8, Mon Sep 23 07:25:47 2013, Key ID 199e2f91fd431d51"}
 '''.strip()

 RPMS_MULTIPLE = '''
@@ -204,7 +204,8 @@ def test_from_json():
     assert rpms.get_max("util-linux").epoch == '0'
     assert rpms.get_max("jboss-servlet-3.0-api").redhat_signed

-    assert rpms.newest('libnl').vendor == 'Red Hat, Inc.'
+    assert rpms.newest('libnl').vendor == '(none)'
+    assert rpms.newest('crash').vendor == 'Red Hat, Inc.'
    assert rpms.newest('log4j').vendor is None

From 12c5fef8506745c3b9a0bdfeba87b8b05b6e6d44 Mon Sep 17 00:00:00 2001
From: Xiaoxue Wang
Date: Thu, 10 Dec 2020 05:27:07 +0800
Subject: [PATCH 258/892] Add new spec and parser for "ls -lan /usr/bin"
 (#2837)

* Add new spec and parser for "ls -lan /usr/bin"

  Signed-off-by: XiaoXue Wang

* Adjust the order of the inserted spec line

  Signed-off-by: XiaoXue Wang
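Note for consumers: the spec is filterable and ships with only the default
``total`` filter, so a rule that needs further entries must register its own
filter before collection; a sketch (the "python" pattern is illustrative)::

    from insights.core.filters import add_filter
    from insights.specs import Specs

    add_filter(Specs.ls_usr_bin, "python")
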
---
 docs/shared_parsers_catalog/ls_usr_bin.rst |  3 ++
 insights/parsers/ls_usr_bin.py             | 58 ++++++++++++++++
 insights/parsers/tests/test_ls_usr_bin.py  | 45 +++++++++++++
 insights/specs/__init__.py                 |  1 +
 insights/specs/default.py                  |  1 +
 insights/specs/insights_archive.py         |  1 +
 6 files changed, 109 insertions(+)
 create mode 100644 docs/shared_parsers_catalog/ls_usr_bin.rst
 create mode 100644 insights/parsers/ls_usr_bin.py
 create mode 100644 insights/parsers/tests/test_ls_usr_bin.py

diff --git a/docs/shared_parsers_catalog/ls_usr_bin.rst b/docs/shared_parsers_catalog/ls_usr_bin.rst
new file mode 100644
index 000000000..8f07c71d9
--- /dev/null
+++ b/docs/shared_parsers_catalog/ls_usr_bin.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.ls_usr_bin
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/ls_usr_bin.py b/insights/parsers/ls_usr_bin.py
new file mode 100644
index 000000000..ba524aa3d
--- /dev/null
+++ b/insights/parsers/ls_usr_bin.py
@@ -0,0 +1,58 @@
+"""
+LsUsrBin - command ``ls -lan /usr/bin``
+=======================================
+
+The ``ls -lan /usr/bin`` command provides information for the listing of the
+``/usr/bin`` directory.
+
+Sample input is shown in the Examples. See ``FileListing`` class for
+additional information.
+
+For ls_usr_bin, it may collect a lot of files or directories that may not be
+necessary, so a default filter `add_filter(Specs.ls_usr_bin, "total")` has been
+added in this parser.
+
+If additional files or directories need to be collected by this parser, please
+add the related filters to the corresponding code.
+
+Sample added filter:
+
+    >>> add_filter(Specs.ls_usr_bin, "python")
+    >>> add_filter(Specs.ls_usr_bin, "virt")
+
+Sample directory list collected::
+
+    total 41472
+    lrwxrwxrwx.  1 0  0 7 Oct 22 2019 python -> python2
+    -rwxr-xr-x.  1 0  0 2558 Apr 10 2019 python-argcomplete-check-easy-install-script
+    -rwxr-xr-x.  1 0  0 318 Apr 10 2019 python-argcomplete-tcsh
+    lrwxrwxrwx.  1 0  0 14 Oct 22 2019 python-config -> python2-config
+    lrwxrwxrwx.  1 0  0 9 Oct 22 2019 python2 -> python2.7
+
+Examples:
+
+    >>> "accessdb" in ls_usr_bin
+    False
+    >>> "/usr/bin" in ls_usr_bin
+    True
+    >>> ls_usr_bin.dir_entry('/usr/bin', 'python-argcomplete-tcsh')['type']
+    '-'
+    >>> ls_usr_bin.dir_entry('/usr/bin', 'python2')['type']
+    'l'
+"""
+
+
+from insights.core.filters import add_filter
+from insights.specs import Specs
+
+from .. import CommandParser, parser
+from .. import FileListing
+
+
+add_filter(Specs.ls_usr_bin, "total")
+
+
+@parser(Specs.ls_usr_bin)
+class LsUsrBin(CommandParser, FileListing):
+    """Parses output of ``ls -lan /usr/bin`` command."""
+    pass
diff --git a/insights/parsers/tests/test_ls_usr_bin.py b/insights/parsers/tests/test_ls_usr_bin.py
new file mode 100644
index 000000000..178fcd9b2
--- /dev/null
+++ b/insights/parsers/tests/test_ls_usr_bin.py
@@ -0,0 +1,45 @@
+import doctest
+
+from insights.core.filters import add_filter
+from insights.parsers import ls_usr_bin
+from insights.parsers.ls_usr_bin import LsUsrBin
+from insights.specs import Specs
+from insights.tests import context_wrap
+
+LS_USR_BIN = """
+total 41472
+lrwxrwxrwx. 1 0 0 7 Oct 22 2019 python -> python2
+-rwxr-xr-x. 1 0 0 2558 Apr 10 2019 python-argcomplete-check-easy-install-script
+-rwxr-xr-x. 1 0 0 318 Apr 10 2019 python-argcomplete-tcsh
+lrwxrwxrwx. 1 0 0 14 Oct 22 2019 python-config -> python2-config
+lrwxrwxrwx. 1 0 0 9 Oct 22 2019 python2 -> python2.7
+"""
+
+
+def test_ls_usr_bin():
+    ls_usr_bin = LsUsrBin(context_wrap(LS_USR_BIN, path='insights_commands/ls_-ln_.usr.bin'))
+    assert ls_usr_bin.files_of('/usr/bin') == ['python', 'python-argcomplete-check-easy-install-script', 'python-argcomplete-tcsh', 'python-config', 'python2']
+    python = ls_usr_bin.dir_entry('/usr/bin', 'python')
+    assert python is not None
+    assert python == {'date': 'Oct 22 2019',
+                      'dir': '/usr/bin',
+                      'group': '0',
+                      'link': 'python2',
+                      'links': 1,
+                      'name': 'python',
+                      'owner': '0',
+                      'perms': 'rwxrwxrwx.',
+                      'raw_entry': 'lrwxrwxrwx. 1 0 0 7 Oct 22 2019 python -> python2',
+                      'size': 7,
+                      'type': 'l'}
+
+
+def test_ls_usr_bin_doc_examples():
+    env = {
+        'Specs': Specs,
+        'add_filter': add_filter,
+        'LsUsrBin': LsUsrBin,
+        'ls_usr_bin': LsUsrBin(context_wrap(LS_USR_BIN, path='insights_commands/ls_-ln_.usr.bin')),
+    }
+    failed, total = doctest.testmod(ls_usr_bin, globs=env)
+    assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index e0ae92571..ecf2e6aef 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -304,6 +304,7 @@ class Specs(SpecSet):
     ls_run_systemd_generator = RegistryPoint()
     ls_R_var_lib_nova_instances = RegistryPoint()
     ls_sys_firmware = RegistryPoint()
+    ls_usr_bin = RegistryPoint(filterable=True)
     ls_usr_lib64 = RegistryPoint(filterable=True)
     ls_usr_sbin = RegistryPoint(filterable=True)
     ls_var_lib_mongodb = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index b45ced04b..05f391a4d 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -403,6 +403,7 @@ def httpd_cmd(broker):
     ls_R_var_lib_nova_instances = simple_command("/bin/ls -laR /var/lib/nova/instances")
     ls_sys_firmware = simple_command("/bin/ls -lanR /sys/firmware")
     ls_tmp = simple_command("/bin/ls -la /tmp")
+    ls_usr_bin = simple_command("/bin/ls -lan /usr/bin")
     ls_usr_lib64 = simple_command("/bin/ls -lan /usr/lib64")
     ls_var_lib_mongodb = simple_command("/bin/ls -la /var/lib/mongodb")
     ls_var_lib_nova_instances = simple_command("/bin/ls -laRZ /var/lib/nova/instances")
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index 5319b445c..6961dc99c 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -107,6 +107,7 @@ class InsightsArchiveSpecs(Specs):
     ls_R_var_lib_nova_instances = simple_file("insights_commands/ls_-laR_.var.lib.nova.instances")
     ls_sys_firmware = simple_file("insights_commands/ls_-lanR_.sys.firmware")
     ls_tmp = simple_file("insights_commands/ls_-la_.tmp")
+    ls_usr_bin = simple_file("insights_commands/ls_-lan_.usr.bin")
     ls_usr_lib64 = simple_file("insights_commands/ls_-lan_.usr.lib64")
     ls_var_lib_mongodb = simple_file("insights_commands/ls_-la_.var.lib.mongodb")
     ls_var_lib_nova_instances = simple_file("insights_commands/ls_-laRZ_.var.lib.nova.instances")

From 4b7b344c543c8f310a281fc864fe480442f815e4 Mon Sep 17 00:00:00 2001
From: Xiaoxue Wang
Date: Thu, 10 Dec 2020 05:59:30 +0800
Subject: [PATCH 259/892] Add spec and parser for alternatives_display_python
 (#2836)

* Add spec and parser for alternatives_display_python

  Signed-off-by: XiaoXue Wang

* Enhance the docstrings testing

  Signed-off-by: XiaoXue Wang

* Fix docstrings pipeline error

  Signed-off-by: XiaoXue Wang
---
 insights/parsers/alternatives.py            | 67 ++++++++++++++-------
 insights/parsers/tests/test_alternatives.py | 63 +++++++++++++++++++
 insights/specs/__init__.py                  |  1 +
 insights/specs/default.py                   |  1 +
 insights/specs/insights_archive.py          |  1 +
 insights/specs/sos_archive.py               |  1 +
 6 files changed, 111 insertions(+), 23 deletions(-)

diff --git a/insights/parsers/alternatives.py b/insights/parsers/alternatives.py
index 4933f8877..dccdb83a0 100644
--- a/insights/parsers/alternatives.py
+++ b/insights/parsers/alternatives.py
@@ -61,20 +61,19 @@ class AlternativesOutput(CommandParser):
       the path to that program for this alternative path.

     Examples:
-        >>> java = AlternativesOutput(context_wrap(JAVA_ALTERNATIVES))
-        >>> java.program
+        >>> java_alt.program
         'java'
-        >>> java.link
-        '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
-        >>> len(java.paths)
-        3
-        >>> java.paths[0]['path']
-        '/usr/lib/jvm/java-1.7.0-openjdk-1.7.0.111-2.6.7.2.el7_2.x86_64/jre/bin/java'
-        >>> java.paths[0]['priority']
-        1700111
-        >>> java.paths[2]['slave']['ControlPanel']
+        >>> java_alt.link
+        '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java'
+        >>> len(java_alt.paths)
+        2
+        >>> java_alt.paths[0]['path']
+        '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java'
+        >>> java_alt.paths[0]['priority']
+        16091
+        >>> java_alt.paths[0]['slave']['ControlPanel']
         '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
-"""
+    """

     def parse_content(self, content):
         """
@@ -136,18 +135,40 @@ class JavaAlternatives(AlternativesOutput):
     alternatives for ``java`` available and which one is currently in use.

     Examples:
-        >>> java = shared[JavaAlternatives]
-        >>> java.program
+        >>> java_alt.program
         'java'
-        >>> java.link
-        '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64/jre/bin/java'
-        >>> len(java.paths)
-        3
-        >>> java.paths[0]['path']
-        '/usr/lib/jvm/java-1.7.0-openjdk-1.7.0.111-2.6.7.2.el7_2.x86_64/jre/bin/java'
-        >>> java.paths[0]['priority']
-        1700111
-        >>> java.paths[2]['slave']['ControlPanel']
+        >>> java_alt.link
+        '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java'
+        >>> len(java_alt.paths)
+        2
+        >>> java_alt.paths[0]['path']
+        '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/java'
+        >>> java_alt.paths[0]['priority']
+        16091
+        >>> java_alt.paths[0]['slave']['ControlPanel']
         '/usr/lib/jvm/jre-1.6.0-ibm.x86_64/bin/ControlPanel'
     """
     pass
+
+
+@parser(Specs.alternatives_display_python)
+class PythonAlternatives(AlternativesOutput):
+    """
+    Class to read the ``/usr/sbin/alternatives --display python`` output.
+
+    Uses the ``AlternativesOutput`` base class to get information about the
+    alternatives for ``python`` available and which one is currently in use.
+
+    Examples:
+        >>> python_alt.program
+        'python'
+        >>> python_alt.link
+        '/usr/bin/python3'
+        >>> len(python_alt.paths)
+        2
+        >>> python_alt.paths[0]['path']
+        '/usr/libexec/no-python'
+        >>> python_alt.paths[0]['priority']
+        404
+    """
+    pass
diff --git a/insights/parsers/tests/test_alternatives.py b/insights/parsers/tests/test_alternatives.py
index 69d81614e..d2de2f2e5 100644
--- a/insights/parsers/tests/test_alternatives.py
+++ b/insights/parsers/tests/test_alternatives.py
@@ -1,7 +1,10 @@
+import doctest
 import pytest

 from insights.tests import context_wrap
+from insights.parsers import alternatives
 from insights.parsers.alternatives import AlternativesOutput, JavaAlternatives
+from insights.parsers.alternatives import PythonAlternatives
 from insights.core import ParseException

 ALT_MTA = """
@@ -153,3 +156,63 @@ def test_class_has_java():
         'tnameserv', 'jre_exports', 'jre'
     ])
     assert java.paths[1]['slave']['ControlPanel'] == '(null)'
+
+
+ALTERNATIVE_PYTHON_MANUAL = """
+python - status is manual.
+ link currently points to /usr/bin/python3
+/usr/libexec/no-python - priority 404
+ slave unversioned-python: (null)
+ slave unversioned-python-man: /usr/share/man/man1/unversioned-python.1.gz
+/usr/bin/python3 - priority 300
+ slave unversioned-python: /usr/bin/python3
+ slave unversioned-python-man: /usr/share/man/man1/python3.1.gz
+Current `best' version is /usr/libexec/no-python.
+""".strip()
+
+ALTERNATIVE_PYTHON_AUTO = """
+python - status is auto.
+ link currently points to /usr/libexec/no-python
+/usr/libexec/no-python - priority 404
+ slave unversioned-python: (null)
+ slave unversioned-python-man: /usr/share/man/man1/unversioned-python.1.gz
+/usr/bin/python3 - priority 300
+ slave unversioned-python: /usr/bin/python3
+ slave unversioned-python-man: /usr/share/man/man1/python3.1.gz
+Current `best' version is /usr/libexec/no-python.
+""".strip()
+
+
+def test_class_python_manual():
+    python = PythonAlternatives(context_wrap(ALTERNATIVE_PYTHON_MANUAL))
+
+    assert python.program == 'python'
+    assert python.status == 'manual'
+    assert python.link == '/usr/bin/python3'
+    assert python.best == '/usr/libexec/no-python'
+
+    assert len(python.paths) == 2
+
+    for i in ('path', 'priority', 'slave'):
+        assert i in python.paths[0]
+
+    assert python.paths[0]['path'] == python.best
+    assert python.paths[0]['priority'] == 404
+    assert python.paths[1]['path'] == python.link
+    assert python.paths[1]['priority'] == 300
+
+
+def test_class_python_auto():
+    python = PythonAlternatives(context_wrap(ALTERNATIVE_PYTHON_AUTO))
+    assert python.status == 'auto'
+    assert python.best == '/usr/libexec/no-python'
+    assert python.link == python.best
+
+
+def test_python_alternatives_documentation():
+    env = {
+        'java_alt': JavaAlternatives(context_wrap(alter_java)),
+        'python_alt': PythonAlternatives(context_wrap(ALTERNATIVE_PYTHON_MANUAL)),
+    }
+    failed, total = doctest.testmod(alternatives, globs=env)
+    assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index ecf2e6aef..99a4ee792 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -18,6 +18,7 @@ class Openshift(SpecSet):
 class Specs(SpecSet):
     abrt_ccpp_conf = RegistryPoint(filterable=True)
     abrt_status_bare = RegistryPoint()
+    alternatives_display_python = RegistryPoint()
     amq_broker = RegistryPoint(multi_output=True)
     auditctl_status = RegistryPoint()
     auditd_conf = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 05f391a4d..4ab4022b1 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -97,6 +97,7 @@ def inner(idx=None):
 class DefaultSpecs(Specs):
     abrt_ccpp_conf = simple_file("/etc/abrt/plugins/CCpp.conf")
     abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True")
+    alternatives_display_python = simple_command("/usr/sbin/alternatives --display python")
     amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml")
     auditctl_status = simple_command("/sbin/auditctl -s")
     auditd_conf = simple_file("/etc/audit/auditd.conf")
diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py
index 6961dc99c..285c8c445 100644
--- a/insights/specs/insights_archive.py
+++ b/insights/specs/insights_archive.py
@@ -11,6 +11,7 @@ class InsightsArchiveSpecs(Specs):

     abrt_status_bare = simple_file("insights_commands/abrt_status_--bare_True")
     all_installed_rpms = glob_file("insights_commands/rpm_-qa*")
+    alternatives_display_python = simple_file("insights_commands/alternatives_--display_python")
     auditctl_status = simple_file("insights_commands/auditctl_-s")
     aws_instance_id_doc = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc")
     aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7")
diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index 4af14cb7b..e5f92b0e1 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -9,6 +9,7 @@


 class SosSpecs(Specs):
+    alternatives_display_python = simple_file("sos_commands/alternatives/alternatives_--display_python")
     auditctl_status = simple_file("sos_commands/auditd/auditctl_-s")
     autofs_conf = simple_file("/etc/autofs.conf")

From be5d8add4657e82c9a8c28237f21dd1b4e41dd88 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Thu, 10 Dec 2020 06:33:58 +0800
Subject: [PATCH 260/892] Add pre-check for the 'ss' command (#2850)

- It is known that the `ss` command loads the required kernel modules
  automatically and won't release them after the execution. To avoid
  changing the system during collection, a pre-check is added so that this
  spec is collected only when the required modules are already loaded.

See: https://bugzilla.redhat.com/show_bug.cgi?id=1903183
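The guard follows the usual datasource-dependency pattern; a condensed sketch
mirroring the change below::

    @datasource(LsMod)
    def is_mod_loaded_for_ss(broker):
        if all(mod in broker[LsMod] for mod in ['inet_diag', 'tcp_diag', 'udp_diag']):
            return True
        raise SkipComponent

    ss = simple_command("/usr/sbin/ss -tupna", deps=[is_mod_loaded_for_ss])
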
pre-check for the 'ss' command (#2850) - It was known that the `ss` command will load required kernel modules automatically and won't release them after the execution. To avoid changing the system after the collection, `pre-check` is added to ensure only collecting this spec when required modules are loaded See: https://bugzilla.redhat.com/show_bug.cgi?id=1903183 Signed-off-by: Xiangce Liu --- insights/specs/default.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4ab4022b1..9e183533f 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -23,6 +23,7 @@ from insights.combiners.cloud_provider import CloudProvider from insights.combiners.services import Services from insights.combiners.sap import Sap +from insights.parsers.lsmod import LsMod from insights.specs import Specs @@ -623,7 +624,20 @@ def sap_sid(broker): softnet_stat = simple_file("proc/net/softnet_stat") software_collections_list = simple_command('/usr/bin/scl --list') spamassassin_channels = simple_command("/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d") - ss = simple_command("/usr/sbin/ss -tupna") + + @datasource(LsMod) + def is_mod_loaded_for_ss(broker): + """ + bool: Returns True if the kernel modules required by ``ss -tupna`` + command are loaded. + """ + lsmod = broker[LsMod] + req_mods = ['inet_diag', 'tcp_diag', 'udp_diag'] + if all(mod in lsmod for mod in req_mods): + return True + raise SkipComponent + + ss = simple_command("/usr/sbin/ss -tupna", deps=[is_mod_loaded_for_ss]) ssh_config = simple_file("/etc/ssh/ssh_config") ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*.conf") ssh_foreman_proxy_config = simple_file("/usr/share/foreman-proxy/.ssh/ssh_config") From 0da371db6d957b314fbc5fa52dfc1670dfe2c131 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 10 Dec 2020 07:28:39 +0800 Subject: [PATCH 261/892] Enhance the parsers of saphostexec (#2848) * Enhance the parsers of saphostexec - Use the dict as the base class instead of LegacyItemAccess Signed-off-by: Xiangce Liu * Remove the print - And skip empty lines Signed-off-by: Xiangce Liu * fix doc errors Signed-off-by: Xiangce Liu --- insights/parsers/saphostexec.py | 121 +++++++++++++++------ insights/parsers/tests/test_saphostexec.py | 24 ++-- 2 files changed, 103 insertions(+), 42 deletions(-) diff --git a/insights/parsers/saphostexec.py b/insights/parsers/saphostexec.py index 75ed80830..df35ddc63 100644 --- a/insights/parsers/saphostexec.py +++ b/insights/parsers/saphostexec.py @@ -4,22 +4,22 @@ Shared parsers for parsing output of the ``saphostexec [option]`` commands. -SAPHostExecStatus- command ``saphostexec -status`` --------------------------------------------------- +SAPHostExecStatus - command ``saphostexec -status`` +--------------------------------------------------- SAPHostExecVersion - command ``saphostexec -version`` ----------------------------------------------------- """ -from .. import parser, CommandParser, LegacyItemAccess -from insights.parsers import SkipException +from insights import parser, CommandParser +from insights.parsers import SkipException, ParseException from insights.specs import Specs from collections import namedtuple @parser(Specs.saphostexec_status) -class SAPHostExecStatus(CommandParser, LegacyItemAccess): +class SAPHostExecStatus(CommandParser, dict): """ - Class for parsing the output of `saphostexec -status` command. + Class for parsing the output of ``saphostexec -status`` command. 
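    A standalone sketch of the line handling that the reworked ``parse_content``
    applies, using one sample line from the output below (the namedtuple mirrors
    the ``SAPHostAgentService`` type defined in this patch; values are from the
    sample output):

        from collections import namedtuple

        SAPHostAgentService = namedtuple("SAPHostAgentService", ["status", "pid"])

        line = "saphostexec running (pid = 9159)"
        svc, rest = line.strip().split(None, 1)
        value_sp = rest.replace('(', '').replace(')', '').split()
        parsed = {svc: SAPHostAgentService(value_sp[0], value_sp[-1])}
        # parsed['saphostexec'] == SAPHostAgentService(status='running', pid='9159')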
Typical output of the command is:: @@ -27,38 +27,70 @@ class SAPHostExecStatus(CommandParser, LegacyItemAccess): sapstartsrv running (pid = 9163) saposcol running (pid = 9323) - Attributes: - is_running (bool): The SAP Host Agent is running or not. - services (list): List of services. - Examples: >>> type(sha_status) >>> sha_status.is_running True - >>> sha_status.services['saphostexec'] + >>> sha_status.services['saphostexec'].status + 'running' + >>> sha_status['saphostexec'].status + 'running' + >>> sha_status['saphostexec'].pid '9159' """ - def parse_content(self, content): - self.is_running = False - self.services = self.data = {} - if 'saphostexec stopped' not in content[0]: - for line in content: - line_splits = line.split() - self.services[line_splits[0]] = '' - if len(line_splits) == 5 and line_splits[1] == 'running': - self.services[line_splits[0]] = line_splits[-1][:-1] - else: - raise SkipException("Incorrect status: '{0}'".format(line)) + SAPHostAgentService = namedtuple("SAPHostAgentService", field_names=["status", "pid"]) + """namedtuple: Type for storing the lines of ``saphostexec -status``""" - self.is_running = self.services and all(p for p in self.services.values()) + def parse_content(self, content): + data = {} + for line in content: + if not line.strip(): + continue + line_sp = line.strip().split(None, 1) + if len(line_sp) == 2: + value_sp = line_sp[1].replace('(', '').replace(')', '').split() + svc, sta, pid = line_sp[0], value_sp[0], value_sp[-1] + data[svc] = self.SAPHostAgentService(sta, pid) + else: + raise ParseException("Incorrect line: '{0}'".format(line)) + if data: + self.update(data) + else: + raise SkipException + + @property + def is_running(self): + """ + Returns if the SAPHostAgent is running or not. + """ + return all(p.status == 'running' for p in self.values()) + + @property + def data(self): + """ + .. warning:: + + Deprecated, the parser works as a dict please use the built-in + accesses of `dict` + + Returns the parsed data. + """ + return self + + @property + def services(self): + """ + Returns the parsed lines. + """ + return self @parser(Specs.saphostexec_version) -class SAPHostExecVersion(CommandParser, LegacyItemAccess): +class SAPHostExecVersion(CommandParser, dict): """ - Class for parsing the output of `saphostexec -version` command. + Class for parsing the output of ``saphostexec -version`` command. Typical output of the command is:: @@ -87,25 +119,23 @@ class SAPHostExecVersion(CommandParser, LegacyItemAccess): Linux 3 Linux - - Attributes: - components (dict): Dict of :py:class:`SAPComponent` instances. 
- Examples: >>> type(sha_version) >>> sha_version.components['saphostexec'].version '721' - >>> sha_version.components['saphostexec'].patch + >>> sha_version['saphostexec'].version + '721' + >>> sha_version['saphostexec'].patch '1011' """ - SAPComponent = namedtuple("SAPComponent", + SAPHostAgentComponent = namedtuple("SAPHostAgentComponent", field_names=["version", "patch", "changelist"]) - """namedtuple: Type for storing the SAP components""" + """namedtuple: Type for storing the lines of ``saphostexec -version``""" def parse_content(self, content): - self.components = self.data = {} + data = {} for line in content: # Only process component lines for now if not line.startswith('/usr/sap/hostctrl/exe/'): @@ -113,4 +143,27 @@ def parse_content(self, content): key, val = line.split(':', 1) key = key.split('/')[-1] ver, pch, chl, _ = [s.split()[-1].strip() for s in val.split(', ', 3)] - self.components[key] = self.SAPComponent(ver, pch, chl) + data[key] = self.SAPHostAgentComponent(ver, pch, chl) + if data: + self.update(data) + else: + raise SkipException + + @property + def data(self): + """ + .. warning:: + + Deprecated, the parser works as a dict please use the built-in + accesses of `dict` + + Returns the parsed data. + """ + return self + + @property + def components(self): + """ + Return the dict of :py:class:`SAPHostAgentComponent` instances. + """ + return self diff --git a/insights/parsers/tests/test_saphostexec.py b/insights/parsers/tests/test_saphostexec.py index dd3a2d0ec..7d1c8cccf 100644 --- a/insights/parsers/tests/test_saphostexec.py +++ b/insights/parsers/tests/test_saphostexec.py @@ -1,4 +1,4 @@ -from insights.parsers import saphostexec, SkipException +from insights.parsers import saphostexec, SkipException, ParseException from insights.parsers.saphostexec import SAPHostExecStatus, SAPHostExecVersion from insights.tests import context_wrap import pytest @@ -12,8 +12,7 @@ STATUS_ABNORMAL = """ saphostexec running (pid = 9159) -sapstartsrv run (pid = 9163) -saposcol (pid = 9323) +sapstartsrv """.strip() VER_DOC = """ @@ -45,28 +44,37 @@ SHA_STOP = """ saphostexec stopped + +sapstartsrv running (pid = 9163) """.strip() def test_saphostexec_status_abnormal(): - with pytest.raises(SkipException) as s: + with pytest.raises(ParseException) as s: SAPHostExecStatus(context_wrap(STATUS_ABNORMAL)) - assert "Incorrect status: 'sapstartsrv run (pid = 9163)'" in str(s) - assert "Incorrect status: 'saposcol (pid = 9,23)'" not in str(s) + assert "Incorrect line: 'sapstartsrv'" in str(s) + + with pytest.raises(SkipException): + SAPHostExecStatus(context_wrap('')) def test_saphostexec_status(): sha_status = SAPHostExecStatus(context_wrap(STATUS_DOC)) assert sha_status.is_running is True - assert sha_status.services['saphostexec'] == '9159' + assert sha_status.services['saphostexec'].status == 'running' + assert sha_status.services['saphostexec'].pid == '9159' assert 'saposcol' in sha_status sha_status = SAPHostExecStatus(context_wrap(SHA_STOP)) assert sha_status.is_running is False - assert sha_status.services == {} assert 'saposcol' not in sha_status +def test_saphostexec_version_abnormal(): + with pytest.raises(SkipException): + SAPHostExecVersion(context_wrap('')) + + def test_saphostexec_version(): sha_ver = SAPHostExecVersion(context_wrap(VER_DOC)) assert sha_ver.components['saphostexec'].version == '721' From 2dd653af8daf8a3efb500b8c79d44ce9e21b9f22 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 10 Dec 2020 11:41:28 +0800 Subject: 
[PATCH 262/892] Add spec "httpd limits" back (#2859) Signed-off-by: Huanhuan Li --- insights/specs/default.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 9e183533f..d9be14ef9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -348,6 +348,8 @@ def httpd_cmd(broker): # https://access.redhat.com/solutions/21680 return list(ps_httpds) + httpd_pid = simple_command("/usr/bin/pgrep -o httpd") + httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") httpd_M = foreach_execute(httpd_cmd, "%s -M") httpd_V = foreach_execute(httpd_cmd, "%s -V") ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*") From d66ab2f26ddc5d21a81e7a7f44b9b5a62c43649c Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 10 Dec 2020 12:48:28 +0800 Subject: [PATCH 263/892] Add spec "corosync_cmapctl" back (#2858) Signed-off-by: Huanhuan Li --- insights/specs/default.py | 11 +++++++++++ insights/specs/insights_archive.py | 1 + 2 files changed, 12 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index d9be14ef9..557f13c51 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -24,6 +24,7 @@ from insights.combiners.services import Services from insights.combiners.sap import Sap from insights.parsers.lsmod import LsMod +from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.specs import Specs @@ -204,6 +205,16 @@ def is_ceph_monitor(broker): cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") + + @datasource([IsRhel7, IsRhel8]) + def corosync_cmapctl_cmd_list(broker): + if broker.get(IsRhel7): + return ["/usr/sbin/corosync-cmapctl", 'corosync-cmapctl -d runtime.schedmiss.timestamp', 'corosync-cmapctl -d runtime.schedmiss.delay'] + if broker.get(IsRhel8): + return ["/usr/sbin/corosync-cmapctl", '/usr/sbin/corosync-cmapctl -m stats', '/usr/sbin/corosync-cmapctl -C schedmiss'] + raise SkipComponent() + corosync_cmapctl = foreach_execute(corosync_cmapctl_cmd_list, "%s") + corosync_conf = simple_file("/etc/corosync/corosync.conf") cpu_cores = glob_file("sys/devices/system/cpu/cpu[0-9]*/online") cpu_siblings = glob_file("sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 285c8c445..9d7ca9cff 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -30,6 +30,7 @@ class InsightsArchiveSpecs(Specs): certificates_enddate = first_file(["insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName", "insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName"]) chkconfig = simple_file("insights_commands/chkconfig_--list") chronyc_sources = simple_file("insights_commands/chronyc_sources") + corosync_cmapctl = glob_file("insights_commands/corosync-cmapctl*") cpupower_frequency_info = simple_file("insights_commands/cpupower_-c_all_frequency-info") date = simple_file("insights_commands/date") date_utc = simple_file("insights_commands/date_--utc") From f403b24472710e98a074e9e2b3ba769c52e812b0 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Thu, 10 
Dec 2020 15:44:33 +0000 Subject: [PATCH 264/892] Add "warnings" property for LvmHeadings parsers (#2845) Added ``warnings`` property to the parsers extended by ``LvmHeadings`` class. The content may contains warning information which can be access using this property. Signed-off-by: Akshay Gaikwad --- insights/parsers/lvm.py | 12 ++++++++++++ insights/parsers/tests/test_lvm.py | 31 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) diff --git a/insights/parsers/lvm.py b/insights/parsers/lvm.py index dcd518b9b..ae404a197 100644 --- a/insights/parsers/lvm.py +++ b/insights/parsers/lvm.py @@ -286,6 +286,8 @@ class PvsHeadings(LvmHeadings): Attributes: data (list): List of dicts, each dict containing one row of the table with column headings as keys. + warnings (set): Set of lines from input data containing + warning strings. Examples: >>> pvs_data = shared[PvsHeadings] @@ -301,6 +303,8 @@ class PvsHeadings(LvmHeadings): PRIMARY_KEY = Pvs.PRIMARY_KEY def parse_content(self, content): + self.warnings = set(find_warnings(content)) + content = [l for l in content if l not in self.warnings] self.data = parse_fixed_table( content, heading_ignore=["PV "], @@ -419,6 +423,8 @@ class VgsHeadings(LvmHeadings): Attributes: data (list): List of dicts, each dict containing one row of the table with column headings as keys. + warnings (set): Set of lines from input data containing + warning strings. Examples: >>> vgs_info = shared[VgsHeadings] @@ -431,6 +437,8 @@ class VgsHeadings(LvmHeadings): PRIMARY_KEY = Vgs.PRIMARY_KEY def parse_content(self, content): + self.warnings = set(find_warnings(content)) + content = [l for l in content if l not in self.warnings] self.data = parse_fixed_table( content, heading_ignore=["VG "], @@ -608,6 +616,8 @@ class LvsHeadings(LvmHeadings): Attributes: data (list): List of dicts, each dict containing one row of the table with column headings as keys. + warnings (set): Set of lines from input data containing + warning strings. Examples: >>> lvs_info = shared[LvsHeadings] @@ -622,6 +632,8 @@ class LvsHeadings(LvmHeadings): PRIMARY_KEY = Lvs.PRIMARY_KEY def parse_content(self, content): + self.warnings = set(find_warnings(content)) + content = [l for l in content if l not in self.warnings] self.data = parse_fixed_table( content, heading_ignore=["LV "], header_substitute=[("LV Tags", "LV_Tags")] ) diff --git a/insights/parsers/tests/test_lvm.py b/insights/parsers/tests/test_lvm.py index 55ab945b0..ceed560e5 100644 --- a/insights/parsers/tests/test_lvm.py +++ b/insights/parsers/tests/test_lvm.py @@ -20,6 +20,30 @@ Attempt To Close Device """.strip() +VGSHEADING_CONTENT = """ +Configuration setting "activation/thin_check_executable" unknown. +Configuration setting "activation/thin_check_options" unknown. +Configuration setting "activation/thin_check_executable" unknown. +Configuration setting "activation/thin_check_options" unknown. +WARNING: Locking disabled. Be careful! This could corrupt your metadata. + Using volume group(s) on command line. +Found duplicate PV qJMs5CEKY89qzq56E2vVeBvxzJGw2sA1: using /dev/mapper/mpathbp2 not /dev/cciss/c0d1p2 +Using duplicate PV /dev/mapper/mpathbp2 from subsystem DM, ignoring /dev/cciss/c0d1p2 +WARNING: Inconsistent metadata found for VG vgshared - updating to use version 26x + Archiving volume group "vgshared" metadata (seqno 26). + Archiving volume group "vgshared" metadata (seqno 27). + Creating volume group backup "/etc/lvm/backup/vgshared" (seqno 27). 
+VG Attr Ext #PV #LV #SN VSize VFree VG UUID VProfile #VMda VMdaFree VMdaSize #VMdaUse VG Tags +vg00 wz--n- 128.00m 1 7 0 279.00g 5.00g MiuKdK-ruw4-UG1i-0uLE-4dfB-bk2v-uxte2W 1 506.00k 1020.00k 1 +vg00alt wz--n- 4.00m 1 5 0 279.11g 265.11g LNzjbn-HU4C-WFCM-cRZ6-81ns-4rke-X8N0DA 1 507.00k 1020.00k 1 +vg01 wz--n- 4.00m 2 1 0 199.99g 100.99g 7O9ePI-M1Kp-H9VH-Lxt3-pS1Z-nLQB-P1IIO3 2 508.00k 1020.00k 2 +vgshared wz--nc 4.00m 6 1 0 1.76t 899.98g fFYYw3-Ns8Q-akPI-rrFU-YYn5-nKkk-UW67qO 6 507.00k 1020.00k 6 + Reloading config files + Wiping internal VG cache +Configuration setting "activation/thin_check_executable" unknown. +Configuration setting "activation/thin_check_options" unknown. +""".strip() + def test_find_warnings(): data = [l for l in lvm.find_warnings(WARNINGS_CONTENT.splitlines())] @@ -48,3 +72,10 @@ def test_lvmconfig(): p = lvm.LvmConfig(context_wrap(LVMCONFIG)) assert p.data["dmeventd"]["raid_library"] == "libdevmapper-event-lvm2raid.so" assert p.data["global"]["thin_check_options"] == ["-q", "--clear-needs-check-flag"] + + +def test_vgsheading_warnings(): + result = lvm.VgsHeadings(context_wrap(VGSHEADING_CONTENT)) + assert len(result.warnings) == 6 + assert 'Configuration setting "activation/thin_check_executable" unknown.' in result.warnings + assert 'WARNING: Locking disabled. Be careful! This could corrupt your metadata.' in result.warnings From a65706cbd7f3782f3f4a4efa5fb70cb9ff223230 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Thu, 10 Dec 2020 11:33:25 -0600 Subject: [PATCH 265/892] Add specs for existing parser InstalledProductIDs (#2854) * Add spec for core collection of subscriptioin_manager_installed_product_ids command * Fixes Bugzilla 1905503 * Already being collected by client for legacy collection Signed-off-by: Bob Fahr --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 557f13c51..faecccb29 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -658,6 +658,7 @@ def is_mod_loaded_for_ss(broker): sshd_config_perms = simple_command("/bin/ls -l /etc/ssh/sshd_config") sssd_config = simple_file("/etc/sssd/sssd.conf") subscription_manager_id = simple_command("/usr/sbin/subscription-manager identity") # use "/usr/sbin" here, BZ#1690529 + subscription_manager_installed_product_ids = simple_command("/usr/bin/find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \;") swift_object_expirer_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/object-expirer.conf", "/etc/swift/object-expirer.conf"]) swift_proxy_server_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/proxy-server.conf", "/etc/swift/proxy-server.conf"]) sysconfig_kdump = simple_file("etc/sysconfig/kdump") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 9d7ca9cff..7bcfc1d76 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -208,6 +208,7 @@ class InsightsArchiveSpecs(Specs): ss = simple_file("insights_commands/ss_-tupna") sshd_config_perms = simple_file("insights_commands/ls_-l_.etc.ssh.sshd_config") subscription_manager_id = simple_file("insights_commands/subscription-manager_identity") + subscription_manager_installed_product_ids = 
simple_file("insights_commands/find_.etc.pki.product-default._.etc.pki.product._-name_pem_-exec_rct_cat-cert_--no-content") sysctl = simple_file("insights_commands/sysctl_-a") systemctl_cat_rpcbind_socket = simple_file("insights_commands/systemctl_cat_rpcbind.socket") systemctl_cinder_volume = simple_file("insights_commands/systemctl_show_openstack-cinder-volume") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 547c593d7..195d3aca2 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -65,7 +65,6 @@ def test_get_component_by_symbolic_name(): 'rabbitmq_queues', 'rhev_data_center', 'root_crontab', - 'subscription_manager_installed_product_ids', 'yum_list_installed', 'zdump_v', 'cni_podman_bridge_conf', From 0ec94feb21a4b25a84977114aa9fbdf2a03127bb Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 10 Dec 2020 15:19:03 -0500 Subject: [PATCH 266/892] release: update uploader_json_map for release Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 57 ++++++++++++++++++-------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index b44d5582c..25a3b53d4 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -5,6 +5,11 @@ "pattern": [], "symbolic_name": "abrt_status_bare" }, + { + "command": "/usr/sbin/alternatives --display python", + "pattern": [], + "symbolic_name": "alternatives_display_python" + }, { "command": "python -m insights.tools.cat --no-header aws_instance_id_doc", "pattern": [], @@ -533,6 +538,11 @@ "pattern": [], "symbolic_name": "ls_sys_firmware" }, + { + "command": "/bin/ls -lan /usr/bin", + "pattern": [], + "symbolic_name": "ls_usr_bin" + }, { "command": "/bin/ls -la /var/lib/mongodb", "pattern": [], @@ -1023,9 +1033,19 @@ }, { "command": "/usr/sbin/postconf -C builtin", - "pattern": [], + "pattern": [ + "smtp_tls_mandatory_protocols", + "smtp_tls_protocols", + "smtpd_tls_mandatory_protocols", + "smtpd_tls_protocols" + ], "symbolic_name": "postconf_builtin" }, + { + "command": "/usr/sbin/postconf", + "pattern": [], + "symbolic_name": "postconf" + }, { "command": "/bin/ps alxwww", "pattern": [ @@ -1033,6 +1053,7 @@ "/usr/bin/openshift start master", "/usr/bin/openshift start node", "COMMAND", + "auditd", "avahi", "bash", "chronyd", @@ -1042,6 +1063,7 @@ "dlm_controld", "docker", "heat-engine", + "mongdb", "nova-compute", "nova-conductor", "ntpd", @@ -1052,6 +1074,8 @@ "pacemaker-controld", "pacemakerd", "pcsd", + "redis-server", + "sap", "spausedd", "tuned" ], @@ -1071,6 +1095,7 @@ "/usr/bin/openshift start node", "COMMAND", "STAP/8.2", + "auditd", "bash", "ceph-osd", "chronyd", @@ -1080,6 +1105,7 @@ "dlm_controld", "docker", "heat-engine", + "mongdb", "mysqld", "nova-compute", "nova-conductor", @@ -1095,6 +1121,8 @@ "phc2sys", "postgres", "ptp4l", + "redis-server", + "sap", "spausedd", "tuned" ], @@ -1117,6 +1145,7 @@ "/usr/bin/teamd", "/usr/sbin/fcoemon --syslog", "COMMAND", + "auditd", "bash", "catalina.base", "ceilometer-coll", @@ -1132,6 +1161,7 @@ "heat-engine", "httpd", "iscsid", + "mongdb", "multipath", "nfs-server", "nfsd", @@ -1149,12 +1179,13 @@ "pcsd", "pkla-check-auth", "postgres", + "redis-server", + "sap", "smbd", "spausedd", "target_completi", "tgtd", - "tuned", - "virt-who" + "tuned" ], "symbolic_name": "ps_auxww" 
}, @@ -1165,6 +1196,7 @@ "/usr/bin/openshift start master", "/usr/bin/openshift start node", "CMD", + "auditd", "bash", "chronyd", "clvmd", @@ -1173,6 +1205,7 @@ "dlm_controld", "docker", "heat-engine", + "mongdb", "neutron-ns-metadata-proxy", "nginx: master process", "nginx: worker process", @@ -1186,6 +1219,8 @@ "pacemaker-controld", "pacemakerd", "pcsd", + "redis-server", + "sap", "spausedd", "tuned" ], @@ -1305,16 +1340,6 @@ ], "symbolic_name": "subscription_manager_installed_product_ids" }, - { - "command": "/usr/sbin/ss -tupna", - "pattern": [], - "symbolic_name": "ss" - }, - { - "command": "/usr/sbin/ss -tupna", - "pattern": [], - "symbolic_name": "ss_tupna" - }, { "command": "/bin/ls -l /etc/ssh/sshd_config", "pattern": [], @@ -2340,7 +2365,6 @@ ", type vxfs) has no security xattr handler", "- image is referenced in one or more repositories", "/input/input", - "11000 E11000 duplicate key error index: pulp_database.repo_profile_applicability.$profile_hash_-1_repo_id_-1", "17763", ": segfault at ", "Abort command issued", @@ -2442,6 +2466,7 @@ "kernel: megasas: Found FW in FAULT state, will reset adapter.", "kernel: nfs: server", "khash_super_prune_nolock", + "kill: Binary: arguments must be process or job IDs", "link status up for interface", "megaraid_sas: FW detected to be in faultstate, restarting it", "mode:0x20", @@ -4162,5 +4187,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-11-25T09:46:55.836781" -} \ No newline at end of file + "version": "2020-12-03T16:24:04.611058" +} From e7bfc834fd8c8c35f2d698abeafc3729560957b8 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 10 Dec 2020 15:18:38 -0500 Subject: [PATCH 267/892] fix: update map components for removed spec Signed-off-by: Stephen Adams --- insights/client/map_components.py | 1 - .../client/collection_rules/test_map_components.py | 13 ++++--------- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/insights/client/map_components.py b/insights/client/map_components.py index fe90db25d..72d8bf5ad 100644 --- a/insights/client/map_components.py +++ b/insights/client/map_components.py @@ -130,7 +130,6 @@ def _get_component_by_symbolic_name(sname): 'lspci_kernel': 'lspci', 'netstat__agn': 'netstat_agn', 'rpm__V_packages': 'rpm_V_packages', - 'ss_tupna': 'ss', 'machine_id1': 'machine_id', 'machine_id2': 'machine_id', diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 195d3aca2..3c9e9bc9d 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -293,15 +293,13 @@ def test_log_long_key(logger_warning): spacing, wrapping, and unconverted specs are not logged ''' rm_conf = {'commands': ["/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;", - "/usr/bin/md5sum /etc/pki/product/69.pem", - "ss_tupna"], + "/usr/bin/md5sum /etc/pki/product/69.pem"], 'files': ["/etc/sysconfig/virt-who", "/etc/yum.repos.d/fedora-cisco-openh264.repo", "krb5_conf_d"]} map_rm_conf_to_components(rm_conf) logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n /etc/origin/master /etc/pki /etc/ipa -type f\n -exec /usr/bin/openssl x509 -noout -enddate -in\n '{}' \\; -exec echo 'FileName= {}' \\;") logger_warning.assert_any_call("- /usr/bin/md5sum 
/etc/pki/product/69.pem => md5chk_files") - logger_warning.assert_any_call("- ss_tupna => ss"), logger_warning.assert_any_call("- /etc/sysconfig/virt-who => sysconfig_virt_who") logger_warning.assert_any_call("- krb5_conf_d => krb5") @@ -323,14 +321,11 @@ def test_components_added(): an aggregation of the current list and the conversion results with no duplicates. ''' - rm_conf = {'commands': ["ss_tupna", - "/usr/bin/md5sum /etc/pki/product/69.pem"], - 'components': ["insights.specs.default.DefaultSpecs.ss", - "insights.specs.default.DefaultSpecs.sysconfig_virt_who"]} + rm_conf = {'commands': ["/usr/bin/md5sum /etc/pki/product/69.pem"], + 'components': ["insights.specs.default.DefaultSpecs.sysconfig_virt_who"]} results = map_rm_conf_to_components(rm_conf) assert results == {'commands': [], 'files': [], - 'components': ["insights.specs.default.DefaultSpecs.ss", - "insights.specs.default.DefaultSpecs.sysconfig_virt_who", + 'components': ["insights.specs.default.DefaultSpecs.sysconfig_virt_who", "insights.specs.default.DefaultSpecs.md5chk_files"]} From 4a050b710da31726137f5e9102b5a6a6f3991f82 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Fri, 11 Dec 2020 05:10:05 +0000 Subject: [PATCH 268/892] [lvm] Remove "unknown device" string from warnings (#2864) "unknown device" is not part of warning messages of pvs/lvs/vgs commanmds but pvs will display unknown device when device is missing. See example of such output: ``` PV VG Fmt Attr PSize PFree DevSize PV UUID /dev/fedora/home --- 0 0 418.75g unknown device --- 0 0 50.00g /dev/fedora/swap --- 0 0 7.69g ``` Signed-off-by: Akshay Gaikwad --- insights/parsers/lvm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/insights/parsers/lvm.py b/insights/parsers/lvm.py index ae404a197..40ed8a36f 100644 --- a/insights/parsers/lvm.py +++ b/insights/parsers/lvm.py @@ -81,7 +81,6 @@ def find_warnings(content): "failed.", "Invalid metadata", "response failed", - "unknown device", "duplicate", "not found", "Missing device", From 36bca47b63f434cd969b701873d2c348f93c0eeb Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Mon, 14 Dec 2020 18:05:55 +0800 Subject: [PATCH 269/892] Add parser MDAdmMetadata (#2846) * Add parser MDAdmMetadata Signed-off-by: shlao * Fixed the Datasource issues Signed-off-by: shlao --- docs/shared_parsers_catalog/mdadm.rst | 3 ++ insights/parsers/mdadm.py | 69 +++++++++++++++++++++++++++ insights/parsers/tests/test_mdadm.py | 56 ++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 9 ++++ 5 files changed, 138 insertions(+) create mode 100644 docs/shared_parsers_catalog/mdadm.rst create mode 100644 insights/parsers/mdadm.py create mode 100644 insights/parsers/tests/test_mdadm.py diff --git a/docs/shared_parsers_catalog/mdadm.rst b/docs/shared_parsers_catalog/mdadm.rst new file mode 100644 index 000000000..cfc5ef9ea --- /dev/null +++ b/docs/shared_parsers_catalog/mdadm.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.mdadm + :members: + :show-inheritance: diff --git a/insights/parsers/mdadm.py b/insights/parsers/mdadm.py new file mode 100644 index 000000000..d08976f63 --- /dev/null +++ b/insights/parsers/mdadm.py @@ -0,0 +1,69 @@ +""" +MDAdm - command ``/usr/sbin/mdadm -E {device}`` +=============================================== +""" + +from insights.core import CommandParser +from insights.core.plugins import parser +from insights.parsers import split_kv_pairs +from insights.parsers import SkipException + +from insights.specs import Specs + + +@parser(Specs.mdadm_E) +class MDAdmMetadata(CommandParser, dict): + """ + Parser for output of ``mdadm -E`` for each MD device in system. + + This stores the information from the output in the following properties: + * ``device`` - the name of the device after /dev/ - e.g. loop0 + + Sample output:: + + /dev/loop0: + Magic : a92b4efc + Version : 1.0 + Feature Map : 0x0 + Array UUID : 98e098ef:c8662ce2:2ed2aa5f:7f0416a9 + Name : 0 + Creation Time : Mon Jun 29 02:16:52 2020 + Raid Level : raid1 + Raid Devices : 2 + + Avail Dev Size : 16383968 sectors (7.81 GiB 8.39 GB) + Array Size : 1048576 KiB (1024.00 MiB 1073.74 MB) + Used Dev Size : 2097152 sectors (1024.00 MiB 1073.74 MB) + Super Offset : 16383984 sectors + Unused Space : before=0 sectors, after=14286824 sectors + State : clean + Device UUID : 5e249ed9:a9ee800a:c09c963f:363a18d2 + + Update Time : Mon Jun 29 02:19:56 2020 + Bad Block Log : 512 entries available at offset -8 sectors + Checksum : 395066e8 - correct + Events : 60 + + Device Role : Active device 0 + Array State : AA ('A' == active, '.' == missing, 'R' == replacing) + + Examples: + >>> mdadm.device + '/dev/loop0' + >>> mdadm["Device UUID"] + '5e249ed9:a9ee800a:c09c963f:363a18d2' + >>> mdadm["Events"] + 60 + """ + def parse_content(self, content): + mdadm_dev = "/mdadm_-E_.dev." + if mdadm_dev in self.file_path: + self.device = '/dev/' + self.file_path.split(mdadm_dev)[1].strip() + else: + raise SkipException('Cannot parse device name from path {p}'.format(p=self.file_path)) + + for key, val in split_kv_pairs(content, split_on=':').items(): + if val.isdigit(): + val = int(val) + + self[key] = val diff --git a/insights/parsers/tests/test_mdadm.py b/insights/parsers/tests/test_mdadm.py new file mode 100644 index 000000000..d612d28e9 --- /dev/null +++ b/insights/parsers/tests/test_mdadm.py @@ -0,0 +1,56 @@ +from insights.parsers import mdadm +from insights.parsers.mdadm import MDAdmMetadata +from insights.tests import context_wrap + +import doctest + +MDADM_CONTENT = """ +/dev/loop0: +Magic : a92b4efc +Version : 1.0 +Feature Map : 0x0 +Array UUID : 98e098ef:c8662ce2:2ed2aa5f:7f0416a9 +Name : 0 +Creation Time : Mon Jun 29 02:16:52 2020 +Raid Level : raid1 +Raid Devices : 2 + +Avail Dev Size : 16383968 sectors (7.81 GiB 8.39 GB) +Array Size : 1048576 KiB (1024.00 MiB 1073.74 MB) +Used Dev Size : 2097152 sectors (1024.00 MiB 1073.74 MB) +Super Offset : 16383984 sectors +Unused Space : before=0 sectors, after=14286824 sectors +State : clean +Device UUID : 5e249ed9:a9ee800a:c09c963f:363a18d2 + +Update Time : Mon Jun 29 02:19:56 2020 +Bad Block Log : 512 entries available at offset -8 sectors +Checksum : 395066e8 - correct +Events : 60 + +Device Role : Active device 0 +Array State : AA ('A' == active, '.' 
== missing, 'R' == replacing) +""" # noqa + + +def test_doc_examples(): + env = { + 'mdadm': MDAdmMetadata(context_wrap( + MDADM_CONTENT, path='insights_commands/mdadm_-E_.dev.loop0' + )), + } + failed, total = doctest.testmod(mdadm, globs=env) + assert failed == 0 + + +def test_mdadm(): + md = MDAdmMetadata(context_wrap( + MDADM_CONTENT, path='insights_commands/mdadm_-E_.dev.loop0' + )) + + # Device assertions + assert md.device == '/dev/loop0' + + # Information assertions + assert md['Update Time'] == 'Mon Jun 29 02:19:56 2020' + assert md['Array Size'] == '1048576 KiB (1024.00 MiB 1073.74 MB)' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 99a4ee792..17f3a8264 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -341,6 +341,7 @@ class Specs(SpecSet): manila_conf = RegistryPoint() mariadb_log = RegistryPoint(filterable=True) max_uid = RegistryPoint() + mdadm_E = RegistryPoint(multi_output=True) md5chk_files = RegistryPoint(multi_output=True) mdstat = RegistryPoint() meminfo = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index faecccb29..0c52ccea6 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -23,6 +23,7 @@ from insights.combiners.cloud_provider import CloudProvider from insights.combiners.services import Services from insights.combiners.sap import Sap +from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.specs import Specs @@ -452,6 +453,14 @@ def md5chk_file_list(broker): return ["/etc/pki/product/69.pem", "/etc/pki/product-default/69.pem", "/usr/lib/libsoftokn3.so", "/usr/lib64/libsoftokn3.so", "/usr/lib/libfreeblpriv3.so", "/usr/lib64/libfreeblpriv3.so"] md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s") mdstat = simple_file("/proc/mdstat") + + @datasource(Mdstat) + def md_device_list(broker): + md = broker[Mdstat] + if md.components: + return [dev["device_name"] for dev in md.components if dev["active"]] + raise SkipComponent() + mdadm_E = foreach_execute(md_device_list, "/usr/sbin/mdadm -E %s") meminfo = first_file(["/proc/meminfo", "/meminfo"]) messages = simple_file("/var/log/messages") modinfo_i40e = simple_command("/sbin/modinfo i40e") From 2e4d4afbf732915cf3c718fae8b655928c83b3d0 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 15 Dec 2020 09:28:57 +0800 Subject: [PATCH 270/892] Add parser for the satellite clients count spec (#2860) * Add parser for the satellite clients count spec Signed-off-by: Huanhuan Li * Short the command Signed-off-by: Huanhuan Li * Change "su" to "sudo" Signed-off-by: Huanhuan Li --- .../satellite_content_hosts_count.rst | 3 + .../parsers/satellite_content_hosts_count.py | 46 +++++++++++++++ .../test_satellite_content_hosts_count.py | 56 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 108 insertions(+) create mode 100644 docs/shared_parsers_catalog/satellite_content_hosts_count.rst create mode 100644 insights/parsers/satellite_content_hosts_count.py create mode 100644 insights/parsers/tests/test_satellite_content_hosts_count.py diff --git a/docs/shared_parsers_catalog/satellite_content_hosts_count.rst b/docs/shared_parsers_catalog/satellite_content_hosts_count.rst new file mode 100644 index 000000000..fbbd1825d --- /dev/null +++ 
b/docs/shared_parsers_catalog/satellite_content_hosts_count.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.satellite_content_hosts_count
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/satellite_content_hosts_count.py b/insights/parsers/satellite_content_hosts_count.py
new file mode 100644
index 000000000..c85072a49
--- /dev/null
+++ b/insights/parsers/satellite_content_hosts_count.py
@@ -0,0 +1,46 @@
+"""
+SatelliteContentHostsCount - command ``psql -d foreman -c 'select count(*) from hosts'``
+========================================================================================
+
+The SatelliteContentHostsCount parser reads the output of
+``psql -d foreman -c 'select count(*) from hosts'``.
+
+Sample output of ``psql -d foreman -c 'select count(*) from hosts'``::
+
+     count
+    -------
+        13
+    (1 row)
+
+Examples::
+
+    >>> type(clients)
+    <class 'insights.parsers.satellite_content_hosts_count.SatelliteContentHostsCount'>
+    >>> clients.count
+    13
+"""
+
+from insights import parser, CommandParser
+from insights.specs import Specs
+from insights.parsers import SkipException, ParseException
+
+
+@parser(Specs.satellite_content_hosts_count)
+class SatelliteContentHostsCount(CommandParser):
+    """
+    Read the output of ``psql -d foreman -c 'select count(*) from hosts'``
+    and set the hosts count to the property ``count``.
+
+    Attributes:
+        count (int): The count of satellite content hosts
+    """
+
+    def parse_content(self, content):
+        self.count = None
+        if len(content) >= 3 and content[0].strip() == 'count':
+            try:
+                self.count = int(content[2].strip())
+            except ValueError:
+                raise ParseException("Unknown satellite content hosts count")
+        if self.count is None:
+            raise SkipException("Cannot get the count of satellite content hosts")
diff --git a/insights/parsers/tests/test_satellite_content_hosts_count.py b/insights/parsers/tests/test_satellite_content_hosts_count.py
new file mode 100644
index 000000000..6c5425a57
--- /dev/null
+++ b/insights/parsers/tests/test_satellite_content_hosts_count.py
@@ -0,0 +1,56 @@
+import doctest
+import pytest
+
+from insights.parsers import (
+    satellite_content_hosts_count, SkipException, ParseException)
+from insights.core.plugins import ContentException
+from insights.tests import context_wrap
+
+
+SATELLITE_CONTENT_HOSTS_COUNT = '''
+ count
+-------
+    13
+(1 row)
+
+'''
+
+SATELLITE_CONTENT_HOSTS_COUNT_WRONG_1 = '''
+-bash: psql: command not found
+'''
+
+SATELLITE_CONTENT_HOSTS_COUNT_WRONG_2 = '''
+su: user postgres does not exist
+'''
+
+SATELLITE_CONTENT_HOSTS_COUNT_WRONG_3 = '''
+psql: FATAL:  database "foreman" does not exist
+'''
+
+SATELLITE_CONTENT_HOSTS_COUNT_WRONG_4 = '''
+ count
+-------
+ abc
+(1 row)
+
+'''
+
+
+def test_HTL_doc_examples():
+    clients = satellite_content_hosts_count.SatelliteContentHostsCount(context_wrap(SATELLITE_CONTENT_HOSTS_COUNT))
+    globs = {
+        'clients': clients
+    }
+    failed, tested = doctest.testmod(satellite_content_hosts_count, globs=globs)
+    assert failed == 0
+
+
+def test_wrong_output():
+    with pytest.raises(ContentException):
+        satellite_content_hosts_count.SatelliteContentHostsCount(context_wrap(SATELLITE_CONTENT_HOSTS_COUNT_WRONG_1))
+    with pytest.raises(SkipException):
+        satellite_content_hosts_count.SatelliteContentHostsCount(context_wrap(SATELLITE_CONTENT_HOSTS_COUNT_WRONG_2))
+    with pytest.raises(SkipException):
+        satellite_content_hosts_count.SatelliteContentHostsCount(context_wrap(SATELLITE_CONTENT_HOSTS_COUNT_WRONG_3))
+    with pytest.raises(ParseException):
+        satellite_content_hosts_count.SatelliteContentHostsCount(context_wrap(SATELLITE_CONTENT_HOSTS_COUNT_WRONG_4))
diff --git
a/insights/specs/__init__.py b/insights/specs/__init__.py index 17f3a8264..bdc4b7965 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -554,6 +554,7 @@ class Specs(SpecSet): saphostexec_status = RegistryPoint() saphostexec_version = RegistryPoint() sat5_insights_properties = RegistryPoint() + satellite_content_hosts_count = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_version_rb = RegistryPoint() satellite_custom_hiera = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 0c52ccea6..44342d088 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -626,6 +626,7 @@ def sap_sid(broker): sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") + satellite_content_hosts_count = simple_command("/usr/bin/sudo -iu postgres psql -d foreman -c 'select count(*) from hosts'") satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 7bcfc1d76..32949deca 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -199,6 +199,7 @@ class InsightsArchiveSpecs(Specs): rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") + satellite_content_hosts_count = simple_file("insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") sealert = simple_file('insights_commands/sealert_-l') sestatus = simple_file("insights_commands/sestatus_-b") From 9c3d874438d1d2c4f6add5b9926dd0f88461b929 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 15 Dec 2020 10:15:10 +0800 Subject: [PATCH 271/892] Revert the saphostexec_status and saphostexec_version specs (#2847) Signed-off-by: Xiangce Liu --- insights/specs/default.py | 2 ++ insights/specs/insights_archive.py | 2 ++ insights/tests/client/collection_rules/test_map_components.py | 2 -- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 44342d088..86571f8a8 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -625,6 +625,8 @@ def sap_sid(broker): sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") + saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") + saphostexec_version = 
simple_command("/usr/sap/hostctrl/exe/saphostexec -version") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") satellite_content_hosts_count = simple_command("/usr/bin/sudo -iu postgres psql -d foreman -c 'select count(*) from hosts'") satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 32949deca..66f232bc4 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -200,6 +200,8 @@ class InsightsArchiveSpecs(Specs): sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") satellite_content_hosts_count = simple_file("insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts") + saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") + saphostexec_version = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-version") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") sealert = simple_file('insights_commands/sealert_-l') sestatus = simple_file("insights_commands/sestatus_-b") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 3c9e9bc9d..e5131e01d 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -58,8 +58,6 @@ def test_get_component_by_symbolic_name(): 'httpd_on_nfs', 'ls_usr_sbin', 'lvmconfig', - 'saphostexec_status', - 'saphostexec_version', 'nova_migration_uid', 'ntpq_pn', 'rabbitmq_queues', From 65b4ce29efcb8ac570762eda5fcab84064324155 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 29 Dec 2020 12:16:07 +0800 Subject: [PATCH 272/892] Add LsMod parser to the pre-loaded component for 'ss' spec Signed-off-by: Xiangce Liu --- insights/collect.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/insights/collect.py b/insights/collect.py index 38a8c3b25..80d4d5e5c 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -139,6 +139,10 @@ - name: insights.combiners.sap enabled: true + # needed for the 'pre-check' of the 'ss' spec + - name: insights.parsers.lsmod + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true From 602eed3a1a03a85b7b115a32b36dc4e401fa2f4d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 29 Dec 2020 12:22:17 +0800 Subject: [PATCH 273/892] Revert "Add LsMod parser to the pre-loaded component for 'ss' spec" This reverts commit 65b4ce29efcb8ac570762eda5fcab84064324155. 
--- insights/collect.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 80d4d5e5c..38a8c3b25 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -139,10 +139,6 @@ - name: insights.combiners.sap enabled: true - # needed for the 'pre-check' of the 'ss' spec - - name: insights.parsers.lsmod - enabled: true - # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true From 28dd72e71f8df7ef26b5a6890b785d1bc12ab090 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Mon, 4 Jan 2021 07:04:23 +0530 Subject: [PATCH 274/892] Add mokutil_sbstate spec in sos_archive (#2874) Signed-off-by: Akshay Gaikwad --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index e5f92b0e1..842d62bb4 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -136,6 +136,7 @@ class SosSpecs(Specs): manila_conf = first_file(["/var/lib/config-data/puppet-generated/manila/etc/manila/manila.conf", "/etc/manila/manila.conf"]) mistral_executor_log = simple_file("/var/log/mistral/executor.log") modinfo_all = glob_file("sos_commands/kernel/modinfo_*") + mokutil_sbstate = simple_file("sos_commands/boot/mokutil_--sb-state") mount = simple_file("sos_commands/filesys/mount_-l") mounts = simple_file("/proc/mounts") mlx4_port = glob_file("/sys/bus/pci/devices/*/mlx4_port[0-9]") From 9035d05e91e13ca6d8f2f5f2f686532fe94a8f42 Mon Sep 17 00:00:00 2001 From: Chenlizhong Date: Mon, 4 Jan 2021 09:38:51 +0800 Subject: [PATCH 275/892] Update ceph version mapping (#2870) Signed-off-by: Chen Lizhong --- insights/parsers/ceph_version.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/insights/parsers/ceph_version.py b/insights/parsers/ceph_version.py index 20dd4c416..488535be2 100644 --- a/insights/parsers/ceph_version.py +++ b/insights/parsers/ceph_version.py @@ -67,8 +67,18 @@ "12.2.12-79": {'version': "3.3.1", 'major': '3', 'minor': '3', 'downstream_release': 'async'}, "12.2.12-84": {'version': "3.3.2", 'major': '3', 'minor': '3', 'downstream_release': '2'}, "12.2.12-101": {'version': "3.3.4", 'major': '3', 'minor': '3', 'downstream_release': '4'}, + "12.2.12-115": {'version': "3.3.5", 'major': '3', 'minor': '3', 'downstream_release': '0'}, + "12.2.12-124": {'version': "3.3.6", 'major': '3', 'minor': '3', 'downstream_release': '0'}, + "12.2.12-127": {'version': "3.3.6", 'major': '3', 'minor': '3', 'downstream_release': '0'}, "14.2.4-125": {'version': "4.0", 'major': '4', 'minor': '0', 'downstream_release': '0'}, + "14.2.4-51": {'version': "4.0", 'major': '4', 'minor': '0', 'downstream_release': '0'}, + "14.2.8-50": {'version': "4.1", 'major': '4', 'minor': '1', 'downstream_release': '0'}, "14.2.8-59": {'version': "4.1", 'major': '4', 'minor': '1', 'downstream_release': '0'}, + "14.2.8-81": {'version': "4.1.1", 'major': '4', 'minor': '1', 'downstream_release': '0'}, + "14.2.8-89": {'version': "4.1.1", 'major': '4', 'minor': '1', 'downstream_release': 'async'}, + "14.2.8-91": {'version': "4.1.1", 'major': '4', 'minor': '1', 'downstream_release': 'async 2'}, + "14.2.8-111": {'version': "4.1.2", 'major': '4', 'minor': '1', 'downstream_release': '0'}, + "14.2.8-115": {'version': "4.1.3", 'major': '4', 'minor': '1', 'downstream_release': '0'}, } From c0d52db2bdf8b0b45b837599e066c46cb1901638 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 6 Jan 2021 23:30:45 +0800 Subject: [PATCH 
276/892] Enhance the package_provides_java and package_provides_httpd (#2841) * Enhance the package_provides_java - Enhance the logic of the datasource to get the command from ps - Deprecate the package_provides_java, use package_provides instead Signed-off-by: Xiangce Liu * Fix doc checking error Signed-off-by: Xiangce Liu * Deprecate the package_provides_httpd parser and combiner as well Signed-off-by: Xiangce Liu * Add the spec to insights_archive.py Signed-off-by: Xiangce Liu * Remove the duplicate 'which' from _get_package Signed-off-by: Xiangce Liu * Resolve conflicts Signed-off-by: Xiangce Liu * Change to use command_with_args instead of foreach_execute - And remove the multi_output=True Signed-off-by: Xiangce Liu * Change to use Ps Combiner instead of PsAuxcww Parser in specs/default.py Signed-off-by: Xiangce Liu * Reomve the PackageProvides Combiner Signed-off-by: Xiangce Liu * Remove the doc entry of package_provides combiner Signed-off-by: Xiangce Liu * Fix typos in doc Signed-off-by: Xiangce Liu * Fix the error in doc Signed-off-by: Xiangce Liu * add more description to the docstring of the base Signed-off-by: Xiangce Liu * fix test for py2 and py3 Signed-off-by: Xiangce Liu * update the docstring of base Signed-off-by: Xiangce Liu * Add doc for properties Signed-off-by: Xiangce Liu * Refine the sub function to execute only oncd for each command Signed-off-by: Xiangce Liu * Add Ps to the default dependencies Signed-off-by: Xiangce Liu * fix test in py2.6 Signed-off-by: Xiangce Liu * fix error in doc Signed-off-by: Xiangce Liu * update the insights_archive Signed-off-by: Xiangce Liu * fix the test Signed-off-by: Xiangce Liu * adjust the spec order Signed-off-by: Xiangce Liu * Use a new spec instead of the package_provides_java/httpd Signed-off-by: Xiangce Liu * Fix the logic error in datasource function Signed-off-by: Xiangce Liu --- .../package_provides.rst | 3 + insights/collect.py | 10 ++ insights/combiners/package_provides_httpd.py | 16 +++ insights/combiners/package_provides_java.py | 15 +++ insights/parsers/package_provides.py | 76 +++++++++++++ insights/parsers/package_provides_httpd.py | 15 +++ insights/parsers/package_provides_java.py | 15 +++ .../parsers/tests/test_package_provides.py | 48 +++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 102 +++++++++++------- insights/specs/insights_archive.py | 1 + 11 files changed, 264 insertions(+), 38 deletions(-) create mode 100644 docs/shared_parsers_catalog/package_provides.rst create mode 100644 insights/parsers/package_provides.py create mode 100644 insights/parsers/tests/test_package_provides.py diff --git a/docs/shared_parsers_catalog/package_provides.rst b/docs/shared_parsers_catalog/package_provides.rst new file mode 100644 index 000000000..f885b4614 --- /dev/null +++ b/docs/shared_parsers_catalog/package_provides.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.package_provides + :members: + :show-inheritance: diff --git a/insights/collect.py b/insights/collect.py index 38a8c3b25..b130e9bbd 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -129,6 +129,16 @@ - name: insights.combiners.services enabled: true + # needed for multiple Datasouce specs + - name: insights.parsers.ps.PsAuxcww + enabled: true + + - name: insights.parsers.ps.PsAuxww + enabled: true + + - name: insights.combiners.ps + enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner - name: insights.parsers.lssap enabled: true diff --git a/insights/combiners/package_provides_httpd.py b/insights/combiners/package_provides_httpd.py index 927841ea5..6b40a3c9e 100644 --- a/insights/combiners/package_provides_httpd.py +++ b/insights/combiners/package_provides_httpd.py @@ -2,6 +2,11 @@ PackageProvidesHttpdAll - Combiner for packages which provide httpd =================================================================== +.. warning:: + + This module is deprecated, please use + :mod:`insights.parsers.package_provides` instead. + Combiner for collecting all the running httpd command and the corresponding RPM package name which is parsed by the PackageProvidesHttpd parser. @@ -10,11 +15,18 @@ from insights.core.plugins import combiner from insights.parsers.package_provides_httpd import PackageProvidesHttpd from .. import LegacyItemAccess +from insights.util import deprecated @combiner(PackageProvidesHttpd) class PackageProvidesHttpdAll(LegacyItemAccess): """ + .. warning:: + + This Combiner is deprecated, please use + :class:`insights.parsers.package_provides.PackageProvidesCommand` + Parsers instead. + This combiner will receive a list of parsers named PackageProvidesHttpd, one for each running instance of httpd and each parser instance will contain the command information and the RPM package information. It works as a ``dict`` with the httpd command information as the key and the @@ -33,6 +45,10 @@ class PackageProvidesHttpdAll(LegacyItemAccess): """ def __init__(self, package_provides_httpd): + deprecated( + PackageProvidesHttpdAll, + 'Please use the :class:`insights.parsers.package_provides.PackageProvidesCommand` instead.' + ) self.data = {} for pkg in package_provides_httpd: self.data[pkg.command] = pkg.package diff --git a/insights/combiners/package_provides_java.py b/insights/combiners/package_provides_java.py index 8d59fc476..1ac6a26e3 100644 --- a/insights/combiners/package_provides_java.py +++ b/insights/combiners/package_provides_java.py @@ -2,6 +2,11 @@ PackageProvidesJavaAll - Combiner for packages which provide java ================================================================= +.. warning:: + + This module is deprecated, please use + :mod:`insights.parsers.package_provides` instead. + Combiner for collecting all the java command and the corresponding package name which is parsed by the PackageProvidesJava parser. @@ -10,11 +15,17 @@ from .. import LegacyItemAccess from insights.core.plugins import combiner from insights.parsers.package_provides_java import PackageProvidesJava +from insights.util import deprecated @combiner(PackageProvidesJava) class PackageProvidesJavaAll(LegacyItemAccess): """ + .. warning:: + + This Combiner is deprecated, please use + :class:`insights.parsers.package_provides.PackageProvidesCommand` Parser instead. + Combiner for collecting all the java command and the corresponding package name which is parsed by the PackageProvidesJava parser. 
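    A rough sketch of the mapping this combiner builds from the individual
    parser results (sample values; ``parser_instances`` stands in for the list
    the combiner receives, and each instance carries ``command`` and
    ``package`` attributes)::

        data = {}
        for pkg in parser_instances:
            data[pkg.command] = pkg.package
        # e.g. {'/usr/bin/java': 'java-11-openjdk-11.0.9.11-2.el8_3.x86_64'}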
It works as a ``dict`` with the java command as the key and the @@ -40,6 +51,10 @@ class PackageProvidesJavaAll(LegacyItemAccess): """ def __init__(self, package_provides_java): + deprecated( + PackageProvidesJavaAll, + 'Please use the :class:`insights.parsers.package_provides.PackageProvidesCommand` instead.' + ) self.data = {} for pkg in package_provides_java: self.data[pkg.command] = pkg.package diff --git a/insights/parsers/package_provides.py b/insights/parsers/package_provides.py new file mode 100644 index 000000000..c1395951b --- /dev/null +++ b/insights/parsers/package_provides.py @@ -0,0 +1,76 @@ +""" +PackageProvidesCommand - Command ``/bin/echo {command_package}`` +================================================================ +""" + +from insights import parser, CommandParser +from insights.parsers import SkipException, ParseException +from insights.specs import Specs + + +@parser(Specs.package_provides_command) +class PackageProvidesCommand(CommandParser, dict): + """ + Parser to parse the specified running commands and their provider packages. + + This parser receives a list of string pairs which is generated by + several @datasource functions. The first string is the full path of the + specified running ``command`` and the second string is the package that + provides this command. It works as a `dict` with the ``command`` as the + key and the corresponding package name as the value. + + To check the provider package of the specified command, please add the + command to the ``COMMANDS`` of :func:`insights.specs.default.DefaultSpecs.cmd_and_pkg` + + Sample output:: + + /usr/bin/java java-11-openjdk-11.0.9.11-2.el8_3.x86_64 + /usr/sbin/httpd httpd-2.4.22-7.el7.x86_64 + /usr/lib/jvm/jre/bin/java java-1.8.0-openjdk-1.8.0.272.b10-3.el8_3.x86_64 + /opt/rh/httpd24/root/usr/sbin/httpd httpd24-httpd-2.4.34-7.el7.x86_64 + + Raises: + SkipException: When none of the specified commands is detected running on this host. + ParseException: When there is an un-parseable line. + + Example: + >>> '/usr/lib/jvm/jre/bin/java' in cmd_package.commands + True + >>> 'java-11-openjdk-11.0.9.11-2.el8_3.x86_64' in cmd_package.packages + True + >>> '/usr/sbin/httpd' in cmd_package.commands + True + >>> 'httpd24-httpd-2.4.34-7.el7.x86_64' in cmd_package.packages + True + >>> cmd_package['/usr/lib/jvm/jre/bin/java'] + 'java-1.8.0-openjdk-1.8.0.272.b10-3.el8_3.x86_64' + >>> cmd_package['/usr/sbin/httpd'] + 'httpd-2.4.22-7.el7.x86_64' + """ + + def parse_content(self, content): + data = {} + for line in content: + l_sp = [l.strip() for l in line.split()] + if len(l_sp) != 2: + raise ParseException('Incorrect line: {0}'.format(line)) + data[l_sp[0]] = l_sp[1] + + if len(data) == 0: + raise SkipException() + + self.update(data) + + @property + def commands(self): + """ + Returns the list of specified commands that are running on this host. + """ + return list(self.keys()) + + @property + def packages(self): + """ + Returns the list of the packages that provide the specified ``commands``. + """ + return list(self.values()) diff --git a/insights/parsers/package_provides_httpd.py b/insights/parsers/package_provides_httpd.py index a9e0e59d2..65a27fb5b 100644 --- a/insights/parsers/package_provides_httpd.py +++ b/insights/parsers/package_provides_httpd.py @@ -2,6 +2,11 @@ PackageProvidesHttpd - command ``/bin/echo {httpd_command_package}`` ==================================================================== +.. warning:: + + This module is deprecated, please use + :mod:`insights.parsers.package_provides` instead.
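+
+    A minimal migration sketch (``cmd_package`` is an illustrative instance
+    of the replacement parser; the sample value mirrors its documented
+    doctest data)::
+
+        from insights.parsers.package_provides import PackageProvidesCommand
+
+        # the new parser maps each running command to its providing RPM:
+        # cmd_package['/usr/sbin/httpd'] == 'httpd-2.4.22-7.el7.x86_64'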
+ This module parses the content that contains running instances of 'httpd' and its corresponding RPM package which provide them. The running command and its package name are stored as properties ``command`` and ``package`` of the object. @@ -22,11 +27,17 @@ from insights import parser, CommandParser from insights.specs import Specs from ..parsers import SkipException +from insights.util import deprecated @parser(Specs.package_provides_httpd) class PackageProvidesHttpd(CommandParser): """ + .. warning:: + + This Parser is deprecated, please use + :class:`insights.parsers.package_provides.PackageProvidesCommand` instead. + Parse the content like '/opt/rh/httpd24/root/usr/sbin/httpd /usr/sbin/httpd' Attributes: @@ -35,6 +46,10 @@ class PackageProvidesHttpd(CommandParser): """ def parse_content(self, content): + deprecated( + PackageProvidesHttpd, + 'Please use the :class:`insights.parsers.package_provides.PackageProvidesCommand` instead.' + ) if len(content) == 0: raise SkipException("Error: ", 'there is no httpd application running') l = content[0].split() diff --git a/insights/parsers/package_provides_java.py b/insights/parsers/package_provides_java.py index 678a6b1ee..b08ec9f4f 100644 --- a/insights/parsers/package_provides_java.py +++ b/insights/parsers/package_provides_java.py @@ -2,6 +2,11 @@ PackageProvidesJava - command ``/bin/echo {java_command_package}`` ================================================================== +.. warning:: + + This module is deprecated, please use + :mod:`insights.parsers.package_provides` instead. + This command reads the output of the pre-command: ``for jp in `/bin/ps auxwww | grep java | grep -v grep| awk '{print $11}' | sort -u`; do echo $jp `readlink -e $jp | xargs rpm -qf`; done`` @@ -43,11 +48,17 @@ from insights import parser, CommandParser from ..parsers import ParseException, SkipException from insights.specs import Specs +from insights.util import deprecated @parser(Specs.package_provides_java) class PackageProvidesJava(CommandParser): """ + .. warning:: + + This Parser is deprecated, please use + :class:`insights.parsers.package_provides.PackageProvidesCommand` instead. + Parse the output of pre_command:: ``for jp in `/bin/ps auxwww | grep java | grep -v grep| awk '{print $11}' | sort -u`; do echo "$jp `readlink -e $jp | xargs rpm -qf`"; done``. @@ -58,6 +69,10 @@ class PackageProvidesJava(CommandParser): """ def parse_content(self, content): + deprecated( + PackageProvidesJava, + 'Please use the :class:`insights.parsers.package_provides.PackageProvidesCommand` instead.'
+ ) if len(content) == 0: raise ParseException("Error: ", 'there is no java application running') l = content[0].split() diff --git a/insights/parsers/tests/test_package_provides.py b/insights/parsers/tests/test_package_provides.py new file mode 100644 index 000000000..04299eac1 --- /dev/null +++ b/insights/parsers/tests/test_package_provides.py @@ -0,0 +1,48 @@ +import pytest +import doctest + +from insights.tests import context_wrap +from insights.parsers import SkipException, ParseException +from insights.parsers import package_provides +from insights.parsers.package_provides import PackageProvidesCommand + +PACKAGE_COMMAND = """ +/usr/bin/java java-11-openjdk-11.0.9.11-2.el8_3.x86_64 +/usr/sbin/httpd httpd-2.4.22-7.el7.x86_64 +/usr/lib/jvm/jre/bin/java java-1.8.0-openjdk-1.8.0.272.b10-3.el8_3.x86_64 +/opt/rh/httpd24/root/usr/sbin/httpd httpd24-httpd-2.4.34-7.el7.x86_64 +""" + +PACKAGE_COMMAND_EMPTY = """ +""" + +PACKAGE_COMMAND_NOT_MATCH = """ +jdk-9/bin/java file /root/jdk-9/bin/java is not owned by any package +bin/httpd file /root/bin/httpd is not owned by any package +""" + + +def test_package_provides_command(): + package = PackageProvidesCommand(context_wrap(PACKAGE_COMMAND)) + assert len(package.commands) == 4 + assert len(package.packages) == 4 + assert '/usr/bin/java' in package + assert '/usr/sbin/httpd' in package.commands + assert package['/usr/bin/java'] == 'java-11-openjdk-11.0.9.11-2.el8_3.x86_64' + assert package['/opt/rh/httpd24/root/usr/sbin/httpd'] == 'httpd24-httpd-2.4.34-7.el7.x86_64' + + +def test_package_provides_command_AB(): + with pytest.raises(SkipException): + PackageProvidesCommand(context_wrap(PACKAGE_COMMAND_EMPTY)) + + with pytest.raises(ParseException): + PackageProvidesCommand(context_wrap(PACKAGE_COMMAND_NOT_MATCH)) + + +def test_doc_examples(): + env = { + 'cmd_package': PackageProvidesCommand(context_wrap(PACKAGE_COMMAND)), + } + failed, _ = doctest.testmod(package_provides, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index bdc4b7965..f2e925273 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -461,6 +461,7 @@ class Specs(SpecSet): ovs_vsctl_show = RegistryPoint() ovs_vswitchd_limits = RegistryPoint() pacemaker_log = RegistryPoint(filterable=True) + package_provides_command = RegistryPoint() package_provides_java = RegistryPoint(multi_output=True) package_provides_httpd = RegistryPoint(multi_output=True) pam_conf = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 86571f8a8..42100c4e1 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -9,7 +9,6 @@ """ import logging -import os import re from insights.core.context import HostContext @@ -18,14 +17,16 @@ from insights.core.plugins import datasource from insights.core.spec_factory import RawFileProvider from insights.core.spec_factory import simple_file, simple_command, glob_file -from insights.core.spec_factory import first_of, foreach_collect, foreach_execute +from insights.core.spec_factory import first_of, command_with_args +from insights.core.spec_factory import foreach_collect, foreach_execute from insights.core.spec_factory import first_file, listdir from insights.combiners.cloud_provider import CloudProvider from insights.combiners.services import Services from insights.combiners.sap import Sap +from insights.combiners.ps import Ps +from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.parsers.mdstat import Mdstat from
insights.parsers.lsmod import LsMod -from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.specs import Specs @@ -45,29 +46,48 @@ def get_owner(filename): return (name, group) -def get_cmd_and_package_in_ps(broker, target_command): +def _get_running_commands(broker, commands=None): + """ + Search for command in ``ps auxcww`` output and determine RPM providing binary + + Arguments: + broker(dict): Current state of specs collected by Insights + commands(str or list): Command or list of commands to search for in ps output + + Returns: + list: List of the full command paths of the ``command``. """ - Search for command in ``ps auxww`` output and determine RPM providing binary + commands = [] if commands is None else commands + ps_list = [broker[Ps].search(COMMAND_NAME__contains=c) for c in commands] + ps_cmds = [i for sub_l in ps_list for i in sub_l] + ctx = broker[HostContext] + ret = set() + for cmd in set(p['COMMAND_NAME'] for p in ps_cmds): + try: + which = ctx.shell_out("/usr/bin/which {0}".format(cmd)) + except Exception: + continue + ret.add(which[0]) if which else None + return sorted(ret) + + +def _get_package(broker, command): + """ Arguments: broker(dict): Current state of specs collected by Insights - target_command(str): Command name to search for in ps output + command(str): The full command name to get the package Returns: - set: Set including all RPMs that provide the target command + str: The package that provides the ``command``. """ - ps = broker[DefaultSpecs.ps_auxww].content ctx = broker[HostContext] - results = set() - for p in ps: - p_splits = p.split(None, 10) - cmd = p_splits[10].split()[0] if len(p_splits) == 11 else '' - which = ctx.shell_out("which {0}".format(cmd)) if target_command in os.path.basename(cmd) else None - resolved = ctx.shell_out("readlink -e {0}".format(which[0])) if which else None - pkg = ctx.shell_out("/bin/rpm -qf {0}".format(resolved[0])) if resolved else None - if cmd and pkg is not None: - results.add("{0} {1}".format(cmd, pkg[0])) - return results + resolved = ctx.shell_out("/usr/bin/readlink -e {0}".format(command)) + if resolved: + pkg = ctx.shell_out("/usr/bin/rpm -qf {0}".format(resolved[0])) + if pkg: + return pkg[0] + raise SkipComponent def _make_rpm_formatter(fmt=None): @@ -174,12 +194,11 @@ def tomcat_base(broker): ceph_df_detail = simple_command("/usr/bin/ceph df detail -f json") ceph_health_detail = simple_command("/usr/bin/ceph health detail -f json") - @datasource(ps_auxww) + @datasource(Ps) def is_ceph_monitor(broker): """ bool: Returns True if ceph monitor process ceph-mon is running on this node """ - ps = broker[DefaultSpecs.ps_auxww].content - findall = re.compile(r"ceph\-mon").findall - if any(findall(p) for p in ps): + ps = broker[Ps] + if ps.search(COMMAND__contains='ceph-mon'): return True raise SkipComponent() @@ -336,29 +355,16 @@ def du_dirs_list(broker): jbcs_httpd24_httpd_error_log = simple_file("/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log") virt_uuid_facts = simple_file("/etc/rhsm/facts/virt_uuid.facts") - @datasource(ps_auxww) + @datasource(Ps) def httpd_cmd(broker): """ - Function to search the output of ``ps auxww`` to find all running Apache + Function to search the output of ``ps auxcww`` to find all running Apache webserver processes and extract the binary path. 
Returns: list: List of the binary paths to each running process """ - ps = broker[DefaultSpecs.ps_auxww].content - ps_httpds = set() - for p in ps: - p_splits = p.split(None, 10) - if len(p_splits) >= 11: - cmd = p_splits[10].split()[0] - # Should compatible with RHEL6 - # e.g. /usr/sbin/httpd, /usr/sbin/httpd.worker and /usr/sbin/httpd.event - # and SCL's httpd24-httpd - if os.path.basename(cmd).startswith('httpd'): - ps_httpds.add(cmd) - # Running multiple httpd instances on RHEL is supported - # https://access.redhat.com/solutions/21680 - return list(ps_httpds) + return _get_running_commands(broker, 'httpd') httpd_pid = simple_command("/usr/bin/pgrep -o httpd") httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") @@ -553,6 +559,26 @@ def md_device_list(broker): ovirt_engine_ui_log = simple_file("/var/log/ovirt-engine/ui.log") ovs_vsctl_list_bridge = simple_command("/usr/bin/ovs-vsctl list bridge") ovs_vsctl_show = simple_command("/usr/bin/ovs-vsctl show") + + @datasource(Ps, context=HostContext) + def cmd_and_pkg(broker): + """ + Returns: + str: The specified commands and their provider packages, one ``command package`` pair per line. + + Attributes: + COMMANDS (list): List of the specified commands whose provider packages are to be checked. + """ + COMMANDS = ['java'] + pkg_cmd = list() + for cmd in _get_running_commands(broker, COMMANDS): + pkg_cmd.append("{0} {1}".format(cmd, _get_package(broker, cmd))) + if pkg_cmd: + return '\n'.join(pkg_cmd) + raise SkipComponent + + package_provides_command = command_with_args("/usr/bin/echo '%s'", cmd_and_pkg) + package_provides_java = foreach_execute(cmd_and_pkg, "/usr/bin/echo '%s'") pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 66f232bc4..2966dd89e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -173,6 +173,7 @@ class InsightsArchiveSpecs(Specs): openvswitch_other_config = simple_file("insights_commands/ovs-vsctl_-t_5_get_Open_vSwitch_._other_config") ovs_vsctl_list_bridge = simple_file("insights_commands/ovs-vsctl_list_bridge") ovs_vsctl_show = simple_file("insights_commands/ovs-vsctl_show") + package_provides_command = glob_file("insights_commands/echo_*java*") passenger_status = simple_file("insights_commands/passenger-status") pci_rport_target_disk_paths = simple_file("insights_commands/find_.sys.devices._-maxdepth_10_-mindepth_9_-name_stat_-type_f") pcp_metrics = simple_file("insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5") From 474144c1127bab37e7e828217389441f78396d32 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 6 Jan 2021 21:26:47 +0530 Subject: [PATCH 277/892] New parser ls_ipa_idoverride_memberof (#2856) * new parser ls_ipa_idoverride_memberof Signed-off-by: rasrivas * fixed the flake8 error Signed-off-by: rasrivas --- .../ls_ipa_idoverride_memberof.rst | 3 ++ .../parsers/ls_ipa_idoverride_memberof.py | 32 +++++++++++++++++++ .../tests/test_ls_ipa_idoverride_memberof.py | 27 ++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 65 insertions(+) create mode 100644 docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst create mode 100644
insights/parsers/ls_ipa_idoverride_memberof.py create mode 100644 insights/parsers/tests/test_ls_ipa_idoverride_memberof.py diff --git a/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst b/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst new file mode 100644 index 000000000..c14ae30f6 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_ipa_idoverride_memberof + :members: + :show-inheritance: diff --git a/insights/parsers/ls_ipa_idoverride_memberof.py b/insights/parsers/ls_ipa_idoverride_memberof.py new file mode 100644 index 000000000..acb1080c1 --- /dev/null +++ b/insights/parsers/ls_ipa_idoverride_memberof.py @@ -0,0 +1,32 @@ +""" +LsIPAIdoverrideMemberof - command ``ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof`` +============================================================================================== + +The ``ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof`` command provides information for the listing of the +``/usr/share/ipa/ui/js/plugins/idoverride-memberof`` files. See the ``FileListing`` class for a more complete description +of the available features of the class. + +Sample ``ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof`` output:: + + /usr/share/ipa/ui/js/plugins/idoverride-memberof: + total 0 + drwxr-xr-x. 2 0 0 0 Nov 11 11:44 . + drwxr-xr-x. 4 0 0 0 Nov 11 11:44 .. + -rw-rw-r--. 1 0 0 0 Nov 11 11:44 idoverride-memberof.js + -rw-rw-r--. 1 0 0 0 Nov 11 11:44 idoverride-admemberof.js + +Examples: + >>> '/usr/share/ipa/ui/js/plugins/idoverride-memberof' in ls_ipa_idoverride_memberof + True + >>> ls_ipa_idoverride_memberof.files_of('/usr/share/ipa/ui/js/plugins/idoverride-memberof') == ['idoverride-memberof.js', 'idoverride-admemberof.js'] + True +""" + +from insights.specs import Specs +from .. import parser, CommandParser, FileListing + + +@parser(Specs.ls_ipa_idoverride_memberof) +class LsIPAIdoverrideMemberof(CommandParser, FileListing): + """Parses output of ``ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof`` command.""" + pass diff --git a/insights/parsers/tests/test_ls_ipa_idoverride_memberof.py b/insights/parsers/tests/test_ls_ipa_idoverride_memberof.py new file mode 100644 index 000000000..a35e1ea9a --- /dev/null +++ b/insights/parsers/tests/test_ls_ipa_idoverride_memberof.py @@ -0,0 +1,27 @@ +from insights.parsers import ls_ipa_idoverride_memberof +from insights.parsers.ls_ipa_idoverride_memberof import LsIPAIdoverrideMemberof +from insights.tests import context_wrap +import doctest + +LS_IPA_IDOVERRIDEMEMBEROF = """ +/usr/share/ipa/ui/js/plugins/idoverride-memberof: +total 0 +drwxr-xr-x. 2 0 0 0 Nov 11 11:44 . +drwxr-xr-x. 4 0 0 0 Nov 11 11:44 .. +-rw-rw-r--. 1 0 0 0 Nov 11 11:44 idoverride-memberof.js +-rw-rw-r--. 
1 0 0 0 Nov 11 11:44 idoverride-admemberof.js +""" + + +def test_ls_ipa_idoverride_memberof(): + ls_ipa_idoverride_memberof = LsIPAIdoverrideMemberof(context_wrap(LS_IPA_IDOVERRIDEMEMBEROF)) + assert '/usr/share/ipa/ui/js/plugins/idoverride-memberof' in ls_ipa_idoverride_memberof + assert ls_ipa_idoverride_memberof.files_of('/usr/share/ipa/ui/js/plugins/idoverride-memberof') == ['idoverride-memberof.js', 'idoverride-admemberof.js'] + + +def test_ls_ipa_idoverride_memberof_doc(): + failed_count, tests = doctest.testmod( + ls_ipa_idoverride_memberof, + globs={'ls_ipa_idoverride_memberof': ls_ipa_idoverride_memberof.LsIPAIdoverrideMemberof(context_wrap(LS_IPA_IDOVERRIDEMEMBEROF))} + ) + assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index f2e925273..0ebc7444e 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -298,6 +298,7 @@ class Specs(SpecSet): ls_docker_volumes = RegistryPoint() ls_edac_mc = RegistryPoint() ls_etc = RegistryPoint() + ls_ipa_idoverride_memberof = RegistryPoint() ls_lib_firmware = RegistryPoint() ls_ocp_cni_openshift_sdn = RegistryPoint() ls_origin_local_volumes_pods = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 42100c4e1..7b7fa98c9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -417,6 +417,7 @@ def httpd_cmd(broker): "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig", "/etc/cloud/cloud.cfg.d", "/etc/rc.d/init.d"]) ls_etc = simple_command("/bin/ls -lan {0}".format(' '.join(etc_and_sub_dirs))) + ls_ipa_idoverride_memberof = simple_command("/bin/ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof") ls_lib_firmware = simple_command("/bin/ls -lanR /lib/firmware") ls_ocp_cni_openshift_sdn = simple_command("/bin/ls -l /var/lib/cni/networks/openshift-sdn") ls_origin_local_volumes_pods = simple_command("/bin/ls -l /var/lib/origin/openshift.local.volumes/pods") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 2966dd89e..86f5a2414 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -101,6 +101,7 @@ class InsightsArchiveSpecs(Specs): ls_disk = simple_file("insights_commands/ls_-lanR_.dev.disk") ls_edac_mc = simple_file("insights_commands/ls_-lan_.sys.devices.system.edac.mc") ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig") + ls_ipa_idoverride_memberof = simple_file("insights_commands/ls_-lan_.usr.share.ipa.ui.js.plugins.idoverride-memberof") ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware") ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn") ls_origin_local_volumes_pods = simple_file("insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods") From 0332aa9788214c1404fff2b12eb38ca5db0b0e52 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 7 Jan 2021 01:22:33 +0800 Subject: [PATCH 278/892] Add parser to parse the puppet ca cert expire date (#2875) Signed-off-by: Huanhuan Li --- .../puppet_ca_cert_expire_date.rst | 3 ++ .../parsers/puppet_ca_cert_expire_date.py | 46 +++++++++++++++++++ .../tests/test_puppet_ca_cert_expire_date.py | 44 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files 
changed, 96 insertions(+) create mode 100644 docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst create mode 100644 insights/parsers/puppet_ca_cert_expire_date.py create mode 100644 insights/parsers/tests/test_puppet_ca_cert_expire_date.py diff --git a/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst b/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst new file mode 100644 index 000000000..31cb372e0 --- /dev/null +++ b/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.puppet_ca_cert_expire_date + :members: + :show-inheritance: diff --git a/insights/parsers/puppet_ca_cert_expire_date.py b/insights/parsers/puppet_ca_cert_expire_date.py new file mode 100644 index 000000000..d2d138982 --- /dev/null +++ b/insights/parsers/puppet_ca_cert_expire_date.py @@ -0,0 +1,46 @@ +""" +PuppetCertExpireDate - command ``openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout`` +============================================================================================================ + +The PuppetCertExpireDate parser reads the output of +``openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout``. + +Sample output of ``openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout``:: + + notAfter=Dec 4 07:04:05 2035 GMT + +Examples:: + + >>> type(date_info) + <class 'insights.parsers.puppet_ca_cert_expire_date.PuppetCertExpireDate'> + >>> date_info.expire_date + datetime.datetime(2035, 12, 4, 7, 4, 5) + +""" +from datetime import datetime + +from insights import parser, CommandParser +from insights.specs import Specs +from insights.parsers import SkipException, ParseException + + +@parser(Specs.puppet_ca_cert_expire_date) +class PuppetCertExpireDate(CommandParser): + """ + Read the ``openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout`` + and set the date to property ``expire_date``.
+ + Attributes: + expire_date (datetime): The date when the puppet ca cert will expire + """ + + def parse_content(self, content): + if len(content) == 1 and content[0].startswith('notAfter='): + date_format = '%b %d %H:%M:%S %Y %Z' + date_str = content[0].split('=', 1)[1] + try: + self.expire_date = datetime.strptime(date_str, date_format) + except Exception: + raise ParseException("Cannot parse the date format") + else: + raise SkipException("Cannot get the puppet ca cert expire info") diff --git a/insights/parsers/tests/test_puppet_ca_cert_expire_date.py b/insights/parsers/tests/test_puppet_ca_cert_expire_date.py new file mode 100644 index 000000000..0e1b43557 --- /dev/null +++ b/insights/parsers/tests/test_puppet_ca_cert_expire_date.py @@ -0,0 +1,44 @@ +import doctest +import pytest + +from insights.parsers import ( + puppet_ca_cert_expire_date, SkipException, ParseException) +from insights.tests import context_wrap + + +PUPPET_CERT_EXPIRE_INFO = ''' +notAfter=Dec 4 07:04:05 2035 GMT +''' + +WRONG_PUPPET_CERT_INFO_1 = ''' +Can't open /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem for reading, No such file or directory +140033749546816:error:02001002:system library:fopen:No such file or directory:crypto/bio/bss_file.c:69:fopen('/etc/puppetlabs/puppet/ssl/ca/ca_crt.pem','r') +140033749546816:error:2006D080:BIO routines:BIO_new_file:no such file:crypto/bio/bss_file.c:76: +unable to load certificate +''' + +WRONG_PUPPET_CERT_INFO_2 = ''' +notAfter=Mon Jan 4 02:31:28 EST 202 +''' + +WRONG_PUPPET_CERT_INFO_3 = ''' +Mon Jan 4 02:31:28 EST 202 +''' + + +def test_HTL_doc_examples(): + date_info = puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(PUPPET_CERT_EXPIRE_INFO)) + globs = { + 'date_info': date_info + } + failed, tested = doctest.testmod(puppet_ca_cert_expire_date, globs=globs) + assert failed == 0 + + +def test_wrong_output(): + with pytest.raises(SkipException): + puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_1)) + with pytest.raises(SkipException): + puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_3)) + with pytest.raises(ParseException): + puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_2)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 0ebc7444e..dd175b9b1 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -500,6 +500,7 @@ class Specs(SpecSet): pulp_worker_defaults = RegistryPoint() puppet_ssl_cert_ca_pem = RegistryPoint() puppetserver_config = RegistryPoint(filterable=True) + puppet_ca_cert_expire_date = RegistryPoint() pvs_noheadings = RegistryPoint() pvs_noheadings_all = RegistryPoint() pvs = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 7b7fa98c9..bdede6f52 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -615,6 +615,7 @@ def pcp_enabled(broker): proc_snmp_ipv6 = simple_file("proc/net/snmp6") proc_stat = simple_file("proc/stat") pulp_worker_defaults = simple_file("etc/default/pulp_workers") + puppet_ca_cert_expire_date = simple_command("/usr/bin/openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout") pvs_noheadings = simple_command("/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"") qemu_conf = simple_file("/etc/libvirt/qemu.conf") qemu_xml = glob_file(r"/etc/libvirt/qemu/*.xml") diff --git a/insights/specs/insights_archive.py
b/insights/specs/insights_archive.py index 86f5a2414..068cf5af3 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -188,6 +188,7 @@ class InsightsArchiveSpecs(Specs): ps_auxww = simple_file("insights_commands/ps_auxww") ps_ef = simple_file("insights_commands/ps_-ef") ps_eo = simple_file("insights_commands/ps_-eo_pid_ppid_comm") + puppet_ca_cert_expire_date = simple_file("insights_commands/openssl_x509_-in_.etc.puppetlabs.puppet.ssl.ca.ca_crt.pem_-enddate_-noout") pvs_noheadings = simple_file("insights_commands/pvs_--nameprefixes_--noheadings_--separator_-a_-o_pv_all_vg_name_--config_global_locking_type_0") qpid_stat_g = simple_file("insights_commands/qpid-stat_-g_--ssl-certificate_.etc.pki.katello.qpid_client_striped.crt_-b_amqps_..localhost_5671") rabbitmq_report = simple_file("insights_commands/rabbitmqctl_report") From 130696a7a429da1faeb6d60cadb09f19ac502652 Mon Sep 17 00:00:00 2001 From: Victor M Date: Wed, 6 Jan 2021 19:11:33 +0100 Subject: [PATCH 279/892] Update Compliance error message (#2867) Signed-off-by: Victor M Co-authored-by: Jeremy Crafts --- insights/client/apps/compliance/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index f2acdbfd2..0fe5bfaf9 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -29,7 +29,7 @@ def oscap_scan(self): self._assert_oscap_rpms_exist() policies = self.get_policies() if not policies: - logger.error("System is not associated with any profiles. Assign profiles by either uploading a SCAP scan or using the compliance web UI.\n") + logger.error("System is not associated with any profiles. Assign profiles using the Compliance web UI.\n") exit(constants.sig_kill_bad) for policy in policies: self.run_scan( From fa7eae6a0c522d8cd3b0991f386f394369eb23e6 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 7 Jan 2021 06:41:40 +0800 Subject: [PATCH 280/892] Add LsMod parser to the pre-loaded component for 'ss' spec (#2873) Signed-off-by: Xiangce Liu --- insights/collect.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/insights/collect.py b/insights/collect.py index b130e9bbd..66c61a3e3 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -149,6 +149,10 @@ - name: insights.combiners.sap enabled: true + # needed for the 'pre-check' of the 'ss' spec + - name: insights.parsers.lsmod + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true From eba4c3e8289069434064d3ae5f27b9a9f4d1f240 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Thu, 7 Jan 2021 08:45:59 +0530 Subject: [PATCH 281/892] Add ls_sys_firmware spec in sos_archive (#2877) Signed-off-by: Akshay Gaikwad --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 842d62bb4..a40331ea9 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -121,6 +121,7 @@ class SosSpecs(Specs): locale = simple_file("sos_commands/i18n/locale") lsblk = first_file(["sos_commands/block/lsblk", "sos_commands/filesys/lsblk"]) ls_boot = simple_file("sos_commands/boot/ls_-lanR_.boot") + ls_sys_firmware = simple_file("sos_commands/boot/ls_-lanR_.sys.firmware") lscpu = simple_file("sos_commands/processor/lscpu") lsinitrd = simple_file("sos_commands/boot/lsinitrd") lsof = 
simple_file("sos_commands/process/lsof_-b_M_-n_-l") From a0fd815b65586911a403d81a034f0d63f13f1e11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=A0t=C4=9Bp=C3=A1n=20Tomsa?= Date: Thu, 17 Dec 2020 14:59:48 +0100 Subject: [PATCH 282/892] Fix HTTP_PROXY warning condition MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HTTP proxy warning is no longer issued if there is HTTPS proxy defined. Added tests for all possible combinations. Signed-off-by: Štěpán Tomsa --- insights/client/config.py | 2 +- insights/tests/client/test_config.py | 41 ++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/insights/client/config.py b/insights/client/config.py index 15206d439..32a5f7f0a 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -534,7 +534,7 @@ def _boolify(v): return v # put this warning here so the error msg only prints once - if os.environ.get('HTTP_PROXY') and self._print_errors: + if os.environ.get('HTTP_PROXY') and not os.environ.get('HTTPS_PROXY') and self._print_errors: sys.stdout.write('WARNING: HTTP_PROXY is unused by insights-client. Please use HTTPS_PROXY.\n') # ignore these env as they are not config vars diff --git a/insights/tests/client/test_config.py b/insights/tests/client/test_config.py index 6c7a233d5..3e42f5c54 100644 --- a/insights/tests/client/test_config.py +++ b/insights/tests/client/test_config.py @@ -85,6 +85,47 @@ def test_env_number_bad_values(): c._load_env() +@patch('insights.client.config.os.environ', {}) +def test_env_no_proxy_no_warning(): + with patch('insights.client.config.sys.stdout.write') as write: + c = InsightsConfig(_print_errors=True) + c._load_env() + write.assert_not_called() + + +@patch('insights.client.config.os.environ', {'HTTP_PROXY': '127.0.0.1'}) +def test_env_http_proxy_warning(): + with patch('insights.client.config.sys.stdout.write') as write: + c = InsightsConfig(_print_errors=True) + c._load_env() + write.assert_called_once() + + +@patch('insights.client.config.os.environ', {'HTTP_PROXY': '127.0.0.1'}) +@pytest.mark.parametrize(("kwargs",), (({},), ({"_print_errors": False},))) +def test_env_http_proxy_no_warning(kwargs): + with patch('insights.client.config.sys.stdout.write') as write: + c = InsightsConfig(**kwargs) + c._load_env() + write.assert_not_called() + + +@patch('insights.client.config.os.environ', {'HTTP_PROXY': '127.0.0.1', 'HTTPS_PROXY': '127.0.0.1'}) +def test_env_http_and_https_proxy_no_warning(): + with patch('insights.client.config.sys.stdout.write') as write: + c = InsightsConfig(_print_errors=True) + c._load_env() + write.assert_not_called() + + +@patch('insights.client.config.os.environ', {'HTTPS_PROXY': '127.0.0.1'}) +def test_env_https_proxy_no_warning(): + with patch('insights.client.config.sys.stdout.write') as write: + c = InsightsConfig(_print_errors=True) + c._load_env() + write.assert_not_called() + + # empty argv so parse_args isn't polluted with pytest arguments @patch('insights.client.config.sys.argv', [sys.argv[0]]) def test_diagnosis_implies_legacy(): From 486f04e6b85369d494127a35d339c33d7b6acb91 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 7 Jan 2021 13:58:07 -0500 Subject: [PATCH 283/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 41 ++++++++++++++++++++++---- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 25a3b53d4..b978719cd 100644 --- 
a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -528,6 +528,11 @@ "pattern": [], "symbolic_name": "ls_etc" }, + { + "command": "/bin/ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof", + "pattern": [], + "symbolic_name": "ls_ipa_idoverride_memberof" + }, { "command": "/bin/ls -lanR /lib/firmware", "pattern": [], @@ -540,7 +545,9 @@ }, { "command": "/bin/ls -lan /usr/bin", - "pattern": [], + "pattern": [ + "total" + ], "symbolic_name": "ls_usr_bin" }, { @@ -1043,7 +1050,12 @@ }, { "command": "/usr/sbin/postconf", - "pattern": [], + "pattern": [ + "smtp_tls_mandatory_protocols", + "smtp_tls_protocols", + "smtpd_tls_mandatory_protocols", + "smtpd_tls_protocols" + ], "symbolic_name": "postconf" }, { @@ -1231,6 +1243,11 @@ "pattern": [], "symbolic_name": "ps_eo" }, + { + "command": "/usr/bin/openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout", + "pattern": [], + "symbolic_name": "puppet_ca_cert_expire_date" + }, { "command": "/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"", "pattern": [], @@ -1298,6 +1315,11 @@ "pattern": [], "symbolic_name": "sap_hdb_version" }, + { + "command": "/usr/bin/sudo -iu postgres psql -d foreman -c 'select count(*) from hosts'", + "pattern": [], + "symbolic_name": "satellite_content_hosts_count" + }, { "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", "pattern": [], @@ -2360,7 +2382,6 @@ "file": "/var/log/messages", "pattern": [ " invoked oom-killer: ", - "\"/var/lib/pgsql/data\" is missing or empty", "(enic): transmit queue 0 timed out", ", type vxfs) has no security xattr handler", "- image is referenced in one or more repositories", @@ -2385,9 +2406,16 @@ "Error running DeviceResume dm_task_run failed", "Exception happened during processing of request from", "Failed to extend thin", + "File system is filling up", + "High directory name cache miss rate", + "High number of saturated processors", + "High per CPU processor utilization", + "High per CPU system call rate", "Hyper-V Host", "List /apis/image.openshift.io/v1/images", "Loop callback failed with: Cannot allocate memory", + "Low buffer cache read hit ratio", + "Low free swap space", "MDC/MDIO access timeout", "Medium access timeout failure. 
Offlining disk!", "MountVolume.SetUp succeeded for volume", @@ -2404,11 +2432,14 @@ "SCSI error: return code =", "SDN initialization failed: Error: Existing service with IP: None is not part of service network", "Scheduled import of stream", + "Severe demand for real memory", + "Some CPU busy executing in system mode", "Steal time is >", "TCP listen overflows", "TCP request queue full SYN cookie replies", "TCP request queue full drops", "TX driver issue detected, PF reset issued", + "The threshold number of context switches per second per CPU", "This system does not support \"SSSE3\"", "Throttling request took", "Virtualization daemon", @@ -4187,5 +4218,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-12-03T16:24:04.611058" -} + "version": "2020-12-10T15:23:43.704559" +} \ No newline at end of file From 530420d8ab805f5107f02d6af95796a9e32fbec6 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Mon, 11 Jan 2021 11:02:54 -0600 Subject: [PATCH 284/892] Fix error with ipython and jedi in inspect (#2879) * There is an open issue with ipython and jedi that causes an exception if tab completion is triggered in insights inspect * Open issue in [IPython](https://github.com/ipython/ipython/issues/12740) * This closes #2878 Signed-off-by: Bob Fahr --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 43283af7b..d3aadc939 100644 --- a/setup.py +++ b/setup.py @@ -70,7 +70,9 @@ def maybe_require(pkg): 'ipython', 'colorama', 'jinja2', - 'Pygments' + 'Pygments', + 'jedi<0.18.0' # Open issue with jedi 0.18.0 and iPython <= 7.19 + # https://github.com/davidhalter/jedi/issues/1714 ]) testing = set([ From 966596f740681779672ebc37be3bc7dc69e9dc41 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 12 Jan 2021 08:46:43 -0600 Subject: [PATCH 285/892] Fix 2863 (#2883) * intern Entry names. Signed-off-by: Christopher Sams * Convert queries to regular python functions. Signed-off-by: Christopher Sams * Use tuples for attrs. Use tuples for attrs and children in from_dict. Signed-off-by: Christopher Sams * Convert attr queries to python functions. Signed-off-by: Christopher Sams * Skip names that are None when getting keys. Signed-off-by: Christopher Sams * Remove py3 format strings. Signed-off-by: Christopher Sams * Use six.exec_ instead of exec directly. Signed-off-by: Christopher Sams * Use six to ensure intern'd values are acceptable. Signed-off-by: Christopher Sams * fix for python26 str.format expectations. Signed-off-by: Christopher Sams * Try again to satisfy python26. Signed-off-by: Christopher Sams * Try to figure out what's going wrong. 
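For context, a self-contained sketch of the central technique in this change — compiling a tree of Boolean predicates into one generated Python function, so each query test becomes a single call instead of a walk over Boolean objects. The patch uses ``six.exec_`` for py2/py3 compatibility; plain ``exec`` is used here, and this is a simplified illustration rather than the insights implementation itself:

    def compile_any(preds):
        # Build and exec source for one function that ORs the given
        # predicates; an exception raised by any predicate counts as False.
        env = {}
        calls = []
        for i, p in enumerate(preds):
            name = "func_{0}".format(i)
            env[name] = p
            calls.append("{0}(value)".format(name))
        src = (
            "def predicate(value):\n"
            "    try:\n"
            "        return " + " or ".join(calls) + "\n"
            "    except Exception:\n"
            "        return False\n"
        )
        exec(src, env, env)
        return env["predicate"]

    p = compile_any([lambda v: v > 5, lambda v: v == 0])
    print(p(7), p(0), p("x"))  # True True False ('x' > 5 raises, counted as False)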
Signed-off-by: Christopher Sams --- insights/parsr/query/__init__.py | 198 ++++++++++++++++++------------- insights/parsr/query/boolean.py | 48 ++++++++ 2 files changed, 166 insertions(+), 80 deletions(-) diff --git a/insights/parsr/query/__init__.py b/insights/parsr/query/__init__.py index 3a373a82d..c056b32d8 100644 --- a/insights/parsr/query/__init__.py +++ b/insights/parsr/query/__init__.py @@ -22,9 +22,17 @@ """ import operator import re +import six +import sys from collections import defaultdict -from itertools import chain -from insights.parsr.query.boolean import All, Any, Boolean, Not, pred, pred2, TRUE +from itertools import chain, count +from insights.parsr.query.boolean import All, Any, Boolean, Not, pred, pred2 # noqa + +# intern was a builtin until it moved to sys in python3 +try: + intern = sys.intern +except: + pass class Entry(object): @@ -35,9 +43,9 @@ class Entry(object): __slots__ = ("_name", "attrs", "children", "parent", "lineno", "src") def __init__(self, name=None, attrs=None, children=None, lineno=None, src=None): - self._name = name - self.attrs = attrs or [] - self.children = children or [] + self._name = intern(six.ensure_str(name)) if name is not None else name + self.attrs = attrs if isinstance(attrs, (list, tuple)) else tuple() + self.children = children if isinstance(children, (list, tuple)) else [] self.parent = None self.lineno = lineno self.src = src @@ -67,7 +75,7 @@ def get_keys(self): """ Returns the unique names of all the children as a list. """ - return sorted(set(c.name for c in self.children)) + return sorted(set(c._name for c in self.children if c._name)) def __dir__(self): """ @@ -92,7 +100,7 @@ def line(self): """ Returns the original first line of text that generated the ``Entry``. """ - if self.src is not None: + if self.src is not None and self.lineno is not None: return self.src.content[self.lineno - 1] @property @@ -136,10 +144,10 @@ def upto(self, query): """ Go up from the current node to the first node that matches query. 
""" - pred = _desugar(query) + predicate = _desugar(query) parent = self.parent while parent is not None: - if pred.test(parent): + if predicate(parent): return parent parent = parent.parent @@ -182,14 +190,19 @@ def where(self, name, value=None): ['2019-08-04T23:17:08Z', '2019-08-04T23:32:14Z'] """ if isinstance(name, _EntryQuery): - query = name + query = name.to_pyfunc() elif isinstance(name, Boolean): - query = child_query(name, value) + query = child_query(name, value).to_pyfunc() elif callable(name): - query = SimpleQuery(pred(name)) + def predicate(e): + try: + return name(e) + except: + return False + query = predicate else: - query = child_query(name, value) - return Result(children=self.children if query.test(self) else []) + query = child_query(name, value).to_pyfunc() + return Result(children=self.children if query(self) else []) @property def section(self): @@ -225,7 +238,7 @@ def __getitem__(self, query): if isinstance(query, (int, slice)): return self.children[query] query = _desugar(query) - return Result(children=[c for c in self.children if query.test(c)]) + return Result(children=[c for c in self.children if query(c)]) def __bool__(self): return bool(self._name or self.attrs or self.children) @@ -416,7 +429,7 @@ def nth(self, n): tmp[c.parent].append(c) results = [] - for p, v in tmp.items(): + for _, v in tmp.items(): try: r = v[n] if isinstance(r, list): @@ -451,17 +464,22 @@ def where(self, name, value=None): ['2019-08-04T23:17:08Z', '2019-08-04T23:32:14Z'] """ if isinstance(name, _EntryQuery): - query = name + query = name.to_pyfunc() elif isinstance(name, Boolean): - query = child_query(name, value) + query = child_query(name, value).to_pyfunc() elif callable(name): - query = SimpleQuery(pred(name)) + def predicate(e): + try: + return name(e) + except: + return False + query = predicate else: - query = child_query(name, value) + query = child_query(name, value).to_pyfunc() results = [] seen = set() for c in self.children: - if c not in seen and query.test(c): + if c not in seen and query(c): results.append(c) return Result(children=results) @@ -469,7 +487,7 @@ def __getitem__(self, query): if isinstance(query, (int, slice)): return self.children[query] query = _desugar(query) - return Result(children=[c for c in self.grandchildren if query.test(c)]) + return Result(children=[c for c in self.grandchildren if query(c)]) class _EntryQuery(object): @@ -485,6 +503,38 @@ def __or__(self, other): def __invert__(self): return _NotEntryQuery(self) + def to_pyfunc(self): + ver = sys.version_info + if ver[0] == 2 and ver[1] == 6: + return self.test + + env = {} + ids = count() + + def expr(b): + if isinstance(b, _AllEntryQuery): + return "(" + " and ".join(expr(p) for p in b.exprs) + ")" + elif isinstance(b, _AnyEntryQuery): + return "(" + " or ".join(expr(p) for p in b.exprs) + ")" + elif isinstance(b, _NotEntryQuery): + return "(" + "not " + expr(b.query) + ")" + else: + num = next(ids) + func = "func_{num}".format(num=num) + env[func] = b.test + return func + "(value)" + + func = """ +def predicate(value): + try: + return {body} + except Exception as ex: + return False + """.format(body=expr(self)) + + six.exec_(func, env, env) + return env["predicate"] + class _AllEntryQuery(_EntryQuery, All): pass @@ -498,31 +548,20 @@ class _NotEntryQuery(_EntryQuery, Not): pass -class NameQuery(_EntryQuery): - """ - A query against the name of an :py:class:`Entry`. 
- """ - def __init__(self, expr): - self.expr = expr - - def test(self, n): - return self.expr.test(n.name) - - class _AllAttrQuery(_EntryQuery): def __init__(self, expr): self.expr = expr - def test(self, n): - return all(self.expr.test(a) for a in n.attrs) + def test(self, e): + return all(self.expr(a) for a in e.attrs) class _AnyAttrQuery(_EntryQuery): def __init__(self, expr): self.expr = expr - def test(self, n): - return any(self.expr.test(a) for a in n.attrs) + def test(self, e): + return any(self.expr(a) for a in e.attrs) def all_(expr): @@ -541,36 +580,21 @@ def any_(expr): return _AnyAttrQuery(_desugar_attr(expr)) -class SimpleQuery(_EntryQuery): - """ - Automatically used in ``Entry.where`` or ``Result.where``. ``SimpleQuery`` - wraps a function or a lambda that will be passed each ``Entry`` of the - current result. The passed function should return ``True`` or ``False``. - """ - def __init__(self, expr): - if not isinstance(expr, Boolean): - expr = pred(expr) - self.expr = expr - - def test(self, node): - return self.expr.test(node) - - class ChildQuery(_EntryQuery): """ - Returns True if any child node passes the query. + Returns True if any child entry passes the query. """ def __init__(self, expr): self.expr = expr - def test(self, node): - return any(self.expr.test(n) for n in node.children) + def test(self, e): + return any(self.expr(n) for n in e.children) def child_query(name, value=None): """ - Converts a query into a ChildQuery that works on all child nodes at once - to determine if the current node is accepted. + Converts a query into a ChildQuery that works on all child entries at once + to determine if the current entry is accepted. """ q = _desugar((name, value) if value is not None else name) return ChildQuery(q) @@ -581,22 +605,31 @@ def child_query(name, value=None): def _desugar_name(q): if q is None: - return NameQuery(TRUE) - if isinstance(q, NameQuery): - return q + return lambda _: True if isinstance(q, Boolean): - return NameQuery(q) + f = q.to_pyfunc() + return lambda e: f(e._name) if callable(q): - return NameQuery(pred(q)) - return NameQuery(eq(q)) + def predicate(e): + try: + return q(e._name) + except: + return False + return predicate + return lambda e: e._name == q def _desugar_attr(q): if isinstance(q, Boolean): - return q + return q.to_pyfunc() if callable(q): - return pred(q) - return eq(q) + def predicate(v): + try: + return q(v) + except: + return False + return predicate + return lambda v: v == q def _desugar_attrs(q): @@ -606,20 +639,23 @@ def _desugar_attrs(q): q = q[0] return q if isinstance(q, _EntryQuery) else _AnyAttrQuery(_desugar_attr(q)) else: + # conf[name, q0, q1] means "name and (q0 or q1 for any attribute)" attr_queries = [_desugar_attr(a) for a in q] - return _AnyAttrQuery(Any(*attr_queries)) + return _AnyAttrQuery(lambda v: any(p(v) for p in attr_queries)) def _desugar(q): if isinstance(q, _EntryQuery): - return q + return q.to_pyfunc() if isinstance(q, tuple): - q = list(q) name_query = _desugar_name(q[0]) attrs_query = _desugar_attrs(q[1:]) if attrs_query: - return All(name_query, attrs_query) + aq = attrs_query.to_pyfunc() + return lambda e: name_query(e) and aq(e) return name_query + if q is None: + return lambda _: True return _desugar_name(q) @@ -628,9 +664,9 @@ def _flatten(nodes): Flatten the config tree into a list of nodes. 
""" def inner(n): - res = [n] - res.extend(chain.from_iterable(inner(c) for c in n.children)) - return res + yield n + for i in chain.from_iterable(inner(c) for c in n.children): + yield i return list(chain.from_iterable(inner(n) for n in nodes)) @@ -647,11 +683,13 @@ def compile_queries(*queries): are `or'd` together and that result is `anded` with the name query. Any query that raises an exception is treated as ``False``. """ + queries = [_desugar(q) for q in queries] + def match(qs, nodes): - q = _desugar(qs[0]) - res = [n for n in nodes if q.test(n)] + q = qs[0] qs = qs[1:] - if qs: + res = [n for n in nodes if q(n)] + if qs and res: gc = list(chain.from_iterable(n.children for n in res)) return match(qs, gc) return res @@ -701,12 +739,12 @@ def inner(d): if isinstance(res[0], Entry): result.extend(res) else: - result.append(Entry(name=k, attrs=res)) + result.append(Entry(name=k, attrs=tuple(res))) else: - result.append(Entry(name=k, attrs=[])) + result.append(Entry(name=k)) else: - result.append(Entry(name=k, attrs=[v])) - return result + result.append(Entry(name=k, attrs=(v,))) + return tuple(result) return Entry(children=inner(orig)) diff --git a/insights/parsr/query/boolean.py b/insights/parsr/query/boolean.py index b3a79094e..6e9db5a7d 100644 --- a/insights/parsr/query/boolean.py +++ b/insights/parsr/query/boolean.py @@ -39,6 +39,9 @@ def is_positive(n): gt_five_and_lt_10 = gt(5) & lt(10) """ +from itertools import count +import six +import sys class Boolean(object): @@ -57,6 +60,51 @@ def test(self, value): def __call__(self, value): return self.test(value) + def to_pyfunc(self): + ver = sys.version_info + if ver[0] == 2 and ver[1] == 6: + return self.test + + env = {} + ids = count() + + def expr(b): + if b is TRUE: + return " True " + elif b is FALSE: + return " False " + elif isinstance(b, All): + return "(" + " and ".join(expr(p) for p in b.exprs) + ")" + elif isinstance(b, Any): + return "(" + " or ".join(expr(p) for p in b.exprs) + ")" + elif isinstance(b, Not): + return "(" + "not " + expr(b.query) + ")" + elif isinstance(b, Predicate): + num = next(ids) + + func = "func_{num}".format(num=num) + args = "args_{num}".format(num=num) + + env[func] = b.func + env[args] = b.args + + if isinstance(b, CaselessPredicate): + return func + "(value.lower(), " + "*" + args + ")" + return func + "(value, " + "*" + args + ")" + else: + raise Exception(b) + + func = """ +def predicate(value): + try: + return {body} + except Exception as ex: + return False + """.format(body=expr(self)) + + six.exec_(func, env, env) + return env["predicate"] + class TRUE(Boolean): pass From 37a9302c7a41afc26d528b5a4ae9cb2a1b5df6f3 Mon Sep 17 00:00:00 2001 From: Bob Fahr Date: Tue, 12 Jan 2021 16:26:27 -0600 Subject: [PATCH 286/892] Ensure that args to get_running_commands is a list (#2891) * Ensure that args to get_running_commands is a list * Commands argument could be a type other than a list which caused archive collection to fail * Identified in bugzilla 1915219 * Added tests for exceptions Signed-off-by: Bob Fahr * Fix small typo in exception message. 
Signed-off-by: Christopher Sams Co-authored-by: Christopher Sams --- insights/specs/default.py | 11 ++++++++--- insights/tests/test_specs.py | 11 +++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index bdede6f52..b5e5578f0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -46,7 +46,7 @@ def get_owner(filename): return (name, group) -def _get_running_commands(broker, commands=None): +def _get_running_commands(broker, commands): """ Search for command in ``ps auxcww`` output and determine RPM providing binary @@ -56,8 +56,13 @@ def _get_running_commands(broker, commands=None): Returns: list: List of the full command paths of the ``command``. + + Raises: + Exception: Raises an exception if commands object is not a list or is empty """ - commands = [] if commands is None else commands + if not commands or not isinstance(commands, list): + raise Exception('Commands argument must be a list object and contain at least one item') + ps_list = [broker[Ps].search(COMMAND_NAME__contains=c) for c in commands] ps_cmds = [i for sub_l in ps_list for i in sub_l] ctx = broker[HostContext] @@ -364,7 +369,7 @@ def httpd_cmd(broker): Returns: list: List of the binary paths to each running process """ - return _get_running_commands(broker, 'httpd') + return _get_running_commands(broker, ['httpd', ]) httpd_pid = simple_command("/usr/bin/pgrep -o httpd") httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") diff --git a/insights/tests/test_specs.py b/insights/tests/test_specs.py index 483cd846f..a5815b92d 100644 --- a/insights/tests/test_specs.py +++ b/insights/tests/test_specs.py @@ -6,6 +6,7 @@ from insights.core.plugins import ContentException from insights.core.spec_factory import (DatasourceProvider, simple_file, simple_command, glob_file, SpecSet) +import insights.specs.default as default_specs import tempfile import pytest import glob @@ -102,3 +103,13 @@ def parse_content(self, content): p = MyParser(ds) assert p.content == data.splitlines() assert list(ds.stream()) == data.splitlines() + + +def test_get_running_commands(): + broker = dr.Broker() + broker[HostContext] = HostContext() + with pytest.raises(Exception): + default_specs._get_running_commands(broker, 'not_a_list') + + with pytest.raises(Exception): + default_specs._get_running_commands(broker, []) From 09d660560ead34a05cd27f599648e8989cf2acde Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 12 Jan 2021 16:45:40 -0600 Subject: [PATCH 287/892] Bind first_file to HostArchiveContext in insights_archive.py (#2890) Fixes 2889. 
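For context, a minimal standalone illustration of the ``functools.partial`` binding idiom this one-liner completes; the toy ``first_file`` below only stands in for the real ``insights.core.spec_factory.first_file``:

    from functools import partial

    def first_file(paths, context=None):
        # stand-in for the real spec factory helper
        return (tuple(paths), context)

    # bound the same way insights_archive.py already binds simple_file
    # and glob_file:
    first_file = partial(first_file, context="HostArchiveContext")
    print(first_file(["a.log", "b.log"]))  # (('a.log', 'b.log'), 'HostArchiveContext')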
Signed-off-by: Christopher Sams --- insights/specs/insights_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 068cf5af3..2e88c7ac4 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -5,6 +5,7 @@ simple_file = partial(simple_file, context=HostArchiveContext) glob_file = partial(glob_file, context=HostArchiveContext) +first_file = partial(first_file, context=HostArchiveContext) class InsightsArchiveSpecs(Specs): From f50ad5fa1e1e74d3d9f55dd64f82944b214b4aeb Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 13 Jan 2021 07:06:51 +0800 Subject: [PATCH 288/892] Skip empty result in chkconfig and unitfiles (#2881) Signed-off-by: Xiangce Liu --- insights/parsers/chkconfig.py | 9 ++++++++- insights/parsers/systemd/unitfiles.py | 13 +++++++++++++ insights/parsers/tests/test_chkconfig.py | 17 +++++++++++++++++ insights/parsers/tests/test_unitfiles.py | 19 +++++++++++++++++++ 4 files changed, 57 insertions(+), 1 deletion(-) diff --git a/insights/parsers/chkconfig.py b/insights/parsers/chkconfig.py index 1b2d11cf4..d82f42dea 100644 --- a/insights/parsers/chkconfig.py +++ b/insights/parsers/chkconfig.py @@ -4,8 +4,9 @@ """ from collections import namedtuple from .. import parser, CommandParser -import re from insights.specs import Specs +from insights.parsers import SkipException +import re @parser(Specs.chkconfig) @@ -15,6 +16,9 @@ class ChkConfig(CommandParser): Sample input data is shown as `content` in the examples below. + Raises: + SkipException: When nothing is parsed. + Examples: >>> content = ''' ... auditd 0:off 1:off 2:on 3:on 4:on 5:on 6:off @@ -119,6 +123,9 @@ def parse_content(self, content): states.append(self.LevelState(num.strip(), state.strip())) self.level_states[service] = states + if not self.services: + raise SkipException + def is_on(self, service_name): """ Checks if the service is enabled in chkconfig. diff --git a/insights/parsers/systemd/unitfiles.py b/insights/parsers/systemd/unitfiles.py index 05d34009a..f72a674b8 100644 --- a/insights/parsers/systemd/unitfiles.py +++ b/insights/parsers/systemd/unitfiles.py @@ -14,6 +14,7 @@ from .. import get_active_lines from ... import Parser, parser from insights.specs import Specs +from insights.parsers import SkipException @parser(Specs.systemctl_list_unit_files) @@ -32,6 +33,9 @@ class UnitFiles(Parser): runlevel1.target disabled runlevel2.target enabled + Raises: + SkipException: When nothing is parsed. + Example: >>> conf.is_on('mariadb.service') @@ -93,6 +97,9 @@ def parse_content(self, content): self.parsed_lines[service] = line self.service_list.append(service) + if not self.services: + raise SkipException + def is_on(self, service_name): """ Checks if the service is enabled in systemctl. @@ -144,6 +151,9 @@ class ListUnits(Parser): 161 loaded units listed. Pass --all to see loaded but inactive units, too. To show all installed unit files use 'systemctl list-unit-files'. + Raises: + SkipException: When nothing is parsed. + Example: >>> units.get_service_details('swap.target') == {'LOAD': 'loaded', 'ACTIVE': 'active', 'SUB': 'active', 'UNIT': 'swap.target', 'DESCRIPTION': 'Swap'} @@ -208,6 +218,9 @@ def parse_content(self, content): if service_details: self.unit_list[parts[first_part]] = service_details + if not self.unit_list: + raise SkipException + def get_service_details(self, service_name): """ Return the service details collected by systemctl. 
diff --git a/insights/parsers/tests/test_chkconfig.py b/insights/parsers/tests/test_chkconfig.py index 168c13bae..b683dc4e6 100644 --- a/insights/parsers/tests/test_chkconfig.py +++ b/insights/parsers/tests/test_chkconfig.py @@ -1,6 +1,7 @@ import pytest from ...tests import context_wrap from ..chkconfig import ChkConfig +from insights.parsers import SkipException SERVICES = """ auditd 0:off 1:off 2:on 3:on 4:on 5:on 6:off @@ -44,6 +45,17 @@ time-stream: off """ +SERVICES_NG = """ +Note: This output shows SysV services only and does not include native + systemd services. SysV configuration data might be overridden by native + systemd configuration. + + If you want to list systemd services use 'systemctl list-unit-files'. + To see services enabled on particular target use + 'systemctl list-dependencies [target]'. + +""" + def test_chkconfig(): context = context_wrap(SERVICES) @@ -87,3 +99,8 @@ def test_rhel_73(): assert chkconfig.levels_off('netconsole') == set(['0', '1', '2', '3', '4', '5', '6']) assert chkconfig.levels_on('network') == set(['2', '3', '4', '5']) assert chkconfig.levels_on('rsync') == set(['0', '1', '2', '3', '4', '5', '6']) + + +def test_chkconfig_ng(): + with pytest.raises(SkipException): + ChkConfig(context_wrap(SERVICES_NG)) diff --git a/insights/parsers/tests/test_unitfiles.py b/insights/parsers/tests/test_unitfiles.py index 7f1ad08b3..6192ec832 100644 --- a/insights/parsers/tests/test_unitfiles.py +++ b/insights/parsers/tests/test_unitfiles.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- # Per PEP 263 import doctest +import pytest from insights.tests import context_wrap +from insights.parsers import SkipException from insights.parsers.systemd import unitfiles from insights.parsers.systemd.unitfiles import UnitFiles, ListUnits @@ -414,3 +416,20 @@ def test_unitfiles_doc_examples(): } failed, total = doctest.testmod(unitfiles, globs=env) assert failed == 0 + + +UNITFILES_NG = """ +Failed to list unit files: Connection timed out +""".strip() + +LISTUNITS_NG = """ +Failed to list units +""".strip() + + +def test_unitfile_NG(): + with pytest.raises(SkipException): + UnitFiles(context_wrap(UNITFILES_NG)) + + with pytest.raises(SkipException): + ListUnits(context_wrap(LISTUNITS_NG)) From 873441e3bd01578af516d75e7a1c2e406e29551d Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 14 Jan 2021 04:26:31 +0800 Subject: [PATCH 289/892] Add docstring for datasource "corosync_cmapctl_cmd_list" (#2892) Signed-off-by: Huanhuan Li --- insights/specs/default.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index b5e5578f0..f17234142 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -233,6 +233,12 @@ def is_ceph_monitor(broker): @datasource([IsRhel7, IsRhel8]) def corosync_cmapctl_cmd_list(broker): + """ + corosync-cmapctl add different arguments on RHEL7 and RHEL8. + + Returns: + list: A list of related corosync-cmapctl commands based the RHEL version. + """ if broker.get(IsRhel7): return ["/usr/sbin/corosync-cmapctl", 'corosync-cmapctl -d runtime.schedmiss.timestamp', 'corosync-cmapctl -d runtime.schedmiss.delay'] if broker.get(IsRhel8): From ca2ad4ae87dd0f4380fb30619dcbfe35136196af Mon Sep 17 00:00:00 2001 From: Reid Wahl <30487349+nrwahl2@users.noreply.github.com> Date: Wed, 13 Jan 2021 13:07:24 -0800 Subject: [PATCH 290/892] Doc: Fix data type for spec_factory cmds (#2884) cmd members should be strings, not lists of lists. 
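For reference, the corrected signatures read naturally at the call site,
where a single string may still encode a pipeline or a substitution pattern
(spec names and the literal provider list here are illustrative):

    from insights.core.spec_factory import foreach_execute, simple_command

    # cmd is one plain string per spec, not a list of lists
    uptime = simple_command("/usr/bin/uptime")

    # the substitution form used with foreach_execute is also a str;
    # %s is filled in from each element the provider yields
    ethtool = foreach_execute(["eth0", "eth1"], "/sbin/ethtool %s")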
Signed-off-by: Reid Wahl --- insights/core/spec_factory.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py index 5f558ec69..a600836a7 100644 --- a/insights/core/spec_factory.py +++ b/insights/core/spec_factory.py @@ -708,7 +708,7 @@ class simple_command(object): Execute a simple command that has no dynamic arguments Args: - cmd (list of lists): the command(s) to execute. Breaking apart a command + cmd (str): the command(s) to execute. Breaking apart a command string that might contain multiple commands separated by a pipe, getting them ready for subproc operations. IE. A command with filters applied @@ -753,7 +753,7 @@ class command_with_args(object): Execute a command that has dynamic arguments Args: - cmd (list of lists): the command to execute. Breaking apart a command + cmd (str): the command to execute. Breaking apart a command string that might require arguments. provider (str or tuple): argument string or a tuple of argument strings. context (ExecutionContext): the context under which the datasource @@ -811,7 +811,7 @@ class foreach_execute(object): Args: provider (list): a list of elements or tuples. - cmd (list of lists): a command with substitution parameters. Breaking + cmd (str): a command with substitution parameters. Breaking apart a command string that might contain multiple commands separated by a pipe, getting them ready for subproc operations. IE. A command with filters applied From 66ef0977368c37fa44360670800c7b64d5d6806c Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 14 Jan 2021 23:02:15 +0800 Subject: [PATCH 291/892] Enhance "satellite_content_hosts_count" with @datasource (#2887) * Enhance "satellite_content_hosts_count" with @datasource * This command should only run on satellite server Signed-off-by: Huanhuan Li * Use first_file for "satellite_content_hosts_count" * To be compatible with old archives, it's better to use first_file Signed-off-by: Huanhuan Li --- insights/collect.py | 4 ++++ insights/specs/default.py | 16 +++++++++++++++- insights/specs/insights_archive.py | 5 ++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 66c61a3e3..485f9edc4 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -153,6 +153,10 @@ - name: insights.parsers.lsmod enabled: true + # needed for the 'pre-check' of the 'is_satellite_server' spec + - name: insights.combiners.satellite_version.SatelliteVersion + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true diff --git a/insights/specs/default.py b/insights/specs/default.py index f17234142..ee47b7178 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -27,6 +27,7 @@ from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod +from insights.combiners.satellite_version import SatelliteVersion from insights.specs import Specs @@ -667,7 +668,20 @@ def sap_sid(broker): saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") - satellite_content_hosts_count = simple_command("/usr/bin/sudo -iu postgres psql -d foreman -c 'select count(*) 
from hosts'") + + @datasource(SatelliteVersion) + def is_satellite_server(broker): + """ + bool: Returns True if the host is satellite server. + """ + if broker[SatelliteVersion]: + return True + raise SkipComponent + + satellite_content_hosts_count = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", + deps=[is_satellite_server] + ) satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 2e88c7ac4..33758746a 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -203,7 +203,10 @@ class InsightsArchiveSpecs(Specs): rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") - satellite_content_hosts_count = simple_file("insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts") + satellite_content_hosts_count = first_file([ + "insights_commands/sudo_-iu_postgres_.usr.bin.psql_-d_foreman_-c_select_count_from_hosts", + "insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts" + ]) saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") saphostexec_version = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-version") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") From b19aae8bb704b90cc1512d17ad705d5d7595eda3 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 14 Jan 2021 15:43:06 +0000 Subject: [PATCH 292/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 73 ++++++++++++++++++++++---- 1 file changed, 62 insertions(+), 11 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index b978719cd..0bddd3481 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -546,6 +546,8 @@ { "command": "/bin/ls -lan /usr/bin", "pattern": [ + "/usr/bin", + "python", "total" ], "symbolic_name": "ls_usr_bin" @@ -1061,7 +1063,6 @@ { "command": "/bin/ps alxwww", "pattern": [ - "/usr/bin/gnome-shell", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "COMMAND", @@ -1070,10 +1071,14 @@ "bash", "chronyd", "clvmd", + "cmirrord", "corosync", "crmd", "dlm_controld", "docker", + "elasticsearch", + "gnome-shell", + "haproxy", "heat-engine", "mongdb", "nova-compute", @@ -1084,10 +1089,17 @@ "openshift start node", "ora", "pacemaker-controld", + "pacemaker_remote", "pacemakerd", "pcsd", + "pkla-check-auth", + "pmcd", + "pmie", + "radosgw", "redis-server", + "rngd", "sap", + "snmpd", "spausedd", "tuned" ], @@ -1101,7 +1113,6 @@ "/usr/bin/docker-current", "/usr/bin/docker-current daemon", "/usr/bin/dockerd-current", - "/usr/bin/gnome-shell", "/usr/bin/hyperkube kubelet", 
"/usr/bin/openshift start master", "/usr/bin/openshift start node", @@ -1112,10 +1123,14 @@ "ceph-osd", "chronyd", "clvmd", + "cmirrord", "corosync", "crmd", "dlm_controld", "docker", + "elasticsearch", + "gnome-shell", + "haproxy", "heat-engine", "mongdb", "mysqld", @@ -1128,13 +1143,20 @@ "openshift start node", "ora", "pacemaker-controld", + "pacemaker_remote", "pacemakerd", "pcsd", "phc2sys", + "pkla-check-auth", + "pmcd", + "pmie", "postgres", "ptp4l", + "radosgw", "redis-server", + "rngd", "sap", + "snmpd", "spausedd", "tuned" ], @@ -1151,7 +1173,6 @@ "/opt/perf/bin/midaemon", "/sbin/multipathd", "/sbin/rngd", - "/usr/bin/gnome-shell", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "/usr/bin/teamd", @@ -1163,13 +1184,16 @@ "ceilometer-coll", "chronyd", "clvmd", + "cmirrord", "corosync", "crmd", "dlm_controld", "docker", "elasticsearch", + "gnome-shell", "goferd", "greenplum", + "haproxy", "heat-engine", "httpd", "iscsid", @@ -1187,13 +1211,19 @@ "openshift start node", "ora", "pacemaker-controld", + "pacemaker_remote", "pacemakerd", "pcsd", "pkla-check-auth", + "pmcd", + "pmie", "postgres", + "radosgw", "redis-server", + "rngd", "sap", "smbd", + "snmpd", "spausedd", "target_completi", "tgtd", @@ -1204,7 +1234,6 @@ { "command": "/bin/ps -ef", "pattern": [ - "/usr/bin/gnome-shell", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "CMD", @@ -1212,10 +1241,14 @@ "bash", "chronyd", "clvmd", + "cmirrord", "corosync", "crmd", "dlm_controld", "docker", + "elasticsearch", + "gnome-shell", + "haproxy", "heat-engine", "mongdb", "neutron-ns-metadata-proxy", @@ -1229,10 +1262,17 @@ "openshift start node", "ora", "pacemaker-controld", + "pacemaker_remote", "pacemakerd", "pcsd", + "pkla-check-auth", + "pmcd", + "pmie", + "radosgw", "redis-server", + "rngd", "sap", + "snmpd", "spausedd", "tuned" ], @@ -1728,14 +1768,16 @@ { "file": "/etc/ceph/ceph.conf", "pattern": [ - "[" + "[", + "wait" ], "symbolic_name": "ceph_conf" }, { "file": "/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", "pattern": [ - "[" + "[", + "wait" ], "symbolic_name": "ceph_conf" }, @@ -1963,6 +2005,9 @@ "L1TF", "L1Tf", "Linux version", + "PM: Creating hibernation image", + "PM: hibernation entry", + "PM: hibernation exit", "Secure boot enabled", "__cpufreq_add_dev", "efi", @@ -2394,7 +2439,6 @@ "Cannot allocate memory", "Cannot assign requested address", "Cannot assign requested address: AH00072", - "Connection amqps://subscription.rhn.redhat.com:5647 disconnected", "Corosync main process was not scheduled (@", "Could not set", "DHCPv4 lease renewal requested", @@ -2416,6 +2460,7 @@ "Loop callback failed with: Cannot allocate memory", "Low buffer cache read hit ratio", "Low free swap space", + "Low random number entropy available", "MDC/MDIO access timeout", "Medium access timeout failure. 
Offlining disk!", "MountVolume.SetUp succeeded for volume", @@ -2571,8 +2616,10 @@ "file": "/var/log/mysql/mysqld.log", "pattern": [ "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "SSL error", "Too many open files", - "[ERROR]" + "[ERROR]", + "handshake with remote endpoint ssl" ], "symbolic_name": "mysql_log" }, @@ -2580,8 +2627,10 @@ "file": "/var/log/mysql.log", "pattern": [ "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "SSL error", "Too many open files", - "[ERROR]" + "[ERROR]", + "handshake with remote endpoint ssl" ], "symbolic_name": "mysql_log" }, @@ -4151,8 +4200,10 @@ "symbolic_name": "mysql_log", "pattern": [ "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", + "SSL error", "Too many open files", - "[ERROR]" + "[ERROR]", + "handshake with remote endpoint ssl" ] }, { @@ -4218,5 +4269,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2020-12-10T15:23:43.704559" + "version": "2021-01-07T14:02:30.044043" } \ No newline at end of file From 01ed32f9ec87c332e6f6edef85ca6b4b96e91784 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Fri, 15 Jan 2021 11:44:54 +0530 Subject: [PATCH 293/892] Add sos archive spec - lsblk_pairs (#2896) Newly added "lsblk -O -P" command outupt in sosreport. (https://github.com/sosreport/sos/commit/a8dbdd2143f693758b4df76a615d06c85d8638fd) Signed-off-by: Akshay Gaikwad --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index a40331ea9..8dbaf4fdd 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -120,6 +120,7 @@ class SosSpecs(Specs): libvirtd_qemu_log = glob_file(r"/var/log/libvirt/qemu/*.log") locale = simple_file("sos_commands/i18n/locale") lsblk = first_file(["sos_commands/block/lsblk", "sos_commands/filesys/lsblk"]) + lsblk_pairs = simple_file("sos_commands/block/lsblk_-O_-P") ls_boot = simple_file("sos_commands/boot/ls_-lanR_.boot") ls_sys_firmware = simple_file("sos_commands/boot/ls_-lanR_.sys.firmware") lscpu = simple_file("sos_commands/processor/lscpu") From cd7205b9488932cbc3a89a727124a78b74832754 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Fri, 15 Jan 2021 09:48:13 -0600 Subject: [PATCH 294/892] Ensure internable strings in py2 and py3. (#2899) Fixes #2898 Signed-off-by: Christopher Sams --- insights/parsr/query/__init__.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/insights/parsr/query/__init__.py b/insights/parsr/query/__init__.py index c056b32d8..33852b0f7 100644 --- a/insights/parsr/query/__init__.py +++ b/insights/parsr/query/__init__.py @@ -35,6 +35,18 @@ pass +def _make_str(s): + """ + Inspired by six.ensure_str in six version 1.15. See the six module for + copyright notice. 
+ """ + if six.PY2 and isinstance(s, six.text_type): + return s.encode("utf-8", "strict") + if six.PY3 and isinstance(s, six.binary_type): + return s.decode("utf-8", "strict") + return s + + class Entry(object): """ Entry is the base class for the data model, which is a tree of Entry @@ -43,7 +55,13 @@ class Entry(object): __slots__ = ("_name", "attrs", "children", "parent", "lineno", "src") def __init__(self, name=None, attrs=None, children=None, lineno=None, src=None): - self._name = intern(six.ensure_str(name)) if name is not None else name + if type(name) is str: + self._name = intern(name) + elif isinstance(name, (six.text_type, six.binary_type)): + self._name = intern(_make_str(name)) + else: + self._name = name + self.attrs = attrs if isinstance(attrs, (list, tuple)) else tuple() self.children = children if isinstance(children, (list, tuple)) else [] self.parent = None From 6bc3bc843976b5e764bda1adc3cc1033ea6f9592 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 19 Jan 2021 14:29:59 -0600 Subject: [PATCH 295/892] Show original exceptions for mapped or lifted functions. (#2901) Fixes #2900 Signed-off-by: Christopher Sams --- insights/parsr/__init__.py | 43 +++++++++++++++++++-- insights/parsr/tests/test_function_error.py | 13 +++++++ 2 files changed, 53 insertions(+), 3 deletions(-) create mode 100644 insights/parsr/tests/test_function_error.py diff --git a/insights/parsr/__init__.py b/insights/parsr/__init__.py index ecccf3790..13bd2ff92 100644 --- a/insights/parsr/__init__.py +++ b/insights/parsr/__init__.py @@ -47,7 +47,9 @@ def op(args): from __future__ import print_function import functools import logging +import os import string +import traceback from bisect import bisect_left from six import StringIO, with_metaclass @@ -116,6 +118,9 @@ def _debug_hook(func): """ @functools.wraps(func) def inner(self, pos, data, ctx): + if ctx.function_error is not None: + # no point in continuing... + raise Exception() ctx.parser_stack.append(self) if self._debug: line = ctx.line(pos) + 1 @@ -136,6 +141,15 @@ def inner(self, pos, data, ctx): return inner +class Backtrack(Exception): + """ + Mapped or Lifted functions should Backtrack if they want to fail without + causing parsing to fail. 
+ """ + def __init__(self, msg): + super(Backtrack, self).__init__(msg) + + class Context(object): """ An instance of Context is threaded through the process call to every @@ -151,6 +165,7 @@ def __init__(self, lines, src=None): self.lines = [i for i, x in enumerate(lines) if x == "\n"] self.parser_stack = [] self.errors = [] + self.function_error = None def set(self, pos, msg): """ @@ -319,6 +334,13 @@ def __call__(self, data, src=None, Ctx=Context): except Exception: pass + if ctx.function_error is not None: + pos, msg = ctx.function_error + lineno = ctx.line(pos) + 1 + colno = ctx.col(pos) + 1 + msg = "At line {0} column {1}: {2}".format(lineno, colno, msg) + raise Exception(msg) + err = StringIO() lineno = ctx.line(ctx.pos) + 1 @@ -917,7 +939,16 @@ def __init__(self, child, func): def process(self, pos, data, ctx): pos, res = self.children[0].process(pos, data, ctx) - return pos, self.func(res) + try: + return pos, self.func(res) + except Backtrack as bt: + ctx.set(pos, bt.msg) + raise + except: + tb = traceback.format_exc() + msg = (self.name or "Map") + " raised{l}{tb}".format(l=os.linesep, tb=tb) + ctx.function_error = (pos, msg) + raise def __repr__(self): if not self.name: @@ -969,8 +1000,14 @@ def process(self, pos, data, ctx): results.append(res) try: return pos, self.func(*results) - except Exception as e: - ctx.set(pos, str(e)) + except Backtrack as bt: + ctx.set(pos, bt.msg) + raise + except: + tb = traceback.format_exc() + msg = (self.name or "Lift") + " raised{l}{tb}".format(l=os.linesep, tb=tb) + ctx.set(pos, msg) + ctx.function_error = (pos, msg) raise diff --git a/insights/parsr/tests/test_function_error.py b/insights/parsr/tests/test_function_error.py new file mode 100644 index 000000000..c15525378 --- /dev/null +++ b/insights/parsr/tests/test_function_error.py @@ -0,0 +1,13 @@ +from insights.parsr import Char + + +def boom(_): + raise Exception("Boom") + + +def test_error(): + p = Char("a").map(boom) + try: + p("a") + except Exception as ex: + assert "boom" in str(ex) From aa0ffd6d50e7b91a25665bacdf5e9431c5cec9c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Hornick=C3=BD?= Date: Thu, 21 Jan 2021 16:10:41 +0100 Subject: [PATCH 296/892] Add new parser - yum updateinfo (#2849) Signed-off-by: Michal Hornicky --- .../shared_parsers_catalog/yum_updateinfo.rst | 3 ++ insights/parsers/tests/test_yum_updateinfo.py | 41 ++++++++++++++++++ insights/parsers/yum_updateinfo.py | 43 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 90 insertions(+) create mode 100644 docs/shared_parsers_catalog/yum_updateinfo.rst create mode 100644 insights/parsers/tests/test_yum_updateinfo.py create mode 100644 insights/parsers/yum_updateinfo.py diff --git a/docs/shared_parsers_catalog/yum_updateinfo.rst b/docs/shared_parsers_catalog/yum_updateinfo.rst new file mode 100644 index 000000000..d5357f89e --- /dev/null +++ b/docs/shared_parsers_catalog/yum_updateinfo.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.yum_updateinfo + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_yum_updateinfo.py b/insights/parsers/tests/test_yum_updateinfo.py new file mode 100644 index 000000000..9be0e8ef4 --- /dev/null +++ b/insights/parsers/tests/test_yum_updateinfo.py @@ -0,0 +1,41 @@ +from insights.parsers import yum_updateinfo +from insights.tests import context_wrap +import doctest + +YUM_UPDATEINFO_INPUT = """ +FEDORA-2020-777f43619c bugfix firefox-83.0-13.fc32.x86_64 +FEDORA-2020-786c7010d2 bugfix flatpak-libs-1.8.3-1.fc32.x86_64 +FEDORA-2020-786c7010d2 bugfix flatpak-selinux-1.8.3-1.fc32.noarch +FEDORA-2020-786c7010d2 bugfix flatpak-session-helper-1.8.3-1.fc32.x86_64 +FEDORA-2020-3e2cd487ea bugfix fuse-overlayfs-1.3.0-1.fc32.x86_64 +FEDORA-2020-79e9f139fe bugfix gnome-control-center-3.36.5-1.fc32.x86_64 +FEDORA-2020-79e9f139fe bugfix gnome-control-center-filesystem-3.36.5-1.fc32.noarch +FEDORA-2020-352b61ce72 bugfix iwl100-firmware-39.31.5.1-115.fc32.noarch +FEDORA-2020-352b61ce72 bugfix iwl1000-firmware-1:39.31.5.1-115.fc32.noarch +FEDORA-2020-352b61ce72 bugfix iwl105-firmware-18.168.6.1-115.fc32.noarch +FEDORA-2020-352b61ce72 bugfix iwl135-firmware-18.168.6.1-115.fc32.noarch +FEDORA-2020-352b61ce72 Moderate/Sec. iwl2000-firmware-18.168.6.1-115.fc32.noarch +""" + +TEST_DATA = """ +RSHA-2020-0001 security firefox-83.0-13.fc32.x86_64 +RHBA-2020-0002 bugfix flatpak-libs-1.8.3-1.fc32.x86_64 +RHEA-2020-0003 enhancement flatpak-selinux-1.8.3-1.fc32.noarch +""" + + +def test_yum_updateinfo(): + info = yum_updateinfo.YumUpdateinfo(context_wrap(YUM_UPDATEINFO_INPUT)) + assert info is not None + assert info.items[0]['advisory'] == 'FEDORA-2020-777f43619c' + assert info.items[0]['type'] == 'bugfix' + assert info.items[0]['package'] == 'firefox-83.0-13.fc32.x86_64' + assert len(info.items) == 12 + + +def test_yum_updateinfo_docs(): + env = { + 'updateinfo': yum_updateinfo.YumUpdateinfo(context_wrap(TEST_DATA)) + } + failed, total = doctest.testmod(yum_updateinfo, globs=env) + assert failed == 0 diff --git a/insights/parsers/yum_updateinfo.py b/insights/parsers/yum_updateinfo.py new file mode 100644 index 000000000..d8decaebd --- /dev/null +++ b/insights/parsers/yum_updateinfo.py @@ -0,0 +1,43 @@ +""" +UpdateInfo - command ``yum updateinfo list -C`` +=============================================== +Provides a list of available advisories +""" +from insights import CommandParser, parser +from insights.parsers import parse_delimited_table, SkipException +from insights.specs import Specs + + +@parser(Specs.yum_updateinfo) +class YumUpdateinfo(CommandParser): + """ + Class for parsing the output of `yum updateinfo list -C`. 
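    Each row of that output becomes a dict with ``advisory``, ``type`` and
    ``package`` keys, so callers can filter advisories in one pass; a rough
    consumer sketch (hypothetical rule code, not part of this patch)::

        from insights import rule
        from insights.core.plugins import make_info
        from insights.parsers.yum_updateinfo import YumUpdateinfo

        @rule(YumUpdateinfo)
        def pending_security(updateinfo):
            # keep packages whose advisory type mentions security,
            # e.g. "security" or "Moderate/Sec."
            pkgs = [i['package'] for i in updateinfo.items
                    if 'sec' in i['type'].lower()]
            if pkgs:
                return make_info('HYPOTHETICAL_KEY', packages=pkgs)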
+ + Expected output of the command is:: + + RSHA-2020-0001 security firefox-83.0-13.fc32.x86_64 + RHBA-2020-0002 bugfix flatpak-libs-1.8.3-1.fc32.x86_64 + RHEA-2020-0003 enhancement flatpak-selinux-1.8.3-1.fc32.noarch + + Examples: + >>> len(updateinfo.items) + 3 + >>> updateinfo.items[0] + {'advisory': 'RSHA-2020-0001', 'type': 'security', 'package': 'firefox-83.0-13.fc32.x86_64'} + + """ + + def parse_content(self, content): + """Parse the command output""" + with_header = ['advisory type package'] + content + table = parse_delimited_table(with_header) + if not table: + raise SkipException('No data.') + self._items = table + + @property + def items(self): + """ + list: Updatable packages, along with minimal advisory metadata + """ + return self._items diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index dd175b9b1..0868a9146 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -698,6 +698,7 @@ class Specs(SpecSet): yum_log = RegistryPoint() yum_repolist = RegistryPoint() yum_repos_d = RegistryPoint(multi_output=True) + yum_updateinfo = RegistryPoint() zdump_v = RegistryPoint() zipl_conf = RegistryPoint() sendq_socket_buffer = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index ee47b7178..4b04b51d0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -794,6 +794,7 @@ def is_mod_loaded_for_ss(broker): yum_log = simple_file("/var/log/yum.log") yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist") yum_repos_d = glob_file("/etc/yum.repos.d/*.repo") + yum_updateinfo = simple_command("/usr/bin/yum -C updateinfo list") zipl_conf = simple_file("/etc/zipl.conf") rpm_format = format_rpm() installed_rpms = simple_command("/bin/rpm -qa --qf '%s'" % rpm_format, context=HostContext) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 33758746a..a7ef1809a 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -250,3 +250,4 @@ class InsightsArchiveSpecs(Specs): virt_what = simple_file("insights_commands/virt-what") yum_list_available = simple_file("insights_commands/yum_-C_--noplugins_list_available") yum_repolist = first_file(["insights_commands/yum_-C_--noplugins_repolist", "insights_commands/yum_-C_repolist"]) + yum_updateinfo = simple_file("insights_commands/yum_-C_updateinfo_list") From 0bb464d7d47484ffdf8a4754e70dda718394bcbe Mon Sep 17 00:00:00 2001 From: Sachin Date: Thu, 21 Jan 2021 21:16:27 +0530 Subject: [PATCH 297/892] Add new parser for /etc/cloud/cloud.cfg (#2861) Signed-off-by: Sachin Patil --- docs/shared_parsers_catalog/cloud_cfg.rst | 3 + insights/parsers/cloud_cfg.py | 27 ++++++++ insights/parsers/tests/test_cloud_cfg.py | 28 ++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 78 +++++++++++++++++++++-- 5 files changed, 130 insertions(+), 7 deletions(-) create mode 100644 docs/shared_parsers_catalog/cloud_cfg.rst create mode 100644 insights/parsers/cloud_cfg.py create mode 100644 insights/parsers/tests/test_cloud_cfg.py diff --git a/docs/shared_parsers_catalog/cloud_cfg.rst b/docs/shared_parsers_catalog/cloud_cfg.rst new file mode 100644 index 000000000..31f5f19eb --- /dev/null +++ b/docs/shared_parsers_catalog/cloud_cfg.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.cloud_cfg + :members: + :show-inheritance: diff --git a/insights/parsers/cloud_cfg.py b/insights/parsers/cloud_cfg.py new file mode 100644 index 000000000..635f0df1d --- /dev/null +++ b/insights/parsers/cloud_cfg.py @@ -0,0 +1,27 @@ +""" +CloudCfg - datasource ``cloud_cfg`` +=================================== +""" +from insights import JSONParser, parser +from insights.specs import Specs + + +@parser(Specs.cloud_cfg) +class CloudCfg(JSONParser): + """This parser parses the output of ``cloud_cfg`` datasource. + + Typical output from the datasource is:: + + {"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]} + + Attributes: + + data(dict): Cloud-init network configuration. + + Examples: + >>> cloud_cfg.data['version'] == 1 + True + >>> cloud_cfg.data['config'] == [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}] + True +""" + pass diff --git a/insights/parsers/tests/test_cloud_cfg.py b/insights/parsers/tests/test_cloud_cfg.py new file mode 100644 index 000000000..14ac56657 --- /dev/null +++ b/insights/parsers/tests/test_cloud_cfg.py @@ -0,0 +1,28 @@ +import doctest +from insights.parsers import cloud_cfg +from insights.tests import context_wrap + + +CONFIG_1 = """ +{"config": "disabled"} +""" + +CONFIG_2 = """ +{"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]} +""" + + +def test_cloud_cfg(): + result = cloud_cfg.CloudCfg(context_wrap(CONFIG_1)) + assert result.data['config'] == 'disabled' + + result = cloud_cfg.CloudCfg(context_wrap(CONFIG_2)) + assert result.data['config'][0]['name'] == 'eth0' + + +def test_doc_examples(): + env = { + 'cloud_cfg': cloud_cfg.CloudCfg(context_wrap(CONFIG_2)), + } + failed, total = doctest.testmod(cloud_cfg, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 0868a9146..7dcb78e02 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -75,6 +75,7 @@ class Specs(SpecSet): cinder_conf = RegistryPoint() cinder_volume_log = RegistryPoint(filterable=True) cloud_init_custom_network = RegistryPoint() + cloud_cfg = RegistryPoint() cloud_init_log = RegistryPoint(filterable=True) cluster_conf = RegistryPoint(filterable=True) cmdline = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 4b04b51d0..ea66f289e 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -10,12 +10,18 @@ import logging import re +import json -from insights.core.context import HostContext +from grp import getgrgid +from os import stat +from pwd import getpwuid +import yaml + +from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource -from insights.core.spec_factory import RawFileProvider +from insights.core.spec_factory import RawFileProvider, DatasourceProvider from insights.core.spec_factory import simple_file, simple_command, glob_file from insights.core.spec_factory import first_of, command_with_args from insights.core.spec_factory import foreach_collect, foreach_execute @@ -31,11 +37,6 @@ from insights.specs import Specs -from grp import getgrgid -from os import stat -from pwd import getpwuid - - logger = logging.getLogger(__name__) @@ -224,6 +225,69 @@ def is_ceph_monitor(broker): cinder_api_log = first_file(["/var/log/containers/cinder/cinder-api.log", "/var/log/cinder/cinder-api.log"]) 
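    # The cloud_cfg datasource added just below emits a DatasourceProvider
    # whose payload is the JSON-serialized "network" section of
    # /etc/cloud/cloud.cfg, exactly the shape the CloudCfg JSONParser above
    # consumes. A hedged sketch of the pairing (values are illustrative):
    from insights.core.spec_factory import DatasourceProvider
    from insights.parsers.cloud_cfg import CloudCfg

    raw = DatasourceProvider(
        content='{"version": 1, "config": [{"type": "physical", "name": "eth0"}]}',
        relative_path='/etc/cloud/cloud.cfg')
    assert CloudCfg(raw).data['version'] == 1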
cinder_conf = first_file(["/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", "/etc/cinder/cinder.conf"]) cinder_volume_log = first_file(["/var/log/containers/cinder/volume.log", "/var/log/containers/cinder/cinder-volume.log", "/var/log/cinder/volume.log"]) + + @datasource(HostContext) + def cloud_cfg(broker): + """This datasource provides the network configuration collected + from ``/etc/cloud/cloud.cfg``. + + Typical content of ``/etc/cloud/cloud.cfg`` file is:: + + #cloud-config + users: + - name: demo + ssh-authorized-keys: + - key_one + - key_two + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + + network: + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + - type: dhcp6 + + system_info: + default_user: + name: user2 + plain_text_passwd: 'someP@assword' + home: /home/user2 + + debug: + output: /var/log/cloud-init-debug.log + verbose: true + + Note: + This datasource may be executed using the following command: + + ``insights-cat --no-header cloud_cfg`` + + Example: + + ``{"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]}`` + + Returns: + str: JSON string when the ``network`` parameter is configure, else nothing is returned. + + Raises: + SkipComponent: When the path does not exist. + """ + relative_path = '/etc/cloud/cloud.cfg' + network_config = '' + + try: + with open(relative_path, 'r') as f: + content = yaml.load(f, Loader=yaml.SafeLoader) + network_config = content.get('network', None) + if network_config: + return DatasourceProvider(content=json.dumps(network_config), relative_path=relative_path) + + except OSError: + raise SkipComponent() + cloud_init_custom_network = simple_file("/etc/cloud/cloud.cfg.d/99-custom-networking.cfg") cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") From 1b0bab002ce039ef7506222eeb2fe8ca856aca95 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 21 Jan 2021 12:15:08 -0500 Subject: [PATCH 298/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 0bddd3481..47ce901c5 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -240,6 +240,11 @@ "pattern": [], "symbolic_name": "dotnet_version" }, + { + "command": "/usr/bin/doveconf", + "pattern": [], + "symbolic_name": "doveconf" + }, { "command": "/bin/du -s -k /var/lib/candlepin/activemq-artemis", "pattern": [], @@ -1063,6 +1068,7 @@ { "command": "/bin/ps alxwww", "pattern": [ + "/sbin/rngd", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "COMMAND", @@ -1075,6 +1081,7 @@ "corosync", "crmd", "dlm_controld", + "dnsmasq", "docker", "elasticsearch", "gnome-shell", @@ -1108,6 +1115,7 @@ { "command": "/bin/ps aux", "pattern": [ + "/sbin/rngd", "/usr/bin/docker", "/usr/bin/docker daemon", "/usr/bin/docker-current", @@ -1234,6 +1242,7 @@ { "command": "/bin/ps -ef", "pattern": [ + "/sbin/rngd", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "CMD", @@ -1355,11 +1364,6 @@ "pattern": [], "symbolic_name": "sap_hdb_version" }, - { - "command": "/usr/bin/sudo -iu postgres psql -d foreman -c 'select count(*) from hosts'", - "pattern": [], - "symbolic_name": 
"satellite_content_hosts_count" - }, { "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", "pattern": [], @@ -1569,6 +1573,11 @@ "pattern": [], "symbolic_name": "yum_repolist" }, + { + "command": "/usr/bin/yum -C updateinfo list", + "pattern": [], + "symbolic_name": "yum_updateinfo" + }, { "command": "/usr/sbin/zdump -v /etc/localtime -c 2019,2039", "pattern": [], @@ -4269,5 +4278,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-01-07T14:02:30.044043" + "version": "2021-01-20T09:06:09.239668" } \ No newline at end of file From d1c61a245032310be08a6c3d8f993a84ceeac673 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Wed, 27 Jan 2021 22:30:10 +0530 Subject: [PATCH 299/892] Change etc_udev_40_redhat_rules spec entry (#2897) * Modify udev_40_redhat_rules spec entry As mentiend in udev manual, "man udev", udev rules are processed collectively from "/etc/udev/rules.d", "/run/udev/rules.d", "/usr/lib/udev/rules.d", "/usr/local/lib/udev/rules.d" directories. Signed-off-by: Akshay Gaikwad * Move UdevRules40Redhat parser to udev_rules module More appropriate location for "UdevRules40Redhat" parser is in udev_rules module. Deprecating "etc_udev_rules.UdevRules40Redhat" parser. Signed-off-by: Akshay Gaikwad * Revert spec name to etc_udev_40_redhat_rules Signed-off-by: Akshay Gaikwad --- insights/parsers/etc_udev_rules.py | 9 ++- insights/parsers/tests/test_udev_rules.py | 74 ++++++++++++++++++++++- insights/parsers/udev_rules.py | 34 +++++++++++ insights/specs/default.py | 3 +- 4 files changed, 116 insertions(+), 4 deletions(-) diff --git a/insights/parsers/etc_udev_rules.py b/insights/parsers/etc_udev_rules.py index 4710bf3d1..84db2685c 100644 --- a/insights/parsers/etc_udev_rules.py +++ b/insights/parsers/etc_udev_rules.py @@ -14,11 +14,16 @@ from insights import parser from insights.core import LogFileOutput from insights.specs import Specs +from insights.util import deprecated @parser(Specs.etc_udev_40_redhat_rules) class UdevRules40Redhat(LogFileOutput): """ + .. warning:: + This parser is deprecated, please use + :py:class:`insights.parsers.udev_rules.UdevRules40Redhat` instead. + Read the content of ``/etc/udev/rules.d/40-redhat.rules`` file. .. 
note:: @@ -44,4 +49,6 @@ class UdevRules40Redhat(LogFileOutput): >>> 'LABEL="memory_hotplug_end"' in udev_rules.lines True """ - pass + def __init__(self, *args, **kwargs): + deprecated(UdevRules40Redhat, "Import UdevRules40Redhat from insights.parsers.udev_rules instread.") + super(UdevRules40Redhat, self).__init__(*args, **kwargs) diff --git a/insights/parsers/tests/test_udev_rules.py b/insights/parsers/tests/test_udev_rules.py index c0b434a63..5116f2fe3 100644 --- a/insights/parsers/tests/test_udev_rules.py +++ b/insights/parsers/tests/test_udev_rules.py @@ -1,6 +1,6 @@ import doctest from insights.parsers import udev_rules -from insights.parsers.udev_rules import UdevRulesFCWWPN +from insights.parsers.udev_rules import UdevRulesFCWWPN, UdevRules40Redhat from insights.tests import context_wrap UDEV_RULES_FILT_HIT = """ @@ -9,9 +9,71 @@ ENV{FC_TARGET_LUN}!="$*"; GOTO="fc_wwpn_end" """.strip() +SAMPLE_40_REDHAT_RULES = """ +# do not edit this file, it will be overwritten on update +# CPU hotadd request +SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + +# Memory hotadd request +SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end" +PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + +LABEL="memory_hotplug_end" +""".strip() + +UDEV_RULES_CONTENT = """ +# do not edit this file, it will be overwritten on update + +# CPU hotadd request +SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + +# Memory hotadd request +SUBSYSTEM!="memory", GOTO="memory_hotplug_end" +ACTION!="add", GOTO="memory_hotplug_end" +PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + +ENV{.state}="online" +PROGRAM="/bin/systemd-detect-virt", RESULT=="none", ENV{.state}="online_movable" +ATTR{state}=="offline", ATTR{state}="$env{.state}" + +LABEL="memory_hotplug_end" + +# reload sysctl.conf / sysctl.conf.d settings when the bridge module is loaded +ACTION=="add", SUBSYSTEM=="module", KERNEL=="bridge", RUN+="/usr/lib/systemd/systemd-sysctl --prefix=/proc/sys/net/bridge" + +# load SCSI generic (sg) driver +SUBSYSTEM=="scsi", ENV{DEVTYPE}=="scsi_device", TEST!="[module/sg]", RUN+="/sbin/modprobe -bv sg" +SUBSYSTEM=="scsi", ENV{DEVTYPE}=="scsi_target", TEST!="[module/sg]", RUN+="/sbin/modprobe -bv sg" + +# Rule for prandom character device node permissions +KERNEL=="prandom", MODE="0644" + + +# Rules for creating the ID_PATH for SCSI devices based on the CCW bus +# using the form: ccw--zfcp-: +# +ACTION=="remove", GOTO="zfcp_scsi_device_end" + +# +# Set environment variable "ID_ZFCP_BUS" to "1" if the devices +# (both disk and partition) are SCSI devices based on FCP devices +# +KERNEL=="sd*", SUBSYSTEMS=="ccw", DRIVERS=="zfcp", ENV{.ID_ZFCP_BUS}="1" + +# For SCSI disks +KERNEL=="sd*[!0-9]", SUBSYSTEMS=="scsi", ENV{.ID_ZFCP_BUS}=="1", ENV{DEVTYPE}=="disk", SYMLINK+="disk/by-path/ccw-$attr{hba_id}-zfcp-$attr{wwpn}:$attr{fcp_lun}" + + +# For partitions on a SCSI disk +KERNEL=="sd*[0-9]", SUBSYSTEMS=="scsi", ENV{.ID_ZFCP_BUS}=="1", ENV{DEVTYPE}=="partition", SYMLINK+="disk/by-path/ccw-$attr{hba_id}-zfcp-$attr{wwpn}:$attr{fcp_lun}-part%n" + +LABEL="zfcp_scsi_device_end" +""".strip() + def test_documentation(): - env = {'udev_rules': UdevRulesFCWWPN(context_wrap(UDEV_RULES_FILT_HIT))} + env = {'udev_rules': UdevRulesFCWWPN(context_wrap(UDEV_RULES_FILT_HIT)), + 'udev_40_redhat_rules': UdevRules40Redhat(context_wrap(SAMPLE_40_REDHAT_RULES))} failed_count, tests = doctest.testmod(udev_rules, globs=env) assert failed_count 
== 0 @@ -22,3 +84,11 @@ def test_udev_rules(): 'ENV{FC_INITIATOR_WWPN}!="$*"; GOTO="fc_wwpn_end"', 'ENV{FC_TARGET_LUN}!="$*"; GOTO="fc_wwpn_end"']: assert line in result.lines + + +def test_udev_40_redhat_rules(): + result = UdevRules40Redhat(context_wrap(UDEV_RULES_CONTENT)) + for line in ['SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"', + 'SUBSYSTEM!="memory", GOTO="memory_hotplug_end"', + 'ACTION!="add", GOTO="memory_hotplug_end"']: + assert line in result.lines diff --git a/insights/parsers/udev_rules.py b/insights/parsers/udev_rules.py index e373b2814..fdc33ca02 100644 --- a/insights/parsers/udev_rules.py +++ b/insights/parsers/udev_rules.py @@ -6,6 +6,9 @@ UdevRulesFCWWPN - file ``/usr/lib/udev/rules.d/59-fc-wwpn-id.rules`` -------------------------------------------------------------------- + +UdevRules40Redhat - files ``/etc/udev/rules.d/40-redhat.rules``, ``/run/udev/rules.d/40-redhat.rules``, ``/usr/lib/udev/rules.d/40-redhat.rules``, ``/usr/local/lib/udev/rules.d/40-redhat.rules`` +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- """ from insights import parser from insights.core import LogFileOutput @@ -31,3 +34,34 @@ class UdevRulesFCWWPN(LogFileOutput): True """ pass + + +@parser(Specs.etc_udev_40_redhat_rules) +class UdevRules40Redhat(LogFileOutput): + """ + Read the content of ``40-redhat.rules`` file. + + .. note:: + + The syntax of the `.rules` file is complex, and no rules require to + get the serialized parsed result currently. An only existing rule's + supposed to check the syntax of some specific line, so here the + :class:`insights.core.LogFileOutput` is the base class. + + Sample input:: + + # do not edit this file, it will be overwritten on update + # CPU hotadd request + SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1" + + # Memory hotadd request + SUBSYSTEM!="memory", ACTION!="add", GOTO="memory_hotplug_end" + PROGRAM="/bin/uname -p", RESULT=="s390*", GOTO="memory_hotplug_end" + + LABEL="memory_hotplug_end" + + Examples: + >>> 'LABEL="memory_hotplug_end"' in udev_40_redhat_rules.lines + True + """ + pass diff --git a/insights/specs/default.py b/insights/specs/default.py index ea66f289e..0b0d8c2e0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -362,7 +362,8 @@ def du_dirs_list(broker): etc_journald_conf = simple_file(r"etc/systemd/journald.conf") etc_journald_conf_d = glob_file(r"etc/systemd/journald.conf.d/*.conf") etc_machine_id = simple_file("/etc/machine-id") - etc_udev_40_redhat_rules = simple_file("/etc/udev/rules.d/40-redhat.rules") + etc_udev_40_redhat_rules = first_file(["/etc/udev/rules.d/40-redhat.rules", "/run/udev/rules.d/40-redhat.rules", + "/usr/lib/udev/rules.d/40-redhat.rules", "/usr/local/lib/udev/rules.d/40-redhat.rules"]) etcd_conf = simple_file("/etc/etcd/etcd.conf") ethernet_interfaces = listdir("/sys/class/net", context=HostContext) ethtool = foreach_execute(ethernet_interfaces, "/sbin/ethtool %s") From 8618405d84267dbbe9d01abffc620350e403de2e Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 28 Jan 2021 09:42:09 -0600 Subject: [PATCH 300/892] Add several useful features to parsr.query. 
(#2893) - alias get_keys -> keys - alias get_crumbs -> crumbs - show crumbs up and down - alias None -> ANY so "find(ANY)" makes sense - to_df - generate a pandas dataframe from a result - sources / file paths for insights shell -p insights.ocp and other things - from_dict includes src object / path - most_common to show value distributions - choose selects specific nodes from each tree in a result Signed-off-by: Christopher Sams --- insights/ocp.py | 2 +- insights/parsr/query/__init__.py | 225 ++++++++++++++++++++-- insights/parsr/query/tests/test_choose.py | 123 ++++++++++++ insights/parsr/query/tests/test_crumbs.py | 113 +++++++++++ 4 files changed, 442 insertions(+), 21 deletions(-) create mode 100644 insights/parsr/query/tests/test_choose.py create mode 100644 insights/parsr/query/tests/test_crumbs.py diff --git a/insights/ocp.py b/insights/ocp.py index 33cbb7f78..535695313 100644 --- a/insights/ocp.py +++ b/insights/ocp.py @@ -37,7 +37,7 @@ def _get_files(path): def _load(path): with open(path) as f: doc = yaml.load(f, Loader=Loader) - return from_dict(doc) + return from_dict(doc, src=path) def _process(path, excludes=None): diff --git a/insights/parsr/query/__init__.py b/insights/parsr/query/__init__.py index 33852b0f7..db913f760 100644 --- a/insights/parsr/query/__init__.py +++ b/insights/parsr/query/__init__.py @@ -47,6 +47,34 @@ def _make_str(s): return s +# we need this when generating crumbs +try: + isidentifier = str.isidentifier +except: + isidentifier = re.compile(r"^[^\d\W]\w*\Z").match + + +try: + from collections import Counter +except ImportError: + from operator import itemgetter + + class Counter(object): + def __init__(self, data): + self.data = defaultdict(int) + self.update(data) + + def update(self, data): + for d in data: + self.data[d] += 1 + + def most_common(self, top=None): + res = sorted(self.data.items(), key=itemgetter(1), reverse=True) + return res[:top] if top is not None else res + +ANY = None + + class Entry(object): """ Entry is the base class for the data model, which is a tree of Entry @@ -54,7 +82,7 @@ class Entry(object): """ __slots__ = ("_name", "attrs", "children", "parent", "lineno", "src") - def __init__(self, name=None, attrs=None, children=None, lineno=None, src=None): + def __init__(self, name=None, attrs=None, children=None, lineno=None, src=None, set_parents=True): if type(name) is str: self._name = intern(name) elif isinstance(name, (six.text_type, six.binary_type)): @@ -66,9 +94,10 @@ def __init__(self, name=None, attrs=None, children=None, lineno=None, src=None): self.children = children if isinstance(children, (list, tuple)) else [] self.parent = None self.lineno = lineno - self.src = src - for c in self.children: - c.parent = self + self.src = src # insights.core.Parser instance + if set_parents: + for c in self.children: + c.parent = self super(Entry, self).__init__() def __getattr__(self, name): @@ -95,28 +124,86 @@ def get_keys(self): """ return sorted(set(c._name for c in self.children if c._name)) + keys = get_keys + def __dir__(self): """ Exists for ipython autocompletion. 
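        The completion list is just the child-name set, which the new
        ``keys`` alias also exposes; a rough sketch (tree contents are
        illustrative)::

            >>> conf = from_dict({"metadata": {"name": "x", "uid": "y"}})
            >>> conf.metadata.keys()
            ['name', 'uid']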
""" return self.get_keys() + object.__dir__(self) - def get_crumbs(self): + @property + def source(self): + p = self + while p is not None and p.src is None: + p = p.parent + + if p is not None and p.src is not None: + return getattr(p.src, "file_path", p.src) + + def _crumbs_up(self): + res = [] + segments = [] + cur = self + while cur is not None: + name = cur._name + if name is not None: + segments.append(name) + cur = cur.parent + + if len(segments) == 0: + res.append("") + elif len(segments) == 1: + res.append(segments[0]) + else: + segments = list(reversed(segments)) + path = [segments[0]] + for r in segments[1:]: + r = "." + r if isidentifier(r) else '["{n}"]'.format(n=r) + path.append(r) + res.append("".join(path)) + return res + + def _crumbs_down(self): + res = set() + + def inner(node, base): + if node.children: + for c in node.children: + name = str(c._name) or "" + if base: + if not isidentifier(name): + name = '"{n}"'.format(n=name) + path = base + "[" + name + "]" + else: + path = base + "." + name + else: + if not isidentifier(name): + name = '"{n}"'.format(n=name) + path = "[" + name + "]" + else: + path = name + inner(c, path) + else: + res.add(base) + + inner(self, "") + return sorted(res) + + def get_crumbs(self, down=False): """ - Get the unique name from the current entry to the root. + Get the unique paths from the current entry up to the root or down + to all of the leaves. """ - results = [] - parent = self - while parent and parent._name is not None: - results.append(parent._name) - parent = parent.parent + return self._crumbs_up() if not down else self._crumbs_down() - return ".".join(reversed(results)) + crumbs = get_crumbs @property def line(self): """ Returns the original first line of text that generated the ``Entry``. + ``None`` if the model wasn't generated by an insights parser. """ if self.src is not None and self.lineno is not None: return self.src.content[self.lineno - 1] @@ -192,12 +279,12 @@ def where(self, name, value=None): """ Selects current nodes based on name and value queries of child nodes. If any immediate children match the queries, the parent is included in - the results. The :py:func:``where_query`` function can be used to + the results. The :py:func:``make_child_query`` function can be used to construct queries that act on the children as a whole instead of one at a time. Example: - >>> from insights.parsr.query import where_query as q + >>> from insights.parsr.query import make_child_query as q >>> from insights.parsr.query import from_dict >>> r = from_dict(load_config()) >>> r = conf.status.conditions.where(q("status", "False") | q("type", "Progressing")) @@ -222,6 +309,63 @@ def predicate(e): query = child_query(name, value).to_pyfunc() return Result(children=self.children if query(self) else []) + def choose(self, chooser): + """ + Run a selector function on each node. It should return a tuple, each + element of which is some query using the node. This lets you select + parts of each tree in a result. + + If you want to rename a field, make the element a dictionary whose + key is the name you want and whose value is the query. + + If you want to get all the children of a particular node instead of + specifying them individually, use the * operator in python 3.5+ or + return the query with ".grandchildren" appended otherwise. 
+ + Example: + >>> from insights.parsr.query import make_child_query as q + >>> from insights.parsr.query import from_dict + + >>> conf = from_dict(load_config()) + >>> p = (q("restartCount", gt(2)) & q("ready", False)) + + >>> # get the name, the restartCount renamed to restart, the podIP + >>> # from the node's parent, and all of the children from + >>> # n.lastState.terminated. + + >>> # for python 3.5+ + >>> sel = lambda n: (n["name"], {"restart": n.restartCount}, n.parent.podIP, *n.lastState.terminated) + + >>> # for python 2 + >>> sel = lambda n: (n["name"], {"restart": n.restartCount}, n.parent.podIP, n.lastState.terminated.grandchildren) + + >>> conf.find(ANY).where(p).choose(sel) + """ + results = [] + for c in self.children: + res = chooser(c) + res = res if isinstance(res, (list, tuple)) else (res,) + tmp = [] + for r in res: + if isinstance(r, dict): + for k, v in r.items(): + if isinstance(v, list): + tmp.append(Entry(k, children=v, set_parents=False)) + else: + for i in v.children: + tmp.append(Entry(k, i.attrs, i.children, set_parents=False)) + else: + if isinstance(r, list): + tmp.extend(r) + else: + if isinstance(r, _Choose): + tmp.extend(r.grandchildren) + else: + tmp.extend(r.children) + if tmp: + results.append(Entry(children=tmp, set_parents=False)) + return _Choose(results) + @property def section(self): return None @@ -317,11 +461,25 @@ def get_keys(self): """ return sorted(set(c.name for c in self.grandchildren)) - def get_crumbs(self): + keys = get_keys + + def get_crumbs(self, down=False): """ Get the unique names from the current locations to the roots. """ - return sorted(set(c.get_crumbs() for c in self.children)) + res = chain.from_iterable(c.get_crumbs(down=down) for c in self.children) + return sorted(set(res)) + + crumbs = get_crumbs + + @property + def sources(self): + res = [] + for c in self.children: + src = c.source + if src is not None: + res.append(src) + return sorted(set(res)) @property def line(self): @@ -466,12 +624,12 @@ def where(self, name, value=None): """ Selects current nodes based on name and value queries of child nodes. If any immediate children match the queries, the parent is included in - the results. The :py:func:``where_query`` function can be used to + the results. The :py:func:``make_child_query`` function can be used to construct queries that act on the children as a whole instead of one at a time. Example: - >>> from insights.parsr.query import where_query as q + >>> from insights.parsr.query import make_child_query as q >>> from insights.parsr.query import from_dict >>> r = from_dict(load_config()) >>> r = conf.status.conditions.where(q("status", "False") | q("type", "Progressing")) @@ -501,6 +659,26 @@ def predicate(e): results.append(c) return Result(children=results) + def to_df(self): + import pandas as pd + + res = [] + for p in self.children: + try: + d = dict((c._name, c.attrs[0]) for c in p.children if len(c.attrs) == 1) + res.append(d) + except: + pass + return pd.DataFrame(res) + + def most_common(self, top=None): + """ + Returns the distribution of values returned by queries that return a + single value for each node. 
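        A short sketch, assuming a tree shaped like the ClusterVersion
        document used in the tests below::

            >>> conf.status.conditions.status.most_common()
            [('True', 2), ('False', 2)]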
+ """ + res = [c.attrs[0] for c in self.children if len(c.attrs) == 1] + return Counter(res).most_common(top) + def __getitem__(self, query): if isinstance(query, (int, slice)): return self.children[query] @@ -508,6 +686,13 @@ def __getitem__(self, query): return Result(children=[c for c in self.grandchildren if query(c)]) +class _Choose(Result): + """ + Marker class that allows us to detected nested calls to choose + """ + pass + + class _EntryQuery(object): """ _EntryQuery is the base class of all other query classes. @@ -741,7 +926,7 @@ def select(query, nodes, deep=False, roots=False): return Result(children=top) -def from_dict(orig): +def from_dict(orig, src=None): """ from_dict is a helper function that does its best to convert a python dict into a tree of :py:class:`Entry` instances that can be queried. @@ -763,7 +948,7 @@ def inner(d): else: result.append(Entry(name=k, attrs=(v,))) return tuple(result) - return Entry(children=inner(orig)) + return Entry(children=inner(orig), src=src) def pretty_format(root, indent=4): diff --git a/insights/parsr/query/tests/test_choose.py b/insights/parsr/query/tests/test_choose.py new file mode 100644 index 000000000..0ced81da7 --- /dev/null +++ b/insights/parsr/query/tests/test_choose.py @@ -0,0 +1,123 @@ +import json + +from insights.parsr.query import from_dict + + +DATA = json.loads(''' +{ + "kind": "ClusterVersion", + "apiVersion": "config.openshift.io/v1", + "metadata": { + "name": "version", + "selfLink": "/apis/config.openshift.io/v1/clusterversions/version", + "uid": "11111111-2222-3333-4444-555555555555", + "resourceVersion": "1", + "generation": 1, + "creationTimestamp": "2019-08-04T23:16:46Z" + }, + "spec": { + "clusterID": "55555555-4444-3333-2222-111111111111", + "upstream": "xxxxx://xxx.xxxxxxxxx.xxx/xxx/xxxxxxxxxxxxx/xx/xxxxx", + "channel": "stable-4.2" + }, + "status": { + "desired": { + "version": "4.2.0-0.ci-2019-08-04-183142", + "image": "registry.svc.ci.openshift.org/ocp/release@sha256:63b65452005d6e9e45bb92a7505524db0e406c3281d91bdc1a4f5c5cf71b01c5", + "force": false + }, + "history": [ + { + "state": "Completed", + "startedTime": "2019-08-04T23:17:08Z", + "completionTime": "2019-08-04T23:32:14Z", + "version": "4.2.0-0.ci-2019-08-04-183142", + "image": "registry.svc.ci.openshift.org/ocp/release@sha256:63b65452005d6e9e45bb92a7505524db0e406c3281d91bdc1a4f5c5cf71b01c5", + "verified": false + } + ], + "observedGeneration": 1, + "versionHash": "############", + "conditions": [ + { + "type": "Available", + "status": "True", + "lastTransitionTime": "2019-08-04T23:32:14Z", + "message": "Done applying 4.2.0-0.ci-2019-08-04-183142" + }, + { + "type": "Failing", + "status": "True", + "lastTransitionTime": "2019-08-05T15:04:39Z", + "reason": "ClusterOperatorNotAvailable", + "message": "Cluster operator console is still updating" + }, + { + "type": "Progressing", + "status": "False", + "lastTransitionTime": "2019-08-04T23:32:14Z", + "reason": "ClusterOperatorNotAvailable", + "message": "Error while reconciling 4.2.0-0.ci-2019-08-04-183142: the cluster operator console has not yet successfully rolled out" + }, + { + "type": "RetrievedUpdates", + "status": "False", + "lastTransitionTime": "2019-08-04T23:17:08Z", + "reason": "RemoteFailed", + "message": "Unable to retrieve available updates: currently installed version 4.2.0-0.ci-2019-08-04-183142 not found in the stable-4.2 channel" + } + ], + "availableUpdates": null + } +} +''') + +conf = from_dict(DATA) + + +def test_simple(): + res = conf.status.conditions.choose(lambda c: (c.type, 
c.reason)) + assert "type" in res + assert "reason" in res + assert "status" not in res + + +def test_ancestor(): + res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto("status").desired.version)) + assert "type" in res + assert "reason" in res + assert "version" in res + assert "status" not in res + + +def test_grandchildren(): + """ + Take all of the children of a node instead of choosing them individually + """ + + # note c.upto("status").desired.grandchildren + # this is the same as *c.upto("status").desired in python3.5+ + res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto("status").desired.grandchildren)) + assert "type" in res + assert "reason" in res + assert "version" in res + assert "image" in res + assert "force" in res + + +def test_nested(): + """ + Allow "subselects." The results are splatted in. + """ + res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.root.metadata.choose(lambda m: (m["name"], m.uid)))) + assert "type" in res # from conditions + assert "reason" in res # from conditions + assert "name" in res # from metadata + assert "uid" in res # from metadata + + +def test_rename(): + res = conf.status.conditions.choose(lambda c: ({"mytype": c.type}, c.reason)) + assert "mytype" in res + assert "reason" in res + assert "type" not in res diff --git a/insights/parsr/query/tests/test_crumbs.py b/insights/parsr/query/tests/test_crumbs.py new file mode 100644 index 000000000..624754888 --- /dev/null +++ b/insights/parsr/query/tests/test_crumbs.py @@ -0,0 +1,113 @@ +import json + +from insights.parsr.query import from_dict + + +DATA = json.loads(''' +{ + "kind": "ClusterVersion", + "apiVersion": "config.openshift.io/v1", + "metadata": { + "name": "version", + "selfLink": "/apis/config.openshift.io/v1/clusterversions/version", + "uid": "11111111-2222-3333-4444-555555555555", + "resourceVersion": "1", + "generation": 1, + "creationTimestamp": "2019-08-04T23:16:46Z" + }, + "spec": { + "clusterID": "55555555-4444-3333-2222-111111111111", + "upstream": "xxxxx://xxx.xxxxxxxxx.xxx/xxx/xxxxxxxxxxxxx/xx/xxxxx", + "channel": "stable-4.2" + }, + "status": { + "desired": { + "version": "4.2.0-0.ci-2019-08-04-183142", + "image": "registry.svc.ci.openshift.org/ocp/release@sha256:63b65452005d6e9e45bb92a7505524db0e406c3281d91bdc1a4f5c5cf71b01c5", + "force": false + }, + "history": [ + { + "state": "Completed", + "startedTime": "2019-08-04T23:17:08Z", + "completionTime": "2019-08-04T23:32:14Z", + "version": "4.2.0-0.ci-2019-08-04-183142", + "image": "registry.svc.ci.openshift.org/ocp/release@sha256:63b65452005d6e9e45bb92a7505524db0e406c3281d91bdc1a4f5c5cf71b01c5", + "verified": false + } + ], + "observedGeneration": 1, + "versionHash": "############", + "conditions": [ + { + "type": "Available", + "status": "True", + "lastTransitionTime": "2019-08-04T23:32:14Z", + "message": "Done applying 4.2.0-0.ci-2019-08-04-183142" + }, + { + "type": "Failing", + "status": "True", + "lastTransitionTime": "2019-08-05T15:04:39Z", + "reason": "ClusterOperatorNotAvailable", + "message": "Cluster operator console is still updating" + }, + { + "type": "Progressing", + "status": "False", + "lastTransitionTime": "2019-08-04T23:32:14Z", + "reason": "ClusterOperatorNotAvailable", + "message": "Error while reconciling 4.2.0-0.ci-2019-08-04-183142: the cluster operator console has not yet successfully rolled out" + }, + { + "type": "RetrievedUpdates", + "status": "False", + "lastTransitionTime": "2019-08-04T23:17:08Z", + "reason": "RemoteFailed", + "message": 
"Unable to retrieve available updates: currently installed version 4.2.0-0.ci-2019-08-04-183142 not found in the stable-4.2 channel" + } + ], + "availableUpdates": null + } +} +''') + +conf = from_dict(DATA) + + +def test_crumbs_up(): + c = conf.metadata.name.crumbs() + assert c == ["metadata.name"] + + +def test_crumbs_down(): + c = conf.metadata.crumbs(down=True) + assert c == sorted([ + "name", + "selfLink", + "uid", + "resourceVersion", + "generation", + "creationTimestamp" + ]) + + c = conf.status.crumbs(down=True) + assert c == sorted([ + "desired.version", + "desired.image", + "desired.force", + "history.state", + "history.startedTime", + "history.completionTime", + "history.version", + "history.image", + "history.verified", + "availableUpdates", + "observedGeneration", + "versionHash", + "conditions.type", + "conditions.status", + "conditions.lastTransitionTime", + "conditions.message", + "conditions.reason" + ]) From e7df415231e342a99e8904322c79fb94ce018f8e Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 28 Jan 2021 09:56:36 -0600 Subject: [PATCH 301/892] set_enabled works on components and their fully qualified names. (#2907) Fixes #2903 Signed-off-by: Christopher Sams --- insights/core/dr.py | 15 ++++++++++----- insights/tests/test_dr_enabled.py | 8 ++++++-- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/insights/core/dr.py b/insights/core/dr.py index 80d951df1..ab2ffcff1 100644 --- a/insights/core/dr.py +++ b/insights/core/dr.py @@ -89,10 +89,11 @@ def add(a, b): def set_enabled(component, enabled=True): """ Enable a component for evaluation. If set to False, the component is - skipped, and all components that require it will not execute. If component - is a fully qualified name string of a callable object instead of the - callable object itself, the component's module is loaded as a side effect - of calling this function. + skipped, and all components that require it will not execute. + + If component is a fully qualified name string of a callable object + instead of the callable object itself, the component's module is loaded + as a side effect of calling this function. 
Args: component (str or callable): fully qualified name of the component or @@ -102,7 +103,11 @@ def set_enabled(component, enabled=True): Returns: None """ - ENABLED[get_component(component) or component] = enabled + if isinstance(component, six.string_types): + component = get_component(component) + + if component: + ENABLED[component] = enabled def is_enabled(component): diff --git a/insights/tests/test_dr_enabled.py b/insights/tests/test_dr_enabled.py index 8e3e2e82e..89adfb8a7 100644 --- a/insights/tests/test_dr_enabled.py +++ b/insights/tests/test_dr_enabled.py @@ -1,5 +1,5 @@ from insights import combiner, dr - +from insights.core.context import HostContext from insights.parsers.uname import Uname @@ -21,9 +21,13 @@ def test_enabled_object(): assert dr.is_enabled(Uname) +def test_set_enabled_object(): + dr.set_enabled(HostContext) + assert dr.is_enabled(HostContext) + + def test_disabled_string(): dr.set_enabled("insights.core.context.HostContext", False) - from insights.core.context import HostContext assert not dr.is_enabled(HostContext) From 5df2e6f21ae81fcda0b10c76bdc6a292b95ee511 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 28 Jan 2021 10:02:04 -0600 Subject: [PATCH 302/892] Add HostContext to all custom datasources (#2911) * Since these should only run when performing host collection, they need to include the proper context * HostContext should be used as an arg and not a kwarg * Fixes #2908 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/default.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 0b0d8c2e0..fb1f14705 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -135,7 +135,7 @@ class DefaultSpecs(Specs): avc_hash_stats = simple_file("/sys/fs/selinux/avc/hash_stats") avc_cache_threshold = simple_file("/sys/fs/selinux/avc/cache_threshold") - @datasource(CloudProvider) + @datasource(CloudProvider, HostContext) def is_aws(broker): """ bool: Returns True if this node is identified as running in AWS """ cp = broker[CloudProvider] @@ -147,7 +147,7 @@ def is_aws(broker): aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[is_aws]) awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license") - @datasource(CloudProvider) + @datasource(CloudProvider, HostContext) def is_azure(broker): """ bool: Returns True if this node is identified as running in Azure """ cp = broker[CloudProvider] @@ -172,7 +172,7 @@ def is_azure(broker): ps_ef = simple_command("/bin/ps -ef") ps_eo = simple_command("/usr/bin/ps -eo pid,ppid,comm") - @datasource(ps_auxww) + @datasource(ps_auxww, HostContext) def tomcat_base(broker): """ Function to search the output of ``ps auxww`` to find all running tomcat @@ -201,7 +201,7 @@ def tomcat_base(broker): ceph_df_detail = simple_command("/usr/bin/ceph df detail -f json") ceph_health_detail = simple_command("/usr/bin/ceph health detail -f json") - @datasource(Ps) + @datasource(Ps, HostContext) def is_ceph_monitor(broker): """ bool: Returns True if ceph monitor process ceph-mon is running on this node """ ps = broker[Ps] @@ -296,7 +296,7 @@ def cloud_cfg(broker): cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") - @datasource([IsRhel7, IsRhel8]) + 
@datasource(HostContext, [IsRhel7, IsRhel8]) def corosync_cmapctl_cmd_list(broker): """ corosync-cmapctl add different arguments on RHEL7 and RHEL8. @@ -352,7 +352,7 @@ def corosync_cmapctl_cmd_list(broker): dotnet_version = simple_command("/usr/bin/dotnet --version") dracut_kdump_capture_service = simple_file("/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service") - @datasource() + @datasource(HostContext) def du_dirs_list(broker): """ Provide a list of directorys for the ``du_dirs`` spec to scan """ return ['/var/lib/candlepin/activemq-artemis'] @@ -432,7 +432,7 @@ def du_dirs_list(broker): jbcs_httpd24_httpd_error_log = simple_file("/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log") virt_uuid_facts = simple_file("/etc/rhsm/facts/virt_uuid.facts") - @datasource(Ps) + @datasource(Ps, HostContext) def httpd_cmd(broker): """ Function to search the output of ``ps auxcww`` to find all running Apache @@ -531,14 +531,14 @@ def httpd_cmd(broker): mariadb_log = simple_file("/var/log/mariadb/mariadb.log") max_uid = simple_command("/bin/awk -F':' '{ if($3 > max) max = $3 } END { print max }' /etc/passwd") - @datasource() + @datasource(HostContext) def md5chk_file_list(broker): """ Provide a list of files to be processed by the ``md5chk_files`` spec """ return ["/etc/pki/product/69.pem", "/etc/pki/product-default/69.pem", "/usr/lib/libsoftokn3.so", "/usr/lib64/libsoftokn3.so", "/usr/lib/libfreeblpriv3.so", "/usr/lib64/libfreeblpriv3.so"] md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s") mdstat = simple_file("/proc/mdstat") - @datasource(Mdstat) + @datasource(Mdstat, HostContext) def md_device_list(broker): md = broker[Mdstat] if md.components: @@ -638,7 +638,7 @@ def md_device_list(broker): ovs_vsctl_list_bridge = simple_command("/usr/bin/ovs-vsctl list bridge") ovs_vsctl_show = simple_command("/usr/bin/ovs-vsctl show") - @datasource(Ps, context=HostContext) + @datasource(Ps, HostContext) def cmd_and_pkg(broker): """ Returns: @@ -660,7 +660,7 @@ def cmd_and_pkg(broker): pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") - @datasource(Services, context=HostContext) + @datasource(Services, HostContext) def pcp_enabled(broker): """ bool: Returns True if pmproxy service is on in services """ if not broker[Services].is_on("pmproxy"): @@ -723,7 +723,7 @@ def pcp_enabled(broker): rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/*.conf"]) samba = simple_file("/etc/samba/smb.conf") - @datasource(Sap) + @datasource(Sap, HostContext) def sap_sid(broker): sap = broker[Sap] return [sap.sid(i).lower() for i in sap.local_instances] @@ -734,7 +734,7 @@ def sap_sid(broker): saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") - @datasource(SatelliteVersion) + @datasource(SatelliteVersion, HostContext) def is_satellite_server(broker): """ bool: Returns True if the host is satellite server. 
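The pattern applied throughout this hunk is worth making concrete: a datasource only fires when every declared dependency resolves, so listing ``HostContext`` keeps these from running during archive evaluation. A simplified sketch with stand-in types -- not the real broker or engine::

    class HostContext(object):
        pass  # stand-in for insights.core.context.HostContext

    def run_datasource(func, deps, broker):
        # The engine resolves a component only if all of its declared
        # dependencies are already present in the broker.
        if all(dep in broker for dep in deps):
            return func(broker)
        return None  # skipped -- the desired outcome off host

    def du_dirs_list(broker):
        return ['/var/lib/candlepin/activemq-artemis']

    archive_broker = {}  # archive evaluation: no live host context
    assert run_datasource(du_dirs_list, [HostContext], archive_broker) is None

    host_broker = {HostContext: HostContext()}  # host collection provides it
    assert run_datasource(du_dirs_list, [HostContext], host_broker) is not None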
@@ -768,7 +768,7 @@ def is_satellite_server(broker): software_collections_list = simple_command('/usr/bin/scl --list') spamassassin_channels = simple_command("/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d") - @datasource(LsMod) + @datasource(LsMod, HostContext) def is_mod_loaded_for_ss(broker): """ bool: Returns True if the kernel modules required by ``ss -tupna`` From 6b08156cb2b82a42c17d889accb5cef9d79a8a01 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 28 Jan 2021 10:06:21 -0600 Subject: [PATCH 303/892] Remove debug call that prints unhelpful tracebacks during component loading. (#2914) When dr is asked for a component, it will first ensure that relevant module is loaded. Since the component can be a module attribute or a class attribute, the loader tries to get it as each, and this can cause harmless ImportErrors. A debug statement was added to log them for visibility, but in practice they're causing confusion in the logs. Fixes #2912 Signed-off-by: Christopher Sams --- insights/core/dr.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/insights/core/dr.py b/insights/core/dr.py index ab2ffcff1..d1f4d2006 100644 --- a/insights/core/dr.py +++ b/insights/core/dr.py @@ -161,9 +161,7 @@ def _import_component(name): for f in (_get_from_module, _get_from_class): try: return f(name) - except Exception as e: - log.debug("Couldn't load %s" % name) - log.debug(e, exc_info=True) + except: pass From 101b3a1e54210ce289b8c6277052ec9c744a6f2d Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 28 Jan 2021 10:21:02 -0600 Subject: [PATCH 304/892] Remove debug log messages generated when commands fail during serialization. (#2915) Sometimes commands fail during archive creation. core logs the failures at debug level, but they're generally harmless and causing confusion when people inspect the collection log. Since we also capture the failures in the metadata for each component, we can remove the debug statement and not lose the info. Fixes #2913 Signed-off-by: Christopher Sams --- insights/core/serde.py | 1 - 1 file changed, 1 deletion(-) diff --git a/insights/core/serde.py b/insights/core/serde.py index 154b96f79..adff200fe 100644 --- a/insights/core/serde.py +++ b/insights/core/serde.py @@ -191,7 +191,6 @@ def dehydrate(self, comp, broker): doc["results"] = marshal(value, root=self.data, pool=self.pool) except Exception: errors.append(traceback.format_exc()) - log.debug(traceback.format_exc()) doc["results"] = None finally: doc["ser_time"] = time.time() - start From a0a07cff8d1bdadad5147763666f8fb4741f4a7d Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 28 Jan 2021 14:56:32 -0500 Subject: [PATCH 305/892] fix: collect version_info in core3 archives (#2918) * fix: collect version_info in core3 archives Looks like we need this spec defined for core3 archives. Ran a test locally and this is the only reason I can spot that we wouldn't be populating this field in some cases. Signed-off-by: Stephen Adams * fix: add version_info to default.py Fix a bug where core and egg version info are missing from system_profile. 
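The serde change above leans on the fact that failures are already preserved per component, so the debug log line added no information. A reduced sketch of that bookkeeping (names simplified; not the full ``Hydration`` class)::

    import time
    import traceback

    def dehydrate_sketch(get_results):
        doc = {"errors": []}
        start = time.time()
        try:
            doc["results"] = get_results()
        except Exception:
            # The traceback lands in the metadata document itself, which
            # is why the debug logging was redundant.
            doc["errors"].append(traceback.format_exc())
            doc["results"] = None
        finally:
            doc["ser_time"] = time.time() - start
        return doc

    ok = dehydrate_sketch(lambda: {"answer": 42})
    assert ok["results"] == {"answer": 42} and not ok["errors"]

    bad = dehydrate_sketch(lambda: 1 / 0)
    assert bad["results"] is None and bad["errors"]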
Puptoo needs to use this spec to extract it Signed-off-by: Stephen Adams --- insights/specs/core3_archive.py | 1 + insights/specs/default.py | 1 + 2 files changed, 2 insertions(+) diff --git a/insights/specs/core3_archive.py b/insights/specs/core3_archive.py index 83a884e37..db5cd366f 100644 --- a/insights/specs/core3_archive.py +++ b/insights/specs/core3_archive.py @@ -15,3 +15,4 @@ class Core3Specs(Specs): branch_info = simple_file("/branch_info", kind=RawFileProvider) display_name = simple_file("display_name") + version_info = simple_file("version_info") diff --git a/insights/specs/default.py b/insights/specs/default.py index fb1f14705..56886d7e0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -837,6 +837,7 @@ def is_mod_loaded_for_ss(broker): uptime = simple_command("/usr/bin/uptime") usr_journald_conf_d = glob_file(r"usr/lib/systemd/journald.conf.d/*.conf") # note that etc_journald.conf.d also exists vdo_status = simple_command("/usr/bin/vdo status") + version_info = simple_file("/version_info") vgdisplay = simple_command("/sbin/vgdisplay") vhost_net_zero_copy_tx = simple_file("/sys/module/vhost_net/parameters/experimental_zcopytx") vdsm_log = simple_file("var/log/vdsm/vdsm.log") From 5c8f057597bd397356b4ce37628b0bec15483ff1 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 28 Jan 2021 15:55:51 -0500 Subject: [PATCH 306/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 28 +++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 47ce901c5..fcea0dbbe 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -242,7 +242,12 @@ }, { "command": "/usr/bin/doveconf", - "pattern": [], + "pattern": [ + "ssl_min_protocol", + "ssl_protocols", + "{", + "}" + ], "symbolic_name": "doveconf" }, { @@ -553,6 +558,7 @@ "pattern": [ "/usr/bin", "python", + "sudo", "total" ], "symbolic_name": "ls_usr_bin" @@ -1068,7 +1074,6 @@ { "command": "/bin/ps alxwww", "pattern": [ - "/sbin/rngd", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "COMMAND", @@ -1115,7 +1120,6 @@ { "command": "/bin/ps aux", "pattern": [ - "/sbin/rngd", "/usr/bin/docker", "/usr/bin/docker daemon", "/usr/bin/docker-current", @@ -1242,7 +1246,6 @@ { "command": "/bin/ps -ef", "pattern": [ - "/sbin/rngd", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "CMD", @@ -1622,6 +1625,21 @@ ], "symbolic_name": "etc_udev_40_redhat_rules" }, + { + "file": "/run/udev/rules.d/40-redhat.rules", + "pattern": [], + "symbolic_name": "etc_udev_40_redhat_rules" + }, + { + "file": "/usr/lib/udev/rules.d/40-redhat.rules", + "pattern": [], + "symbolic_name": "etc_udev_40_redhat_rules" + }, + { + "file": "/usr/local/lib/udev/rules.d/40-redhat.rules", + "pattern": [], + "symbolic_name": "etc_udev_40_redhat_rules" + }, { "file": "/proc/1/cgroup", "pattern": [], @@ -4278,5 +4296,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-01-20T09:06:09.239668" + "version": "2021-01-26T11:44:37.067394" } \ No newline at end of file From 7abb5dcb1f7ac370c24f4893c4d8b290ee47fb00 Mon Sep 17 00:00:00 2001 From: Akshay Ghodake Date: Fri, 29 Jan 2021 13:59:57 +0530 Subject: [PATCH 307/892] Enhanced parser insights/parsers/ip.py (#2917) * Enhanced parser insights/parsers/ip.py Signed-off-by: Akshay Ghodake * Updated test file for 
parser ip.py Signed-off-by: Akshay Ghodake --- insights/parsers/ip.py | 3 +++ insights/parsers/tests/test_ip.py | 24 +++++++++++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/insights/parsers/ip.py b/insights/parsers/ip.py index 5fb4f59fe..5d6274796 100644 --- a/insights/parsers/ip.py +++ b/insights/parsers/ip.py @@ -119,6 +119,9 @@ def parse_link(line, d): d["peer"] = split_content[3] elif len(split_content) >= 2: d["mac"] = split_content[1] + if "promiscuity" in split_content: + d["promiscuity"] = split_content[ + split_content.index('promiscuity') + 1] def parse_inet(line, d): diff --git a/insights/parsers/tests/test_ip.py b/insights/parsers/tests/test_ip.py index e7e27220b..f84beacb9 100644 --- a/insights/parsers/tests/test_ip.py +++ b/insights/parsers/tests/test_ip.py @@ -114,6 +114,14 @@ def test_ip_addr(): 0 8 0 0 0 0 TX: bytes packets errors dropped carrier collsns 0 12 0 0 0 0 +5: tm0: mtu 1500 qdisc noqueue master br0 state UP mode DEFAULT group default qlen 1000 + link/ether d4:f5:ef:01:1a:3c brd ff:ff:ff:ff:ff:ff promiscuity 1 + team + bridge_slave state forwarding priority 32 cost 100 hairpin off guard off root_block off fastleave off learning on flood on port_id 0x8001 port_no 0x1 designated_port 32769 designated_cost 0 designated_bridge 8000.d4:f5:ef:1:1a:3c designated_root 8000.d4:f5:ef:1:1a:3c hold_timer 0.00 message_age_timer 0.00 forward_delay_timer 0.00 topology_change_ack 0 config_pending 0 proxy_arp off proxy_arp_wifi off mcast_router 1 mcast_fast_leave off mcast_flood on neigh_suppress off group_fwd_mask 0x0 group_fwd_mask_str 0x0 vlan_tunnel off addrgenmode none numtxqueues 16 numrxqueues 16 gso_max_size 65536 gso_max_segs 65535 + RX: bytes packets errors dropped overrun mcast + 959425 11784 0 0 0 1 + TX: bytes packets errors dropped carrier collsns + 2980335 14289 0 0 0 0 """.strip() IP_S_LINK_ALL = """ @@ -326,12 +334,13 @@ def test_ip_data_Link(): if_list_all_2 = link_info_all_2.active if_list_all = link_info_all.active if_list = link_info.active - assert len(if_list) == 4 - assert keys_in(["lo", "enp0s3", "enp0s8", "enp0s9"], if_list) + assert len(if_list) == 5 + + assert keys_in(["lo", "enp0s3", "enp0s8", "enp0s9", "tm0"], if_list) assert keys_in(['ppp0', 'lo', 'tun0', 'enp0s25', 'vnet0', 'virbr0'], if_list_all) assert keys_in(['lo', 'eno1', 'eno2', 'ovs-system', 'br-ex', 'vxlan_sys_4789'], if_list_all_2) - assert sorted(link_info.active) == sorted(['lo', 'enp0s3', 'enp0s8', 'enp0s9']) + assert sorted(link_info.active) == sorted(['lo', 'enp0s3', 'enp0s8', 'enp0s9', 'tm0']) lo = link_info["lo"] assert lo["mac"] == "00:00:00:00:00:00" @@ -368,6 +377,15 @@ def test_ip_data_Link(): assert len(geneve_obj.data['geneve']) == 11 assert sorted(geneve_obj.data['geneve']) == sorted(['geneve', 'id', '10', 'remote', '192.168.43.254', 'dstport', '6081', 'noudpcsum', 'udp6zerocsumrx', 'addrgenmode', 'eui64']) + tm0 = link_info["tm0"] + # import pdb;pdb.set_trace() + assert tm0["mac"] == "d4:f5:ef:01:1a:3c" + assert tm0["flags"] == ['BROADCAST', 'MULTICAST', 'UP', 'LOWER_UP'] + assert tm0["type"] == "ether" + assert tm0["mtu"] == 1500 + assert tm0['promiscuity'] == '1' + assert tm0["rx_packets"] == 11784 + IP_ROUTE_SHOW_TABLE_ALL_TEST = """ throw 30.142.64.0/26 table red_mgmt From bb5315e174305af02db8519abf912b1200412ae9 Mon Sep 17 00:00:00 2001 From: Glutexo Date: Tue, 2 Feb 2021 19:03:24 +0100 Subject: [PATCH 308/892] Fix checkin URL with legacy upload (#2927) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 
8bit The Inventory URL is composed incorrectly if legacy upload is enabled. There is a workaround for this in place that disables legacy upload for actions that use the Inventory API. Added the check-in to the list of such actions. Removed show_status from the workaround action list, because the legacy upload config option does not affect it in any way. Signed-off-by: Štěpán Tomsa --- insights/client/config.py | 2 +- insights/tests/client/connection/test_init.py | 33 +++++++++++++++++-- insights/tests/client/test_client.py | 10 +++--- insights/tests/client/test_config.py | 14 ++++++-- 4 files changed, 47 insertions(+), 12 deletions(-) diff --git a/insights/client/config.py b/insights/client/config.py index 32a5f7f0a..4adf7d009 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -743,7 +743,7 @@ def _imply_options(self): self.diagnosis = True if self.test_connection: self.net_debug = True - if self.payload or self.diagnosis or self.compliance or self.show_results or self.check_results: + if self.payload or self.diagnosis or self.compliance or self.check_results or self.checkin: self.legacy_upload = False if self.payload and (self.logging_file == constants.default_log_file): self.logging_file = constants.default_payload_log diff --git a/insights/tests/client/connection/test_init.py b/insights/tests/client/connection/test_init.py index d88cf008f..9e195d4f7 100644 --- a/insights/tests/client/connection/test_init.py +++ b/insights/tests/client/connection/test_init.py @@ -1,14 +1,41 @@ +from insights.client.auto_config import try_auto_configuration +from insights.client.config import InsightsConfig +from insights.client.connection import InsightsConnection from mock.mock import Mock from mock.mock import patch -from insights.client.connection import InsightsConnection +from pytest import mark +from sys import argv @patch("insights.client.connection.InsightsConnection._init_session") @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_inventory_url(get_proxies, init_session): +def test_inventory_url_from_base_url(get_proxies, init_session): """ - Inventory URL is composed correctly. + Inventory URL is composed correctly from the given base URL. """ config = Mock(base_url="www.example.com", insecure_connection=False) connection = InsightsConnection(config) assert connection.inventory_url == "https://www.example.com/inventory/v1" + + +@mark.parametrize(("config_kwargs",), (({"check_results": True},), ({"checkin": True},))) +@patch("insights.client.connection.InsightsConnection._init_session") +@patch("insights.client.connection.InsightsConnection.get_proxies") +@patch("insights.client.auto_config._try_satellite5_configuration") +@patch("insights.client.auto_config._try_satellite6_configuration") +@patch('insights.client.config.sys.argv', [argv[0]]) +def test_inventory_url_from_phase( + try_satellite6_configuration, + try_satellite5_configuration, + get_proxies, + init_session, + config_kwargs +): + """ + Inventory URL is composed correctly from the default configuration. + """ + config = InsightsConfig(**config_kwargs) + config.load_all() # Disables legacy upload. + try_auto_configuration(config) # Updates base_url if legacy upload is disabled. 
+ connection = InsightsConnection(config) + assert connection.inventory_url == "https://cert-api.access.redhat.com/r/insights/platform/inventory/v1" diff --git a/insights/tests/client/test_client.py b/insights/tests/client/test_client.py index 5e7ae9897..415a635bc 100644 --- a/insights/tests/client/test_client.py +++ b/insights/tests/client/test_client.py @@ -622,16 +622,16 @@ def test_copy_to_output_file_obfuscate_on(shutil_, _copy_soscleaner_files): _copy_soscleaner_files.assert_called_once() -@mark.parametrize(("result",), ((True,), (None,))) -def test_checkin_result(result): +@mark.parametrize(("expected_result",), ((True,), (None,))) +def test_checkin_result(expected_result): config = InsightsConfig() client = InsightsClient(config) - client.connection = Mock(**{"checkin.return_value": result}) + client.connection = Mock(**{"checkin.return_value": expected_result}) client.session = True - result = client.checkin() + actual_result = client.checkin() client.connection.checkin.assert_called_once_with() - assert result is result + assert actual_result is expected_result def test_checkin_error(): diff --git a/insights/tests/client/test_config.py b/insights/tests/client/test_config.py index 3e42f5c54..b09915bdc 100644 --- a/insights/tests/client/test_config.py +++ b/insights/tests/client/test_config.py @@ -4,6 +4,7 @@ from io import TextIOWrapper, BytesIO from insights.client.config import InsightsConfig, DEFAULT_OPTS, _core_collect_default from mock.mock import patch +from pytest import mark @patch('insights.client.config.ConfigParser.open') @@ -127,12 +128,19 @@ def test_env_https_proxy_no_warning(): # empty argv so parse_args isn't polluted with pytest arguments +@mark.parametrize(("config",), ( + ({"payload": "./payload.tar.gz", "content_type": "application/gzip"},), + ({"diagnosis": True},), + ({"compliance": True},), + ({"check_results": True},), + ({"checkin": True},), +)) @patch('insights.client.config.sys.argv', [sys.argv[0]]) -def test_diagnosis_implies_legacy(): +def test_implied_non_legacy_upload(config): ''' - --diagnosis should always imply legacy_upload=False + Some arguments should always imply legacy_upload=False. 
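A reduced model of the rule these cases exercise (the class name is hypothetical; only the ``legacy_upload`` implication from ``_imply_options`` is modeled)::

    class ConfigSketch(object):
        def __init__(self, payload=None, diagnosis=False, compliance=False,
                     check_results=False, checkin=False):
            self.payload = payload
            self.diagnosis = diagnosis
            self.compliance = compliance
            self.check_results = check_results
            self.checkin = checkin
            self.legacy_upload = True

        def imply_options(self):
            # Every one of these actions talks to the platform Inventory
            # API, so the legacy upload URL layout must be turned off.
            if (self.payload or self.diagnosis or self.compliance or
                    self.check_results or self.checkin):
                self.legacy_upload = False

    for kwargs in ({"checkin": True}, {"check_results": True},
                   {"payload": "./payload.tar.gz"}):
        c = ConfigSketch(**kwargs)
        c.imply_options()
        assert c.legacy_upload is False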
''' - c = InsightsConfig(diagnosis=True) + c = InsightsConfig(**config) c.load_all() assert c.legacy_upload is False From b04b8998de4542fc65fb535a2b94da61960f7f32 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 3 Feb 2021 04:24:07 +0800 Subject: [PATCH 309/892] Add parser for spec to get the count of non-yum type repos (#2906) * Add parser for spec to get the count of non-yum type repos Signed-off-by: Huanhuan Li * Remove uesless line in insights_archive.py Signed-off-by: Huanhuan Li * Add HostContext as dependency Signed-off-by: Huanhuan Li --- insights/collect.py | 4 ++ insights/parsers/satellite_mongodb.py | 40 +++++++++++++++++-- .../parsers/tests/test_satellite_mongodb.py | 31 +++++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 15 ++++++- 5 files changed, 85 insertions(+), 6 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 485f9edc4..f16d57384 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -157,6 +157,10 @@ - name: insights.combiners.satellite_version.SatelliteVersion enabled: true + # needed for the 'pre-check' of the 'is_satellite_capsule' spec + - name: insights.combiners.satellite_version.CapsuleVersion + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true diff --git a/insights/parsers/satellite_mongodb.py b/insights/parsers/satellite_mongodb.py index 01aecfe55..db9e6ccd3 100644 --- a/insights/parsers/satellite_mongodb.py +++ b/insights/parsers/satellite_mongodb.py @@ -6,9 +6,8 @@ MongoDBStorageEngine - command ``mongo pulp_database --eval 'db.serverStatus().storageEngine'`` ----------------------------------------------------------------------------------------------- -The satellite mongodb storage engine parser reads the output of -``mongo pulp_database --eval 'db.serverStatus().storageEngine'`` and -save the storage engine attributes to a dict. +MongoDBNonYumTypeRepos - command ``mongo pulp_database --eval 'db.repo_importers.find({"importer_type_id": { $ne: "yum_importer"}}).count()'`` +---------------------------------------------------------------------------------------------------------------------------------------------- """ from insights import parser, CommandParser @@ -64,3 +63,38 @@ def parse_content(self, content): raise ParseException("Unable to parse the line: {0}".format(line)) if not self: raise SkipException("Cannot get storage engine from Satellite MongoDB") + + +@parser(Specs.satellite_non_yum_type_repos) +class MongoDBNonYumTypeRepos(CommandParser): + """ + Read the ``mongo pulp_database --eval 'db.repo_importers.find({"importer_type_id": { $ne: "yum_importer"}}).count()'`` command + and save the count to attribute named count. 
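The parse rule that follows is small enough to show inline: the mongo banner is three lines and the count is the fourth. A sketch against an illustrative payload::

    content = [
        "MongoDB shell version v3.4.9",
        "connecting to: mongodb://127.0.0.1:27017/pulp_database",
        "MongoDB server version: 3.4.9",
        "0",
    ]

    # Anything other than three banner lines plus a digit is rejected.
    assert len(content) == 4 and content[3].isdigit()
    count = int(content[3])
    assert count == 0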
+ + Sample Output:: + + MongoDB shell version v3.4.9 + connecting to: mongodb://127.0.0.1:27017/pulp_database + MongoDB server version: 3.4.9 + 0 + + Examples:: + + >>> type(satellite_non_yum_type_repos) + + >>> satellite_non_yum_type_repos.count + 0 + + Attributes:: + + count (int): The count of non-yum type repos in pulp_database + + Raises:: + + SkipException: When the output isn't in exptected format + """ + + def parse_content(self, content): + if len(content) != 4 or not content[3].isdigit(): + raise SkipException("Unexpected output for MongoDBNonYumTypeRepos") + self.count = int(content[3]) diff --git a/insights/parsers/tests/test_satellite_mongodb.py b/insights/parsers/tests/test_satellite_mongodb.py index 4e58dc92e..ebdb49430 100644 --- a/insights/parsers/tests/test_satellite_mongodb.py +++ b/insights/parsers/tests/test_satellite_mongodb.py @@ -42,11 +42,31 @@ } '''.strip() +MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT1 = """ +0 +""" + +MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT2 = """ +MongoDB shell version v3.4.9 +connecting to: mongodb://127.0.0.1:27017/pulp_database +MongoDB server version: 3.4.9 +0 +""" + +MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT3 = """ +MongoDB shell version v3.4.9 +connecting to: mongodb://127.0.0.1:27017/pulp_database +MongoDB server version: 3.4.9 +ab +""" + def test_doc_examples(): - output = satellite_mongodb.MongoDBStorageEngine(context_wrap(MONGO_PULP_STORAGE_ENGINE_OUTPUT1)) + engine_output = satellite_mongodb.MongoDBStorageEngine(context_wrap(MONGO_PULP_STORAGE_ENGINE_OUTPUT1)) + repos_output = satellite_mongodb.MongoDBNonYumTypeRepos(context_wrap(MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT2)) globs = { - 'satellite_storage_engine': output + 'satellite_storage_engine': engine_output, + 'satellite_non_yum_type_repos': repos_output } failed, tested = doctest.testmod(satellite_mongodb, globs=globs) assert failed == 0 @@ -66,3 +86,10 @@ def test_no_storage_engine(): satellite_mongodb.MongoDBStorageEngine(context_wrap(MONGO_PULP_STORAGE_ENGINE_OUTPUT3)) with pytest.raises(ParseException): satellite_mongodb.MongoDBStorageEngine(context_wrap(MONGO_PULP_STORAGE_ENGINE_OUTPUT4)) + + +def test_bad_yum_repos_output(): + with pytest.raises(SkipException): + satellite_mongodb.MongoDBNonYumTypeRepos(context_wrap(MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT1)) + with pytest.raises(SkipException): + satellite_mongodb.MongoDBNonYumTypeRepos(context_wrap(MONGO_PULP_NON_YUM_TYPE_REPOS_OUTPUT3)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 7dcb78e02..38f849ad1 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -560,6 +560,7 @@ class Specs(SpecSet): sat5_insights_properties = RegistryPoint() satellite_content_hosts_count = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() + satellite_non_yum_type_repos = RegistryPoint() satellite_version_rb = RegistryPoint() satellite_custom_hiera = RegistryPoint() scheduler = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 56886d7e0..137b28c8f 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -33,7 +33,7 @@ from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod -from insights.combiners.satellite_version import SatelliteVersion +from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.specs import Specs @@ -743,11 +743,24 @@ def is_satellite_server(broker): return True raise SkipComponent + 
@datasource(CapsuleVersion, HostContext) + def is_satellite_capsule(broker): + """ + bool: Returns True if the host is satellite capsule. + """ + if broker[CapsuleVersion]: + return True + raise SkipComponent + satellite_content_hosts_count = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", deps=[is_satellite_server] ) satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") + satellite_non_yum_type_repos = simple_command( + "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: \"yum_importer\"}}).count()'", + deps=[[is_satellite_server, is_satellite_capsule]] + ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") scsi = simple_file("/proc/scsi/scsi") From fc927a926eef9f71576590f7559e5e3f38f8c8ff Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 3 Feb 2021 04:49:51 +0800 Subject: [PATCH 310/892] New parser for LD_LIBRARY_PATH of all PIDs (#2902) * New parser for LD_LIBRARY_PATH of all users Signed-off-by: Xiangce Liu * Change to read the /proc/PID/environ Signed-off-by: Xiangce Liu * Remove the multi_output=True Signed-off-by: Xiangce Liu * fix doc test in python2 Signed-off-by: Xiangce Liu --- .../ld_library_path.rst | 3 + insights/parsers/ld_library_path.py | 65 +++++++++++++++++++ .../parsers/tests/test_ld_library_path.py | 57 ++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 16 ++++- 5 files changed, 141 insertions(+), 1 deletion(-) create mode 100644 docs/shared_parsers_catalog/ld_library_path.rst create mode 100644 insights/parsers/ld_library_path.py create mode 100644 insights/parsers/tests/test_ld_library_path.py diff --git a/docs/shared_parsers_catalog/ld_library_path.rst b/docs/shared_parsers_catalog/ld_library_path.rst new file mode 100644 index 000000000..738401ec9 --- /dev/null +++ b/docs/shared_parsers_catalog/ld_library_path.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ld_library_path + :members: + :show-inheritance: diff --git a/insights/parsers/ld_library_path.py b/insights/parsers/ld_library_path.py new file mode 100644 index 000000000..99dc057f1 --- /dev/null +++ b/insights/parsers/ld_library_path.py @@ -0,0 +1,65 @@ +""" +LdLibraryPath - LD_LIBRARY_PATH of PIDs +======================================= + +Parser for parsing the environment variable LD_LIBRARY_PATH of each PID. + +""" + +from collections import namedtuple +from insights import parser, Parser +from insights.parsers import SkipException, ParseException +from insights.specs import Specs + +LdLibraryPath = namedtuple('LdLibraryPath', ('pid', 'path', 'raw')) +"""namedtuple: Type for storing the LdLibraryPath of PID""" + + +@parser(Specs.ld_library_path_of_pid) +class PidLdLibraryPath(Parser, list): + """ + Base class for parsing the ``LD_LIBRARY_PATH`` variable of each PID of the + system into a list. + + Typical content looks like:: + + 105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib + 105902 /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run + + Examples: + >>> len(ld_lib_path) + 2 + >>> isinstance(ld_lib_path[0].path, list) + True + >>> len(ld_lib_path[0].path) + 3 + >>> '/sapdb/clients/RH1/lib' in ld_lib_path[0].path + True + >>> ld_lib_path[0].pid + '105901' + + Raises: + SkipException: When the output is empty or nothing needs to parse. 
+ ParseException: When the line cannot be parsed. + + """ + + def parse_content(self, content): + if not content: + raise SkipException + + llds = [] + for line in content: + pid, _, raw = [s.strip() for s in line.partition(' ')] + paths = raw + if not pid.isdigit(): + raise ParseException('Incorrect line: {0}'.format(line)) + if raw and raw[0] == raw[-1] and raw[0] in ('\'', '"'): + paths = raw[1:-1] + paths = paths.split(':') + llds.append(LdLibraryPath(pid, paths, raw)) + + if not llds: + raise SkipException("LD_LIBRARY_PATH not set.") + + self.extend(llds) diff --git a/insights/parsers/tests/test_ld_library_path.py b/insights/parsers/tests/test_ld_library_path.py new file mode 100644 index 000000000..30c635c69 --- /dev/null +++ b/insights/parsers/tests/test_ld_library_path.py @@ -0,0 +1,57 @@ +from insights.parsers.ld_library_path import PidLdLibraryPath +from insights.tests import context_wrap +from insights.parsers import ld_library_path, SkipException, ParseException +import doctest +import pytest + +LD_LIBRARY_PATH_EMPTY = """ +""".strip() + +LD_LIBRARY_PATH_INVALID = """ +LD_LIBRARY_PATH: Undefined variable. +""".strip() + +LD_LIBRARY_PATH_DOC = """ +105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +105902 /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run +""".strip() + +LD_LIBRARY_PATH = """ +105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +105902 "/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run" +105903 +105904 '' +""".strip() # noqa: W391 + + +def test_ld_library_path(): + ret = PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH)) + assert len(ret) == 4 + assert ret[0].pid == '105901' + assert ret[1].pid == '105902' + assert ret[2].pid == '105903' + assert ret[1].raw == LD_LIBRARY_PATH.splitlines()[1].split()[-1] + assert ret[2].raw == '' + assert ret[3].raw == "''" + assert ret[2].path == [''] + assert ret[3].path == [''] + for p in LD_LIBRARY_PATH.splitlines()[0].split()[-1].split(':'): + assert p in ret[0].path + for p in LD_LIBRARY_PATH.splitlines()[1].split()[-1].strip('"').split(':'): + assert p in ret[1].path + + +def test_empty_and_invalid(): + with pytest.raises(SkipException): + PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_EMPTY)) + + with pytest.raises(ParseException): + PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_INVALID)) + + +def test_doc_examples(): + env = { + 'ld_lib_path': PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_DOC)), + } + failed, total = doctest.testmod(ld_library_path, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 38f849ad1..f7eeafc30 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -284,6 +284,7 @@ class Specs(SpecSet): ksmstate = RegistryPoint() kubepods_cpu_quota = RegistryPoint(multi_output=True) lastupload = RegistryPoint(multi_output=True) + ld_library_path_of_pid = RegistryPoint() libssh_client_config = RegistryPoint(filterable=True) libssh_server_config = RegistryPoint(filterable=True) libvirtd_log = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 137b28c8f..badc5ac8f 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -13,7 +13,7 @@ import json from grp import getgrgid -from os import stat +from os import stat, listdir as os_listdir from pwd import getpwuid import yaml @@ -479,6 +479,20 @@ def httpd_cmd(broker): kubepods_cpu_quota = 
glob_file("/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us") last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) + + @datasource() + def ld_library_path_of_pid(broker): + pids = [p for p in sorted(os_listdir('/proc/')) if p.isdigit()] + llds = [] + for p in pids: + with open('/proc/{0}/environ'.format(p), 'r') as fp: + vars = fp.read() + lld = [v.split('=', 1)[-1] for v in vars.split('\x00') if v.startswith('LD_LIBRARY_PATH=')] + llds.append("{0} {1}".format(p, lld[0])) if lld else None + if llds: + return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/cat_all_PID_LD_LIBRARY_PATH') + raise SkipComponent + libssh_client_config = simple_file("/etc/libssh/libssh_client.config") libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") From 3c03990043633311caff495aec178ee1eb0a646d Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Wed, 3 Feb 2021 03:00:00 +0530 Subject: [PATCH 311/892] Remove EOL Satellite 5 Specs (#2869) Signed-off-by: Rohan Arora --- insights/specs/default.py | 7 ------- insights/specs/insights_archive.py | 1 - insights/specs/sos_archive.py | 7 +++++++ .../tests/client/collection_rules/test_map_components.py | 5 +++++ 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index badc5ac8f..b7ac78178 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -292,8 +292,6 @@ def cloud_cfg(broker): cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") cmdline = simple_file("/proc/cmdline") - cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) - cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync = simple_file("/etc/sysconfig/corosync") @datasource(HostContext, [IsRhel7, IsRhel8]) @@ -724,11 +722,6 @@ def pcp_enabled(broker): redhat_release = simple_file("/etc/redhat-release") resolv_conf = simple_file("/etc/resolv.conf") rhn_conf = first_file(["/etc/rhn/rhn.conf", "/conf/rhn/rhn/rhn.conf"]) - rhn_entitlement_cert_xml = first_of([glob_file("/etc/sysconfig/rhn/rhn-entitlement-cert.xml*"), - glob_file("/conf/rhn/sysconfig/rhn/rhn-entitlement-cert.xml*")]) - rhn_schema_version = simple_command("/usr/bin/rhn-schema-version") - rhn_taskomatic_daemon_log = first_file(["/var/log/rhn/rhn_taskomatic_daemon.log", - "rhn-logs/rhn/rhn_taskomatic_daemon.log"]) rhsm_conf = simple_file("/etc/rhsm/rhsm.conf") rhsm_log = simple_file("/var/log/rhsm/rhsm.log") rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index a7ef1809a..93cb7ada0 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -197,7 +197,6 @@ class InsightsArchiveSpecs(Specs): readlink_e_etc_mtab = simple_file("insights_commands/readlink_-e_.etc.mtab") readlink_e_shift_cert_client = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-client-current.pem") readlink_e_shift_cert_server = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-server-current.pem") - rhn_schema_version = simple_file("insights_commands/rhn-schema-version") rhev_data_center = 
simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center") rndc_status = simple_file("insights_commands/rndc_status") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 8dbaf4fdd..a186773dc 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -39,6 +39,8 @@ class SosSpecs(Specs): ] ) cni_podman_bridge_conf = simple_file("/etc/cni/net.d/87-podman-bridge.conflist") + cobbler_settings = first_file(["/etc/cobbler/settings", "/conf/cobbler/settings"]) + cobbler_modules_conf = first_file(["/etc/cobbler/modules.conf", "/conf/cobbler/modules.conf"]) corosync_cmapctl = glob_file("sos_commands/corosync/corosync-cmapctl*") cpe = simple_file("/etc/system-release-cpe") cpu_smt_control = simple_file("sys/devices/system/cpu/smt/control") @@ -205,16 +207,21 @@ class SosSpecs(Specs): rabbitmq_report_of_containers = glob_file("sos_commands/rabbitmq/docker_exec_-t_rabbitmq-bundle-docker-*_rabbitmqctl_report") rabbitmq_startup_err = simple_file("/var/log/rabbitmq/startup_err") rhn_charsets = first_file(["sos_commands/satellite/rhn-charsets", "sos_commands/rhn/rhn-charsets"]) + rhn_entitlement_cert_xml = first_of([glob_file("/etc/sysconfig/rhn/rhn-entitlement-cert.xml*"), + glob_file("/conf/rhn/sysconfig/rhn/rhn-entitlement-cert.xml*")]) rhn_hibernate_conf = first_file(["/usr/share/rhn/config-defaults/rhn_hibernate.conf", "/config-defaults/rhn_hibernate.conf"]) rhn_search_daemon_log = first_file([ "/var/log/rhn/search/rhn_search_daemon.log", "/rhn-logs/rhn/search/rhn_search_daemon.log" ]) + rhn_schema_version = simple_file("sos_commands/satellite/rhn-schema-version") rhn_server_satellite_log = simple_file("var/log/rhn/rhn_server_satellite.log") rhn_server_xmlrpc_log = first_file([ "/var/log/rhn/rhn_server_xmlrpc.log", "/rhn-logs/rhn/rhn_server_xmlrpc.log" ]) + rhn_taskomatic_daemon_log = first_file(["/var/log/rhn/rhn_taskomatic_daemon.log", + "rhn-logs/rhn/rhn_taskomatic_daemon.log"]) rhosp_release = simple_file("/etc/rhosp-release") root_crontab = first_file(["sos_commands/crontab/root_crontab", "sos_commands/cron/root_crontab"]) route = simple_file("sos_commands/networking/route_-n") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index e5131e01d..fc7966cc4 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -66,6 +66,8 @@ def test_get_component_by_symbolic_name(): 'yum_list_installed', 'zdump_v', 'cni_podman_bridge_conf', + 'cobbler_modules_conf', + 'cobbler_settings', 'cpu_smt_control', 'cpu_vulns_meltdown', 'cpu_vulns_spectre_v1', @@ -77,8 +79,11 @@ def test_get_component_by_symbolic_name(): 'ironic_conf', 'octavia_conf', 'partitions', + 'rhn_entitlement_cert_xml', 'rhn_hibernate_conf', + 'rhn_schema_version', 'rhn_search_daemon_log', + 'rhn_taskomatic_daemon_log', 'rhosp_release', 'secure', 'foreman_tasks_config', From 8d830497b948a007306bd4420f8e053e781d03b0 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Wed, 3 Feb 2021 03:12:44 +0530 Subject: [PATCH 312/892] Add virsh_list_all spec path in sosreport (#2924) Signed-off-by: Akshay Gaikwad --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index a186773dc..5a06b6c95 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -273,6 +273,7 @@ class SosSpecs(Specs): vdsm_import_log = glob_file("var/log/vdsm/import/import-*.log") vgdisplay = first_file(["sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0", "sos_commands/lvm2/vgdisplay_-vv"]) vgs = first_file(["sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0", "sos_commands/lvm2/vgs_-v", "sos_commands/devicemapper/vgs_-v"]) + virsh_list_all = simple_file("sos_commands/virsh/virsh_-r_list_--all") vmcore_dmesg = glob_file("/var/crash/*/vmcore-dmesg.txt") vmware_tools_conf = simple_file("etc/vmware-tools/tools.conf") xfs_info = glob_file("sos_commands/xfs/xfs_info*") From ba20e91e1021bf89e7bbafd2335f34916a0e20e4 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 3 Feb 2021 05:55:13 +0800 Subject: [PATCH 313/892] Enhance the IsRhel# components and add the 'minor' attr (#2925) Signed-off-by: Xiangce Liu --- insights/components/rhel_version.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py index 2ae326487..5b2d2c9e6 100644 --- a/insights/components/rhel_version.py +++ b/insights/components/rhel_version.py @@ -24,12 +24,16 @@ class IsRhel6(object): to determine RHEL version. It checks if RHEL6, if not RHEL6 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 6. + Raises: SkipComponent: When RHEL version is not RHEL6. """ def __init__(self, rhel): if rhel.major != 6: raise SkipComponent('Not RHEL6') + self.minor = rhel.minor @component(RedHatRelease) @@ -39,12 +43,16 @@ class IsRhel7(object): to determine RHEL version. It checks if RHEL7, if not \ RHEL7 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 7. + Raises: SkipComponent: When RHEL version is not RHEL7. """ def __init__(self, rhel): if rhel.major != 7: raise SkipComponent('Not RHEL7') + self.minor = rhel.minor @component(RedHatRelease) @@ -54,9 +62,13 @@ class IsRhel8(object): to determine RHEL version. It checks if RHEL8, if not RHEL8 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 8. + Raises: SkipComponent: When RHEL version is not RHEL8. 
""" def __init__(self, rhel): if rhel.major != 8: raise SkipComponent('Not RHEL8') + self.minor = rhel.minor From f9446cfe13ce7806778a0f3fefd8e8597c47efcb Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 4 Feb 2021 23:29:55 +0800 Subject: [PATCH 314/892] Add spec to get the block size of gfs2 file system (#2910) * Add spec to get the block size of gfs2 file system Signed-off-by: Huanhuan Li * Add HostContext as depnedency Signed-off-by: Huanhuan Li * Add more tests and change to IsRhel* Signed-off-by: Huanhuan Li --- .../gfs2_file_system_block_size.rst | 3 ++ insights/collect.py | 16 ++++++ .../parsers/gfs2_file_system_block_size.py | 42 ++++++++++++++++ .../tests/test_gfs2_file_system_block_size.py | 49 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 26 +++++++++- 6 files changed, 136 insertions(+), 1 deletion(-) create mode 100644 docs/shared_parsers_catalog/gfs2_file_system_block_size.rst create mode 100644 insights/parsers/gfs2_file_system_block_size.py create mode 100644 insights/parsers/tests/test_gfs2_file_system_block_size.py diff --git a/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst b/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst new file mode 100644 index 000000000..32b2bff3d --- /dev/null +++ b/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gfs2_file_system_block_size + :members: + :show-inheritance: diff --git a/insights/collect.py b/insights/collect.py index f16d57384..e69d69e50 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -161,6 +161,22 @@ - name: insights.combiners.satellite_version.CapsuleVersion enabled: true + # needed for the 'pre-check' of the 'gfs2_mount_points' spec + - name: insights.parsers.mount.Mount + enabled: true + - name: insights.combiners.redhat_release.RedHatRelease + enabled: true + - name: insights.parsers.uname.Uname + enabled: true + - name: insights.parsers.redhat_release.RedhatRelease + enabled: true + - name: insights.components.rhel_version.IsRhel6 + enabled: true + - name: insights.components.rhel_version.IsRhel7 + enabled: true + - name: insights.components.rhel_version.IsRhel8 + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true diff --git a/insights/parsers/gfs2_file_system_block_size.py b/insights/parsers/gfs2_file_system_block_size.py new file mode 100644 index 000000000..20c6b80e4 --- /dev/null +++ b/insights/parsers/gfs2_file_system_block_size.py @@ -0,0 +1,42 @@ +""" +GFS2FileSystemBlockSize - command ``stat -fc %s `` +==================================================================== + +The parser parse the output of ``stat -fc %s `` +""" + +from insights import parser, CommandParser +from insights.specs import Specs +from insights.parsers import SkipException + + +@parser(Specs.gfs2_file_system_block_size) +class GFS2FileSystemBlockSize(CommandParser): + """ + Class for parsing ``stat -fc %s `` command output. + The size is kept in the ``block_size`` property. + Typical output of command ``stat -fc %s `` looks like:: + + 4096 + + Examples:: + + >>> type(gfs2_mp) + + >>> gfs2_mp.block_size + 4096 + + Raise:: + + SkipException: When the content isn't in the expected format. + + Attributes:: + + block_size (int): The block size of the gfs2 file system. 
+ """ + + def parse_content(self, content): + if len(content) == 1 and content[0].isdigit(): + self.block_size = int(content[0]) + else: + raise SkipException('The output is invalid.') diff --git a/insights/parsers/tests/test_gfs2_file_system_block_size.py b/insights/parsers/tests/test_gfs2_file_system_block_size.py new file mode 100644 index 000000000..19e150e94 --- /dev/null +++ b/insights/parsers/tests/test_gfs2_file_system_block_size.py @@ -0,0 +1,49 @@ +from insights.parsers import gfs2_file_system_block_size +from insights.tests import context_wrap +from insights.parsers import SkipException +import pytest +import doctest + +BLOCK_SIZE_OUTPUT = """ +4096 +""".strip() + +BLOCK_SIZE_OUTPUT_2 = """ +abc +""".strip() + +BLOCK_SIZE_OUTPUT_3 = """ +512 +""".strip() + +BLOCK_SIZE_OUTPUT_4 = """ +4096 +512 +""".strip() + +BLOCK_SIZE_OUTPUT_5 = """ +stat: missing operand +Try 'stat --help' for more information. +""".strip() + + +def test_exp(): + with pytest.raises(SkipException): + gfs2_file_system_block_size.GFS2FileSystemBlockSize(context_wrap(BLOCK_SIZE_OUTPUT_2)) + with pytest.raises(SkipException): + gfs2_file_system_block_size.GFS2FileSystemBlockSize(context_wrap(BLOCK_SIZE_OUTPUT_4)) + with pytest.raises(SkipException): + gfs2_file_system_block_size.GFS2FileSystemBlockSize(context_wrap(BLOCK_SIZE_OUTPUT_5)) + + +def test_doc(): + env = { + 'gfs2_mp': gfs2_file_system_block_size.GFS2FileSystemBlockSize(context_wrap(BLOCK_SIZE_OUTPUT)) + } + failed, total = doctest.testmod(gfs2_file_system_block_size, globs=env) + assert failed == 0 + + +def test_other_size(): + gfs2 = gfs2_file_system_block_size.GFS2FileSystemBlockSize(context_wrap(BLOCK_SIZE_OUTPUT_3)) + assert gfs2.block_size == 512 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index f7eeafc30..64b48a121 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -188,6 +188,7 @@ class Specs(SpecSet): getconf_page_size = RegistryPoint() getenforce = RegistryPoint() getsebool = RegistryPoint() + gfs2_file_system_block_size = RegistryPoint(multi_output=True) glance_api_conf = RegistryPoint() glance_api_log = RegistryPoint(filterable=True) glance_cache_conf = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index b7ac78178..dc6ffd0f1 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -30,10 +30,11 @@ from insights.combiners.services import Services from insights.combiners.sap import Sap from insights.combiners.ps import Ps -from insights.components.rhel_version import IsRhel8, IsRhel7 +from insights.components.rhel_version import IsRhel8, IsRhel7, IsRhel6 from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion +from insights.parsers.mount import Mount from insights.specs import Specs @@ -382,6 +383,29 @@ def du_dirs_list(broker): getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") getenforce = simple_command("/usr/sbin/getenforce") getsebool = simple_command("/usr/sbin/getsebool -a") + + @datasource(Mount, [IsRhel6, IsRhel7, IsRhel8], HostContext) + def gfs2_mount_points(broker): + """ + Function to search the output of ``mount`` to find all the gfs2 file + systems. + And only run the ``stat`` command on RHEL version that's less than + 8.3. With 8.3 and later, the command ``blkid`` will also output the + block size info. 
+ + Returns: + list: a list of mount points of which the file system type is gfs2 + """ + gfs2_mount_points = [] + if (broker.get(IsRhel6) or broker.get(IsRhel7) or + (broker.get(IsRhel8) and broker[IsRhel8].minor < 3)): + for mnt in broker[Mount]: + if mnt.mount_type == "gfs2": + gfs2_mount_points.append(mnt.mount_point) + if gfs2_mount_points: + return gfs2_mount_points + raise SkipComponent + gfs2_file_system_block_size = foreach_execute(gfs2_mount_points, "/usr/bin/stat -fc %%s %s") gluster_v_info = simple_command("/usr/sbin/gluster volume info") gnocchi_conf = first_file(["/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", "/etc/gnocchi/gnocchi.conf"]) gnocchi_metricd_log = first_file(["/var/log/containers/gnocchi/gnocchi-metricd.log", "/var/log/gnocchi/metricd.log"]) From b23a8fa71fe05b713a298b62d5d3f9705ccd40fc Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 4 Feb 2021 23:38:38 +0800 Subject: [PATCH 315/892] Remove the sap_hdb_version from insights_archive.py (#2922) Signed-off-by: Xiangce Liu --- insights/specs/insights_archive.py | 1 - 1 file changed, 1 deletion(-) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 93cb7ada0..b0c58a535 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -200,7 +200,6 @@ class InsightsArchiveSpecs(Specs): rhev_data_center = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center") rndc_status = simple_file("insights_commands/rndc_status") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) - sap_hdb_version = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_sap_hdb_version") saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") satellite_content_hosts_count = first_file([ "insights_commands/sudo_-iu_postgres_.usr.bin.psql_-d_foreman_-c_select_count_from_hosts", From c7ad2ca80baab4fb6ba450129d130cd8b4f3e473 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 4 Feb 2021 23:46:45 +0800 Subject: [PATCH 316/892] Change to use all_instance for HDB Version (#2919) * Change to use all_instance for HDB Version Signed-off-by: Xiangce Liu * Add docstring for sap_sid Signed-off-by: Xiangce Liu * Remove duplicated SID Signed-off-by: Xiangce Liu --- insights/specs/default.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index dc6ffd0f1..abd92acdc 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -756,8 +756,11 @@ def pcp_enabled(broker): @datasource(Sap, HostContext) def sap_sid(broker): + """ + list: List of the SID of all SAP Instances. 
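    The switch from ``local_instances`` to ``all_instances`` can introduce
    duplicate SIDs, hence the ``set`` in the implementation below. A toy
    illustration (the instance names and the ``sid_of`` helper are invented
    for this example, not the real ``Sap`` combiner API)::

        def unique_sids(instances, sid_of):
            # lower-case each SID and drop duplicates
            return sorted(set(sid_of(i).lower() for i in instances))

        instances = ['RH1_HDB00', 'RH1_HDB01', 'RH2_HDB00']
        assert unique_sids(instances, lambda i: i.split('_')[0]) == ['rh1', 'rh2']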
+ """ sap = broker[Sap] - return [sap.sid(i).lower() for i in sap.local_instances] + return list(set(sap.sid(i).lower() for i in sap.all_instances)) sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") From 20cba65acd36959f37c16efe3d012072d3e226f4 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 5 Feb 2021 00:02:07 +0800 Subject: [PATCH 317/892] New parser of version_info (#2930) Signed-off-by: Xiangce Liu --- docs/shared_parsers_catalog/version_info.rst | 3 ++ insights/parsers/tests/test_version_info.py | 30 +++++++++++ insights/parsers/version_info.py | 56 ++++++++++++++++++++ 3 files changed, 89 insertions(+) create mode 100644 docs/shared_parsers_catalog/version_info.rst create mode 100644 insights/parsers/tests/test_version_info.py create mode 100644 insights/parsers/version_info.py diff --git a/docs/shared_parsers_catalog/version_info.rst b/docs/shared_parsers_catalog/version_info.rst new file mode 100644 index 000000000..2410a98eb --- /dev/null +++ b/docs/shared_parsers_catalog/version_info.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.version_info + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_version_info.py b/insights/parsers/tests/test_version_info.py new file mode 100644 index 000000000..8c0234d48 --- /dev/null +++ b/insights/parsers/tests/test_version_info.py @@ -0,0 +1,30 @@ +import doctest +from insights.parsers import version_info +from insights.tests import context_wrap + + +VER_INFO_1 = """ +{"core_version": "3.0.8-dev", "client_version": "3.1.1"} +""".strip() + +VER_INFO_2 = """ +{"core_version": "3.0.203-1", "client_version": "3.1.1"} +""".strip() + + +def test_version_info(): + ret = version_info.VersionInfo(context_wrap(VER_INFO_1)) + assert ret.core_version == '3.0.8-dev' + assert ret.client_version == '3.1.1' + + ret = version_info.VersionInfo(context_wrap(VER_INFO_2)) + assert ret.core_version == '3.0.203-1' + assert ret.client_version == '3.1.1' + + +def test_doc_examples(): + env = { + 'ver': version_info.VersionInfo(context_wrap(VER_INFO_2)), + } + failed, total = doctest.testmod(version_info, globs=env) + assert failed == 0 diff --git a/insights/parsers/version_info.py b/insights/parsers/version_info.py new file mode 100644 index 000000000..36eec114f --- /dev/null +++ b/insights/parsers/version_info.py @@ -0,0 +1,56 @@ +""" +VersionInfo - file ``version_info`` +=================================== + +The version of the insights core and insights client that the archive used. + +""" +from insights import JSONParser, parser +from insights.specs import Specs + + +@parser(Specs.version_info) +class VersionInfo(JSONParser): + """ + This parser parses the ``version_info`` file generated by the + ``insights-client`` command. + + Typical content of this file is:: + + {"core_version": "3.0.203-1", "client_version": "3.1.1"} + + .. note:: + + The :attr:`client_version` provided by this Parser is a short version + only, to get the full version of the ``insights-client`` package, + please use the :class:`insights.parsers.installed_rpms.InstalledRpms` + Parser instead. + + Examples: + >>> ver.core_version == '3.0.203-1' + True + >>> ver.client_version == '3.1.1' + True + """ + @property + def core_version(self): + """ + Returns: + (str): The version of the insights core. 
+ """ + return self.data['core_version'] + + @property + def client_version(self): + """ + Returns: + (str): The version of the insights client. + + .. note:: + + This attribute returns a short version of the insights client only, + to get the full version of the ``insights-client`` package, please + use the :class:`insights.parsers.installed_rpms.InstalledRpms` Parser + instead. + """ + return self.data['client_version'] From 8a03797b084f744abd8b6ec683198a752cb3bf3d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 5 Feb 2021 01:24:42 +0800 Subject: [PATCH 318/892] Check file existence state before opening it in cloud_cfg datasource (#2921) * Change to catch IOError instead of OSError in cloud_cfg datasource Signed-off-by: Xiangce Liu * Check file existence before open it in cloud_cfg datasource Signed-off-by: Xiangce Liu * Refactor datasource and fix merge conflict * Had to fix some merge conflicts * Added simple_file as a dependency to actually ready the cloud.cfg file so that datasource would only execute if file exists * Want to catch all exceptions in the datasource Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> Co-authored-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/default.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index abd92acdc..1aba9ad24 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -226,8 +226,9 @@ def is_ceph_monitor(broker): cinder_api_log = first_file(["/var/log/containers/cinder/cinder-api.log", "/var/log/cinder/cinder-api.log"]) cinder_conf = first_file(["/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", "/etc/cinder/cinder.conf"]) cinder_volume_log = first_file(["/var/log/containers/cinder/volume.log", "/var/log/containers/cinder/cinder-volume.log", "/var/log/cinder/volume.log"]) + cloud_cfg_input = simple_file("/etc/cloud/cloud.cfg") - @datasource(HostContext) + @datasource(cloud_cfg_input, HostContext) def cloud_cfg(broker): """This datasource provides the network configuration collected from ``/etc/cloud/cloud.cfg``. @@ -274,20 +275,20 @@ def cloud_cfg(broker): str: JSON string when the ``network`` parameter is configure, else nothing is returned. Raises: - SkipComponent: When the path does not exist. + SkipComponent: When the path does not exist or any exception occurs. 
""" relative_path = '/etc/cloud/cloud.cfg' - network_config = '' - try: - with open(relative_path, 'r') as f: - content = yaml.load(f, Loader=yaml.SafeLoader) + content = broker[DefaultSpecs.cloud_cfg_input].content + if content: + content = yaml.load('\n'.join(content), Loader=yaml.SafeLoader) network_config = content.get('network', None) if network_config: return DatasourceProvider(content=json.dumps(network_config), relative_path=relative_path) + except Exception as e: + raise SkipComponent("Unexpected exception:{e}".format(e=str(e))) - except OSError: - raise SkipComponent() + raise SkipComponent() cloud_init_custom_network = simple_file("/etc/cloud/cloud.cfg.d/99-custom-networking.cfg") cloud_init_log = simple_file("/var/log/cloud-init.log") From e20d1807ccdb666b117f5a208acbd1c516eb5239 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Fri, 5 Feb 2021 08:58:37 -0500 Subject: [PATCH 319/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 61 +++++++------------------- 1 file changed, 16 insertions(+), 45 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index fcea0dbbe..449957513 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1092,7 +1092,9 @@ "gnome-shell", "haproxy", "heat-engine", + "httpd", "mongdb", + "nginx", "nova-compute", "nova-conductor", "ntpd", @@ -1103,7 +1105,6 @@ "pacemaker-controld", "pacemaker_remote", "pacemakerd", - "pcsd", "pkla-check-auth", "pmcd", "pmie", @@ -1144,8 +1145,10 @@ "gnome-shell", "haproxy", "heat-engine", + "httpd", "mongdb", "mysqld", + "nginx", "nova-compute", "nova-conductor", "ntpd", @@ -1157,7 +1160,6 @@ "pacemaker-controld", "pacemaker_remote", "pacemakerd", - "pcsd", "phc2sys", "pkla-check-auth", "pmcd", @@ -1225,7 +1227,6 @@ "pacemaker-controld", "pacemaker_remote", "pacemakerd", - "pcsd", "pkla-check-auth", "pmcd", "pmie", @@ -1262,8 +1263,10 @@ "gnome-shell", "haproxy", "heat-engine", + "httpd", "mongdb", "neutron-ns-metadata-proxy", + "nginx", "nginx: master process", "nginx: worker process", "nova-compute", @@ -1276,7 +1279,6 @@ "pacemaker-controld", "pacemaker_remote", "pacemakerd", - "pcsd", "pkla-check-auth", "pmcd", "pmie", @@ -1335,11 +1337,6 @@ "pattern": [], "symbolic_name": "readlink_e_shift_cert_server" }, - { - "command": "/usr/bin/rhn-schema-version", - "pattern": [], - "symbolic_name": "rhn_schema_version" - }, { "command": "python -m insights.tools.cat --no-header rhev_data_center", "pattern": [], @@ -1627,17 +1624,23 @@ }, { "file": "/run/udev/rules.d/40-redhat.rules", - "pattern": [], + "pattern": [ + "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" + ], "symbolic_name": "etc_udev_40_redhat_rules" }, { "file": "/usr/lib/udev/rules.d/40-redhat.rules", - "pattern": [], + "pattern": [ + "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" + ], "symbolic_name": "etc_udev_40_redhat_rules" }, { "file": "/usr/local/lib/udev/rules.d/40-redhat.rules", - "pattern": [], + "pattern": [ + "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" + ], "symbolic_name": "etc_udev_40_redhat_rules" }, { @@ -1913,16 +1916,6 @@ "pattern": [], "symbolic_name": "corosync_conf" }, - { - "file": "/etc/cobbler/modules.conf", - "pattern": [], - "symbolic_name": "cobbler_modules_conf" - }, - { - "file": "/etc/cobbler/settings", - "pattern": [], - "symbolic_name": "cobbler_settings" - }, { "file": "/etc/sysconfig/corosync", "pattern": [], @@ 
-3207,33 +3200,11 @@ ], "symbolic_name": "rh_mongodb26_conf" }, - { - "file": "/etc/sysconfig/rhn/()*rhn-entitlement-cert\\.xml.*", - "pattern": [], - "symbolic_name": "rhn_entitlement_cert_xml" - }, { "file": "/etc/rhn/rhn.conf", "pattern": [], "symbolic_name": "rhn_conf" }, - { - "file": "/usr/share/rhn/config-defaults/rhn_hibernate.conf", - "pattern": [], - "symbolic_name": "rhn_hibernate_conf" - }, - { - "file": "/var/log/rhn/search/rhn_search_daemon.log", - "pattern": [ - "APPARENT DEADLOCK!" - ], - "symbolic_name": "rhn_search_daemon_log" - }, - { - "file": "/var/log/rhn/rhn_taskomatic_daemon.log", - "pattern": [], - "symbolic_name": "rhn_taskomatic_daemon_log" - }, { "file": "/etc/rhosp-release", "pattern": [], @@ -4296,5 +4267,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-01-26T11:44:37.067394" + "version": "2021-01-28T16:16:27.908132" } \ No newline at end of file From 01de3e954ec25e1d8f234097c39e0d8df14e5c92 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 11 Feb 2021 06:16:27 +0800 Subject: [PATCH 320/892] Add parser for spec "ls -lan /var/cache/pulp" (#2905) Signed-off-by: Huanhuan Li --- .../ls_var_cache_pulp.rst | 3 ++ insights/parsers/ls_var_cache_pulp.py | 37 +++++++++++++++++++ .../parsers/tests/test_ls_var_cache_pulp.py | 31 ++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 74 insertions(+) create mode 100644 docs/shared_parsers_catalog/ls_var_cache_pulp.rst create mode 100644 insights/parsers/ls_var_cache_pulp.py create mode 100644 insights/parsers/tests/test_ls_var_cache_pulp.py diff --git a/docs/shared_parsers_catalog/ls_var_cache_pulp.rst b/docs/shared_parsers_catalog/ls_var_cache_pulp.rst new file mode 100644 index 000000000..921ad97b0 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_cache_pulp.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_cache_pulp + :members: + :show-inheritance: diff --git a/insights/parsers/ls_var_cache_pulp.py b/insights/parsers/ls_var_cache_pulp.py new file mode 100644 index 000000000..f9c197bf4 --- /dev/null +++ b/insights/parsers/ls_var_cache_pulp.py @@ -0,0 +1,37 @@ +""" +LsVarCachePulp - command ``ls -lan /var/cache/pulp`` +==================================================== + +The ``ls -lan /var/cache/pulp`` command provides information for the listing of the ``/var/cache/pulp`` directory. + +Sample input is shown in the Examples. See ``FileListing`` class for +additional information. + +Sample directory list:: + + total 0 + drwxrwxr-x. 5 48 1000 216 Jan 21 12:56 . + drwxr-xr-x. 10 0 0 121 Jan 20 13:57 .. + lrwxrwxrwx. 1 0 0 19 Jan 21 12:56 cache -> /var/lib/pulp/cache + drwxr-xr-x. 2 48 48 6 Jan 21 13:03 reserved_resource_worker-0@dhcp130-202.gsslab.pnq2.redhat.com + drwxr-xr-x. 2 48 48 6 Jan 21 02:03 reserved_resource_worker-1@dhcp130-202.gsslab.pnq2.redhat.com + drwxr-xr-x. 
2 48 48 6 Jan 20 14:03 resource_manager@dhcp130-202.gsslab.pnq2.redhat.com + +Examples: + + >>> "journal" in ls_var_cache_pulp + False + >>> "/var/cache/pulp" in ls_var_cache_pulp + True +""" + + +from insights.specs import Specs + +from insights import CommandParser, parser, FileListing + + +@parser(Specs.ls_var_cache_pulp) +class LsVarCachePulp(CommandParser, FileListing): + """Parses output of ``ls -lan /var/cache/pulp`` command.""" + pass diff --git a/insights/parsers/tests/test_ls_var_cache_pulp.py b/insights/parsers/tests/test_ls_var_cache_pulp.py new file mode 100644 index 000000000..fadaa7463 --- /dev/null +++ b/insights/parsers/tests/test_ls_var_cache_pulp.py @@ -0,0 +1,31 @@ +import doctest + +from insights.parsers import ls_var_cache_pulp +from insights.parsers.ls_var_cache_pulp import LsVarCachePulp +from insights.tests import context_wrap + +LS_VAR_CACHE_PULP = """ +total 0 +drwxrwxr-x. 5 48 1000 216 Jan 21 12:56 . +drwxr-xr-x. 10 0 0 121 Jan 20 13:57 .. +lrwxrwxrwx. 1 0 0 19 Jan 21 12:56 cache -> /var/lib/pulp/cache +drwxr-xr-x. 2 48 48 6 Jan 21 13:03 reserved_resource_worker-0@dhcp130-202.gsslab.pnq2.redhat.com +drwxr-xr-x. 2 48 48 6 Jan 21 02:03 reserved_resource_worker-1@dhcp130-202.gsslab.pnq2.redhat.com +drwxr-xr-x. 2 48 48 6 Jan 20 14:03 resource_manager@dhcp130-202.gsslab.pnq2.redhat.com +""" + + +def test_ls_var_cache_pulp(): + ls_var_cache_pulp = LsVarCachePulp(context_wrap(LS_VAR_CACHE_PULP, path="insights_commands/ls_-lan_.var.cache.pulp")) + assert ls_var_cache_pulp.files_of('/var/cache/pulp') == ['cache'] + cache_item = ls_var_cache_pulp.dir_entry('/var/cache/pulp', 'cache') + assert cache_item is not None + assert '/var/lib/pulp/' in cache_item['link'] + + +def test_ls_var_lib_mongodb_doc_examples(): + env = { + 'ls_var_cache_pulp': LsVarCachePulp(context_wrap(LS_VAR_CACHE_PULP, path="insights_commands/ls_-lan_.var.cache.pulp")), + } + failed, total = doctest.testmod(ls_var_cache_pulp, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 64b48a121..2af2266d2 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -312,6 +312,7 @@ class Specs(SpecSet): ls_usr_bin = RegistryPoint(filterable=True) ls_usr_lib64 = RegistryPoint(filterable=True) ls_usr_sbin = RegistryPoint(filterable=True) + ls_var_cache_pulp = RegistryPoint() ls_var_lib_mongodb = RegistryPoint() ls_var_lib_nova_instances = RegistryPoint() ls_var_log = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 1aba9ad24..1f50e88e3 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -542,6 +542,7 @@ def ld_library_path_of_pid(broker): ls_tmp = simple_command("/bin/ls -la /tmp") ls_usr_bin = simple_command("/bin/ls -lan /usr/bin") ls_usr_lib64 = simple_command("/bin/ls -lan /usr/lib64") + ls_var_cache_pulp = simple_command("/bin/ls -lan /var/cache/pulp") ls_var_lib_mongodb = simple_command("/bin/ls -la /var/lib/mongodb") ls_var_lib_nova_instances = simple_command("/bin/ls -laRZ /var/lib/nova/instances") ls_var_log = simple_command("/bin/ls -la /var/log /var/log/audit") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index b0c58a535..f75527555 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -113,6 +113,7 @@ class InsightsArchiveSpecs(Specs): ls_tmp = simple_file("insights_commands/ls_-la_.tmp") ls_usr_bin = simple_file("insights_commands/ls_-lan_.usr.bin") ls_usr_lib64 = 
simple_file("insights_commands/ls_-lan_.usr.lib64") + ls_var_cache_pulp = simple_file("insights_commands/ls_-lan_.var.cache.pulp") ls_var_lib_mongodb = simple_file("insights_commands/ls_-la_.var.lib.mongodb") ls_var_lib_nova_instances = simple_file("insights_commands/ls_-laRZ_.var.lib.nova.instances") ls_var_log = simple_file("insights_commands/ls_-la_.var.log_.var.log.audit") From 81af16a9ab0b69fa1643fd1dee28cfe77b3fd9b6 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Wed, 10 Feb 2021 16:47:22 -0600 Subject: [PATCH 321/892] Raise exceptions on empty filter strings (#2949) Fixes #2948 Signed-off-by: Christopher Sams --- insights/core/filters.py | 19 +++++++++++++------ insights/tests/test_filters.py | 5 +++++ 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/insights/core/filters.py b/insights/core/filters.py index 22cbd320c..28a3e3cbf 100644 --- a/insights/core/filters.py +++ b/insights/core/filters.py @@ -69,14 +69,21 @@ def add_filter(component, patterns): def inner(component, patterns): if component in _CACHE: del _CACHE[component] + + types = six.string_types + (list, set) + if not isinstance(patterns, types): + raise TypeError("Filter patterns must be of type string, list, or set.") + if isinstance(patterns, six.string_types): - FILTERS[component].add(patterns) + patterns = set([patterns]) elif isinstance(patterns, list): - FILTERS[component] |= set(patterns) - elif isinstance(patterns, set): - FILTERS[component] |= patterns - else: - raise TypeError("patterns must be string, list, or set.") + patterns = set(patterns) + + for pat in patterns: + if not pat: + raise Exception("Filter patterns must not be empy.") + + FILTERS[component] |= patterns if not plugins.is_datasource(component): for dep in dr.run_order(dr.get_dependency_graph(component)): diff --git a/insights/tests/test_filters.py b/insights/tests/test_filters.py index 55c2d4367..04cd4f4d2 100644 --- a/insights/tests/test_filters.py +++ b/insights/tests/test_filters.py @@ -108,3 +108,8 @@ def test_add_filter_exception_not_filterable(): def test_add_filter_exception_raw(): with pytest.raises(Exception): filters.add_filter(Specs.metadata_json, "[]") + + +def test_add_filter_exception_empty(): + with pytest.raises(Exception): + filters.add_filter(Specs.ps_aux, "") From f64019a9e05203c260ed7f42772f9318fa1d1883 Mon Sep 17 00:00:00 2001 From: Akshay Gaikwad Date: Thu, 11 Feb 2021 04:37:42 +0530 Subject: [PATCH 322/892] Correctly handle uppercase VM name in "VirshListAll.get_vm_state()" (#2944) The `keywords` attribute stores VM names in lowercase, however `search()` look-up into `self.cols` that does not contains VM names in lowercase. 
Signed-off-by: Akshay Gaikwad --- insights/parsers/tests/test_virsh_list_all.py | 1 + insights/parsers/virsh_list_all.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/parsers/tests/test_virsh_list_all.py b/insights/parsers/tests/test_virsh_list_all.py index 8c4de6c18..12b83397e 100644 --- a/insights/parsers/tests/test_virsh_list_all.py +++ b/insights/parsers/tests/test_virsh_list_all.py @@ -41,6 +41,7 @@ def test_virsh_output(): assert output.get_vm_state('rhel9.0') is None assert ('cfme' in output) is False assert ('cfme-5.7.13' in output) is True + assert output.get_vm_state("RHOSP10") == "shut off" def test_virsh_output_no_vms(): diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py index b881757ee..0157f130d 100644 --- a/insights/parsers/virsh_list_all.py +++ b/insights/parsers/virsh_list_all.py @@ -132,7 +132,6 @@ def get_vm_state(self, vmname): str: State of VM. Returns None if, ``vmname`` does not exist. ''' - vmname = vmname.lower() - if vmname in self.keywords: + if vmname.lower() in self.keywords: return self.search(name=vmname)[0]['state'] return None From d7487c1d4c77fa2436c232ce2c6144a2b4bc63a3 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 11 Feb 2021 22:08:22 +0800 Subject: [PATCH 323/892] Add more path to postgresql_log and postgresql_conf (#2923) * For satellite 6.8, it uses rh-postgresql12-postgresql-server, the location of configuration file and error logs changes too. Signed-off-by: Huanhuan Li --- insights/specs/default.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 1f50e88e3..a974f3d6d 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -714,15 +714,19 @@ def pcp_enabled(broker): postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") postgresql_conf = first_file([ - "/var/lib/pgsql/data/postgresql.conf", - "/opt/rh/postgresql92/root/var/lib/pgsql/data/postgresql.conf", - "database/postgresql.conf" - ]) - postgresql_log = first_of([ - glob_file("/var/lib/pgsql/data/pg_log/postgresql-*.log"), - glob_file("/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_log/postgresql-*.log"), - glob_file("/database/postgresql-*.log") - ]) + "/var/opt/rh/rh-postgresql12/lib/pgsql/data/postgresql.conf", + "/var/lib/pgsql/data/postgresql.conf", + "/opt/rh/postgresql92/root/var/lib/pgsql/data/postgresql.conf", + "database/postgresql.conf" + ]) + postgresql_log = first_of( + [ + glob_file("/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log"), + glob_file("/var/lib/pgsql/data/pg_log/postgresql-*.log"), + glob_file("/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_log/postgresql-*.log"), + glob_file("/database/postgresql-*.log") + ] + ) puppetserver_config = simple_file("/etc/sysconfig/puppetserver") proc_netstat = simple_file("proc/net/netstat") proc_slabinfo = simple_file("proc/slabinfo") From 671b3983579e478ba739ba97a16b40e6ea566ce1 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 11 Feb 2021 09:27:12 -0500 Subject: [PATCH 324/892] fix: prevent soscleaner from erasing insights_archive.txt (#2946) Signed-off-by: Jeremy Crafts --- insights/contrib/soscleaner.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index b1af621ec..22eeda335 100644 --- a/insights/contrib/soscleaner.py +++ 
b/insights/contrib/soscleaner.py @@ -95,6 +95,9 @@ def _skip_file(self, d, files): #if mode == '200' or mode == '444' or mode == '400': # skip_list.append(f) mime_type = content_type.from_file(f_full) + if f == 'insights_archive.txt': + # don't exclude this file! we need it to parse core collection archives + continue if 'text' not in mime_type and 'json' not in mime_type: skip_list.append(f) From 6eea8c1831b24640d24501777516cde0a463f589 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 11 Feb 2021 11:15:14 -0500 Subject: [PATCH 325/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 449957513..d3752c560 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -563,6 +563,11 @@ ], "symbolic_name": "ls_usr_bin" }, + { + "command": "/bin/ls -lan /var/cache/pulp", + "pattern": [], + "symbolic_name": "ls_var_cache_pulp" + }, { "command": "/bin/ls -la /var/lib/mongodb", "pattern": [], @@ -1359,11 +1364,6 @@ "pattern": [], "symbolic_name": "rpm_V_packages" }, - { - "command": "python -m insights.tools.cat --no-header sap_hdb_version", - "pattern": [], - "symbolic_name": "sap_hdb_version" - }, { "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", "pattern": [], @@ -3090,6 +3090,11 @@ "pattern": [], "symbolic_name": "postgresql_conf" }, + { + "file": "/var/opt/rh/rh-postgresql12/lib/pgsql/data/postgresql.conf", + "pattern": [], + "symbolic_name": "postgresql_conf" + }, { "file": "/var/lib/pgsql/data/pg_log/()*postgresql-.+\\.log", "pattern": [ @@ -4209,6 +4214,11 @@ "symbolic_name": "numa_cpus", "pattern": [] }, + { + "glob": "/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log", + "symbolic_name": "postgresql_log", + "pattern": [] + }, { "glob": "/etc/rsyslog.d/*.conf", "pattern": [ @@ -4267,5 +4277,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-01-28T16:16:27.908132" + "version": "2021-02-05T09:05:41.082538" } \ No newline at end of file From d3db8ba3483ca161f4b4a3f0cb74e580ce0e5ede Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 18 Feb 2021 09:13:08 -0600 Subject: [PATCH 326/892] Fix problem with corosync_cmapctl_cmd_list dataaource (#2955) * Check for presence of command on system before returning results to be executed * Make list of commands consistent for all executables * Fix #2954 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/default.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index a974f3d6d..30156c05a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -9,6 +9,7 @@ """ import logging +import os import re import json @@ -304,13 +305,22 @@ def corosync_cmapctl_cmd_list(broker): Returns: list: A list of related corosync-cmapctl commands based the RHEL version. 
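    A condensed sketch of the fixed behaviour (hypothetical function; the
    real datasource below resolves the RHEL version via the ``IsRhel7`` and
    ``IsRhel8`` broker components)::

        import os

        def corosync_commands(rhel_major, cmd='/usr/sbin/corosync-cmapctl'):
            # skip entirely when the executable is absent
            if not os.path.exists(cmd):
                return []
            options = {7: ['-d runtime.schedmiss.timestamp', '-d runtime.schedmiss.delay'],
                       8: ['-m stats', '-C schedmiss']}.get(rhel_major)
            if options is None:
                return []
            return [cmd] + [' '.join([cmd, o]) for o in options]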
""" - if broker.get(IsRhel7): - return ["/usr/sbin/corosync-cmapctl", 'corosync-cmapctl -d runtime.schedmiss.timestamp', 'corosync-cmapctl -d runtime.schedmiss.delay'] - if broker.get(IsRhel8): - return ["/usr/sbin/corosync-cmapctl", '/usr/sbin/corosync-cmapctl -m stats', '/usr/sbin/corosync-cmapctl -C schedmiss'] + corosync_cmd = '/usr/sbin/corosync-cmapctl' + if os.path.exists(corosync_cmd): + if broker.get(IsRhel7): + return [ + corosync_cmd, + ' '.join([corosync_cmd, '-d runtime.schedmiss.timestamp']), + ' '.join([corosync_cmd, '-d runtime.schedmiss.delay'])] + if broker.get(IsRhel8): + return [ + corosync_cmd, + ' '.join([corosync_cmd, '-m stats']), + ' '.join([corosync_cmd, '-C schedmiss'])] + raise SkipComponent() - corosync_cmapctl = foreach_execute(corosync_cmapctl_cmd_list, "%s") + corosync_cmapctl = foreach_execute(corosync_cmapctl_cmd_list, "%s") corosync_conf = simple_file("/etc/corosync/corosync.conf") cpu_cores = glob_file("sys/devices/system/cpu/cpu[0-9]*/online") cpu_siblings = glob_file("sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list") From 2dcbb90ef641d645d7058a2d7be3a3c6ef15307b Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Thu, 18 Feb 2021 16:34:36 +0100 Subject: [PATCH 327/892] Fix handling of edge cases for Sealert parser (#2953) https://github.com/RedHatInsights/insights-core/issues/2951 Signed-off-by: Stanislav Kontar --- insights/parsers/sealert.py | 11 +++----- insights/parsers/tests/test_sealert.py | 36 ++++++++++++++++++++++---- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/insights/parsers/sealert.py b/insights/parsers/sealert.py index bbe7a7821..08708e5b0 100644 --- a/insights/parsers/sealert.py +++ b/insights/parsers/sealert.py @@ -6,7 +6,6 @@ from insights import CommandParser from insights import parser from insights.specs import Specs -from insights.parsers import SkipException class Report(object): @@ -101,11 +100,12 @@ class Sealert(CommandParser): Raises: SkipException: When output is empty """ + SELINUX_DISABLED_MESSAGE = "unable to establish connection to setroubleshoot daemon!" - def parse_content(self, content): - if not content: - raise SkipException("Input content is empty") + def __init__(self, context): + super(Sealert, self).__init__(context, extra_bad_lines=[self.SELINUX_DISABLED_MESSAGE]) + def parse_content(self, content): self.raw_lines = content self.reports = [] @@ -114,6 +114,3 @@ def parse_content(self, content): self.reports.append(Report()) if self.reports: # skips the first report if it contains only partial data self.reports[-1].append_line(line) - - if not self.reports: - raise SkipException("No sealert reports") diff --git a/insights/parsers/tests/test_sealert.py b/insights/parsers/tests/test_sealert.py index bd2cf7974..c06f72546 100644 --- a/insights/parsers/tests/test_sealert.py +++ b/insights/parsers/tests/test_sealert.py @@ -1,4 +1,4 @@ -from insights.parsers import SkipException +from insights.core import ContentException from insights.tests import context_wrap from insights.parsers.sealert import Sealert, Report import pytest @@ -120,6 +120,18 @@ """.format(REPORT_1, REPORT_2) +INPUT_NO_REPORTS = "" + +INPUT_DISABLED = """ +Unable to establish connection to setroubleshoot daemon! +Check output of 'journalctl -t setroubleshoot' for more details. 
+""" + +INPUT_NOT_INSTALLED = """ +bash: /bin/sealert: No such file or directory +""" + + def test_report(): r = Report() r.append_line("") @@ -134,13 +146,27 @@ def test_report(): def test_sealert(): - with pytest.raises(SkipException): - Sealert(context_wrap(INPUT_1)) - with pytest.raises(SkipException): - Sealert(context_wrap("")) sealert = Sealert(context_wrap(INPUT_2)) assert len(sealert.reports) == 2 assert str(sealert.reports[0]) == REPORT_1 assert str(sealert.reports[1]) == REPORT_2 assert sealert.reports[0].lines[10] == REPORT_1.split("\n")[10] assert sealert.reports[0].lines_stripped() == REPORT_1.split("\n") + + +def test_sealert_edge_cases(): + # No reports + sealert = Sealert(context_wrap(INPUT_NO_REPORTS)) + assert len(sealert.reports) == 0 + + # Invalid data + sealert = Sealert(context_wrap(INPUT_1)) + assert len(sealert.reports) == 0 + + # Not available due to disabled + with pytest.raises(ContentException): + Sealert(context_wrap(INPUT_DISABLED)) + + # Not installed + with pytest.raises(ContentException): + Sealert(context_wrap(INPUT_NOT_INSTALLED)) From 67010a3f88acc83e2f24645f6b9e03f2ec895ea0 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 18 Feb 2021 12:14:12 -0500 Subject: [PATCH 328/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index d3752c560..02110ef71 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -2451,6 +2451,7 @@ ", type vxfs) has no security xattr handler", "- image is referenced in one or more repositories", "/input/input", + "/usr/lib/ocf/resource.d/heartbeat/azure-lb: line 91: kill: Binary: arguments must be process or job IDs", "17763", ": segfault at ", "Abort command issued", @@ -2507,6 +2508,8 @@ "The threshold number of context switches per second per CPU", "This system does not support \"SSSE3\"", "Throttling request took", + "Unit ip6tables.service entered failed state", + "Unit iptables.service entered failed state", "Virtualization daemon", "] trap divide error ", "_NET_ACTIVE_WINDOW", @@ -2559,12 +2562,11 @@ "kernel: kvm: disabled by bios", "kernel: lockd: Unknown symbol register_inet6addr_notifier", "kernel: lockd: Unknown symbol unregister_inet6addr_notifier", + "kernel: megaraid_sas: FW detected to be in faultstate, restarting it", "kernel: megasas: Found FW in FAULT state, will reset adapter.", "kernel: nfs: server", "khash_super_prune_nolock", - "kill: Binary: arguments must be process or job IDs", "link status up for interface", - "megaraid_sas: FW detected to be in faultstate, restarting it", "mode:0x20", "multipathd.service operation timed out. 
Terminating", "netlink_socket|ERR|fcntl: Too many open file", @@ -2584,8 +2586,6 @@ "start request repeated too quickly for docker.service", "state changed timeout -> done", "swapper: page allocation failure", - "systemd: Unit ip6tables.service entered failed state", - "systemd: Unit iptables.service entered failed state", "systemd[1]: Received SIGCHLD from PID", "tg3_start_xmit", "there is a meaningful conflict", @@ -4217,7 +4217,14 @@ { "glob": "/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log", "symbolic_name": "postgresql_log", - "pattern": [] + "pattern": [ + "FATAL", + "checkpoints are occurring too frequently", + "connection limit exceeded for non-superusers", + "database is not accepting commands to avoid wraparound data loss in database", + "must be vacuumed within", + "remaining connection slots are reserved for non-replication superuser connections" + ] }, { "glob": "/etc/rsyslog.d/*.conf", @@ -4277,5 +4284,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-02-05T09:05:41.082538" + "version": "2021-02-11T11:20:06.722974" } \ No newline at end of file From 753763fe069f0c193b709105ad757e7e12dcfeda Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Fri, 19 Feb 2021 08:49:32 -0600 Subject: [PATCH 329/892] Spec to collect pcp data via pmlogsummary (#2950) * Spec to collect pcp data via pmlogsummary * Update PR to include parser and docs * Update spec to perform all collection in the datasource * Add parser and test for pmlog_summary spec * Add documentation to datasource and parser Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Add path to pmlogsummary command Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Updates based on PR feedback * Update datasource to check for file existance and output filename * Add command_with_args datasource to actually execute the command * Update tests and documentation Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Changes to specs Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Removed duplicate import after rebase Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> Co-authored-by: Suraj Patil --- docs/shared_parsers_catalog/pmlog_summary.rst | 3 + insights/parsers/pmlog_summary.py | 107 ++++++++++++++++++ insights/parsers/tests/test_pmlog_summary.py | 53 +++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 32 ++++++ 5 files changed, 196 insertions(+) create mode 100644 docs/shared_parsers_catalog/pmlog_summary.rst create mode 100644 insights/parsers/pmlog_summary.py create mode 100644 insights/parsers/tests/test_pmlog_summary.py diff --git a/docs/shared_parsers_catalog/pmlog_summary.rst b/docs/shared_parsers_catalog/pmlog_summary.rst new file mode 100644 index 000000000..f2124d8ba --- /dev/null +++ b/docs/shared_parsers_catalog/pmlog_summary.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.pmlog_summary + :members: + :show-inheritance: diff --git a/insights/parsers/pmlog_summary.py b/insights/parsers/pmlog_summary.py new file mode 100644 index 000000000..660e175e9 --- /dev/null +++ b/insights/parsers/pmlog_summary.py @@ -0,0 +1,107 @@ +""" +PmLogSummary - Command ``pmlogsummary`` +======================================= +""" + +from insights import parser, CommandParser +from insights.parsers import SkipComponent +from insights.specs import Specs + + +def parse(data): + """ + Parse a set of key/value pairs into a heirarchical dictionary of + typed values + + Arguments: + data (dict): Input dictionary of key/value pairs + + Returns: + dict: Heirarchical dictionary with keys separated at "." and type + conversion of the numerical values + """ + result = {} + + def typed(x): + try: + return float(x) + except Exception: + return x + + def insert(k, v): + cur = result + key_parts = k.split(".") + + # walk down the structure to the correct leaf + for part in key_parts: + if part not in cur: + cur[part] = {} + cur = cur[part] + + # break the value apart and store it + l, r = v.split(None, 1) + cur["val"] = typed(l) + cur["units"] = r.strip() + + def kvs(): + # deal with whitespace and high level splitting + for line in data: + line = line.strip() + if line: + yield line.split(None, 1) + + for k, v in kvs(): + insert(k, v) + + return result + + +@parser(Specs.pmlog_summary) +class PmLogSummary(CommandParser, dict): + """ + Parser to parse the output of the ``pmlogsummary`` command + + Sample output of the command is:: + + mem.util.used 3133919.812 Kbyte + mem.physmem 3997600.000 Kbyte + kernel.all.cpu.user 0.003 none + kernel.all.cpu.sys 0.004 none + kernel.all.cpu.nice 0.000 none + kernel.all.cpu.steal 0.000 none + kernel.all.cpu.idle 3.986 none + disk.all.total 0.252 count / sec + + Output is parsed and stored as a dictionary. Each value is + stored as a dict in the form ``{'val': number or string, 'units': string}``. + Keys are a hierarchy of the input key value split on the "." character. 
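    The nesting behaviour in isolation (hypothetical helper mirroring the
    ``insert`` closure in ``parse`` above)::

        def insert_dotted(result, key, value, units):
            cur = result
            # walk "a.b.c" down into nested dicts and store a typed leaf
            for part in key.split('.'):
                cur = cur.setdefault(part, {})
            cur['val'] = value
            cur['units'] = units

        d = {}
        insert_dotted(d, 'mem.physmem', 3997600.0, 'Kbyte')
        assert d['mem']['physmem'] == {'val': 3997600.0, 'units': 'Kbyte'}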
+ For instance input line "mem.util.used 3133919.812 Kbyte" is parsed + as:: + + { + 'mem': { + 'util': { + 'used': { + 'val': 3133919.812, + 'units': 'Kbyte' + } + } + } + } + + Example: + >>> type(pmlog_summary) + + >>> 'mem' in pmlog_summary + True + >>> pmlog_summary['disk']['all']['total'] == {'val': 0.252, 'units': 'count / sec'} + True + """ + + def parse_content(self, content): + data = parse(content) + + if len(data) == 0: + raise SkipComponent() + + self.update(data) diff --git a/insights/parsers/tests/test_pmlog_summary.py b/insights/parsers/tests/test_pmlog_summary.py new file mode 100644 index 000000000..eb7826ec4 --- /dev/null +++ b/insights/parsers/tests/test_pmlog_summary.py @@ -0,0 +1,53 @@ +import pytest +import doctest + +from insights.tests import context_wrap +from insights.parsers import SkipComponent +from insights.parsers import pmlog_summary +from insights.parsers.pmlog_summary import PmLogSummary + +PMLOG = """ +mem.util.used 3133919.812 Kbyte +mem.physmem 3997600.000 Kbyte +kernel.all.cpu.user 0.003 none +kernel.all.cpu.sys 0.004 none +kernel.all.cpu.nice 0.000 none +kernel.all.cpu.steal 0.000 none +kernel.all.cpu.idle 3.986 none +disk.all.total 0.252 count / sec +""" + +PMLOG_EMPTY = """ +""" + + +def test_pmlog_summary(): + pmlog_summary = PmLogSummary(context_wrap(PMLOG)) + assert len(pmlog_summary) == 3 + assert len(pmlog_summary['mem']) == 2 + assert pmlog_summary['mem']['util']['used'] == {'val': 3133919.812, 'units': 'Kbyte'} + assert pmlog_summary['mem']['physmem'] == {'val': 3997600.0, 'units': 'Kbyte'} + assert pmlog_summary['disk']['all']['total'] == {'val': 0.252, 'units': 'count / sec'} + assert 'not.present' not in pmlog_summary + assert pmlog_summary['kernel'] == { + 'all': {'cpu': { + 'user': {'val': 0.003, 'units': 'none'}, + 'sys': {'val': 0.004, 'units': 'none'}, + 'nice': {'val': 0.0, 'units': 'none'}, + 'steal': {'val': 0.0, 'units': 'none'}, + 'idle': {'val': 3.986, 'units': 'none'}, + }} + } + + +def test_pmlog_summmary(): + with pytest.raises(SkipComponent): + PmLogSummary(context_wrap(PMLOG_EMPTY)) + + +def test_doc_examples(): + env = { + 'pmlog_summary': PmLogSummary(context_wrap(PMLOG)) + } + failed, _ = doctest.testmod(pmlog_summary, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 2af2266d2..4b587e3da 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -481,6 +481,7 @@ class Specs(SpecSet): pcs_status = RegistryPoint() php_ini = RegistryPoint(filterable=True) pluginconf_d = RegistryPoint(multi_output=True) + pmlog_summary = RegistryPoint() podman_container_inspect = RegistryPoint(multi_output=True) podman_image_inspect = RegistryPoint(multi_output=True) podman_list_containers = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 30156c05a..307f90d52 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -37,6 +37,7 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs +import datetime logger = logging.getLogger(__name__) @@ -721,6 +722,37 @@ def pcp_enabled(broker): pcs_status = simple_command("/usr/sbin/pcs status") php_ini = first_file(["/etc/opt/rh/php73/php.ini", "/etc/opt/rh/php72/php.ini", "/etc/php.ini"]) pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf") + + @datasource(Ps, HostContext) + def pmlog_summary_file(broker): + """ + Determines the name for the pmlogger file and checks 
for its existence
+
+        Returns the name of the latest pmlogger summary file if a running ``pmlogger``
+        process is detected on the system.
+
+        Returns:
+            str: Full path to the latest pmlogger file
+
+        Raises:
+            SkipComponent: raises this exception when the command is not present or
+                the file is not present
+        """
+        ps = broker[Ps]
+        if ps.search(COMMAND__contains='pmlogger'):
+            pcp_log_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y%m%d")
+            file = "/var/log/pcp/pmlogger/ros/%s.index" % (pcp_log_date)
+            try:
+                if os.path.exists(file) and os.path.isfile(file):
+                    return file
+            except Exception as e:
+                raise SkipComponent("Failed to check for pmlogger file existence: {0}".format(str(e)))
+
+        raise SkipComponent
+
+    pmlog_summary = command_with_args(
+        "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free",
+        pmlog_summary_file)
     postconf_builtin = simple_command("/usr/sbin/postconf -C builtin")
     postconf = simple_command("/usr/sbin/postconf")
     postgresql_conf = first_file([

From b9a296defc6cb0662b27084f15aec9c5bf5b2e63 Mon Sep 17 00:00:00 2001
From: Akshay Gaikwad
Date: Wed, 24 Feb 2021 02:21:16 +0530
Subject: [PATCH 330/892] Add mdadm_E spec for sosreport (#2960)

Sosreport specs entry for mdadm_E spec.

Signed-off-by: Akshay Gaikwad
---
 insights/specs/sos_archive.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index 5a06b6c95..6cf614533 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -138,6 +138,7 @@ class SosSpecs(Specs):
     ls_dev = first_file(["sos_commands/block/ls_-lanR_.dev", "sos_commands/devicemapper/ls_-lanR_.dev"])
     lvs = first_file(["sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0", "sos_commands/lvm2/lvs_-a_-o_devices"])
     manila_conf = first_file(["/var/lib/config-data/puppet-generated/manila/etc/manila/manila.conf", "/etc/manila/manila.conf"])
+    mdadm_E = glob_file("sos_commands/md/mdadm_-E_*")
     mistral_executor_log = simple_file("/var/log/mistral/executor.log")
     modinfo_all = glob_file("sos_commands/kernel/modinfo_*")
     mokutil_sbstate = simple_file("sos_commands/boot/mokutil_--sb-state")

From 0054169a4f45a97bac80b610f0d7b30d719a06d1 Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Thu, 25 Feb 2021 08:13:24 -0600
Subject: [PATCH 331/892] Use specific exception for missing archive spec
 (#2963)

* When deserializing a spec from an archive use ContentException
  instead of Exception when file is missing from the archive
* Fix #2962

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 insights/core/serde.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/insights/core/serde.py b/insights/core/serde.py
index adff200fe..a4d413402 100644
--- a/insights/core/serde.py
+++ b/insights/core/serde.py
@@ -87,11 +87,11 @@ def serialize(obj, root=None):

 def deserialize(data, root=None):
-    try:
-        (_type, from_dict) = DESERIALIZERS.get(data["type"])
-        return from_dict(_type, data["object"], root=root)
-    except Exception:
+    type_data = DESERIALIZERS.get(data["type"])
+    if type_data is None:
         raise Exception("Unrecognized type: %s" % data["type"])
+    (_type, from_dict) = type_data
+    return from_dict(_type, data["object"], root=root)

 def marshal(v, root=None, pool=None):
@@ -146,6 +146,8 @@ def hydrate(self, broker=None):
         Loads a
Broker from a previously saved one. A Broker is created if one isn't provided. """ + from insights.core.spec_factory import ContentException + broker = broker or dr.Broker() for path in glob(os.path.join(self.meta_data, "*")): try: @@ -156,6 +158,8 @@ def hydrate(self, broker=None): if results: broker[comp] = results broker.exec_times[comp] = exec_time + ser_time + except ContentException as ex: + log.debug(ex) except Exception as ex: log.warning(ex) return broker From 69f216ec99b4eaadc53fca24405567ed029c94e7 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 25 Feb 2021 22:21:03 +0800 Subject: [PATCH 332/892] New spec and parser for insights-client.conf (#2961) Signed-off-by: Xiangce Liu --- .../insights_client_conf.rst | 3 +++ insights/parsers/insights_client_conf.py | 24 +++++++++++++++++++ .../tests/test_insights_client_conf.py | 16 +++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 45 insertions(+) create mode 100644 docs/shared_parsers_catalog/insights_client_conf.rst create mode 100644 insights/parsers/insights_client_conf.py create mode 100644 insights/parsers/tests/test_insights_client_conf.py diff --git a/docs/shared_parsers_catalog/insights_client_conf.rst b/docs/shared_parsers_catalog/insights_client_conf.rst new file mode 100644 index 000000000..361170c66 --- /dev/null +++ b/docs/shared_parsers_catalog/insights_client_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.insights_client_conf + :members: + :show-inheritance: diff --git a/insights/parsers/insights_client_conf.py b/insights/parsers/insights_client_conf.py new file mode 100644 index 000000000..7fd4702a6 --- /dev/null +++ b/insights/parsers/insights_client_conf.py @@ -0,0 +1,24 @@ +""" +InsightsClientConf - file ``/etc/insights-client/insights-client.conf`` +======================================================================= + +""" +from insights import IniConfigFile, parser, add_filter +from insights.specs import Specs + +add_filter(Specs.insights_client_conf, "[") + + +@parser(Specs.insights_client_conf) +class InsightsClientConf(IniConfigFile): + """ + This class provides parsing for the file ``/etc/insights-client/insights-client.conf``. + + Sample input data is in the format:: + + [insights-client] + auto_update=False + + See the :class:`insights.core.IniConfigFile` class for examples. 
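    For orientation, the same lookup with the standard library alone
    (Python 3 ``configparser``; the real parser builds on
    ``insights.core.IniConfigFile`` instead)::

        from configparser import ConfigParser

        def auto_update(lines):
            cp = ConfigParser()
            cp.read_string('\n'.join(lines))
            # values come back as strings, not booleans
            return cp.get('insights-client', 'auto_update')

        assert auto_update(['[insights-client]', 'auto_update=False']) == 'False'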
+ """ + pass diff --git a/insights/parsers/tests/test_insights_client_conf.py b/insights/parsers/tests/test_insights_client_conf.py new file mode 100644 index 000000000..749943d89 --- /dev/null +++ b/insights/parsers/tests/test_insights_client_conf.py @@ -0,0 +1,16 @@ +from insights.parsers.insights_client_conf import InsightsClientConf +from insights.tests import context_wrap + +CLIENT_CONF = """ +[insights-client] +auto_update=False +""" + + +def test_insights_client_conf(): + conf = InsightsClientConf(context_wrap(CLIENT_CONF)) + assert conf is not None + assert list(conf.sections()) == ['insights-client'] + assert conf.has_option('insights-client', 'auto_update') + assert not conf.has_option('yabba', 'dabba_do') + assert conf.get('insights-client', 'auto_update') == 'False' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 4b587e3da..4ee767c66 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -242,6 +242,7 @@ class Specs(SpecSet): init_ora = RegistryPoint() initscript = RegistryPoint(multi_output=True) init_process_cgroup = RegistryPoint() + insights_client_conf = RegistryPoint(filterable=True) installed_rpms = RegistryPoint() interrupts = RegistryPoint() ip6tables_permanent = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 307f90d52..4128943f9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -486,6 +486,7 @@ def httpd_cmd(broker): imagemagick_policy = glob_file(["/etc/ImageMagick/policy.xml", "/usr/lib*/ImageMagick-6.5.4/config/policy.xml"]) initctl_lst = simple_command("/sbin/initctl --system list") init_process_cgroup = simple_file("/proc/1/cgroup") + insights_client_conf = simple_file('/etc/insights-client/insights-client.conf') interrupts = simple_file("/proc/interrupts") ip_addr = simple_command("/sbin/ip addr") ip_addresses = simple_command("/bin/hostname -I") From 915f89d20dfa9361e7f49f9284d38d9e9549112f Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 25 Feb 2021 10:27:12 -0500 Subject: [PATCH 333/892] check for null PATH (#2936) Signed-off-by: Jeremy --- insights/util/__init__.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/insights/util/__init__.py b/insights/util/__init__.py index 2eacd982a..e91c98027 100644 --- a/insights/util/__init__.py +++ b/insights/util/__init__.py @@ -61,11 +61,13 @@ def which(cmd, env=None): return cmd return None - paths = env.get("PATH").split(os.pathsep) - for path in paths: - c = os.path.join(path, cmd) - if os.access(c, os.X_OK) and os.path.isfile(c): - return c + envpath = env.get("PATH") + if envpath: + paths = envpath.split(os.pathsep) + for path in paths: + c = os.path.join(path, cmd) + if os.access(c, os.X_OK) and os.path.isfile(c): + return c return None From 7767d33d098d912addd8adbe615a31e9b787b073 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 25 Feb 2021 16:11:10 +0000 Subject: [PATCH 334/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 02110ef71..6d13bc685 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1648,6 +1648,11 @@ "pattern": [], "symbolic_name": "init_process_cgroup" }, + { + "file": "/etc/insights-client/insights-client.conf", + "pattern": [], + "symbolic_name": "insights_client_conf" + }, { "file": 
"/etc/insights-client/machine-id", "pattern": [], @@ -4284,5 +4289,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-02-11T11:20:06.722974" + "version": "2021-02-18T12:21:36.722918" } \ No newline at end of file From 2b451480ee656b47e1dab791090d35052ee0e380 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Mon, 1 Mar 2021 09:29:17 +0800 Subject: [PATCH 335/892] New ld_library_path_of_user spec and UserLdLibraryPath parser (#2939) * New ld_library_path_of_user spec and UserLdLibraryPath parser - remove the ld_library_path_of_pid spec - deprecate the PidLdLibraryPath Signed-off-by: Xiangce Liu * Keep the error code and return even error when running the export Signed-off-by: Xiangce Liu * Use 'env' instead of 'export' - And remove the newly added PidLdLibraryPath instead of deprecating it Signed-off-by: Xiangce Liu * Fix flake8 errors Signed-off-by: Xiangce Liu --- insights/parsers/ld_library_path.py | 48 ++++++++----------- .../parsers/tests/test_ld_library_path.py | 36 +++++++------- insights/specs/__init__.py | 2 +- insights/specs/default.py | 33 +++++++++---- 4 files changed, 60 insertions(+), 59 deletions(-) diff --git a/insights/parsers/ld_library_path.py b/insights/parsers/ld_library_path.py index 99dc057f1..013e26299 100644 --- a/insights/parsers/ld_library_path.py +++ b/insights/parsers/ld_library_path.py @@ -1,63 +1,55 @@ """ -LdLibraryPath - LD_LIBRARY_PATH of PIDs -======================================= +LdLibraryPath - LD_LIBRARY_PATH of Users +======================================== -Parser for parsing the environment variable LD_LIBRARY_PATH of each PID. +Parser for parsing the environment variable LD_LIBRARY_PATH of each user """ from collections import namedtuple from insights import parser, Parser -from insights.parsers import SkipException, ParseException +from insights.parsers import SkipException from insights.specs import Specs -LdLibraryPath = namedtuple('LdLibraryPath', ('pid', 'path', 'raw')) -"""namedtuple: Type for storing the LdLibraryPath of PID""" +LdLibraryPath = namedtuple('LdLibraryPath', ('path', 'raw')) +"""namedtuple: Type for storing the LD_LIBRARY_PATH of users""" -@parser(Specs.ld_library_path_of_pid) -class PidLdLibraryPath(Parser, list): +@parser(Specs.ld_library_path_of_user) +class UserLdLibraryPath(Parser, list): """ - Base class for parsing the ``LD_LIBRARY_PATH`` variable of each PID of the - system into a list. + Base class for parsing the ``LD_LIBRARY_PATH`` variable of each regular + user of the system into a list. Typical content looks like:: - 105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib - 105902 /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run + /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib + + /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run Examples: >>> len(ld_lib_path) - 2 + 3 >>> isinstance(ld_lib_path[0].path, list) True >>> len(ld_lib_path[0].path) 3 >>> '/sapdb/clients/RH1/lib' in ld_lib_path[0].path True - >>> ld_lib_path[0].pid - '105901' + >>> '' in ld_lib_path[1].path # The empty value is kept. + True Raises: SkipException: When the output is empty or nothing needs to parse. - ParseException: When the line cannot be parsed. 
- """ def parse_content(self, content): - if not content: - raise SkipException - llds = [] for line in content: - pid, _, raw = [s.strip() for s in line.partition(' ')] - paths = raw - if not pid.isdigit(): - raise ParseException('Incorrect line: {0}'.format(line)) - if raw and raw[0] == raw[-1] and raw[0] in ('\'', '"'): - paths = raw[1:-1] - paths = paths.split(':') - llds.append(LdLibraryPath(pid, paths, raw)) + raw = line + if line and line[0] == line[-1] and line[0] in ('\'', '"'): + line = line[1:-1] + llds.append(LdLibraryPath(line.split(':'), raw)) if not llds: raise SkipException("LD_LIBRARY_PATH not set.") diff --git a/insights/parsers/tests/test_ld_library_path.py b/insights/parsers/tests/test_ld_library_path.py index 30c635c69..3d35fe290 100644 --- a/insights/parsers/tests/test_ld_library_path.py +++ b/insights/parsers/tests/test_ld_library_path.py @@ -1,6 +1,6 @@ -from insights.parsers.ld_library_path import PidLdLibraryPath +from insights.parsers.ld_library_path import UserLdLibraryPath from insights.tests import context_wrap -from insights.parsers import ld_library_path, SkipException, ParseException +from insights.parsers import ld_library_path, SkipException import doctest import pytest @@ -12,46 +12,42 @@ """.strip() LD_LIBRARY_PATH_DOC = """ -105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib -105902 /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run +/usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib + +/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run """.strip() LD_LIBRARY_PATH = """ -105901 /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib -105902 "/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run" -105903 -105904 '' +/usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +"/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run" + +'' """.strip() # noqa: W391 def test_ld_library_path(): - ret = PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH)) + ret = UserLdLibraryPath(context_wrap(LD_LIBRARY_PATH)) assert len(ret) == 4 - assert ret[0].pid == '105901' - assert ret[1].pid == '105902' - assert ret[2].pid == '105903' - assert ret[1].raw == LD_LIBRARY_PATH.splitlines()[1].split()[-1] + assert ret[0].raw == LD_LIBRARY_PATH.splitlines()[0] + assert ret[1].raw == LD_LIBRARY_PATH.splitlines()[1] assert ret[2].raw == '' assert ret[3].raw == "''" assert ret[2].path == [''] assert ret[3].path == [''] - for p in LD_LIBRARY_PATH.splitlines()[0].split()[-1].split(':'): + for p in LD_LIBRARY_PATH.splitlines()[0].split(':'): assert p in ret[0].path - for p in LD_LIBRARY_PATH.splitlines()[1].split()[-1].strip('"').split(':'): + for p in LD_LIBRARY_PATH.splitlines()[1].strip('"').split(':'): assert p in ret[1].path def test_empty_and_invalid(): with pytest.raises(SkipException): - PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_EMPTY)) - - with pytest.raises(ParseException): - PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_INVALID)) + UserLdLibraryPath(context_wrap(LD_LIBRARY_PATH_EMPTY)) def test_doc_examples(): env = { - 'ld_lib_path': PidLdLibraryPath(context_wrap(LD_LIBRARY_PATH_DOC)), + 'ld_lib_path': UserLdLibraryPath(context_wrap(LD_LIBRARY_PATH_DOC)), } failed, total = doctest.testmod(ld_library_path, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 4ee767c66..a6c1d8c81 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -286,7 
+286,7 @@ class Specs(SpecSet): ksmstate = RegistryPoint() kubepods_cpu_quota = RegistryPoint(multi_output=True) lastupload = RegistryPoint(multi_output=True) - ld_library_path_of_pid = RegistryPoint() + ld_library_path_of_user = RegistryPoint() libssh_client_config = RegistryPoint(filterable=True) libssh_server_config = RegistryPoint(filterable=True) libvirtd_log = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4128943f9..f3aba6348 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -14,7 +14,7 @@ import json from grp import getgrgid -from os import stat, listdir as os_listdir +from os import stat from pwd import getpwuid import yaml @@ -515,17 +515,30 @@ def httpd_cmd(broker): last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) - @datasource() - def ld_library_path_of_pid(broker): - pids = [p for p in sorted(os_listdir('/proc/')) if p.isdigit()] + @datasource(HostContext) + def regular_users(broker): + """ + Returns: The username of all regular users which could log in. + But do NOT collect or store them. + """ + ctx = broker[HostContext] + return ctx.shell_out("/bin/awk -F':' '!/nologin|false|sync|halt|shutdown/{print $1}' /etc/passwd") + + @datasource(regular_users, HostContext) + def ld_library_path_of_user(broker): + """ + Returns: The list of LD_LIBRARY_PATH of each regular user. + Username is NOT collected or stored. + """ + users = broker[DefaultSpecs.regular_users] + ctx = broker[HostContext] llds = [] - for p in pids: - with open('/proc/{0}/environ'.format(p), 'r') as fp: - vars = fp.read() - lld = [v.split('=', 1)[-1] for v in vars.split('\x00') if v.startswith('LD_LIBRARY_PATH=')] - llds.append("{0} {1}".format(p, lld[0])) if lld else None + for u in users: + ret, vvs = ctx.shell_out("/bin/su -l {0} -c /bin/env".format(u), keep_rc=True) + if ret == 0 and vvs: + llds.extend(v.split('=', 1)[-1] for v in vvs if "LD_LIBRARY_PATH=" in v) if llds: - return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/cat_all_PID_LD_LIBRARY_PATH') + return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') raise SkipComponent libssh_client_config = simple_file("/etc/libssh/libssh_client.config") From 5c258a3b15f836d2dfe8ad04525d32841fcf3cab Mon Sep 17 00:00:00 2001 From: Xiaoxue Wang Date: Tue, 2 Mar 2021 10:53:49 +0800 Subject: [PATCH 336/892] Add spec and parser for dmsetup_status (#2926) * Add spec and parser for dmsetup_status Signed-off-by: XiaoXue Wang * Update according to @xiangce's suggestions Signed-off-by: XiaoXue Wang * Fix flake8 error Signed-off-by: XiaoXue Wang * Update docstrings Signed-off-by: XiaoXue Wang --- insights/parsers/dmsetup.py | 153 ++++++++++++++++++ .../{test_dmsetup_info.py => test_dmsetup.py} | 89 +++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/specs/sos_archive.py | 1 + 6 files changed, 245 insertions(+), 1 deletion(-) rename insights/parsers/tests/{test_dmsetup_info.py => test_dmsetup.py} (51%) diff --git a/insights/parsers/dmsetup.py b/insights/parsers/dmsetup.py index 808a80ad5..ba693bef2 100644 --- a/insights/parsers/dmsetup.py +++ b/insights/parsers/dmsetup.py @@ -9,10 +9,14 @@ DmsetupInfo - command ``dmsetup info -C`` ----------------------------------------- +DmsetupStatus - command ``dmsetup status`` +------------------------------------------ + """ 
from collections import namedtuple from insights import parser, CommandParser from insights.parsers import parse_delimited_table +from insights.parsers import ParseException from insights.specs import Specs @@ -22,6 +26,12 @@ ) """ Data structure to represent dmsetup information """ +SetupStatus = namedtuple('SetupStatus', [ + 'device_name', 'start_sector', 'num_sectors', 'target_type', + 'target_args', 'parsed_args'] +) +""" Data structure to represent dmsetup status """ + @parser(Specs.dmsetup_info) class DmsetupInfo(CommandParser): @@ -121,3 +131,146 @@ def __getitem__(self, idx): Fetch a device by index in devices list """ return self.data[idx] + + +@parser(Specs.dmsetup_status) +class DmsetupStatus(CommandParser, list): + """ + ``dmsetup status -C`` command output + + Example input:: + + rootvg-tanlv: 0 6291456 linear + rootvg-ssnap: 0 16384000 snapshot 1560768/5120000 6088 + rootvg-optvolapp: 0 8192000 snapshot-origin + docker-253:10-1234567-0df13579: 0 20971520 thin 1922048 20971519 + docker-253:10-4254621-0496628a: 0 20971520 thin 1951744 20971519 + docker-253:10-4254621-d392682f: 0 20971520 thin 7106560 20971519 + rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 38/126512 - rw no_discard_passdown queue_if_no_space - + rootvg-tmpvol: 0 2048000 linear + rootvg-varvol: 0 18874368 snapshot Invalid + rootvg-optvol: 0 8192000 snapshot 616408/5120000 2408 + rootvg-varvol-cow: 0 5120000 linear + appsvg-lvapps_docker: 0 104857600 thin-pool 441 697/2048 20663/102400 - rw no_discard_passdown queue_if_no_space - + + Example data structure produced:: + + [ + SetupStatus( + device_name='rootvg-tanlv', start_sector='0', + num_sectors='6291456', target_type='linear', + target_args=None, parsed_args=None, + ), ... + ] + + Attributes: + names (list): Device names, in order found + by_name (dict): Access to each device by devicename + unparseable_lines (list): Unparseable raw lines + + Example: + >>> len(dmsetup_status) + 12 + >>> dmsetup_status.names[0] + 'rootvg-tanlv' + >>> dmsetup_status[1].target_type + 'snapshot' + >>> dmsetup_status[1].start_sector + '0' + >>> len(dmsetup_status.by_name) + 12 + >>> dmsetup_status[-1].parsed_args['used_metadata_blocks'] + '697' + >>> dmsetup_status[-1].parsed_args['total_metadata_blocks'] + '2048' + >>> dmsetup_status[-1].parsed_args['opts'] + ['rw', 'no_discard_passdown', 'queue_if_no_space', '-'] + """ + + def parse_content(self, content): + self.unparseable_lines = [] + for line in content: + _device_name, _device_info_str = line.rsplit(':', 1) + device_name = _device_name.strip() + device_info_spl = _device_info_str.strip().split(' ', 3) + if len(device_info_spl) < 3: + self.unparseable_lines.append(line) + continue + target_type = device_info_spl[2] + target_args = device_info_spl[3] if len(device_info_spl) == 4 else None + parsed_args = None + if target_args: + try: + parsed_args = self._parse_target_args(target_type, target_args) + except ParseException: + self.unparseable_lines.append(line) + self.append(SetupStatus( + device_name=device_name, + start_sector=device_info_spl[0], + num_sectors=device_info_spl[1], + target_type=target_type, + target_args=target_args, + parsed_args=parsed_args, + )) + + @property + def names(self): + return [dm[0] for dm in self] + + @property + def by_name(self): + return dict((dm[0], dm) for dm in self) + + def _parse_target_args(self, target_type, target_args): + pars_func_name = '_parse_target_args_' + target_type.replace('-', '_') + pars_func = getattr(self, pars_func_name, None) + return pars_func(target_args) 
if pars_func else None + + def _parse_target_args_thin_pool(self, target_args): + """ + Format: + / + / + ro|rw|out_of_data_space [no_]discard_passdown [error|queue]_if_no_space + needs_check|- metadata_low_watermark + Refer to https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt . + """ + args = target_args.split() + if len(args) < 8 or '/' not in args[1] or '/' not in args[2]: + raise ParseException("Invalid thin_pool target_args: {0}".format(target_args)) + parsed_args = {} + parsed_args['transaction_id'] = args[0] + parsed_args['used_metadata_blocks'], parsed_args['total_metadata_blocks'] = args[1].split('/', 1) + parsed_args['used_data_blocks'], parsed_args['total_data_blocks'] = args[2].split('/', 1) + parsed_args['held_metadata_root'] = args[3] + parsed_args['opts'] = args[4:8] + parsed_args['metadata_low_watermark'] = args[8] if len(args) > 8 else None + return parsed_args + + def _parse_target_args_thin(self, target_args): + """ + Format: + + Refer to https://www.kernel.org/doc/Documentation/device-mapper/thin-provisioning.txt . + """ + args = target_args.split() + if len(args) < 2: + raise ParseException("Invalid thin target_args: {0}".format(target_args)) + parsed_args = {} + parsed_args['nr_mapped_sectors'] = args[0] + parsed_args['highest_mapped_sector'] = args[1] + return parsed_args + + def _parse_target_args_snapshot(self, target_args): + """ + Format: + / + Refer to https://www.kernel.org/doc/Documentation/device-mapper/snapshot.txt . + """ + args = target_args.split() + if len(args) < 2 or '/' not in args[0]: + raise ParseException("Invalid snapshot target_args: {0}".format(target_args)) + parsed_args = {} + parsed_args['sectors_allocated'], parsed_args['total_sectors'] = args[0].split('/', 1) + parsed_args['metadata_sectors'] = args[1] + return parsed_args diff --git a/insights/parsers/tests/test_dmsetup_info.py b/insights/parsers/tests/test_dmsetup.py similarity index 51% rename from insights/parsers/tests/test_dmsetup_info.py rename to insights/parsers/tests/test_dmsetup.py index 70f3b2968..9a027967e 100644 --- a/insights/parsers/tests/test_dmsetup_info.py +++ b/insights/parsers/tests/test_dmsetup.py @@ -1,6 +1,7 @@ import doctest from insights.parsers import dmsetup from insights.parsers.dmsetup import DmsetupInfo, SetupInfo +from insights.parsers.dmsetup import DmsetupStatus, SetupStatus from insights.tests import context_wrap DMSETUP_INFO_1 = """ @@ -80,6 +81,91 @@ def test_dmsetup_setupinfo(): ) +DMSETUP_STATUS_1 = """ +rootvg-tanlv: 0 6291456 linear +rootvg-ssnap: 0 16384000 snapshot 1560768/5120000 6088 +rootvg-optvolapp: 0 8192000 snapshot-origin +docker-253:10-1234567-0df13579: 0 20971520 thin 1922048 20971519 +docker-253:10-4254621-0496628a: 0 20971520 thin 1951744 20971519 +docker-253:10-4254621-d392682f: 0 20971520 thin 7106560 20971519 +rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 38/126512 - rw no_discard_passdown queue_if_no_space - +rootvg-tmpvol: 0 2048000 linear +rootvg-varvol: 0 18874368 snapshot Invalid +rootvg-optvol: 0 8192000 snapshot 616408/5120000 2408 +rootvg-varvol-cow: 0 5120000 linear +appsvg-lvapps_docker: 0 104857600 thin-pool 441 697/2048 20663/102400 - rw no_discard_passdown queue_if_no_space - +""".strip() + +DMSETUP_STATUS_2 = """ +rootvg-tanlv: 0 6291456 +rootvg-ssnap: 0 16384000 unknown-type +rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 38/126512 - rw no_discard_passdown queue_if_no_space +rootvg-optvol: 0 8192000 snapshot-origin +docker-253:10-4254621-d392682f: 0 20971520 thin 7106560 
20971519 +docker-253:10-1234567-0df13579: 0 20971520 thin 1922048 +""".strip() + + +def test_dmsetup_status(): + r = DmsetupStatus(context_wrap(DMSETUP_STATUS_1)) + assert len(r) == 12 + assert len(r[0]) == 6 + assert r[0].device_name == 'rootvg-tanlv' + assert r[0].start_sector == '0' + assert r[0].num_sectors == '6291456' + assert r[0].target_type == 'linear' + assert r[0].target_args is None + assert r[0].parsed_args is None + assert r.names == ['rootvg-tanlv', 'rootvg-ssnap', 'rootvg-optvolapp', + 'docker-253:10-1234567-0df13579', 'docker-253:10-4254621-0496628a', + 'docker-253:10-4254621-d392682f', 'rootvg-docker--pool', + 'rootvg-tmpvol', 'rootvg-varvol', 'rootvg-optvol', + 'rootvg-varvol-cow', 'appsvg-lvapps_docker'] + + assert len(r.by_name) == len([dev.device_name for dev in r]) + assert r[0] == SetupStatus( + device_name='rootvg-tanlv', start_sector='0', + num_sectors='6291456', target_type='linear', + target_args=None, parsed_args=None, + ) + assert r[-1] == SetupStatus( + device_name='appsvg-lvapps_docker', start_sector='0', + num_sectors='104857600', target_type='thin-pool', + target_args='441 697/2048 20663/102400 - rw no_discard_passdown queue_if_no_space -', + parsed_args={ + 'transaction_id': '441', + 'used_metadata_blocks': '697', + 'total_metadata_blocks': '2048', + 'used_data_blocks': '20663', + 'total_data_blocks': '102400', + 'held_metadata_root': '-', + 'opts': ['rw', 'no_discard_passdown', 'queue_if_no_space', '-'], + 'metadata_low_watermark': None + }) + assert r[3] == SetupStatus( + device_name='docker-253:10-1234567-0df13579', start_sector='0', + num_sectors='20971520', target_type='thin', target_args='1922048 20971519', + parsed_args={'nr_mapped_sectors': '1922048', 'highest_mapped_sector': '20971519'} + ) + assert r[-3] == SetupStatus( + device_name='rootvg-optvol', start_sector='0', num_sectors='8192000', + target_type='snapshot', target_args='616408/5120000 2408', + parsed_args={'sectors_allocated': '616408', 'total_sectors': '5120000', 'metadata_sectors': '2408'} + ) + assert r[-4] == SetupStatus( + device_name='rootvg-varvol', start_sector='0', num_sectors='18874368', + target_type='snapshot', target_args='Invalid', parsed_args=None + ) + assert r.unparseable_lines == ['rootvg-varvol: 0 18874368 snapshot Invalid'] + + r = DmsetupStatus(context_wrap(DMSETUP_STATUS_2)) + assert len(r) == 5 + assert r.unparseable_lines == [ + 'rootvg-tanlv: 0 6291456', + 'rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 38/126512 - rw no_discard_passdown queue_if_no_space', + 'docker-253:10-1234567-0df13579: 0 20971520 thin 1922048'] + + DMSETUP_EXAMPLES = """ Name Maj Min Stat Open Targ Event UUID VG00-tmp 253 8 L--w 1 1 0 LVM-gy9uAwD7LuTIApplr2sogbOx5iS0FTax6lLmBji2ueSbX49gxcV76M29cmukQiw4 @@ -93,7 +179,8 @@ def test_dmsetup_setupinfo(): def test_examples(): env = { - 'setup_info': DmsetupInfo(context_wrap(DMSETUP_EXAMPLES)) + 'setup_info': DmsetupInfo(context_wrap(DMSETUP_EXAMPLES)), + 'dmsetup_status': DmsetupStatus(context_wrap(DMSETUP_STATUS_1)) } failed, total = doctest.testmod(dmsetup, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index a6c1d8c81..a9e7a1938 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -128,6 +128,7 @@ class Specs(SpecSet): dmesg_log = RegistryPoint(filterable=True) dmidecode = RegistryPoint() dmsetup_info = RegistryPoint() + dmsetup_status = RegistryPoint() dnf_conf = RegistryPoint(filterable=True) dnf_modules = RegistryPoint() dnf_module_list = RegistryPoint() 
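A quick sanity check of the new parser against the thin-pool sample from its
docstring - a minimal sketch that assumes only the repo's own ``context_wrap``
test helper::

    from insights.parsers.dmsetup import DmsetupStatus
    from insights.tests import context_wrap

    line = ("rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 "
            "38/126512 - rw no_discard_passdown queue_if_no_space -")
    status = DmsetupStatus(context_wrap(line))

    # rsplit(':', 1) splits on the last colon, so device names that contain
    # colons themselves (docker-253:10-...) still parse cleanly.
    assert status.names == ['rootvg-docker--pool']
    assert status[0].target_type == 'thin-pool'
    # thin-pool target args are broken out by _parse_target_args_thin_pool;
    # all values stay strings, the parser never coerces counts to int.
    assert status[0].parsed_args['used_metadata_blocks'] == '20'
    assert status[0].parsed_args['total_data_blocks'] == '126512'
    assert status[0].parsed_args['opts'] == ['rw', 'no_discard_passdown', 'queue_if_no_space', '-']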
diff --git a/insights/specs/default.py b/insights/specs/default.py index f3aba6348..0f5bba31d 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -352,6 +352,7 @@ def corosync_cmapctl_cmd_list(broker): dmesg_log = simple_file("/var/log/dmesg") dmidecode = simple_command("/usr/sbin/dmidecode") dmsetup_info = simple_command("/usr/sbin/dmsetup info -C") + dmsetup_status = simple_command("/usr/sbin/dmsetup status") dnf_conf = simple_file("/etc/dnf/dnf.conf") dnf_modules = glob_file("/etc/dnf/modules.d/*.module") docker_info = simple_command("/usr/bin/docker info") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f75527555..c608a01a6 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -45,6 +45,7 @@ class InsightsArchiveSpecs(Specs): dmesg = simple_file("insights_commands/dmesg") dmidecode = simple_file("insights_commands/dmidecode") dmsetup_info = simple_file("insights_commands/dmsetup_info_-C") + dmsetup_status = simple_file("insights_commands/dmsetup_status") docker_info = simple_file("insights_commands/docker_info") docker_list_containers = simple_file("insights_commands/docker_ps_--all_--no-trunc") docker_list_images = simple_file("insights_commands/docker_images_--all_--no-trunc_--digests") diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 6cf614533..dd16fcde4 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -67,6 +67,7 @@ class SosSpecs(Specs): dmesg = first_file(["sos_commands/kernel/dmesg", "sos_commands/general/dmesg", "var/log/dmesg"]) dmidecode = simple_file("sos_commands/hardware/dmidecode") dmsetup_info = simple_file("sos_commands/devicemapper/dmsetup_info_-c") + dmsetup_status = simple_file("sos_commands/devicemapper/dmsetup_status") dnsmasq_config = glob_file(["/etc/dnsmasq.conf", "/etc/dnsmasq.d/*.conf"]) dumpe2fs_h = glob_file("sos_commands/filesys/dumpe2fs_-h_*") ethtool = glob_file("sos_commands/networking/ethtool_*", ignore="ethtool_-.*") From bd845cc08359a58f388e9fd6728f71036434adcc Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Tue, 2 Mar 2021 11:21:51 +0530 Subject: [PATCH 337/892] Update microcode keyword check in the cpuinfo parser (#2966) * Update microcode keyword check in the cpuinfo parser Signed-off-by: rasrivas * added test case microcode check condition Signed-off-by: rasrivas --- insights/parsers/cpuinfo.py | 11 +++++++++++ insights/parsers/tests/test_cpuinfo.py | 4 +++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/insights/parsers/cpuinfo.py b/insights/parsers/cpuinfo.py index 74553671b..8abbbaf31 100644 --- a/insights/parsers/cpuinfo.py +++ b/insights/parsers/cpuinfo.py @@ -65,6 +65,8 @@ '0' >>> cpu_info.get_processor_by_index(0)['vendors'] 'GenuineIntel' + >>> cpu_info.microcode + '1808' """ from collections import defaultdict @@ -141,6 +143,7 @@ def parse_content(self, content): "revision": "revision", "address sizes": "address_sizes", "bugs": "bugs", + "microcode": "microcode" } for line in get_active_lines(content, comment_char="COMMAND>"): @@ -232,6 +235,14 @@ def vendor(self): """ return self.data["vendors"][0] + @property + @defaults() + def microcode(self): + """ + str: Returns the microcode of the first CPU. 
+ """ + return self.data["microcode"][0] + @property @defaults() def core_total(self): diff --git a/insights/parsers/tests/test_cpuinfo.py b/insights/parsers/tests/test_cpuinfo.py index 235afbc83..3a6c89473 100644 --- a/insights/parsers/tests/test_cpuinfo.py +++ b/insights/parsers/tests/test_cpuinfo.py @@ -1424,11 +1424,13 @@ def test_cpuinfo(): "apicid": "0", "flags": "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf eagerfpu pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch epb intel_pt tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt xsaveopt xsavec xgetbv1 dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp", "address_sizes": "40 bits physical, 48 bits virtual", - "bugs": "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit" + "bugs": "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit", + "microcode": "1808" } assert cpu_info.cpu_speed == "2900.000" assert cpu_info.cache_size == "20480 KB" assert cpu_info.model_number == "45" + assert cpu_info.microcode == "1808" assert "mmx" in cpu_info.flags assert "avx512f" not in cpu_info.flags for i, cpu in enumerate(cpu_info): From b774fa278f31bbd0793fc6a93a35c252668d9442 Mon Sep 17 00:00:00 2001 From: Suraj Patil <31805557+patilsuraj767@users.noreply.github.com> Date: Wed, 3 Mar 2021 17:07:33 +0000 Subject: [PATCH 338/892] Add new metrics 'kernel.all.cpu.wait.total' (#2967) Signed-off-by: Suraj Patil --- insights/specs/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 0f5bba31d..ebd57394c 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -766,7 +766,7 @@ def pmlog_summary_file(broker): raise SkipComponent pmlog_summary = command_with_args( - "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free", + "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free kernel.all.cpu.wait.total", pmlog_summary_file) postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") From 6362227c0fdc06886546f8af30816c4992e89bc7 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 4 Mar 2021 09:03:14 +0800 Subject: [PATCH 339/892] Update ld_library_path to check SAP users only (#2965) Signed-off-by: Xiangce Liu --- insights/parsers/ld_library_path.py | 24 ++++++---- .../parsers/tests/test_ld_library_path.py | 26 +++++----- insights/specs/default.py | 48 ++++++++----------- 3 files changed, 51 insertions(+), 47 deletions(-) diff --git a/insights/parsers/ld_library_path.py b/insights/parsers/ld_library_path.py index 013e26299..88309f396 100644 --- a/insights/parsers/ld_library_path.py +++ b/insights/parsers/ld_library_path.py @@ -11,7 +11,7 @@ from insights.parsers import SkipException from insights.specs import Specs 
-LdLibraryPath = namedtuple('LdLibraryPath', ('path', 'raw')) +LdLibraryPath = namedtuple('LdLibraryPath', ('user', 'path', 'raw')) """namedtuple: Type for storing the LD_LIBRARY_PATH of users""" @@ -21,11 +21,16 @@ class UserLdLibraryPath(Parser, list): Base class for parsing the ``LD_LIBRARY_PATH`` variable of each regular user of the system into a list. - Typical content looks like:: + .. note:: + + Currently, only the LD_LIBRARY_PATH SAP users is collected, where the + username is merged by SID and "adm". - /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib + Typical content looks like:: - /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run + sr1adm /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib + sr2adm + rh1adm /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run Examples: >>> len(ld_lib_path) @@ -36,6 +41,8 @@ class UserLdLibraryPath(Parser, list): 3 >>> '/sapdb/clients/RH1/lib' in ld_lib_path[0].path True + >>> ld_lib_path[1].user # The empty value is kept. + 'sr2adm' >>> '' in ld_lib_path[1].path # The empty value is kept. True @@ -46,10 +53,11 @@ class UserLdLibraryPath(Parser, list): def parse_content(self, content): llds = [] for line in content: - raw = line - if line and line[0] == line[-1] and line[0] in ('\'', '"'): - line = line[1:-1] - llds.append(LdLibraryPath(line.split(':'), raw)) + user, _, raw = [s.strip() for s in line.partition(' ')] + paths = raw + if raw and raw[0] == raw[-1] and raw[0] in ('\'', '"'): + paths = raw[1:-1] + llds.append(LdLibraryPath(user, paths.split(':'), raw)) if not llds: raise SkipException("LD_LIBRARY_PATH not set.") diff --git a/insights/parsers/tests/test_ld_library_path.py b/insights/parsers/tests/test_ld_library_path.py index 3d35fe290..455b6b6c3 100644 --- a/insights/parsers/tests/test_ld_library_path.py +++ b/insights/parsers/tests/test_ld_library_path.py @@ -12,31 +12,33 @@ """.strip() LD_LIBRARY_PATH_DOC = """ -/usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib - -/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run +sr1adm /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +sr2adm +rh1adm /usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run """.strip() LD_LIBRARY_PATH = """ -/usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib -"/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run" - -'' +sr1adm /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +sr2adm "/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run" +sr3adm +rh1adm '' """.strip() # noqa: W391 def test_ld_library_path(): ret = UserLdLibraryPath(context_wrap(LD_LIBRARY_PATH)) assert len(ret) == 4 - assert ret[0].raw == LD_LIBRARY_PATH.splitlines()[0] - assert ret[1].raw == LD_LIBRARY_PATH.splitlines()[1] + assert ret[0].raw == LD_LIBRARY_PATH.splitlines()[0].split()[-1] + assert ret[1].raw == LD_LIBRARY_PATH.splitlines()[1].split()[-1] + assert ret[2].user == 'sr3adm' assert ret[2].raw == '' - assert ret[3].raw == "''" assert ret[2].path == [''] + assert ret[3].user == 'rh1adm' + assert ret[3].raw == "''" assert ret[3].path == [''] - for p in LD_LIBRARY_PATH.splitlines()[0].split(':'): + for p in LD_LIBRARY_PATH.splitlines()[0].split()[-1].split(':'): assert p in ret[0].path - for p in LD_LIBRARY_PATH.splitlines()[1].strip('"').split(':'): + for p in LD_LIBRARY_PATH.splitlines()[1].split()[-1].strip('"').split(':'): assert p in 
ret[1].path diff --git a/insights/specs/default.py b/insights/specs/default.py index ebd57394c..f92fd2d0b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -515,33 +515,6 @@ def httpd_cmd(broker): kubepods_cpu_quota = glob_file("/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us") last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) - - @datasource(HostContext) - def regular_users(broker): - """ - Returns: The username of all regular users which could log in. - But do NOT collect or store them. - """ - ctx = broker[HostContext] - return ctx.shell_out("/bin/awk -F':' '!/nologin|false|sync|halt|shutdown/{print $1}' /etc/passwd") - - @datasource(regular_users, HostContext) - def ld_library_path_of_user(broker): - """ - Returns: The list of LD_LIBRARY_PATH of each regular user. - Username is NOT collected or stored. - """ - users = broker[DefaultSpecs.regular_users] - ctx = broker[HostContext] - llds = [] - for u in users: - ret, vvs = ctx.shell_out("/bin/su -l {0} -c /bin/env".format(u), keep_rc=True) - if ret == 0 and vvs: - llds.extend(v.split('=', 1)[-1] for v in vvs if "LD_LIBRARY_PATH=" in v) - if llds: - return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') - raise SkipComponent - libssh_client_config = simple_file("/etc/libssh/libssh_client.config") libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") @@ -825,6 +798,27 @@ def sap_sid(broker): sap = broker[Sap] return list(set(sap.sid(i).lower() for i in sap.all_instances)) + @datasource(sap_sid, HostContext) + def ld_library_path_of_user(broker): + """ + Returns: The list of LD_LIBRARY_PATH of specified users. + Username is combined from SAP and 'adm' and is also stored. 
+ """ + sids = broker[DefaultSpecs.sap_sid] + ctx = broker[HostContext] + llds = [] + for sid in sids: + usr = '{0}adm'.format(sid) + ret, vvs = ctx.shell_out("/bin/su -l {0} -c /bin/env".format(usr), keep_rc=True) + if ret != 0: + continue + for v in vvs: + if "LD_LIBRARY_PATH=" in v: + llds.append('{0} {1}'.format(usr, v.split('=', 1)[-1])) + if llds: + return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') + raise SkipComponent + sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") From ab193934474edab17b7e12be244cbd6a97123b74 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 4 Mar 2021 14:15:01 -0500 Subject: [PATCH 340/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 49 ++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 6d13bc685..513a96d8b 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -220,6 +220,11 @@ "pattern": [], "symbolic_name": "dmsetup_info" }, + { + "command": "/usr/sbin/dmsetup status", + "pattern": [], + "symbolic_name": "dmsetup_status" + }, { "command": "/usr/bin/docker info", "pattern": [], @@ -1085,6 +1090,7 @@ "auditd", "avahi", "bash", + "ceilometer-poll", "chronyd", "clvmd", "cmirrord", @@ -1094,25 +1100,34 @@ "dnsmasq", "docker", "elasticsearch", + "gnocchi-metricd", "gnome-shell", "haproxy", "heat-engine", "httpd", + "libvirtd", "mongdb", + "multipath", + "multipathd", + "neutron-dhcp-ag", + "neutron-l3-agen", "nginx", "nova-compute", "nova-conductor", "ntpd", + "octavia-worker", "openshift start master api", "openshift start master controllers", "openshift start node", "ora", + "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", "pacemakerd", "pkla-check-auth", "pmcd", "pmie", + "puppetserver", "radosgw", "redis-server", "rngd", @@ -1138,6 +1153,7 @@ "STAP/8.2", "auditd", "bash", + "ceilometer-poll", "ceph-osd", "chronyd", "clvmd", @@ -1147,21 +1163,29 @@ "dlm_controld", "docker", "elasticsearch", + "gnocchi-metricd", "gnome-shell", "haproxy", "heat-engine", "httpd", + "libvirtd", "mongdb", + "multipath", + "multipathd", "mysqld", + "neutron-dhcp-ag", + "neutron-l3-agen", "nginx", "nova-compute", "nova-conductor", "ntpd", "oc observe csr", + "octavia-worker", "openshift start master api", "openshift start master controllers", "openshift start node", "ora", + "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", "pacemakerd", @@ -1171,6 +1195,7 @@ "pmie", "postgres", "ptp4l", + "puppetserver", "radosgw", "redis-server", "rngd", @@ -1201,6 +1226,7 @@ "bash", "catalina.base", "ceilometer-coll", + "ceilometer-poll", "chronyd", "clvmd", "cmirrord", @@ -1209,6 +1235,7 @@ "dlm_controld", "docker", "elasticsearch", + "gnocchi-metricd", "gnome-shell", "goferd", "greenplum", @@ -1216,8 +1243,12 @@ "heat-engine", "httpd", "iscsid", + "libvirtd", "mongdb", "multipath", + "multipathd", + "neutron-dhcp-ag", + "neutron-l3-agen", "nfs-server", "nfsd", "nginx", @@ -1229,6 +1260,7 @@ "openshift start master controllers", "openshift start node", "ora", + "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", "pacemakerd", 
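The names above act as grep-style filters: for a spec whose "pattern" list is
non-empty, only output lines containing at least one of the listed strings are
collected, while an empty list collects everything. Roughly - an illustration,
not the client's actual code path::

    def apply_patterns(lines, patterns):
        # An empty pattern list means "collect the output unfiltered".
        if not patterns:
            return list(lines)
        # Otherwise keep only the lines that contain any listed string.
        return [line for line in lines if any(p in line for p in patterns)]

    ps_lines = [
        "root     1  /usr/lib/systemd/systemd",
        "qemu     9  /usr/sbin/libvirtd",
    ]
    assert apply_patterns(ps_lines, ["libvirtd"]) == ps_lines[1:]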
@@ -1236,6 +1268,7 @@ "pmcd", "pmie", "postgres", + "puppetserver", "radosgw", "redis-server", "rngd", @@ -1257,6 +1290,7 @@ "CMD", "auditd", "bash", + "ceilometer-poll", "chronyd", "clvmd", "cmirrord", @@ -1265,11 +1299,17 @@ "dlm_controld", "docker", "elasticsearch", + "gnocchi-metricd", "gnome-shell", "haproxy", "heat-engine", "httpd", + "libvirtd", "mongdb", + "multipath", + "multipathd", + "neutron-dhcp-ag", + "neutron-l3-agen", "neutron-ns-metadata-proxy", "nginx", "nginx: master process", @@ -1277,16 +1317,19 @@ "nova-compute", "nova-conductor", "ntpd", + "octavia-worker", "openshift start master api", "openshift start master controllers", "openshift start node", "ora", + "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", "pacemakerd", "pkla-check-auth", "pmcd", "pmie", + "puppetserver", "radosgw", "redis-server", "rngd", @@ -1650,7 +1693,9 @@ }, { "file": "/etc/insights-client/insights-client.conf", - "pattern": [], + "pattern": [ + "[" + ], "symbolic_name": "insights_client_conf" }, { @@ -4289,5 +4334,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-02-18T12:21:36.722918" + "version": "2021-02-25T16:15:16.380804" } \ No newline at end of file From 3b15f3c49119eea77f6e488502e22fd8a836d3fe Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Mon, 8 Mar 2021 18:39:05 -0600 Subject: [PATCH 341/892] Update parser and test to detect lssap issue in BZ1922937 (#2969) * Add test to detect lssap issue in BZ1922937 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Update parser to ignore bad lines Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/parsers/lssap.py | 13 ++++++++++++- insights/parsers/tests/test_lssap.py | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/insights/parsers/lssap.py b/insights/parsers/lssap.py index 7076441ea..ccd6a072f 100644 --- a/insights/parsers/lssap.py +++ b/insights/parsers/lssap.py @@ -54,7 +54,18 @@ def parse_content(self, content): self.data = [] # remove lssap version and bar text from content - clean_content = content[2:-1] + start_ndx = end_index = -1 + for i, l in enumerate(content): + if start_ndx == -1 and l.lstrip().startswith("========"): + start_ndx = i + continue + if end_index == -1 and l.strip().startswith("========"): + end_index = i + break + if start_ndx == -1 or end_index == -1: + raise ParseException("Lssap: Unable to parse {0} line(s) of content: ({1})".format(len(content), content)) + + clean_content = content[start_ndx + 1:end_index] if len(clean_content) > 0 and clean_content[0].lstrip().startswith("SID"): self.data = parse_delimited_table(clean_content, delim='|', header_delim=None) else: diff --git a/insights/parsers/tests/test_lssap.py b/insights/parsers/tests/test_lssap.py index 63b820198..ec90f47ac 100644 --- a/insights/parsers/tests/test_lssap.py +++ b/insights/parsers/tests/test_lssap.py @@ -74,6 +74,19 @@ HB2 | foo | bar """.strip() +Lssap_extra_lines = """ +*** ERROR => CTrcOpen: fopen dev_lssap + +--------------------------------------------------- +trc file: "dev_lssap", trc level: 1, release: "721" +--------------------------------------------------- + - lssap version 1.0 - +========================================== + SID Nr Instance SAPLOCALHOST Version DIR_EXECUTABLE + D02| 50| D50| sapcbapp09|722, patch 201, changelist 1718183| /usr/sap/D02/D50/exe +========================================== +""".strip() + def 
test_doc_examples(): env = {'lssap': lssap.Lssap(context_wrap(Lssap_nw_TEST))} @@ -153,3 +166,8 @@ def test_fail(): with pytest.raises(ParseException) as excinfo: lssap.Lssap(context_wrap('test')) assert "Lssap: Unable to parse 1 line(s) of content: (['test'])" in str(excinfo) + + +def test_valid_extra_lines(): + sap = lssap.Lssap(context_wrap(Lssap_extra_lines)) + assert sap is not None From fd32a0a097e4fdaf6e174451e9bf1e8a0c1f13dd Mon Sep 17 00:00:00 2001 From: Glutexo Date: Tue, 9 Mar 2021 14:34:50 +0100 Subject: [PATCH 342/892] Schedule insights-client-checkin.timer (#2888) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Štěpán Tomsa --- insights/client/phase/v1.py | 18 +- insights/client/schedule.py | 112 ++-- .../tests/client/phase/test_post_update.py | 14 +- insights/tests/client/test_schedule.py | 490 +++++++++++++++++- 4 files changed, 587 insertions(+), 47 deletions(-) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 0e3d1c87e..88b2317ca 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -73,7 +73,8 @@ def pre_update(client, config): if config.enable_schedule: # enable automatic scheduling logger.debug('Updating config...') - updated = get_scheduler(config).set_daily() + scheduler = get_scheduler(config) + updated = scheduler.schedule() if updated: logger.info('Automatic scheduling for Insights has been enabled.') sys.exit(constants.sig_kill_ok) @@ -194,9 +195,10 @@ def post_update(client, config): elif reg is False: # unregistered sys.exit(constants.sig_kill_bad) - if config.register: - if (not config.disable_schedule and - get_scheduler(config).set_daily()): + if config.register and not config.disable_schedule: + scheduler = get_scheduler(config) + updated = scheduler.schedule() + if updated: logger.info('Automatic scheduling for Insights has been enabled.') return # -------delete everything above this line------- @@ -254,9 +256,11 @@ def post_update(client, config): # system creation and upload are a single event on the platform if reg_check: logger.info('This host has already been registered.') - if (not config.disable_schedule and - get_scheduler(config).set_daily()): - logger.info('Automatic scheduling for Insights has been enabled.') + if not config.disable_schedule: + scheduler = get_scheduler(config) + updated = scheduler.schedule() + if updated: + logger.info('Automatic scheduling for Insights has been enabled.') # set --display-name independent of register # only do this if set from the CLI. normally display_name is sent on upload diff --git a/insights/client/schedule.py b/insights/client/schedule.py index aef868452..64800a420 100644 --- a/insights/client/schedule.py +++ b/insights/client/schedule.py @@ -28,7 +28,7 @@ def active(self): return os.path.isfile(self.target) return False - def set_daily(self): + def schedule(self): logger.debug('Scheduling cron.daily') try: if not os.path.exists(self.target): @@ -51,41 +51,91 @@ def remove_scheduling(self): class InsightsSchedulerSystemd(object): - - @property - def active(self): + ALL_TIMERS = ("insights-client", "insights-client-checkin") + + def __init__(self): + """ + Looks for loaded timers using `systemctl show`, stores their names in self.loaded_timers. No loaded timers + produce (). If an error occurs, self.loaded_timers becomes None and all methods (schedule, remove_scheduling, + active) then return None. 
+        """
+        results = self._run_systemctl_commands(self.ALL_TIMERS, "show", "--property", "LoadState")
+        if not results:
+            self.loaded_timers = None  # Command failed.
+        else:
+            self.loaded_timers = tuple(
+                timer
+                for timer, result in results.items()
+                if result["status"] == 0 and result["output"] == "LoadState=loaded\n"
+            )
+            if not self.loaded_timers:
+                logger.warning("No loaded timers found")
+
+    @staticmethod
+    def _run_systemctl_command(*args):
+        cmd_args = " ".join(args)
+        command = "systemctl %s" % cmd_args
+        logger.debug("Running command %s", command)
         try:
-            systemctl_status = run_command_get_output('systemctl is-enabled insights-client.timer')
-            return systemctl_status['status'] == 0
+            result = run_command_get_output(command)
         except OSError:
-            logger.exception('Could not get systemd status')
-            return False
+            logger.exception("Could not run %s", command)
+            return None
+        else:
+            logger.debug("Status: %s", result["status"])
+            logger.debug("Output: %s", result["output"])
+            return result
+
+    @classmethod
+    def _run_systemctl_commands(cls, timers, *args):
+        if timers is None:
+            return None  # Could not list loaded timers on init.
+
+        results = {}
+
+        for timer in timers:
+            unit = "%s.timer" % timer
+            command_args = args + (unit,)
+            result = cls._run_systemctl_command(*command_args)
+            if not result:
+                return None  # Command failed.
+            results[timer] = result
+
+        return results
 
-    def set_daily(self):
-        logger.debug('Starting systemd timer')
-        try:
-            # Start timers in the case of rhel 7 running systemd
-            systemctl_timer = run_command_get_output('systemctl enable --now insights-client.timer')
-            logger.debug("Starting Insights Client systemd timer.")
-            logger.debug("Status: %s", systemctl_timer['status'])
-            logger.debug("Output: %s", systemctl_timer['output'])
-            return self.active
-        except OSError:
-            logger.exception('Could not start systemd timer')
-            return False
+    @property
+    def active(self):
+        """
+        Runs systemctl is-enabled for each loaded timer. Returns True if all loaded timers are enabled, or None if
+        any systemctl command fails - here or in init.
+        """
+        results = self._run_systemctl_commands(self.loaded_timers, "is-enabled")
+        return results and all(result["status"] == 0 for result in results.values())
+
+    def schedule(self):
+        """
+        Runs systemctl enable --now for each loaded timer. Returns True if all loaded timers are successfully enabled,
+        False if any of them remains inactive. If no timer is loaded, returns {}; if any systemctl command fails - here
+        or in init - returns None. Both are falsey, as nothing has actually been enabled.
+        """
+        logger.debug("Starting systemd timers")
+        results = self._run_systemctl_commands(self.loaded_timers, "enable", "--now")
+        return results and self.active
 
     def remove_scheduling(self):
-        logger.debug('Stopping all systemd timers')
-        try:
-            # Stop timers in the case of rhel 7 running systemd
-            systemctl_timer = run_command_get_output('systemctl disable --now insights-client.timer')
-            logger.debug("Stopping Insights Client systemd timer.")
-            logger.debug("Status: %s", systemctl_timer['status'])
-            logger.debug("Output: %s", systemctl_timer['output'])
-            return not self.active
-        except OSError:
-            logger.exception('Could not stop systemd timer')
-            return False
+        """
+        Runs systemctl disable --now for each loaded timer. Returns True if all loaded timers are successfully
+        disabled, False if any of them remains active. If no timer is loaded, returns {}; if any systemctl command
+        fails - here or in init - returns None.
Both falsey as nothing has been actually disabled. + """ + logger.debug("Stopping all systemd timers") + results = self._run_systemctl_commands(self.loaded_timers, "disable", "--now") + + if results: + active = self.active + return None if active is None else not active + else: + return results def get_scheduler(config, source=None, target='/etc/cron.daily/' + APP_NAME): diff --git a/insights/tests/client/phase/test_post_update.py b/insights/tests/client/phase/test_post_update.py index 110996c05..dbd67bf38 100644 --- a/insights/tests/client/phase/test_post_update.py +++ b/insights/tests/client/phase/test_post_update.py @@ -133,7 +133,7 @@ def test_post_update_register_registered(insights_config, insights_client, get_s insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_called_once() + get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @@ -154,7 +154,7 @@ def test_post_update_register_unregistered(insights_config, insights_client, get insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_not_called() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_called_once() + get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @@ -217,7 +217,7 @@ def test_post_update_force_register_registered(insights_config, insights_client, insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_called_once() + get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.get_scheduler") @@ -239,7 +239,7 @@ def test_post_update_force_register_unregistered(insights_config, insights_clien insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_called_once() + get_scheduler.return_value.schedule.assert_called_once() # ASK @patch("insights.client.phase.v1.get_scheduler") @@ -259,7 +259,7 @@ def test_post_update_set_display_name_cli_no_register_unreg(insights_config, ins insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_not_called() + get_scheduler.return_value.schedule.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @@ -279,7 +279,7 @@ def test_post_update_set_display_name_cli_no_register_reg(insights_config, insig insights_client.return_value.get_machine_id.assert_called_once() insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.set_display_name.assert_called_once() - get_scheduler.return_value.set_daily.assert_not_called() + get_scheduler.return_value.schedule.assert_not_called() @patch("insights.client.phase.v1.get_scheduler") @@ -301,7 +301,7 @@ def 
test_post_update_set_display_name_cli_register(insights_config, insights_cli insights_client.return_value.get_registration_status.assert_called_once() insights_client.return_value.clear_local_registration.assert_called_once() insights_client.return_value.set_display_name.assert_not_called() - get_scheduler.return_value.set_daily.assert_called_once() + get_scheduler.return_value.schedule.assert_called_once() @patch("insights.client.phase.v1.InsightsClient") diff --git a/insights/tests/client/test_schedule.py b/insights/tests/client/test_schedule.py index 57a6ce9ea..cae8aaec5 100644 --- a/insights/tests/client/test_schedule.py +++ b/insights/tests/client/test_schedule.py @@ -1,15 +1,27 @@ import tempfile + +from mock.mock import call, patch +from pytest import mark + import insights.client.schedule as sched from insights.client.config import InsightsConfig -def test_set_daily(): +def test_get_schedule_cron(): target = tempfile.mktemp() config = InsightsConfig() with tempfile.NamedTemporaryFile() as source: schedule = sched.get_scheduler(config, source.name, target) + assert isinstance(schedule, sched.InsightsSchedulerCron) + + +def test_schedule_cron(): + target = tempfile.mktemp() + config = InsightsConfig() + with tempfile.NamedTemporaryFile() as source: + schedule = sched.InsightsSchedulerCron(config, source.name, target) assert not schedule.active - assert schedule.set_daily() + assert schedule.schedule() assert schedule.active schedule.remove_scheduling() assert not schedule.active @@ -24,3 +36,477 @@ def test_failed_removal(): with tempfile.NamedTemporaryFile() as source: schedule = sched.get_scheduler(config, source.name, target) schedule.remove_scheduling() + + +def test_get_scheduler_systemd(): + config = InsightsConfig() + schedule = sched.get_scheduler(config, "no cron") + assert isinstance(schedule, sched.InsightsSchedulerSystemd) + + +@patch("insights.client.schedule.run_command_get_output", return_value={"status": 0, "output": "LoadState=loaded\n"}) +def test_init_systemd_calls(run_command_get_output): + sched.InsightsSchedulerSystemd() + calls = ( + call("systemctl show --property LoadState insights-client.timer"), + call("systemctl show --property LoadState insights-client-checkin.timer"), + ) + run_command_get_output.assert_has_calls(calls) + + +@mark.parametrize(("load_states", "loaded_timers"), ( + (("loaded", "loaded"), ("insights-client", "insights-client-checkin")), + (("loaded", "not-found"), ("insights-client",)), + (("not-found", "not-found"), ()), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_init_systemd_loaded_timers(run_command_get_output, load_states, loaded_timers): + run_command_get_output.side_effect = ( + {"status": 0, "output": "LoadState=%s\n" % load_state} + for load_state in load_states + ) + scheduler = sched.InsightsSchedulerSystemd() + assert scheduler.loaded_timers == loaded_timers + + +@mark.parametrize(("side_effect"), (({"status": 0, "output": "LoadState=loaded\n"}, OSError), (OSError,))) +@patch("insights.client.schedule.run_command_get_output") +def test_init_systemd_error(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + scheduler = sched.InsightsSchedulerSystemd() + assert scheduler.loaded_timers is None + + +@patch("insights.client.schedule.run_command_get_output", return_value={"status": 0, "output": "LoadState=loaded\n"}) +def test_init_calls(run_command_get_output): + sched.InsightsSchedulerSystemd() + calls = ( + call("systemctl show --property LoadState 
insights-client.timer"), + call("systemctl show --property LoadState insights-client-checkin.timer"), + ) + run_command_get_output.assert_has_calls(calls) + + +@mark.parametrize(("outputs", "calls"), ( + ( + ("LoadState=loaded\n", "LoadState=loaded\n", "", "", "", ""), + ( + call("systemctl enable --now insights-client.timer"), + call("systemctl enable --now insights-client-checkin.timer"), + call("systemctl is-enabled insights-client.timer"), + call("systemctl is-enabled insights-client-checkin.timer"), + ), + ), + ( + ("LoadState=loaded\n", "LoadState=not-found\n", "", ""), + ( + call("systemctl enable --now insights-client.timer"), + call("systemctl is-enabled insights-client.timer"), + ), + ), + ( + ("LoadState=not-found\n", "LoadState=not-found\n", "", ""), + (), + ), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_schedule_systemd_calls(run_command_get_output, outputs, calls): + run_command_get_output.side_effect = ({"status": 0, "output": output} for output in outputs) + + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + schedule.schedule() + run_command_get_output.assert_has_calls(calls) + + +@mark.parametrize(("outputs",), ( + (("LoadState=loaded\n", "LoadState=loaded\n", "", "", "", ""),), + (("LoadState=loaded\n", "LoadState=not-found\n", "", ""),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_schedule_systemd_success(run_command_get_output, outputs): + run_command_get_output.side_effect = ({"status": 0, "output": output} for output in outputs) + schedule = sched.InsightsSchedulerSystemd() + scheduled = schedule.schedule() + assert scheduled is True + + +@mark.parametrize(("side_effect",), ( + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + {"status": 0, "output": ""}, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + {"status": 1, "output": ""}, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=not-found\n"}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_schedule_systemd_inactive(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + + schedule = sched.InsightsSchedulerSystemd() + scheduled = schedule.schedule() + assert scheduled is False + + +@mark.parametrize(("side_effect",), ( + (( + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + OSError + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + OSError + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 
0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + OSError + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_schedule_systemd_call_error(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + + schedule = sched.InsightsSchedulerSystemd() + scheduled = schedule.schedule() + assert scheduled is None + + +@patch( + "insights.client.schedule.run_command_get_output", + return_value={"status": 0, "output": "LoadState=not-found\n"}, +) +def test_schedule_systemd_not_found(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + scheduled = schedule.schedule() + assert scheduled == {} + run_command_get_output.assert_not_called() + + +@patch("insights.client.schedule.run_command_get_output", side_effect=OSError) +def test_schedule_systemd_init_error(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + scheduled = schedule.schedule() + assert scheduled is None + run_command_get_output.assert_not_called() + + +@mark.parametrize(("outputs", "calls"), ( + ( + ("LoadState=loaded\n", "LoadState=loaded\n", "", "", "", ""), + ( + call("systemctl disable --now insights-client.timer"), + call("systemctl disable --now insights-client-checkin.timer"), + call("systemctl is-enabled insights-client.timer"), + call("systemctl is-enabled insights-client-checkin.timer"), + ), + ), + ( + ("LoadState=loaded\n", "LoadState=not-found\n", "", ""), + ( + call("systemctl disable --now insights-client.timer"), + call("systemctl is-enabled insights-client.timer"), + ), + ), + ( + ("LoadState=not-found\n", "LoadState=not-found\n", "", ""), + (), + ), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_remove_scheduling_systemd_calls(run_command_get_output, outputs, calls): + run_command_get_output.side_effect = ({"status": 0, "output": output} for output in outputs) + + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + schedule.remove_scheduling() + run_command_get_output.assert_has_calls(calls) + + +@mark.parametrize(("side_effect",), ( + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + {"status": 1, "output": ""}, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=not-found\n"}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""}, + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_remove_scheduling_systemd_success(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + schedule = sched.InsightsSchedulerSystemd() + unscheduled = schedule.remove_scheduling() + assert unscheduled is True + + +@mark.parametrize(("side_effect",), ( + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=not-found\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def 
test_remove_scheduling_systemd_active(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + schedule = sched.InsightsSchedulerSystemd() + unscheduled = schedule.remove_scheduling() + assert unscheduled is False + + +@mark.parametrize(("side_effect",), ( + (( + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + OSError + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + OSError + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + {"status": 0, "output": ""}, + OSError + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_remove_scheduling_systemd_error(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + + schedule = sched.InsightsSchedulerSystemd() + unscheduled = schedule.remove_scheduling() + assert unscheduled is None + + +@patch( + "insights.client.schedule.run_command_get_output", + return_value={"status": 0, "output": "LoadState=not-found\n"}, +) +def test_remove_scheduling_systemd_not_found(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + unscheduled = schedule.remove_scheduling() + assert unscheduled == {} + run_command_get_output.assert_not_called() + + +@patch("insights.client.schedule.run_command_get_output", side_effect=OSError) +def test_remove_scheduling_systemd_init_error(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + unscheduled = schedule.remove_scheduling() + assert unscheduled is None + run_command_get_output.assert_not_called() + + +@mark.parametrize(("outputs", "calls"), ( + ( + ("LoadState=loaded\n", "LoadState=loaded\n", "", "", "", ""), + ( + call("systemctl is-enabled insights-client.timer"), + call("systemctl is-enabled insights-client-checkin.timer"), + ), + ), + ( + ("LoadState=loaded\n", "LoadState=not-found\n", "", ""), + ( + call("systemctl is-enabled insights-client.timer"), + ), + ), + ( + ("LoadState=not-found\n", "LoadState=not-found\n", "", ""), + (), + ), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_active_systemd_calls(run_command_get_output, outputs, calls): + run_command_get_output.side_effect = ({"status": 0, "output": output} for output in outputs) + + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + schedule.active + + run_command_get_output.assert_has_calls(calls) + + +@mark.parametrize(("outputs",), ( + (("LoadState=loaded\n", "LoadState=loaded\n", "", ""),), + (("LoadState=loaded\n", "LoadState=not-found\n", ""),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_active_systemd_active(run_command_get_output, outputs): + run_command_get_output.side_effect = ({"status": 0, "output": output} for output in outputs) + schedule = sched.InsightsSchedulerSystemd() + assert schedule.active is True + + +@mark.parametrize(("side_effect",), ( + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, 
"output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + {"status": 1, "output": ""} + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 1, "output": ""}, + {"status": 0, "output": ""} + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 1, "output": ""}, + {"status": 1, "output": ""} + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=not-found\n"}, + {"status": 1, "output": ""}, + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_active_systemd_inactive(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + schedule = sched.InsightsSchedulerSystemd() + assert schedule.active is False + + +@mark.parametrize(("side_effect",), ( + (( + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + OSError, + ),), + (( + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": "LoadState=loaded\n"}, + {"status": 0, "output": ""}, + OSError + ),), +)) +@patch("insights.client.schedule.run_command_get_output") +def test_active_systemd_error(run_command_get_output, side_effect): + run_command_get_output.side_effect = side_effect + schedule = sched.InsightsSchedulerSystemd() + assert schedule.active is None + + +@patch( + "insights.client.schedule.run_command_get_output", + side_effect=( + {"status": 0, "output": "LoadState=not-found\n"}, + {"status": 0, "output": "LoadState=not-found\n"}, + ), +) +def test_active_systemd_not_found(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + assert schedule.active == {} + run_command_get_output.assert_not_called() + + +@patch("insights.client.schedule.run_command_get_output", side_effect=OSError) +def test_active_systemd_init_error(run_command_get_output): + schedule = sched.InsightsSchedulerSystemd() + run_command_get_output.reset_mock() + + assert schedule.active is None + run_command_get_output.assert_not_called() From dcb2b195d08e6f746e9871eb7c65f6787e60dd9e Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 11 Mar 2021 06:07:36 +0800 Subject: [PATCH 343/892] Change to use 'Hostname' instead of 'hostname' combiner (#2972) - The hostname combiner is deprecated Signed-off-by: Xiangce Liu --- insights/combiners/multinode.py | 4 ++-- insights/combiners/sap.py | 4 ++-- insights/core/evaluators.py | 2 +- insights/tests/test_evaluators.py | 6 +++--- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/insights/combiners/multinode.py b/insights/combiners/multinode.py index cff7fd989..a94ace88f 100644 --- a/insights/combiners/multinode.py +++ b/insights/combiners/multinode.py @@ -1,11 +1,11 @@ from insights import combiner -from insights.combiners.hostname import hostname +from insights.combiners.hostname import Hostname from insights.core.context import create_product from insights.parsers.metadata import MetadataJson from insights.specs import Specs -@combiner(MetadataJson, [hostname, Specs.machine_id]) +@combiner(MetadataJson, [Hostname, Specs.machine_id]) def multinode_product(md, hn, machine_id): hn = hn.fqdn if hn else machine_id.content[0].rstrip() return create_product(md.data, hn) diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py index 05b15e2e9..a15edc676 100644 
--- a/insights/combiners/sap.py +++ b/insights/combiners/sap.py @@ -11,7 +11,7 @@ from collections import namedtuple from insights import SkipComponent from insights.core.plugins import combiner -from insights.combiners.hostname import hostname +from insights.combiners.hostname import Hostname from insights.parsers.lssap import Lssap from insights.parsers.saphostctrl import SAPHostCtrlInstances @@ -38,7 +38,7 @@ """ -@combiner(hostname, [SAPHostCtrlInstances, Lssap]) +@combiner(Hostname, [SAPHostCtrlInstances, Lssap]) class Sap(dict): """ Combiner for combining the result of :class:`insights.parsers.lssap.Lssap` diff --git a/insights/core/evaluators.py b/insights/core/evaluators.py index 1b59f1732..43b43c4be 100644 --- a/insights/core/evaluators.py +++ b/insights/core/evaluators.py @@ -7,7 +7,7 @@ from ..formats import Formatter from ..specs import Specs -from ..combiners.hostname import hostname as combiner_hostname +from ..combiners.hostname import Hostname as combiner_hostname from ..parsers.branch_info import BranchInfo from ..util import utc from . import dr, plugins diff --git a/insights/tests/test_evaluators.py b/insights/tests/test_evaluators.py index dd145690a..a50dd1b62 100644 --- a/insights/tests/test_evaluators.py +++ b/insights/tests/test_evaluators.py @@ -1,7 +1,7 @@ from insights import dr, rule, make_fail, make_pass, make_fingerprint from insights.core.plugins import component, Response from insights.core.evaluators import InsightsEvaluator, SingleEvaluator -from insights.combiners.hostname import hostname +from insights.combiners.hostname import Hostname from insights.specs import Specs from insights.tests import context_wrap @@ -66,7 +66,7 @@ def show_links(): components = [ - hostname, + Hostname, Specs.redhat_release, Specs.machine_id, one, @@ -235,7 +235,7 @@ def test_insights_evaluator_make_unsure(): def test_insights_evaluator_show_links(): components = [ - hostname, + Hostname, Specs.redhat_release, Specs.machine_id, show_links, From 48291d06a6dbeaf2fe8c81c33f74b08a1f2cac00 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 10 Mar 2021 20:58:53 -0600 Subject: [PATCH 344/892] Remove lssap command from collection (#2975) * Remove lssap command from collection * The saphostctrl command is a better source of the information * Fixes two open bugzillas with the lssap command 1922937 and 1936951 * Registry spec and parser will remain for use in the future Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Fix test error, change will be overwritten at release Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/client/uploader_json_map.json | 5 ----- insights/specs/default.py | 1 - insights/specs/insights_archive.py | 1 - 3 files changed, 7 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 513a96d8b..759067949 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -720,11 +720,6 @@ "pattern": [], "symbolic_name": "lspci_kernel" }, - { - "command": "/usr/sap/hostctrl/exe/lssap", - "pattern": [], - "symbolic_name": "lssap" - }, { "command": "/usr/sbin/lvmconfig --type full", "pattern": [], diff --git a/insights/specs/default.py b/insights/specs/default.py index f92fd2d0b..9f2386f21 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -558,7 +558,6 @@ def httpd_cmd(broker): lsmod = simple_command("/sbin/lsmod") lsof = simple_command("/usr/sbin/lsof") lspci = 
simple_command("/sbin/lspci -k") - lssap = simple_command("/usr/sap/hostctrl/exe/lssap") lsscsi = simple_command("/usr/bin/lsscsi") lsvmbus = simple_command("/usr/sbin/lsvmbus -vv") lvm_conf = simple_file("/etc/lvm/lvm.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index c608a01a6..45e781cf4 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -131,7 +131,6 @@ class InsightsArchiveSpecs(Specs): lsmod = simple_file("insights_commands/lsmod") lsof = simple_file("insights_commands/lsof") lspci = simple_file("insights_commands/lspci_-k") - lssap = simple_file("insights_commands/usr.sap.hostctrl.exe.lssap") lsscsi = simple_file("insights_commands/lsscsi") lsvmbus = simple_file("insights_commands/lsvmbus_-vv") lvmconfig = first_file([ From 441e289de5b41931178ca7078b635d10b622f3bd Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 11 Mar 2021 22:33:26 +0800 Subject: [PATCH 345/892] Enhance Sap.local_instances to check either of Hostname or FQDN (#2976) Signed-off-by: Xiangce Liu --- insights/combiners/sap.py | 6 ++- insights/combiners/tests/test_sap.py | 59 +++++++++++++++++++++++++++- 2 files changed, 63 insertions(+), 2 deletions(-) diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py index a15edc676..c850be21f 100644 --- a/insights/combiners/sap.py +++ b/insights/combiners/sap.py @@ -80,6 +80,7 @@ class Sap(dict): def __init__(self, hostname, insts, lssap): hn = hostname.hostname + fqdn = hostname.fqdn data = {} self.local_instances = [] self.business_instances = [] @@ -91,7 +92,10 @@ def __init__(self, hostname, insts, lssap): self.all_instances = insts.instances for inst in insts.data: k = inst['InstanceName'] - self.local_instances.append(k) if hn == inst['Hostname'].split('.')[0] else None + if (hn == inst['Hostname'].split('.')[0] or + fqdn == inst['FullQualifiedHostname'] or + fqdn == inst['Hostname']): + self.local_instances.append(k) data[k] = SAPInstances(k, inst['Hostname'], inst['SID'], diff --git a/insights/combiners/tests/test_sap.py b/insights/combiners/tests/test_sap.py index 00ca53622..ac355fa8b 100644 --- a/insights/combiners/tests/test_sap.py +++ b/insights/combiners/tests/test_sap.py @@ -184,6 +184,51 @@ ********************************************************* ''' +SAPHOSTCTRL_HOSTINSTANCES_R_CASE = ''' +********************************************************* + CreationClassName , String , SAPInstance + SID , String , R4D + SystemNumber , String , 12 + InstanceName , String , DVEBMGS12 + Hostname , String , r4d00 + FullQualifiedHostname , String , r4d00.example.corp + SapVersionInfo , String , 753, patch 501, changelist 1967207 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , R4D + SystemNumber , String , 10 + InstanceName , String , ASCS10 + Hostname , String , r4d01 + FullQualifiedHostname , String , r4d01.example.corp + SapVersionInfo , String , 753, patch 501, changelist 1967207 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , WDX + SystemNumber , String , 20 + InstanceName , String , W20 + Hostname , String , r4d02 + FullQualifiedHostname , String , host_97.example.corp + SapVersionInfo , String , 773, patch 121, changelist 1917131 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , SMD + SystemNumber , String , 98 + InstanceName , String , SMDA98 + Hostname , 
String , r4d01 + FullQualifiedHostname , String , host_97.example.corp + SapVersionInfo , String , 745, patch 400, changelist 1734487 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , SMD + SystemNumber , String , 97 + InstanceName , String , SMDA97 + Hostname , String , r4d00 + FullQualifiedHostname , String , host_97.example.corp + SapVersionInfo , String , 745, patch 400, changelist 1734487 +''' + +HOSTNAME2 = 'host_97.example.corp' + def test_lssap_netweaver(): lssap = Lssap(context_wrap(Lssap_nw_TEST)) @@ -223,7 +268,7 @@ def test_saphostcrtl_hana_2(): sap = Sap(hn, inst, lssap) assert 'D50' not in sap assert 'HDB00' in sap - assert sorted(sap.local_instances) == sorted(['HDB88', 'HDB90', 'SMDA91']) + assert sorted(sap.local_instances) == sorted(['HDB88', 'HDB90', 'SMDA91', 'HDB62', 'HDB00']) assert sorted(sap.all_instances) == sorted([ 'ASCS07', 'ASCS52', 'D54', 'DVEBMGS09', 'ERS08', 'HDB00', 'HDB62', 'HDB88', 'HDB90', 'SCS10', 'SMDA91']) @@ -274,6 +319,18 @@ def test_all(): assert sap.is_ascs is True +def test_r_case(): + saphostctrl = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE)) + hn = Hostname(HnF(context_wrap(HOSTNAME2)), None, None, None, None) + sap = Sap(hn, saphostctrl, None) + assert sorted(sap.local_instances) == sorted(['W20', 'SMDA98', 'SMDA97']) + assert sap['DVEBMGS12'].version == '753, patch 501, changelist 1967207' + assert sap['ASCS10'].hostname == 'r4d01' + assert sap.is_netweaver is True + assert sap.is_hana is False + assert sap.is_ascs is True + + def test_doc_examples(): env = { 'saps': Sap( From 3e8e906a2c76781604db13fd8b4680133dcd0f00 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 11 Mar 2021 11:36:09 -0500 Subject: [PATCH 346/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 759067949..3efb38d0d 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1689,7 +1689,8 @@ { "file": "/etc/insights-client/insights-client.conf", "pattern": [ - "[" + "[", + "auto_update" ], "symbolic_name": "insights_client_conf" }, @@ -2095,11 +2096,6 @@ "pattern": [], "symbolic_name": "dnf_modules" }, - { - "file": "/etc/redhat-access-insights/machine-id", - "pattern": [], - "symbolic_name": "machine_id1" - }, { "file": "/etc/sysconfig/docker-storage-setup", "pattern": [], @@ -2461,10 +2457,9 @@ { "file": "/etc/lvm/lvm.conf", "pattern": [ - "auto_activation_volume_list", "filter", "locking_type", - "thin_pool_autoextend", + "use_lvmetad", "volume_list" ], "symbolic_name": "lvm_conf" @@ -2511,6 +2506,7 @@ "DMA Status error. Resetting chip", "Detected Tx Unit Hang", "Device is still in reset", + "Disable lvmetad in lvm.conf. lvmetad should never be enabled in a clustered environment. 
Set use_lvmetad=0 and kill the lvmetad process",
             "Error I40E_AQ_RC_EINVAL adding RX filters on PF, promiscuous mode forced on",
             "Error deleting EBS Disk volume aws",
             "Error running DeviceResume dm_task_run failed",
@@ -2622,8 +2618,8 @@
             "platform microcode: firmware: requesting",
             "reservation conflict",
             "returned a bad sequence-id error",
+            "rhsm",
             "rhsmd: rhsmd process exceeded runtime and was killed",
-            "server kernel: rhsmcertd-worke",
             "shm_open failed, Permission denied",
             "skb_copy",
             "skb_over_panic",
@@ -4329,5 +4325,5 @@
     "pre_commands": {
         "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"
     },
-    "version": "2021-02-25T16:15:16.380804"
-}
\ No newline at end of file
+    "version": "2021-03-04T14:18:04.414559"
+}

From c95e5b25c9e300bb9dfbc9c4737f685c161cb74b Mon Sep 17 00:00:00 2001
From: Alec Cohan <44471274+aleccohan@users.noreply.github.com>
Date: Tue, 16 Mar 2021 17:36:04 -0400
Subject: [PATCH 347/892] RHCLOUD-11737: Implement playbook signature
 validation logic in insights-core (#2945)

* RHCLOUD-11737: Implement playbook signature validation logic in insights-core

Signed-off-by: Alec Cohan

* Update yaml.dump to use default_flow_style=False

Signed-off-by: Alec Cohan

* modify core version check to be less strict in the string validation

Signed-off-by: Alec Cohan

* remove testing print statements

Signed-off-by: Alec Cohan

* flake fix

Signed-off-by: Alec Cohan

Co-authored-by: Jeremy Crafts
---
 .flake8                                       |    2 +-
 .../ansible/playbook_verifier/__init__.py     |  136 +-
 .../ansible/playbook_verifier/__main__.py     |    9 +-
 .../playbook_verifier/contrib/__init__.py     |    0
 .../playbook_verifier/contrib/gnupg.py        | 1646 +++++++++++++++++
 .../playbook_verifier/contrib/oyaml.py        |   53 +
 .../apps/ansible/playbook_verifier/public.gpg |   18 +
 .../client/apps/ansible/test_playbook.yml     |   30 +-
 insights/parsers/vdo_status.py                |    2 +-
 .../client/apps/test_playbook_verifier.py     |  112 ++
 insights/tests/client/test_utilities.py       |    3 +
 insights/tests/test_filters.py                |    2 +
 insights/tests/test_formats.py                |    2 +
 setup.py                                      |    4 +-
 14 files changed, 1985 insertions(+), 34 deletions(-)
 create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/__init__.py
 create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py
 create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py
 create mode 100644 insights/client/apps/ansible/playbook_verifier/public.gpg
 create mode 100644 insights/tests/client/apps/test_playbook_verifier.py

diff --git a/.flake8 b/.flake8
index e42cbab6a..f15d5b515 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
 [flake8]
 ignore = E501,E126,E127,E128,E722,E741
-exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py,insights/parsers/tests/lvm_test_data.py
+exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py,insights/parsers/tests/lvm_test_data.py,insights/client/apps/ansible/playbook_verifier/contrib
diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py
index d1f3e7826..da8f37067 100644
--- a/insights/client/apps/ansible/playbook_verifier/__init__.py
+++ b/insights/client/apps/ansible/playbook_verifier/__init__.py
@@ -1,37 +1,143 @@
 import os
+import copy
+import yaml
+import base64
+import requests
+import tempfile
+import pkgutil
+import insights.client.apps.ansible
+from logging import getLogger
+from distutils.version import LooseVersion
+from insights.client.utilities import get_version_info
+from 
insights.client.apps.ansible.playbook_verifier.contrib import gnupg +from insights.client.constants import InsightsConstants as constants -__all__ = ("verify", "PlaybookValidationError") +__all__ = ("verify", "PlaybookVerificationError") +SIGKEY = 'insights_signature' +PUBLIC_KEY_FOLDER = pkgutil.get_data(insights.client.apps.ansible.__name__, 'playbook_verifier/public.gpg') # Update this when we have the key generated +VERSIONING_URL = 'https://cloud.redhat.com/api/v1/static/egg_version' +EXCLUDABLE_VARIABLES = ['hosts', 'vars'] -class PlaybookValidationError(Exception): +logger = getLogger(__name__) + + +class PlaybookVerificationError(Exception): """ - Exception raised when playbook validation fails + Exception raised when playbook verification fails Attributes: playbook -- stringified playbook yaml from stdin - message -- explanation of why validation failed + message -- explanation of why verification failed """ def __init__(self, message="PLAYBOOK VALIDATION FAILED"): self.message = message - super().__init__(self.message) + super(PlaybookVerificationError, self).__init__(self.message) def __str__(self): return self.message -def verify(unverified_playbook): +def eggVersioningCheck(checkVersion): + currentVersion = requests.get(VERSIONING_URL) + print('currentVersion: ', currentVersion) + currentVersion = currentVersion.text + runningVersion = get_version_info()['core_version'] + + if checkVersion: + if LooseVersion(currentVersion.strip()) < LooseVersion(runningVersion): + raise PlaybookVerificationError(message="EGG VERSION ERROR: Current running egg is not the most recent version") + + return currentVersion + + +def getPublicKey(gpg): + if not PUBLIC_KEY_FOLDER: + raise PlaybookVerificationError(message="PUBLIC KEY IMPORT ERROR: Public key file not found") + + publicKey = PUBLIC_KEY_FOLDER + importResults = gpg.import_keys(publicKey) + if (importResults.count < 1): + raise PlaybookVerificationError(message="PUBLIC KEY NOT IMPORTED: Public key import failed") + + return importResults + + +def excludeDynamicElements(snippet): + exclusions = snippet['vars']['insights_signature_exclude'].split(',') + + for element in exclusions: + element = element.split('/') + + # remove empty strings + element = [string for string in element if string != ''] + + if (len(element) == 1 and element[0] in EXCLUDABLE_VARIABLES): + del snippet[element[0]] + elif (len(element) == 2 and element[0] in EXCLUDABLE_VARIABLES): + try: + del snippet[element[0]][element[1]] + except: + raise PlaybookVerificationError(message='INVALID FIELD: the variable {0} defined in insights_signature_exclude does not exist.'.format(element)) + else: + raise PlaybookVerificationError(message='INVALID EXCLUSION: the variable {0} is not a valid exclusion.'.format(element)) + + return snippet + + +def executeVerification(snippet, encodedSignature): + gpg = gnupg.GPG(gnupghome=constants.insights_core_lib_dir) + serializedSnippet = bytes(yaml.dump(snippet, default_flow_style=False).encode("UTF-8")) + + decodedSignature = base64.b64decode(encodedSignature) + + # load public key + getPublicKey(gpg) + + fd, fn = tempfile.mkstemp() + os.write(fd, decodedSignature) + os.close(fd) + + result = gpg.verify_data(fn, serializedSnippet) + os.unlink(fn) + + return result + + +def verifyPlaybookSnippet(snippet): + if ('vars' not in snippet.keys()): + raise PlaybookVerificationError(message='VARS FIELD NOT FOUND: Verification failed') + elif (SIGKEY not in snippet['vars']): + raise PlaybookVerificationError(message='SIGNATURE NOT FOUND: Verification 
failed') + + encodedSignature = snippet['vars'][SIGKEY] + snippetCopy = copy.deepcopy(snippet) + + snippetCopy = excludeDynamicElements(snippetCopy) + + return executeVerification(snippetCopy, encodedSignature) + + +def verify(playbook, checkVersion=True, skipVerify=False): """ Verify the signed playbook. - Input: stringified "broken" unverified playbook - Output: stringified "verified" playbook - Error: exception + Input: unverified playbook (dictionary format) + Output: "verified" playbook (dictionary format) + Error: Playbook Verification failure / Playbook Signature not found. """ - # Skeleton implementation ... "bless" the incoming playbook - ERROR = os.getenv('ANSIBLE_PLAYBOOK_VERIFIER_THROW_ERROR') - if ERROR: - raise PlaybookValidationError() + logger.info('Playbook Verification has started') + + # Egg Version Check + eggVersioningCheck(checkVersion) + + if not skipVerify: + for snippet in playbook: + verified = verifyPlaybookSnippet(snippet) + + if not verified: + raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(snippet['name'])) - verified_playbook = unverified_playbook - return verified_playbook + logger.info('All templates successfully validated') + return playbook diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index 83ad27223..399246b29 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -1,5 +1,5 @@ import sys - +from insights.client.apps.ansible.playbook_verifier.contrib import oyaml as yaml from insights.client.apps.ansible.playbook_verifier import verify @@ -14,12 +14,13 @@ def read_playbook(): return unverified_playbook -unverified_playbook = read_playbook() +playbook = read_playbook() +playbook_yaml = yaml.load(playbook) try: - verified_playbook = verify(unverified_playbook) + verified_playbook = verify(playbook_yaml, checkVersion=False) except Exception as e: sys.stderr.write(e.message) sys.exit(1) -print(verified_playbook) +print(playbook) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py b/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py new file mode 100644 index 000000000..671b46ddc --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py @@ -0,0 +1,1646 @@ +""" A wrapper for the 'gpg' command:: + +Portions of this module are derived from A.M. Kuchling's well-designed +GPG.py, using Richard Jones' updated version 1.3, which can be found +in the pycrypto CVS repository on Sourceforge: + +http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py + +This module is *not* forward-compatible with amk's; some of the +old interface has changed. For instance, since I've added decrypt +functionality, I elected to initialize with a 'gnupghome' argument +instead of 'keyring', so that gpg can find both the public and secret +keyrings. I've also altered some of the returned objects in order for +the caller to not have to know as much about the internals of the +result classes. + +While the rest of ISconf is released under the GPL, I am releasing +this single file under the same terms that A.M. Kuchling used for +pycrypto. 
+ +Steve Traugott, stevegt@terraluna.org +Thu Jun 23 21:27:20 PDT 2005 + +This version of the module has been modified from Steve Traugott's version +(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by +Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork() +and so does not work on Windows). Renamed to gnupg.py to avoid confusion with +the previous versions. + +Modifications Copyright (C) 2008-2019 Vinay Sajip. All rights reserved. + +A unittest harness (test_gnupg.py) has also been added. +""" + +__version__ = "0.4.6" +__author__ = "Vinay Sajip" +__date__ = "$17-Apr-2020 09:35:35$" + +try: + from io import StringIO +except ImportError: # pragma: no cover + from cStringIO import StringIO + +import codecs +import locale +import logging +import os +import re +import socket +from subprocess import Popen +from subprocess import PIPE +import sys +import threading + +STARTUPINFO = None +if os.name == 'nt': # pragma: no cover + try: + from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE + except ImportError: + STARTUPINFO = None + +try: + import logging.NullHandler as NullHandler +except ImportError: + class NullHandler(logging.Handler): + def handle(self, record): + pass +try: + unicode + _py3k = False + string_types = basestring + text_type = unicode +except NameError: + _py3k = True + string_types = str + text_type = str + +logger = logging.getLogger(__name__) +if not logger.handlers: + logger.addHandler(NullHandler()) + +# We use the test below because it works for Jython as well as CPython +if os.path.__name__ == 'ntpath': # pragma: no cover + # On Windows, we don't need shell quoting, other than worrying about + # paths with spaces in them. + def shell_quote(s): + return '"%s"' % s +else: + # Section copied from sarge + + # This regex determines which shell input needs quoting + # because it may be unsafe + UNSAFE = re.compile(r'[^\w%+,./:=@-]') + + def shell_quote(s): + """ + Quote text so that it is safe for Posix command shells. + + For example, "*.py" would be converted to "'*.py'". If the text is + considered safe it is returned unquoted. + + :param s: The value to quote + :type s: str (or unicode on 2.x) + :return: A safe version of the input, from the point of view of Posix + command shells + :rtype: The passed-in type + """ + if not isinstance(s, string_types): # pragma: no cover + raise TypeError('Expected string type, got %s' % type(s)) + if not s: + result = "''" + elif not UNSAFE.search(s): + result = s + else: + result = "'%s'" % s.replace("'", r"'\''") + return result + + # end of sarge code + +# Now that we use shell=False, we shouldn't need to quote arguments. +# Use no_quote instead of shell_quote to remind us of where quoting +# was needed. However, note that we still need, on 2.x, to encode any +# Unicode argument with the file system encoding - see Issue #41 and +# Python issue #1759845 ("subprocess.call fails with unicode strings in +# command line"). + +# Allows the encoding used to be overridden in special cases by setting +# this module attribute appropriately. +fsencoding = sys.getfilesystemencoding() + +def no_quote(s): + if not _py3k and isinstance(s, text_type): + s = s.encode(fsencoding) + return s + +def _copy_data(instream, outstream): + # Copy one stream to another + sent = 0 + if hasattr(sys.stdin, 'encoding'): + enc = sys.stdin.encoding + else: # pragma: no cover + enc = 'ascii' + while True: + # See issue #39: read can fail when e.g. 
a text stream is provided + # for what is actually a binary file + try: + data = instream.read(1024) + except UnicodeError: + logger.warning('Exception occurred while reading', exc_info=1) + break + if not data: + break + sent += len(data) + # logger.debug("sending chunk (%d): %r", sent, data[:256]) + try: + outstream.write(data) + except UnicodeError: # pragma: no cover + outstream.write(data.encode(enc)) + except: + # Can sometimes get 'broken pipe' errors even when the data has all + # been sent + logger.exception('Error sending data') + break + try: + outstream.close() + except IOError: # pragma: no cover + logger.warning('Exception occurred while closing: ignored', exc_info=1) + logger.debug("closed output, %d bytes sent", sent) + +def _threaded_copy_data(instream, outstream): + wr = threading.Thread(target=_copy_data, args=(instream, outstream)) + wr.setDaemon(True) + logger.debug('data copier: %r, %r, %r', wr, instream, outstream) + wr.start() + return wr + +def _write_passphrase(stream, passphrase, encoding): + passphrase = '%s\n' % passphrase + passphrase = passphrase.encode(encoding) + stream.write(passphrase) + logger.debug('Wrote passphrase') + +def _is_sequence(instance): + return isinstance(instance, (list, tuple, set, frozenset)) + +def _make_memory_stream(s): + try: + from io import BytesIO + rv = BytesIO(s) + except ImportError: # pragma: no cover + rv = StringIO(s) + return rv + +def _make_binary_stream(s, encoding): + if _py3k: + if isinstance(s, str): + s = s.encode(encoding) + else: + if type(s) is not str: + s = s.encode(encoding) + return _make_memory_stream(s) + +class Verify(object): + "Handle status messages for --verify" + + TRUST_UNDEFINED = 0 + TRUST_NEVER = 1 + TRUST_MARGINAL = 2 + TRUST_FULLY = 3 + TRUST_ULTIMATE = 4 + + TRUST_LEVELS = { + "TRUST_UNDEFINED" : TRUST_UNDEFINED, + "TRUST_NEVER" : TRUST_NEVER, + "TRUST_MARGINAL" : TRUST_MARGINAL, + "TRUST_FULLY" : TRUST_FULLY, + "TRUST_ULTIMATE" : TRUST_ULTIMATE, + } + + # for now, just the most common error codes. This can be expanded as and + # when reports come in of other errors. 
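+    # (In handle_status() below, a FAILURE code is first masked to drop the
+    # error source; bit 15 then selects the system table over the GPG one.)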
+ GPG_SYSTEM_ERROR_CODES = { + 1: 'permission denied', + 35: 'file exists', + 81: 'file not found', + 97: 'not a directory', + } + + GPG_ERROR_CODES = { + 11: 'incorrect passphrase', + } + + def __init__(self, gpg): + self.gpg = gpg + self.valid = False + self.fingerprint = self.creation_date = self.timestamp = None + self.signature_id = self.key_id = None + self.username = None + self.key_id = None + self.key_status = None + self.status = None + self.pubkey_fingerprint = None + self.expire_timestamp = None + self.sig_timestamp = None + self.trust_text = None + self.trust_level = None + self.sig_info = {} + + def __nonzero__(self): + return self.valid + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + + def update_sig_info(**kwargs): + sig_id = self.signature_id + if sig_id: + info = self.sig_info[sig_id] + info.update(kwargs) + + if key in self.TRUST_LEVELS: + self.trust_text = key + self.trust_level = self.TRUST_LEVELS[key] + update_sig_info(trust_level=self.trust_level, + trust_text=self.trust_text) + elif key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key == "BADSIG": # pragma: no cover + self.valid = False + self.status = 'signature bad' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "ERRSIG": # pragma: no cover + self.valid = False + parts = value.split() + (self.key_id, + algo, hash_algo, + cls, + self.timestamp) = parts[:5] + # Since GnuPG 2.2.7, a fingerprint is tacked on + if len(parts) >= 7: + self.fingerprint = parts[6] + self.status = 'signature error' + update_sig_info(keyid=self.key_id, timestamp=self.timestamp, + fingerprint=self.fingerprint, status=self.status) + elif key == "EXPSIG": # pragma: no cover + self.valid = False + self.status = 'signature expired' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "GOODSIG": + self.valid = True + self.status = 'signature good' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "VALIDSIG": + fingerprint, creation_date, sig_ts, expire_ts = value.split()[:4] + (self.fingerprint, + self.creation_date, + self.sig_timestamp, + self.expire_timestamp) = (fingerprint, creation_date, sig_ts, + expire_ts) + # may be different if signature is made with a subkey + self.pubkey_fingerprint = value.split()[-1] + self.status = 'signature valid' + update_sig_info(fingerprint=fingerprint, creation_date=creation_date, + timestamp=sig_ts, expiry=expire_ts, + pubkey_fingerprint=self.pubkey_fingerprint, + status=self.status) + elif key == "SIG_ID": + sig_id, creation_date, timestamp = value.split() + self.sig_info[sig_id] = {'creation_date': creation_date, + 'timestamp': timestamp} + (self.signature_id, + self.creation_date, self.timestamp) = (sig_id, creation_date, + timestamp) + elif key == "DECRYPTION_FAILED": # pragma: no cover + self.valid = False + self.key_id = value + self.status = 'decryption failed' + elif key == "NO_PUBKEY": # pragma: no cover + self.valid = False + self.key_id = value + self.status = 'no public key' + elif key in ("EXPKEYSIG", "REVKEYSIG"): # pragma: no cover + # signed with expired or revoked key + self.valid = False + self.key_id = value.split()[0] + if key == "EXPKEYSIG": + self.key_status = 'signing key has expired' + else: + self.key_status = 'signing key was 
revoked' + self.status = self.key_status + update_sig_info(status=self.status, keyid=self.key_id) + elif key in ("UNEXPECTED", "FAILURE"): # pragma: no cover + self.valid = False + self.key_id = value + if key == "UNEXPECTED": + self.status = 'unexpected data' + else: + # N.B. there might be other reasons. For example, if an output + # file can't be created - /dev/null/foo will lead to a + # "not a directory" error, but which is not sent as a status + # message with the [GNUPG:] prefix. Similarly if you try to + # write to "/etc/foo" as a non-root user, a "permission denied" + # error will be sent as a non-status message. + message = 'error - %s' % value + operation, code = value.rsplit(' ', 1) + if code.isdigit(): + code = int(code) & 0xFFFFFF # lose the error source + if self.gpg.error_map and code in self.gpg.error_map: + message = '%s: %s' % (operation, self.gpg.error_map[code]) + else: + system_error = bool(code & 0x8000) + code = code & 0x7FFF + if system_error: + mapping = self.GPG_SYSTEM_ERROR_CODES + else: + mapping = self.GPG_ERROR_CODES + if code in mapping: + message = '%s: %s' % (operation, mapping[code]) + if not self.status: + self.status = message + elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH", + "NO_SECKEY", "BEGIN_SIGNING"): + pass + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +class ImportResult(object): + "Handle status messages for --import" + + counts = '''count no_user_id imported imported_rsa unchanged + n_uids n_subk n_sigs n_revoc sec_read sec_imported + sec_dups not_imported'''.split() + def __init__(self, gpg): + self.gpg = gpg + self.results = [] + self.fingerprints = [] + for result in self.counts: + setattr(self, result, 0) + + def __nonzero__(self): + if self.not_imported: return False + if not self.fingerprints: return False + return True + + __bool__ = __nonzero__ + + ok_reason = { + '0': 'Not actually changed', + '1': 'Entirely new key', + '2': 'New user IDs', + '4': 'New signatures', + '8': 'New subkeys', + '16': 'Contains private key', + } + + problem_reason = { + '0': 'No specific reason given', + '1': 'Invalid Certificate', + '2': 'Issuer Certificate missing', + '3': 'Certificate Chain too long', + '4': 'Error storing certificate', + } + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key in ("IMPORTED", "KEY_CONSIDERED"): + # this duplicates info we already see in import_ok & import_problem + pass + elif key == "NODATA": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'No valid data found'}) + elif key == "IMPORT_OK": + reason, fingerprint = value.split() + reasons = [] + for code, text in list(self.ok_reason.items()): + if int(reason) | int(code) == int(reason): + reasons.append(text) + reasontext = '\n'.join(reasons) + "\n" + self.results.append({'fingerprint': fingerprint, + 'ok': reason, 'text': reasontext}) + self.fingerprints.append(fingerprint) + elif key == "IMPORT_PROBLEM": # pragma: no cover + try: + reason, fingerprint = value.split() + except: + reason = value + fingerprint = '' + self.results.append({'fingerprint': fingerprint, + 'problem': reason, 'text': self.problem_reason[reason]}) + elif key == "IMPORT_RES": + import_res = value.split() + for i, count in enumerate(self.counts): + setattr(self, count, int(import_res[i])) + elif key == "KEYEXPIRED": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Key 
expired'}) + elif key == "SIGEXPIRED": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Signature expired'}) + elif key == "FAILURE": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Other failure'}) + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + + def summary(self): + result = [] + result.append('%d imported' % self.imported) + if self.not_imported: # pragma: no cover + result.append('%d not imported' % self.not_imported) + return ', '.join(result) + +ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I) +BASIC_ESCAPES = { + r'\n': '\n', + r'\r': '\r', + r'\f': '\f', + r'\v': '\v', + r'\b': '\b', + r'\0': '\0', +} + +class SendResult(object): + def __init__(self, gpg): + self.gpg = gpg + + def handle_status(self, key, value): + logger.debug('SendResult: %s: %s', key, value) + +def _set_fields(target, fieldnames, args): + for i, var in enumerate(fieldnames): + if i < len(args): + target[var] = args[i] + else: + target[var] = 'unavailable' + +class SearchKeys(list): + ''' Handle status messages for --search-keys. + + Handle pub and uid (relating the latter to the former). + + Don't care about the rest + ''' + + UID_INDEX = 1 + FIELDS = 'type keyid algo length date expires'.split() + + def __init__(self, gpg): + self.gpg = gpg + self.curkey = None + self.fingerprints = [] + self.uids = [] + + def get_fields(self, args): + result = {} + _set_fields(result, self.FIELDS, args) + result['uids'] = [] + result['sigs'] = [] + return result + + def pub(self, args): + self.curkey = curkey = self.get_fields(args) + self.append(curkey) + + def uid(self, args): + uid = args[self.UID_INDEX] + uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid) + for k, v in BASIC_ESCAPES.items(): + uid = uid.replace(k, v) + self.curkey['uids'].append(uid) + self.uids.append(uid) + + def handle_status(self, key, value): # pragma: no cover + pass + +class ListKeys(SearchKeys): + ''' Handle status messages for --list-keys, --list-sigs. + + Handle pub and uid (relating the latter to the former). + + Don't care about (info from src/DETAILS): + + crt = X.509 certificate + crs = X.509 certificate and private key available + uat = user attribute (same as user id except for field 10). 
+ sig = signature + rev = revocation signature + pkd = public key data (special field format, see below) + grp = reserved for gpgsm + rvk = revocation key + ''' + + UID_INDEX = 9 + FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig cap issuer flag token hash curve compliance updated origin'.split() + + def __init__(self, gpg): + super(ListKeys, self).__init__(gpg) + self.in_subkey = False + self.key_map = {} + + def key(self, args): + self.curkey = curkey = self.get_fields(args) + if curkey['uid']: + curkey['uids'].append(curkey['uid']) + del curkey['uid'] + curkey['subkeys'] = [] + self.append(curkey) + self.in_subkey = False + + pub = sec = key + + def fpr(self, args): + fp = args[9] + if fp in self.key_map and self.gpg.check_fingerprint_collisions: # pragma: no cover + raise ValueError('Unexpected fingerprint collision: %s' % fp) + if not self.in_subkey: + self.curkey['fingerprint'] = fp + self.fingerprints.append(fp) + self.key_map[fp] = self.curkey + else: + self.curkey['subkeys'][-1].append(fp) + self.key_map[fp] = self.curkey + + def _collect_subkey_info(self, curkey, args): + info_map = curkey.setdefault('subkey_info', {}) + info = {} + _set_fields(info, self.FIELDS, args) + info_map[args[4]] = info + + def sub(self, args): + # See issue #81. We create a dict with more information about + # subkeys, but for backward compatibility reason, have to add it in + # as a separate entry 'subkey_info' + subkey = [args[4], args[11]] # keyid, type + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + + def ssb(self, args): + subkey = [args[4], None] # keyid, type + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + + def sig(self, args): + # keyid, uid, sigclass + self.curkey['sigs'].append((args[4], args[9], args[10])) + +class ScanKeys(ListKeys): + ''' Handle status messages for --with-fingerprint.''' + + def sub(self, args): + # --with-fingerprint --with-colons somehow outputs fewer colons, + # use the last value args[-1] instead of args[11] + subkey = [args[4], args[-1]] + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + +class TextHandler(object): + def _as_text(self): + return self.data.decode(self.gpg.encoding, self.gpg.decode_errors) + + if _py3k: + __str__ = _as_text + else: + __unicode__ = _as_text + + def __str__(self): + return self.data + + +class Crypt(Verify, TextHandler): + "Handle status messages for --encrypt and --decrypt" + def __init__(self, gpg): + Verify.__init__(self, gpg) + self.data = '' + self.ok = False + self.status = '' + self.key_id = None + + def __nonzero__(self): + if self.ok: return True + return False + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key == "NODATA": + self.status = "no data was provided" + elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE", + "MISSING_PASSPHRASE", "DECRYPTION_FAILED", + "KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"): + self.status = key.replace("_", " ").lower() + elif key == "NEED_PASSPHRASE_SYM": + self.status = 'need symmetric passphrase' + elif key == "BEGIN_DECRYPTION": + self.status = 'decryption incomplete' + elif key == "BEGIN_ENCRYPTION": + self.status = 'encryption incomplete' + elif key == "DECRYPTION_OKAY": + self.status = 'decryption ok' + self.ok = True + elif key == 
"END_ENCRYPTION": + self.status = 'encryption ok' + self.ok = True + elif key == "INV_RECP": # pragma: no cover + self.status = 'invalid recipient' + elif key == "KEYEXPIRED": # pragma: no cover + self.status = 'key expired' + elif key == "SIG_CREATED": # pragma: no cover + self.status = 'sig created' + elif key == "SIGEXPIRED": # pragma: no cover + self.status = 'sig expired' + elif key == "ENC_TO": # pragma: no cover + # ENC_TO + self.key_id = value.split(' ', 1)[0] + elif key in ("USERID_HINT", "GOODMDC", + "END_DECRYPTION", "CARDCTRL", "BADMDC", + "SC_OP_FAILURE", "SC_OP_SUCCESS", + "PINENTRY_LAUNCHED", "KEY_CONSIDERED"): + pass + else: + Verify.handle_status(self, key, value) + +class GenKey(object): + "Handle status messages for --gen-key" + def __init__(self, gpg): + self.gpg = gpg + self.type = None + self.fingerprint = None + + def __nonzero__(self): + if self.fingerprint: return True + return False + + __bool__ = __nonzero__ + + def __str__(self): + return self.fingerprint or '' + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): # pragma: no cover + logger.warning('potential problem: %s: %s', key, value) + elif key == "KEY_CREATED": + (self.type,self.fingerprint) = value.split() + elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"): + pass + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +class ExportResult(GenKey): + """Handle status messages for --export[-secret-key]. + + For now, just use an existing class to base it on - if needed, we + can override handle_status for more specific message handling. + """ + def handle_status(self, key, value): + if key in ("EXPORTED", "EXPORT_RES"): + pass + else: + super(ExportResult, self).handle_status(key, value) + +class DeleteResult(object): + "Handle status messages for --delete-key and --delete-secret-key" + def __init__(self, gpg): + self.gpg = gpg + self.status = 'ok' + + def __str__(self): + return self.status + + problem_reason = { + '1': 'No such key', + '2': 'Must delete secret key first', + '3': 'Ambiguous specification', + } + + def handle_status(self, key, value): + if key == "DELETE_PROBLEM": # pragma: no cover + self.status = self.problem_reason.get(value, + "Unknown error: %r" % value) + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + + def __nonzero__(self): + return self.status == 'ok' + + __bool__ = __nonzero__ + + +class TrustResult(DeleteResult): + pass + + +class Sign(TextHandler): + "Handle status messages for --sign" + def __init__(self, gpg): + self.gpg = gpg + self.type = None + self.hash_algo = None + self.fingerprint = None + self.status = None + self.key_id = None + self.username = None + + def __nonzero__(self): + return self.fingerprint is not None + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR", "FAILURE"): # pragma: no cover + logger.warning('potential problem: %s: %s', key, value) + elif key in ("KEYEXPIRED", "SIGEXPIRED"): # pragma: no cover + self.status = 'key expired' + elif key == "KEYREVOKED": # pragma: no cover + self.status = 'key revoked' + elif key == "SIG_CREATED": + (self.type, + algo, self.hash_algo, cls, self.timestamp, self.fingerprint + ) = value.split() + self.status = 'signature created' + elif key == "USERID_HINT": # pragma: no cover + self.key_id, self.username = value.split(' ', 1) + elif key == "BAD_PASSPHRASE": + self.status = 'bad passphrase' + elif key in ("NEED_PASSPHRASE", "GOOD_PASSPHRASE", "BEGIN_SIGNING"): + pass + else: # 
pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +VERSION_RE = re.compile(r'gpg \(GnuPG(?:/MacGPG2)?\) (\d+(\.\d+)*)'.encode('ascii'), re.I) +HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I) + +class GPG(object): + + error_map = None + + decode_errors = 'strict' + + result_map = { + 'crypt': Crypt, + 'delete': DeleteResult, + 'generate': GenKey, + 'import': ImportResult, + 'send': SendResult, + 'list': ListKeys, + 'scan': ScanKeys, + 'search': SearchKeys, + 'sign': Sign, + 'trust': TrustResult, + 'verify': Verify, + 'export': ExportResult, + } + + "Encapsulate access to the gpg executable" + def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False, + use_agent=False, keyring=None, options=None, + secret_keyring=None): + """Initialize a GPG process wrapper. Options are: + + gpgbinary -- full pathname for GPG binary. + + gnupghome -- full pathname to where we can find the public and + private keyrings. Default is whatever gpg defaults to. + keyring -- name of alternative keyring file to use, or list of such + keyrings. If specified, the default keyring is not used. + options =-- a list of additional options to pass to the GPG binary. + secret_keyring -- name of alternative secret keyring file to use, or + list of such keyrings. + """ + self.gpgbinary = gpgbinary + self.gnupghome = gnupghome + # issue 112: fail if the specified value isn't a directory + if gnupghome and not os.path.isdir(gnupghome): + raise ValueError('gnupghome should be a directory (it isn\'t): %s' % gnupghome) + if keyring: + # Allow passing a string or another iterable. Make it uniformly + # a list of keyring filenames + if isinstance(keyring, string_types): + keyring = [keyring] + self.keyring = keyring + if secret_keyring: + # Allow passing a string or another iterable. Make it uniformly + # a list of keyring filenames + if isinstance(secret_keyring, string_types): + secret_keyring = [secret_keyring] + self.secret_keyring = secret_keyring + self.verbose = verbose + self.use_agent = use_agent + if isinstance(options, str): # pragma: no cover + options = [options] + self.options = options + self.on_data = None # or a callable - will be called with data chunks + # Changed in 0.3.7 to use Latin-1 encoding rather than + # locale.getpreferredencoding falling back to sys.stdin.encoding + # falling back to utf-8, because gpg itself uses latin-1 as the default + # encoding. + self.encoding = 'latin-1' + if gnupghome and not os.path.isdir(self.gnupghome): + os.makedirs(self.gnupghome,0x1C0) + try: + p = self._open_subprocess(["--version"]) + except OSError: + msg = 'Unable to run gpg (%s) - it may not be available.' % self.gpgbinary + logger.exception(msg) + raise OSError(msg) + result = self.result_map['verify'](self) # any result will do for this + self._collect_output(p, result, stdin=p.stdin) + if p.returncode != 0: # pragma: no cover + raise ValueError("Error invoking gpg: %s: %s" % (p.returncode, + result.stderr)) + m = VERSION_RE.match(result.data) + if not m: # pragma: no cover + self.version = None + else: + dot = '.'.encode('ascii') + self.version = tuple([int(s) for s in m.groups()[0].split(dot)]) + + # See issue #97. It seems gpg allow duplicate keys in keyrings, so we + # can't be too strict. + self.check_fingerprint_collisions = False + + def make_args(self, args, passphrase): + """ + Make a list of command line elements for GPG. The value of ``args`` + will be appended. The ``passphrase`` argument needs to be True if + a passphrase will be sent to GPG, else False. 
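+
+        When a passphrase is expected and GnuPG >= 2.1 is in use,
+        '--pinentry-mode loopback' is also inserted so the passphrase can be
+        read from a file descriptor instead of an interactive pinentry.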
+ """ + cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty', '--no-verbose'] + if 'DEBUG_IPC' in os.environ: + cmd.extend(['--debug', 'ipc']) + if passphrase and hasattr(self, 'version'): + if self.version >= (2, 1): + cmd[1:1] = ['--pinentry-mode', 'loopback'] + cmd.extend(['--fixed-list-mode', '--batch', '--with-colons']) + if self.gnupghome: + cmd.extend(['--homedir', no_quote(self.gnupghome)]) + if self.keyring: + cmd.append('--no-default-keyring') + for fn in self.keyring: + cmd.extend(['--keyring', no_quote(fn)]) + if self.secret_keyring: + for fn in self.secret_keyring: + cmd.extend(['--secret-keyring', no_quote(fn)]) + if passphrase: + cmd.extend(['--passphrase-fd', '0']) + if self.use_agent: # pragma: no cover + cmd.append('--use-agent') + if self.options: + cmd.extend(self.options) + cmd.extend(args) + return cmd + + def _open_subprocess(self, args, passphrase=False): + # Internal method: open a pipe to a GPG subprocess and return + # the file objects for communicating with it. + + # def debug_print(cmd): + # result = [] + # for c in cmd: + # if ' ' not in c: + # result.append(c) + # else: + # if '"' not in c: + # result.append('"%s"' % c) + # elif "'" not in c: + # result.append("'%s'" % c) + # else: + # result.append(c) # give up + # return ' '.join(cmd) + from subprocess import list2cmdline as debug_print + + cmd = self.make_args(args, passphrase) + if self.verbose: # pragma: no cover + print(debug_print(cmd)) + if not STARTUPINFO: + si = None + else: # pragma: no cover + si = STARTUPINFO() + si.dwFlags = STARTF_USESHOWWINDOW + si.wShowWindow = SW_HIDE + result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE, + startupinfo=si) + logger.debug("%s: %s", result.pid, debug_print(cmd)) + return result + + def _read_response(self, stream, result): + # Internal method: reads all the stderr output from GPG, taking notice + # only of lines that begin with the magic [GNUPG:] prefix. + # + # Calls methods on the response object for each valid token found, + # with the arg being the remainder of the status line. + lines = [] + while True: + line = stream.readline() + if len(line) == 0: + break + lines.append(line) + line = line.rstrip() + if self.verbose: # pragma: no cover + print(line) + logger.debug("%s", line) + if line[0:9] == '[GNUPG:] ': + # Chop off the prefix + line = line[9:] + L = line.split(None, 1) + keyword = L[0] + if len(L) > 1: + value = L[1] + else: + value = "" + result.handle_status(keyword, value) + result.stderr = ''.join(lines) + + def _read_data(self, stream, result, on_data=None): + # Read the contents of the file from GPG's stdout + chunks = [] + while True: + data = stream.read(1024) + if len(data) == 0: + if on_data: + on_data(data) + break + logger.debug("chunk: %r" % data[:256]) + append = True + if on_data: + append = on_data(data) != False + if append: + chunks.append(data) + if _py3k: + # Join using b'' or '', as appropriate + result.data = type(data)().join(chunks) + else: + result.data = ''.join(chunks) + + def _collect_output(self, process, result, writer=None, stdin=None): + """ + Drain the subprocesses output streams, writing the collected output + to the result. If a writer thread (writing to the subprocess) is given, + make sure it's joined before returning. If a stdin stream is given, + close it before returning. 
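+
+        A non-zero exit code from gpg is only logged as a warning here;
+        callers are expected to inspect the result object for errors.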
+ """ + stderr = codecs.getreader(self.encoding)(process.stderr) + rr = threading.Thread(target=self._read_response, args=(stderr, result)) + rr.setDaemon(True) + logger.debug('stderr reader: %r', rr) + rr.start() + + stdout = process.stdout + dr = threading.Thread(target=self._read_data, args=(stdout, result, + self.on_data)) + dr.setDaemon(True) + logger.debug('stdout reader: %r', dr) + dr.start() + + dr.join() + rr.join() + if writer is not None: + writer.join() + process.wait() + rc = process.returncode + if rc != 0: + logger.warning('gpg returned a non-zero error code: %d', rc) + if stdin is not None: + try: + stdin.close() + except IOError: # pragma: no cover + pass + stderr.close() + stdout.close() + + def _handle_io(self, args, fileobj, result, passphrase=None, binary=False): + "Handle a call to GPG - pass input data, collect output data" + # Handle a basic data call - pass data to GPG, handle the output + # including status information. Garbage In, Garbage Out :) + p = self._open_subprocess(args, passphrase is not None) + if not binary: # pragma: no cover + stdin = codecs.getwriter(self.encoding)(p.stdin) + else: + stdin = p.stdin + if passphrase: + _write_passphrase(stdin, passphrase, self.encoding) + writer = _threaded_copy_data(fileobj, stdin) + self._collect_output(p, result, writer, stdin) + return result + + # + # SIGNATURE METHODS + # + def sign(self, message, **kwargs): + """sign message""" + f = _make_binary_stream(message, self.encoding) + result = self.sign_file(f, **kwargs) + f.close() + return result + + def set_output_without_confirmation(self, args, output): + "If writing to a file which exists, avoid a confirmation message." + if os.path.exists(output): + # We need to avoid an overwrite confirmation message + args.extend(['--yes']) + args.extend(['--output', no_quote(output)]) + + def is_valid_passphrase(self, passphrase): + """ + Confirm that the passphrase doesn't contain newline-type characters - + it is passed in a pipe to gpg, and so not checking could lead to + spoofing attacks by passing arbitrary text after passphrase and newline. + """ + return ('\n' not in passphrase and '\r' not in passphrase and + '\x00' not in passphrase) + + def sign_file(self, file, keyid=None, passphrase=None, clearsign=True, + detach=False, binary=False, output=None, extra_args=None): + """sign file""" + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + logger.debug("sign_file: %s", file) + if binary: # pragma: no cover + args = ['-s'] + else: + args = ['-sa'] + # You can't specify detach-sign and clearsign together: gpg ignores + # the detach-sign in that case. + if detach: + args.append("--detach-sign") + elif clearsign: + args.append("--clearsign") + if keyid: + args.extend(['--default-key', no_quote(keyid)]) + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + + if extra_args: + args.extend(extra_args) + result = self.result_map['sign'](self) + #We could use _handle_io here except for the fact that if the + #passphrase is bad, gpg bails and you can't write the message. 
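+        #So we open the subprocess directly, write the passphrase to its
+        #stdin first, then stream the message from a writer thread.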
+ p = self._open_subprocess(args, passphrase is not None) + try: + stdin = p.stdin + if passphrase: + _write_passphrase(stdin, passphrase, self.encoding) + writer = _threaded_copy_data(file, stdin) + except IOError: # pragma: no cover + logging.exception("error writing message") + writer = None + self._collect_output(p, result, writer, stdin) + return result + + def verify(self, data, **kwargs): + """Verify the signature on the contents of the string 'data' + + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> key = gpg.gen_key(input) + >>> assert key + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar') + >>> assert not sig + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo') + >>> assert sig + >>> verify = gpg.verify(sig.data) + >>> assert verify + + """ + f = _make_binary_stream(data, self.encoding) + result = self.verify_file(f, **kwargs) + f.close() + return result + + def verify_file(self, file, data_filename=None, close_file=True, extra_args=None): + "Verify the signature on the contents of the file-like object 'file'" + logger.debug('verify_file: %r, %r', file, data_filename) + result = self.result_map['verify'](self) + args = ['--verify'] + if extra_args: + args.extend(extra_args) + if data_filename is None: + self._handle_io(args, file, result, binary=True) + else: + logger.debug('Handling detached verification') + import tempfile + fd, fn = tempfile.mkstemp(prefix='pygpg') + s = file.read() + if close_file: + file.close() + logger.debug('Wrote to temp file: %r', s) + os.write(fd, s) + os.close(fd) + args.append(no_quote(fn)) + args.append(no_quote(data_filename)) + try: + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + finally: + os.unlink(fn) + return result + + def verify_data(self, sig_filename, data, extra_args=None): + "Verify the signature in sig_filename against data in memory" + logger.debug('verify_data: %r, %r ...', sig_filename, data[:16]) + result = self.result_map['verify'](self) + args = ['--verify'] + if extra_args: + args.extend(extra_args) + args.extend([no_quote(sig_filename), '-']) + stream = _make_memory_stream(data) + self._handle_io(args, stream, result, binary=True) + return result + + # + # KEY MANAGEMENT + # + + def import_keys(self, key_data, extra_args=None): + """ + Import the key_data into our keyring. 
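+
+        Returns an ImportResult; its 'count' and 'fingerprints' attributes
+        describe what was imported.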
+ """ + result = self.result_map['import'](self) + logger.debug('import_keys: %r', key_data[:256]) + data = _make_binary_stream(key_data, self.encoding) + args = ['--import'] + if extra_args: + args.extend(extra_args) + self._handle_io(args, data, result, binary=True) + logger.debug('import_keys result: %r', result.__dict__) + data.close() + return result + + def recv_keys(self, keyserver, *keyids): + """Import a key from a keyserver + + >>> import shutil + >>> shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> os.chmod('keys', 0x1C0) + >>> result = gpg.recv_keys('pgp.mit.edu', '92905378') + >>> if 'NO_EXTERNAL_TESTS' not in os.environ: assert result + + """ + result = self.result_map['import'](self) + logger.debug('recv_keys: %r', keyids) + data = _make_binary_stream("", self.encoding) + #data = "" + args = ['--keyserver', no_quote(keyserver), '--recv-keys'] + args.extend([no_quote(k) for k in keyids]) + self._handle_io(args, data, result, binary=True) + logger.debug('recv_keys result: %r', result.__dict__) + data.close() + return result + + def send_keys(self, keyserver, *keyids): + """Send a key to a keyserver. + + Note: it's not practical to test this function without sending + arbitrary data to live keyservers. + """ + result = self.result_map['send'](self) + logger.debug('send_keys: %r', keyids) + data = _make_binary_stream('', self.encoding) + #data = "" + args = ['--keyserver', no_quote(keyserver), '--send-keys'] + args.extend([no_quote(k) for k in keyids]) + self._handle_io(args, data, result, binary=True) + logger.debug('send_keys result: %r', result.__dict__) + data.close() + return result + + def delete_keys(self, fingerprints, secret=False, passphrase=None, + expect_passphrase=True): + """ + Delete the indicated keys. + + Since GnuPG 2.1, you can't delete secret keys without providing a + passphrase. However, if you're expecting the passphrase to go to gpg + via pinentry, you should specify expect_passphrase=False. (It's only + checked for GnuPG >= 2.1). + """ + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + which='key' + if secret: # pragma: no cover + if (self.version >= (2, 1) and passphrase is None and + expect_passphrase): + raise ValueError('For GnuPG >= 2.1, deleting secret keys ' + 'needs a passphrase to be provided') + which='secret-key' + if _is_sequence(fingerprints): # pragma: no cover + fingerprints = [no_quote(s) for s in fingerprints] + else: + fingerprints = [no_quote(fingerprints)] + args = ['--delete-%s' % which] + if secret and self.version >= (2, 1): + args.insert(0, '--yes') + args.extend(fingerprints) + result = self.result_map['delete'](self) + if not secret or self.version < (2, 1): + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + else: + # Need to send in a passphrase. + f = _make_binary_stream('', self.encoding) + try: + self._handle_io(args, f, result, passphrase=passphrase, + binary=True) + finally: + f.close() + return result + + def export_keys(self, keyids, secret=False, armor=True, minimal=False, + passphrase=None, expect_passphrase=True): + """ + Export the indicated keys. A 'keyid' is anything gpg accepts. + + Since GnuPG 2.1, you can't export secret keys without providing a + passphrase. 
However, if you're expecting the passphrase to go to gpg + via pinentry, you should specify expect_passphrase=False. (It's only + checked for GnuPG >= 2.1). + """ + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + which='' + if secret: + which='-secret-key' + if (self.version >= (2, 1) and passphrase is None and + expect_passphrase): + raise ValueError('For GnuPG >= 2.1, exporting secret keys ' + 'needs a passphrase to be provided') + if _is_sequence(keyids): + keyids = [no_quote(k) for k in keyids] + else: + keyids = [no_quote(keyids)] + args = ['--export%s' % which] + if armor: + args.append('--armor') + if minimal: # pragma: no cover + args.extend(['--export-options','export-minimal']) + args.extend(keyids) + # gpg --export produces no status-fd output; stdout will be + # empty in case of failure + #stdout, stderr = p.communicate() + result = self.result_map['export'](self) + if not secret or self.version < (2, 1): + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + else: + # Need to send in a passphrase. + f = _make_binary_stream('', self.encoding) + try: + self._handle_io(args, f, result, passphrase=passphrase, + binary=True) + finally: + f.close() + logger.debug('export_keys result: %r', result.data) + # Issue #49: Return bytes if armor not specified, else text + result = result.data + if armor: + result = result.decode(self.encoding, self.decode_errors) + return result + + def _get_list_output(self, p, kind): + # Get the response information + result = self.result_map[kind](self) + self._collect_output(p, result, stdin=p.stdin) + lines = result.data.decode(self.encoding, + self.decode_errors).splitlines() + valid_keywords = 'pub uid sec fpr sub ssb sig'.split() + for line in lines: + if self.verbose: # pragma: no cover + print(line) + logger.debug("line: %r", line.rstrip()) + if not line: # pragma: no cover + break + L = line.strip().split(':') + if not L: # pragma: no cover + continue + keyword = L[0] + if keyword in valid_keywords: + getattr(result, keyword)(L) + return result + + def list_keys(self, secret=False, keys=None, sigs=False): + """ list the keys currently in the keyring + + >>> import shutil + >>> shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> result = gpg.gen_key(input) + >>> fp1 = result.fingerprint + >>> result = gpg.gen_key(input) + >>> fp2 = result.fingerprint + >>> pubkeys = gpg.list_keys() + >>> assert fp1 in pubkeys.fingerprints + >>> assert fp2 in pubkeys.fingerprints + + """ + + if sigs: + which = 'sigs' + else: + which = 'keys' + if secret: + which='secret-keys' + args = ['--list-%s' % which, + '--fingerprint', '--fingerprint'] # get subkey FPs, too + if keys: + if isinstance(keys, string_types): + keys = [keys] + args.extend(keys) + p = self._open_subprocess(args) + return self._get_list_output(p, 'list') + + def scan_keys(self, filename): + """ + List details of an ascii armored or binary key file + without first importing it to the local keyring. 
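+
+        A minimal usage sketch (``somekeys.asc`` is an illustrative path)::
+
+            gpg = GPG(gnupghome='keys')
+            keys = gpg.scan_keys('somekeys.asc')
+            # keys.fingerprints lists what the file contains; nothing is
+            # added to the keyring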
+ + The function achieves this on modern GnuPG by running: + + $ gpg --dry-run --import-options import-show --import + + On older versions, it does the *much* riskier: + + $ gpg --with-fingerprint --with-colons filename + """ + if self.version >= (2, 1): + args = ['--dry-run', '--import-options', 'import-show', '--import'] + else: + logger.warning('Trying to list packets, but if the file is not a ' + 'keyring, might accidentally decrypt') + args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode'] + args.append(no_quote(filename)) + p = self._open_subprocess(args) + return self._get_list_output(p, 'scan') + + def search_keys(self, query, keyserver='pgp.mit.edu'): + """ search keyserver by query (using --search-keys option) + + >>> import shutil + >>> shutil.rmtree('keys', ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> os.chmod('keys', 0x1C0) + >>> result = gpg.search_keys('') + >>> if 'NO_EXTERNAL_TESTS' not in os.environ: assert result, 'Failed using default keyserver' + >>> #keyserver = 'keyserver.ubuntu.com' + >>> #result = gpg.search_keys('', keyserver) + >>> #assert result, 'Failed using keyserver.ubuntu.com' + + """ + query = query.strip() + if HEX_DIGITS_RE.match(query): + query = '0x' + query + args = ['--fingerprint', + '--keyserver', no_quote(keyserver), '--search-keys', + no_quote(query)] + p = self._open_subprocess(args) + + # Get the response information + result = self.result_map['search'](self) + self._collect_output(p, result, stdin=p.stdin) + lines = result.data.decode(self.encoding, + self.decode_errors).splitlines() + valid_keywords = ['pub', 'uid'] + for line in lines: + if self.verbose: # pragma: no cover + print(line) + logger.debug('line: %r', line.rstrip()) + if not line: # sometimes get blank lines on Windows + continue + L = line.strip().split(':') + if not L: # pragma: no cover + continue + keyword = L[0] + if keyword in valid_keywords: + getattr(result, keyword)(L) + return result + + def gen_key(self, input): + """Generate a key; you might use gen_key_input() to create the + control input. 
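+        The control input is the batch text described in gpg's doc/DETAILS;
+        with only a passphrase supplied, gen_key_input() emits text along
+        these lines (Name-Email is derived from $LOGNAME and the hostname)::
+
+            Key-Type: RSA
+            Key-Length: 2048
+            Name-Real: Autogenerated Key
+            Name-Email: user@hostname
+            Passphrase: foo
+            %commit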
+ + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> result = gpg.gen_key(input) + >>> assert result + >>> result = gpg.gen_key('foo') + >>> assert not result + + """ + args = ["--gen-key"] + result = self.result_map['generate'](self) + f = _make_binary_stream(input, self.encoding) + self._handle_io(args, f, result, binary=True) + f.close() + return result + + def gen_key_input(self, **kwargs): + """ + Generate --gen-key input per gpg doc/DETAILS + """ + parms = {} + for key, val in list(kwargs.items()): + key = key.replace('_','-').title() + if str(val).strip(): # skip empty strings + parms[key] = val + parms.setdefault('Key-Type','RSA') + if 'key_curve' not in kwargs: + parms.setdefault('Key-Length',2048) + parms.setdefault('Name-Real', "Autogenerated Key") + logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or + 'unspecified') + hostname = socket.gethostname() + parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'), + hostname)) + out = "Key-Type: %s\n" % parms.pop('Key-Type') + for key, val in list(parms.items()): + out += "%s: %s\n" % (key, val) + out += "%commit\n" + return out + + # Key-Type: RSA + # Key-Length: 1024 + # Name-Real: ISdlink Server on %s + # Name-Comment: Created by %s + # Name-Email: isdlink@%s + # Expire-Date: 0 + # %commit + # + # + # Key-Type: DSA + # Key-Length: 1024 + # Subkey-Type: ELG-E + # Subkey-Length: 1024 + # Name-Real: Joe Tester + # Name-Comment: with stupid passphrase + # Name-Email: joe@foo.bar + # Expire-Date: 0 + # Passphrase: abc + # %pubring foo.pub + # %secring foo.sec + # %commit + + # + # ENCRYPTION + # + def encrypt_file(self, file, recipients, sign=None, + always_trust=False, passphrase=None, + armor=True, output=None, symmetric=False, extra_args=None): + "Encrypt the message read from the file-like object 'file'" + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + args = ['--encrypt'] + if symmetric: + # can't be False or None - could be True or a cipher algo value + # such as AES256 + args = ['--symmetric'] + if symmetric is not True: + args.extend(['--cipher-algo', no_quote(symmetric)]) + # else use the default, currently CAST5 + else: + if not recipients: + raise ValueError('No recipients specified with asymmetric ' + 'encryption') + if not _is_sequence(recipients): + recipients = (recipients,) + for recipient in recipients: + args.extend(['--recipient', no_quote(recipient)]) + if armor: # create ascii-armored output - False for binary output + args.append('--armor') + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + if sign is True: # pragma: no cover + args.append('--sign') + elif sign: # pragma: no cover + args.extend(['--sign', '--default-key', no_quote(sign)]) + if always_trust: # pragma: no cover + args.append('--always-trust') + if extra_args: + args.extend(extra_args) + result = self.result_map['crypt'](self) + self._handle_io(args, file, result, passphrase=passphrase, binary=True) + logger.debug('encrypt result: %r', result.data) + return result + + def encrypt(self, data, recipients, **kwargs): + """Encrypt the message contained in the string 'data' + + >>> import shutil + >>> if os.path.exists("keys"): + ... 
shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1') + >>> result = gpg.gen_key(input) + >>> fp1 = result.fingerprint + >>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2') + >>> result = gpg.gen_key(input) + >>> fp2 = result.fingerprint + >>> result = gpg.encrypt("hello",fp2) + >>> message = str(result) + >>> assert message != 'hello' + >>> result = gpg.decrypt(message, passphrase='pp2') + >>> assert result + >>> str(result) + 'hello' + >>> result = gpg.encrypt("hello again", fp1) + >>> message = str(result) + >>> result = gpg.decrypt(message, passphrase='bar') + >>> result.status in ('decryption failed', 'bad passphrase') + True + >>> assert not result + >>> result = gpg.decrypt(message, passphrase='pp1') + >>> result.status == 'decryption ok' + True + >>> str(result) + 'hello again' + >>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1') + >>> result.status == 'encryption ok' + True + >>> message = str(result) + >>> result = gpg.decrypt(message, passphrase='pp2') + >>> result.status == 'decryption ok' + True + >>> assert result.fingerprint == fp1 + + """ + data = _make_binary_stream(data, self.encoding) + result = self.encrypt_file(data, recipients, **kwargs) + data.close() + return result + + def decrypt(self, message, **kwargs): + data = _make_binary_stream(message, self.encoding) + result = self.decrypt_file(data, **kwargs) + data.close() + return result + + def decrypt_file(self, file, always_trust=False, passphrase=None, + output=None, extra_args=None): + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + args = ["--decrypt"] + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + if always_trust: # pragma: no cover + args.append("--always-trust") + if extra_args: + args.extend(extra_args) + result = self.result_map['crypt'](self) + self._handle_io(args, file, result, passphrase, binary=True) + logger.debug('decrypt result: %r', result.data) + return result + + def trust_keys(self, fingerprints, trustlevel): + levels = Verify.TRUST_LEVELS + if trustlevel not in levels: + poss = ', '.join(sorted(levels)) + raise ValueError('Invalid trust level: "%s" (must be one of %s)' % + (trustlevel, poss)) + trustlevel = levels[trustlevel] + 2 + import tempfile + try: + fd, fn = tempfile.mkstemp() + lines = [] + if isinstance(fingerprints, string_types): + fingerprints = [fingerprints] + for f in fingerprints: + lines.append('%s:%s:' % (f, trustlevel)) + # The trailing newline is required! 
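+            # Each line has the form <fingerprint>:<value>:, where <value>
+            # is the TRUST_LEVELS index plus 2, i.e. the ownertrust scale
+            # that --import-ownertrust expects.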
+ s = os.linesep.join(lines) + os.linesep + logger.debug('writing ownertrust info: %s', s); + os.write(fd, s.encode(self.encoding)) + os.close(fd) + result = self.result_map['trust'](self) + p = self._open_subprocess(['--import-ownertrust', fn]) + self._collect_output(p, result, stdin=p.stdin) + if p.returncode != 0: + raise ValueError('gpg returned an error - return code %d' % + p.returncode) + finally: + os.remove(fn) + return result diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py b/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py new file mode 100644 index 000000000..9de26fa92 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py @@ -0,0 +1,53 @@ +import platform +import sys +from collections import OrderedDict + +import yaml as pyyaml + + +_items = "viewitems" if sys.version_info < (3,) else "items" +_std_dict_is_order_preserving = sys.version_info >= (3, 7) or ( + sys.version_info >= (3, 6) and platform.python_implementation() == "CPython" +) + + +def map_representer(dumper, data): + return dumper.represent_dict(getattr(data, _items)()) + + +def map_constructor(loader, node): + loader.flatten_mapping(node) + pairs = loader.construct_pairs(node) + try: + return OrderedDict(pairs) + except TypeError: + loader.construct_mapping(node) # trigger any contextual error + raise + + +_loaders = [getattr(pyyaml.loader, x) for x in pyyaml.loader.__all__] +_dumpers = [getattr(pyyaml.dumper, x) for x in pyyaml.dumper.__all__] +try: + _cyaml = pyyaml.cyaml.__all__ +except AttributeError: + pass +else: + _loaders += [getattr(pyyaml.cyaml, x) for x in _cyaml if x.endswith("Loader")] + _dumpers += [getattr(pyyaml.cyaml, x) for x in _cyaml if x.endswith("Dumper")] + +Dumper = None +for Dumper in _dumpers: + pyyaml.add_representer(dict, map_representer, Dumper=Dumper) + pyyaml.add_representer(OrderedDict, map_representer, Dumper=Dumper) + +Loader = None +if not _std_dict_is_order_preserving: + for Loader in _loaders: + pyyaml.add_constructor("tag:yaml.org,2002:map", map_constructor, Loader=Loader) + + +# Merge PyYAML namespace into ours. 
+# This allows users a drop-in replacement: +# import oyaml as yaml +del map_constructor, map_representer, Loader, Dumper +from yaml import * diff --git a/insights/client/apps/ansible/playbook_verifier/public.gpg b/insights/client/apps/ansible/playbook_verifier/public.gpg new file mode 100644 index 000000000..82bd7cc13 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/public.gpg @@ -0,0 +1,18 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBGAbFTwBCACpkvYKGGck/pmpc5fN7b+BGHIxQujxxxQsm+iGhNiOvvdI15W+ +xkzQH/NdciShqwt5KmgGVWK4OV8MbdT2PQZ89K3RA0Eh+QYZ7GANlpnLTE2oYeO+ +2thNLWf7HyL8y+Bh4R/freAU3Tnncw2n9BkS/3HYs5i7ZWxoYs1uLC54wqmQLnXC +0qRZWO9O8p0qE3sPXQj97PRvqi1vf+fuIk8E7ZqxRzYA2M2YMXCOTwkPsNmUgAcp +vTS5MwKWHwI6TaJRjvQaam37tRjGuNdqFESt/Ve61ax3ggf+krZAvoAEmDNhlvRX +zDizemSZN5KwJGRKUwolmHTWn2LvEg/aKGUjABEBAAG0J0F1dG9nZW5lcmF0ZWQg +S2V5IDxleGFtcGxlQGV4YW1wbGUuY29tPokBTgQTAQgAOBYhBALLuiYuZcUH90rj +uvTd9VdHEPSNBQJgGxU8AhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEPTd +9VdHEPSNwdoH/13591eoYRJ3s5MANTpjbFv7AklOMxXsIhuxXTzyAuNogp3OWR93 +PnNMMo71o2oPcUh/E51EZVa+dhLfvXH2KRokfUzUWWiG7MHHbH9j0chgYtRHR0H9 +gBZ2jOzoew7Yuz1bKyitSb4VR6A+l8ryO7iesUtXUiDtp7ARnI3CJU0NkoRvvzQR +QrXnCii4F6SjnJXgcbQ/ry78toYG3BKTPwNgwRdbmy0ngNkeG2c0LiCZ2ZNJmqqp +0vAx06GasItIZ9WmdS3qZxTgz0vCgpxyrMKkJwRPSeX++jlMp0h2+W4vc75WIXPC +f1h3KZ/Vq8ZmrDCXgeC2TFC0yzijjmWBr4k= +=fNnm +-----END PGP PUBLIC KEY BLOCK----- diff --git a/insights/client/apps/ansible/test_playbook.yml b/insights/client/apps/ansible/test_playbook.yml index e64975c9f..47e906d19 100644 --- a/insights/client/apps/ansible/test_playbook.yml +++ b/insights/client/apps/ansible/test_playbook.yml @@ -9,19 +9,25 @@ # https://cloud.redhat.com/insights/remediations/44466a02-24a1-47b4-84cb-391aeff4523 # Generated by Red Hat Insights on Thu, 29 Oct 2020 12:24:17 GMT # Created by some-user - -# Fixes test:ping -# Identifier: (test:ping,fix) -# Version: unknown -- name: ping - hosts: "host1,host2" - tasks: - - ping: - - name: run insights - hosts: "host1,host2" - become: True - gather_facts: False + hosts: host1,host2 + become: true + gather_facts: false + vars: + insights_signature_exclude: /hosts,/vars/insights_signature + insights_signature: !!binary | + TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0NtbFJSWHBDUVVGQ1EwRkJa + RVpwUlVWQmMzVTJTbWsxYkhoUlpqTlRkVTgyT1U0ek1WWXdZMUU1U1RCR1FXMUJkMUkyVVVGRFoy + dFJPVTR6TVZZd1kxRUtPVWt4TnpGUlppOVZkMjRyUWtSdk5EUkZibUlyVWtOSGFVTkdhR2x3WWts + S1dHNUhhakJ1VFdkU1lVNHpZV3BUV1ZCT1NFOUZLMGhoZFVwYVdEbGthZ3B5T0U5dEsyOXJjVGhZ + TkZobGEySk9hV3hqWTBScVdtODNWMWQxWW1SVGNXdDZMMjB3YWpacFNEUlNTMnhJUnpONmNFdGFX + bE13VkRsM2RVbzNTakZGQ21wWmRGaExZMW95Ymk5bmNVOTNhWEIwUVdoNFdHdHhXa2RDYm1OM1JH + NTBLM2xhV0RoWmNqWmtTbk5QUkdGVmFTOHlSRTlVT0c5S1RYaFZZV05oYURrS2FFaFhMM0JrY21o + eGRXSjVMemMzVTNob2VYaEJlV3hvV1dsTFoweDZUMDFFUWpsRFZEaHhTMk5NYVZCVGVXSTBhMFJv + WTBsS015dDRNeTlqUkRWVmVBcDVPRE01VjJKUlNGZEZhRFUxTWs5dWN6YzRXbk5KZEVsaE5XUnpj + REpNUlZkV1EwOXhPVWxLT1VodmFHTjVhRmhpTUZGeVJWbEZZVzlNUTFkNFlXUnRDamMwVGtKYVNF + eGhUbk16WWs5dU1XdG5VM0Z1V2t4cVRXSnlLeklyZHowOUNqMXJhMEpXQ2kwdExTMHRSVTVFSUZC + SFVDQlRTVWRPUVZSVlVrVXRMUzB0TFFvPQ== tasks: - name: run insights command: insights-client diff --git a/insights/parsers/vdo_status.py b/insights/parsers/vdo_status.py index 2f9225baf..35c8eecd3 100644 --- a/insights/parsers/vdo_status.py +++ b/insights/parsers/vdo_status.py @@ -64,7 +64,7 @@ class VDOStatus(YAMLParser): 'enabled' >>> vdo['VDOs']['vdo1']['VDO statistics']['/dev/mapper/vdo1']['1K-blocks'] 7340032 - >>> vdo['VDO status'] + >>> dict(vdo['VDO status']) 
{'Date': '2019-07-24 20:48:16-04:00', 'Node': 'dell-m620-10.rhts.gsslab.pek2.redhat.com'} >>> vdo['VDOs']['vdo2']['Acknowledgement threads'] 1 diff --git a/insights/tests/client/apps/test_playbook_verifier.py b/insights/tests/client/apps/test_playbook_verifier.py new file mode 100644 index 000000000..6ba4c608f --- /dev/null +++ b/insights/tests/client/apps/test_playbook_verifier.py @@ -0,0 +1,112 @@ +# -*- coding: UTF-8 -*- +import sys +import pytest + +from insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError +from mock.mock import patch +from pytest import raises + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +def test_skip_validation(): + result = verify([{'name': "test playbook"}], skipVerify=True, checkVersion=False) + assert result == [{'name': "test playbook"}] + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('requests.get') +def test_egg_validation_error(mock_get): + mock_get.return_value.text = '3.0.0' + egg_error = 'EGG VERSION ERROR: Current running egg is not the most recent version' + fake_playbook = [{'name': "test playbook"}] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook) + assert egg_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +def test_vars_not_found_error(): + vars_error = 'VARS FIELD NOT FOUND: Verification failed' + fake_playbook = [{'name': "test playbook"}] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook, checkVersion=False) + assert vars_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +def test_signature_not_found_error(): + sig_error = 'SIGNATURE NOT FOUND: Verification failed' + fake_playbook = [{'name': "test playbook", 'vars': {}}] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook, checkVersion=False) + assert sig_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.PUBLIC_KEY_FOLDER', './testing') +def test_key_not_imported(): + key_error = "PUBLIC KEY NOT IMPORTED: Public key import failed" + fake_playbook = [{ + 'name': "test playbook", + 'vars': { + 'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==', + 'insights_signature_exclude': '/vars/insights_signature' + } + }] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook, checkVersion=False) + assert key_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.PUBLIC_KEY_FOLDER', None) +def test_key_import_error(): + key_error = "PUBLIC KEY IMPORT ERROR: Public key file not found" + fake_playbook = [{ + 'name': "test playbook", + 'vars': { + 'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==', + 'insights_signature_exclude': '/vars/insights_signature' + } + }] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook, checkVersion=False) + assert key_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') 
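+# verifyPlaybookSnippet is patched to return an empty (falsy) result, so
+# verify() is expected to reject the playbook with the invalid-signature
+# error asserted below.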
+@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=[]) +def test_playbook_verification_error(call): + key_error = 'SIGNATURE NOT VALID: Template [name: test playbook] has invalid signature' + fake_playbook = [{ + 'name': "test playbook", + 'vars': { + 'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==', + 'insights_signature_exclude': '/vars/insights_signature' + } + }] + + with raises(PlaybookVerificationError) as error: + verify(fake_playbook, checkVersion=False) + assert key_error in str(error.value) + + +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.contrib.gnupg.GPG.verify_data') +def test_playbook_verification_success(mock_method): + mock_method.return_value = True + fake_playbook = [{ + 'name': "test playbook", + 'vars': { + 'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==', + 'insights_signature_exclude': '/vars/insights_signature' + } + }] + + result = verify(fake_playbook, checkVersion=False) + assert result == fake_playbook diff --git a/insights/tests/client/test_utilities.py b/insights/tests/client/test_utilities.py index 856d5374a..2dec5940e 100644 --- a/insights/tests/client/test_utilities.py +++ b/insights/tests/client/test_utilities.py @@ -1,4 +1,5 @@ import os +import sys import tempfile import uuid import insights.client.utilities as util @@ -6,6 +7,7 @@ import re import mock import six +import pytest from mock.mock import patch @@ -275,6 +277,7 @@ def test_get_tags_nonexist(): assert got is None +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier uses oyaml library which is incompatable with this test') def test_write_tags(): tags = {'foo': 'bar'} fp = tempfile.NamedTemporaryFile() diff --git a/insights/tests/test_filters.py b/insights/tests/test_filters.py index 04cd4f4d2..f47d1719a 100644 --- a/insights/tests/test_filters.py +++ b/insights/tests/test_filters.py @@ -6,6 +6,7 @@ from insights.specs.default import DefaultSpecs import pytest +import sys def setup_function(func): @@ -38,6 +39,7 @@ def teardown_function(func): del filters.FILTERS[Specs.ps_aux] +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier code uses oyaml library which is incompatable with this test') def test_filter_dumps_loads(): r = filters.dumps() assert r is not None diff --git a/insights/tests/test_formats.py b/insights/tests/test_formats.py index 67f37f7f3..41c2f3666 100644 --- a/insights/tests/test_formats.py +++ b/insights/tests/test_formats.py @@ -1,3 +1,4 @@ +import sys import pytest from six import StringIO from insights import dr, make_fail, rule @@ -69,6 +70,7 @@ def test_syslog_format_archive(): assert SL_PATH in data +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier code uses oyaml library which is incompatable with this test') def test_yaml_format(): broker = dr.Broker() output = StringIO() diff --git a/setup.py b/setup.py index d3aadc939..070d15e63 100644 --- a/setup.py +++ b/setup.py @@ -55,7 +55,9 @@ def maybe_require(pkg): client = set([ - 'requests' + 'requests', + 'python-gnupg==0.4.6', + 'oyaml' ]) develop = set([ From 9b8be036399ba123f0d74553fe8a539b07165179 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Wed, 17 Mar 2021 06:47:24 +0530 Subject: [PATCH 348/892] Remove deprecated warning for VirtUuidFacts (#2979) SubscriptionManagerFactsList was added and then removed in (#1935) 
Signed-off-by: Rohan Arora --- insights/parsers/virt_uuid_facts.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/insights/parsers/virt_uuid_facts.py b/insights/parsers/virt_uuid_facts.py index 06d68b6c5..67e3356a8 100644 --- a/insights/parsers/virt_uuid_facts.py +++ b/insights/parsers/virt_uuid_facts.py @@ -16,7 +16,6 @@ 2 """ -from insights.util import deprecated from insights.specs import Specs from .. import JSONParser, parser @@ -24,12 +23,4 @@ @parser(Specs.virt_uuid_facts) class VirtUuidFacts(JSONParser): - """ - .. warning:: - This parser is deprecated, please use - :py:class:`insights.parsers.subscription_manager_list.SubscriptionManagerFactsList` instead. - - """ - def __init__(self, *args, **kwargs): - deprecated(VirtUuidFacts, "Import SubscriptionManagerFactsList from insights.parsers.subscription_manager_list instead") - super(VirtUuidFacts, self).__init__(*args, **kwargs) + pass From a1c2d654c46cee3fbee89f0a6abaa1d5421a998c Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 17 Mar 2021 09:19:26 +0800 Subject: [PATCH 349/892] New parser for "lspci -vmmkn" (#2973) * New parser for lspci -vmmkn Signed-off-by: Xiangce Liu * Add spec for lspci_vmmkn Signed-off-by: Xiangce Liu * Skip the empty lines in middle and tail Signed-off-by: Xiangce Liu * Module is a list Signed-off-by: Xiangce Liu * fix doc errors Signed-off-by: Xiangce Liu --- insights/parsers/lspci.py | 112 ++++++++++++++++++++++++++- insights/parsers/tests/test_lspci.py | 72 ++++++++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 5 files changed, 179 insertions(+), 8 deletions(-) diff --git a/insights/parsers/lspci.py b/insights/parsers/lspci.py index c1d3bc329..106d4a656 100644 --- a/insights/parsers/lspci.py +++ b/insights/parsers/lspci.py @@ -1,13 +1,21 @@ """ +LsPci - Commands ``lspci`` +========================== + +The parsers in this module are to parse the PCI device information gathered +from the ``/sbin/lspci`` commands. + LsPci - Command ``lspci -k`` -============================ +---------------------------- -To parse the PCI device information gathered from the ``/sbin/lspci -k`` command. +LsPciVmmkn - Command ``lspci -vmmkn`` +------------------------------------- """ import re -from .. import LogFileOutput, parser, CommandParser, get_active_lines +from insights import LogFileOutput, parser, CommandParser, get_active_lines +from insights.parsers import SkipException from insights.specs import Specs @@ -103,4 +111,100 @@ def pci_dev_list(self): """ The list of PCI devices. """ - return self.data.keys() + return list(self.data.keys()) + + +@parser(Specs.lspci_vmmkn) +class LsPciVmmkn(CommandParser, list): + """ + Class to parse the PCI device information gathered from the + ``/sbin/lspci -vmmkn`` command. 
+ + Typical output of the ``lspci -vmmkn`` command is:: + + Slot: 00:00.0 + Class: 0600 + Vendor: 8086 + Device: 1237 + SVendor: 1af4 + SDevice: 1100 + Rev: 02 + + Slot: 00:01.0 + Class: 0101 + Vendor: 8086 + Device: 7010 + SVendor: 1af4 + SDevice: 1100 + ProgIf: 80 + Driver: ata_piix + Module: ata_piix + Module: ata_generic + + Slot: 00:01.1 + Class: 0c03 + Vendor: 8086 + Device: 7020 + SVendor: 1af4 + SDevice: 1100 + Rev: 01 + Driver: uhci_hcd + + Slot: 00:03.0 + Class: 0200 + Vendor: 1af4 + Device: 1000 + SVendor: 1af4 + SDevice: 0001 + PhySlot: 3 + Driver: virtio-pci + + Examples: + >>> type(lspci_vmmkn) + + >>> sorted(lspci_vmmkn.pci_dev_list) + ['00:00.0', '00:01.0', '00:01.1', '00:03.0'] + >>> lspci_vmmkn[0].get('Driver') is None + True + >>> lspci_vmmkn[-1].get('Driver') + 'virtio-pci' + >>> len(lspci_vmmkn[1].get('Module')) + 2 + + Attributes: + + """ + def parse_content(self, content): + # Remove the white-trailing of the output + while content and not content[-1].strip(): + content.pop(-1) + + dev = {} + self.append(dev) + for line in content: + line = line.strip() + if not line: + # Skip empty lines + if dev: + dev = {} + self.append(dev) + continue + key, val = [i.strip() for i in line.split(':', 1)] + # Module could have multiple values + if key == 'Module': + if key in dev: + dev[key].append(val) + else: + dev[key] = [val] + else: + dev[key] = val + + if len(self) <= 1 and not dev: + raise SkipException() + + @property + def pci_dev_list(self): + """ + The list of PCI devices. + """ + return [i['Slot'] for i in self] diff --git a/insights/parsers/tests/test_lspci.py b/insights/parsers/tests/test_lspci.py index 7a4d9b72c..361837d6c 100644 --- a/insights/parsers/tests/test_lspci.py +++ b/insights/parsers/tests/test_lspci.py @@ -1,7 +1,9 @@ -from insights.parsers import lspci -from insights.parsers.lspci import LsPci -from insights.tests import context_wrap import doctest +import pytest + +from insights.parsers import lspci, SkipException +from insights.parsers.lspci import LsPci, LsPciVmmkn +from insights.tests import context_wrap LSPCI_0 = """ @@ -213,6 +215,46 @@ Kernel modules: iwlwifi """.strip() +LSPCI_VMMKN = """ +Slot: 00:00.0 +Class: 0600 +Vendor: 8086 +Device: 1237 +SVendor: 1af4 +SDevice: 1100 +Rev: 02 + +Slot: 00:01.0 +Class: 0101 +Vendor: 8086 +Device: 7010 +SVendor: 1af4 +SDevice: 1100 +ProgIf: 80 +Driver: ata_piix +Module: ata_piix +Module: ata_generic + +Slot: 00:01.1 +Class: 0c03 +Vendor: 8086 +Device: 7020 +SVendor: 1af4 +SDevice: 1100 +Rev: 01 +Driver: uhci_hcd + + +Slot: 00:03.0 +Class: 0200 +Vendor: 1af4 +Device: 1000 +SVendor: 1af4 +SDevice: 0001 +PhySlot: 3 +Driver: virtio-pci +""".strip() + def test_lspci(): LsPci.token_scan('centrino', 'Centrino') @@ -252,9 +294,31 @@ def test_lspci_driver(): assert len(output.pci_dev_list) == 1 +def test_lspci_vmmkn(): + lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) + assert sorted(lspci_vmmkn.pci_dev_list) == ['00:00.0', '00:01.0', '00:01.1', '00:03.0'] + assert lspci_vmmkn[0].get('Driver') is None + assert lspci_vmmkn[1].get('Vendor') == '8086' + assert lspci_vmmkn[1].get('Device') == '7010' + assert lspci_vmmkn[2].get('SVendor') == '1af4' + assert lspci_vmmkn[3].get('SDevice') == '0001' + assert lspci_vmmkn[-1].get('Driver') == 'virtio-pci' + assert sorted(lspci_vmmkn[1].get('Module')) == sorted(['ata_piix', 'ata_generic']) + assert lspci_vmmkn[-1].get('Module') is None + + +def test_lspci_vmmkn_ab(): + with pytest.raises(SkipException): + LsPciVmmkn(context_wrap('')) + + with pytest.raises(SkipException): + 
LsPciVmmkn(context_wrap(' \n '.splitlines())) + + def test_doc_examples(): env = { - 'lspci': LsPci(context_wrap(LSPCI_DRIVER_DOC)) + 'lspci': LsPci(context_wrap(LSPCI_DRIVER_DOC)), + 'lspci_vmmkn': LsPciVmmkn(context_wrap(LSPCI_VMMKN)) } failed, total = doctest.testmod(lspci, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index a9e7a1938..cc999c5d8 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -334,6 +334,7 @@ class Specs(SpecSet): lsmod = RegistryPoint() lsof = RegistryPoint(filterable=True) lspci = RegistryPoint() + lspci_vmmkn = RegistryPoint() lssap = RegistryPoint() lsscsi = RegistryPoint() lsvmbus = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 9f2386f21..e6b8e69e4 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -558,6 +558,7 @@ def httpd_cmd(broker): lsmod = simple_command("/sbin/lsmod") lsof = simple_command("/usr/sbin/lsof") lspci = simple_command("/sbin/lspci -k") + lspci_vmmkn = simple_command("/sbin/lspci -vmmkn") lsscsi = simple_command("/usr/bin/lsscsi") lsvmbus = simple_command("/usr/sbin/lsvmbus -vv") lvm_conf = simple_file("/etc/lvm/lvm.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 45e781cf4..30ea8c471 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -131,6 +131,7 @@ class InsightsArchiveSpecs(Specs): lsmod = simple_file("insights_commands/lsmod") lsof = simple_file("insights_commands/lsof") lspci = simple_file("insights_commands/lspci_-k") + lspci_vmmkn = simple_file("insights_commands/lspci_-vmmkn") lsscsi = simple_file("insights_commands/lsscsi") lsvmbus = simple_file("insights_commands/lsvmbus_-vv") lvmconfig = first_file([ From de2a8b0530e654c40eea46191f4cd2989942e422 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 18 Mar 2021 22:49:02 +0800 Subject: [PATCH 350/892] New Combiner LsPci for lspci commands (#2982) * New Combiner LsPci for lspci commands Signed-off-by: Xiangce Liu * Add comment Signed-off-by: Xiangce Liu * fix flake8 error Signed-off-by: Xiangce Liu --- docs/shared_combiners_catalog/lspci.rst | 3 + insights/combiners/lspci.py | 166 ++++++++++++++++++++++++ insights/combiners/tests/test_lspci.py | 158 ++++++++++++++++++++++ insights/parsers/lspci.py | 2 +- 4 files changed, 328 insertions(+), 1 deletion(-) create mode 100644 docs/shared_combiners_catalog/lspci.rst create mode 100644 insights/combiners/lspci.py create mode 100644 insights/combiners/tests/test_lspci.py diff --git a/docs/shared_combiners_catalog/lspci.rst b/docs/shared_combiners_catalog/lspci.rst new file mode 100644 index 000000000..a8af58515 --- /dev/null +++ b/docs/shared_combiners_catalog/lspci.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.lspci + :members: + :show-inheritance: diff --git a/insights/combiners/lspci.py b/insights/combiners/lspci.py new file mode 100644 index 000000000..cf56a97ab --- /dev/null +++ b/insights/combiners/lspci.py @@ -0,0 +1,166 @@ +""" +LsPci - Commands ``lspci`` +========================== + +This combiner combines the following Parsers to a list. 
+- LsPci - the output of command ``lspci -k`` +- LsPciVmmkn - the output of command ``lspci -vmmkn`` +""" +from insights import combiner +from insights.parsers import keyword_search +from insights.parsers.lspci import LsPci, LsPciVmmkn + + +@combiner([LsPci, LsPciVmmkn]) +class LsPci(list): + """ + Combines the Parser LsPci of ``/sbin/lspci -k`` command and Parser + LsPciVmmkn of ``/sbin/lspci -vmmkn`` command. + + Typical output of the ``lspci -k`` command is:: + + 00:00.0 Host bridge: Intel Corporation Haswell-ULT DRAM Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: hsw_uncore + 00:02.0 VGA compatible controller: Intel Corporation Haswell-ULT Integrated Graphics Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: i915 + Kernel modules: i915 + 00:03.0 Audio device: Intel Corporation Haswell-ULT HD Audio Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel + 00:16.0 Communication controller: Intel Corporation 8 Series HECI #0 (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: mei_me + Kernel modules: mei_me + 00:19.0 Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: e1000e + Kernel modules: e1000e + 00:1b.0 Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel + + Typical output of the ``lspci -vmmkn`` command is:: + + Slot: 00:00.0 + Class: 0600 + Vendor: 8086 + Device: 0a04 + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: hsw_uncore + + Slot: 00:02.0 + Class: 0300 + Vendor: 8086 + Device: 0a16 + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: i915 + Module: i915 + + Slot: 00:03.0 + Class: 0403 + Vendor: 8086 + Device: 0a0c + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: snd_hda_intel + Module: snd_hda_intel + + Slot: 00:16.0 + Class: 0780 + Vendor: 8086 + Device: 9c3a + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: mei_me + Module: mei_me + + Slot: 00:19.0 + Class: 0200 + Vendor: 8086 + Device: 155a + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: e1000e + Module: e1000e + + Slot: 00:1b.0 + Class: 0403 + Vendor: 8086 + Device: 9c20 + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: snd_hda_intel + Module: snd_hda_intel + + Examples: + >>> type(lspci) + + >>> sorted(lspci.pci_dev_list) + ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] + >>> lspci.search(Dev_Details__contains='I218')[0]['Slot'] + '00:19.0' + """ + def __init__(self, lspci_k, lspci_vmmkn): + if lspci_vmmkn: + for dev in lspci_vmmkn: + if lspci_k and dev['Slot'] in lspci_k: + dev_k = lspci_k.data[dev['Slot']] + dev_k.pop('Kernel driver in use') if 'Kernel driver in use' in dev_k else None + dev_k.pop('Kernel modules') if 'Kernel modules' in dev_k else None + dev.update(dev_k) + self.append(dev) + self._pci_dev_list = lspci_vmmkn.pci_dev_list + else: + for dev in lspci_k.data.values(): + dev.update(Driver=dev.pop('Kernel driver in use')) if 'Kernel driver in use' in dev else None + dev.update(Module=[i.strip() for i in dev.pop('Kernel modules').split(',')]) if 'Kernel modules' in dev else None + self.append(dev) + self._pci_dev_list = lspci_k.pci_dev_list + + @property + def pci_dev_list(self): + """ + The list of PCI devices. 
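+
+        For example, with the ``lspci`` instance from the examples above:
+
+            >>> '00:19.0' in lspci.pci_dev_list
+            True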
+ """ + return self._pci_dev_list + + def search(self, **kwargs): + """ + Get the details of PCI devices by searching the table with kwargs. + + This uses the :py:func:`insights.parsers.keyword_search` function for + searching; see its documentation for usage details. If no search + parameters are given, no rows are returned. + + It simplify the value of the column according to actual usage. + + Returns: + list: A list of dictionaries of PCI devices that match the given + search criteria. + + Examples: + >>> len(lspci.search(Subsystem__startswith='Lenovo')) + 6 + >>> len(lspci.search(Subsystem__startswith='Lenovo', Dev_Details__startswith='Audio device')) + 2 + >>> lspci.search(Driver='snd_hda_intel', Dev_Details__contains='8') == [ + ... {'Slot': '00:1b.0', 'Class': '0403', 'Vendor': '8086', + ... 'Device': '9c20', 'SVendor': '17aa', 'SDevice': '2214', + ... 'Rev': '04', 'Driver': 'snd_hda_intel', + ... 'Module': ['snd_hda_intel'], 'Subsystem': 'Lenovo ThinkPad X240', + ... 'Dev_Details': 'Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04)'}] + True + """ + return keyword_search(self, **kwargs) diff --git a/insights/combiners/tests/test_lspci.py b/insights/combiners/tests/test_lspci.py new file mode 100644 index 000000000..70fd897cb --- /dev/null +++ b/insights/combiners/tests/test_lspci.py @@ -0,0 +1,158 @@ +import doctest + +from insights.combiners import lspci +from insights.combiners.lspci import LsPci +from insights.parsers.lspci import LsPci as LsPciParser, LsPciVmmkn +from insights.tests import context_wrap + + +LSPCI_K = """ +00:00.0 Host bridge: Intel Corporation Haswell-ULT DRAM Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: hsw_uncore +00:02.0 VGA compatible controller: Intel Corporation Haswell-ULT Integrated Graphics Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: i915 + Kernel modules: i915 +00:03.0 Audio device: Intel Corporation Haswell-ULT HD Audio Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel +00:16.0 Communication controller: Intel Corporation 8 Series HECI #0 (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: mei_me + Kernel modules: mei_me +00:19.0 Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: e1000e + Kernel modules: e1000e +00:1b.0 Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel +""".strip() + +LSPCI_VMMKN = """ + +Slot: 00:00.0 +Class: 0600 +Vendor: 8086 +Device: 0a04 +SVendor: 17aa +SDevice: 2214 +Rev: 09 +Driver: hsw_uncore + +Slot: 00:02.0 +Class: 0300 +Vendor: 8086 +Device: 0a16 +SVendor: 17aa +SDevice: 2214 +Rev: 09 +Driver: i915 +Module: i915 + +Slot: 00:03.0 +Class: 0403 +Vendor: 8086 +Device: 0a0c +SVendor: 17aa +SDevice: 2214 +Rev: 09 +Driver: snd_hda_intel +Module: snd_hda_intel + +Slot: 00:16.0 +Class: 0780 +Vendor: 8086 +Device: 9c3a +SVendor: 17aa +SDevice: 2214 +Rev: 04 +Driver: mei_me +Module: mei_me + + +Slot: 00:19.0 +Class: 0200 +Vendor: 8086 +Device: 155a +SVendor: 17aa +SDevice: 2214 +Rev: 04 +Driver: e1000e +Module: e1000e + +Slot: 00:1b.0 +Class: 0403 +Vendor: 8086 +Device: 9c20 +SVendor: 17aa +SDevice: 2214 +Rev: 04 +Driver: snd_hda_intel +Module: snd_hda_intel +""" + + +def test_lspci_k(): + lspci_k = LsPciParser(context_wrap(LSPCI_K)) + lspci = LsPci(lspci_k, 
None) + assert sorted(lspci.pci_dev_list) == ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] + assert lspci.search(Dev_Details__contains='I218') == [ + { + 'Driver': 'e1000e', 'Module': ['e1000e'], + 'Subsystem': 'Lenovo ThinkPad X240', + 'Dev_Details': 'Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04)' + } + ] + assert lspci.search(Slot__startwith='00:1b.0') == [] + + +def test_lspci_vmmkn(): + lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) + lspci = LsPci(None, lspci_vmmkn) + assert sorted(lspci.pci_dev_list) == ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] + assert lspci.search(Device='155a', Vendor='8086') == [ + { + 'Slot': '00:19.0', 'Class': '0200', 'Vendor': '8086', + 'Device': '155a', 'SVendor': '17aa', 'SDevice': '2214', + 'Rev': '04', 'Driver': 'e1000e', 'Module': ['e1000e'], + } + ] + assert lspci.search(Dev_Details__contains='I218') == [] + + +def test_lspci_both(): + lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) + lspci_k = LsPciParser(context_wrap(LSPCI_K)) + lspci = LsPci(lspci_k, lspci_vmmkn) + assert sorted(lspci.pci_dev_list) == ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] + assert lspci.search(Dev_Details__contains='I218') == [ + { + 'Slot': '00:19.0', 'Class': '0200', 'Vendor': '8086', + 'Device': '155a', 'SVendor': '17aa', 'SDevice': '2214', + 'Rev': '04', 'Driver': 'e1000e', 'Module': ['e1000e'], + 'Subsystem': 'Lenovo ThinkPad X240', + 'Dev_Details': 'Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04)' + } + ] + assert lspci.search(Slot='00:1b.0') == [ + { + 'Slot': '00:1b.0', 'Class': '0403', 'Vendor': '8086', + 'Device': '9c20', 'SVendor': '17aa', 'SDevice': '2214', + 'Rev': '04', 'Driver': 'snd_hda_intel', + 'Module': ['snd_hda_intel'], + 'Subsystem': 'Lenovo ThinkPad X240', + 'Dev_Details': 'Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04)' + } + ] + + +def test_doc_examples(): + lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) + lspci_k = LsPciParser(context_wrap(LSPCI_K)) + env = {'lspci': LsPci(lspci_k, lspci_vmmkn)} + failed, total = doctest.testmod(lspci, globs=env) + assert failed == 0 diff --git a/insights/parsers/lspci.py b/insights/parsers/lspci.py index 106d4a656..db8d19cbe 100644 --- a/insights/parsers/lspci.py +++ b/insights/parsers/lspci.py @@ -87,7 +87,7 @@ def parse_content(self, content): if bus_device_function_re.match(parts[0]): bus_device_function = parts[0] - device_details = ' '.join(map(str, parts[1:])) + device_details = line.split(None, 1)[-1] # keep the raw line self.data[bus_device_function] = {'Dev_Details': device_details.lstrip()} elif bus_device_function and (line.split(":")[0].strip() in fields): parts = line.split(':') From ba6805b4f1aa6e43e64ec07bddb0f8d6a34ee2f3 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 18 Mar 2021 12:30:19 -0400 Subject: [PATCH 351/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 3efb38d0d..f4107d9ef 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -720,6 +720,11 @@ "pattern": [], "symbolic_name": "lspci_kernel" }, + { + "command": "/sbin/lspci -vmmkn", + "pattern": [], + "symbolic_name": "lspci_vmmkn" + }, { "command": "/usr/sbin/lvmconfig --type full", "pattern": [], @@ -3372,7 +3377,7 @@ 
{ "file": "/var/log/secure", "pattern": [ - "[CAUTION] This_is_the default_filter_string_for_all_large_files!" + "Could not set limit for 'nofile': Operation not permitted" ], "symbolic_name": "secure" }, @@ -4325,5 +4330,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-03-04T14:18:04.414559" + "version": "2021-03-11T11:43:31.335066" } From b532e911cb4f05ee206922a16f8203818e92d285 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 23 Mar 2021 15:30:00 -0500 Subject: [PATCH 352/892] Fix exception in YumRepolist parser (#2983) * Fix #2857 issue where parser throws exception with empty repolist * Add tests for exception handlers Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/parsers/tests/test_yum_repolist.py | 22 +++++++++++++++++++++ insights/parsers/yum.py | 21 +++++++++++++------- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/insights/parsers/tests/test_yum_repolist.py b/insights/parsers/tests/test_yum_repolist.py index 4a8d35666..1361b47c4 100644 --- a/insights/parsers/tests/test_yum_repolist.py +++ b/insights/parsers/tests/test_yum_repolist.py @@ -133,6 +133,16 @@ repolist: 58096 """.strip() +# This output occurred in Bugzilla 1929384 +YUM_REPOLIST_ZERO = """ +repolist: 0 +""".strip() + +YUM_REPOLIST_MISSING_HEADER = """ +Loaded plugins: product-id, rhnplugin, security, subscription-manager +Updating certificate-based repositories. +""" + def test_yum_repolist(): repo_list = YumRepoList(context_wrap(YUM_REPOLIST_CONTENT)) @@ -234,3 +244,15 @@ def test_repos_without_ends(): repo_list = YumRepoList(context_wrap(YUM_REPOLIST_CONTENT_MISSING_END)) assert 1 == len(repo_list) assert 0 == len(repo_list.rhel_repos) + + +def test_repolist_zero(): + with pytest.raises(SkipException) as se: + YumRepoList(context_wrap(YUM_REPOLIST_ZERO)) + assert 'No repolist.' 
in str(se)
+
+
+def test_repolist_missing_header():
+    with pytest.raises(ParseException) as se:
+        YumRepoList(context_wrap(YUM_REPOLIST_MISSING_HEADER))
+    assert 'Failed to parse yum repolist' in str(se)
diff --git a/insights/parsers/yum.py b/insights/parsers/yum.py
index 7053a979e..66858dde2 100644
--- a/insights/parsers/yum.py
+++ b/insights/parsers/yum.py
@@ -11,7 +11,7 @@
 """
 
 from insights import parser, CommandParser
-from insights.parsers import SkipException, parse_fixed_table
+from insights.parsers import SkipException, parse_fixed_table, ParseException
 from insights.specs import Specs
 
 eus = [
@@ -127,6 +127,9 @@ def parse_content(self, content):
         if not content:
             raise SkipException('No repolist.')
 
+        if content[0].startswith('repolist:'):
+            raise SkipException('No repolist.')
+
         trailing_line_prefix = [
             'repolist:',
             'Uploading Enabled',
@@ -137,12 +140,16 @@ def parse_content(self, content):
 
         self.data = []
         self.repos = {}
-        self.data = parse_fixed_table(
-            content,
-            heading_ignore=['repo id'],
-            header_substitute=[('repo id', 'id '), ('repo name', 'name ')],
-            trailing_ignore=trailing_line_prefix,
-            empty_exception=True)
+        try:
+            self.data = parse_fixed_table(
+                content,
+                heading_ignore=['repo id'],
+                header_substitute=[('repo id', 'id '), ('repo name', 'name ')],
+                trailing_ignore=trailing_line_prefix,
+                empty_exception=True)
+        except ValueError as e:
+            # ValueError raised by parse_fixed_table
+            raise ParseException('Failed to parse yum repolist: {0}'.format(str(e)))
 
         if not self.data:
             raise SkipException('No repolist.')
From a4b5534b3cc85ab1dba04704d4a8d21718cee679 Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Wed, 24 Mar 2021 09:29:05 -0500
Subject: [PATCH 353/892] Fix datasourceproviders to deserialize as text
 (#2985)

* Fix 2970 issue with datasources where the DatasourceProvider output
  was being deserialized as raw instead of text

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 insights/core/spec_factory.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py
index a600836a7..87ac00ba2 100644
--- a/insights/core/spec_factory.py
+++ b/insights/core/spec_factory.py
@@ -1076,4 +1076,4 @@ def serialize_datasource_provider(obj, root):
 
 @deserializer(DatasourceProvider)
 def deserialize_datasource_provider(_type, data, root):
-    return SerializedRawOutputProvider(data["relative_path"], root)
+    return SerializedOutputProvider(data["relative_path"], root)
From 6c4b4bcd5a50b6a5fb4c780d90c51a1a84d07206 Mon Sep 17 00:00:00 2001
From: Chris Sams <csams@redhat.com>
Date: Wed, 24 Mar 2021 16:08:25 -0500
Subject: [PATCH 354/892] Fix bug where missing endscript caused an infinite
 loop. (#2990)
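
Without an endscript, Line.until(ScriptEnd) never reached its
terminator and kept matching at end of input, so parsing never
finished. The grammar now also stops at EOF and treats the closing
keyword as optional:

    Lines = Line.until(ScriptEnd | EOF).map(lambda x: "\n".join(x))
    Script = ScriptStart + Lines << Opt(ScriptEnd)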
(#2990) Fixes #2989 Signed-off-by: Christopher Sams --- insights/combiners/logrotate_conf.py | 8 +++---- .../tests/test_logrotate_conf_tree.py | 24 +++++++++++++++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/insights/combiners/logrotate_conf.py b/insights/combiners/logrotate_conf.py index 222261be8..b786c2ddd 100644 --- a/insights/combiners/logrotate_conf.py +++ b/insights/combiners/logrotate_conf.py @@ -159,8 +159,8 @@ def __init__(self, ctx): ScriptStart = WS >> PosMarker(Choice([Literal(s) for s in scripts])) << WS ScriptEnd = Literal("endscript") Line = (WS >> AnyChar.until(EOL) << WS).map(lambda x: "".join(x)) - Lines = Line.until(ScriptEnd).map(lambda x: "\n".join(x)) - Script = ScriptStart + Lines << ScriptEnd + Lines = Line.until(ScriptEnd | EOF).map(lambda x: "\n".join(x)) + Script = ScriptStart + Lines << Opt(ScriptEnd) Script = Script.map(lambda x: [x[0], [x[1]], None]) BeginBlock = WS >> LeftCurly << WS EndBlock = WS >> RightCurly @@ -172,7 +172,7 @@ def __init__(self, ctx): Stanza <= WS >> (Stmt | Comment) << WS Doc = Many(Stanza).map(skip_none).map(self.to_entries) - self.Top = Doc + EOF + self.Top = Doc << EOF def to_entries(self, x): ret = [] @@ -194,7 +194,7 @@ def parse_doc(content, ctx=None): if isinstance(content, list): content = "\n".join(content) parse = DocParser(ctx) - result = parse(content)[0] + result = parse(content) return Entry(children=result, src=ctx) diff --git a/insights/combiners/tests/test_logrotate_conf_tree.py b/insights/combiners/tests/test_logrotate_conf_tree.py index 0566b9b15..212c1aa1e 100644 --- a/insights/combiners/tests/test_logrotate_conf_tree.py +++ b/insights/combiners/tests/test_logrotate_conf_tree.py @@ -1,4 +1,6 @@ # coding=utf-8 +import pytest + from insights.parsr.query import first from insights.combiners.logrotate_conf import _LogRotateConf, LogRotateConfTree from insights.tests import context_wrap @@ -66,6 +68,22 @@ """.strip() +LOGROTATE_MISSING_ENDSCRIPT = """ +/var/log/example/*.log { + daily + missingok + rotate 10 + dateext + dateyesterday + notifempty + sharedscripts + postrotate + [ ! 
-f /var/run/openresty.pid ] || kill -USR1 `cat /var/run/example.pid` + /usr/local/bin/mc cp $1 minio/matrix-prod-cluster/node1/example/ +} +""".strip() + + def test_logrotate_tree(): p = _LogRotateConf(context_wrap(CONF, path="/etc/logrotate.conf")) conf = LogRotateConfTree([p]) @@ -82,3 +100,9 @@ def test_junk_space(): p = _LogRotateConf(context_wrap(JUNK_SPACE, path="/etc/logrotate.conf")) conf = LogRotateConfTree([p]) assert "compress" in conf["/var/log/spooler"] + + +def test_logrotate_conf_combiner_missing_endscript(): + with pytest.raises(Exception): + p = _LogRotateConf(context_wrap(LOGROTATE_MISSING_ENDSCRIPT, path='/etc/logrotate.conf')), + print(p) From 8acced3610df32ae491af5cf25515d63375f33ee Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 25 Mar 2021 07:01:31 +0800 Subject: [PATCH 355/892] Enhance tuned list parser Tuned (#2987) * Enhance tuned list parser Tuned Signed-off-by: Xiangce Liu * fix flake8 error Signed-off-by: Xiangce Liu * only strip the useful value Signed-off-by: Xiangce Liu --- insights/parsers/tests/test_tuned.py | 86 +++++++++++++++++++++------- insights/parsers/tuned.py | 41 ++++++++----- 2 files changed, 92 insertions(+), 35 deletions(-) diff --git a/insights/parsers/tests/test_tuned.py b/insights/parsers/tests/test_tuned.py index 21154e1d0..2292ba4a6 100644 --- a/insights/parsers/tests/test_tuned.py +++ b/insights/parsers/tests/test_tuned.py @@ -1,3 +1,7 @@ +import pytest +import doctest + +from insights.parsers import SkipException, tuned from insights.parsers.tuned import Tuned from insights.tests import context_wrap @@ -30,32 +34,70 @@ Preset profile: virtual-guest '''.strip() +TUNED_OUTPUT3 = ''' +Available profiles: +- balanced - General non-specialized tuned profile +- desktop - Optimize for the desktop use-case +- hpc-compute - Optimize for HPC compute workloads +- latency-performance - Optimize for deterministic performance at the cost of increased power consumption +- network-latency - Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance +- network-throughput - Optimize for streaming network throughput, generally only necessary on older CPUs or 40G+ networks +- powersave - Optimize for low power consumption +- sap-netweaver - Optimize for SAP NetWeaver +- throughput-performance - Broadly applicable tuning that provides excellent performance across a variety of common server workloads +- virtual-guest - Optimize for running inside a virtual guest +- virtual-guest-vmware +- virtual-host - Optimize for running KVM guests +Current active profile: virtual-guest-vmware +'''.strip() + +TUNED_OUTPUT4 = ''' +'''.strip() + def test_active_profile(): tuned_output = Tuned(context_wrap(TUNED_OUTPUT)) - assert len(tuned_output.data.get('available')) == 9 - assert tuned_output.data.get('active') == 'virtual-guest' - assert tuned_output.data.get('available') == ['balanced', - 'desktop', - 'latency-performance', - 'network-latency', - 'network-throughput', - 'powersave', - 'throughput-performance', - 'virtual-guest', - 'virtual-host'] + assert len(tuned_output.get('available')) == 9 + assert tuned_output.get('active') == 'virtual-guest' + assert tuned_output.get('available') == ['balanced', + 'desktop', + 'latency-performance', + 'network-latency', + 'network-throughput', + 'powersave', + 'throughput-performance', + 'virtual-guest', + 'virtual-host'] def test_preset_profile(): tuned_output = Tuned(context_wrap(TUNED_OUTPUT2)) - assert len(tuned_output.data.get('available')) == 9 - assert 
tuned_output.data.get('preset') == 'virtual-guest'
-    assert tuned_output.data.get('available') == ['balanced',
-                                                  'desktop',
-                                                  'latency-performance',
-                                                  'network-latency',
-                                                  'network-throughput',
-                                                  'powersave',
-                                                  'throughput-performance',
-                                                  'virtual-guest',
-                                                  'virtual-host']
+    assert len(tuned_output.get('available')) == 9
+    assert tuned_output.get('preset') == 'virtual-guest'
+    assert tuned_output.get('available') == ['balanced',
+                                             'desktop',
+                                             'latency-performance',
+                                             'network-latency',
+                                             'network-throughput',
+                                             'powersave',
+                                             'throughput-performance',
+                                             'virtual-guest',
+                                             'virtual-host']
+
+
+def test_tuned_profile():
+    tuned_output = Tuned(context_wrap(TUNED_OUTPUT3))
+    assert len(tuned_output.get('available')) == 12
+    assert tuned_output.get('preset') is None
+    assert tuned_output.get('active') == 'virtual-guest-vmware'
+    assert 'sap-netweaver' in tuned_output.get('available')
+    assert 'virtual-guest-vmware' in tuned_output.get('available')
+
+    with pytest.raises(SkipException):
+        Tuned(context_wrap(''))
+
+
+def test_doc_example():
+    env = {'tuned': Tuned(context_wrap(TUNED_OUTPUT))}
+    failed, total = doctest.testmod(tuned, globs=env)
+    assert failed == 0
diff --git a/insights/parsers/tuned.py b/insights/parsers/tuned.py
index 3bf05d25a..df259ca78 100644
--- a/insights/parsers/tuned.py
+++ b/insights/parsers/tuned.py
@@ -31,35 +31,50 @@
 Examples:
-    >>> result = shared[Tuned]
-    >>> 'active' in result.data
+    >>> type(tuned)
+    <class 'insights.parsers.tuned.Tuned'>
+    >>> 'active' in tuned
     True
-    >>> result.data['active']
+    >>> tuned['active']
     'virtual-guest'
-    >>> len(result.data['available'])
+    >>> len(tuned['available'])
     9
-    >>> 'balanced' in result.data['available']
+    >>> 'balanced' in tuned['available']
     True
 """
-from .. import parser, CommandParser
+from insights import parser, CommandParser
+from insights.parsers import SkipException
 from insights.specs import Specs
 
 
 @parser(Specs.tuned_adm)
-class Tuned(CommandParser):
+class Tuned(CommandParser, dict):
     """
-    Parse data from the ``/usr/sbin/tuned-adm list`` command.
+    Parse output from the ``/usr/sbin/tuned-adm list`` command.
+
+    Raises:
+        SkipException: When nothing needs to be parsed
     """
     def parse_content(self, content):
-        self.data = {}
-        self.data['available'] = []
+        data = {}
         for line in content:
             if line.startswith('-'):
-                self.data['available'].append(line.split('- ')[1])
+                data.update(available=[]) if 'available' not in data else None
+                data['available'].append(line.split('- ')[1].strip())
             elif line.startswith('Current'):
-                self.data['active'] = line.split(': ')[1]
+                data['active'] = line.split(': ')[1].strip()
             elif line.startswith('Preset'):
-                self.data['preset'] = line.split(': ')[1]
+                data['preset'] = line.split(': ')[1].strip()
             # Ignore everything else for now
+        if not data:
+            raise SkipException
+        self.update(data)
+
+    @property
+    def data(self):
+        '''
+        For backward compatibility.
+        '''
+        return self
From 1e2893c8d53595e1433240904c69b221c5262b05 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Thu, 25 Mar 2021 09:02:53 +0800
Subject: [PATCH 356/892] Add parser to parse satellite custom ca cert chain
 (#2981)

* Add parser to parse satellite custom ca cert chain

Signed-off-by: Huanhuan Li

* Replace two colons with one colon for sphinx keywords

Signed-off-by: Huanhuan Li

* Remove the earliest_date part to parse_content

Signed-off-by: Huanhuan Li

* Remove blank lines.

Signed-off-by: Huanhuan Li

* Remove dependency

Signed-off-by: Huanhuan Li

* Simplify the code about getting the earliest expiry date.
Signed-off-by: Huanhuan Li * Don't save timezone info * Keep consistent with CertificatesEnddate Signed-off-by: Huanhuan Li * Don't change the API of ExpirationDate Signed-off-by: Huanhuan Li * Rename the parser * It also supports other info like subject, issuer Signed-off-by: Huanhuan Li --- .../certificate_chain.rst | 3 + insights/parsers/certificate_chain.py | 118 +++++++++++++++++ .../parsers/tests/test_certificate_chain.py | 119 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 3 + insights/specs/insights_archive.py | 1 + 6 files changed, 245 insertions(+) create mode 100644 docs/shared_parsers_catalog/certificate_chain.rst create mode 100644 insights/parsers/certificate_chain.py create mode 100644 insights/parsers/tests/test_certificate_chain.py diff --git a/docs/shared_parsers_catalog/certificate_chain.rst b/docs/shared_parsers_catalog/certificate_chain.rst new file mode 100644 index 000000000..65c712cb9 --- /dev/null +++ b/docs/shared_parsers_catalog/certificate_chain.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.certificate_chain + :members: + :show-inheritance: diff --git a/insights/parsers/certificate_chain.py b/insights/parsers/certificate_chain.py new file mode 100644 index 000000000..8871c68f8 --- /dev/null +++ b/insights/parsers/certificate_chain.py @@ -0,0 +1,118 @@ +""" +Get Certificate Chain Info +========================== + +This module contains the following parsers: + +SatelliteCustomCaChain - command ``awk 'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}' /etc/pki/katello/certs/katello-server-ca.crt`` +======================================================================================================================================================================================================================================== +""" + +from insights import parser, CommandParser +from datetime import datetime +from insights.parsers import ParseException, SkipException +from insights.specs import Specs +from insights.parsers.certificates_enddate import CertificatesEnddate + + +class CertificateChain(CommandParser, list): + """ + Class to parse the output of "openssl -in -xxx -xxx". + Blank line is added to distinguish different certs in the chain. + Currently it only supports the attributes which the output is in + key=value pairs. + + Sample Output:: + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com + subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com + notBefore=Dec 7 07:02:33 2020 GMT + notAfter=Jan 18 07:02:33 2038 GMT + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.c.com + subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.d.com + notBefore=Nov 30 07:02:42 2020 GMT + notAfter=Jan 18 07:02:43 2018 GMT + + Examples: + >>> type(certs) + + >>> len(certs) + 2 + >>> certs.earliest_expiry_date.str + 'Jan 18 07:02:43 2018' + """ + + expire_date_format = '%b %d %H:%M:%S %Y' + + def parse_content(self, content): + """ + Parse the content of crt chain file. And it saves the expiration + info of each crt in a list of dict. The value of notBefore and + notAfter are saved to an instance of ExpirationDate, it + contains the date in string and datetime format. + + Attributes: + earliest_expiry_date(ExpirationDate): + The earliest expiry datetime of the certs in the chain. 
+ None when there isn't "notAfter" for all the certs + in the chain. + + Raises: + ParseException: when the output isn't in key=value format or + the notAfter or notBefore isn't expected format. + """ + if len(content) < 1: + raise SkipException("No cert in the output") + data = {} + self.append(data) + self.earliest_expiry_date = None + for index, line in enumerate(content): + if not line.strip(): + # a new cert starts + if data: + data = {} + self.append(data) + continue + if '=' not in line: + raise ParseException('The line %s is not in key=value format' % line) + key, value = [item.strip() for item in line.split('=', 1)] + value_without_tz = value.rsplit(" ", 1)[0] + if key in ['notBefore', 'notAfter']: + try: + date_time = datetime.strptime(value_without_tz, self.expire_date_format) + except Exception: + raise ParseException('The %s is not in %s format.' % (key, self.expire_date_format)) + value = CertificatesEnddate.ExpirationDate(value_without_tz, date_time) + data[key] = value + + for one_cert in self: + expire_date = one_cert.get('notAfter') + if expire_date and (self.earliest_expiry_date is None or expire_date.datetime < self.earliest_expiry_date.datetime): + self.earliest_expiry_date = expire_date + + +@parser(Specs.satellite_custom_ca_chain) +class SatelliteCustomCaChain(CertificateChain): + """ + .. note:: + Please refer to its super-class :class:`insights.parsers.certificate_chain.CertificateChain` for more + details. + + Sample Output:: + + subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com + notAfter=Jan 18 07:02:33 2038 GMT + + subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com + notAfter=Jan 18 07:02:43 2028 GMT + + Examples: + >>> type(satellite_ca_certs) + + >>> len(satellite_ca_certs) + 2 + >>> satellite_ca_certs.earliest_expiry_date.str + 'Jan 18 07:02:43 2028' + """ + pass diff --git a/insights/parsers/tests/test_certificate_chain.py b/insights/parsers/tests/test_certificate_chain.py new file mode 100644 index 000000000..84f511611 --- /dev/null +++ b/insights/parsers/tests/test_certificate_chain.py @@ -0,0 +1,119 @@ +import doctest +import pytest + +from insights.parsers import certificate_chain, ParseException, SkipException +from insights.tests import context_wrap + + +SATELLITE_OUTPUT1 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=Jan 18 07:02:33 2038 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfter=Jan 18 07:02:43 2018 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notAfter=Jan 18 07:02:43 2048 GMT + +""" + +SATELLITE_OUTPUT2 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=Jan 18 07:02:33 2038 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfter=Jan 18 07:02:43 2028 GMT + +""" + +OUTPUT1 = """ +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notBefore=Dec 7 07:02:33 2020 GMT +notAfter=Jan 18 07:02:33 2038 GMT + +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notBefore=Nov 30 07:02:42 2020 GMT +notAfter=Jan 18 07:02:43 2018 GMT + +""" + +OUTPUT2 = """ +notAfter=Dec 4 07:04:05 2035 GMT +subject= /CN=Puppet CA: abc.d.com +issuer= /C=US/ST=North 
Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com +""" + +BAD_OUTPUT1 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfterJan 18 07:02:33 2038 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfterJan 18 07:02:43 2018 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notAfterJan 18 07:02:43 2048 GMT + +""" + +BAD_OUTPUT2 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=2038 Jan 18 07:02:33 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfterJan 18 07:02:43 2018 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notAfterJan 18 07:02:43 2048 GMT + +""" + +BAD_OUTPUT3 = """ + +""" + + +def test_certificates_chain(): + certs = certificate_chain.SatelliteCustomCaChain(context_wrap(OUTPUT1)) + assert len(certs) == 2 + assert certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' + for cert in certs: + if cert['notAfter'].str == certs.earliest_expiry_date.str: + assert cert['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com' + assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' + assert cert['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com' + assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' + + certs = certificate_chain.SatelliteCustomCaChain(context_wrap(OUTPUT2)) + assert len(certs) == 1 + assert certs[0]['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com' + + +def test_satellite_ca_chain(): + certs = certificate_chain.CertificateChain(context_wrap(SATELLITE_OUTPUT1)) + assert len(certs) == 3 + assert certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' + for cert in certs: + if cert['notAfter'].str == certs.earliest_expiry_date.str: + assert cert['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com' + + +def test_doc(): + certs = certificate_chain.CertificateChain(context_wrap(OUTPUT1)) + satellite_ca_certs = certificate_chain.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT2)) + globs = { + 'certs': certs, + 'satellite_ca_certs': satellite_ca_certs + } + failed, tested = doctest.testmod(certificate_chain, globs=globs) + assert failed == 0 + + +def test_certificates_chain_except(): + with pytest.raises(ParseException): + certificate_chain.CertificateChain(context_wrap(BAD_OUTPUT1)) + with pytest.raises(ParseException): + certificate_chain.CertificateChain(context_wrap(BAD_OUTPUT2)) + with pytest.raises(SkipException): + certificate_chain.SatelliteCustomCaChain(context_wrap(BAD_OUTPUT3)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index cc999c5d8..7a31c34ab 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -566,6 +566,7 @@ class Specs(SpecSet): saphostexec_version = RegistryPoint() sat5_insights_properties = RegistryPoint() satellite_content_hosts_count = RegistryPoint() + satellite_custom_ca_chain = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_non_yum_type_repos = RegistryPoint() satellite_version_rb = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index e6b8e69e4..bf8f49d0b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -847,6 +847,9 @@ def is_satellite_capsule(broker): "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", 
deps=[is_satellite_server] ) + satellite_custom_ca_chain = simple_command( + '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt', + ) satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_non_yum_type_repos = simple_command( "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: \"yum_importer\"}}).count()'", diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 30ea8c471..6b962f439 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -209,6 +209,7 @@ class InsightsArchiveSpecs(Specs): ]) saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") saphostexec_version = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-version") + satellite_custom_ca_chain = simple_file("insights_commands/awk_BEGIN_pipe_openssl_x509_-noout_-subject_-enddate_._-_BEGIN_CERT._._-_END_CERT._print_pipe_._-_END_CERT._close_pipe_printf_n_.etc.pki.katello.certs.katello-server-ca.crt") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") sealert = simple_file('insights_commands/sealert_-l') sestatus = simple_file("insights_commands/sestatus_-b") From 9f7dca5ac6c89923f1106878d4bac96b7f99d01a Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 25 Mar 2021 21:33:01 +0800 Subject: [PATCH 357/892] Add new spec to get the satellite admin settings (#2929) * Add new spec to get the satellite admin settings Signed-off-by: Huanhuan Li * Check column order Signed-off-by: Huanhuan Li * Write a common parser for postgresql output * Update parser SatelliteAdminSettings to extend the common parser Signed-off-by: Huanhuan Li * Restructure the code to make it more readable Signed-off-by: Huanhuan Li * Transfer the settings table to dict format Signed-off-by: Huanhuan Li * Transfer to CSV format * The parser will be simpler if the postgresql output is in csv format Signed-off-by: Huanhuan Li * Update docstring * Add more test for the different types of settings table Signed-off-by: Huanhuan Li * Add has_setting method and raise KeyError in get_setting method Signed-off-by: Huanhuan Li * Save value to avoid getting twice * Replace two colon with one colon for sphinx keywords Signed-off-by: Huanhuan Li * Revert "Add has_setting method and raise KeyError in get_setting method" This reverts commit 576b1c58ad5ac38d6e1122e2bac4c94570982f6a. 
Signed-off-by: Huanhuan Li * Delete useless blank lines Signed-off-by: Huanhuan Li * Update docstring Signed-off-by: Huanhuan Li --- .../satellite_postgresql_query.rst | 3 + .../parsers/satellite_postgresql_query.py | 176 +++++++++++++++ .../tests/test_satellite_postgresql_query.py | 206 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 4 + 5 files changed, 390 insertions(+) create mode 100644 docs/shared_parsers_catalog/satellite_postgresql_query.rst create mode 100644 insights/parsers/satellite_postgresql_query.py create mode 100644 insights/parsers/tests/test_satellite_postgresql_query.py diff --git a/docs/shared_parsers_catalog/satellite_postgresql_query.rst b/docs/shared_parsers_catalog/satellite_postgresql_query.rst new file mode 100644 index 000000000..8a8c710a1 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_postgresql_query.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_postgresql_query + :members: + :show-inheritance: diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py new file mode 100644 index 000000000..49fea1207 --- /dev/null +++ b/insights/parsers/satellite_postgresql_query.py @@ -0,0 +1,176 @@ +""" +Satellite PostgreSQL database queries +===================================== + +This module contains the following parsers: + +SatelliteAdminSettings - command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'`` +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +""" + +import os +import yaml +from csv import DictReader + +from insights import parser, CommandParser +from insights.specs import Specs +from insights.parsers import SkipException, ParseException +from insights.parsers import keyword_search + + +class SatellitePostgreSQLQuery(CommandParser, list): + """ + Parent class of satellite postgresql table queries. + It saves the rows data into a list. Each row is saved into a dict. + The key is the column name, the value is the value of the column. + + Resultant data structure:: + + [ + { + 'name': 'abc', + 'url': '', + 'value': 'test' + }, + { + 'name': 'def', + 'url': 'http://xx.com', + 'value': '' + } + ] + + Sample Output:: + + name,url,value + abc,,test + def,http://xx.com, + + + Examples: + >>> type(query) + + >>> rows = query.search(name='abc') + >>> len(rows) + 1 + >>> rows[0]['value'] + 'test' + >>> columns=query.get_columns() + >>> 'url' in columns + True + >>> 'name' in columns + True + + Raises: + SkipException: when there isn't data in the table + ParseException: when the output isn't in good csv format + """ + + def parse_content(self, content): + if not content or len(content) == 1: + raise SkipException("There is no data in the table") + try: + # keep the line break for yaml parse in some table + reader = DictReader(os.linesep.join(content).splitlines(True)) + except Exception: + raise ParseException("The content isn't in csv format") + for row in reader: + self.append(row) + + def get_columns(self): + return list(self[0].keys()) + + def search(self, **kwargs): + """ + Get the rows by searching the table with kwargs. + This uses the :py:func:`insights.parsers.keyword_search` function for + searching; see its documentation for usage details. If no search + parameters are given, no rows are returned. 
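+        Matcher suffixes such as ``__startswith`` and ``__contains`` follow
+        the usual ``keyword_search`` convention. An illustrative doctest
+        against the sample rows shown in the class docstring above:
+
+            >>> query.search(name__contains='bcd') == [
+            ...     {'name': 'abcdef', 'url': '', 'value': 'test2'}
+            ... ]
+            True
+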
+
+        It simplifies the values of the columns according to actual usage.
+
+        Returns:
+            list: A list of dictionaries of rows that match the given
+            search criteria.
+
+        Examples:
+            >>> query.search(name__startswith='abc') == [
+            ...     {'name': 'abc', 'url': '', 'value': 'test'},
+            ...     {'name': 'abcdef', 'url': '', 'value': 'test2'}
+            ... ]
+            True
+            >>> query.search(name__startswith='abc', value='test') == [
+            ...     {'name': 'abc', 'url': '', 'value': 'test'}
+            ... ]
+            True
+        """
+
+        return keyword_search(self, **kwargs)
+
+
+@parser(Specs.satellite_settings)
+class SatelliteAdminSettings(SatellitePostgreSQLQuery):
+    """
+    Parse the output of the command ``psql -d foreman -c '"select name, value, "default" from settings where name in ('destroy_vm_on_host_delete', 'unregister_delete_host') --csv"``.
+
+    Sample output::
+
+        name,value,default
+        unregister_delete_host,"--- true
+        ...","--- false
+        ..."
+        destroy_vm_on_host_delete,,"--- true
+        ..."
+
+    Examples:
+        >>> type(table)
+        <class 'insights.parsers.satellite_postgresql_query.SatelliteAdminSettings'>
+        >>> table.get_setting('unregister_delete_host')
+        True
+        >>> table.get_setting('destroy_vm_on_host_delete')
+        True
+    """
+
+    def _parse_yaml(self, value):
+        if value:
+            try:
+                return yaml.safe_load(value)
+            except Exception:
+                raise ParseException("Bad format value: %s" % value)
+        return value
+
+    def parse_content(self, content):
+        """
+        The "default" and "value" columns must be selected, or else the
+        settings value can't be determined.
+        The "default" and "value" columns are in YAML format; they are
+        converted to Python objects.
+
+        Raises:
+            SkipException: when the value or default column isn't found in
+                the table.
+            ParseException: when the value or default column is in bad YAML
+                format.
+        """
+        super(SatelliteAdminSettings, self).parse_content(content)
+        if not all(item in self.get_columns() for item in ['default', 'value']):
+            raise SkipException('No default, value columns in the table.')
+        for row in self:
+            row['default'] = self._parse_yaml(row['default'])
+            row['value'] = self._parse_yaml(row['value'])
+
+    def get_setting(self, setting_name):
+        """
+        Get the actual value of setting_name.
+        If the value column isn't empty, the value of the setting_name is the
+        value column, or else it's the default column.
+
+        Args:
+            setting_name (str): the value of the name column which is searched
+                in the table.
+
+        Returns:
+            It depends on the setting; it may be a boolean, string, int or
+            list. None if the setting_name doesn't exist in the table.
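+
+        For example, with the sample output shown in the class docstring
+        above (an illustrative doctest; both lookups use names from that
+        sample):
+
+            >>> table.get_setting('unregister_delete_host')  # value column wins
+            True
+            >>> table.get_setting('no_such_setting') is None  # absent from the table
+            True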
+ """ + rows = self.search(name=setting_name) + if rows: + value = rows[0].get('value') + return rows[0].get('default') if value == '' else value diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py new file mode 100644 index 000000000..a401a7a27 --- /dev/null +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -0,0 +1,206 @@ +import doctest +import pytest + +from insights.parsers import ( + satellite_postgresql_query, ParseException, SkipException) +from insights.core.plugins import ContentException +from insights.tests import context_wrap + + +SATELLITE_POSTGRESQL_WRONG_1 = ''' +-bash: psql: command not found +''' + +SATELLITE_POSTGRESQL_WRONG_2 = ''' +su: user postgres does not exist +''' + +SATELLITE_POSTGRESQL_WRONG_3 = ''' +psql: FATAL: database "foreman" does not exist +''' + +SATELLITE_POSTGRESQL_WRONG_4 = ''' +''' + +SATELLITE_POSTGRESQL_WRONG_5 = ''' +name,default,value +''' + +test_data_1 = ''' +name +fix_db_cache +foreman_tasks_sync_task_timeout +dynflow_enable_console +dynflow_console_require_auth +foreman_tasks_proxy_action_retry_count +''' + +test_data_2 = ''' +id,name,created_at,updated_at +1,project-receptor.satellite_receptor_installer,2021-01-30 01:14:22.848735,2021-01-30 01:14:22.848735 +2,theforeman.foreman_scap_client,2021-01-30 01:14:22.916142,2021-01-30 01:14:22.91614 +''' + +test_data_3 = ''' +name,url,value +abc,,test +abcdef,,test2 +def,http://xx.com, +''' + +SATELLITE_SETTINGS_1 = ''' +name,value,default +unregister_delete_host,"--- true +...","--- false +..." +destroy_vm_on_host_delete,,"--- true +..." +''' + +SATELLITE_SETTINGS_2 = ''' +name,value,default +unregister_delete_host,,"--- false +..." +destroy_vm_on_host_delete,,"--- true +..." +''' + +SATELLITE_SETTINGS_3 = ''' +name,value,default +unregister_delete_host,"--- false +...","--- true +..." +destroy_vm_on_host_delete,"--- false +...","--- true +..." +''' + +SATELLITE_SETTINGS_WITH_DIFFERENT_TYPES = ''' +name,value,default +http_proxy_except_list,,--- [] +trusted_hosts,,--- [] +oidc_audience,,--- [] +ignored_interface_identifiers,,"--- +- lo +- en*v* +- usb* +- vnet* +- macvtap* +- _vdsmdummy_ +- veth* +- docker* +- tap* +- qbr* +- qvb* +- qvo* +- qr-* +- qg-* +- vlinuxbr* +- vovsbr*" +dns_timeout,,"--- +- 5 +- 10 +- 15 +- 20" +foreman_tasks_troubleshooting_url,,"--- https://access.redhat.com/solutions/satellite6-tasks#%{label} +..." +remote_execution_ssh_user,,"--- root +..." +foreman_tasks_sync_task_timeout,,"--- 120 +..." +foreman_tasks_proxy_action_retry_count,,"--- 4 +..." +''' + +SATELLITE_SETTINGS_BAD_1 = ''' +name,value +unregister_delete_host,"--- true +..." +destroy_vm_on_host_delete, +''' + +SATELLITE_SETTINGS_BAD_2 = ''' +name,value,default +unregister_delete_host,"--- true:: def +...","--- false +..." +destroy_vm_on_host_delete,,"--- true +..." 
+''' + + +def test_satellite_postgesql_query_exception(): + with pytest.raises(ContentException): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_1)) + with pytest.raises(SkipException): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_2)) + with pytest.raises(SkipException): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_3)) + with pytest.raises(SkipException): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_4)) + with pytest.raises(SkipException): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_5)) + + +def test_satellite_postgesql_query(): + table = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_1)) + assert len(table) == 5 + assert table.get_columns() == ['name'] + rows = table.search(name='fix_db_cache') + assert len(rows) == 1 + assert rows[0]['name'] == 'fix_db_cache' + rows = table.search(name__startswith='dynflow') + assert len(rows) == 2 + assert rows[0]['name'] == 'dynflow_enable_console' + assert rows[1]['name'] == 'dynflow_console_require_auth' + + table = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_2)) + assert len(table) == 2 + rows = table.search(id='1') + assert len(rows) == 1 + assert rows[0]['name'] == 'project-receptor.satellite_receptor_installer' + + +def test_HTL_doc_examples(): + query = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_3)) + settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) + globs = { + 'query': query, + 'table': settings + } + failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) + assert failed == 0 + + +def test_satellite_admin_settings(): + settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_2)) + assert(len(settings)) == 2 + assert not settings.get_setting('unregister_delete_host') + assert settings.get_setting('destroy_vm_on_host_delete') + + settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_3)) + assert(len(settings)) == 2 + assert not settings.get_setting('unregister_delete_host') + assert not settings.get_setting('destroy_vm_on_host_delete') + assert settings.get_setting('non_exist_column') is None + + table = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_WITH_DIFFERENT_TYPES)) + setting_value = table.get_setting('ignored_interface_identifiers') + assert isinstance(setting_value, list) + for item in ['lo', 'en*v*', 'usb*', 'vnet*', 'macvtap*', '_vdsmdummy_', 'veth*', + 'docker*', 'tap*', 'qbr*', 'qvb*', 'qvo*', 'qr-*', 'qg-*', + 'vlinuxbr*', 'vovsbr*']: + assert item in setting_value + setting_value = table.get_setting('foreman_tasks_troubleshooting_url') + assert isinstance(setting_value, str) + assert setting_value == 'https://access.redhat.com/solutions/satellite6-tasks#%{label}' + setting_value = table.get_setting('foreman_tasks_sync_task_timeout') + assert isinstance(setting_value, int) + assert setting_value == 120 + + +def test_satellite_admin_settings_exception(): + with pytest.raises(SkipException): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_1)) + with pytest.raises(ParseException): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_2)) diff --git 
a/insights/specs/__init__.py b/insights/specs/__init__.py index 7a31c34ab..fb5e95634 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -569,6 +569,7 @@ class Specs(SpecSet): satellite_custom_ca_chain = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_non_yum_type_repos = RegistryPoint() + satellite_settings = RegistryPoint() satellite_version_rb = RegistryPoint() satellite_custom_hiera = RegistryPoint() scheduler = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index bf8f49d0b..e6d516a55 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -855,6 +855,10 @@ def is_satellite_capsule(broker): "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: \"yum_importer\"}}).count()'", deps=[[is_satellite_server, is_satellite_capsule]] ) + satellite_settings = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c \"select name, value, \\\"default\\\" from settings where name in ('destroy_vm_on_host_delete', 'unregister_delete_host')\" --csv", + deps=[is_satellite_server] + ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") scsi = simple_file("/proc/scsi/scsi") From b9e758319b0bcd456ce0cedfe07852add77a1c5a Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 25 Mar 2021 10:36:20 -0400 Subject: [PATCH 358/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index f4107d9ef..fbd4d7f6e 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -151,6 +151,7 @@ " is now offline", "AMD Secure Memory Encryption (SME) active", "Amazon EC2", + "BIOS Hyper-V UEFI Release", "Brought up ", "CIFS VFS: protocol revalidation - security settings mismatch", "CSUM", @@ -165,6 +166,7 @@ "Emulex OneConnect OCe10100, FCoE Initiator", "FEATURE IBPB_SUPPORT", "FEATURE SPEC_CTRL", + "Hyper-V Host Build", "Ignoring BGRT: failed to map image header memory", "Ignoring BGRT: failed to map image memory", "Kernel page table isolation", @@ -1407,6 +1409,11 @@ "pattern": [], "symbolic_name": "rpm_V_packages" }, + { + "command": "/usr/bin/awk 'BEGIN { pipe=\"openssl x509 -noout -subject -enddate\"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf(\"\\n\")}' /etc/pki/katello/certs/katello-server-ca.crt", + "pattern": [], + "symbolic_name": "satellite_custom_ca_chain" + }, { "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", "pattern": [], @@ -1695,7 +1702,8 @@ "file": "/etc/insights-client/insights-client.conf", "pattern": [ "[", - "auto_update" + "auto_update", + "core_collect" ], "symbolic_name": "insights_client_conf" }, @@ -2472,9 +2480,7 @@ { "file": "/var/log/mariadb/mariadb.log", "pattern": [ - "Duplicate entry", - "Too many open files", - "for key 'PRIMARY'" + "Too many open files" ], "symbolic_name": "mariadb_log" }, @@ -4330,5 +4336,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-03-11T11:43:31.335066" -} + "version": "2021-03-24T20:04:01.898853" +} \ No newline at end of file From 9306ed63fb5fb29368af398d648cb059f95526e9 Mon Sep 17 
00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Thu, 25 Mar 2021 14:22:20 -0400 Subject: [PATCH 359/892] Add loadPlaybookYaml funtion to verifier functionality (#2988) * Add loadPlaybookYaml funtion to verifier functionality Signed-off-by: Alec Cohan * Linting changes Signed-off-by: Alec Cohan Co-authored-by: Jeremy Crafts --- .../apps/ansible/playbook_verifier/__init__.py | 12 ++++++++++-- .../apps/ansible/playbook_verifier/__main__.py | 5 ++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index da8f37067..86bf61d1c 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -1,6 +1,5 @@ import os import copy -import yaml import base64 import requests import tempfile @@ -10,9 +9,10 @@ from distutils.version import LooseVersion from insights.client.utilities import get_version_info from insights.client.apps.ansible.playbook_verifier.contrib import gnupg +from insights.client.apps.ansible.playbook_verifier.contrib import oyaml as yaml from insights.client.constants import InsightsConstants as constants -__all__ = ("verify", "PlaybookVerificationError") +__all__ = ("loadPlaybookYaml", "verify", "PlaybookVerificationError") SIGKEY = 'insights_signature' PUBLIC_KEY_FOLDER = pkgutil.get_data(insights.client.apps.ansible.__name__, 'playbook_verifier/public.gpg') # Update this when we have the key generated @@ -141,3 +141,11 @@ def verify(playbook, checkVersion=True, skipVerify=False): logger.info('All templates successfully validated') return playbook + + +def loadPlaybookYaml(playbook): + """ + Load playbook yaml using current yaml library implementation + output: playbook yaml + """ + return yaml.load(playbook) diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index 399246b29..2177f60e1 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -1,6 +1,5 @@ import sys -from insights.client.apps.ansible.playbook_verifier.contrib import oyaml as yaml -from insights.client.apps.ansible.playbook_verifier import verify +from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml def read_playbook(): @@ -15,7 +14,7 @@ def read_playbook(): playbook = read_playbook() -playbook_yaml = yaml.load(playbook) +playbook_yaml = loadPlaybookYaml(playbook) try: verified_playbook = verify(playbook_yaml, checkVersion=False) From a35e43004b1950cf6d6320055067f664829f54ec Mon Sep 17 00:00:00 2001 From: Akshay Ghodake Date: Fri, 26 Mar 2021 11:59:06 +0530 Subject: [PATCH 360/892] [New-Spec] tuned_adm (#2992) Signed-off-by: Akshay Ghodake --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index dd16fcde4..05c2dd3eb 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -265,6 +265,7 @@ class SosSpecs(Specs): testparm_s = simple_file("sos_commands/samba/testparm_s") tomcat_web_xml = first_of([glob_file("/etc/tomcat*/web.xml"), glob_file("/conf/tomcat/tomcat*/web.xml")]) + tuned_adm = simple_file("sos_commands/tuned/tuned-adm_list") tuned_conf = simple_file("/etc/tuned.conf") udev_persistent_net_rules = simple_file("/etc/udev/rules.d/70-persistent-net.rules") uname = 
simple_file("sos_commands/kernel/uname_-a") From febf53077b8a42931484c4d13283bfe9b644626e Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Mon, 29 Mar 2021 07:46:42 +0530 Subject: [PATCH 361/892] Add timestamp to RhsmLog parser (#2994) Signed-off-by: Rohan Arora --- insights/parsers/rhsm_log.py | 32 ++++++++++++++++++++++--- insights/parsers/tests/test_rhsm_log.py | 24 +++++++++++++++++++ 2 files changed, 53 insertions(+), 3 deletions(-) diff --git a/insights/parsers/rhsm_log.py b/insights/parsers/rhsm_log.py index b44e90648..09d56ab52 100644 --- a/insights/parsers/rhsm_log.py +++ b/insights/parsers/rhsm_log.py @@ -1,3 +1,5 @@ +from datetime import datetime + from .. import LogFileOutput, parser from insights.specs import Specs @@ -7,8 +9,32 @@ class RhsmLog(LogFileOutput): """ Class for parsing the log file: ``/var/log/rhsm/rhsm.log``. - .. note:: - Please refer to its super-class :class:`insights.core.LogFileOutput` + Sample input:: + 2016-07-31 04:06:41,215 [DEBUG] rhsmcertd-worker:24440 @identity.py:131 - Loading consumer info from identity certificates. + 2016-07-31 04:06:41,221 [DEBUG] rhsmcertd-worker:24440 @connection.py:475 - Loaded CA certificates from /etc/rhsm/ca/: redhat-uep.pem + 2016-07-31 04:06:41,221 [DEBUG] rhsmcertd-worker:24440 @connection.py:523 - Making request: GET /subscription/consumers/a808d48e-36bf-4071-a00a-0efacc511b2b/certificates/serials + 2016-07-31 04:07:21,245 [ERROR] rhsmcertd-worker:24440 @entcertlib.py:121 - [Errno -2] Name or service not known + + Examples: + >>> log = rhsm_log.get('Name or service not known')[0] + >>> log.get('raw_message') + '2016-07-31 04:07:21,245 [ERROR] rhsmcertd-worker:24440 @entcertlib.py:121 - [Errno -2] Name or service not known' + >>> log.get('message') + '[ERROR] rhsmcertd-worker:24440 @entcertlib.py:121 - [Errno -2] Name or service not known' + >>> log.get("timestamp") + datetime.datetime(2016, 7, 31, 4, 7, 21, 245000) """ + time_format = '%Y-%m-%d %H:%M:%S,%f' - pass + def _parse_line(self, line): + """ + Parse log line to valid components + """ + msg_info = {'raw_message': line} + line_split = line.split(None, 2) + try: + msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format) + msg_info['message'] = line_split[2] + except (ValueError, IndexError): + pass + return msg_info diff --git a/insights/parsers/tests/test_rhsm_log.py b/insights/parsers/tests/test_rhsm_log.py index 93648f074..1fa4b9768 100644 --- a/insights/parsers/tests/test_rhsm_log.py +++ b/insights/parsers/tests/test_rhsm_log.py @@ -1,4 +1,8 @@ +import doctest +from datetime import datetime + from insights import add_filter +from insights.parsers import rhsm_log from insights.parsers.rhsm_log import RhsmLog from insights.specs import Specs from insights.tests import context_wrap @@ -18,6 +22,12 @@ File "/usr/share/rhsm/subscription_manager/managercli.py", line 600, in _do_command """.strip() +# For Coverage +LOG3 = """ +[ERROR] +2011-12-27-08:41:13,104 [ERROR] @managercli.py:66 - certificate verify failed +""" + add_filter(Specs.rhsm_log, [ "[ERROR]", "[Errno" @@ -29,9 +39,23 @@ def test_rhsm_log(): ern_list = rlog.get('[Errno -2]') assert 1 == len(ern_list) assert ern_list[0]['raw_message'] == "2016-07-31 04:07:21,245 [ERROR] rhsmcertd-worker:24440 @entcertlib.py:121 - [Errno -2] Name or service not known" + assert ern_list[0]['timestamp'] == datetime(2016, 7, 31, 4, 7, 21, 245000) + assert ern_list[0]['message'] == "[ERROR] rhsmcertd-worker:24440 @entcertlib.py:121 - [Errno -2] Name or service not known" rlog = 
RhsmLog(context_wrap(LOG2)) ern_list = rlog.get('[Errno -2]') assert 0 == len(ern_list) err_list = rlog.get('ERROR') assert 2 == len(err_list) + + rlog = RhsmLog(context_wrap(LOG3)) + err_list = rlog.get('ERROR') + assert err_list[0].get('timestamp') is None + assert err_list[1].get('timestamp') is None + + +def test_doc(): + failed_count, tests = doctest.testmod( + rhsm_log, globs={'rhsm_log': RhsmLog(context_wrap(LOG1))} + ) + assert failed_count == 0 From 38539ca82d84568cf98e2e9a9564dfa7dd0a7681 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 29 Mar 2021 11:19:06 +0800 Subject: [PATCH 362/892] Add spec to get satellite compute resources (#2934) * New spec to get satellite compute resources Signed-off-by: Huanhuan Li * Remove useless colon Signed-off-by: Huanhuan Li --- .../parsers/satellite_postgresql_query.py | 29 +++++++++++++++++++ .../tests/test_satellite_postgresql_query.py | 18 +++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 4 +++ 4 files changed, 51 insertions(+), 1 deletion(-) diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py index 49fea1207..bed986f37 100644 --- a/insights/parsers/satellite_postgresql_query.py +++ b/insights/parsers/satellite_postgresql_query.py @@ -6,6 +6,8 @@ SatelliteAdminSettings - command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'`` ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +SatelliteComputeResources - command ``psql -d foreman -c 'select name, type from compute_resources'`` +----------------------------------------------------------------------------------------------------- """ import os @@ -174,3 +176,30 @@ def get_setting(self, setting_name): if rows: value = rows[0].get('value') return rows[0].get('default') if value == '' else value + + +@parser(Specs.satellite_compute_resources) +class SatelliteComputeResources(SatellitePostgreSQLQuery): + """ + Parse the output of the command ``psql -d foreman -c 'select name, type from compute_resources' --csv``. + + .. note:: + Please refer to its super-class :class:`insights.parsers.satellite_postgresql_query.SatellitePostgreSQLQuery` for more + details. + + Sample output:: + + name,type + test_compute_resource1,Foreman::Model::Libvirt + test_compute_resource2,Foreman::Model::RHV + + Examples: + >>> type(resources_table) + + >>> rows=resources_table.search(type='Foreman::Model::Libvirt') + >>> len(rows) + 1 + >>> rows[0]['name'] + 'test_compute_resource1' + """ + pass diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py index a401a7a27..a93c0f581 100644 --- a/insights/parsers/tests/test_satellite_postgresql_query.py +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -128,6 +128,12 @@ ..." 
''' +SATELLITE_COMPUTE_RESOURCE_1 = ''' +name,type +test_compute_resource1,Foreman::Model::Libvirt +test_compute_resource2,Foreman::Model::RHV +''' + def test_satellite_postgesql_query_exception(): with pytest.raises(ContentException): @@ -164,9 +170,11 @@ def test_satellite_postgesql_query(): def test_HTL_doc_examples(): query = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_3)) settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) + resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) globs = { 'query': query, - 'table': settings + 'table': settings, + 'resources_table': resources_table, } failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) assert failed == 0 @@ -204,3 +212,11 @@ def test_satellite_admin_settings_exception(): satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_1)) with pytest.raises(ParseException): satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_2)) + + +def test_satellite_compute_resources(): + resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) + assert len(resources_table) == 2 + rows = resources_table.search(type='Foreman::Model::RHV') + assert len(rows) == 1 + assert rows[0]['name'] == 'test_compute_resource2' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index fb5e95634..1ef4c027e 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -565,6 +565,7 @@ class Specs(SpecSet): saphostexec_status = RegistryPoint() saphostexec_version = RegistryPoint() sat5_insights_properties = RegistryPoint() + satellite_compute_resources = RegistryPoint() satellite_content_hosts_count = RegistryPoint() satellite_custom_ca_chain = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index e6d516a55..b511f3bda 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -843,6 +843,10 @@ def is_satellite_capsule(broker): return True raise SkipComponent + satellite_compute_resources = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select name, type from compute_resources' --csv", + deps=[is_satellite_server] + ) satellite_content_hosts_count = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", deps=[is_satellite_server] From 77ab46afaecb6d6b6670ede624d6e7c869316f3f Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 30 Mar 2021 10:24:34 -0400 Subject: [PATCH 363/892] refactor: remove insecure_connection config option (#2894) Signed-off-by: Jeremy Crafts --- insights/client/config.py | 4 ---- insights/client/connection.py | 5 ---- .../auto_config/test_autoconfig_urls.py | 24 +++++++++---------- .../auto_config/test_branch_info_call.py | 4 ++-- insights/tests/client/connection/test_init.py | 2 +- .../tests/client/connection/test_url_guess.py | 8 +++---- 6 files changed, 19 insertions(+), 28 deletions(-) diff --git a/insights/client/config.py b/insights/client/config.py index 4adf7d009..a2fd13b26 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -193,10 +193,6 @@ def _core_collect_default(): # non-CLI 'default': 120.0 }, - 'insecure_connection': { - # non-CLI - 'default': False - }, 'keep_archive': { 'default': False, 'opt': ['--keep-archive'], diff --git 
a/insights/client/connection.py b/insights/client/connection.py index 77ab84744..96aac8959 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -100,11 +100,6 @@ def __init__(self, config): self.cert_verify = True protocol = "https://" - insecure_connection = self.config.insecure_connection - if insecure_connection: - # This really should not be used. - protocol = "http://" - self.cert_verify = False self.auto_config = self.config.auto_config diff --git a/insights/tests/client/auto_config/test_autoconfig_urls.py b/insights/tests/client/auto_config/test_autoconfig_urls.py index d2ee6c32d..73518148c 100644 --- a/insights/tests/client/auto_config/test_autoconfig_urls.py +++ b/insights/tests/client/auto_config/test_autoconfig_urls.py @@ -11,7 +11,7 @@ def test_rhsm_legacy_url(set_auto_configuration, initConfig): Ensure the correct host URL is selected for auto_config on a legacy RHSM upload ''' initConfig().get.side_effect = ['subscription.rhsm.redhat.com', '443', '', '', '', '', ''] - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=True) _try_satellite6_configuration(config) set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False, False) @@ -25,7 +25,7 @@ def test_rhsm_platform_url(set_auto_configuration, initConfig): Ensure the correct host URL is selected for auto_config on a platform RHSM upload ''' initConfig().get.side_effect = ['subscription.rhsm.redhat.com', '443', '', '', '', '', ''] - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) _try_satellite6_configuration(config) # set_auto_configuration.assert_called_with(config, 'cloud.redhat.com', None, None, False) set_auto_configuration.assert_called_with(config, 'cert-api.access.redhat.com', None, None, False, False) @@ -44,7 +44,7 @@ def test_rhsm_stage_legacy_url(set_auto_configuration, initConfig): ''' initConfig().get.side_effect = ['subscription.rhsm.stage.redhat.com', '443', '', '', '', '', ''] - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=True) _try_satellite6_configuration(config) # config.legacy_upload is modified in the function config.legacy_upload = False @@ -60,7 +60,7 @@ def test_rhsm_stage_platform_url(set_auto_configuration, initConfig): Ensure the correct host URL is selected for auto_config on a platform staging RHSM upload ''' initConfig().get.side_effect = ['subscription.rhsm.stage.redhat.com', '443', '', '', '', '', ''] - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) _try_satellite6_configuration(config) set_auto_configuration.assert_called_with(config, 'cert.cloud.stage.redhat.com', None, None, False, True) @@ -74,7 +74,7 @@ def test_sat_legacy_url(set_auto_configuration, initConfig): Ensure the correct host URL is selected for auto_config on a legacy Sat upload ''' initConfig().get.side_effect = ['test.satellite.com', '443', '', '', '', '', 'test_cert'] - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=True) _try_satellite6_configuration(config) set_auto_configuration.assert_called_with(config, 
'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) @@ -88,7 +88,7 @@ def test_sat_platform_url(set_auto_configuration, initConfig): Ensure the correct host URL is selected for auto_config on a platform Sat upload ''' initConfig().get.side_effect = ['test.satellite.com', '443', '', '', '', '', 'test_cert'] - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) _try_satellite6_configuration(config) set_auto_configuration.assert_called_with(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) @@ -98,7 +98,7 @@ def test_rhsm_legacy_base_url_configured(): ''' Ensure the correct base URL is assembled for a legacy RHSM upload ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=True, proxy=None) set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False) assert config.base_url == 'cert-api.access.redhat.com/r/insights' @@ -108,7 +108,7 @@ def test_rhsm_platform_base_url_configured(): ''' Ensure the correct base URL is assembled for a platform RHSM upload ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=False, proxy=None) # set_auto_configuration(config, 'cloud.redhat.com', None, None, False) # assert config.base_url == 'cloud.redhat.com/api' # [CIRCUS MUSIC] @@ -122,7 +122,7 @@ def test_sat_legacy_base_url_configured(): ''' Ensure the correct base URL is assembled for a legacy RHSM upload ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=True, proxy=None) set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights' @@ -132,7 +132,7 @@ def test_sat_platform_base_url_configured(): ''' Ensure the correct base URL is assembled for a platform RHSM upload ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=False, proxy=None) set_auto_configuration(config, 'test.satellite.com:443/redhat_access', 'test_cert', None, True, False) # assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights/platform' assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights' @@ -173,7 +173,7 @@ def test_rhsm_stage_legacy_base_url_configured(): This will still force legacy_upload=False as there is no classic staging env, so the result is the same as platform upload. 
''' - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=True, proxy=None) set_auto_configuration(config, 'cert.cloud.stage.redhat.com', None, None, False, True) assert config.base_url == 'cert.cloud.stage.redhat.com/api' @@ -183,6 +183,6 @@ def test_rhsm_stage_platform_base_url_configured(): ''' Ensure the correct base URL is assembled for a platform staging RHSM upload ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False, proxy=None) + config = Mock(base_url=None, upload_url=None, legacy_upload=False, proxy=None) set_auto_configuration(config, 'cert.cloud.stage.redhat.com', None, None, False, True) assert config.base_url == 'cert.cloud.stage.redhat.com/api' diff --git a/insights/tests/client/auto_config/test_branch_info_call.py b/insights/tests/client/auto_config/test_branch_info_call.py index 40a3edac5..26104f632 100644 --- a/insights/tests/client/auto_config/test_branch_info_call.py +++ b/insights/tests/client/auto_config/test_branch_info_call.py @@ -7,7 +7,7 @@ def test_sat_branch_info_called(connection): ''' When is_satellite is True, means we're on sat. get_branch_info should be called. ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) set_auto_configuration(config, 'test.com:443/redhat_access', 'some_cert', None, True, False) connection.return_value.get_branch_info.assert_called_once() @@ -17,6 +17,6 @@ def test_rhsm_branch_info_not_called(connection): ''' When is_satellite is False, means we're on direct RHSM. get_branch_info should not be called. ''' - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) set_auto_configuration(config, 'cert-api.access.redhat.com', None, None, False, False) connection.return_value.get_branch_info.assert_not_called() diff --git a/insights/tests/client/connection/test_init.py b/insights/tests/client/connection/test_init.py index 9e195d4f7..dc26c6d7e 100644 --- a/insights/tests/client/connection/test_init.py +++ b/insights/tests/client/connection/test_init.py @@ -13,7 +13,7 @@ def test_inventory_url_from_base_url(get_proxies, init_session): """ Inventory URL is composed correctly from the given base URL. 
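    A sketch of the expected composition (values from the assertion below):

        # "https://" + config.base_url + "/inventory/v1"
        #     -> "https://www.example.com/inventory/v1"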
""" - config = Mock(base_url="www.example.com", insecure_connection=False) + config = Mock(base_url="www.example.com") connection = InsightsConnection(config) assert connection.inventory_url == "https://www.example.com/inventory/v1" diff --git a/insights/tests/client/connection/test_url_guess.py b/insights/tests/client/connection/test_url_guess.py index 650bd5959..240ef80e1 100644 --- a/insights/tests/client/connection/test_url_guess.py +++ b/insights/tests/client/connection/test_url_guess.py @@ -8,7 +8,7 @@ def test_url_guess_legacy(get_proxies, init_session): """ Connection should guess the right URLs if there's nothing in the config (the default) """ - config = Mock(base_url=None, upload_url=None, legacy_upload=True, insecure_connection=False, analyze_container=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=True, analyze_container=False) connection = InsightsConnection(config) assert connection.base_url == 'https://cert-api.access.redhat.com/r/insights' @@ -21,7 +21,7 @@ def test_url_guess_platform(get_proxies, init_session): """ Connection should guess the right URLs if there's nothing in the config (the default) """ - config = Mock(base_url=None, upload_url=None, legacy_upload=False, insecure_connection=False) + config = Mock(base_url=None, upload_url=None, legacy_upload=False) connection = InsightsConnection(config) # assert connection.base_url == 'https://cloud.redhat.com/api' @@ -36,7 +36,7 @@ def test_branch_info_url_guess_legacy(get_proxies, init_session): """ Satellite branch info URL should be set properly """ - config = Mock(base_url='sat.test.com:443/redhat_access/r/insights', upload_url=None, legacy_upload=True, insecure_connection=False, branch_info_url=None) + config = Mock(base_url='sat.test.com:443/redhat_access/r/insights', upload_url=None, legacy_upload=True, branch_info_url=None) connection = InsightsConnection(config) assert connection.branch_info_url == 'https://sat.test.com:443/redhat_access/r/insights/v1/branch_info' @@ -48,7 +48,7 @@ def test_branch_info_url_guess_platform(get_proxies, init_session): """ Satellite branch info URL should be the same on platform as on legacy """ - config = Mock(base_url='sat.test.com:443/redhat_access/r/insights', upload_url=None, legacy_upload=False, insecure_connection=False, branch_info_url=None) + config = Mock(base_url='sat.test.com:443/redhat_access/r/insights', upload_url=None, legacy_upload=False, branch_info_url=None) connection = InsightsConnection(config) assert connection.branch_info_url == 'https://sat.test.com:443/redhat_access/r/insights/v1/branch_info' From 8eb688a5e5cd3fed74b59f306ccf49fc349867ad Mon Sep 17 00:00:00 2001 From: Andrew Kofink Date: Wed, 31 Mar 2021 11:23:08 -0400 Subject: [PATCH 364/892] feat: RHICOMPL-1401 Send OS info in profile request (#2971) * feat: RHICOMPL-1401 Send OS info profile request Tailoring will now happen against specific rule sets per OS minor version rather than the latest SSG for a given major OS version. This is part of COMP-E-133. If os_minor_version is not returned by the compliance API, the old behavior of scanning against the initial profile is used. 
Signed-off-by: Andrew Kofink

* refactor: RHICOMPL-1401 s/policy/profile/

Signed-off-by: Andrew Kofink

Co-authored-by: Jeremy Crafts
---
 insights/client/apps/compliance/__init__.py   | 61 ++++++++-----
 insights/tests/client/apps/test_compliance.py | 85 ++++++++++++++++---
 2 files changed, 111 insertions(+), 35 deletions(-)

diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py
index 0fe5bfaf9..be3434d33 100644
--- a/insights/client/apps/compliance/__init__.py
+++ b/insights/client/apps/compliance/__init__.py
@@ -27,50 +27,51 @@ def __init__(self, config):

     def oscap_scan(self):
         self._assert_oscap_rpms_exist()
-        policies = self.get_policies()
-        if not policies:
+        initial_profiles = self.get_initial_profiles()
+        matching_os_profiles = self.get_profiles_matching_os()
+        profiles = self.profile_union_by_ref_id(matching_os_profiles, initial_profiles)
+        if not profiles:
             logger.error("System is not associated with any profiles. Assign profiles using the Compliance web UI.\n")
             exit(constants.sig_kill_bad)
-        for policy in policies:
+        for profile in profiles:
             self.run_scan(
-                policy['attributes']['ref_id'],
-                self.find_scap_policy(policy['attributes']['ref_id']),
-                '/var/tmp/oscap_results-{0}.xml'.format(policy['attributes']['ref_id']),
-                tailoring_file_path=self.download_tailoring_file(policy)
+                profile['attributes']['ref_id'],
+                self.find_scap_policy(profile['attributes']['ref_id']),
+                '/var/tmp/oscap_results-{0}.xml'.format(profile['attributes']['ref_id']),
+                tailoring_file_path=self.download_tailoring_file(profile)
             )
         return self.archive.create_tar_file(), COMPLIANCE_CONTENT_TYPE

-    def download_tailoring_file(self, policy):
-        if 'tailored' not in policy['attributes'] or policy['attributes']['tailored'] is False:
+    def download_tailoring_file(self, profile):
+        if ('tailored' not in profile['attributes'] or profile['attributes']['tailored'] is False or
+                ('os_minor_version' in profile['attributes'] and profile['attributes']['os_minor_version'] != self.os_minor_version())):
             return None

         # Download tailoring file to pass as argument to run_scan
         logger.debug(
-            "Policy {0} is a tailored policy. Starting tailoring file download...".format(policy['attributes']['ref_id'])
+            "Policy {0} is a tailored policy. Starting tailoring file download...".format(profile['attributes']['ref_id'])
         )

-        tailoring_file_path = "/var/tmp/oscap_tailoring_file-{0}.xml".format(policy['attributes']['ref_id'])
+        tailoring_file_path = "/var/tmp/oscap_tailoring_file-{0}.xml".format(profile['attributes']['ref_id'])
         response = self.conn.session.get(
-            "https://{0}/compliance/profiles/{1}/tailoring_file".format(self.config.base_url, policy['id'])
+            "https://{0}/compliance/profiles/{1}/tailoring_file".format(self.config.base_url, profile['id'])
         )
         logger.debug("Response code: {0}".format(response.status_code))
         if response.content is None:
-            logger.info("Problem downloading tailoring file for {0} to {1}".format(policy['attributes']['ref_id'], tailoring_file_path))
+            logger.info("Problem downloading tailoring file for {0} to {1}".format(profile['attributes']['ref_id'], tailoring_file_path))
             return None

         with open(tailoring_file_path, mode="w+b") as f:
             f.write(response.content)
-            logger.info("Saved tailoring file for {0} to {1}".format(policy['attributes']['ref_id'], tailoring_file_path))
+            logger.info("Saved tailoring file for {0} to {1}".format(profile['attributes']['ref_id'], tailoring_file_path))

-        logger.debug("Policy {0} tailoring file download finished".format(policy['attributes']['ref_id']))
+        logger.debug("Policy {0} tailoring file download finished".format(profile['attributes']['ref_id']))

         return tailoring_file_path

-    # TODO: Not a typo! This endpoint gives OSCAP policies, not profiles
-    # We need to update compliance-backend to fix this
-    def get_policies(self):
+    def get_profiles(self, search):
         response = self.conn.session.get("https://{0}/compliance/profiles".format(self.config.base_url),
-                                         params={'search': 'system_names={0} external=false canonical=false'.format(self.hostname)})
+                                         params={'search': search})
         logger.debug("Content of the response: {0} - {1}".format(response, response.json()))

         if response.status_code == 200:
@@ -78,12 +79,30 @@ def get_policies(self):
         else:
             return []

+    def get_initial_profiles(self):
+        return self.get_profiles('system_names={0} canonical=false external=false'.format(self.hostname))
+
+    def get_profiles_matching_os(self):
+        return self.get_profiles('system_names={0} canonical=false os_minor_version={1}'.format(self.hostname, self.os_minor_version()))
+
+    def profile_union_by_ref_id(self, prioritized_profiles, merged_profiles):
+        profiles = dict((p['attributes']['ref_id'], p) for p in merged_profiles)
+        profiles.update(dict((p['attributes']['ref_id'], p) for p in prioritized_profiles))
+
+        return list(profiles.values())
+
     def os_release(self):
         _, version, _ = linux_distribution()
-        return findall("^[6-8]", version)[0]
+        return version
+
+    def os_major_version(self):
+        return findall("^[6-8]", self.os_release())[0]
+
+    def os_minor_version(self):
+        return findall("\d+$", self.os_release())[0]

     def profile_files(self):
-        return glob("{0}*rhel{1}*.xml".format(POLICY_FILE_LOCATION, self.os_release()))
+        return glob("{0}*rhel{1}*.xml".format(POLICY_FILE_LOCATION, self.os_major_version()))

     def find_scap_policy(self, profile_ref_id):
         grepcmd = 'grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files())
diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py
index 1495f4795..1b7ba0615 100644
--- a/insights/tests/client/apps/test_compliance.py
+++ b/insights/tests/client/apps/test_compliance.py
@@ -13,7 +13,8 @@
 @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None, compressor='gz')
 def test_oscap_scan(config, assert_rpms):
     compliance_client = ComplianceClient(config)
-    compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo', 'tailored': False}}]
+    compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo', 'tailored': False}}]
+    compliance_client.get_profiles_matching_os = lambda: []
     compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml'
     compliance_client.run_scan = lambda ref_id, policy_xml, output_path, tailoring_file_path: None
     compliance_client.archive.archive_tmp_dir = '/tmp'
@@ -27,7 +28,8 @@ def test_missing_packages(config, call):
     compliance_client = ComplianceClient(config)
-    compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo'}}]
+    compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo'}}]
+    compliance_client.get_profiles_matching_os = lambda: []
     compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml'
     compliance_client.run_scan = lambda ref_id, policy_xml: None
     with raises(SystemExit):
@@ -38,7 +40,8 @@ def test_errored_rpm_call(config, call):
     compliance_client = ComplianceClient(config)
-    compliance_client.get_policies = lambda: [{'attributes': {'ref_id': 'foo'}}]
+    compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo'}}]
+    compliance_client.get_profiles_matching_os = lambda: []
    compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml'
     compliance_client.run_scan = lambda ref_id, policy_xml: None
     with raises(SystemExit):
@@ -46,37 +49,70 @@

 @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None)
-def test_get_policies(config):
+def test_get_profiles(config):
     compliance_client = ComplianceClient(config)
     compliance_client.hostname = 'foo'
     compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]})))
-    assert compliance_client.get_policies() == [{'attributes': 'data'}]
-    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'})
+    assert compliance_client.get_profiles('search string') == [{'attributes': 'data'}]
+    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'})


 @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None)
-def test_get_policies_no_policies(config):
+def test_get_profiles_no_profiles(config):
     compliance_client = ComplianceClient(config)
     compliance_client.hostname = 'foo'
     compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': []})))
-    assert compliance_client.get_policies() == []
-    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'})
+    assert compliance_client.get_profiles('search string') == []
+    compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'})
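For reference, a small sketch of the two inventory queries exercised by the tests above, using the search templates and version parsing introduced in this patch; the hostname and the '6.5' release string are invented sample values:

    from re import findall

    hostname = 'foo'
    os_release = '6.5'

    os_major_version = findall("^[6-8]", os_release)[0]  # '6'
    os_minor_version = findall("\d+$", os_release)[0]    # '5'

    # Initially assigned, non-canonical profiles for this host:
    initial_search = 'system_names={0} canonical=false external=false'.format(hostname)
    # Profiles tailored to the host's OS minor version:
    matching_search = 'system_names={0} canonical=false os_minor_version={1}'.format(hostname, os_minor_version)

    assert initial_search == 'system_names=foo canonical=false external=false'
    assert matching_search == 'system_names=foo canonical=false os_minor_version=5'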
@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) -def test_get_policies_error(config): +def test_get_profiles_error(config): compliance_client = ComplianceClient(config) compliance_client.hostname = 'foo' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=500)) - assert compliance_client.get_policies() == [] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo external=false canonical=false'}) + assert compliance_client.get_profiles('search string') == [] + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) + + +@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) +def test_get_initial_profiles(config): + compliance_client = ComplianceClient(config) + compliance_client.hostname = 'foo' + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + assert compliance_client.get_initial_profiles() == [{'attributes': 'data'}] + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo canonical=false external=false'}) + + +@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) +def test_get_profiles_matching_os(config, linux_distro_mock): + compliance_client = ComplianceClient(config) + compliance_client.hostname = 'foo' + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + assert compliance_client.get_profiles_matching_os() == [{'attributes': 'data'}] + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo canonical=false os_minor_version=5'}) @patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) @patch("insights.client.config.InsightsConfig") def test_os_release(config, linux_distro_mock): compliance_client = ComplianceClient(config) - assert compliance_client.os_release() == '6' + assert compliance_client.os_release() == '6.5' + + +@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.config.InsightsConfig") +def test_os_minor_version(config, linux_distro_mock): + compliance_client = ComplianceClient(config) + assert compliance_client.os_minor_version() == '5' + + +@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.config.InsightsConfig") +def test_os_major_version(config, linux_distro_mock): + compliance_client = ComplianceClient(config) + assert compliance_client.os_major_version() == '6' @patch("insights.client.config.InsightsConfig") @@ -156,11 +192,32 @@ def test_tailored_file_is_not_downloaded_if_tailored_is_missing(config): @patch("insights.client.apps.compliance.open", new_callable=mock_open) @patch("insights.client.config.InsightsConfig") -def test_tailored_file_is_downloaded_if_needed(config, call): +def test_tailored_file_is_downloaded_from_initial_profile_if_os_minor_version_is_missing(config, call): compliance_client = ComplianceClient(config) 
compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" assert tailoring_file_path == compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa'}}) + assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa'}}) is None + + +@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.config.InsightsConfig") +def test_tailored_file_is_not_downloaded_if_os_minor_version_mismatches(config, linux_distro_mock): + compliance_client = ComplianceClient(config) + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa', 'os_minor_version': '2'}}) is None + assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa', 'os_minor_version': '2'}}) is None + + +@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.open", new_callable=mock_open) +@patch("insights.client.config.InsightsConfig") +def test_tailored_file_is_downloaded_if_needed(config, call, linux_distro_mock): + compliance_client = ComplianceClient(config) + compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) + tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" + assert tailoring_file_path == compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa', 'os_minor_version': '5'}}) + assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa', 'os_minor_version': '5'}}) is None @patch("insights.client.config.InsightsConfig") From f0d5a322bfc2d12ab686600d8048297e42ef0c2a Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 1 Apr 2021 01:09:15 +0800 Subject: [PATCH 365/892] Remove the duplicated lspci_kernel (#2995) Signed-off-by: Xiangce Liu --- insights/client/map_components.py | 1 - insights/client/uploader_json_map.json | 5 ----- 2 files changed, 6 deletions(-) diff --git a/insights/client/map_components.py b/insights/client/map_components.py index 72d8bf5ad..68c28ecc2 100644 --- a/insights/client/map_components.py +++ b/insights/client/map_components.py @@ -127,7 +127,6 @@ def _get_component_by_symbolic_name(sname): spec_prefix = "insights.specs.default.DefaultSpecs." 
spec_conversion = { 'getconf_pagesize': 'getconf_page_size', - 'lspci_kernel': 'lspci', 'netstat__agn': 'netstat_agn', 'rpm__V_packages': 'rpm_V_packages', diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index fbd4d7f6e..80946d731 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -717,11 +717,6 @@ "pattern": [], "symbolic_name": "lspci" }, - { - "command": "/sbin/lspci -k", - "pattern": [], - "symbolic_name": "lspci_kernel" - }, { "command": "/sbin/lspci -vmmkn", "pattern": [], From f34fc823c317d47459dabc9acfc2618555817845 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 1 Apr 2021 01:26:32 +0800 Subject: [PATCH 366/892] Add parser to get the issuer of satellite client cert (#2991) * Add parser to get the issuer of satellite client cert * Add parser to get issuer of "/etc/rhsm/ca/katello-default-ca.pem". * Refactor CertificateChain to reuse the CertificateInfo since chain is just a list of cert. Signed-off-by: Huanhuan Li * Extract the parsing content part to a function Signed-off-by: Huanhuan Li * Raise SkipException if the output is empty Signed-off-by: Huanhuan Li --- .../certificate_chain.rst | 3 - .../ssl_certificate.rst | 3 + insights/parsers/certificate_chain.py | 118 ----------- insights/parsers/ssl_certificate.py | 198 ++++++++++++++++++ .../parsers/tests/test_certificate_chain.py | 119 ----------- .../parsers/tests/test_ssl_certificate.py | 153 ++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 9 files changed, 357 insertions(+), 240 deletions(-) delete mode 100644 docs/shared_parsers_catalog/certificate_chain.rst create mode 100644 docs/shared_parsers_catalog/ssl_certificate.rst delete mode 100644 insights/parsers/certificate_chain.py create mode 100644 insights/parsers/ssl_certificate.py delete mode 100644 insights/parsers/tests/test_certificate_chain.py create mode 100644 insights/parsers/tests/test_ssl_certificate.py diff --git a/docs/shared_parsers_catalog/certificate_chain.rst b/docs/shared_parsers_catalog/certificate_chain.rst deleted file mode 100644 index 65c712cb9..000000000 --- a/docs/shared_parsers_catalog/certificate_chain.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.certificate_chain - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/ssl_certificate.rst b/docs/shared_parsers_catalog/ssl_certificate.rst new file mode 100644 index 000000000..aa27fe957 --- /dev/null +++ b/docs/shared_parsers_catalog/ssl_certificate.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.ssl_certificate + :members: + :show-inheritance: diff --git a/insights/parsers/certificate_chain.py b/insights/parsers/certificate_chain.py deleted file mode 100644 index 8871c68f8..000000000 --- a/insights/parsers/certificate_chain.py +++ /dev/null @@ -1,118 +0,0 @@ -""" -Get Certificate Chain Info -========================== - -This module contains the following parsers: - -SatelliteCustomCaChain - command ``awk 'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}' /etc/pki/katello/certs/katello-server-ca.crt`` -======================================================================================================================================================================================================================================== -""" - -from insights import parser, CommandParser -from datetime import datetime -from insights.parsers import ParseException, SkipException -from insights.specs import Specs -from insights.parsers.certificates_enddate import CertificatesEnddate - - -class CertificateChain(CommandParser, list): - """ - Class to parse the output of "openssl -in -xxx -xxx". - Blank line is added to distinguish different certs in the chain. - Currently it only supports the attributes which the output is in - key=value pairs. - - Sample Output:: - - issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com - subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com - notBefore=Dec 7 07:02:33 2020 GMT - notAfter=Jan 18 07:02:33 2038 GMT - - issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.c.com - subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.d.com - notBefore=Nov 30 07:02:42 2020 GMT - notAfter=Jan 18 07:02:43 2018 GMT - - Examples: - >>> type(certs) - - >>> len(certs) - 2 - >>> certs.earliest_expiry_date.str - 'Jan 18 07:02:43 2018' - """ - - expire_date_format = '%b %d %H:%M:%S %Y' - - def parse_content(self, content): - """ - Parse the content of crt chain file. And it saves the expiration - info of each crt in a list of dict. The value of notBefore and - notAfter are saved to an instance of ExpirationDate, it - contains the date in string and datetime format. - - Attributes: - earliest_expiry_date(ExpirationDate): - The earliest expiry datetime of the certs in the chain. - None when there isn't "notAfter" for all the certs - in the chain. - - Raises: - ParseException: when the output isn't in key=value format or - the notAfter or notBefore isn't expected format. - """ - if len(content) < 1: - raise SkipException("No cert in the output") - data = {} - self.append(data) - self.earliest_expiry_date = None - for index, line in enumerate(content): - if not line.strip(): - # a new cert starts - if data: - data = {} - self.append(data) - continue - if '=' not in line: - raise ParseException('The line %s is not in key=value format' % line) - key, value = [item.strip() for item in line.split('=', 1)] - value_without_tz = value.rsplit(" ", 1)[0] - if key in ['notBefore', 'notAfter']: - try: - date_time = datetime.strptime(value_without_tz, self.expire_date_format) - except Exception: - raise ParseException('The %s is not in %s format.' 
% (key, self.expire_date_format)) - value = CertificatesEnddate.ExpirationDate(value_without_tz, date_time) - data[key] = value - - for one_cert in self: - expire_date = one_cert.get('notAfter') - if expire_date and (self.earliest_expiry_date is None or expire_date.datetime < self.earliest_expiry_date.datetime): - self.earliest_expiry_date = expire_date - - -@parser(Specs.satellite_custom_ca_chain) -class SatelliteCustomCaChain(CertificateChain): - """ - .. note:: - Please refer to its super-class :class:`insights.parsers.certificate_chain.CertificateChain` for more - details. - - Sample Output:: - - subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com - notAfter=Jan 18 07:02:33 2038 GMT - - subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com - notAfter=Jan 18 07:02:43 2028 GMT - - Examples: - >>> type(satellite_ca_certs) - - >>> len(satellite_ca_certs) - 2 - >>> satellite_ca_certs.earliest_expiry_date.str - 'Jan 18 07:02:43 2028' - """ - pass diff --git a/insights/parsers/ssl_certificate.py b/insights/parsers/ssl_certificate.py new file mode 100644 index 000000000..5d8c2a1a7 --- /dev/null +++ b/insights/parsers/ssl_certificate.py @@ -0,0 +1,198 @@ +""" +Get SSL Certificate Info +======================== + +This module contains the following parsers: + +SatelliteCustomCaChain - command ``awk 'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}' /etc/pki/katello/certs/katello-server-ca.crt`` +======================================================================================================================================================================================================================================== +RhsmKatelloDefaultCACert - command ``openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer`` +========================================================================================================== +""" + +from insights import parser, CommandParser +from datetime import datetime +from insights.parsers import ParseException, SkipException +from insights.specs import Specs +from insights.parsers.certificates_enddate import CertificatesEnddate + + +def parse_openssl_output(content): + """ + It parses the output of "openssl -in -xxx". + Currently it only supports the attributes which the output is in + key=value pairs. It saves the cert info into a dict. The value of notBefore and + notAfter are saved to an instance of ExpirationDate, which contains the date + in string and datetime format. + + Raises: + ParseException: when the output isn't in key=value format or + the notAfter or notBefore isn't expected format. + """ + date_format = '%b %d %H:%M:%S %Y' + data = {} + for line in content: + if '=' not in line: + raise ParseException('The line %s is not in key=value format' % line) + key, value = [item.strip() for item in line.split('=', 1)] + if key in ['notBefore', 'notAfter']: + value_without_tz = value.rsplit(" ", 1)[0] + try: + date_time = datetime.strptime(value_without_tz, date_format) + except Exception: + raise ParseException('The %s is not in %s format.' % (key, date_format)) + value = CertificatesEnddate.ExpirationDate(value_without_tz, date_time) + data[key] = value + return data + + +class CertificateInfo(CommandParser, dict): + """ + Base class to parse the output of "openssl -in -xxx". + Currently it only supports the attributes which the output is in + key=value pairs. 
+ + Sample Output:: + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com + notBefore=Dec 7 07:02:33 2020 GMT + notAfter=Jan 18 07:02:33 2038 GMT + subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com + + Examples: + >>> type(cert) + + >>> 'issuer' in cert + True + >>> cert['issuer'] + '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + >>> cert['notBefore'].str + 'Dec 7 07:02:33 2020' + + Raises: + SkipException: when the command output is empty. + """ + + def __init__(self, context): + super(CertificateInfo, self).__init__( + context, + extra_bad_lines=['error opening certificate', 'unable to load certificate']) + + def parse_content(self, content): + """ + This uses the :py:func:`insights.parsers.ssl_certificate.parse_openssl_output` function. + See its documentation for parsing details. + """ + + self.update(parse_openssl_output(content)) + if not self: + raise SkipException("There is not any info in the cert.") + + +class CertificateChain(CommandParser, list): + """ + Base class to parse the output of "openssl -in -xxx". + Blank line is added to distinguish different certs in the chain. + Currently it only supports the attributes which the output is in + key=value pairs. + + Sample Output:: + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com + subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com + notBefore=Dec 7 07:02:33 2020 GMT + notAfter=Jan 18 07:02:33 2038 GMT + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.c.com + subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.d.com + notBefore=Nov 30 07:02:42 2020 GMT + notAfter=Jan 18 07:02:43 2018 GMT + + Examples: + >>> type(certs) + + >>> len(certs) + 2 + >>> certs.earliest_expiry_date.str + 'Jan 18 07:02:43 2018' + """ + + def parse_content(self, content): + """ + Parse the content of cert chain file. And it saves the certs + in a list of dict. + This uses the :py:func:`insights.parsers.ssl_certificate.parse_openssl_output` function. + See its documentation for parsing details. + + Attributes: + earliest_expiry_date(ExpirationDate): + The earliest expiry datetime of the certs in the chain. + None when there isn't "notAfter" for all the certs + in the chain. + + Raises: + SkipException: when the command output is empty. + """ + + self.earliest_expiry_date = None + start_index = 0 + for index, line in enumerate(content): + if not line.strip(): + # one cert ends + if start_index != index: + self.append(parse_openssl_output(content[start_index:index])) + start_index = index + 1 + if index == len(content) - 1: + self.append(parse_openssl_output(content=content[start_index:index + 1])) + if not self: + raise SkipException("There is not any info in the ca cert chain.") + for one_cert in self: + expire_date = one_cert.get('notAfter') + if expire_date and (self.earliest_expiry_date is None or expire_date.datetime < self.earliest_expiry_date.datetime): + self.earliest_expiry_date = expire_date + + +@parser(Specs.satellite_custom_ca_chain) +class SatelliteCustomCaChain(CertificateChain): + """ + .. note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateChain` for more + details. 
+ + Sample Output:: + + subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com + notAfter=Jan 18 07:02:33 2038 GMT + + subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com + notAfter=Jan 18 07:02:43 2028 GMT + + Examples: + >>> type(satellite_ca_certs) + + >>> len(satellite_ca_certs) + 2 + >>> satellite_ca_certs.earliest_expiry_date.str + 'Jan 18 07:02:43 2028' + """ + pass + + +@parser(Specs.rhsm_katello_default_ca_cert) +class RhsmKatelloDefaultCACert(CertificateInfo): + """ + .. note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more + details. + + Sample Output:: + + issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com + + Examples: + >>> type(rhsm_katello_default_ca) + + >>> rhsm_katello_default_ca['issuer'] + '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + """ + pass diff --git a/insights/parsers/tests/test_certificate_chain.py b/insights/parsers/tests/test_certificate_chain.py deleted file mode 100644 index 84f511611..000000000 --- a/insights/parsers/tests/test_certificate_chain.py +++ /dev/null @@ -1,119 +0,0 @@ -import doctest -import pytest - -from insights.parsers import certificate_chain, ParseException, SkipException -from insights.tests import context_wrap - - -SATELLITE_OUTPUT1 = """ -subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com -notAfter=Jan 18 07:02:33 2038 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com -notAfter=Jan 18 07:02:43 2018 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com -notAfter=Jan 18 07:02:43 2048 GMT - -""" - -SATELLITE_OUTPUT2 = """ -subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com -notAfter=Jan 18 07:02:33 2038 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com -notAfter=Jan 18 07:02:43 2028 GMT - -""" - -OUTPUT1 = """ -issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com -subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com -notBefore=Dec 7 07:02:33 2020 GMT -notAfter=Jan 18 07:02:33 2038 GMT - -issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com -notBefore=Nov 30 07:02:42 2020 GMT -notAfter=Jan 18 07:02:43 2018 GMT - -""" - -OUTPUT2 = """ -notAfter=Dec 4 07:04:05 2035 GMT -subject= /CN=Puppet CA: abc.d.com -issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com -""" - -BAD_OUTPUT1 = """ -subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com -notAfterJan 18 07:02:33 2038 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com -notAfterJan 18 07:02:43 2018 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com -notAfterJan 18 07:02:43 2048 GMT - -""" - -BAD_OUTPUT2 = """ -subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com -notAfter=2038 Jan 18 07:02:33 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com -notAfterJan 18 07:02:43 2018 GMT - -subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com -notAfterJan 18 07:02:43 2048 GMT - -""" - -BAD_OUTPUT3 = """ - -""" - - -def test_certificates_chain(): - certs = certificate_chain.SatelliteCustomCaChain(context_wrap(OUTPUT1)) - assert len(certs) == 2 - assert 
certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' - for cert in certs: - if cert['notAfter'].str == certs.earliest_expiry_date.str: - assert cert['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com' - assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' - assert cert['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com' - assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' - - certs = certificate_chain.SatelliteCustomCaChain(context_wrap(OUTPUT2)) - assert len(certs) == 1 - assert certs[0]['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com' - - -def test_satellite_ca_chain(): - certs = certificate_chain.CertificateChain(context_wrap(SATELLITE_OUTPUT1)) - assert len(certs) == 3 - assert certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' - for cert in certs: - if cert['notAfter'].str == certs.earliest_expiry_date.str: - assert cert['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com' - - -def test_doc(): - certs = certificate_chain.CertificateChain(context_wrap(OUTPUT1)) - satellite_ca_certs = certificate_chain.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT2)) - globs = { - 'certs': certs, - 'satellite_ca_certs': satellite_ca_certs - } - failed, tested = doctest.testmod(certificate_chain, globs=globs) - assert failed == 0 - - -def test_certificates_chain_except(): - with pytest.raises(ParseException): - certificate_chain.CertificateChain(context_wrap(BAD_OUTPUT1)) - with pytest.raises(ParseException): - certificate_chain.CertificateChain(context_wrap(BAD_OUTPUT2)) - with pytest.raises(SkipException): - certificate_chain.SatelliteCustomCaChain(context_wrap(BAD_OUTPUT3)) diff --git a/insights/parsers/tests/test_ssl_certificate.py b/insights/parsers/tests/test_ssl_certificate.py new file mode 100644 index 000000000..f8bb7e336 --- /dev/null +++ b/insights/parsers/tests/test_ssl_certificate.py @@ -0,0 +1,153 @@ +import doctest +import pytest + +from insights.parsers import ssl_certificate, ParseException, SkipException +from insights.core.plugins import ContentException +from insights.tests import context_wrap + + +CERTIFICATE_OUTPUT1 = """ +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com +notBefore=Dec 7 07:02:33 2020 GMT +notAfter=Jan 18 07:02:33 2038 GMT +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com +""" + +CERTIFICATE_CHAIN_OUTPUT1 = """ +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notBefore=Dec 7 07:02:33 2020 GMT +notAfter=Jan 18 07:02:33 2038 GMT + + +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notBefore=Nov 30 07:02:42 2020 GMT +notAfter=Jan 18 07:02:43 2018 GMT +""" + +CERTIFICATE_CHAIN_OUTPUT2 = """ +notAfter=Dec 4 07:04:05 2035 GMT +subject= /CN=Puppet CA: abc.d.com +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com +""" + +SATELLITE_OUTPUT1 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=Jan 18 07:02:33 2038 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfter=Jan 18 07:02:43 2018 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com +notAfter=Jan 18 07:02:43 2048 GMT +""" + +SATELLITE_OUTPUT2 = """ +subject= 
/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=Jan 18 07:02:33 2038 GMT + +subject= /C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com +notAfter=Jan 18 07:02:43 2028 GMT +""" + +RHSM_KATELLO_CERT_OUTPUT1 = """ +issuer= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com +""" + +BAD_OUTPUT1 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfterJan 18 07:02:33 2038 GMT +""" + +BAD_OUTPUT2 = """ +subject= /C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com +notAfter=2038 Jan 18 07:02:33 GMT +""" + +BAD_OUTPUT3 = """ +Error opening Certificate /etc/rhsm/ca/katello-default-ca.pem +139814540982160:error:02001002:system library:fopen:No such file or directory:bss_file.c:402:fopen('/etc/rhsm/ca/katello-default-ca.pem','r') +139814540982160:error:20074002:BIO routines:FILE_CTRL:system lib:bss_file.c:404: +unable to load certificate +""" + +BAD_OUTPUT4 = """ + +""" + + +def test_certificate_info_exception(): + with pytest.raises(ParseException): + ssl_certificate.CertificateInfo(context_wrap(BAD_OUTPUT1)) + with pytest.raises(ParseException): + ssl_certificate.CertificateInfo(context_wrap(BAD_OUTPUT2)) + with pytest.raises(ContentException): + ssl_certificate.CertificateInfo(context_wrap(BAD_OUTPUT3)) + with pytest.raises(SkipException): + ssl_certificate.CertificateInfo(context_wrap(BAD_OUTPUT4)) + + +def test_certificate_chain_exception(): + with pytest.raises(SkipException): + ssl_certificate.CertificateChain(context_wrap(BAD_OUTPUT4)) + + +def test_certificate_info(): + cert = ssl_certificate.CertificateInfo(context_wrap(CERTIFICATE_OUTPUT1)) + assert cert['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + assert cert['notBefore'].str == 'Dec 7 07:02:33 2020' + assert cert['notAfter'].str == 'Jan 18 07:02:33 2038' + assert cert['subject'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + + +def test_certificates_chain(): + certs = ssl_certificate.CertificateChain(context_wrap(CERTIFICATE_CHAIN_OUTPUT1)) + assert len(certs) == 2 + assert certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' + for cert in certs: + if cert['notAfter'].str == certs.earliest_expiry_date.str: + assert cert['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.d.com' + assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' + assert cert['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com' + assert cert['notBefore'].str == 'Nov 30 07:02:42 2020' + + certs = ssl_certificate.CertificateChain(context_wrap(CERTIFICATE_CHAIN_OUTPUT2)) + assert len(certs) == 1 + assert certs[0]['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=abc.d.com' + + certs = ssl_certificate.CertificateChain(context_wrap(RHSM_KATELLO_CERT_OUTPUT1)) + assert len(certs) == 1 + + +def test_satellite_ca_chain(): + certs = ssl_certificate.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT1)) + assert len(certs) == 3 + assert certs.earliest_expiry_date.str == 'Jan 18 07:02:43 2018' + assert certs[0]['subject'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=test.a.com' + assert certs[0]['notAfter'].str == 'Jan 18 07:02:33 2038' + assert certs[1]['subject'] == '/C=US/ST=North Carolina/O=Katello/OU=SomeOrgUnit/CN=test.b.com' + assert certs[1]['notAfter'].str == 'Jan 18 07:02:43 2018' + assert certs[2]['subject'] == '/C=US/ST=North 
Carolina/O=Katello/OU=SomeOrgUnit/CN=test.c.com' + assert certs[2]['notAfter'].str == 'Jan 18 07:02:43 2048' + + +def test_rhsm_katello_default_ca(): + rhsm_katello_default_ca = ssl_certificate.RhsmKatelloDefaultCACert(context_wrap(RHSM_KATELLO_CERT_OUTPUT1)) + assert rhsm_katello_default_ca['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + + +def test_doc(): + cert = ssl_certificate.CertificateInfo(context_wrap(CERTIFICATE_OUTPUT1)) + ca_cert = ssl_certificate.CertificateChain(context_wrap(CERTIFICATE_CHAIN_OUTPUT1)) + satellite_ca_certs = ssl_certificate.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT2)) + rhsm_katello_default_ca = ssl_certificate.RhsmKatelloDefaultCACert(context_wrap(RHSM_KATELLO_CERT_OUTPUT1)) + + globs = { + 'cert': cert, + 'certs': ca_cert, + 'satellite_ca_certs': satellite_ca_certs, + 'rhsm_katello_default_ca': rhsm_katello_default_ca + } + failed, tested = doctest.testmod(ssl_certificate, globs=globs) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 1ef4c027e..a75c53d34 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -548,6 +548,7 @@ class Specs(SpecSet): rhn_taskomatic_daemon_log = RegistryPoint(filterable=False) rhosp_release = RegistryPoint() rhsm_conf = RegistryPoint() + rhsm_katello_default_ca_cert = RegistryPoint() rhsm_log = RegistryPoint(filterable=True) rhsm_releasever = RegistryPoint() rndc_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index b511f3bda..2e53952a5 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -766,6 +766,7 @@ def pmlog_summary_file(broker): pulp_worker_defaults = simple_file("etc/default/pulp_workers") puppet_ca_cert_expire_date = simple_command("/usr/bin/openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout") pvs_noheadings = simple_command("/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"") + rhsm_katello_default_ca_cert = simple_command("/usr/bin/openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer") qemu_conf = simple_file("/etc/libvirt/qemu.conf") qemu_xml = glob_file(r"/etc/libvirt/qemu/*.xml") qpid_stat_g = simple_command("/usr/bin/qpid-stat -g --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 6b962f439..245143e5c 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -200,6 +200,7 @@ class InsightsArchiveSpecs(Specs): readlink_e_shift_cert_client = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-client-current.pem") readlink_e_shift_cert_server = simple_file("insights_commands/readlink_-e_.etc.origin.node.certificates.kubelet-server-current.pem") rhev_data_center = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center") + rhsm_katello_default_ca_cert = simple_file("insights_commands/openssl_x509_-in_.etc.rhsm.ca.katello-default-ca.pem_-noout_-issuer") rndc_status = simple_file("insights_commands/rndc_status") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) saphostctl_getcimobject_sapinstance = 
simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") From ef9e8db48a1678e382b10e2c51ca514f71d798af Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 31 Mar 2021 12:36:05 -0500 Subject: [PATCH 367/892] Use SIGTERM for rpm and yum commands (#2974) * Use SIGTERM for rpm and yum commands * Need to apply #2630 for core collection * Fixes bugzilla 1935846 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Remove unnecessary debug log message Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Add signum arg to simple_command Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Insure all commands have a default signal Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/core/context.py | 8 ++++---- insights/core/spec_factory.py | 23 ++++++++++++++--------- insights/specs/default.py | 13 +++++++------ insights/util/subproc.py | 2 ++ 4 files changed, 27 insertions(+), 19 deletions(-) diff --git a/insights/core/context.py b/insights/core/context.py index 6ee6295af..47acac36b 100644 --- a/insights/core/context.py +++ b/insights/core/context.py @@ -180,17 +180,17 @@ def handles(cls, files): return (closest_root, cls) return (None, None) - def check_output(self, cmd, timeout=None, keep_rc=False, env=None): + def check_output(self, cmd, timeout=None, keep_rc=False, env=None, signum=None): """ Subclasses can override to provide special environment setup, command prefixes, etc. """ - return subproc.call(cmd, timeout=timeout or self.timeout, + return subproc.call(cmd, timeout=timeout or self.timeout, signum=signum, keep_rc=keep_rc, env=env) - def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None): + def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None, signum=None): env = env or os.environ rc = None - raw = self.check_output(cmd, timeout=timeout, keep_rc=keep_rc, env=env) + raw = self.check_output(cmd, timeout=timeout, keep_rc=keep_rc, env=env, signum=signum) if keep_rc: rc, output = raw else: diff --git a/insights/core/spec_factory.py b/insights/core/spec_factory.py index 87ac00ba2..9bc5903ae 100644 --- a/insights/core/spec_factory.py +++ b/insights/core/spec_factory.py @@ -2,6 +2,7 @@ import logging import os import re +import signal import six import traceback import codecs @@ -302,7 +303,7 @@ class CommandOutputProvider(ContentProvider): """ Class used in datasources to return output from commands. 
""" - def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, timeout=None, inherit_env=None): + def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, timeout=None, inherit_env=None, signum=None): super(CommandOutputProvider, self).__init__() self.cmd = cmd self.root = "insights_commands" @@ -314,6 +315,7 @@ def __init__(self, cmd, ctx, args=None, split=True, keep_rc=False, ds=None, time self.ds = ds self.timeout = timeout self.inherit_env = inherit_env or [] + self.signum = signum or signal.SIGKILL self._content = None self.rc = None @@ -359,7 +361,7 @@ def load(self): command = self.create_args() raw = self.ctx.shell_out(command, split=self.split, keep_rc=self.keep_rc, - timeout=self.timeout, env=self.create_env()) + timeout=self.timeout, env=self.create_env(), signum=self.signum) if self.keep_rc: self.rc, output = raw else: @@ -390,7 +392,7 @@ def write(self, dst): fs.ensure_path(os.path.dirname(dst)) if args: timeout = self.timeout or self.ctx.timeout - p = Pipeline(*args, timeout=timeout, env=self.create_env()) + p = Pipeline(*args, timeout=timeout, signum=self.signum, env=self.create_env()) return p.write(dst, keep_rc=self.keep_rc) def __repr__(self): @@ -731,7 +733,7 @@ class simple_command(object): no arguments """ - def __init__(self, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs): + def __init__(self, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], signum=None, **kwargs): self.cmd = cmd self.context = context self.split = split @@ -739,13 +741,14 @@ def __init__(self, cmd, context=HostContext, deps=[], split=True, keep_rc=False, self.keep_rc = keep_rc self.timeout = timeout self.inherit_env = inherit_env + self.signum = signum self.__name__ = self.__class__.__name__ datasource(self.context, *deps, raw=self.raw, **kwargs)(self) def __call__(self, broker): ctx = broker[self.context] return CommandOutputProvider(self.cmd, ctx, split=self.split, - keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env) + keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env, signum=self.signum) class command_with_args(object): @@ -775,7 +778,7 @@ class command_with_args(object): specified arguments passed by the provider. 
""" - def __init__(self, cmd, provider, context=HostContext, deps=None, split=True, keep_rc=False, timeout=None, inherit_env=None, **kwargs): + def __init__(self, cmd, provider, context=HostContext, deps=None, split=True, keep_rc=False, timeout=None, inherit_env=None, signum=None, **kwargs): deps = deps if deps is not None else [] self.cmd = cmd self.provider = provider @@ -785,6 +788,7 @@ def __init__(self, cmd, provider, context=HostContext, deps=None, split=True, ke self.keep_rc = keep_rc self.timeout = timeout self.inherit_env = inherit_env if inherit_env is not None else [] + self.signum = signum self.__name__ = self.__class__.__name__ datasource(self.provider, self.context, *deps, raw=self.raw, **kwargs)(self) @@ -796,7 +800,7 @@ def __call__(self, broker): try: self.cmd = self.cmd % source return CommandOutputProvider(self.cmd, ctx, split=self.split, - keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env) + keep_rc=self.keep_rc, ds=self, timeout=self.timeout, inherit_env=self.inherit_env, signum=self.signum) except: log.debug(traceback.format_exc()) raise ContentException("No results found for [%s]" % self.cmd) @@ -835,7 +839,7 @@ class foreach_execute(object): created by substituting each element of provider into the cmd template. """ - def __init__(self, provider, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], **kwargs): + def __init__(self, provider, cmd, context=HostContext, deps=[], split=True, keep_rc=False, timeout=None, inherit_env=[], signum=None, **kwargs): self.provider = provider self.cmd = cmd self.context = context @@ -844,6 +848,7 @@ def __init__(self, provider, cmd, context=HostContext, deps=[], split=True, keep self.keep_rc = keep_rc self.timeout = timeout self.inherit_env = inherit_env + self.signum = signum self.__name__ = self.__class__.__name__ datasource(self.provider, self.context, *deps, multi_output=True, raw=self.raw, **kwargs)(self) @@ -860,7 +865,7 @@ def __call__(self, broker): the_cmd = self.cmd % e cop = CommandOutputProvider(the_cmd, ctx, args=e, split=self.split, keep_rc=self.keep_rc, ds=self, - timeout=self.timeout, inherit_env=self.inherit_env) + timeout=self.timeout, inherit_env=self.inherit_env, signum=self.signum) result.append(cop) except: log.debug(traceback.format_exc()) diff --git a/insights/specs/default.py b/insights/specs/default.py index 2e53952a5..577bafaaf 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -12,6 +12,7 @@ import os import re import json +import signal from grp import getgrgid from os import stat @@ -94,7 +95,7 @@ def _get_package(broker, command): ctx = broker[HostContext] resolved = ctx.shell_out("/usr/bin/readlink -e {0}".format(command)) if resolved: - pkg = ctx.shell_out("/usr/bin/rpm -qf {0}".format(resolved[0])) + pkg = ctx.shell_out("/usr/bin/rpm -qf {0}".format(resolved[0]), signum=signal.SIGTERM) if pkg: return pkg[0] raise SkipComponent @@ -787,7 +788,7 @@ def pmlog_summary_file(broker): rhsm_log = simple_file("/var/log/rhsm/rhsm.log") rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") - rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True) + rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True, signum=signal.SIGTERM) rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/*.conf"]) samba = 
simple_file("/etc/samba/smb.conf") @@ -972,11 +973,11 @@ def is_mod_loaded_for_ss(broker): x86_retp_enabled = simple_file("sys/kernel/debug/x86/retp_enabled") xinetd_conf = glob_file(["/etc/xinetd.conf", "/etc/xinetd.d/*"]) yum_conf = simple_file("/etc/yum.conf") - yum_list_available = simple_command("yum -C --noplugins list available") + yum_list_available = simple_command("yum -C --noplugins list available", signum=signal.SIGTERM) yum_log = simple_file("/var/log/yum.log") - yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist") + yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist", signum=signal.SIGTERM) yum_repos_d = glob_file("/etc/yum.repos.d/*.repo") - yum_updateinfo = simple_command("/usr/bin/yum -C updateinfo list") + yum_updateinfo = simple_command("/usr/bin/yum -C updateinfo list", signum=signal.SIGTERM) zipl_conf = simple_file("/etc/zipl.conf") rpm_format = format_rpm() - installed_rpms = simple_command("/bin/rpm -qa --qf '%s'" % rpm_format, context=HostContext) + installed_rpms = simple_command("/bin/rpm -qa --qf '%s'" % rpm_format, context=HostContext, signum=signal.SIGTERM) diff --git a/insights/util/subproc.py b/insights/util/subproc.py index 8d5f234cf..cf7fec50f 100644 --- a/insights/util/subproc.py +++ b/insights/util/subproc.py @@ -204,6 +204,8 @@ def call(cmd, if not isinstance(cmd, list): cmd = [cmd] + signum = signum or signal.SIGKILL + p = Pipeline(*cmd, timeout=timeout, signum=signum, env=env) res = p(keep_rc=keep_rc) From 5cb4bc8f40d8e7b7fe2738af4be669bde5bdd81a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 31 Mar 2021 15:59:39 -0400 Subject: [PATCH 368/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 32 ++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 80946d731..1444ac70c 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1089,6 +1089,7 @@ "bash", "ceilometer-poll", "chronyd", + "cinder-volume", "clvmd", "cmirrord", "corosync", @@ -1103,14 +1104,17 @@ "heat-engine", "httpd", "libvirtd", + "memcached", "mongdb", "multipath", "multipathd", "neutron-dhcp-ag", "neutron-l3-agen", + "neutron-server", "nginx", "nova-compute", "nova-conductor", + "nova-scheduler", "ntpd", "octavia-worker", "openshift start master api", @@ -1131,6 +1135,7 @@ "sap", "snmpd", "spausedd", + "swift-proxy-ser", "tuned" ], "symbolic_name": "ps_alxwww" @@ -1153,6 +1158,7 @@ "ceilometer-poll", "ceph-osd", "chronyd", + "cinder-volume", "clvmd", "cmirrord", "corosync", @@ -1166,15 +1172,18 @@ "heat-engine", "httpd", "libvirtd", + "memcached", "mongdb", "multipath", "multipathd", "mysqld", "neutron-dhcp-ag", "neutron-l3-agen", + "neutron-server", "nginx", "nova-compute", "nova-conductor", + "nova-scheduler", "ntpd", "oc observe csr", "octavia-worker", @@ -1199,6 +1208,7 @@ "sap", "snmpd", "spausedd", + "swift-proxy-ser", "tuned" ], "symbolic_name": "ps_aux" @@ -1225,6 +1235,7 @@ "ceilometer-coll", "ceilometer-poll", "chronyd", + "cinder-volume", "clvmd", "cmirrord", "corosync", @@ -1241,16 +1252,19 @@ "httpd", "iscsid", "libvirtd", + "memcached", "mongdb", "multipath", "multipathd", "neutron-dhcp-ag", "neutron-l3-agen", + "neutron-server", "nfs-server", "nfsd", "nginx", "nova-compute", "nova-conductor", + "nova-scheduler", "ntpd", "octavia-worker", "openshift start master api", @@ -1273,6 +1287,7 @@ "smbd", "snmpd", "spausedd", 
+ "swift-proxy-ser", "target_completi", "tgtd", "tuned" @@ -1289,6 +1304,7 @@ "bash", "ceilometer-poll", "chronyd", + "cinder-volume", "clvmd", "cmirrord", "corosync", @@ -1302,17 +1318,20 @@ "heat-engine", "httpd", "libvirtd", + "memcached", "mongdb", "multipath", "multipathd", "neutron-dhcp-ag", "neutron-l3-agen", "neutron-ns-metadata-proxy", + "neutron-server", "nginx", "nginx: master process", "nginx: worker process", "nova-compute", "nova-conductor", + "nova-scheduler", "ntpd", "octavia-worker", "openshift start master api", @@ -1333,6 +1352,7 @@ "sap", "snmpd", "spausedd", + "swift-proxy-ser", "tuned" ], "symbolic_name": "ps_ef" @@ -1387,6 +1407,11 @@ "pattern": [], "symbolic_name": "rhev_data_center" }, + { + "command": "/usr/bin/openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer", + "pattern": [], + "symbolic_name": "rhsm_katello_default_ca_cert" + }, { "command": "/usr/sbin/rndc status", "pattern": [], @@ -2606,7 +2631,6 @@ "kernel: Memory cgroup out of memory: Kill process", "kernel: TCP: out of memory -- consider tuning tcp_mem", "kernel: bnx2fc: byte_count", - "kernel: kvm: disabled by bios", "kernel: lockd: Unknown symbol register_inet6addr_notifier", "kernel: lockd: Unknown symbol unregister_inet6addr_notifier", "kernel: megaraid_sas: FW detected to be in faultstate, restarting it", @@ -3687,9 +3711,7 @@ "RPC call Host.setupNetworks failed", "Stopping connection", "The name org.fedoraproject.FirewallD1 was not provided by any .service files", - "The vm start process failed", - "lastCheck", - "looking for unfetched domain" + "The vm start process failed" ], "symbolic_name": "vdsm_log" }, @@ -4331,5 +4353,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-03-24T20:04:01.898853" + "version": "2021-03-25T10:42:19.047966" } \ No newline at end of file From 69727ad1655ca6a6bebf06e735a7ac5ffe65cfbc Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 1 Apr 2021 10:16:08 -0500 Subject: [PATCH 369/892] Add lssap spec to insights archives (#3001) * This spec is no longer collect by new clients but may still be collected by older clients so adding back to support those Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/insights_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 245143e5c..80e85e31f 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -132,6 +132,7 @@ class InsightsArchiveSpecs(Specs): lsof = simple_file("insights_commands/lsof") lspci = simple_file("insights_commands/lspci_-k") lspci_vmmkn = simple_file("insights_commands/lspci_-vmmkn") + lssap = simple_file("insights_commands/usr.sap.hostctrl.exe.lssap") lsscsi = simple_file("insights_commands/lsscsi") lsvmbus = simple_file("insights_commands/lsvmbus_-vv") lvmconfig = first_file([ From d06a1eb7a511cde12ea9bbe8a2207ab3a2ed499a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 1 Apr 2021 21:34:10 -0400 Subject: [PATCH 370/892] fix(ultralight checkins): wrap thrown exception with catch (#2865) Signed-off-by: Jeremy Crafts --- insights/client/phase/v1.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 88b2317ca..30e8d482a 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -112,7 +112,12 @@ def pre_update(client, config): 
sys.exit(constants.sig_kill_ok) if config.checkin: - checkin_success = client.checkin() + try: + checkin_success = client.checkin() + except Exception as e: + print(e) + sys.exit(constants.sig_kill_bad) + if checkin_success: sys.exit(constants.sig_kill_ok) else: From f62db5cadf29db3e775027b207971b63b00c72fa Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Fri, 2 Apr 2021 02:27:15 -0500 Subject: [PATCH 371/892] Update redhat-release parser to fix issues (#3000) * Add tests for redhat_release Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Update redhat-release parser to fix issues * Update parser to handle additional release strings * Update documentation and tests * Fix #2999 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Added tests for centos and fedora Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/parsers/redhat_release.py | 98 ++++++++++++------- insights/parsers/tests/test_redhat_release.py | 84 ++++++++++++++++ 2 files changed, 146 insertions(+), 36 deletions(-) diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py index 5bc134c8b..643b14ae7 100644 --- a/insights/parsers/redhat_release.py +++ b/insights/parsers/redhat_release.py @@ -8,23 +8,24 @@ Red Hat Enterprise Linux Server release 7.2 (Maipo) -This module parses the file content and stores data in the dict ``self.parsed``. -The version info can also be get via ``obj.major`` and ``obj.minor``. -Property ``is_rhel`` and ``is_hypervisor`` specifies the host type. +This module parses the file contents and stores data in the class +attributes described below. Examples: - >>> rh_rls_content = ''' - ... Red Hat Enterprise Linux Server release 7.2 (Maipo) - ... '''.strip() - >>> from insights.tests import context_wrap - >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))} - >>> release = shared[RedhatRelease] - >>> assert release.raw == rh_rls_content - >>> assert release.major == 7 - >>> assert release.minor == 2 - >>> assert release.version == "7.2" - >>> assert release.is_rhel - >>> assert release.product == "Red Hat Enterprise Linux Server" + >>> type(rh_release) + + >>> rh_release.raw + 'Red Hat Enterprise Linux Server release 7.2 (Maipo)' + >>> rh_release.major + 7 + >>> rh_release.minor + 2 + >>> rh_release.version + '7.2' + >>> rh_release.is_rhel + True + >>> rh_release.product + 'Red Hat Enterprise Linux Server' """ from .. 
import Parser, parser from ..specs import Specs @@ -32,43 +33,68 @@ @parser(Specs.redhat_release) class RedhatRelease(Parser): - """Parses the content of file ``/etc/redhat-release``.""" + """Parses the content of file ``/etc/redhat-release`` + + Attributes: + is_beta(bool): True if this is a Beta release + is_centos(bool): True if this release is CentOS + is_fedora(bool): True if this release is Fedora + is_rhel(bool): True if this release is Red Hat Enterprise Linux + major(int): Major release number or None + minor(int): Minor release number or None + parsed(dict): Dictionary containing the parsed strings for ``product``, ``version``, and ``code_name`` + raw(string): Unparsed redhat-release string + """ def parse_content(self, content): self.raw = content[0] + self.is_beta = False product, _, version_name = [v.strip() for v in content[0].partition("release")] - version_name_split = [v.strip() for v in version_name.split(None, 1)] - code_name = (version_name_split[1].strip("()") - if len(version_name_split) > 1 else None) + if 'Beta' in version_name: + # Red Hat Enterprise Linux release 8.5 Beta (Ootpa) + version_number, code_name = version_name.split('Beta', 1) + self.is_beta = True + elif '(' in version_name: + # Red Hat Enterprise Linux Workstation release 6.10(Santiago) + # Red Hat Enterprise Linux Workstation release 6.10 (Santiago) + version_number, code_name = version_name.split('(', 1) + else: + # Red Hat Enterprise Linux Workstation release 6.10 + version_number = version_name + code_name = None + self.parsed = { "product": product, - "version": version_name_split[0], - "code_name": code_name + "version": version_number.strip(), + "code_name": code_name.strip().strip('()') if code_name is not None else None } - @property - def major(self): - """int: the major version of this OS.""" - return int(self.parsed["version"].split(".")[0]) + self.is_rhel = 'red hat enterprise linux' in self.parsed['product'].lower() + self.is_centos = 'centos' in self.parsed['product'].lower() + self.is_fedora = 'fedora' in self.parsed['product'].lower() - @property - def minor(self): - """int: the minor version of this OS.""" - s = self.parsed["version"].split("-", 1)[0].split(".") - if len(s) > 1: - return int(s[1]) + v_parts = self.parsed['version'].split('.') + self.major = int(v_parts[0]) if v_parts[0].isdigit() else None + if len(v_parts) >= 2: + if '-' in v_parts[1]: + minor = v_parts[1].split('-')[0] + else: + minor = v_parts[1] + self.minor = int(minor) if minor.isdigit() else None + else: + self.minor = None @property def version(self): """string: version of this OS.""" return self.parsed["version"] - @property - def is_rhel(self): - """bool: True if this OS belong to RHEL, else False.""" - return "Red Hat Enterprise Linux" in self.parsed["product"] - @property def product(self): """string: product of this OS.""" return self.parsed["product"] + + @property + def code_name(self): + """string: code name of this OS or None.""" + return self.parsed["code_name"] diff --git a/insights/parsers/tests/test_redhat_release.py b/insights/parsers/tests/test_redhat_release.py index 8da2bb10c..11fc26c39 100644 --- a/insights/parsers/tests/test_redhat_release.py +++ b/insights/parsers/tests/test_redhat_release.py @@ -1,3 +1,6 @@ +import doctest + +from insights.parsers import redhat_release from insights.parsers.redhat_release import RedhatRelease from insights.tests import context_wrap @@ -26,6 +29,26 @@ Fedora release 23 (Twenty Three) """.strip() +REDHAT_RELEASE8 = """ +Red Hat Enterprise Linux release 8.2 
(Ootpa) +""".strip() + +REDHAT_RELEASE10 = """ +Red Hat Enterprise Linux Server release 6.10(Santiago) +""".strip() + +REDHAT_RELEASE_BETA = """ +Red Hat Enterprise Linux Server release 8.5 Beta (Ootpa) +""".strip() + +CENTOS_STREAM = """ +CentOS Stream release 8 +""".strip() + +CENTOS_7 = """ +CentOS Linux release 7.6.1810 (Core) +""".strip() + def test_rhe6(): release = RedhatRelease(context_wrap(REDHAT_RELEASE1)) @@ -84,4 +107,65 @@ def test_fedora23(): assert release.minor is None assert release.version == "23" assert not release.is_rhel + assert release.is_fedora assert release.product == "Fedora" + + +def test_rhel6_10(): + release = RedhatRelease(context_wrap(REDHAT_RELEASE10)) + assert release.raw == REDHAT_RELEASE10 + assert release.major == 6 + assert release.minor == 10 + assert release.version == "6.10" + assert release.is_rhel + assert release.product == "Red Hat Enterprise Linux Server" + + +def test_rhel8(): + release = RedhatRelease(context_wrap(REDHAT_RELEASE8)) + assert release.raw == REDHAT_RELEASE8 + assert release.major == 8 + assert release.minor == 2 + assert release.version == "8.2" + assert release.is_rhel + assert release.product == "Red Hat Enterprise Linux" + + +def test_rhel_beta(): + release = RedhatRelease(context_wrap(REDHAT_RELEASE_BETA)) + assert release.raw == REDHAT_RELEASE_BETA + assert release.major == 8 + assert release.minor == 5 + assert release.version == "8.5" + assert release.is_rhel + assert release.is_beta + assert release.parsed['code_name'] == 'Ootpa' + assert release.product == "Red Hat Enterprise Linux Server" + + +def test_centos_stream(): + release = RedhatRelease(context_wrap(CENTOS_STREAM)) + assert release.major == 8 + assert release.minor is None + assert release.product == 'CentOS Stream' + assert release.is_centos + assert not release.is_rhel + + +def test_centos_7(): + release = RedhatRelease(context_wrap(CENTOS_7)) + assert release.major == 7 + assert release.minor == 6 + assert release.product == 'CentOS Linux' + assert release.code_name == 'Core' + assert release.is_centos + assert not release.is_rhel + + +def test_examples(): + release = RedhatRelease(context_wrap(REDHAT_RELEASE2)) + globs = { + 'rh_release': release + } + failed, tested = doctest.testmod(redhat_release, globs=globs) + assert failed == 0 From ad7fcedff5fb47040c0d00cd85bbda1f0f12fe0c Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 6 Apr 2021 14:30:39 -0500 Subject: [PATCH 372/892] Allow scalars in parsr.query.choose results. 
(#3003) Signed-off-by: Christopher Sams --- insights/parsr/query/__init__.py | 4 +++- insights/parsr/query/tests/test_choose.py | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/insights/parsr/query/__init__.py b/insights/parsr/query/__init__.py index db913f760..fbcbc06b7 100644 --- a/insights/parsr/query/__init__.py +++ b/insights/parsr/query/__init__.py @@ -351,9 +351,11 @@ def choose(self, chooser): for k, v in r.items(): if isinstance(v, list): tmp.append(Entry(k, children=v, set_parents=False)) - else: + elif isinstance(v, Entry): for i in v.children: tmp.append(Entry(k, i.attrs, i.children, set_parents=False)) + else: + tmp.append(Entry(k, attrs=(v,), set_parents=False)) else: if isinstance(r, list): tmp.extend(r) diff --git a/insights/parsr/query/tests/test_choose.py b/insights/parsr/query/tests/test_choose.py index 0ced81da7..c6c6d5818 100644 --- a/insights/parsr/query/tests/test_choose.py +++ b/insights/parsr/query/tests/test_choose.py @@ -121,3 +121,8 @@ def test_rename(): assert "mytype" in res assert "reason" in res assert "type" not in res + + +def test_scalar_value(): + res = conf.status.conditions.choose(lambda c: ({"reason": c.reason.value or "None Provided"}, c.status)) + assert res.where("reason", "None Provided") From 3fdbaa2e0e48dfbeeae3bea3023c9da3c6acaf4c Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 7 Apr 2021 12:04:02 -0500 Subject: [PATCH 373/892] Docutils 0.17 break doc build (#3005) Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 070d15e63..5316d6689 100644 --- a/setup.py +++ b/setup.py @@ -66,6 +66,7 @@ def maybe_require(pkg): ]) docs = set([ + 'docutils==0.16', 'Sphinx<=3.0.2', 'nbsphinx', 'sphinx_rtd_theme', From 77dcfdbb37d0deb2219297f48b8cf847c44292b5 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 7 Apr 2021 15:25:57 -0500 Subject: [PATCH 374/892] Update jenkinsfile to use updated python3 image (#3006) * Updated python3 CI image uses RHEL default python3 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- Jenkinsfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 5508ab356..78671ad80 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -69,14 +69,14 @@ pipeline { steps { echo "Testing with Pytest..." sh """ - /bin/python36 -m venv .testenv + /bin/python3 -m venv .testenv source .testenv/bin/activate pip install -e .[testing] pytest """ echo "Testing with Linter..." sh """ - /bin/python36 -m venv .lintenv + /bin/python3 -m venv .lintenv source .lintenv/bin/activate pip install -e .[linting] flake8 @@ -99,7 +99,7 @@ pipeline { steps { echo "Building Docs..." 
sh """ - /bin/python36 -m venv .docenv + /bin/python3 -m venv .docenv source .docenv/bin/activate pip install -e .[docs] sphinx-build -W -b html -qa -E docs docs/_build/html From f3002dbd575f1c4accff39495e59c208d1cde9c6 Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Thu, 8 Apr 2021 03:06:44 +0200 Subject: [PATCH 375/892] Add scheduler spec also for Insights archive (#2996) Signed-off-by: Stanislav Kontar --- insights/specs/default.py | 1 + insights/specs/sos_archive.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 577bafaaf..6a68bfc54 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -867,6 +867,7 @@ def is_satellite_capsule(broker): ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") + scheduler = glob_file("/sys/block/*/queue/scheduler") scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') scsi_fwver = glob_file('/sys/class/scsi_host/host[0-9]*/fwrev') diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 05c2dd3eb..ac6c11983 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -229,7 +229,6 @@ class SosSpecs(Specs): route = simple_file("sos_commands/networking/route_-n") sap_host_profile = simple_file("/usr/sap/hostctrl/exe/host_profile") sched_rt_runtime_us = simple_file("/proc/sys/kernel/sched_rt_runtime_us") - scheduler = glob_file("/sys/block/*/queue/scheduler") scsi_mod_use_blk_mq = simple_file("/sys/module/scsi_mod/parameters/use_blk_mq") secure = simple_file("/var/log/secure") sestatus = simple_file("sos_commands/selinux/sestatus_-b") From 132f778a16d8f2d65b3ad76cba27c3122849116a Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 8 Apr 2021 16:27:35 +0800 Subject: [PATCH 376/892] Extend CertificateInfo to avoid duplicate code (#3009) Signed-off-by: Huanhuan Li --- .../parsers/puppet_ca_cert_expire_date.py | 31 +++++++++++-------- .../tests/test_puppet_ca_cert_expire_date.py | 17 ++++++++-- 2 files changed, 33 insertions(+), 15 deletions(-) diff --git a/insights/parsers/puppet_ca_cert_expire_date.py b/insights/parsers/puppet_ca_cert_expire_date.py index d2d138982..cc5479567 100644 --- a/insights/parsers/puppet_ca_cert_expire_date.py +++ b/insights/parsers/puppet_ca_cert_expire_date.py @@ -13,34 +13,39 @@ >>> type(date_info) - >>> date_info.expire_date + >>> date_info['notAfter'].datetime datetime.datetime(2035, 12, 4, 7, 4, 5) """ -from datetime import datetime -from insights import parser, CommandParser +from insights import parser from insights.specs import Specs -from insights.parsers import SkipException, ParseException +from insights.parsers import SkipException +from insights.parsers.ssl_certificate import CertificateInfo @parser(Specs.puppet_ca_cert_expire_date) -class PuppetCertExpireDate(CommandParser): +class PuppetCertExpireDate(CertificateInfo): """ + .. note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more + details. + + .. warning:: + The attribute expire_date is deprecated, please get the value from the dictionary directly instead. + Read the ``openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout`` and set the date to property ``expire_date``. 
Attributes: expire_date (datetime): The date when the puppet ca cert will be expired + + Raises: + SkipException: when notAfter isn't in the output """ def parse_content(self, content): - if len(content) == 1 and content[0].startswith('notAfter='): - date_format = '%b %d %H:%M:%S %Y %Z' - date_str = content[0].split('=', 1)[1] - try: - self.expire_date = datetime.strptime(date_str, date_format) - except Exception: - raise ParseException("Can not parse the date format") - else: + super(PuppetCertExpireDate, self).parse_content(content) + if 'notAfter' not in self: raise SkipException("Cannot get the puppet ca cert expire info") + self.expire_date = self['notAfter'].datetime diff --git a/insights/parsers/tests/test_puppet_ca_cert_expire_date.py b/insights/parsers/tests/test_puppet_ca_cert_expire_date.py index 0e1b43557..6c9303b1d 100644 --- a/insights/parsers/tests/test_puppet_ca_cert_expire_date.py +++ b/insights/parsers/tests/test_puppet_ca_cert_expire_date.py @@ -1,6 +1,7 @@ import doctest import pytest +from insights.core.plugins import ContentException from insights.parsers import ( puppet_ca_cert_expire_date, SkipException, ParseException) from insights.tests import context_wrap @@ -25,6 +26,10 @@ Mon Jan 4 02:31:28 EST 202 ''' +WRONG_PUPPET_CERT_INFO_4 = ''' +abc=def +''' + def test_HTL_doc_examples(): date_info = puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(PUPPET_CERT_EXPIRE_INFO)) @@ -35,10 +40,18 @@ def test_HTL_doc_examples(): assert failed == 0 +def test_parser(): + date_info = puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(PUPPET_CERT_EXPIRE_INFO)) + assert 'notAfter' in date_info + assert date_info['notAfter'].str == 'Dec 4 07:04:05 2035' + + def test_wrong_output(): - with pytest.raises(SkipException): + with pytest.raises(ContentException): puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_1)) - with pytest.raises(SkipException): + with pytest.raises(ParseException): puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_3)) with pytest.raises(ParseException): puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_2)) + with pytest.raises(SkipException): + puppet_ca_cert_expire_date.PuppetCertExpireDate(context_wrap(WRONG_PUPPET_CERT_INFO_4)) From 9e7fcc9d696b7d0eda30387ba814ed6a59707b59 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Fri, 9 Apr 2021 13:26:28 +0800 Subject: [PATCH 377/892] Update spec "certificates_enddate" to make it always success (#3011) * Update spec "certificates_enddate" to make it always success * Since it is to check expired date of certificates for several directories by command find, even the directories don't exist, the command shouldn't fail Signed-off-by: Huanhuan Li * Update "CertificatesEnddate" to remove unsupported usage Signed-off-by: Huanhuan Li --- insights/parsers/certificates_enddate.py | 38 ++++++++++------- .../tests/test_certificates_enddate.py | 42 ++++++++++++++----- insights/specs/default.py | 2 +- 3 files changed, 55 insertions(+), 27 deletions(-) diff --git a/insights/parsers/certificates_enddate.py b/insights/parsers/certificates_enddate.py index 9a350c650..51737a27c 100644 --- a/insights/parsers/certificates_enddate.py +++ b/insights/parsers/certificates_enddate.py @@ -2,9 +2,9 @@ CertificatesEnddate - command ``/usr/bin/openssl x509 -noout -enddate -in path/to/cert/file`` ============================================================================================= 
-This command gets the enddates of certificate files. +This command gets the enddate of the certificate files. -Typical output of this command is:: +Sample Output:: /usr/bin/find: '/etc/origin/node': No such file or directory /usr/bin/find: '/etc/origin/master': No such file or directory @@ -26,41 +26,49 @@ FileName= /etc/pki/entitlement/2387590574974617178.pem Examples: - >>> cert_enddate = shared[CertificatesEnddate] - >>> paths = cert_enddate.get_certificates_path - >>> paths[0] - '/etc/origin/node/cert.pem' - >>> cert_enddate.expiration_date(paths[0]).datetime - datetime(2019, 05, 25, 16, 39, 40) - >>> cert_enddate.expiration_date(paths[0]).str + >>> type(cert_enddate) + + >>> paths = cert_enddate.certificates_path + >>> '/etc/origin/node/cert.pem' in paths + True + >>> cert_enddate.expiration_date('/etc/origin/node/cert.pem').datetime + datetime.datetime(2019, 5, 25, 16, 39, 40) + >>> cert_enddate.expiration_date('/etc/origin/node/cert.pem').str 'May 25 16:39:40 2019' """ from datetime import datetime from collections import namedtuple -from .. import parser, LegacyItemAccess, CommandParser +from insights import parser, CommandParser +from insights.parsers import SkipException from insights.specs import Specs @parser(Specs.certificates_enddate) -class CertificatesEnddate(LegacyItemAccess, CommandParser): - """Class to parse the expiration dates.""" +class CertificatesEnddate(CommandParser, dict): + """Class to parse the expiration date.""" ExpirationDate = namedtuple('ExpirationDate', ['str', 'datetime']) """namedtuple: contains the expiration date in string and datetime format.""" def parse_content(self, content): """Parse the content of crt files.""" - self.data = {} datestamp = None for l in content: if datestamp and l.startswith("FileName="): - self.data[l.split("=")[-1].strip()] = datestamp + self[l.split("=")[-1].strip()] = datestamp datestamp = None elif l.startswith("notAfter="): datestamp = l.split("=")[-1].rsplit(" ", 1)[0] else: datestamp = None + if not self: + raise SkipException("No certification files found.") + + @property + def data(self): + """ Set data as property to keep compatibility """ + return self @property def certificates_path(self): @@ -83,5 +91,5 @@ def expiration_date(self, path): try: path_datetime = datetime.strptime(path_date, '%b %d %H:%M:%S %Y') return self.ExpirationDate(path_date, path_datetime) - except: + except Exception: return self.ExpirationDate(path_date, None) diff --git a/insights/parsers/tests/test_certificates_enddate.py b/insights/parsers/tests/test_certificates_enddate.py index aa6516341..d77dc162f 100644 --- a/insights/parsers/tests/test_certificates_enddate.py +++ b/insights/parsers/tests/test_certificates_enddate.py @@ -1,6 +1,9 @@ +import pytest +import doctest from datetime import datetime -from insights.parsers.certificates_enddate import CertificatesEnddate +from insights.parsers import certificates_enddate from insights.tests import context_wrap +from insights.parsers import SkipException CRT1 = """ @@ -24,8 +27,6 @@ FileName= /etc/pki/entitlement/2387590574974617178.pem """.strip() -CRT2 = "" - CRT3 = """ FileName= /etc/origin/node/cert.pem notAfter=May 25 16:39:40 2019 GMT @@ -63,35 +64,39 @@ PATH1 = "/etc/origin/node/cert.pem" +CRT7 = """ +/usr/bin/find: '/etc/origin/node': No such file or directory +/usr/bin/find: '/etc/origin/master': No such file or directory +/usr/bin/find: '/etc/pki': No such file or directory +/usr/bin/find: '/etc/ipa': No such file or directory +""" + def test_certificates_enddate(): - Cert1 = 
CertificatesEnddate(context_wrap(CRT1)) + Cert1 = certificates_enddate.CertificatesEnddate(context_wrap(CRT1)) assert PATH1 in Cert1.certificates_path expiration_date = Cert1.expiration_date(PATH1) assert expiration_date.str == 'May 25 16:39:40 2019' assert expiration_date.datetime == datetime(2019, 5, 25, 16, 39, 40) - Cert2 = CertificatesEnddate(context_wrap(CRT2)) - assert Cert2.certificates_path == [] - - Cert3 = CertificatesEnddate(context_wrap(CRT3)) + Cert3 = certificates_enddate.CertificatesEnddate(context_wrap(CRT3)) assert (set(Cert3.certificates_path) == set([ '/etc/pki/consumer/cert.pem', '/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem'])) - Cert4 = CertificatesEnddate(context_wrap(CRT4)) + Cert4 = certificates_enddate.CertificatesEnddate(context_wrap(CRT4)) assert (set(Cert4.certificates_path) == set([ '/etc/pki/consumer/cert.pem', '/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem'])) - Cert5 = CertificatesEnddate(context_wrap(CRT5)) + Cert5 = certificates_enddate.CertificatesEnddate(context_wrap(CRT5)) assert (set(Cert5.certificates_path) == set([ '/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem'])) def test_certificates_enddate_unparsable_datatime(): - Cert6 = CertificatesEnddate(context_wrap(CRT6)) + Cert6 = certificates_enddate.CertificatesEnddate(context_wrap(CRT6)) assert (set(Cert6.certificates_path) == set([ '/etc/pki/consumer/cert.pem', '/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem'])) @@ -102,3 +107,18 @@ def test_certificates_enddate_unparsable_datatime(): assert (Cert6.expiration_date( '/etc/pki/ca-trust/extracted/pem/email-ca-bundle.pem').datetime is None) assert (Cert6.expiration_date('/etc/pki/email-ca-bundle.pem') is None) + + +def test_doc(): + cert_enddate = certificates_enddate.CertificatesEnddate(context_wrap(CRT1)) + + globs = { + 'cert_enddate': cert_enddate + } + failed, tested = doctest.testmod(certificates_enddate, globs=globs) + assert failed == 0 + + +def test_exception(): + with pytest.raises(SkipException): + certificates_enddate.CertificatesEnddate(context_wrap(CRT7)) diff --git a/insights/specs/default.py b/insights/specs/default.py index 6a68bfc54..041a2e148 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -221,7 +221,7 @@ def is_ceph_monitor(broker): ceph_osd_tree = simple_command("/usr/bin/ceph osd tree -f json") ceph_s = simple_command("/usr/bin/ceph -s -f json") ceph_v = simple_command("/usr/bin/ceph -v") - certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;") + certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;", keep_rc=True) chkconfig = simple_command("/sbin/chkconfig --list") chrony_conf = simple_file("/etc/chrony.conf") chronyc_sources = simple_command("/usr/bin/chronyc sources") From cf343971cde6d14fe902491dc0ac0b7ce0ef224d Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Fri, 9 Apr 2021 17:29:10 +0800 Subject: [PATCH 378/892] Update spec to make it always success (#3012) * these two commands should still return even some file doen't exist Signed-off-by: Huanhuan Li --- insights/specs/default.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 041a2e148..b72fc53f2 100644 --- 
a/insights/specs/default.py +++ b/insights/specs/default.py @@ -530,7 +530,7 @@ def httpd_cmd(broker): etc_and_sub_dirs = sorted(["/etc", "/etc/pki/tls/private", "/etc/pki/tls/certs", "/etc/pki/ovirt-vmconsole", "/etc/nova/migration", "/etc/sysconfig", "/etc/cloud/cloud.cfg.d", "/etc/rc.d/init.d"]) - ls_etc = simple_command("/bin/ls -lan {0}".format(' '.join(etc_and_sub_dirs))) + ls_etc = simple_command("/bin/ls -lan {0}".format(' '.join(etc_and_sub_dirs)), keep_rc=True) ls_ipa_idoverride_memberof = simple_command("/bin/ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof") ls_lib_firmware = simple_command("/bin/ls -lanR /lib/firmware") ls_ocp_cni_openshift_sdn = simple_command("/bin/ls -l /var/lib/cni/networks/openshift-sdn") @@ -573,7 +573,7 @@ def httpd_cmd(broker): def md5chk_file_list(broker): """ Provide a list of files to be processed by the ``md5chk_files`` spec """ return ["/etc/pki/product/69.pem", "/etc/pki/product-default/69.pem", "/usr/lib/libsoftokn3.so", "/usr/lib64/libsoftokn3.so", "/usr/lib/libfreeblpriv3.so", "/usr/lib64/libfreeblpriv3.so"] - md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s") + md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s", keep_rc=True) mdstat = simple_file("/proc/mdstat") @datasource(Mdstat, HostContext) From 3b1a0c8bc4fa2f46885f33f6e975f8f2abdc8b36 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 12 Apr 2021 16:41:29 +0800 Subject: [PATCH 379/892] Update spec "postgresql_conf" and "postgresql_log" (#3015) * Remove unsupported poggresql conf and log path * remove the path which is only supported in sosreport to sos_archive Signed-off-by: Huanhuan Li --- insights/specs/default.py | 4 ---- insights/specs/sos_archive.py | 12 ++++++++++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index b72fc53f2..3f0cdd376 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -747,15 +747,11 @@ def pmlog_summary_file(broker): postgresql_conf = first_file([ "/var/opt/rh/rh-postgresql12/lib/pgsql/data/postgresql.conf", "/var/lib/pgsql/data/postgresql.conf", - "/opt/rh/postgresql92/root/var/lib/pgsql/data/postgresql.conf", - "database/postgresql.conf" ]) postgresql_log = first_of( [ glob_file("/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log"), glob_file("/var/lib/pgsql/data/pg_log/postgresql-*.log"), - glob_file("/opt/rh/postgresql92/root/var/lib/pgsql/data/pg_log/postgresql-*.log"), - glob_file("/database/postgresql-*.log") ] ) puppetserver_config = simple_file("/etc/sysconfig/puppetserver") diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index ac6c11983..6b22cd36d 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -175,6 +175,18 @@ class SosSpecs(Specs): podman_image_inspect = glob_file("sos_commands/podman/podman_inspect_*") podman_list_containers = first_file(["sos_commands/podman/podman_ps_-a", "sos_commands/podman/podman_ps"]) podman_list_images = simple_file("sos_commands/podman/podman_images") + postgresql_conf = first_file([ + "/var/opt/rh/rh-postgresql12/lib/pgsql/data/postgresql.conf", + "/var/lib/pgsql/data/postgresql.conf", + "database/postgresql.conf" + ]) + postgresql_log = first_of( + [ + glob_file("/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log"), + glob_file("/var/lib/pgsql/data/pg_log/postgresql-*.log"), + glob_file("/database/postgresql-*.log") + ] + ) ps_alxwww = 
simple_file("sos_commands/process/ps_alxwww") ps_aux = first_file(["sos_commands/process/ps_aux", "sos_commands/process/ps_auxwww", "sos_commands/process/ps_auxcww"]) ps_auxcww = first_file(["sos_commands/process/ps_auxcww", "sos_commands/process/ps_auxwww", "sos_commands/process/ps_aux"]) From 49e2eb9047024b34b76cb17efe2b47875104684a Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Tue, 13 Apr 2021 13:09:47 -0400 Subject: [PATCH 380/892] update verification function to skip Verify by default and skip version check. Also add env-variables for testing (#3010) Signed-off-by: Alec Cohan --- .../apps/ansible/playbook_verifier/__init__.py | 3 +-- .../apps/ansible/playbook_verifier/__main__.py | 10 +++++++++- .../client/apps/test_playbook_verifier.py | 18 +++++++++--------- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index 86bf61d1c..ba984419a 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -41,7 +41,6 @@ def __str__(self): def eggVersioningCheck(checkVersion): currentVersion = requests.get(VERSIONING_URL) - print('currentVersion: ', currentVersion) currentVersion = currentVersion.text runningVersion = get_version_info()['core_version'] @@ -119,7 +118,7 @@ def verifyPlaybookSnippet(snippet): return executeVerification(snippetCopy, encodedSignature) -def verify(playbook, checkVersion=True, skipVerify=False): +def verify(playbook, checkVersion=False, skipVerify=True): """ Verify the signed playbook. diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index 2177f60e1..130a46262 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -1,3 +1,4 @@ +import os import sys from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml @@ -15,9 +16,16 @@ def read_playbook(): playbook = read_playbook() playbook_yaml = loadPlaybookYaml(playbook) +skipVerify = True +checkVersion = False + +if (os.environ.get('SKIP_VERIFY')): + skipVerify = False +if (os.environ.get('CHECK_VERSION')): + checkVersion = True try: - verified_playbook = verify(playbook_yaml, checkVersion=False) + verified_playbook = verify(playbook_yaml, checkVersion, skipVerify) except Exception as e: sys.stderr.write(e.message) sys.exit(1) diff --git a/insights/tests/client/apps/test_playbook_verifier.py b/insights/tests/client/apps/test_playbook_verifier.py index 6ba4c608f..6b63f238f 100644 --- a/insights/tests/client/apps/test_playbook_verifier.py +++ b/insights/tests/client/apps/test_playbook_verifier.py @@ -9,8 +9,8 @@ @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') def test_skip_validation(): - result = verify([{'name': "test playbook"}], skipVerify=True, checkVersion=False) - assert result == [{'name': "test playbook"}] + result = verify([{'name': "test playbook", 'vars': {}}], skipVerify=True, checkVersion=False) + assert result == [{'name': "test playbook", 'vars': {}}] @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') @@ -21,7 +21,7 @@ def test_egg_validation_error(mock_get): fake_playbook = [{'name': "test playbook"}] with raises(PlaybookVerificationError) as 
error: - verify(fake_playbook) + verify(fake_playbook, checkVersion=True) assert egg_error in str(error.value) @@ -31,7 +31,7 @@ def test_vars_not_found_error(): fake_playbook = [{'name': "test playbook"}] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=False) + verify(fake_playbook, skipVerify=False) assert vars_error in str(error.value) @@ -41,7 +41,7 @@ def test_signature_not_found_error(): fake_playbook = [{'name': "test playbook", 'vars': {}}] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=False) + verify(fake_playbook, skipVerify=False) assert sig_error in str(error.value) @@ -58,7 +58,7 @@ def test_key_not_imported(): }] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=False) + verify(fake_playbook, skipVerify=False) assert key_error in str(error.value) @@ -75,7 +75,7 @@ def test_key_import_error(): }] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=False) + verify(fake_playbook, skipVerify=False) assert key_error in str(error.value) @@ -92,7 +92,7 @@ def test_playbook_verification_error(call): }] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=False) + verify(fake_playbook, skipVerify=False) assert key_error in str(error.value) @@ -108,5 +108,5 @@ def test_playbook_verification_success(mock_method): } }] - result = verify(fake_playbook, checkVersion=False) + result = verify(fake_playbook, skipVerify=False) assert result == fake_playbook From baeee37a0ad7df4974cf67dfacd53c22588529eb Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Wed, 14 Apr 2021 12:41:07 +0530 Subject: [PATCH 381/892] Add conf file path for rh-mongodb34 to mongod_conf (#3013) Signed-off-by: Rohan Arora --- insights/parsers/mongod_conf.py | 10 ++++++---- insights/specs/default.py | 3 ++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/insights/parsers/mongod_conf.py b/insights/parsers/mongod_conf.py index 27a9b255c..fc08458e8 100644 --- a/insights/parsers/mongod_conf.py +++ b/insights/parsers/mongod_conf.py @@ -1,13 +1,15 @@ """ -MongodbConf - files ``/etc/mongod.conf``, ``/etc/mongod.conf`` and ``/etc/opt/rh/rh-mongodb26/mongod.conf`` -=========================================================================================================== +MongodbConf - files - Configuration files for MongoDB +===================================================== This module contains the following files: ``/etc/mongod.conf``, ``/etc/mongodb.conf`` , - ``/etc/opt/rh/rh-mongodb26/mongod.conf``. + ``/etc/opt/rh/rh-mongodb26/mongod.conf`` + ``/etc/opt/rh/rh-mongodb34/mongod.conf`` -They are provided by package mongodb-server or rh-mongodb26-mongodb-server. +They are provided by package mongodb-server, rh-mongodb26-mongodb-server or +rh-mongodb34-mongodb-server. These MongoDB configuration files may use the **YAML** format or the standard **key-value pair** format. 
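As context for this change, here is a minimal usage sketch showing how the two accepted formats surface through the parser — this assumes the MongodbConf interface (is_yaml, get) documented in this module, and the paths and values are purely illustrative::

    # Illustrative sketch: MongodbConf detects whether the file is YAML or key-value.
    from insights.parsers.mongod_conf import MongodbConf
    from insights.tests import context_wrap

    YAML_STYLE = "storage:\n   dbPath: /var/lib/mongodb"
    KEY_VALUE_STYLE = "dbpath = /var/lib/mongodb"

    yaml_conf = MongodbConf(context_wrap(YAML_STYLE, path='/etc/opt/rh/rh-mongodb34/mongod.conf'))
    kv_conf = MongodbConf(context_wrap(KEY_VALUE_STYLE, path='/etc/mongod.conf'))

    assert yaml_conf.is_yaml is True    # detected and parsed as YAML
    assert kv_conf.is_yaml is False     # fell back to key-value parsing
    assert kv_conf.get('dbpath') == '/var/lib/mongodb'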
diff --git a/insights/specs/default.py b/insights/specs/default.py index 3f0cdd376..61fcadc35 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -595,7 +595,8 @@ def md_device_list(broker): mongod_conf = glob_file([ "/etc/mongod.conf", "/etc/mongodb.conf", - "/etc/opt/rh/rh-mongodb26/mongod.conf" + "/etc/opt/rh/rh-mongodb26/mongod.conf", + "/etc/opt/rh/rh-mongodb34/mongod.conf" ]) mount = simple_command("/bin/mount") mounts = simple_file("/proc/mounts") From 64d533a05949dec9b9e0472acdd048a4548cdd10 Mon Sep 17 00:00:00 2001 From: Stephen Date: Wed, 14 Apr 2021 03:25:20 -0400 Subject: [PATCH 382/892] Azure marketplace parser (#3017) * [parser] add a parser for azure marketplace We'll need to identify azure marketplace systems inside puptoo. Since we rely on core for this, we need to have this parser in place. RHCLOUD-13520 Signed-off-by: Stephen Adams * [parser] fix docs for azure_instance_plan Signed-off-by: Stephen Adams * [parser] Fix docs and add try/except for json Signed-off-by: Stephen Adams --- .../azure_instance_plan.rst | 3 + insights/parsers/azure_instance_plan.py | 68 +++++++++++++++++ .../parsers/tests/test_azure_instance_plan.py | 76 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 149 insertions(+) create mode 100644 docs/shared_parsers_catalog/azure_instance_plan.rst create mode 100644 insights/parsers/azure_instance_plan.py create mode 100644 insights/parsers/tests/test_azure_instance_plan.py diff --git a/docs/shared_parsers_catalog/azure_instance_plan.rst b/docs/shared_parsers_catalog/azure_instance_plan.rst new file mode 100644 index 000000000..a41a1acb6 --- /dev/null +++ b/docs/shared_parsers_catalog/azure_instance_plan.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.azure_instance_plan + :members: + :show-inheritance: diff --git a/insights/parsers/azure_instance_plan.py b/insights/parsers/azure_instance_plan.py new file mode 100644 index 000000000..76b9ad074 --- /dev/null +++ b/insights/parsers/azure_instance_plan.py @@ -0,0 +1,68 @@ +""" +AzureInstancePlan +================= + +This parser reads the output of a command +``curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json``, +which is used to check whether the instance is a marketplace image. + +For more details, See: https://docs.microsoft.com/en-us/rest/api/compute/virtualmachines/createorupdate#plan + +""" +import json + +from insights.parsers import SkipException, ParseException +from insights import parser, CommandParser +from insights.specs import Specs + + +@parser(Specs.azure_instance_plan) +class AzureInstancePlan(CommandParser): + """ + Class for parsing the Azure Instance Plan returned by command + ``curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json``, + + + Typical Output of this command is:: + + { + "name": "planName", + "product": "planProduct", + "publisher": "planPublisher" + }, + + Raises: + SkipException: When content is empty or no parse-able content. 
+ + Attributes: + name (str): The name of the plan for the VM Instance in Azure, e.g: rhel7 + size (str): The product of the plan for the VM Instance in Azure, e.g: RHEL + publisher (str): The publisher of the plan for the VM Instance in Azure, e.g: Red hat + raw (str): The full JSON of the plan returned by the ``curl`` command + + Examples: + >>> azure_plan.name == 'planName' + True + >>> azure_plan.product == 'planProduct' + True + >>> azure_plan.publisher == 'planPublisher' + True + """ + + def parse_content(self, content): + if not content or 'curl: ' in content[0]: + raise SkipException() + try: + plan = json.loads(content[0]) + except: + raise ParseException("Unable to parse JSON") + + self.raw = content[0] + self.name = plan["name"] if plan["name"] != "" else None + self.product = plan["product"] if plan["product"] != "" else None + self.publisher = plan["publisher"] if plan["publisher"] != "" else None + + def __repr__(self): + return " Date: Wed, 14 Apr 2021 09:17:28 -0400 Subject: [PATCH 383/892] [spec] Add azure_instance_plan to archive specs (#3021) Signed-off-by: Stephen Adams --- insights/specs/insights_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 80e85e31f..421fe0a8e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -18,6 +18,7 @@ class InsightsArchiveSpecs(Specs): aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7") awx_manage_check_license = simple_file("insights_commands/awx-manage_check_license") azure_instance_type = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type") + azure_instance_plan = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_plan") bios_uuid = simple_file("insights_commands/dmidecode_-s_system-uuid") blkid = simple_file("insights_commands/blkid_-c_.dev.null") brctl_show = simple_file("insights_commands/brctl_show") From 1de061d65114cd6912f178adacd03d252b1504dc Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 15 Apr 2021 04:46:40 +0800 Subject: [PATCH 384/892] Run the sap_hdb_version datasource only for SAP HANA (#3020) - https://bugzilla.redhat.com/show_bug.cgi?id=1949056 Currently, sap_hdb_version runs for all the `adm` users, while `HDB` is only valid for HANA instances, this patch limits it to HANA SID only. Signed-off-by: Xiangce Liu --- insights/specs/default.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 99995ae46..9d53ecdc3 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -798,6 +798,14 @@ def sap_sid(broker): sap = broker[Sap] return list(set(sap.sid(i).lower() for i in sap.all_instances)) + @datasource(Sap, HostContext) + def sap_hana_sid(broker): + """ + list: List of the SID of SAP HANA Instances. 
+ """ + sap = broker[Sap] + return list(set(sap.sid(i).lower() for i in sap.all_instances if sap.type(i) == 'HDB')) + @datasource(sap_sid, HostContext) def ld_library_path_of_user(broker): """ @@ -819,7 +827,7 @@ def ld_library_path_of_user(broker): return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') raise SkipComponent - sap_hdb_version = foreach_execute(sap_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) + sap_hdb_version = foreach_execute(sap_hana_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") From 19162967300097d3666bb10200b49730761907a3 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 15 Apr 2021 10:37:34 +0800 Subject: [PATCH 385/892] Remove the spec because it is not used by any rule (#3018) * Remove the spec because it is not used by any rule * The ssl certificate changes from satellite 6.6, however since no rules use this spec, no need to update it, we can remove it directly Signed-off-by: Huanhuan Li * Skip spec Signed-off-by: Huanhuan Li --- insights/specs/default.py | 1 - insights/specs/insights_archive.py | 1 - insights/tests/client/collection_rules/test_map_components.py | 3 ++- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 9d53ecdc3..eb3c52701 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -768,7 +768,6 @@ def pmlog_summary_file(broker): rhsm_katello_default_ca_cert = simple_command("/usr/bin/openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer") qemu_conf = simple_file("/etc/libvirt/qemu.conf") qemu_xml = glob_file(r"/etc/libvirt/qemu/*.xml") - qpid_stat_g = simple_command("/usr/bin/qpid-stat -g --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671") qpidd_conf = simple_file("/etc/qpid/qpidd.conf") rabbitmq_env = simple_file("/etc/rabbitmq/rabbitmq-env.conf") rabbitmq_report = simple_command("/usr/sbin/rabbitmqctl report") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 421fe0a8e..e9a14295b 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -195,7 +195,6 @@ class InsightsArchiveSpecs(Specs): ps_eo = simple_file("insights_commands/ps_-eo_pid_ppid_comm") puppet_ca_cert_expire_date = simple_file("insights_commands/openssl_x509_-in_.etc.puppetlabs.puppet.ssl.ca.ca_crt.pem_-enddate_-noout") pvs_noheadings = simple_file("insights_commands/pvs_--nameprefixes_--noheadings_--separator_-a_-o_pv_all_vg_name_--config_global_locking_type_0") - qpid_stat_g = simple_file("insights_commands/qpid-stat_-g_--ssl-certificate_.etc.pki.katello.qpid_client_striped.crt_-b_amqps_..localhost_5671") rabbitmq_report = simple_file("insights_commands/rabbitmqctl_report") rabbitmq_users = simple_file("insights_commands/rabbitmqctl_list_users") readlink_e_etc_mtab = simple_file("insights_commands/readlink_-e_.etc.mtab") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index fc7966cc4..4d58b2a54 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ 
b/insights/tests/client/collection_rules/test_map_components.py @@ -100,7 +100,8 @@ def test_get_component_by_symbolic_name(): 'sap_host_profile', 'sched_rt_runtime_us', 'libvirtd_qemu_log', - 'mlx4_port' + 'mlx4_port', + 'qpid_stat_g' ] # first, make sure our list is proper and one of these From 7bf34be25703e54a310df84480b468151a3df410 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 15 Apr 2021 21:41:39 +0800 Subject: [PATCH 386/892] Use 'su' for sap_hdb_version instead of 'sudo' (#3022) * Use 'su' for sap_hdb_version instead of 'sudo' Signed-off-by: Xiangce Liu * add the missed '-c' Signed-off-by: Xiangce Liu --- insights/specs/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index eb3c52701..1e318f083 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -826,7 +826,7 @@ def ld_library_path_of_user(broker): return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') raise SkipComponent - sap_hdb_version = foreach_execute(sap_hana_sid, "/usr/bin/sudo -iu %sadm HDB version", keep_rc=True) + sap_hdb_version = foreach_execute(sap_hana_sid, "/bin/su -l %sadm -c 'HDB version'", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") From f2971aa2f5fdae9bed3f72626d3830904666740f Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 15 Apr 2021 11:51:06 -0400 Subject: [PATCH 387/892] [spec] Add spec for collecting ansible_host (#3019) * [spec] Add spec for collecting ansible_host The client will begin collecting ansible_host and putting it into the root of the archive. 
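Concretely, the archive-side registration is nothing more than a plain-file
spec for that top-level file; a minimal sketch (mirroring the wiring in the
diff below)::

    # Sketch of the spec this commit registers; see the actual diff below.
    from insights.core.spec_factory import simple_file
    from insights.specs import Specs

    class Core3Specs(Specs):
        # "ansible_host" is a one-line text file at the archive root
        ansible_host = simple_file("ansible_host")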
This spec will let us dig it out of puptoo for system_profile RHCLOUD-13536 Signed-off-by: Stephen Adams * Remove ansible_host from insights_archive This one is covered by the core3specs Signed-off-by: Stephen Adams * [specs] move ansible_host and display_name to default Signed-off-by: Stephen Adams * [fix] put display and ansible_host in proper files Signed-off-by: Stephen Adams * [fix] typo in display_name spec --- insights/specs/__init__.py | 1 + insights/specs/core3_archive.py | 1 + insights/specs/insights_archive.py | 1 + 3 files changed, 3 insertions(+) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 5fb4f592e..fa3190714 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -20,6 +20,7 @@ class Specs(SpecSet): abrt_status_bare = RegistryPoint() alternatives_display_python = RegistryPoint() amq_broker = RegistryPoint(multi_output=True) + ansible_host = RegistryPoint() auditctl_status = RegistryPoint() auditd_conf = RegistryPoint() audit_log = RegistryPoint(filterable=True) diff --git a/insights/specs/core3_archive.py b/insights/specs/core3_archive.py index db5cd366f..75dbda6a0 100644 --- a/insights/specs/core3_archive.py +++ b/insights/specs/core3_archive.py @@ -13,6 +13,7 @@ class Core3Specs(Specs): + ansible_host = simple_file("ansible_host") branch_info = simple_file("/branch_info", kind=RawFileProvider) display_name = simple_file("display_name") version_info = simple_file("version_info") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index e9a14295b..2d933fb2e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -13,6 +13,7 @@ class InsightsArchiveSpecs(Specs): abrt_status_bare = simple_file("insights_commands/abrt_status_--bare_True") all_installed_rpms = glob_file("insights_commands/rpm_-qa*") alternatives_display_python = simple_file("insights_commands/alternatives_--display_python") + ansible_host = simple_file("ansible_host") auditctl_status = simple_file("insights_commands/auditctl_-s") aws_instance_id_doc = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc") aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7") From e81c59e9d526cba31004aeac32822a5f3770414d Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Fri, 16 Apr 2021 03:22:41 +0800 Subject: [PATCH 388/892] Update the spec of UpdateLsinitrd (#2775) Signed-off-by: shlao Co-authored-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 2 files changed, 2 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 1e318f083..dd7942871 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -557,6 +557,7 @@ def httpd_cmd(broker): lsblk = simple_command("/bin/lsblk") lsblk_pairs = simple_command("/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO") lscpu = simple_command("/usr/bin/lscpu") + lsinitrd = simple_command("/usr/bin/lsinitrd") lsmod = simple_command("/sbin/lsmod") lsof = simple_command("/usr/sbin/lsof") lspci = simple_command("/sbin/lspci -k") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 2d933fb2e..3b522914f 100644 --- 
a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -107,6 +107,7 @@ class InsightsArchiveSpecs(Specs): ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig") ls_ipa_idoverride_memberof = simple_file("insights_commands/ls_-lan_.usr.share.ipa.ui.js.plugins.idoverride-memberof") ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware") + lsinitrd = simple_file("insights_commands/lsinitrd") ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn") ls_origin_local_volumes_pods = simple_file("insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods") ls_osroot = simple_file("insights_commands/ls_-lan") From a3c47c34a1306841fc32c497037445603e4ebc4a Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 15 Apr 2021 15:30:07 -0400 Subject: [PATCH 389/892] [release] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 1444ac70c..888bb3c58 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -30,6 +30,11 @@ "pattern": [], "symbolic_name": "azure_instance_type" }, + { + "command": "python -m insights.tools.cat --no-header azure_instance_plan", + "pattern": [], + "symbolic_name": "azure_instance_plan" + }, { "command": "/sbin/auditctl -s", "pattern": [], @@ -314,11 +319,6 @@ "pattern": [], "symbolic_name": "facter" }, - { - "command": "/usr/bin/qpid-stat -g --ssl-certificate=/etc/pki/katello/qpid_client_striped.crt -b amqps://localhost:5671", - "pattern": [], - "symbolic_name": "qpid_stat_g" - }, { "command": "/bin/fc-match -sv 'sans:regular:roman' family fontformat", "pattern": [], @@ -691,6 +691,11 @@ "pattern": [], "symbolic_name": "lscpu" }, + { + "command": "/usr/bin/lsinitrd", + "pattern": [], + "symbolic_name": "lsinitrd" + }, { "command": "/sbin/lsmod", "pattern": [], @@ -4309,6 +4314,11 @@ ], "symbolic_name": "rsyslog_conf" }, + { + "glob": "/sys/block/*/queue/scheduler", + "symbolic_name": "scheduler", + "pattern": [] + }, { "glob": "/sys/class/scsi_host/host[0-9]*/fwrev", "symbolic_name": "scsi_fwver", @@ -4353,5 +4363,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-03-25T10:42:19.047966" -} \ No newline at end of file + "version": "2021-03-31T16:03:45.694442" +} From 2ea7476bd5756b2af971f4d4cf456ace6d3387ca Mon Sep 17 00:00:00 2001 From: Stephen Date: Sun, 18 Apr 2021 21:10:47 -0400 Subject: [PATCH 390/892] [fix] Fix attribute in docs for azure_instance_plan (#3024) Signed-off-by: Stephen Adams --- insights/parsers/azure_instance_plan.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/parsers/azure_instance_plan.py b/insights/parsers/azure_instance_plan.py index 76b9ad074..fd9b52846 100644 --- a/insights/parsers/azure_instance_plan.py +++ b/insights/parsers/azure_instance_plan.py @@ -36,7 +36,7 @@ class AzureInstancePlan(CommandParser): Attributes: name (str): The name of the plan for the VM Instance in Azure, e.g: rhel7 - size (str): The product of the plan for the VM Instance in Azure, e.g: RHEL + product (str): The product of the plan for the VM Instance in Azure, e.g: RHEL publisher 
(str): The publisher of the plan for the VM Instance in Azure, e.g: Red hat raw (str): The full JSON of the plan returned by the ``curl`` command From 76bd2467d8266ca59e4b2d18ea23ea7dc3a89ea6 Mon Sep 17 00:00:00 2001 From: Stephen Date: Mon, 19 Apr 2021 12:28:28 -0400 Subject: [PATCH 391/892] [testing] download idna 2.7 directly (#3025) * [testing] download idna 2.7 directly This package was removed from pypi. We still need it for python2, so we'll download it directly via git. Signed-off-by: Stephen Adams * [testing] fix pycparser package Signed-off-by: Stephen Adams --- Jenkinsfile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 78671ad80..8e8f6979e 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -21,8 +21,8 @@ pipeline { sh """ virtualenv .testenv source .testenv/bin/activate - pip install "idna<=2.7" - pip install "pycparser<=2.18" + pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip + pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip pip install -e .[testing] pytest """ @@ -30,8 +30,8 @@ pipeline { sh """ virtualenv .lintenv source .lintenv/bin/activate - pip install "idna<=2.7" - pip install "pycparser<=2.18" + pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip + pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip pip install -e .[linting] flake8 """ From d32a499f6be0683042005abec705fd8b96b34788 Mon Sep 17 00:00:00 2001 From: Glutexo Date: Tue, 20 Apr 2021 22:06:57 +0200 Subject: [PATCH 392/892] =?UTF-8?q?=F0=9F=91=B7=F0=9F=8F=BB=E2=80=8D?= =?UTF-8?q?=E2=99=82=EF=B8=8F=20Pipeline:=20Upgrade=20PIP=20and=20revert?= =?UTF-8?q?=20pip=20install=20changes=20(#3028)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Upgrade PIP to 9.0.3 Upgraded PIP to the last version that supports Python 2.6. This may resolve possible API incompatibilities and thus PyPI query failures. PIP >=9 also supports Python version checks, not installing packages incompatible with current Python version. Signed-off-by: Štěpán Tomsa * Revert idna and pycparser to PyPI Revert idna and pycparser back to PyPI as the problem with package search extends even to packages required by install -e .[testing]. Signed-off-by: Štěpán Tomsa * Upgrade PIP from PyPI The PIP upgrade does not resolve the PyPI query problem. It does not make sense then to bypass it. 
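In other words, the GitHub-archive workaround only ever covered idna and
pycparser, while everything else pulled in by pip install -e .[testing]
still has to come from PyPI; if those queries fail regardless, bypassing
PyPI for two pins buys nothing.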
Signed-off-by: Štěpán Tomsa
---
 Jenkinsfile | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index 8e8f6979e..a6d7113d2 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -21,8 +21,9 @@ pipeline {
                 sh """
                 virtualenv .testenv
                 source .testenv/bin/activate
-                pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip
-                pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip
+                pip install --upgrade "pip<10"
+                pip install "idna<=2.7"
+                pip install "pycparser<=2.18"
                 pip install -e .[testing]
                 pytest
                 """

From d2fe5d0f1167e464d0e4a3dcbfd36fe65c998dc4 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 21 Apr 2021 04:19:32 +0800
Subject: [PATCH 393/892] New parser for sap_hana_landscape (#3023)

- And refine the dependent Datasources and make them reusable

Signed-off-by: Xiangce Liu
---
 .../sap_hana_python_script.rst                |  3 +
 insights/parsers/sap_hana_python_script.py    | 84 +++++++++++++++++
 .../tests/test_sap_hana_python_script.py      | 92 +++++++++++++++++++
 insights/specs/__init__.py                    |  1 +
 insights/specs/default.py                     | 37 ++++++--
 5 files changed, 211 insertions(+), 6 deletions(-)
 create mode 100644 docs/shared_parsers_catalog/sap_hana_python_script.rst
 create mode 100644 insights/parsers/sap_hana_python_script.py
 create mode 100644 insights/parsers/tests/test_sap_hana_python_script.py

diff --git a/docs/shared_parsers_catalog/sap_hana_python_script.rst b/docs/shared_parsers_catalog/sap_hana_python_script.rst
new file mode 100644
index 000000000..80d4ba733
--- /dev/null
+++ b/docs/shared_parsers_catalog/sap_hana_python_script.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.sap_hana_python_script
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/sap_hana_python_script.py b/insights/parsers/sap_hana_python_script.py
new file mode 100644
index 000000000..ca4bf0762
--- /dev/null
+++ b/insights/parsers/sap_hana_python_script.py
@@ -0,0 +1,84 @@
+"""
+SapHanaPython - Commands
+========================
+
+Shared parser for parsing output of the SAP HANA python support scripts.
+
+The following parsers are included in this module:
+
+HanaLandscape - landscapeHostConfiguration.py
+---------------------------------------------
+"""
+from insights import parser, CommandParser
+from insights.parsers import SkipException
+from insights.specs import Specs
+
+
+class SapHanaPython(CommandParser, list):
+    """
+    Base class for parsing the output of SAP HANA python support scripts.
+
+    Attributes:
+        overall_status(str): The overall host status.
+
+    Raises:
+        SkipException: When nothing is parsed.
+    """
+
+    def parse_content(self, content):
+        self.overall_status = None
+        header = True
+        keys = []
+        for line in content:
+            line = line.strip()
+            if not line.startswith('|'):
+                if line.startswith('overall host status:'):
+                    self.overall_status = line.split(':')[-1].strip()
+                continue
+            # discard the empty head and tail in each line
+            lsp = [i.strip() for i in line.split('|')][1:-1]
+            if '--' in lsp[0]:
+                header = False
+                continue
+            if header:
+                keys = [' '.join(k).strip() for k in zip(keys, lsp)] if keys else lsp
+            else:
+                self.append(dict(zip(keys, lsp)))
+
+        if len(self) == 0:
+            raise SkipException
+
+
+@parser(Specs.sap_hana_landscape)
+class HanaLandscape(SapHanaPython):
+    """
+    Class for parsing the output of `/usr/sap/<SID>/HDB<nr>/exe/python_support/landscapeHostConfiguration.py` command.
From d2fe5d0f1167e464d0e4a3dcbfd36fe65c998dc4 Mon Sep 17 00:00:00 2001
From: Xiangce Liu
Date: Wed, 21 Apr 2021 04:19:32 +0800
Subject: [PATCH 393/892] New parser for sap_hana_landscape (#3023)

- And refine the dependent Datasources and make them reusable

Signed-off-by: Xiangce Liu
---
 .../sap_hana_python_script.rst                |  3 +
 insights/parsers/sap_hana_python_script.py    | 84 +++++++++++++++++
 .../tests/test_sap_hana_python_script.py      | 92 +++++++++++++++++++
 insights/specs/__init__.py                    |  1 +
 insights/specs/default.py                     | 37 ++++++--
 5 files changed, 211 insertions(+), 6 deletions(-)
 create mode 100644 docs/shared_parsers_catalog/sap_hana_python_script.rst
 create mode 100644 insights/parsers/sap_hana_python_script.py
 create mode 100644 insights/parsers/tests/test_sap_hana_python_script.py

diff --git a/docs/shared_parsers_catalog/sap_hana_python_script.rst b/docs/shared_parsers_catalog/sap_hana_python_script.rst
new file mode 100644
index 000000000..80d4ba733
--- /dev/null
+++ b/docs/shared_parsers_catalog/sap_hana_python_script.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.sap_hana_python_script
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/sap_hana_python_script.py b/insights/parsers/sap_hana_python_script.py
new file mode 100644
index 000000000..ca4bf0762
--- /dev/null
+++ b/insights/parsers/sap_hana_python_script.py
@@ -0,0 +1,84 @@
+"""
+SapHanaPython - Commands
+========================
+
+Shared parser for parsing output of the SAP HANA python support scripts.
+
+The following parsers are included in this module:
+
+HanaLandscape - landscapeHostConfiguration.py
+---------------------------------------------
+"""
+from insights import parser, CommandParser
+from insights.parsers import SkipException
+from insights.specs import Specs
+
+
+class SapHanaPython(CommandParser, list):
+    """
+    Base class for parsing the output of SAP HANA python support scripts.
+
+    Attributes:
+        overall_status(bool): The overall host status.
+
+    Raises:
+        SkipException: When nothing is parsed.
+    """
+
+    def parse_content(self, content):
+        self.overall_status = None
+        header = True
+        keys = []
+        for line in content:
+            line = line.strip()
+            if not line.startswith('|'):
+                if line.startswith('overall host status:'):
+                    self.overall_status = line.split(':')[-1].strip()
+                continue
+            # discard the empty head and tail in each line
+            lsp = [i.strip() for i in line.split('|')][1:-1]
+            if '--' in lsp[0]:
+                header = False
+                continue
+            if header:
+                keys = [' '.join(k).strip() for k in zip(keys, lsp)] if keys else lsp
+            else:
+                self.append(dict(zip(keys, lsp)))
+
+        if len(self) == 0:
+            raise SkipException
+
+
+@parser(Specs.sap_hana_landscape)
+class HanaLandscape(SapHanaPython):
+    """
+    Class for parsing the output of `/usr/sap//HDB/exe/python_support/landscapeHostConfiguration.py` command.
+
+    Typical output is::
+
+        | Host | Host | Host | Failover | Remove | Storage | Failover | Failover | NameServer | NameServer | IndexServer | IndexServer |
+        | | Active | Status | Status | Status | Partition | Config Group | Actual Group | Config Role | Actual Role | Config Role | Actual Role |
+        | ------ | ------ | ------ | -------- | ------ | --------- | ------------ | ------------ | ----------- | ----------- | ----------- | ----------- |
+        | node1 | yes | ok | | | 1 | default | default | master 1 | master | worker | master |
+        overall host status: ok
+
+    Attributes:
+        scale_up(bool): True for 'Scale Up' HANA System
+        scale_out(bool): True for 'Scale Out' HANA System
+
+    Examples:
+        >>> type(hana_sta)
+        <class 'insights.parsers.sap_hana_python_script.HanaLandscape'>
+        >>> hana_sta.scale_up
+        True
+        >>> len(hana_sta)
+        1
+        >>> hana_sta[0]['Host'] == 'node1'
+        True
+        >>> hana_sta.overall_status == 'ok'
+        True
+    """
+    def __init__(self, *args, **kwargs):
+        super(HanaLandscape, self).__init__(*args, **kwargs)
+        self.scale_up = len(self) == 1
+        self.scale_out = len(self) > 1
diff --git a/insights/parsers/tests/test_sap_hana_python_script.py b/insights/parsers/tests/test_sap_hana_python_script.py
new file mode 100644
index 000000000..90051ef8b
--- /dev/null
+++ b/insights/parsers/tests/test_sap_hana_python_script.py
@@ -0,0 +1,92 @@
+import pytest
+import doctest
+
+from insights.parsers import SkipException, sap_hana_python_script
+from insights.parsers.sap_hana_python_script import HanaLandscape
+from insights.tests import context_wrap
+
+LANDSCAPE_SCALE_UP = """
+| Host | Host | Host | Failover | Remove | Storage | Failover | Failover | NameServer | NameServer | IndexServer | IndexServer |
+| | Active | Status | Status | Status | Partition | Config Group | Actual Group | Config Role | Actual Role | Config Role | Actual Role |
+| ------ | ------ | ------ | -------- | ------ | --------- | ------------ | ------------ | ----------- | ----------- | ----------- | ----------- |
+| node1 | yes | ok | | | 1 | default | default | master 1 | master | worker | master |
+overall host status: ok
+""".strip()
+
+LANDSCAPE_SCALE_OUT = """
+| Host | Host | Host | Failover | Remove | Storage | Failover | Failover | NameServer | NameServer | IndexServer | IndexServer |
+| | Active | Status | Status | Status | Partition | Config Group | Actual Group | Config Role | Actual Role | Config Role | Actual Role |
+| ------ | ------ | ------ | -------- | ------ | --------- | ------------ | ------------ | ----------- | ----------- | ----------- | ----------- |
+| node1 | yes | info | | | 0 | default | default | master 1 | slave | worker | standby |
+| node2 | yes | ok | | | 2 | default | default | master 2 | slave | worker | slave |
+| node3 | yes | info | | | 1 | default | default | master 3 | master | standby | master |
+overall host status: ok
+""".strip()
+
+LANDSCAPE_SCALE_UP_NG = """
+nameserver vm37-39:30201 not responding.
+nameserver vm37-39:30201 not responding.
+| Host | Host | Host | Failover | Remove | Storage | Storage | Failover | Failover | NameServer | NameServer | IndexServer | IndexServer | Host | Host | Worker | Worker | +| | Active | Status | Status | Status | Config | Actual | Config | Actual | Config | Actual | Config | Actual | Config | Actual | Config | Actual | +| | | | | | Partition | Partition | Group | Group | Role | Role | Role | Role | Roles | Roles | Groups | Groups | +| ------- | ------ | ------ | -------- | ------ | --------- | --------- | -------- | -------- | ---------- | ---------- | ----------- | ----------- | ---------------- | ------ | ------- | ------ | +| vm37-39 | no | error | ? | ? | ? | ? | ? | ? | master 1 | ? | worker | ? | worker xs_worker | ? | default | ? | + +overall host status: error +""".strip() + +LANDSCAPE_SCALE_UP_AB_1 = """ +| Host | Host | Host | Failover | Remove | Storage | Failover | Failover | NameServer | NameServer | IndexServer | IndexServer | +| | Active | Status | Status | Status | Partition | Config Group | Actual Group | Config Role | Actual Role | Config Role | Actual Role | +| ------ | ------ | ------ | -------- | ------ | --------- | ------------ | ------------ | ----------- | ----------- | ----------- | ----------- | +""".strip() + + +def test_doc_examples(): + env = {'hana_sta': HanaLandscape(context_wrap(LANDSCAPE_SCALE_UP))} + failed, total = doctest.testmod(sap_hana_python_script, globs=env) + assert failed == 0 + + +def test_HanaLandscape_ab(): + with pytest.raises(SkipException): + HanaLandscape(context_wrap(LANDSCAPE_SCALE_UP_AB_1)) + + +def test_HanaLandscape(): + d = HanaLandscape(context_wrap(LANDSCAPE_SCALE_UP)) + assert len(d) == 1 + assert d[0]['Host'] == 'node1' + assert d[0]['Failover Config Group'] == 'default' + assert d[0]['Failover Status'] == '' + assert d.overall_status == 'ok' + assert d.scale_up is True + assert d.scale_out is False + + d = HanaLandscape(context_wrap(LANDSCAPE_SCALE_UP_NG)) + assert len(d) == 1 + assert d[0]['Host'] == 'vm37-39' + assert d[0]['Failover Config Group'] == '?' + assert d[0]['Failover Status'] == '?' 
+    assert d[0]['Host Status'] == 'error'
+    assert d.overall_status == 'error'
+    assert d.scale_up is True
+    assert d.scale_out is False
+
+    d = HanaLandscape(context_wrap(LANDSCAPE_SCALE_OUT))
+    assert len(d) == 3
+    assert d.overall_status == 'ok'
+    assert d[0]['Host'] == 'node1'
+    assert d[0]['Failover Config Group'] == 'default'
+    assert d[0]['Failover Status'] == ''
+    assert d[0]['Host Status'] == 'info'
+    assert d[1]['Host'] == 'node2'
+    assert d[1]['Failover Config Group'] == 'default'
+    assert d[1]['Failover Status'] == ''
+    assert d[1]['Host Status'] == 'ok'
+    assert d[2]['Host'] == 'node3'
+    assert d[2]['Failover Config Group'] == 'default'
+    assert d[2]['Failover Status'] == ''
+    assert d[2]['Host Status'] == 'info'
+    assert d.scale_up is False
+    assert d.scale_out is True
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index fa3190714..2fee5b6e0 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -561,6 +561,7 @@ class Specs(SpecSet):
     samba = RegistryPoint(filterable=True)
     sap_dev_disp = RegistryPoint(multi_output=True, filterable=True)
     sap_dev_rd = RegistryPoint(multi_output=True, filterable=True)
+    sap_hana_landscape = RegistryPoint(multi_output=True)
     sap_hdb_version = RegistryPoint(multi_output=True)
     sap_host_profile = RegistryPoint(filterable=True)
     sapcontrol_getsystemupdatelist = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index dd7942871..1cfeb4c94 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -791,20 +791,44 @@ def pmlog_summary_file(broker):
     samba = simple_file("/etc/samba/smb.conf")

     @datasource(Sap, HostContext)
-    def sap_sid(broker):
+    def sap_instance(broker):
         """
-        list: List of the SID of all SAP Instances.
+        list: List of all SAP Instances.
         """
         sap = broker[Sap]
-        return list(set(sap.sid(i).lower() for i in sap.all_instances))
+        return list(v for v in sap.values())

-    @datasource(Sap, HostContext)
+    @datasource(sap_instance, HostContext)
+    def sap_hana_instance(broker):
+        """
+        list: List of the SAP HANA Instances.
+        """
+        sap = broker[DefaultSpecs.sap_instance]
+        return list(v for v in sap if v.type == 'HDB')
+
+    @datasource(sap_instance, HostContext)
+    def sap_sid(broker):
+        """
+        list: List of the SID of all the SAP Instances.
+        """
+        sap = broker[DefaultSpecs.sap_instance]
+        return list(set(h.sid.lower() for h in sap))
+
+    @datasource(sap_hana_instance, HostContext)
     def sap_hana_sid(broker):
         """
         list: List of the SID of SAP HANA Instances.
         """
-        sap = broker[Sap]
-        return list(set(sap.sid(i).lower() for i in sap.all_instances if sap.type(i) == 'HDB'))
+        hana = broker[DefaultSpecs.sap_hana_instance]
+        return list(set(h.sid.lower() for h in hana))
+
+    @datasource(sap_hana_instance, HostContext)
+    def sap_hana_sid_SID_nr(broker):
+        """
+        list: List of tuples (sid, SID, Nr) of SAP HANA Instances.
+        """
+        hana = broker[DefaultSpecs.sap_hana_instance]
+        return list((h.sid.lower(), h.sid, h.number) for h in hana)

     @datasource(sap_sid, HostContext)
     def ld_library_path_of_user(broker):
@@ -827,6 +851,7 @@ def ld_library_path_of_user(broker):
             return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH')
         raise SkipComponent

+    sap_hana_landscape = foreach_execute(sap_hana_sid_SID_nr, "/bin/su -l %sadm -c 'python /usr/sap/%s/HDB%s/exe/python_support/landscapeHostConfiguration.py'", keep_rc=True)
     sap_hdb_version = foreach_execute(sap_hana_sid, "/bin/su -l %sadm -c 'HDB version'", keep_rc=True)
     saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance")
     saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status")
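[Editor's note] Patch 393 chains three datasources (sap_instance ->
sap_hana_instance -> sap_hana_sid_SID_nr) so that ``foreach_execute`` can run
landscapeHostConfiguration.py once per HANA instance. A minimal sketch of a
rule consuming the resulting parser follows, assuming the HanaLandscape API
introduced above; the rule and its response key are hypothetical, not part of
the patch:

from insights import rule, make_fail, make_pass
from insights.parsers.sap_hana_python_script import HanaLandscape


# Hedged sketch: HYPOTHETICAL_HANA_STATUS is a made-up response key; the
# parser attributes used here (overall_status, the list rows, scale_up) are
# the ones defined in patch 393.
@rule(HanaLandscape)
def report_hana_host_status(landscape):
    if landscape.overall_status != 'ok':
        bad = [row['Host'] for row in landscape if row.get('Host Status') != 'ok']
        return make_fail('HYPOTHETICAL_HANA_STATUS', hosts=bad)
    return make_pass('HYPOTHETICAL_HANA_STATUS', scale_up=landscape.scale_up)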
+ """ + hana = broker[DefaultSpecs.sap_hana_instance] + return list((h.sid.lower(), h.sid, h.number) for h in hana) @datasource(sap_sid, HostContext) def ld_library_path_of_user(broker): @@ -827,6 +851,7 @@ def ld_library_path_of_user(broker): return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') raise SkipComponent + sap_hana_landscape = foreach_execute(sap_hana_sid_SID_nr, "/bin/su -l %sadm -c 'python /usr/sap/%s/HDB%s/exe/python_support/landscapeHostConfiguration.py'", keep_rc=True) sap_hdb_version = foreach_execute(sap_hana_sid, "/bin/su -l %sadm -c 'HDB version'", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") From d92d173090dbfe8e7177f24812a6b3655663ffad Mon Sep 17 00:00:00 2001 From: Stephen Date: Thu, 22 Apr 2021 08:41:45 -0400 Subject: [PATCH 394/892] [parser] GCP parser to gather license codes from instances (#3030) * [parser] GCP parser to gather license codes from instances This is needed to identify marketplace systems in the GCP cloud. Signed-off-by: Stephen Adams * [parser] fix docs for gcp_license_codes parser Signed-off-by: Stephen Adams --- .../gcp_license_codes.rst | 3 + insights/parsers/gcp_license_codes.py | 61 ++++++++++++++++ .../parsers/tests/test_gcp_license_codes.py | 70 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 10 +++ insights/specs/insights_archive.py | 1 + 6 files changed, 146 insertions(+) create mode 100644 docs/shared_parsers_catalog/gcp_license_codes.rst create mode 100644 insights/parsers/gcp_license_codes.py create mode 100644 insights/parsers/tests/test_gcp_license_codes.py diff --git a/docs/shared_parsers_catalog/gcp_license_codes.rst b/docs/shared_parsers_catalog/gcp_license_codes.rst new file mode 100644 index 000000000..dec08c3cc --- /dev/null +++ b/docs/shared_parsers_catalog/gcp_license_codes.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gcp_license_codes + :members: + :show-inheritance: diff --git a/insights/parsers/gcp_license_codes.py b/insights/parsers/gcp_license_codes.py new file mode 100644 index 000000000..67626e0de --- /dev/null +++ b/insights/parsers/gcp_license_codes.py @@ -0,0 +1,61 @@ +""" +GCPLicenseCodes +=============== + +This parser reads the output of a command +``curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True"``, +which is used to check whether the google cloud instance is a licensed marketplace instance. + +For more details, See: https://cloud.google.com/compute/docs/reference/rest/v1/images/get#body.Image.FIELDS.license_code + +""" +import json + +from insights.parsers import SkipException, ParseException +from insights import parser, CommandParser +from insights.specs import Specs + + +@parser(Specs.gcp_license_codes) +class GCPLicenseCodes(CommandParser): + """ + Class for parsing the GCP License Codes returned by command + ``curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True"``, + + + Typical Output of this command is:: + + [{"id": "601259152637613565"}] + + Raises: + SkipException: When content is empty or no parse-able content. 
+ ParseException: When the json is unable to be parsed + + Attributes: + ids (list): A list containing the IDs found on the instance + raw (str): The full JSON of the plan returned by the ``curl`` command + + Examples: + >>> gcp_licenses.ids == ["601259152637613565"] + True + >>> gcp_licenses.raw == [{"id": "601259152637613565"}] + True + """ + + def parse_content(self, content): + if not content or 'curl: ' in content[0]: + raise SkipException() + try: + license_list = json.loads(content[0]) + except: + raise ParseException("Unable to parse JSON") + + self.raw = license_list + self.ids = None + if len(license_list) >= 1: + self.ids = [l["id"] for l in license_list] + + def __repr__(self): + return "ids: {i}, raw: {r}".format( + i=self.ids, r=self.raw + ) diff --git a/insights/parsers/tests/test_gcp_license_codes.py b/insights/parsers/tests/test_gcp_license_codes.py new file mode 100644 index 000000000..48306d184 --- /dev/null +++ b/insights/parsers/tests/test_gcp_license_codes.py @@ -0,0 +1,70 @@ +import pytest +import doctest + +from insights.parsers import gcp_license_codes +from insights.parsers.gcp_license_codes import GCPLicenseCodes +from insights.tests import context_wrap +from insights.parsers import SkipException, ParseException + +GCP_LICENSE_CODES_1 = '[{"id": "123451234512345"}]' +GCP_LICENSE_CODES_2 = '[{"id": "123451234512345"}, {"id": "238949287234"}]' +GCP_LICENSE_CODES_3 = '[]' + +GCP_LICENSE_CODES_4 = """ + % Total % Received % Xferd Average Speed Time Time Time Current + Dload Upload Total Spent Left Speed + 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0 + 100 1126 100 1126 0 0 1374k 0 --:--:-- --:--:-- --:--:-- 1099k +[{"id": "123451234512345"}] +""" + +GCP_LICENSE_CODES_DOC = '[{"id": "601259152637613565"}]' + +GCP_LICENSE_CODES_AB_1 = """ +curl: (7) Failed to connect to 169.254.169.254 port 80: Connection timed out +""".strip() +GCP_LICENSE_CODES_AB_2 = """ +curl: (7) couldn't connect to host +""".strip() +GCP_LICENSE_CODES_AB_3 = """ +curl: (28) connect() timed out! 
+""".strip() + + +def test_azure_instance_place_ab_other(): + with pytest.raises(SkipException): + GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_AB_1)) + + with pytest.raises(SkipException): + GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_AB_2)) + + with pytest.raises(SkipException): + GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_AB_3)) + + with pytest.raises(SkipException): + GCPLicenseCodes(context_wrap('')) + + with pytest.raises(ParseException): + GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_4)) + + +def test_gcp_license_codes(): + gcp_licenses = GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_1)) + assert gcp_licenses.ids == ["123451234512345"] + assert gcp_licenses.raw == [{"id": "123451234512345"}] + + gcp_licenses = GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_2)) + assert gcp_licenses.ids == ["123451234512345", "238949287234"] + assert gcp_licenses.raw == [{"id": "123451234512345"}, {"id": "238949287234"}] + + gcp_licenses = GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_3)) + assert gcp_licenses.ids is None + assert gcp_licenses.raw == [] + + +def test_doc_examples(): + env = { + 'gcp_licenses': GCPLicenseCodes(context_wrap(GCP_LICENSE_CODES_DOC)) + } + failed, total = doctest.testmod(gcp_license_codes, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 2fee5b6e0..dcaeb3c54 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -187,6 +187,7 @@ class Specs(SpecSet): freeipa_healthcheck_log = RegistryPoint() fstab = RegistryPoint() galera_cnf = RegistryPoint() + gcp_license_codes = RegistryPoint() getcert_list = RegistryPoint() getconf_page_size = RegistryPoint() getenforce = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 1cfeb4c94..7b3ea388b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -424,6 +424,16 @@ def gfs2_mount_points(broker): gluster_v_info = simple_command("/usr/sbin/gluster volume info") gnocchi_conf = first_file(["/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", "/etc/gnocchi/gnocchi.conf"]) gnocchi_metricd_log = first_file(["/var/log/containers/gnocchi/gnocchi-metricd.log", "/var/log/gnocchi/metricd.log"]) + + @datasource(CloudProvider, HostContext) + def is_gcp(broker): + """ bool: Returns True if this node is identified as running in GCP """ + cp = broker[CloudProvider] + if cp and cp.cloud_provider == CloudProvider.GOOGLE: + return True + raise SkipComponent() + + gcp_license_codes = simple_command("/usr/bin/curl -s curl -H Metadata-Flavor: Google http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp]) grub_conf = simple_file("/boot/grub/grub.conf") grub_config_perms = simple_command("/bin/ls -l /boot/grub2/grub.cfg") # only RHEL7 and updwards grub_efi_conf = simple_file("/boot/efi/EFI/redhat/grub.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 3b522914f..f173f794e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -67,6 +67,7 @@ class InsightsArchiveSpecs(Specs): fcoeadm_i = simple_file("insights_commands/fcoeadm_-i") findmnt_lo_propagation = simple_file("insights_commands/findmnt_-lo_PROPAGATION") firewall_cmd_list_all_zones = simple_file("insights_commands/firewall-cmd_--list-all-zones") + gcp_license_codes = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_gcp_license_codes") getconf_page_size = 
simple_file("insights_commands/getconf_PAGE_SIZE") getenforce = simple_file("insights_commands/getenforce") getsebool = simple_file("insights_commands/getsebool_-a") From 6106ce3cf96c89fdfc279c31f43c1c982d65ce83 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 22 Apr 2021 08:13:13 -0500 Subject: [PATCH 395/892] Collect /usr/libexec/greenboot/greenboot-status (#3031) Signed-off-by: Christopher Sams --- .../greenboot_status.rst | 3 + insights/parsers/greenboot_status.py | 33 +++++++++ .../parsers/tests/test_greenboot_status.py | 70 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 108 insertions(+) create mode 100644 docs/shared_parsers_catalog/greenboot_status.rst create mode 100644 insights/parsers/greenboot_status.py create mode 100644 insights/parsers/tests/test_greenboot_status.py diff --git a/docs/shared_parsers_catalog/greenboot_status.rst b/docs/shared_parsers_catalog/greenboot_status.rst new file mode 100644 index 000000000..e90179d36 --- /dev/null +++ b/docs/shared_parsers_catalog/greenboot_status.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.greenboot_status + :members: + :show-inheritance: diff --git a/insights/parsers/greenboot_status.py b/insights/parsers/greenboot_status.py new file mode 100644 index 000000000..f9f2824cb --- /dev/null +++ b/insights/parsers/greenboot_status.py @@ -0,0 +1,33 @@ +""" +GreenbootStatus - Command ``/usr/libexec/greenboot/greenboot-status`` +===================================================================== +""" +from insights import parser +from insights.core import LogFileOutput +from insights.core.filters import add_filter +from insights.specs import Specs + + +_green = "Boot Status is GREEN" +_red = "Boot Status is RED" +add_filter(Specs.greenboot_status, [_green, _red]) + + +@parser(Specs.greenboot_status) +class GreenbootStatus(LogFileOutput): + """ + Collect the filtered output of ``/usr/libexec/greenboot/greenboot-status``. + + The data are lines from the journal log + + Attributes: + red (bool): True if the system is in RED status. + green (bool): True if the system is in GREEN status. + + .. note:: + Please refer to the super-class :class:`insights.core.LogFileOutput` + """ + + +GreenbootStatus.token_scan("green", _green) +GreenbootStatus.token_scan("red", _red) diff --git a/insights/parsers/tests/test_greenboot_status.py b/insights/parsers/tests/test_greenboot_status.py new file mode 100644 index 000000000..5ce8cb3b6 --- /dev/null +++ b/insights/parsers/tests/test_greenboot_status.py @@ -0,0 +1,70 @@ +from insights.tests import context_wrap +from insights.parsers.greenboot_status import GreenbootStatus + + +GREEN = """ +Boot Status is GREEN - Health Check SUCCESS +""" + + +RED = """ +Mar 04 15:47:12 example greenboot[768]: Script 'check-dns.sh' SUCCESS +Mar 04 15:47:12 example required-services.sh[999]: active +Mar 04 15:47:12 example required-services.sh[999]: active +Mar 04 15:47:12 example required-services.sh[999]: inactive +Mar 04 15:47:10 example NetworkManager[886]: [1614872830.0295] manager: NetworkManager state is now CONNECTED_GLOBAL +Mar 04 15:47:12 example check-dns.sh[801]: PING 192.168.81.1 (192.168.81.1) 56(84) bytes of data. 
+Mar 04 15:47:12 example check-dns.sh[801]: 64 bytes from 192.168.81.1: icmp_seq=1 ttl=64 time=0.253 ms +Mar 04 15:47:12 example check-dns.sh[801]: --- 192.168.81.1 ping statistics --- +Mar 04 15:47:12 example check-dns.sh[801]: 1 packets transmitted, 1 received, 0% packet loss, time 0ms +Mar 04 15:47:12 example check-dns.sh[801]: rtt min/avg/max/mdev = 0.253/0.253/0.253/0.000 ms +Mar 04 15:47:12 example greenboot[768]: Script 'check-dns.sh' SUCCESS +Mar 04 15:47:12 example required-services.sh[999]: active +Mar 04 15:47:12 example required-services.sh[999]: active +Mar 04 15:47:12 example required-services.sh[999]: inactive +Mar 04 15:47:12 example greenboot[768]: Script 'required-services.sh' FAILURE (exit code '3') +Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Main process exited, code=exited, status=3/NOTIMPLEMENTED +Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Failed with result 'exit-code'. +Mar 04 15:47:12 example systemd[1]: Failed to start greenboot Health Checks Runner. +Mar 04 15:47:12 example systemd[1]: Dependency failed for Boot Completion Check. +Mar 04 15:47:12 example systemd[1]: Dependency failed for Mark boot as successful in grubenv. +Mar 04 15:47:12 example systemd[1]: Dependency failed for Multi-User System. +Mar 04 15:47:12 example systemd[1]: multi-user.target: Job multi-user.target/start failed with result 'dependency'. +Mar 04 15:47:12 example systemd[1]: greenboot-grub2-set-success.service: Job greenboot-grub2-set-success.service/start failed with result 'dependency'. +Mar 04 15:47:12 example systemd[1]: Dependency failed for greenboot Success Scripts Runner. +Mar 04 15:47:12 example systemd[1]: greenboot-task-runner.service: Job greenboot-task-runner.service/start failed with result 'dependency'. +Mar 04 15:47:12 example systemd[1]: boot-complete.target: Job boot-complete.target/start failed with result 'dependency'. +Mar 04 15:47:12 example systemd[1]: greenboot-healthcheck.service: Triggering OnFailure= dependencies. +Mar 04 15:47:12 example systemd[1]: Starting greenboot Failure Scripts Runner... +Mar 04 15:47:12 example systemd[1]: Starting Update UTMP about System Runlevel Changes... +Mar 04 15:47:12 example greenboot[1004]: Boot Status is RED - Health Check FAILURE! +Mar 04 15:47:12 example greenboot[1004]: Running Red Scripts... +Mar 04 15:47:12 example systemd[1]: Started greenboot Failure Scripts Runner. +Mar 04 15:47:12 example systemd[1]: Starting Reboot on red boot status... +Mar 04 15:47:12 example systemd[1]: Starting greenboot MotD Generator... +Mar 04 15:47:12 example systemd[1]: Reached target Generic red boot target. +Mar 04 15:47:12 example redboot-auto-reboot[1009]: SYSTEM is UNHEALTHY, but boot_counter is unset in grubenv. Manual intervention necessary. +Mar 04 15:47:12 example systemd[1]: systemd-update-utmp-runlevel.service: Succeeded. +Mar 04 15:47:12 example systemd[1]: Started Update UTMP about System Runlevel Changes. +Mar 04 15:47:12 example systemd[1]: redboot-auto-reboot.service: Main process exited, code=exited, status=1/FAILURE +Mar 04 15:47:12 example systemd[1]: redboot-auto-reboot.service: Failed with result 'exit-code'. +Mar 04 15:47:12 example systemd[1]: Failed to start Reboot on red boot status. +Mar 04 15:47:12 example greenboot-status[1010]: Script 'required-services.sh' FAILURE (exit code '3') +Mar 04 15:47:12 example greenboot-status[1010]: Boot Status is RED - Health Check FAILURE! 
+Mar 04 15:47:12 example greenboot-status[1010]: SYSTEM is UNHEALTHY, but boot_counter is unset in grubenv. Manual intervention necessary.
+Mar 04 15:47:12 example systemd[1]: Started greenboot MotD Generator.
+"""
+
+
+def test_greenboot_status_green():
+    green = context_wrap(GREEN)
+    p = GreenbootStatus(green)
+    assert p.green
+    assert not p.red
+
+
+def test_greenboot_status_red():
+    red = context_wrap(RED)
+    p = GreenbootStatus(red)
+    assert p.red
+    assert not p.green
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index dcaeb3c54..cd8da93d6 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -202,6 +202,7 @@ class Specs(SpecSet):
     gluster_peer_status = RegistryPoint()
     gnocchi_conf = RegistryPoint(filterable=True)
     gnocchi_metricd_log = RegistryPoint(filterable=True)
+    greenboot_status = RegistryPoint(filterable=True)
     grub_conf = RegistryPoint()
     grub_config_perms = RegistryPoint()
     grub_efi_conf = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 7b3ea388b..1123c7add 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -434,6 +434,7 @@ def is_gcp(broker):
         raise SkipComponent()

     gcp_license_codes = simple_command("/usr/bin/curl -s curl -H Metadata-Flavor: Google http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp])
+    greenboot_status = simple_command("/usr/libexec/greenboot/greenboot-status")
     grub_conf = simple_file("/boot/grub/grub.conf")
     grub_config_perms = simple_command("/bin/ls -l /boot/grub2/grub.cfg")  # only RHEL7 and updwards
     grub_efi_conf = simple_file("/boot/efi/EFI/redhat/grub.conf")

From 88bb4db8793e5b97881abcef7ffb6eefab474946 Mon Sep 17 00:00:00 2001
From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com>
Date: Thu, 22 Apr 2021 09:28:45 -0400
Subject: [PATCH 396/892] Fix ValueError exception in Lvm parser. (#3032)

* Fix issue where leaked file descriptor messages in the output caused
  exceptions.
* Add test for parsing with the leaked file descriptor messages.

Signed-off-by: Ryan Blakley
---
 insights/parsers/lvm.py            |  3 ++-
 insights/parsers/tests/test_pvs.py | 30 ++++++++++++++++++++----------
 insights/parsers/tests/test_vgs.py | 30 +++++++++++++++++++-----------
 3 files changed, 41 insertions(+), 22 deletions(-)

diff --git a/insights/parsers/lvm.py b/insights/parsers/lvm.py
index 40ed8a36f..5a4257ed4 100644
--- a/insights/parsers/lvm.py
+++ b/insights/parsers/lvm.py
@@ -130,7 +130,8 @@ def parse_content(self, content):
         if "Unrecognised field:" in content[-1]:
             raise ParseException(content[-1])
         d = {"warnings": set(find_warnings(content))}
-        content = [l for l in content if l not in d["warnings"]]
+        content = [l for l in content
+                   if l not in d["warnings"] and not l.startswith("File descriptor ")]
         d["content"] = list(map_keys(parse_keypair_lines(content), self.KEYS))
         self.data = d if d else None
diff --git a/insights/parsers/tests/test_pvs.py b/insights/parsers/tests/test_pvs.py
index f6be5e895..f70c5b81d 100644
--- a/insights/parsers/tests/test_pvs.py
+++ b/insights/parsers/tests/test_pvs.py
@@ -3,6 +3,8 @@
 from insights.tests import context_wrap
 from .test_lvm import compare_partial_dicts

+FD_LEAK_HEADER = "File descriptor 5 (/dev/null) leaked on invocation. Parent PID 99999: timeout\n"
+
 PVS_INFO = """
 WARNING: Locking disabled. Be careful! This could corrupt your metadata.
LVM2_PV_FMT=''|LVM2_PV_UUID=''|LVM2_DEV_SIZE='500.00m'|LVM2_PV_NAME='/dev/sda1'|LVM2_PV_MDA_FREE='0 '|LVM2_PV_MDA_SIZE='0 '|LVM2_PE_START='0 '|LVM2_PV_SIZE='0 '|LVM2_PV_FREE='0 '|LVM2_PV_USED='0 '|LVM2_PV_ATTR='---'|LVM2_PV_ALLOCATABLE=''|LVM2_PV_EXPORTED=''|LVM2_PV_MISSING=''|LVM2_PV_PE_COUNT='0'|LVM2_PV_PE_ALLOC_COUNT='0'|LVM2_PV_TAGS=''|LVM2_PV_MDA_COUNT='0'|LVM2_PV_MDA_USED_COUNT='0'|LVM2_PV_BA_START='0 '|LVM2_PV_BA_SIZE='0 '|LVM2_PV_MAJOR='253'|LVM2_PV_MINOR='1' @@ -125,12 +127,15 @@ def test_pvs(): - pvs_records = Pvs(context_wrap(PVS_INFO)) - assert len(list(pvs_records)) == 2 - for k, v in PVS_SDA2_INFO.items(): - assert pvs_records.data["content"][1][k] == v - assert pvs_records["/dev/sda1"]["Attr"] == "---" - assert pvs_records.data["content"][0]['LVM2_PV_MINOR'] == '1' + def check(pvs_records): + assert len(list(pvs_records)) == 2 + for k, v in PVS_SDA2_INFO.items(): + assert pvs_records.data["content"][1][k] == v + assert pvs_records["/dev/sda1"]["Attr"] == "---" + assert pvs_records.data["content"][0]['LVM2_PV_MINOR'] == '1' + + check(Pvs(context_wrap(PVS_INFO))) + check(Pvs(context_wrap(FD_LEAK_HEADER + PVS_INFO))) pvs_records = Pvs(context_wrap(PVS_INFO_LONG)) assert len(list(pvs_records)) == 31 @@ -173,11 +178,16 @@ def test_pvs_dup(): def test_pvs_headings(): + def check(pvs_records): + assert len(pvs_records.data) == 9 + for k, v in PVS_HEADINGS_6.items(): + assert pvs_records[6][k] == v + assert pvs_records[6]['Missing'] is None + pvs_records = PvsHeadings(context_wrap(PVS_HEADINGS)) - assert len(pvs_records.data) == 9 - for k, v in PVS_HEADINGS_6.items(): - assert pvs_records[6][k] == v - assert pvs_records[6]['Missing'] is None + check(pvs_records) + pvs_records = PvsHeadings(context_wrap(FD_LEAK_HEADER + PVS_HEADINGS)) + check(pvs_records) # Test vg method fedora_pvs = pvs_records.vg('fedora') diff --git a/insights/parsers/tests/test_vgs.py b/insights/parsers/tests/test_vgs.py index 96cc1be38..07d2f5509 100644 --- a/insights/parsers/tests/test_vgs.py +++ b/insights/parsers/tests/test_vgs.py @@ -1,6 +1,8 @@ from insights.parsers.lvm import Vgs, VgsHeadings from insights.tests import context_wrap +FD_LEAK_HEADER = "File descriptor 5 (/dev/null) leaked on invocation. 
Parent PID 99999: timeout\n" + VGS_INFO = """ LVM2_VG_FMT='lvm2'|LVM2_VG_UUID='YCpusB-LEly-THGL-YXhC-t3q6-mUQV-wyFZrx'|LVM2_VG_NAME='rhel'|LVM2_VG_ATTR='wz--n-'|LVM2_VG_PERMISSIONS='writeable'|LVM2_VG_EXTENDABLE='extendable'|LVM2_VG_EXPORTED=''|LVM2_VG_PARTIAL=''|LVM2_VG_ALLOCATION_POLICY='normal'|LVM2_VG_CLUSTERED=''|LVM2_VG_SIZE='476.45g'|LVM2_VG_FREE='4.00m'|LVM2_VG_SYSID=''|LVM2_VG_SYSTEMID=''|LVM2_VG_LOCKTYPE=''|LVM2_VG_LOCKARGS=''|LVM2_VG_EXTENT_SIZE='4.00m'|LVM2_VG_EXTENT_COUNT='121971'|LVM2_VG_FREE_COUNT='1'|LVM2_MAX_LV='0'|LVM2_MAX_PV='0'|LVM2_PV_COUNT='1'|LVM2_LV_COUNT='3'|LVM2_SNAP_COUNT='0'|LVM2_VG_SEQNO='4'|LVM2_VG_TAGS=''|LVM2_VG_PROFILE=''|LVM2_VG_MDA_COUNT='1'|LVM2_VG_MDA_USED_COUNT='1'|LVM2_VG_MDA_FREE='0 '|LVM2_VG_MDA_SIZE='1020.00k'|LVM2_VG_MDA_COPIES='unmanaged' LVM2_VG_FMT='lvm2'|LVM2_VG_UUID='123456-LEly-THGL-YXhC-t3q6-mUQV-123456'|LVM2_VG_NAME='fedora'|LVM2_VG_ATTR='wz--n-'|LVM2_VG_PERMISSIONS='writeable'|LVM2_VG_EXTENDABLE='extendable'|LVM2_VG_EXPORTED=''|LVM2_VG_PARTIAL=''|LVM2_VG_ALLOCATION_POLICY='normal'|LVM2_VG_CLUSTERED=''|LVM2_VG_SIZE='476.45g'|LVM2_VG_FREE='4.00m'|LVM2_VG_SYSID=''|LVM2_VG_SYSTEMID=''|LVM2_VG_LOCKTYPE=''|LVM2_VG_LOCKARGS=''|LVM2_VG_EXTENT_SIZE='4.00m'|LVM2_VG_EXTENT_COUNT='121971'|LVM2_VG_FREE_COUNT='1'|LVM2_MAX_LV='0'|LVM2_MAX_PV='0'|LVM2_PV_COUNT='1'|LVM2_LV_COUNT='3'|LVM2_SNAP_COUNT='0'|LVM2_VG_SEQNO='4'|LVM2_VG_TAGS=''|LVM2_VG_PROFILE=''|LVM2_VG_MDA_COUNT='1'|LVM2_VG_MDA_USED_COUNT='1'|LVM2_VG_MDA_FREE='0 '|LVM2_VG_MDA_SIZE='1020.00k'|LVM2_VG_MDA_COPIES='unmanaged' @@ -76,17 +78,23 @@ def test_vgs(): - vgs_records = Vgs(context_wrap(VGS_INFO)) - assert len(list(vgs_records)) == 2 - for k, v in VGS_INFO_FEDORA.items(): - assert vgs_records["fedora"][k] == v - assert vgs_records["fedora"]['LVM2_VG_SEQNO'] == '4' + def check(vgs_records): + assert len(list(vgs_records)) == 2 + for k, v in VGS_INFO_FEDORA.items(): + assert vgs_records["fedora"][k] == v + assert vgs_records["fedora"]['LVM2_VG_SEQNO'] == '4' + + check(Vgs(context_wrap(VGS_INFO))) + check(Vgs(context_wrap(FD_LEAK_HEADER + VGS_INFO))) def test_vgs_headers(): - vgs_info = VgsHeadings(context_wrap(VGS_HEADER_INFO)) - assert vgs_info is not None - assert len(vgs_info.data) == 6 - for k, v in VGS_HEADER_5.items(): - assert vgs_info[5][k] == v - assert vgs_info[5]['VPerms'] is None + def check(vgs_info): + assert vgs_info is not None + assert len(vgs_info.data) == 6 + for k, v in VGS_HEADER_5.items(): + assert vgs_info[5][k] == v + assert vgs_info[5]['VPerms'] is None + + check(VgsHeadings(context_wrap(VGS_HEADER_INFO))) + check(VgsHeadings(context_wrap(FD_LEAK_HEADER + VGS_HEADER_INFO))) From d300b53557a22a348124b6273265f4e5dc2d98a6 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 22 Apr 2021 10:59:33 -0400 Subject: [PATCH 397/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 91 ++++++++------------------ 1 file changed, 29 insertions(+), 62 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 888bb3c58..b1d2b99fc 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -166,8 +166,6 @@ "Device is ineligible for IOMMU domain attach due to platform RMRR requirement", "Dropping TSO", "EDAC ", - "EFI", - "Efi", "Emulex OneConnect OCe10100, FCoE Initiator", "FEATURE IBPB_SUPPORT", "FEATURE SPEC_CTRL", @@ -195,7 +193,6 @@ "blocked FC remote port time out: removing target and saving binding", "crashkernel=auto resulted in 
zero bytes of reserved memory", "e1000: E1000 MODULE IS NOT SUPPORTED", - "efi", "fw=8.08.", "l1tf", "mce: ", @@ -339,6 +336,11 @@ "pattern": [], "symbolic_name": "firewall_cmd_list_all_zones" }, + { + "command": "python -m insights.tools.cat --no-header gcp_license_codes", + "pattern": [], + "symbolic_name": "gcp_license_codes" + }, { "command": "/usr/bin/getconf PAGE_SIZE", "pattern": [], @@ -693,7 +695,11 @@ }, { "command": "/usr/bin/lsinitrd", - "pattern": [], + "pattern": [ + "=====", + "Image:", + "lvm" + ], "symbolic_name": "lsinitrd" }, { @@ -1116,6 +1122,7 @@ "neutron-dhcp-ag", "neutron-l3-agen", "neutron-server", + "nfsd", "nginx", "nova-compute", "nova-conductor", @@ -1133,11 +1140,11 @@ "pkla-check-auth", "pmcd", "pmie", - "puppetserver", "radosgw", "redis-server", "rngd", "sap", + "smbd", "snmpd", "spausedd", "swift-proxy-ser", @@ -1185,6 +1192,7 @@ "neutron-dhcp-ag", "neutron-l3-agen", "neutron-server", + "nfsd", "nginx", "nova-compute", "nova-conductor", @@ -1204,13 +1212,12 @@ "pkla-check-auth", "pmcd", "pmie", - "postgres", "ptp4l", - "puppetserver", "radosgw", "redis-server", "rngd", "sap", + "smbd", "snmpd", "spausedd", "swift-proxy-ser", @@ -1237,7 +1244,6 @@ "auditd", "bash", "catalina.base", - "ceilometer-coll", "ceilometer-poll", "chronyd", "cinder-volume", @@ -1284,7 +1290,6 @@ "pmcd", "pmie", "postgres", - "puppetserver", "radosgw", "redis-server", "rngd", @@ -1331,6 +1336,7 @@ "neutron-l3-agen", "neutron-ns-metadata-proxy", "neutron-server", + "nfsd", "nginx", "nginx: master process", "nginx: worker process", @@ -1350,11 +1356,11 @@ "pkla-check-auth", "pmcd", "pmie", - "puppetserver", "radosgw", "redis-server", "rngd", "sap", + "smbd", "snmpd", "spausedd", "swift-proxy-ser", @@ -1926,8 +1932,7 @@ "file": "/var/log/cinder/cinder-api.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "Too many connections" + "Timed out waiting for RPC response" ], "symbolic_name": "cinder_api_log" }, @@ -1935,8 +1940,7 @@ "file": "/var/log/containers/cinder/cinder-api.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "Too many connections" + "Timed out waiting for RPC response" ], "symbolic_name": "cinder_api_log" }, @@ -1949,10 +1953,7 @@ "file": "/var/log/containers/cinder/cinder-volume.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Image cloning unsuccessful for image", - "Message: NFS file could not be discovered.", - "Timed out waiting for RPC response", - "[Errno 24] Too many open files" + "Timed out waiting for RPC response" ], "symbolic_name": "cinder_volume_log" }, @@ -1960,10 +1961,7 @@ "file": "/var/log/containers/cinder/volume.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Image cloning unsuccessful for image", - "Message: NFS file could not be discovered.", - "Timed out waiting for RPC response", - "[Errno 24] Too many open files" + "Timed out waiting for RPC response" ], "symbolic_name": "cinder_volume_log" }, @@ -1971,10 +1969,7 @@ "file": "/var/log/cinder/volume.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Image cloning unsuccessful for image", - "Message: NFS file could not be discovered.", - "Timed out waiting for RPC response", - "[Errno 24] Too many open files" + "Timed out waiting for RPC response" ], "symbolic_name": "cinder_volume_log" }, @@ -2101,8 +2096,6 @@ "Amazon EC2", "CVE-2017-1000364", "CVE-2018-14634", - "EFI", - "Efi", "FEATURE IBPB_SUPPORT", "FEATURE SPEC_CTRL", "Kernel page 
table isolation", @@ -2114,7 +2107,6 @@ "PM: hibernation exit", "Secure boot enabled", "__cpufreq_add_dev", - "efi", "hv_vmbus: probe failed for device", "l1tf", "x86/pti" @@ -2711,7 +2703,6 @@ { "file": "/var/log/mysql/mysqld.log", "pattern": [ - "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", "SSL error", "Too many open files", "[ERROR]", @@ -2722,7 +2713,6 @@ { "file": "/var/log/mysql.log", "pattern": [ - "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", "SSL error", "Too many open files", "[ERROR]", @@ -2770,9 +2760,7 @@ "file": "/var/log/nova/nova-api.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "Timed out waiting for a reply to message ID", - "Too many connections" + "Timed out waiting for RPC response" ], "symbolic_name": "nova_api_log" }, @@ -2780,9 +2768,7 @@ "file": "/var/log/containers/nova/nova-api.log", "pattern": [ "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "Timed out waiting for a reply to message ID", - "Too many connections" + "Timed out waiting for RPC response" ], "symbolic_name": "nova_api_log" }, @@ -2808,7 +2794,6 @@ "INFO os_brick.initiator.linuxscsi", "Instance shutdown by itself. Calling the stop API.", "Live Migration failure: internal error: process exited while connecting to monitor", - "Live Migration failure: operation failed: Failed to connect to remote libvirt URI qemu+ssh", "Migration pre-check error: Unable to migrate", "No such device or address", "Resuming guest", @@ -2816,16 +2801,13 @@ "Successfully plugged vif VIFBridge", "Timed out waiting for RPC response", "cmt=off: Property '.cmt' not found", - "does not match source", "error: Failed to start domain", "from mountpoint /dev", "is not active", "is not on shared storage", "libvirt-guests.sh", "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", - "libvirtError: Unable to delete file /var/lib/nova/instances/", - "unsupported configuration: Target network card MTU", - "unsupported configuration: Unable to find security driver for model selinux" + "libvirtError: Unable to delete file /var/lib/nova/instances/" ], "symbolic_name": "nova_compute_log" }, @@ -2851,7 +2833,6 @@ "INFO os_brick.initiator.linuxscsi", "Instance shutdown by itself. 
Calling the stop API.", "Live Migration failure: internal error: process exited while connecting to monitor", - "Live Migration failure: operation failed: Failed to connect to remote libvirt URI qemu+ssh", "Migration pre-check error: Unable to migrate", "No such device or address", "Resuming guest", @@ -2859,16 +2840,13 @@ "Successfully plugged vif VIFBridge", "Timed out waiting for RPC response", "cmt=off: Property '.cmt' not found", - "does not match source", "error: Failed to start domain", "from mountpoint /dev", "is not active", "is not on shared storage", "libvirt-guests.sh", "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", - "libvirtError: Unable to delete file /var/lib/nova/instances/", - "unsupported configuration: Target network card MTU", - "unsupported configuration: Unable to find security driver for model selinux" + "libvirtError: Unable to delete file /var/lib/nova/instances/" ], "symbolic_name": "nova_compute_log" }, @@ -3177,8 +3155,6 @@ "FATAL", "checkpoints are occurring too frequently", "connection limit exceeded for non-superusers", - "database is not accepting commands to avoid wraparound data loss in database", - "must be vacuumed within", "remaining connection slots are reserved for non-replication superuser connections" ], "symbolic_name": "postgresql_log" @@ -3476,7 +3452,6 @@ "MAXAUTHTRIES", "MAXSTARTUPS", "Macs", - "Match", "MaxAuthTries", "MaxStartups", "Maxauthtries", @@ -3484,7 +3459,6 @@ "PERMITEMPTYPASSWORDS", "PERMITROOTLOGIN", "PROTOCOL", - "PasswordAuthentication", "PermitEmptyPasswords", "PermitRootLogin", "Permitemptypasswords", @@ -3920,7 +3894,6 @@ "allow_automatic_dhcp_failover", "api_workers", "debug", - "dhcp_agents_per_network", "ipam_driver", "router_distributed", "rpc_workers", @@ -3937,7 +3910,6 @@ "allow_automatic_dhcp_failover", "api_workers", "debug", - "dhcp_agents_per_network", "ipam_driver", "router_distributed", "rpc_workers", @@ -4044,16 +4016,14 @@ { "file": "/etc/neutron/metadata_agent.ini", "pattern": [ - "[", - "auth_url" + "[" ], "symbolic_name": "neutron_metadata_agent_ini" }, { "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini", "pattern": [ - "[", - "auth_url" + "[" ], "symbolic_name": "neutron_metadata_agent_ini" }, @@ -4276,7 +4246,6 @@ "glob": "/var/opt/rh/rh-mysql*/log/mysql/mysqld.log", "symbolic_name": "mysql_log", "pattern": [ - "OpenSSL internal error, assertion failed: Digest MD5 forbidden in FIPS mode!", "SSL error", "Too many open files", "[ERROR]", @@ -4295,8 +4264,6 @@ "FATAL", "checkpoints are occurring too frequently", "connection limit exceeded for non-superusers", - "database is not accepting commands to avoid wraparound data loss in database", - "must be vacuumed within", "remaining connection slots are reserved for non-replication superuser connections" ] }, @@ -4363,5 +4330,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-03-31T16:03:45.694442" -} + "version": "2021-04-15T15:33:43.735037" +} \ No newline at end of file From 64fe3be5019760a04e9b07b565754da54cd1cf1c Mon Sep 17 00:00:00 2001 From: Sheng Lao <39508521+shlao@users.noreply.github.com> Date: Tue, 27 Apr 2021 11:27:38 +0800 Subject: [PATCH 398/892] Disable the spec Lsinitrd (#3036) Signed-off-by: shlao --- insights/specs/default.py | 1 - insights/specs/insights_archive.py | 1 - insights/tests/client/collection_rules/test_map_components.py | 3 ++- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git 
a/insights/specs/default.py b/insights/specs/default.py index 1123c7add..585a24de8 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -568,7 +568,6 @@ def httpd_cmd(broker): lsblk = simple_command("/bin/lsblk") lsblk_pairs = simple_command("/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO") lscpu = simple_command("/usr/bin/lscpu") - lsinitrd = simple_command("/usr/bin/lsinitrd") lsmod = simple_command("/sbin/lsmod") lsof = simple_command("/usr/sbin/lsof") lspci = simple_command("/sbin/lspci -k") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index f173f794e..3bc71ba7a 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -108,7 +108,6 @@ class InsightsArchiveSpecs(Specs): ls_etc = simple_file("insights_commands/ls_-lan_.etc_.etc.cloud.cloud.cfg.d_.etc.nova.migration_.etc.pki.ovirt-vmconsole_.etc.pki.tls.certs_.etc.pki.tls.private_.etc.rc.d.init.d_.etc.sysconfig") ls_ipa_idoverride_memberof = simple_file("insights_commands/ls_-lan_.usr.share.ipa.ui.js.plugins.idoverride-memberof") ls_lib_firmware = simple_file("insights_commands/ls_-lanR_.lib.firmware") - lsinitrd = simple_file("insights_commands/lsinitrd") ls_ocp_cni_openshift_sdn = simple_file("insights_commands/ls_-l_.var.lib.cni.networks.openshift-sdn") ls_origin_local_volumes_pods = simple_file("insights_commands/ls_-l_.var.lib.origin.openshift.local.volumes.pods") ls_osroot = simple_file("insights_commands/ls_-lan") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 4d58b2a54..600473a90 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -101,7 +101,8 @@ def test_get_component_by_symbolic_name(): 'sched_rt_runtime_us', 'libvirtd_qemu_log', 'mlx4_port', - 'qpid_stat_g' + 'qpid_stat_g', + 'lsinitrd' ] # first, make sure our list is proper and one of these From 0c05b0995e177622d8b63d8507b2da8d4f73fd9a Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 28 Apr 2021 04:18:12 +0800 Subject: [PATCH 399/892] Update parser MongodbConf to support another dbPath format (#3039) Signed-off-by: Huanhuan Li --- insights/parsers/mongod_conf.py | 2 +- insights/parsers/tests/test_mongod_conf.py | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/insights/parsers/mongod_conf.py b/insights/parsers/mongod_conf.py index fc08458e8..79a1276fc 100644 --- a/insights/parsers/mongod_conf.py +++ b/insights/parsers/mongod_conf.py @@ -147,7 +147,7 @@ def dbpath(self): if a key-value pair conf. 
""" if self.is_yaml: - return self.get('storage', {}).get('dbPath') + return self.get('storage', {}).get('dbPath') or self.get('storage.dbPath') else: return self.get('dbpath') diff --git a/insights/parsers/tests/test_mongod_conf.py b/insights/parsers/tests/test_mongod_conf.py index e42419e58..0b715cd00 100644 --- a/insights/parsers/tests/test_mongod_conf.py +++ b/insights/parsers/tests/test_mongod_conf.py @@ -90,6 +90,11 @@ """.strip() +YAML_CONF_FORMAT_2 = """ +storage.dbPath: /var/lib/mongodb +storage.journal.enabled: true +""" + def test_mongodb_conf(): @@ -119,6 +124,10 @@ def test_mongodb_conf(): assert result.get("abc") == '' assert result.get("def") is None + result = MongodbConf(context_wrap(YAML_CONF_FORMAT_2)) + assert result.is_yaml + assert result.dbpath == '/var/lib/mongodb' + result = MongodbConf(context_wrap(NORMAL_CONF_V1)) assert result.is_yaml is False assert len(result.data) == 2 From 1e5a991fdea9797f3fd99496a1d9d5e69f67b3b1 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 27 Apr 2021 15:31:01 -0500 Subject: [PATCH 400/892] ocp and ocpshell supports yaml files with multiple docs (#3037) Signed-off-by: Christopher Sams --- insights/ocp.py | 9 +++++---- insights/ocpshell.py | 7 ++++++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/insights/ocp.py b/insights/ocp.py index 535695313..82a924cd9 100644 --- a/insights/ocp.py +++ b/insights/ocp.py @@ -36,8 +36,8 @@ def _get_files(path): def _load(path): with open(path) as f: - doc = yaml.load(f, Loader=Loader) - return from_dict(doc, src=path) + for doc in yaml.load_all(f, Loader=Loader): + yield from_dict(doc, src=path) def _process(path, excludes=None): @@ -46,7 +46,8 @@ def _process(path, excludes=None): if excludes and any(fnmatch(f, e) for e in excludes): continue try: - yield _load(f) + for d in _load(f): + yield d except Exception: log.debug("Failed to load %s; skipping.", f) @@ -58,7 +59,7 @@ def analyze(paths, excludes=None): results = [] for path in paths: if content_type.from_file(path) == "text/plain": - results.append(_load(path)) + results.extend(_load(path)) elif os.path.isdir(path): results.extend(_process(path, excludes)) else: diff --git a/insights/ocpshell.py b/insights/ocpshell.py index 4fbd2269d..b8702b0a1 100755 --- a/insights/ocpshell.py +++ b/insights/ocpshell.py @@ -65,10 +65,15 @@ def main(): import IPython from traitlets.config.loader import Config + ns = dict(locals()) + ns["analyze"] = analyze + ns["ALL"] = None + ns["ANY"] = None + IPython.core.completer.Completer.use_jedi = False c = Config() c.TerminalInteractiveShell.banner1 = banner - IPython.start_ipython([], user_ns=locals(), config=c) + IPython.start_ipython([], user_ns=ns, config=c) if __name__ == "__main__": From 7263162a26eb01ef0c77baca42a0078feb81fda2 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 27 Apr 2021 16:33:59 -0400 Subject: [PATCH 401/892] Update spec pvs, vgs, lvs, and vgdisplay (#3035) * The file outputted by the pvs, vgs, lvs and vgdisplay commands changed in sos v3.6 and again in v4.0, so I added the new filenames to the specs. 
Signed-off-by: Ryan Blakley --- insights/specs/sos_archive.py | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 6b22cd36d..0010928bb 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -137,7 +137,13 @@ class SosSpecs(Specs): ]) lsscsi = simple_file("sos_commands/scsi/lsscsi") ls_dev = first_file(["sos_commands/block/ls_-lanR_.dev", "sos_commands/devicemapper/ls_-lanR_.dev"]) - lvs = first_file(["sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0", "sos_commands/lvm2/lvs_-a_-o_devices"]) + lvs = first_file([ + "sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_lv_kernel_read_ahead_lv_read_ahead_stripes_stripesize_--config_global_metadata_read_only_1_--nolocking_--foreign", + "sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_lv_kernel_read_ahead_lv_read_ahead_stripes_stripesize_--config_global_locking_type_0_metadata_read_only_1", + "sos_commands/lvm2/lvs_-a_-o_lv_tags_devices_--config_global_locking_type_0", + "sos_commands/lvm2/lvs_-a_-o_devices", + "sos_commands/devicemapper/lvs_-a_-o__devices" + ]) manila_conf = first_file(["/var/lib/config-data/puppet-generated/manila/etc/manila/manila.conf", "/etc/manila/manila.conf"]) mdadm_E = glob_file("sos_commands/md/mdadm_-E_*") mistral_executor_log = simple_file("/var/log/mistral/executor.log") @@ -195,7 +201,13 @@ class SosSpecs(Specs): "/etc/puppetlabs/puppet/ssl/certs/ca.pem", "sos_commands/foreman/foreman-debug/var/lib/puppet/ssl/certs/ca.pem" ]) - pvs = first_file(["sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0", "sos_commands/lvm2/pvs_-a_-v", "sos_commands/devicemapper/pvs_-a_-v"]) + pvs = first_file([ + "sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_metadata_read_only_1_--nolocking_--foreign", + "sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0_metadata_read_only_1", + "sos_commands/lvm2/pvs_-a_-v_-o_pv_mda_free_pv_mda_size_pv_mda_count_pv_mda_used_count_pe_start_--config_global_locking_type_0", + "sos_commands/lvm2/pvs_-a_-v", + "sos_commands/devicemapper/pvs_-a_-v" + ]) qpid_stat_q = first_file([ "sos_commands/pulp/qpid-stat_-q_--ssl-certificate_.etc.pki.pulp.qpid.client.crt_-b_amqps_..localhost_5671", "sos_commands/pulp/qpid-stat_-q_--ssl-certificate_.etc.pki.katello.qpid_client_striped.crt_-b_amqps_..localhost_5671", @@ -285,8 +297,20 @@ class SosSpecs(Specs): vdsm_conf = simple_file("etc/vdsm/vdsm.conf") vdsm_id = simple_file("etc/vdsm/vdsm.id") vdsm_import_log = glob_file("var/log/vdsm/import/import-*.log") - vgdisplay = first_file(["sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0", "sos_commands/lvm2/vgdisplay_-vv"]) - vgs = first_file(["sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0", "sos_commands/lvm2/vgs_-v", "sos_commands/devicemapper/vgs_-v"]) + vgdisplay = first_file([ + "sos_commands/lvm2/vgdisplay_-vv_--config_global_metadata_read_only_1_--nolocking_--foreign", + "sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0_metadata_read_only_1", + "sos_commands/lvm2/vgdisplay_-vv_--config_global_locking_type_0", + "sos_commands/lvm2/vgdisplay_-vv", + "sos_commands/devicemapper/vgdisplay_-vv" + ]) + vgs = first_file([ + 
"sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_systemid_--config_global_metadata_read_only_1_--nolocking_--foreign", + "sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0_metadata_read_only_1", + "sos_commands/lvm2/vgs_-v_-o_vg_mda_count_vg_mda_free_vg_mda_size_vg_mda_used_count_vg_tags_--config_global_locking_type_0", + "sos_commands/lvm2/vgs_-v", + "sos_commands/devicemapper/vgs_-v" + ]) virsh_list_all = simple_file("sos_commands/virsh/virsh_-r_list_--all") vmcore_dmesg = glob_file("/var/crash/*/vmcore-dmesg.txt") vmware_tools_conf = simple_file("etc/vmware-tools/tools.conf") From a8da9a1674e140f0484ec0e7aab18d3e1d20fe11 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 28 Apr 2021 14:19:03 +0530 Subject: [PATCH 402/892] New parser pmrep command (#3033) * New parser pmrep command Signed-off-by: rasrivas * updated the test codes Signed-off-by: rasrivas * updated the doc test code Signed-off-by: rasrivas * updated code as per the output of the output of the csv command Signed-off-by: rasrivas * removed unwanted line Signed-off-by: rasrivas * updated the parser code and test cases Signed-off-by: rasrivas * updated the logic Signed-off-by: rasrivas * update command in the insights_archive file Signed-off-by: rasrivas * fixed the pipeline fail by fixing the code to sort the test case Signed-off-by: rasrivas * updated the test case code Signed-off-by: rasrivas * fixed the pipeline error Signed-off-by: rasrivas * added a search method Signed-off-by: rasrivas * updated the code for the DictReader method Signed-off-by: rasrivas * updated keyword_search method and updated the search method Signed-off-by: rasrivas * fixed the test cases code for sorting the list Signed-off-by: rasrivas --- docs/shared_parsers_catalog/pmrep.rst | 3 ++ insights/parsers/__init__.py | 1 + insights/parsers/pmrep.py | 61 ++++++++++++++++++++++++ insights/parsers/tests/test_pmrep.py | 68 +++++++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 7 files changed, 136 insertions(+) create mode 100644 docs/shared_parsers_catalog/pmrep.rst create mode 100644 insights/parsers/pmrep.py create mode 100644 insights/parsers/tests/test_pmrep.py diff --git a/docs/shared_parsers_catalog/pmrep.rst b/docs/shared_parsers_catalog/pmrep.rst new file mode 100644 index 000000000..df5b40aa1 --- /dev/null +++ b/docs/shared_parsers_catalog/pmrep.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.pmrep + :members: + :show-inheritance: diff --git a/insights/parsers/__init__.py b/insights/parsers/__init__.py index 77aa03ed9..d490451ae 100644 --- a/insights/parsers/__init__.py +++ b/insights/parsers/__init__.py @@ -513,6 +513,7 @@ def keyword_search(rows, **kwargs): 'default': lambda s, v: s == v, 'contains': lambda s, v: s is not None and v in s, 'startswith': lambda s, v: s is not None and s.startswith(v), + 'endswith': lambda s, v: s is not None and s.endswith(v), 'lower_value': lambda s, v: None not in (s, v) and s.lower() == v.lower(), } diff --git a/insights/parsers/pmrep.py b/insights/parsers/pmrep.py new file mode 100644 index 000000000..29453b9ef --- /dev/null +++ b/insights/parsers/pmrep.py @@ -0,0 +1,61 @@ +""" +Pmrep - command ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` +===================================================================================================================== + +Parse the content of the ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command. + +Sample ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command output:: + + Time,"network.interface.out.packets-lo","network.interface.out.packets-eth0","network.interface.collisions-lo","network.interface.collisions-eth0","swap.pagesout" + 2021-04-26 05:42:24,,,,, + 2021-04-26 05:42:25,1.000,2.000,3.000,4.000,5.000 + +Examples: + >>> type(pmrep_doc_obj) + + >>> pmrep_doc_obj = sorted(pmrep_doc_obj, key=lambda x: x['name']) + >>> pmrep_doc_obj[1] + {'name': 'network.interface.collisions-eth0', 'value': '4.000'} + >>> pmrep_doc_obj[4] + {'name': 'network.interface.out.packets-lo', 'value': '1.000'} + >>> pmrep_doc_obj[5] + {'name': 'swap.pagesout', 'value': '5.000'} +""" + +from csv import DictReader +from insights import parser, CommandParser +from insights.specs import Specs +from insights.parsers import SkipException, ParseException, keyword_search + + +@parser(Specs.pmrep_metrics) +class PMREPMetrics(CommandParser, list): + """Parses output of ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command.""" + def parse_content(self, content): + if not content or len(content) == 1: + raise SkipException("There is no data in the table") + try: + reader = DictReader(content) + except Exception: + raise ParseException("The content isn't in csv format") + for k, v in dict(list(reader)[-1]).items(): + self.append(dict(name=k, value=v)) + + def search(self, **kwargs): + """ + Get the rows by searching the table with kwargs. + This uses the :py:func:`insights.parsers.keyword_search` function for + searching; see its documentation for usage details. If no search + parameters are given, no rows are returned. + + Returns: + list: A list of dictionaries of rows that match the given + search criteria. 
+ + Examples: + >>> pmrep_doc_obj.search(name__endswith='lo') + [{'name': 'network.interface.out.packets-lo', 'value': '1.000'}, {'name': 'network.interface.collisions-lo', 'value': '3.000'}] + >>> pmrep_doc_obj.search(name__endswith='swap.pagesout') + [{'name': 'swap.pagesout', 'value': '5.000'}] + """ + return keyword_search(self, **kwargs) diff --git a/insights/parsers/tests/test_pmrep.py b/insights/parsers/tests/test_pmrep.py new file mode 100644 index 000000000..d942bc158 --- /dev/null +++ b/insights/parsers/tests/test_pmrep.py @@ -0,0 +1,68 @@ +import pytest +import doctest +from insights.tests import context_wrap +from insights.parsers import SkipException +from insights.parsers import pmrep +from insights.parsers.pmrep import PMREPMetrics + +PMREPMETRIC_DATA = """ +Time,"network.interface.out.packets-lo","network.interface.out.packets-eth0","network.interface.collisions-lo","network.interface.collisions-eth0","swap.pagesout" +2021-04-26 05:42:24,,,,, +2021-04-26 05:42:25,1.000,2.000,3.000,4.000,5.000 +""".strip() + +PMREPMETRIC_DATA_2 = """ +Time,"network.interface.out.packets-lo","network.interface.collisions-lo","swap.pagesout" +2021-04-26 05:42:24,,, +2021-04-26 05:42:25,1.000,2.000,3.000 +""".strip() + +PMREPMETRIC_WRONG_DATA = """ +Time,"network.interface.out.packets-lo","network.interface.collisions-lo","swap.pagesout" +""".strip() + + +PMREPMETRIC_EMPTY_DATA = """ +""".strip() + + +def test_pmrep_info(): + pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA)) + pmrep_table = sorted(pmrep_table, key=lambda x: x['name']) + assert pmrep_table[0] == {'name': 'Time', 'value': '2021-04-26 05:42:25'} + assert pmrep_table[1] == {'name': 'network.interface.collisions-eth0', 'value': '4.000'} + assert pmrep_table[2] == {'name': 'network.interface.collisions-lo', 'value': '3.000'} + assert pmrep_table[3] == {'name': 'network.interface.out.packets-eth0', 'value': '2.000'} + assert pmrep_table[4] == {'name': 'network.interface.out.packets-lo', 'value': '1.000'} + assert pmrep_table[5] == {'name': 'swap.pagesout', 'value': '5.000'} + + pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA_2)) + pmrep_table = sorted(pmrep_table, key=lambda x: x['name']) + assert pmrep_table[0] == {'name': 'Time', 'value': '2021-04-26 05:42:25'} + assert pmrep_table[1] == {'name': 'network.interface.collisions-lo', 'value': '2.000'} + assert pmrep_table[2] == {'name': 'network.interface.out.packets-lo', 'value': '1.000'} + assert pmrep_table[3] == {'name': 'swap.pagesout', 'value': '3.000'} + + pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA)) + assert pmrep_table.search(name__endswith='lo') == [{'name': 'network.interface.out.packets-lo', 'value': '1.000'}, {'name': 'network.interface.collisions-lo', 'value': '3.000'}] + assert pmrep_table.search(name__endswith='swap.pagesout') == [{'name': 'swap.pagesout', 'value': '5.000'}] + + +def test_empty(): + with pytest.raises(SkipException) as e: + PMREPMetrics(context_wrap(PMREPMETRIC_EMPTY_DATA)) + assert 'There is no data in the table' in str(e) + + +def test_wrong_data(): + with pytest.raises(SkipException) as e: + PMREPMetrics(context_wrap(PMREPMETRIC_WRONG_DATA)) + assert 'There is no data in the table' in str(e) + + +def test_pmrep_doc_examples(): + env = { + 'pmrep_doc_obj': PMREPMetrics(context_wrap(PMREPMETRIC_DATA)), + } + failed, total = doctest.testmod(pmrep, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index cd8da93d6..eb7288341 100644 --- a/insights/specs/__init__.py +++ 
b/insights/specs/__init__.py @@ -489,6 +489,7 @@ class Specs(SpecSet): php_ini = RegistryPoint(filterable=True) pluginconf_d = RegistryPoint(multi_output=True) pmlog_summary = RegistryPoint() + pmrep_metrics = RegistryPoint() podman_container_inspect = RegistryPoint(multi_output=True) podman_image_inspect = RegistryPoint(multi_output=True) podman_list_containers = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 585a24de8..3477ce8d9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -755,6 +755,7 @@ def pmlog_summary_file(broker): pmlog_summary = command_with_args( "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free kernel.all.cpu.wait.total", pmlog_summary_file) + pmrep_metrics = simple_command("pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv") postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") postgresql_conf = first_file([ diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 3bc71ba7a..9b94c2675 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -187,6 +187,7 @@ class InsightsArchiveSpecs(Specs): pcp_metrics = simple_file("insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5") pcs_quorum_status = simple_file("insights_commands/pcs_quorum_status") pcs_status = simple_file("insights_commands/pcs_status") + pmrep_metrics = simple_file("insights_commands/pmrep_-t_1s_-T_1s_network.interface.out.packets_network.interface.collisions_swap.pagesout_-o_csv") postconf_builtin = simple_file("insights_commands/postconf_-C_builtin") postconf = simple_file("insights_commands/postconf") ps_alxwww = simple_file("insights_commands/ps_alxwww") From fc083528d1443dfaee2ae22d903f0e5306f5e8e6 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Thu, 29 Apr 2021 08:33:43 -0400 Subject: [PATCH 403/892] Fix ValueError exception in dmsetup parser (#3043) * The parser wasn't set up to parse the output if there weren't any devices found. * Added test for "No devices found" output.
* Fix #3042 Signed-off-by: Ryan Blakley --- insights/parsers/dmsetup.py | 3 +++ insights/parsers/tests/test_dmsetup.py | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/insights/parsers/dmsetup.py b/insights/parsers/dmsetup.py index ba693bef2..ae77e6da9 100644 --- a/insights/parsers/dmsetup.py +++ b/insights/parsers/dmsetup.py @@ -189,6 +189,9 @@ class DmsetupStatus(CommandParser, list): def parse_content(self, content): self.unparseable_lines = [] + if content[0].lower() == "no devices found": + return + for line in content: _device_name, _device_info_str = line.rsplit(':', 1) device_name = _device_name.strip() diff --git a/insights/parsers/tests/test_dmsetup.py b/insights/parsers/tests/test_dmsetup.py index 9a027967e..634336449 100644 --- a/insights/parsers/tests/test_dmsetup.py +++ b/insights/parsers/tests/test_dmsetup.py @@ -105,6 +105,10 @@ def test_dmsetup_setupinfo(): docker-253:10-1234567-0df13579: 0 20971520 thin 1922048 """.strip() +DMSETUP_STATUS_3 = """ +No devices found +""".strip() + def test_dmsetup_status(): r = DmsetupStatus(context_wrap(DMSETUP_STATUS_1)) @@ -165,6 +169,10 @@ def test_dmsetup_status(): 'rootvg-docker--pool: 0 129548288 thin-pool 1 20/49152 38/126512 - rw no_discard_passdown queue_if_no_space', 'docker-253:10-1234567-0df13579: 0 20971520 thin 1922048'] + r = DmsetupStatus(context_wrap(DMSETUP_STATUS_3)) + assert len(r) == 0 + assert r.unparseable_lines == [] + DMSETUP_EXAMPLES = """ Name Maj Min Stat Open Targ Event UUID From 9540ddedb21485c0b3a36a8fef70a52dfd16efbd Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 29 Apr 2021 21:31:43 +0800 Subject: [PATCH 404/892] Enhance docstring installrpm (#3047) Signed-off-by: jiazhang --- insights/parsers/installed_rpms.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/insights/parsers/installed_rpms.py b/insights/parsers/installed_rpms.py index bb5956524..b8c1d486b 100644 --- a/insights/parsers/installed_rpms.py +++ b/insights/parsers/installed_rpms.py @@ -333,6 +333,11 @@ class InstalledRpm(object): It may also contain supplementary information from SOS report or epoch information from JSON. 
+ When comparing rpms whose epoch is not ``null``, it is necessary to create + an InstalledRpm object with epoch information, as in the following example:: + + InstalledRpm.from_json('{"name":"microcode_ctl","epoch":"4","version":"20200609","release":"2.20201027.1.el8_3"}') + + Factory methods are provided such as ``from_package`` to create an object from a short package string:: + + From 57e582d6e26e66201554ae2af5c348afbeb1980d Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 29 Apr 2021 11:35:24 -0400 Subject: [PATCH 405/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index b1d2b99fc..2f339e64e 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -693,15 +693,6 @@ "pattern": [], "symbolic_name": "lscpu" }, - { - "command": "/usr/bin/lsinitrd", - "pattern": [ - "=====", - "Image:", - "lvm" - ], - "symbolic_name": "lsinitrd" - }, { "command": "/sbin/lsmod", "pattern": [], "symbolic_name": "lsmod" }, @@ -1069,6 +1060,11 @@ "pattern": [], "symbolic_name": "pcs_status" }, + { + "command": "pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv", + "pattern": [], + "symbolic_name": "pmrep_metrics" + }, { "command": "/usr/sbin/postconf -C builtin", "pattern": [ @@ -4330,5 +4326,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-04-15T15:33:43.735037" + "version": "2021-04-22T11:04:17.009396" } \ No newline at end of file From 00ada526903a95a27ed44450aac2129c18eb6c9b Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Fri, 30 Apr 2021 10:23:23 -0400 Subject: [PATCH 406/892] Fix ValueError exception in systemd_analyze parser (#3041) * The parser wasn't set up to parse all of the possible column counts; it was only set up to expect two columns, so seconds only. But the command can have up to 8 columns of output, so I updated it to parse all of the columns properly. * Added all of the possible columns to the test file. * Fix #3040 Signed-off-by: Ryan Blakley --- insights/parsers/systemd_analyze.py | 36 +++++++++++++++---- .../parsers/tests/test_systemd_analyze.py | 10 ++++++ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py index 0714a5fcf..7174992e9 100644 --- a/insights/parsers/systemd_analyze.py +++ b/insights/parsers/systemd_analyze.py @@ -55,10 +55,34 @@ def parse_content(self, content): raise SkipException for c in content: - time, service = c.split() - if time.endswith('ms'): - _time = round(float(time.strip('ms')) / 1000, 5) - else: - _time = round(float(time.strip('ms')), 5) + cols = c.split() + # Check to make sure that the first character of the first + # entry is a number. This will hopefully exclude any errors + # that are outputted in the file. + if cols[0][0].isdigit(): + # The service should be the last column, so just + # remove the last column from the list before looping. + service = cols.pop() + time = 0 + for x in cols: + # Convert each column to seconds, and add them up. + if x.endswith('y'): + # Pulled the 31557600 from systemd src. + time += int(x.strip('y')) * 31557600 + elif x.endswith('month'): + # Pulled the 2629800 from systemd src.
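+ # (2629800 s = 365.25 days * 86400 s / 12, the mean Gregorian month; 31557600 s above is likewise 365.25 days.)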
+ time += int(x.strip('month')) * 2629800 + elif x.endswith('w'): + time += int(x.strip('w')) * 7 * 24 * 60 ** 2 + elif x.endswith('d'): + time += int(x.strip('d')) * 24 * 60 ** 2 + elif x.endswith('h'): + time += int(x.strip('h')) * 60 ** 2 + elif x.endswith('min'): + time += int(x.strip('min')) * 60 + elif x.endswith('ms'): + time += float(x.strip('ms')) / 1000 + elif x.endswith('s'): + time += float(x.strip('s')) - self[service] = _time + self[service] = time diff --git a/insights/parsers/tests/test_systemd_analyze.py b/insights/parsers/tests/test_systemd_analyze.py index d718b23f4..19809d077 100644 --- a/insights/parsers/tests/test_systemd_analyze.py +++ b/insights/parsers/tests/test_systemd_analyze.py @@ -7,6 +7,12 @@ OUTPUT = """ +1y 1month 2w 6d 1h 33min 53.782s dev-sdy.device +2month 3w 4d 7h 10min 17.082s dev-sdm.device +1w 5d 2h 8min 12.802s dev-sdw.device +3d 8h 57min 30.859s dev-sdd.device +2h 56min 26.721s dev-mapper-vg_root\x2dlv_root.device +5min 230ms splunk.service 33.080s cloud-init-local.service 32.423s unbound-anchor.service 2.773s kdump.service @@ -24,7 +30,11 @@ def test_output(): assert ('cloud-init-local.service' in output) is True # Test time(seconds) + assert output.get('tuned.service', 0) == 0.872 assert output.get('cloud-init.service', 0) == 1.304 + assert output.get('splunk.service', 0) == 300.23 + assert output.get('dev-sdd.device', 0) == 291450.859 + assert output.get('dev-sdy.device', 0) == 35921033.782 with pytest.raises(SkipException): assert systemd_analyze.SystemdAnalyzeBlame(context_wrap("")) is None From 82b09a1e23c4304f2fe615a00d4ec34aa77e36dc Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 4 May 2021 15:29:20 -0400 Subject: [PATCH 407/892] Fix ValueError exception in virsh_list_all parser (#3045) * The parser didn't account for error messages in the content, so I updated it to exclude the error messages. * Add test entry for error messages. 
* Fix #3044 Signed-off-by: Ryan Blakley --- insights/parsers/tests/test_virsh_list_all.py | 29 ++++++++++++------- insights/parsers/virsh_list_all.py | 4 +++ 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/insights/parsers/tests/test_virsh_list_all.py b/insights/parsers/tests/test_virsh_list_all.py index 12b83397e..060b85e1a 100644 --- a/insights/parsers/tests/test_virsh_list_all.py +++ b/insights/parsers/tests/test_virsh_list_all.py @@ -7,6 +7,12 @@ BLANK = """ """.strip() +ERROR = """ +error: Failed to reconnect to the hypervisor +error: no valid connection +error: internal error Unable to locate libvirtd daemon in /usr/sbin (to override, set $LIBVIRTD_PATH to the name of the libvirtd binary) +""".strip() + NO_RESULT = """ Id Name State ---------------------------------------------------- @@ -31,6 +37,13 @@ """.strip() +def assert_if_none(output): + assert output.fields == [] + assert output.cols == [] + assert output.keywords == [] + assert output.get_vm_state('NORHEL') is None + + def test_virsh_output(): output = virsh_list_all.VirshListAll(context_wrap(OUTPUT)) assert len(output.search(state='shut off')) == 11 @@ -45,19 +58,15 @@ def test_virsh_output(): def test_virsh_output_no_vms(): - output = virsh_list_all.VirshListAll(context_wrap(NO_RESULT)) - assert output.fields == [] - assert output.cols == [] - assert output.keywords == [] - assert output.get_vm_state('NORHEL') is None + assert_if_none(virsh_list_all.VirshListAll(context_wrap(NO_RESULT))) def test_virsh_output_blank(): - output = virsh_list_all.VirshListAll(context_wrap(BLANK)) - assert output.fields == [] - assert output.cols == [] - assert output.keywords == [] - assert output.get_vm_state('NORHEL') is None + assert_if_none(virsh_list_all.VirshListAll(context_wrap(BLANK))) + + +def test_virsh_output_error(): + assert_if_none(virsh_list_all.VirshListAll(context_wrap(ERROR))) def test_virsh_list_all_documentation(): diff --git a/insights/parsers/virsh_list_all.py b/insights/parsers/virsh_list_all.py index 0157f130d..f8d391336 100644 --- a/insights/parsers/virsh_list_all.py +++ b/insights/parsers/virsh_list_all.py @@ -76,6 +76,10 @@ def parse_content(self, content): self.fields = [] self.cols = [] self.keywords = [] + # Check and remove any error messages or empty lines. This is to + # prevent any ValueError exceptions when parse_fixed_table is + # called below. + content = [l for l in content if not l.startswith("error: ") and l != ""] if not content: return From 95c836705ad8544489f558a6916e3f6747e09907 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Thu, 6 May 2021 07:57:46 -0500 Subject: [PATCH 408/892] Collect `rpm-ostree status --json` (#3027) * Collect `rpm-ostree status --json` Signed-off-by: Christopher Sams * Add doctest to unit tests.
Signed-off-by: Christopher Sams --- .../rpm_ostree_status.rst | 3 + insights/parsers/rpm_ostree_status.py | 83 +++++++++++++++++++ .../parsers/tests/test_rpm_ostree_status.py | 63 ++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 6 files changed, 152 insertions(+) create mode 100644 docs/shared_parsers_catalog/rpm_ostree_status.rst create mode 100644 insights/parsers/rpm_ostree_status.py create mode 100644 insights/parsers/tests/test_rpm_ostree_status.py diff --git a/docs/shared_parsers_catalog/rpm_ostree_status.rst b/docs/shared_parsers_catalog/rpm_ostree_status.rst new file mode 100644 index 000000000..def269695 --- /dev/null +++ b/docs/shared_parsers_catalog/rpm_ostree_status.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.rpm_ostree_status + :members: + :show-inheritance: \ No newline at end of file diff --git a/insights/parsers/rpm_ostree_status.py b/insights/parsers/rpm_ostree_status.py new file mode 100644 index 000000000..3e7036e4f --- /dev/null +++ b/insights/parsers/rpm_ostree_status.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" +RpmOstreeStatus - Command ``/usr/bin/rpm-ostree status --json`` +=============================================================== + +The ``RpmOstreeStatus`` class parses the output of the ``/usr/bin/rpm-ostree status --json`` command. + +rpm-ostree is a hybrid image and package system; as the name suggests, it +uses OSTree for the image side, and RPM for the package side. It supports +composing RPMs server-side into an OSTree commit (like an image), and clients +can replicate that bit-for-bit, with fast incremental updates. Additionally, +the hybrid nature comes to the fore with client-side package layering and +overrides. + +The status command gives information pertaining to the current deployment in +use. It lists the names and refspecs of all possible deployments in order, +such that the first deployment in the list is the default upon boot. + +Sample input data:: + + { + "deployments" : [ + { + "base-commit-meta" : { + "rpmostree.inputhash" : "d272136f0a700a049da30520591205fec5474125474a58a4c9a63ecc8243f227" + }, + "requested-local-packages" : [ + ], + "base-removals" : [ + ], + "unlocked" : "none", + "booted" : true, + "initramfs-etc" : [ + ], + "id" : "rhel-f0c0294860db563e5906db8c9f257d2bfebe40c93e0320b0e380b879f545e267.0", + "osname" : "rhel", + "origin" : "edge:rhel/8/x86_64/edge", + "pinned" : false, + "regenerate-initramfs" : false, + "base-local-replacements" : [ + ], + "checksum" : "f0c0294860db563e5906db8c9f257d2bfebe40c93e0320b0e380b879f545e267", + "requested-base-local-replacements" : [ + ], + "timestamp" : 1614717652, + "requested-packages" : [ + ], + "serial" : 0, + "packages" : [ + ], + "gpg-enabled" : false, + "requested-base-removals" : [ + ] + } + ], + "transaction" : null, + "cached-update" : null + } + +Examples: + >>> type(status) + <class 'insights.parsers.rpm_ostree_status.RpmOstreeStatus'> + >>> len(status.query.deployments.where("booted", True)) + 1 +""" +from .. import parser, CommandParser, YAMLParser +from ..parsr.query import from_dict +from insights.specs import Specs + + +@parser(Specs.rpm_ostree_status) +class RpmOstreeStatus(CommandParser, YAMLParser): + """ + Class ``RpmOstreeStatus`` parses the output of the ``rpm-ostree status --json`` command. + + Attributes: + data (dict): The parsed output of the command. + query (insights.parsr.query.Entry): The queryable object representing + the data dictionary.
+ """ + def parse_content(self, content): + super(RpmOstreeStatus, self).parse_content(content) + self.query = from_dict(self.data) diff --git a/insights/parsers/tests/test_rpm_ostree_status.py b/insights/parsers/tests/test_rpm_ostree_status.py new file mode 100644 index 000000000..c8e13822b --- /dev/null +++ b/insights/parsers/tests/test_rpm_ostree_status.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- +import doctest +from insights.parsers import rpm_ostree_status +from insights.parsers.rpm_ostree_status import RpmOstreeStatus +from insights.tests import context_wrap + + +GOOD = """ + { + "deployments" : [ + { + "base-commit-meta" : { + "rpmostree.inputhash" : "d272136f0a700a049da30520591205fec5474125474a58a4c9a63ecc8243f227" + }, + "requested-local-packages" : [ + ], + "base-removals" : [ + ], + "unlocked" : "none", + "booted" : true, + "initramfs-etc" : [ + ], + "id" : "rhel-f0c0294860db563e5906db8c9f257d2bfebe40c93e0320b0e380b879f545e267.0", + "osname" : "rhel", + "origin" : "edge:rhel/8/x86_64/edge", + "pinned" : false, + "regenerate-initramfs" : false, + "base-local-replacements" : [ + ], + "checksum" : "f0c0294860db563e5906db8c9f257d2bfebe40c93e0320b0e380b879f545e267", + "requested-base-local-replacements" : [ + ], + "timestamp" : 1614717652, + "requested-packages" : [ + ], + "serial" : 0, + "packages" : [ + ], + "gpg-enabled" : false, + "requested-base-removals" : [ + ] + } + ], + "transaction" : null, + "cached-update" : null + } +""".strip() + + +def test_good_data(): + data = context_wrap(GOOD) + status = RpmOstreeStatus(data) + assert status.data["deployments"][0]["booted"] + assert len(status.query.deployments.where("booted", True)) == 1 + + +def test_doc_examples(): + data = context_wrap(GOOD) + env = { + 'status': RpmOstreeStatus(data) + } + failed, _ = doctest.testmod(rpm_ostree_status, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index eb7288341..e775853eb 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -559,6 +559,7 @@ class Specs(SpecSet): rndc_status = RegistryPoint() root_crontab = RegistryPoint() route = RegistryPoint() + rpm_ostree_status = RegistryPoint() rpm_V_packages = RegistryPoint() rsyslog_conf = RegistryPoint(filterable=True, multi_output=True) samba = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 3477ce8d9..0f9b47acf 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -797,6 +797,7 @@ def pmlog_summary_file(broker): rhsm_log = simple_file("/var/log/rhsm/rhsm.log") rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") + rpm_ostree_status = simple_command("/usr/bin/rpm-ostree status --json") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True, signum=signal.SIGTERM) rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/*.conf"]) samba = simple_file("/etc/samba/smb.conf") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 9b94c2675..82f4b84d2 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -206,6 +206,7 @@ class InsightsArchiveSpecs(Specs): rhev_data_center = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_rhev_data_center") rhsm_katello_default_ca_cert = simple_file("insights_commands/openssl_x509_-in_.etc.rhsm.ca.katello-default-ca.pem_-noout_-issuer") 
rndc_status = simple_file("insights_commands/rndc_status") + rpm_ostree_status = simple_file("insights_commands/rpm-ostree_status_--json") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") satellite_content_hosts_count = first_file([ From 1e1fc3086b5f713471401ba20ea9556e43b2014f Mon Sep 17 00:00:00 2001 From: Andrew Kofink Date: Thu, 6 May 2021 13:18:40 -0400 Subject: [PATCH 409/892] fix(compliance): RHICOMPL-1111 Use ID instead of hostname (#3046) This changes all API calls to Compliance to use the host ID, retrieved from the Inventory API via machine_id, rather than the hostname. This fixes the use case when a user has updated the display name using the client's --display-name option. Signed-off-by: Andrew Kofink --- insights/client/apps/compliance/__init__.py | 15 ++++++--- insights/tests/client/apps/test_compliance.py | 32 +++++++++++++++---- 2 files changed, 36 insertions(+), 11 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index be3434d33..36eac2e1d 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -2,7 +2,6 @@ from insights.client.archive import InsightsArchive from insights.client.connection import InsightsConnection from insights.client.constants import InsightsConstants as constants -from insights.client.utilities import determine_hostname from logging import getLogger from platform import linux_distribution from re import findall @@ -22,10 +21,10 @@ class ComplianceClient: def __init__(self, config): self.config = config self.conn = InsightsConnection(config) - self.hostname = determine_hostname() self.archive = InsightsArchive(config) def oscap_scan(self): + self.inventory_id = self._get_inventory_id() self._assert_oscap_rpms_exist() initial_profiles = self.get_initial_profiles() matching_os_profiles = self.get_profiles_matching_os() @@ -80,10 +79,10 @@ def get_profiles(self, search): return [] def get_initial_profiles(self): - return self.get_profiles('system_names={0} canonical=false external=false'.format(self.hostname)) + return self.get_profiles('system_ids={0} canonical=false external=false'.format(self.inventory_id)) def get_profiles_matching_os(self): - return self.get_profiles('system_names={0} canonical=false os_minor_version={1}'.format(self.hostname, self.os_minor_version())) + return self.get_profiles('system_ids={0} canonical=false os_minor_version={1}'.format(self.inventory_id, self.os_minor_version())) def profile_union_by_ref_id(self, prioritized_profiles, merged_profiles): profiles = dict((p['attributes']['ref_id'], p) for p in merged_profiles) @@ -154,3 +153,11 @@ def _assert_oscap_rpms_exist(self): if len(rpm.strip().split('\n')) < len(REQUIRED_PACKAGES): logger.error('Missing required packages for compliance scanning. 
Please ensure the following packages are installed: {0}\n'.format(', '.join(REQUIRED_PACKAGES))) exit(constants.sig_kill_bad) + + def _get_inventory_id(self): + systems = self.conn._fetch_system_by_machine_id() + if len(systems) == 1 and 'id' in systems[0]: + return systems[0].get('id') + else: + logger.error('Failed to find system in Inventory') + exit(constants.sig_kill_bad) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 1b7ba0615..b6788a4ad 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -13,6 +13,7 @@ @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None, compressor='gz') def test_oscap_scan(config, assert_rpms): compliance_client = ComplianceClient(config) + compliance_client._get_inventory_id = lambda: '' compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo', 'tailored': False}}] compliance_client.get_profiles_matching_os = lambda: [] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' @@ -28,6 +29,7 @@ def test_oscap_scan(config, assert_rpms): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_missing_packages(config, call): compliance_client = ComplianceClient(config) + compliance_client._get_inventory_id = lambda: '' compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo'}}] compliance_client.get_profiles_matching_os = lambda: [] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' @@ -40,6 +42,7 @@ def test_missing_packages(config, call): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_errored_rpm_call(config, call): compliance_client = ComplianceClient(config) + compliance_client._get_inventory_id = lambda: '' compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo'}}] compliance_client.get_profiles_matching_os = lambda: [] compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' @@ -51,7 +54,7 @@ def test_errored_rpm_call(config, call): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_profiles(config): compliance_client = ComplianceClient(config) - compliance_client.hostname = 'foo' + compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_profiles('search string') == [{'attributes': 'data'}] compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) @@ -60,7 +63,7 @@ def test_get_profiles(config): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_profiles_no_profiles(config): compliance_client = ComplianceClient(config) - compliance_client.hostname = 'foo' + compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': []}))) assert compliance_client.get_profiles('search string') == [] compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) @@ -69,7 +72,7 @@ 
def test_get_profiles_no_profiles(config): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_profiles_error(config): compliance_client = ComplianceClient(config) - compliance_client.hostname = 'foo' + compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=500)) assert compliance_client.get_profiles('search string') == [] compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) @@ -78,20 +81,20 @@ def test_get_profiles_error(config): @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_initial_profiles(config): compliance_client = ComplianceClient(config) - compliance_client.hostname = 'foo' + compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_initial_profiles() == [{'attributes': 'data'}] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo canonical=false external=false'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false external=false'}) @patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_profiles_matching_os(config, linux_distro_mock): compliance_client = ComplianceClient(config) - compliance_client.hostname = 'foo' + compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_profiles_matching_os() == [{'attributes': 'data'}] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_names=foo canonical=false os_minor_version=5'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false os_minor_version=5'}) @patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) @@ -232,3 +235,18 @@ def test_build_oscap_command_append_tailoring_path(config): compliance_client = ComplianceClient(config) expected_command = 'oscap xccdf eval --profile aaaaa --tailoring-file tailoring_path --results output_path xml_sample' assert expected_command == compliance_client.build_oscap_command('aaaaa', 'xml_sample', 'output_path', 'tailoring_path') + + +@patch("insights.client.config.InsightsConfig") +def test__get_inventory_id(config): + compliance_client = ComplianceClient(config) + compliance_client.conn._fetch_system_by_machine_id = lambda: [] + with raises(SystemExit): + compliance_client._get_inventory_id() + + compliance_client.conn._fetch_system_by_machine_id = lambda: [{}] + with raises(SystemExit): + compliance_client._get_inventory_id() + + compliance_client.conn._fetch_system_by_machine_id = lambda: [{'id': '12345'}] + assert 
compliance_client._get_inventory_id() == '12345' From 9a2710164979ba8a9889b5df2d46552f8d94cf41 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 6 May 2021 13:28:44 -0400 Subject: [PATCH 410/892] refactor: pull os-release spec usage out into util func (#3049) * pull os-release spec usage out into util func * Update compliance tests for os_release_info Signed-off-by: Jeremy Crafts Co-authored-by: Andrew Kofink --- insights/client/apps/compliance/__init__.py | 4 +-- insights/client/connection.py | 28 ++------------- insights/client/utilities.py | 34 +++++++++++++++++++ insights/tests/client/apps/test_compliance.py | 24 ++++++------- 4 files changed, 51 insertions(+), 39 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 36eac2e1d..34b4a7c5f 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -2,8 +2,8 @@ from insights.client.archive import InsightsArchive from insights.client.connection import InsightsConnection from insights.client.constants import InsightsConstants as constants +from insights.client.utilities import os_release_info from logging import getLogger -from platform import linux_distribution from re import findall from sys import exit from insights.util.subproc import call @@ -91,7 +91,7 @@ def profile_union_by_ref_id(self, prioritized_profiles, merged_profiles): return list(profiles.values()) def os_release(self): - _, version, _ = linux_distribution() + _, version = os_release_info() return version def os_major_version(self): diff --git a/insights/client/connection.py b/insights/client/connection.py index 96aac8959..448c487b7 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -26,14 +26,12 @@ from .utilities import (determine_hostname, generate_machine_id, write_unregistered_file, - write_registered_file) + write_registered_file, + os_release_info) from .cert_auth import rhsmCertificate from .constants import InsightsConstants as constants from .url_cache import URLCache from insights import package_info -from insights.core.context import Context -from insights.parsers.os_release import OsRelease -from insights.parsers.redhat_release import RedhatRelease from insights.util.canonical_facts import get_canonical_facts warnings.simplefilter('ignore') @@ -209,27 +207,7 @@ def user_agent(self): python_version = "%s %s" % (platform.python_implementation(), platform.python_version()) - os_family = "Unknown" - os_release = "" - for p in ["/etc/os-release", "/etc/redhat-release"]: - try: - with open(p) as f: - data = f.readlines() - - ctx = Context(content=data, path=p, relative_path=p) - if p == "/etc/os-release": - rls = OsRelease(ctx) - os_family = rls.data.get("NAME") - os_release = rls.data.get("VERSION_ID") - elif p == "/etc/redhat-release": - rls = RedhatRelease(ctx) - os_family = rls.product - os_release = rls.version - break - except IOError: - continue - except Exception as e: - logger.warning("Failed to detect OS version: %s", e) + os_family, os_release = os_release_info() kernel_version = "%s %s" % (platform.system(), platform.release()) ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version}); {parent_process}".format( diff --git a/insights/client/utilities.py b/insights/client/utilities.py index d0524ba37..15adc091e 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -25,6 +25,10 @@ from .constants import 
InsightsConstants as constants from .collection_rules import InsightsUploadConf, load_yaml +from insights.core.context import Context +from insights.parsers.os_release import OsRelease +from insights.parsers.redhat_release import RedhatRelease + try: from insights_client.constants import InsightsConstants as wrapper_constants except ImportError: @@ -411,3 +415,33 @@ def get_parent_process(): return name else: return "unknown" + + +def os_release_info(): + ''' + Use insights-core to fetch the os-release or redhat-release info + + Returns a tuple of OS name and version + ''' + os_family = "Unknown" + os_release = "" + for p in ["/etc/os-release", "/etc/redhat-release"]: + try: + with open(p) as f: + data = f.readlines() + + ctx = Context(content=data, path=p, relative_path=p) + if p == "/etc/os-release": + rls = OsRelease(ctx) + os_family = rls.data.get("NAME") + os_release = rls.data.get("VERSION_ID") + elif p == "/etc/redhat-release": + rls = RedhatRelease(ctx) + os_family = rls.product + os_release = rls.version + break + except IOError: + continue + except Exception as e: + logger.warning("Failed to detect OS version: %s", e) + return (os_family, os_release) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index b6788a4ad..708006396 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -87,9 +87,9 @@ def test_get_initial_profiles(config): compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false external=false'}) -@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) -def test_get_profiles_matching_os(config, linux_distro_mock): +def test_get_profiles_matching_os(config, os_release_info_mock): compliance_client = ComplianceClient(config) compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) @@ -97,23 +97,23 @@ def test_get_profiles_matching_os(config, linux_distro_mock): compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false os_minor_version=5'}) -@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.config.InsightsConfig") -def test_os_release(config, linux_distro_mock): +def test_os_release(config, os_release_info_mock): compliance_client = ComplianceClient(config) assert compliance_client.os_release() == '6.5' -@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.config.InsightsConfig") -def test_os_minor_version(config, linux_distro_mock): +def test_os_minor_version(config, os_release_info_mock): compliance_client = ComplianceClient(config) assert compliance_client.os_minor_version() == '5' 
-@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.config.InsightsConfig") -def test_os_major_version(config, linux_distro_mock): +def test_os_major_version(config, os_release_info_mock): compliance_client = ComplianceClient(config) assert compliance_client.os_major_version() == '6' @@ -203,19 +203,19 @@ def test_tailored_file_is_downloaded_from_initial_profile_if_os_minor_version_is assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa'}}) is None -@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.config.InsightsConfig") -def test_tailored_file_is_not_downloaded_if_os_minor_version_mismatches(config, linux_distro_mock): +def test_tailored_file_is_not_downloaded_if_os_minor_version_mismatches(config, os_release_info_mock): compliance_client = ComplianceClient(config) compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa', 'os_minor_version': '2'}}) is None assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa', 'os_minor_version': '2'}}) is None -@patch("insights.client.apps.compliance.linux_distribution", return_value=(None, '6.5', None)) +@patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @patch("insights.client.apps.compliance.open", new_callable=mock_open) @patch("insights.client.config.InsightsConfig") -def test_tailored_file_is_downloaded_if_needed(config, call, linux_distro_mock): +def test_tailored_file_is_downloaded_if_needed(config, call, os_release_info_mock): compliance_client = ComplianceClient(config) compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" From 6694c6fdf79a01b9f9169bc64d324f964bc2b2e6 Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Thu, 6 May 2021 13:37:45 -0400 Subject: [PATCH 411/892] add ruamel.yaml to verifier (#3048) * Add ruamel.yaml to verifier Signed-off-by: Alec Cohan add __init__.py to ruamel module Signed-off-by: Alec Cohan update ruamel.yaml package to work with python 2.7 Signed-off-by: Alec Cohan * importlib error skip for 2.6 Signed-off-by: Jeremy Co-authored-by: Jeremy --- .../ansible/playbook_verifier/__init__.py | 25 +- .../contrib/ruamel_yaml/__init__.py | 0 .../ruamel.yaml-0.16.13-py3.8-nspkg.pth | 1 + .../ruamel.yaml-0.16.13.dist-info/INSTALLER | 1 + .../ruamel.yaml-0.16.13.dist-info/LICENSE | 21 + .../ruamel.yaml-0.16.13.dist-info/METADATA | 815 +++++++ .../ruamel.yaml-0.16.13.dist-info/RECORD | 66 + .../ruamel.yaml-0.16.13.dist-info/REQUESTED | 0 .../ruamel.yaml-0.16.13.dist-info/WHEEL | 6 + .../contrib/ruamel_yaml/ruamel/__init__.py | 0 .../ruamel_yaml/ruamel/yaml/__init__.py | 59 + .../contrib/ruamel_yaml/ruamel/yaml/anchor.py | 19 + .../ruamel_yaml/ruamel/yaml/comments.py | 1154 ++++++++++ .../contrib/ruamel_yaml/ruamel/yaml/compat.py | 324 +++ .../ruamel_yaml/ruamel/yaml/composer.py | 238 ++ 
.../ruamel/yaml/configobjwalker.py | 14 + .../ruamel_yaml/ruamel/yaml/constructor.py | 1806 +++++++++++++++ .../contrib/ruamel_yaml/ruamel/yaml/cyaml.py | 185 ++ .../contrib/ruamel_yaml/ruamel/yaml/dumper.py | 221 ++ .../ruamel_yaml/ruamel/yaml/emitter.py | 1696 ++++++++++++++ .../contrib/ruamel_yaml/ruamel/yaml/error.py | 311 +++ .../contrib/ruamel_yaml/ruamel/yaml/events.py | 157 ++ .../contrib/ruamel_yaml/ruamel/yaml/loader.py | 74 + .../contrib/ruamel_yaml/ruamel/yaml/main.py | 1534 +++++++++++++ .../contrib/ruamel_yaml/ruamel/yaml/nodes.py | 131 ++ .../contrib/ruamel_yaml/ruamel/yaml/parser.py | 802 +++++++ .../contrib/ruamel_yaml/ruamel/yaml/py.typed | 0 .../contrib/ruamel_yaml/ruamel/yaml/reader.py | 311 +++ .../ruamel_yaml/ruamel/yaml/representer.py | 1282 +++++++++++ .../ruamel_yaml/ruamel/yaml/resolver.py | 399 ++++ .../ruamel_yaml/ruamel/yaml/scalarbool.py | 51 + .../ruamel_yaml/ruamel/yaml/scalarfloat.py | 127 ++ .../ruamel_yaml/ruamel/yaml/scalarint.py | 130 ++ .../ruamel_yaml/ruamel/yaml/scalarstring.py | 156 ++ .../ruamel_yaml/ruamel/yaml/scanner.py | 1980 +++++++++++++++++ .../ruamel_yaml/ruamel/yaml/serializer.py | 240 ++ .../ruamel_yaml/ruamel/yaml/timestamp.py | 54 + .../contrib/ruamel_yaml/ruamel/yaml/tokens.py | 286 +++ .../contrib/ruamel_yaml/ruamel/yaml/util.py | 190 ++ .../client/apps/ansible/test_playbook.yml | 44 +- .../client/apps/test_playbook_verifier.py | 6 +- 41 files changed, 14891 insertions(+), 25 deletions(-) create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/__init__.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/REQUESTED create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/__init__.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py create mode 100644 
insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/py.typed create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py create mode 100644 insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index ba984419a..f02ef74f6 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -4,12 +4,13 @@ import requests import tempfile import pkgutil +import hashlib import insights.client.apps.ansible from logging import getLogger from distutils.version import LooseVersion from insights.client.utilities import get_version_info from insights.client.apps.ansible.playbook_verifier.contrib import gnupg -from insights.client.apps.ansible.playbook_verifier.contrib import oyaml as yaml +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel import yaml from insights.client.constants import InsightsConstants as constants __all__ = ("loadPlaybookYaml", "verify", "PlaybookVerificationError") @@ -21,6 +22,12 @@ logger = getLogger(__name__) +yaml = yaml.YAML(typ='rt') +yaml.indent(mapping=2, sequence=4, offset=2) +yaml.default_flow_style = False +yaml.preserve_quotes = True +yaml.width = 200 + class PlaybookVerificationError(Exception): """ @@ -39,6 +46,18 @@ def __str__(self): return self.message +def createSnippetHash(snippet): + """ + Function that creates and returns a hash of the snippet given to the function. 
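+ input: snippet (the playbook snippet to be serialized and hashed)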
+ output: snippetHash (bytes) + """ + snippetHash = hashlib.sha256() + serializedSnippet = str(snippet).encode("UTF-8") + snippetHash.update(serializedSnippet) + + return snippetHash.digest() + + def eggVersioningCheck(checkVersion): currentVersion = requests.get(VERSIONING_URL) currentVersion = currentVersion.text @@ -87,7 +106,7 @@ def excludeDynamicElements(snippet): def executeVerification(snippet, encodedSignature): gpg = gnupg.GPG(gnupghome=constants.insights_core_lib_dir) - serializedSnippet = bytes(yaml.dump(snippet, default_flow_style=False).encode("UTF-8")) + snippetHash = createSnippetHash(snippet) decodedSignature = base64.b64decode(encodedSignature) @@ -98,7 +117,7 @@ def executeVerification(snippet, encodedSignature): os.write(fd, decodedSignature) os.close(fd) - result = gpg.verify_data(fn, serializedSnippet) + result = gpg.verify_data(fn, snippetHash) os.unlink(fn) return result diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth new file mode 100644 index 000000000..68e19a260 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('ruamel',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('ruamel', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('ruamel', [os.path.dirname(p)])));m = m or sys.modules.setdefault('ruamel', types.ModuleType('ruamel'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER new file mode 100644 index 000000000..a1b589e38 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE new file mode 100644 index 000000000..3f65b07a8 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE @@ -0,0 +1,21 @@ + The MIT License (MIT) + + Copyright (c) 2014-2021 Anthon van der Neut, Ruamel bvba + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial 
portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA new file mode 100644 index 000000000..92fc1d490 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA @@ -0,0 +1,815 @@ +Metadata-Version: 2.1 +Name: ruamel.yaml +Version: 0.16.13 +Summary: ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order +Home-page: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree +Author: Anthon van der Neut +Author-email: a.van.der.neut@ruamel.eu +License: MIT license +Keywords: yaml 1.2 parser round-trip preserve quotes order config +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Classifier: Typing :: Typed +Description-Content-Type: text/x-rst +Requires-Dist: ruamel.yaml.clib (>=0.1.2) ; platform_python_implementation=="CPython" and python_version<"3.10" +Requires-Dist: ruamel.ordereddict ; platform_python_implementation=="CPython" and python_version<="2.7" +Provides-Extra: docs +Requires-Dist: ryd ; extra == 'docs' +Provides-Extra: jinja2 +Requires-Dist: ruamel.yaml.jinja2 (>=0.2) ; extra == 'jinja2' + + +ruamel.yaml +=========== + +``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python. + +:version: 0.16.13 +:updated: 2021-03-05 +:documentation: http://yaml.readthedocs.io +:repository: https://sourceforge.net/projects/ruamel-yaml/ +:pypi: https://pypi.org/project/ruamel.yaml/ + +*The 0.16.13 release is the last that will tested to be working on Python 2.7. +The 0.17 series will still be tested on Python 3.5, but the 0.18 will not. The +0.17 series will also stop support for the old PyYAML functions, so a `YAML()` instance +will need to be created.* + +*Please adjust your dependencies accordingly if necessary.* + + +Starting with version 0.15.0 the way YAML files are loaded and dumped +is changing. See the API doc for details. Currently existing +functionality will throw a warning before being changed/removed. +**For production systems you should pin the version being used with +``ruamel.yaml<=0.15``**. 
There might be bug fixes in the 0.14 series, +but new functionality is likely only to be available via the new API. + +If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop +me an email, preferably with some information on how you use the +package (or a link to bitbucket/github) and I'll keep you informed +when the status of the API is stable enough to make the transition. + +* `Overview `_ +* `Installing `_ +* `Basic Usage `_ +* `Details `_ +* `Examples `_ +* `API `_ +* `Differences with PyYAML `_ + +.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable + :target: https://yaml.readthedocs.org/en/stable + +.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge + :target: https://bestpractices.coreinfrastructure.org/projects/1128 + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw + :target: https://opensource.org/licenses/MIT + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw + :target: https://pypi.org/project/ruamel.yaml/ + +.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw + :target: https://pypi.org/project/oitnb/ + +.. image:: http://www.mypy-lang.org/static/mypy_badge.svg + :target: http://mypy-lang.org/ + +ChangeLog +========= + +.. should insert NEXT: at the beginning of line for next key (with empty line) + +0.16.13 (2021-03-05): + - fix for issue 359: could not update() CommentedMap with keyword arguments + (reported by `Steve Franchak `__) + - fix for issue 365: unable to dump mutated TimeStamp objects + (reported by Anton Akmerov `__) + - fix for issue 371: unable to addd comment without starting space + (reported by 'Mark Grandi `__) + - fix for issue 373: recursive call to walk_tree not preserving all params + (reported by `eulores `__) + - a None value in a flow-style sequence is now dumped as `null` instead + of `!!null ''` (reported by mcarans on + `StackOverlow `__) + +0.16.12 (2020-09-04): + - update links in doc + +0.16.11 (2020-09-03): + - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco + https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 ) + +0.16.10 (2020-02-12): + - (auto) updated image references in README to sourceforge + +0.16.9 (2020-02-11): + - update CHANGES + +0.16.8 (2020-02-11): + - update requirements so that ruamel.yaml.clib is installed for 3.8, + as it has become available (via manylinux builds) + +0.16.7 (2020-01-30): + - fix typchecking issue on TaggedScalar (reported by Jens Nielsen) + - fix error in dumping literal scalar in sequence with comments before element + (reported by `EJ Etherington `__) + +0.16.6 (2020-01-20): + - fix empty string mapping key roundtripping with preservation of quotes as `? ''` + (reported via email by Tomer Aharoni). 
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard + `__) + - adjust deprecation warning test for Hashable, as that no longer warns (reported + by `Jason Montleon `__) + +0.16.5 (2019-08-18): + - allow for ``YAML(typ=['unsafe', 'pytypes'])`` + +0.16.4 (2019-08-16): + - fix output of TAG directives with # (reported by `Thomas Smith + `__) + + +0.16.3 (2019-08-15): + - split construct_object + - change stuff back to keep mypy happy + - move setting of version based on YAML directive to scanner, allowing to + check for file version during TAG directive scanning + +0.16.2 (2019-08-15): + - preserve YAML and TAG directives on roundtrip, correctly output # + in URL for YAML 1.2 (both reported by `Thomas Smith + `__) + +0.16.1 (2019-08-08): + - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz + `__) + - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by + `Thomas Smith + `__) + +0.16.0 (2019-07-25): + - split of C source that generates .so file to ruamel.yaml.clib + - duplicate keys are now an error when working with the old API as well + +0.15.100 (2019-07-17): + - fixing issue with dumping deep-copied data from commented YAML, by + providing both the memo parameter to __deepcopy__, and by allowing + startmarks to be compared on their content (reported by `Theofilos + Petsios + `__) + +0.15.99 (2019-07-12): + - add `py.typed` to distribution, based on a PR submitted by + `Michael Crusoe + `__ + - merge PR 40 (also by Michael Crusoe) to more accurately specify + repository in the README (also reported in a misunderstood issue + some time ago) + +0.15.98 (2019-07-09): + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed + for Python 3.8.0b2 (reported by `John Vandenberg + `__) + +0.15.97 (2019-06-06): + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for + Python 3.8.0b1 + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for + Python 3.8.0a4 (reported by `Anthony Sottile + `__) + +0.15.96 (2019-05-16): + - fix failure to indent comments on round-trip anchored block style + scalars in block sequence (reported by `William Kimball + `__) + +0.15.95 (2019-05-16): + - fix failure to round-trip anchored scalars in block sequence + (reported by `William Kimball + `__) + - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18 + `__) + +0.15.94 (2019-04-23): + - fix missing line-break after end-of-file comments not ending in + line-break (reported by `Philip Thompson + `__) + +0.15.93 (2019-04-21): + - fix failure to parse empty implicit flow mapping key + - in YAML 1.1 plains scalars `y`, 'n', `Y`, and 'N' are now + correctly recognised as booleans and such strings dumped quoted + (reported by `Marcel Bollmann + `__) + +0.15.92 (2019-04-16): + - fix failure to parse empty implicit block mapping key (reported by + `Nolan W `__) + +0.15.91 (2019-04-05): + - allowing duplicate keys would not work for merge keys (reported by mamacdon on + `StackOverflow `__ + +0.15.90 (2019-04-04): + - fix issue with updating `CommentedMap` from list of tuples (reported by + `Peter Henry `__) + +0.15.89 (2019-02-27): + - fix for items with flow-mapping in block sequence output on single line + (reported by `Zahari Dim `__) + - fix for safe dumping erroring in creation of representereror when dumping namedtuple + (reported and solution by `Jaakko Kantojärvi `__) + +0.15.88 (2019-02-12): + - fix inclusing of python code from the subpackage data (containing extra tests, + reported by 
`Florian Apolloner `__) + +0.15.87 (2019-01-22): + - fix problem with empty lists and the code to reinsert merge keys (reported via email + by Zaloo) + +0.15.86 (2019-01-16): + - reinsert merge key in its old position (reported by grumbler on + `StackOverflow `__) + - fix for issue with non-ASCII anchor names (reported and fix + provided by Dandaleon Flux via email) + - fix for issue when parsing flow mapping value starting with colon (in pure Python only) + (reported by `FichteFoll `__) + +0.15.85 (2019-01-08): + - the types used by ``SafeConstructor`` for mappings and sequences can + now by set by assigning to ``XXXConstructor.yaml_base_dict_type`` + (and ``..._list_type``), preventing the need to copy two methods + with 50+ lines that had ``var = {}`` hardcoded. (Implemented to + help solve an feature request by `Anthony Sottile + `__ in an easier way) + +0.15.84 (2019-01-07): + - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc. + (reported by `Anthony Sottile `__) + +0.15.83 (2019-01-02): + - fix for bug in roundtripping aliases used as key (reported via email by Zaloo) + +0.15.82 (2018-12-28): + - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors + do not need a referring alias for these (reported by + `Alex Harvey `__) + - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo + `__) + +0.15.81 (2018-12-06): + - fix issue dumping methods of metaclass derived classes (reported and fix provided + by `Douglas Raillard `__) + +0.15.80 (2018-11-26): + - fix issue emitting BEL character when round-tripping invalid folded input + (reported by Isaac on `StackOverflow `__) + +0.15.79 (2018-11-21): + - fix issue with anchors nested deeper than alias (reported by gaFF on + `StackOverflow `__) + +0.15.78 (2018-11-15): + - fix setup issue for 3.8 (reported by `Sidney Kuyateh + `__) + +0.15.77 (2018-11-09): + - setting `yaml.sort_base_mapping_type_on_output = False`, will prevent + explicit sorting by keys in the base representer of mappings. Roundtrip + already did not do this. Usage only makes real sense for Python 3.6+ + (feature request by `Sebastian Gerber `__). + - implement Python version check in YAML metadata in ``_test/test_z_data.py`` + +0.15.76 (2018-11-01): + - fix issue with empty mapping and sequence loaded as flow-style + (mapping reported by `Min RK `__, sequence + by `Maged Ahmed `__) + +0.15.75 (2018-10-27): + - fix issue with single '?' scalar (reported by `Terrance + `__) + - fix issue with duplicate merge keys (prompted by `answering + `__ a + `StackOverflow question `__ + by `math `__) + +0.15.74 (2018-10-17): + - fix dropping of comment on rt before sequence item that is sequence item + (reported by `Thorsten Kampe `__) + +0.15.73 (2018-10-16): + - fix irregular output on pre-comment in sequence within sequence (reported + by `Thorsten Kampe `__) + - allow non-compact (i.e. next line) dumping sequence/mapping within sequence. + +0.15.72 (2018-10-06): + - fix regression on explicit 1.1 loading with the C based scanner/parser + (reported by `Tomas Vavra `__) + +0.15.71 (2018-09-26): + - some of the tests now live in YAML files in the + `yaml.data `__ repository. + ``_test/test_z_data.py`` processes these. 
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by + `Dan Helfman `__) + - fix regression with non-root literal scalars that needed indent indicator + (reported by `Clark Breyman `__) + - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3 + (reported by `Douglas RAILLARD `__) + - issue with self-referring object creation + (reported and fix by `Douglas RAILLARD `__) + +0.15.70 (2018-09-21): + - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list, + reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON + dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``. + (Proposed by `Stuart Berg `__, with feedback + from `blhsing `__ on + `StackOverflow `__) + +0.15.69 (2018-09-20): + - fix issue with dump_all gobbling end-of-document comments on parsing + (reported by `Pierre B. `__) + +0.15.68 (2018-09-20): + - fix issue with parsabel, but incorrect output with nested flow-style sequences + (reported by `Dougal Seeley `__) + - fix issue with loading Python objects that have __setstate__ and recursion in parameters + (reported by `Douglas RAILLARD `__) + +0.15.67 (2018-09-19): + - fix issue with extra space inserted with non-root literal strings + (Issue reported and PR with fix provided by + `Naomi Seyfer `__.) + +0.15.66 (2018-09-07): + - fix issue with fold indicating characters inserted in safe_load-ed folded strings + (reported by `Maximilian Hils `__). + +0.15.65 (2018-09-07): + - fix issue #232 revert to throw ParserError for unexcpected ``]`` + and ``}`` instead of IndexError. (Issue reported and PR with fix + provided by `Naomi Seyfer `__.) + - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email) + - indent root level literal scalars that have directive or document end markers + at the beginning of a line + +0.15.64 (2018-08-30): + - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]`` + - single entry mappings in flow sequences now written by default without braces, + set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force + getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]`` + - fix issue when roundtripping floats starting with a dot such as ``.5`` + (reported by `Harrison Gregg `__) + +0.15.63 (2018-08-29): + - small fix only necessary for Windows users that don't use wheels. + +0.15.62 (2018-08-29): + - C based reader/scanner & emitter now allow setting of 1.2 as YAML version. + ** The loading/dumping is still YAML 1.1 code**, so use the common subset of + YAML 1.2 and 1.1 (reported by `Ge Yang `__) + +0.15.61 (2018-08-23): + - support for round-tripping folded style scalars (initially requested + by `Johnathan Viduchinsky `__) + - update of C code + - speed up of scanning (~30% depending on the input) + +0.15.60 (2018-08-18): + - again allow single entry map in flow sequence context (reported by + `Lee Goolsbee `__) + - cleanup for mypy + - spurious print in library (reported by + `Lele Gaifax `__), now automatically checked + +0.15.59 (2018-08-17): + - issue with C based loader and leading zeros (reported by + `Tom Hamilton Stubber `__) + +0.15.58 (2018-08-17): + - simple mappings can now be used as keys when round-tripping:: + + {a: 1, b: 2}: hello world + + although using the obvious operations (del, popitem) on the key will + fail, you can mutilate it by going through its attributes. 
If you load the + above YAML in `d`, then changing the value is cumbersome: + + d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"} + + and changing the key even more so: + + d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop( + CommentedKeyMap([('a', 1), ('b', 2)])) + + (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result + in a different order, of the keys of the key, in the output) + - check integers to dump with 1.2 patterns instead of 1.1 (reported by + `Lele Gaifax `__) + + +0.15.57 (2018-08-15): + - Fix that CommentedSeq could no longer be used in adding or do a sort + (reported by `Christopher Wright `__) + +0.15.56 (2018-08-15): + - fix issue with ``python -O`` optimizing away code (reported, and detailed cause + pinpointed, by `Alex Grönholm `__) + +0.15.55 (2018-08-14): + - unmade ``CommentedSeq`` a subclass of ``list``. It is now + indirectly a subclass of the standard + ``collections.abc.MutableSequence`` (without .abc if you are + still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'), + list)``) anywhere in your code replace ``list`` with + ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of + the abstract baseclass ``ruamel.yaml.compat.MutableScliceableSequence``, + with the result that *(extended) slicing is supported on + ``CommentedSeq``*. + (reported by `Stuart Berg `__) + - duplicate keys (or their values) with non-ascii now correctly + report in Python2, instead of raising a Unicode error. + (Reported by `Jonathan Pyle `__) + +0.15.54 (2018-08-13): + - fix issue where a comment could pop-up twice in the output (reported by + `Mike Kazantsev `__ and by + `Nate Peterson `__) + - fix issue where JSON object (mapping) without spaces was not parsed + properly (reported by `Marc Schmidt `__) + - fix issue where comments after empty flow-style mappings were not emitted + (reported by `Qinfench Chen `__) + +0.15.53 (2018-08-12): + - fix issue with flow style mapping with comments gobbled newline (reported + by `Christopher Lambert `__) + - fix issue where single '+' under YAML 1.2 was interpreted as + integer, erroring out (reported by `Jethro Yu + `__) + +0.15.52 (2018-08-09): + - added `.copy()` mapping representation for round-tripping + (``CommentedMap``) to fix incomplete copies of merged mappings + (reported by `Will Richards + `__) + - Also unmade that class a subclass of ordereddict to solve incorrect behaviour + for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by + `Tim Olsson `__ and + `Filip Matzner `__) + +0.15.51 (2018-08-08): + - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard + `__) + - Fix spurious trailing white-space caused when the comment start + column was no longer reached and there was no actual EOL comment + (e.g. following empty line) and doing substitutions, or when + quotes around scalars got dropped. (reported by `Thomas Guillet + `__) + +0.15.50 (2018-08-05): + - Allow ``YAML()`` as a context manager for output, thereby making it much easier + to generate multi-documents in a stream. + - Fix issue with incorrect type information for `load()` and `dump()` (reported + by `Jimbo Jim `__) + +0.15.49 (2018-08-05): + - fix preservation of leading newlines in root level literal style scalar, + and preserve comment after literal style indicator (``| # some comment``) + Both needed for round-tripping multi-doc streams in + `ryd `__. 
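+
+    (A minimal sketch of the ``YAML()`` context-manager output mentioned in
+    the 0.15.50 entry above; the documents dumped here are illustrative only::
+
+      import sys
+      from ruamel.yaml import YAML
+
+      # each dump() writes one document to the stream passed as output;
+      # explicit_start puts a '---' marker before every document
+      with YAML(output=sys.stdout) as yaml:
+          yaml.explicit_start = True
+          yaml.dump({'a': 1})
+          yaml.dump({'b': 2})
+
+    Run as-is this prints two ``---``-separated documents, so multi-document
+    streams need no manual concatenation.)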
+ +0.15.48 (2018-08-03): + - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity + +0.15.47 (2018-07-31): + - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by + `Roman Sichnyi `__) + + +0.15.46 (2018-07-29): + - fixed DeprecationWarning for importing from ``collections`` on 3.7 + (issue 210, reported by `Reinoud Elhorst + `__). It was `difficult to find + why tox/pytest did not report + `__ and as time + consuming to actually `fix + `__ the tests. + +0.15.45 (2018-07-26): + - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration + (PR provided by `Zachary Buhman `__, + also reported by `Steven Hiscocks `__. + +0.15.44 (2018-07-14): + - Correct loading plain scalars consisting of numerals only and + starting with `0`, when not explicitly specifying YAML version + 1.1. This also fixes the issue about dumping string `'019'` as + plain scalars as reported by `Min RK + `__, that prompted this chance. + +0.15.43 (2018-07-12): + - merge PR33: Python2.7 on Windows is narrow, but has no + ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by + `Marcel Bargull `__) + - ``register_class()`` now returns class (proposed by + `Mike Nerone `__} + +0.15.42 (2018-07-01): + - fix regression showing only on narrow Python 2.7 (py27mu) builds + (with help from + `Marcel Bargull `__ and + `Colm O'Connor `__). + - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as + 3.4/3.5/3.6/3.7/pypy + +0.15.41 (2018-06-27): + - add detection of C-compile failure (investigation prompted by + `StackOverlow `__ by + `Emmanuel Blot `__), + which was removed while no longer dependent on ``libyaml``, C-extensions + compilation still needs a compiler though. + +0.15.40 (2018-06-18): + - added links to landing places as suggested in issue 190 by + `KostisA `__ + - fixes issue #201: decoding unicode escaped tags on Python2, reported + by `Dan Abolafia `__ + +0.15.39 (2018-06-17): + - merge PR27 improving package startup time (and loading when regexp not + actually used), provided by + `Marcel Bargull `__ + +0.15.38 (2018-06-13): + - fix for losing precision when roundtripping floats by + `Rolf Wojtech `__ + - fix for hardcoded dir separator not working for Windows by + `Nuno André `__ + - typo fix by `Andrey Somov `__ + +0.15.37 (2018-03-21): + - again trying to create installable files for 187 + +0.15.36 (2018-02-07): + - fix issue 187, incompatibility of C extension with 3.7 (reported by + Daniel Blanchard) + +0.15.35 (2017-12-03): + - allow ``None`` as stream when specifying ``transform`` parameters to + ``YAML.dump()``. + This is useful if the transforming function doesn't return a meaningful value + (inspired by `StackOverflow `__ by + `rsaw `__). + +0.15.34 (2017-09-17): + - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka) + +0.15.33 (2017-08-31): + - support for "undefined" round-tripping tagged scalar objects (in addition to + tagged mapping object). Inspired by a use case presented by Matthew Patton + on `StackOverflow `__. + - fix issue 148: replace cryptic error message when using !!timestamp with an + incorrectly formatted or non- scalar. Reported by FichteFoll. + +0.15.32 (2017-08-21): + - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for + for ``typ='rt'``. 
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float`` + (reported by jan.brezina@tul.cz) + +0.15.31 (2017-08-15): + - fix Comment dumping + +0.15.30 (2017-08-14): + - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}`` + (reported on `StackOverflow `__ by + `mjalkio `_ + +0.15.29 (2017-08-14): + - fix issue #51: different indents for mappings and sequences (reported by + Alex Harvey) + - fix for flow sequence/mapping as element/value of block sequence with + sequence-indent minus dash-offset not equal two. + +0.15.28 (2017-08-13): + - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron) + +0.15.27 (2017-08-13): + - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambigious + (reported by nowox) + - fix lists within lists which would make comments disappear + +0.15.26 (2017-08-10): + - fix for disappearing comment after empty flow sequence (reported by + oit-tzhimmash) + +0.15.25 (2017-08-09): + - fix for problem with dumping (unloaded) floats (reported by eyenseo) + +0.15.24 (2017-08-09): + - added ScalarFloat which supports roundtripping of 23.1, 23.100, + 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas + are not preserved/supported (yet, is anybody using that?). + - (finally) fixed longstanding issue 23 (reported by `Antony Sottile + `__), now handling comment between block + mapping key and value correctly + - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML + provided by Cecil Curry) + - allow setting of boolean representation (`false`, `true`) by using: + ``yaml.boolean_representation = [u'False', u'True']`` + +0.15.23 (2017-08-01): + - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina) + +0.15.22 (2017-07-28): + - fix for round_tripping singe excl. mark tags doubling (reported and fix by Jan Brezina) + +0.15.21 (2017-07-25): + - fix for writing unicode in new API, (reported on + `StackOverflow `__ + +0.15.20 (2017-07-23): + - wheels for windows including C extensions + +0.15.19 (2017-07-13): + - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject. + - fix for problem using load_all with Path() instance + - fix for load_all in combination with zero indent block style literal + (``pure=True`` only!) 
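+
+    (A minimal sketch of the ``yaml_object`` decorator added in 0.15.19; the
+    ``User`` class and its ``!User`` tag are illustrative only::
+
+      import sys
+      from ruamel.yaml import YAML, yaml_object
+
+      yaml = YAML()
+
+      @yaml_object(yaml)          # registers User on this YAML() instance
+      class User(object):
+          yaml_tag = u'!User'
+
+          def __init__(self, name):
+              self.name = name
+
+      yaml.dump([User('anthon')], sys.stdout)   # emits a '!User' tagged mapping
+
+    Per-instance registration like this is what replaces the global
+    ``YAMLObject`` mechanism of the old API.)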
+ +0.15.18 (2017-07-04): + - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag + constructor for `including YAML files in a YAML file + `__ + - some documentation improvements + - trigger of doc build on new revision + +0.15.17 (2017-07-03): + - support for Unicode supplementary Plane **output** + (input was already supported, triggered by + `this `__ Stack Overflow Q&A) + +0.15.16 (2017-07-01): + - minor typing issues (reported and fix provided by + `Manvendra Singh `__ + - small doc improvements + +0.15.15 (2017-06-27): + - fix for issue 135, typ='safe' not dumping in Python 2.7 + (reported by Andrzej Ostrowski `__) + +0.15.14 (2017-06-25): + - fix for issue 133, in setup.py: change ModuleNotFoundError to + ImportError (reported and fix by + `Asley Drake `__) + +0.15.13 (2017-06-24): + - suppress duplicate key warning on mappings with merge keys (reported by + Cameron Sweeney) + +0.15.12 (2017-06-24): + - remove fatal dependency of setup.py on wheel package (reported by + Cameron Sweeney) + +0.15.11 (2017-06-24): + - fix for issue 130, regression in nested merge keys (reported by + `David Fee `__) + +0.15.10 (2017-06-23): + - top level PreservedScalarString not indented if not explicitly asked to + - remove Makefile (not very useful anyway) + - some mypy additions + +0.15.9 (2017-06-16): + - fix for issue 127: tagged scalars were always quoted and seperated + by a newline when in a block sequence (reported and largely fixed by + `Tommy Wang `__) + +0.15.8 (2017-06-15): + - allow plug-in install via ``install ruamel.yaml[jinja2]`` + +0.15.7 (2017-06-14): + - add plug-in mechanism for load/dump pre resp. post-processing + +0.15.6 (2017-06-10): + - a set() with duplicate elements now throws error in rt loading + - support for toplevel column zero literal/folded scalar in explicit documents + +0.15.5 (2017-06-08): + - repeat `load()` on a single `YAML()` instance would fail. + +0.15.4 (2017-06-08): + - `transform` parameter on dump that expects a function taking a + string and returning a string. This allows transformation of the output + before it is written to stream. This forces creation of the complete output in memory! + - some updates to the docs + +0.15.3 (2017-06-07): + - No longer try to compile C extensions on Windows. Compilation can be forced by setting + the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value + before starting the `pip install`. + +0.15.2 (2017-06-07): + - update to conform to mypy 0.511: mypy --strict + +0.15.1 (2017-06-07): + - `duplicate keys `__ + in mappings generate an error (in the old API this change generates a warning until 0.16) + - dependecy on ruamel.ordereddict for 2.7 now via extras_require + +0.15.0 (2017-06-04): + - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all + load/dump functions + - passing in a non-supported object (e.g. a string) as "stream" will result in a + much more meaningful YAMLStreamError. + - assigning a normal string value to an existing CommentedMap key or CommentedSeq + element will result in a value cast to the previous value's type if possible. 
+ - added ``YAML`` class for new API + +0.14.12 (2017-05-14): + - fix for issue 119, deepcopy not returning subclasses (reported and PR by + Constantine Evans ) + +0.14.11 (2017-05-01): + - fix for issue 103 allowing implicit documents after document end marker line (``...``) + in YAML 1.2 + +0.14.10 (2017-04-26): + - fix problem with emitting using cyaml + +0.14.9 (2017-04-22): + - remove dependency on ``typing`` while still supporting ``mypy`` + (http://stackoverflow.com/a/43516781/1307905) + - fix unclarity in doc that stated 2.6 is supported (reported by feetdust) + +0.14.8 (2017-04-19): + - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards + on all files (reported by `João Paulo Magalhães `__) + +0.14.7 (2017-04-18): + - round trip of integers (decimal, octal, hex, binary) now preserve + leading zero(s) padding and underscores. Underscores are presumed + to be at regular distances (i.e. ``0o12_345_67`` dumps back as + ``0o1_23_45_67`` as the space from the last digit to the + underscore before that is the determining factor). + +0.14.6 (2017-04-14): + - binary, octal and hex integers are now preserved by default. This + was a known deficiency. Working on this was prompted by the issue report (112) + from devnoname120, as well as the additional experience with `.replace()` + on `scalarstring` classes. + - fix issues 114: cannot install on Buildozer (reported by mixmastamyk). + Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check. + +0.14.5 (2017-04-04): + - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi) + - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString + would give back "normal" string (reported by sandres23) + +0.14.4 (2017-03-31): + - fix readme + +0.14.3 (2017-03-31): + - fix for 0o52 not being a string in YAML 1.1 (reported on + `StackOverflow Q&A 43138503 `__ by + `Frank D `__) + +0.14.2 (2017-03-23): + - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch) + +0.14.1 (2017-03-22): + - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré) + +0.14.0 (2017-03-21): + - updates for mypy --strict + - preparation for moving away from inheritance in Loader and Dumper, calls from e.g. + the Representer to the Serializer.serialize() are now done via the attribute + .serializer.serialize(). 
Usage of .serialize() outside of Serializer will be
+ deprecated soon
+ - some extra tests on main.py functions
+
+----
+
+For older changes see the file
+`CHANGES `_
+
+
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD
new file mode 100644
index 000000000..3158ffc8f
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD
@@ -0,0 +1,66 @@
+ruamel.yaml-0.16.13-py3.8-nspkg.pth,sha256=REN23ka76qAVtiuuP-WSrHAD4leicUsFB_AVCDfRe8U,539
+ruamel.yaml-0.16.13.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ruamel.yaml-0.16.13.dist-info/LICENSE,sha256=wjyOB0soSsZk6bvkLuDrECh_0MViEw8Wlpb0UqCqVIU,1121
+ruamel.yaml-0.16.13.dist-info/METADATA,sha256=aO-BSNc5uLDwa1bC02EcSHf_Fr1gyhCL51tA98ZSd5g,36293
+ruamel.yaml-0.16.13.dist-info/RECORD,,
+ruamel.yaml-0.16.13.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ruamel.yaml-0.16.13.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+ruamel.yaml-0.16.13.dist-info/namespace_packages.txt,sha256=lu5ar9ilvyS03jNvS5x9I0_3NwCKkvIlY2k0QH9AArk,7
+ruamel.yaml-0.16.13.dist-info/top_level.txt,sha256=lu5ar9ilvyS03jNvS5x9I0_3NwCKkvIlY2k0QH9AArk,7
+ruamel/yaml/__init__.py,sha256=xElolpcdkbJ9aVecGFN3lSCt3ERtN0Fpp6PiOckm_w8,2160
+ruamel/yaml/__pycache__/__init__.cpython-36.pyc,,
+ruamel/yaml/__pycache__/anchor.cpython-36.pyc,,
+ruamel/yaml/__pycache__/comments.cpython-36.pyc,,
+ruamel/yaml/__pycache__/compat.cpython-36.pyc,,
+ruamel/yaml/__pycache__/composer.cpython-36.pyc,,
+ruamel/yaml/__pycache__/configobjwalker.cpython-36.pyc,,
+ruamel/yaml/__pycache__/constructor.cpython-36.pyc,,
+ruamel/yaml/__pycache__/cyaml.cpython-36.pyc,,
+ruamel/yaml/__pycache__/dumper.cpython-36.pyc,,
+ruamel/yaml/__pycache__/emitter.cpython-36.pyc,,
+ruamel/yaml/__pycache__/error.cpython-36.pyc,,
+ruamel/yaml/__pycache__/events.cpython-36.pyc,,
+ruamel/yaml/__pycache__/loader.cpython-36.pyc,,
+ruamel/yaml/__pycache__/main.cpython-36.pyc,,
+ruamel/yaml/__pycache__/nodes.cpython-36.pyc,,
+ruamel/yaml/__pycache__/parser.cpython-36.pyc,,
+ruamel/yaml/__pycache__/reader.cpython-36.pyc,,
+ruamel/yaml/__pycache__/representer.cpython-36.pyc,,
+ruamel/yaml/__pycache__/resolver.cpython-36.pyc,,
+ruamel/yaml/__pycache__/scalarbool.cpython-36.pyc,,
+ruamel/yaml/__pycache__/scalarfloat.cpython-36.pyc,,
+ruamel/yaml/__pycache__/scalarint.cpython-36.pyc,,
+ruamel/yaml/__pycache__/scalarstring.cpython-36.pyc,,
+ruamel/yaml/__pycache__/scanner.cpython-36.pyc,,
+ruamel/yaml/__pycache__/serializer.cpython-36.pyc,,
+ruamel/yaml/__pycache__/timestamp.cpython-36.pyc,,
+ruamel/yaml/__pycache__/tokens.cpython-36.pyc,,
+ruamel/yaml/__pycache__/util.cpython-36.pyc,,
+ruamel/yaml/anchor.py,sha256=nuwuT1qRhXm1qw8sGrkJXyS83Z1V2y8s3CfBcVHOcFw,500
+ruamel/yaml/comments.py,sha256=-alQQy-DkutBSleoccs3fsjYnOhmNMYAX4VlTpvOL4k,35198
+ruamel/yaml/compat.py,sha256=b7Oo6_9etUsHTOwJYwqbtdis0PAgfLPX9_RPCp6bSY4,8720
+ruamel/yaml/composer.py,sha256=1Qq_e2UHJ7kg0hdFlZysckudMNCL9AbPxpafM208LqU,8292
+ruamel/yaml/configobjwalker.py,sha256=ceGXcllWyXuj3ZMlx250qcyKtWEQFCZzHv2U0zxGbGk,342
+ruamel/yaml/constructor.py,sha256=CfrevtL518frqAnq7UFstLeS-WoChtC_W7lxVXc6MtE,70520
+ruamel/yaml/cyaml.py,sha256=D7lSKxk_eJf4V4A1wlc7a7h_XGuKA_1x4lKD-cce3g8,6584
+ruamel/yaml/dumper.py,sha256=mneJV-_kKccjquDOVo4TGVpsx7w6bPadp9sw2h5WkLw,6625
+ruamel/yaml/emitter.py,sha256=L0SrncZ7rKWuwPfV3peJgfnB7QDyOAAKB1oXKi5F_T8,64430
+ruamel/yaml/error.py,sha256=4uu2Nzj8h8lP59tMJZA5g7HAHQosscj-TU6ihCh6gtg,8979
+ruamel/yaml/events.py,sha256=0WBCPgpFzv4MKDN7ijApePTP58bxzTNM7Me1Hi0HA8g,3902
+ruamel/yaml/loader.py,sha256=Ke8uCiUocDZcooIzB7-GhqywIteVnIQ3hhG9qDLihQk,2951
+ruamel/yaml/main.py,sha256=lAjb9ovyGlasu5x5DriDM9YFeq-ILtHepZoVIdVfUcA,54126
+ruamel/yaml/nodes.py,sha256=KglOPI3ex9RvVCOm7CPtkPkdeYQXKLYbd2Ji_siaDJ0,3716
+ruamel/yaml/parser.py,sha256=dkBWBKfuVxuuUd7yFNR1emYz7AMGR-Ratlz8uxCVXxI,33245
+ruamel/yaml/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ruamel/yaml/reader.py,sha256=qu3kzrVpxDxvTyOyFjFUTbmq__hBauNFw5HTOSYJTCY,10876
+ruamel/yaml/representer.py,sha256=-AQjn4j6gxfxht8yIeoRdQuUkai4EHVsPfQKHukrXlw,48729
+ruamel/yaml/resolver.py,sha256=k8SuJkqeomSc_fEKJivVRaO0E19cLTZN1yTlsLZk0c0,15341
+ruamel/yaml/scalarbool.py,sha256=hob48OhRenryWj5HZp4500KUn7OXnxsadS5trKMPEok,1531
+ruamel/yaml/scalarfloat.py,sha256=I-WILi1s_-XfW3hZy_FrMeZavJzNx0caZ7fICm5Akzw,4406
+ruamel/yaml/scalarint.py,sha256=vhHYXeOsevwb6S99Xgt9Qweu-OkOjFWVD3HC3HuYO8s,4462
+ruamel/yaml/scalarstring.py,sha256=xJnrp7aUbI4l_nJIF1C__J59h-71bM5b2uIXln3x-Fs,4536
+ruamel/yaml/scanner.py,sha256=f_QQBVGlp1ZU0U172s31Ry8a2Ao1frXNKtbPNlY0wqA,72192
+ruamel/yaml/serializer.py,sha256=waa4VLbKgacMiwYVhgbaK-EmRAygEvBEC7aPReWujL0,8412
+ruamel/yaml/timestamp.py,sha256=c07UAzB4HcTZqC4NADL-gLVCOa9byI8gqJhIYk9UbtQ,1792
+ruamel/yaml/tokens.py,sha256=z1gPCgyz7dhdBdKKK3UPTw_EAwELaPRRExMa9KIV6q8,7471
+ruamel/yaml/util.py,sha256=B_SnRV9VV7OKLAVolZqepFLqAhnpJRWNrgdrKDkvEao,6127
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/REQUESTED b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/REQUESTED
new file mode 100644
index 000000000..e69de29bb
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL
new file mode 100644
index 000000000..ef99c6cf3
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py
new file mode 100644
index 000000000..ae058138a
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+if False:  # MYPY
+    from typing import Dict, Any  # NOQA
+
+_package_data = dict(
+    full_package_name='ruamel.yaml',
+    version_info=(0, 16, 13),
+    __version__='0.16.13',
+    author='Anthon van der Neut',
+    author_email='a.van.der.neut@ruamel.eu',
+    description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order',  # NOQA
+    entry_points=None,
+    since=2014,
+    
extras_require={
+        ':platform_python_implementation=="CPython" and python_version<="2.7"': ['ruamel.ordereddict'],  # NOQA
+        ':platform_python_implementation=="CPython" and python_version<"3.10"': ['ruamel.yaml.clib>=0.1.2'],  # NOQA
+        'jinja2': ['ruamel.yaml.jinja2>=0.2'],
+        'docs': ['ryd'],
+    },
+    classifiers=[
+        'Programming Language :: Python :: 2.7',
+        'Programming Language :: Python :: 3.5',
+        'Programming Language :: Python :: 3.6',
+        'Programming Language :: Python :: 3.7',
+        'Programming Language :: Python :: 3.8',
+        'Programming Language :: Python :: Implementation :: CPython',
+        'Programming Language :: Python :: Implementation :: PyPy',
+        'Programming Language :: Python :: Implementation :: Jython',
+        'Topic :: Software Development :: Libraries :: Python Modules',
+        'Topic :: Text Processing :: Markup',
+        'Typing :: Typed',
+    ],
+    keywords='yaml 1.2 parser round-trip preserve quotes order config',
+    read_the_docs='yaml',
+    supported=[(2, 7), (3, 5)],  # minimum
+    tox=dict(
+        env='*',  # remove 'pn', no longer test narrow Python 2.7 for unicode patterns and PyPy
+        deps='ruamel.std.pathlib',
+        fl8excl='_test/lib',
+    ),
+    universal=True,
+    rtfd='yaml',
+)  # type: Dict[Any, Any]
+
+
+version_info = _package_data['version_info']
+__version__ = _package_data['__version__']
+
+try:
+    from .cyaml import *  # NOQA
+
+    __with_libyaml__ = True
+except (ImportError, ValueError):  # for Jython
+    __with_libyaml__ = False
+
+from ...ruamel.yaml.main import *  # NOQA
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py
new file mode 100644
index 000000000..d70212603
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py
@@ -0,0 +1,19 @@
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Optional, Iterator  # NOQA
+
+anchor_attrib = '_yaml_anchor'
+
+
+class Anchor(object):
+    __slots__ = 'value', 'always_dump'
+    attrib = anchor_attrib
+
+    def __init__(self):
+        # type: () -> None
+        self.value = None
+        self.always_dump = False
+
+    def __repr__(self):
+        # type: () -> Any
+        ad = ', (always dump)' if self.always_dump else ""
+        return 'Anchor({!r}{})'.format(self.value, ad)
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py
new file mode 100644
index 000000000..070597c2d
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py
@@ -0,0 +1,1154 @@
+# coding: utf-8
+
+from __future__ import absolute_import, print_function
+
+"""
+stuff to deal with comments and formatting on dict/list/ordereddict/set
+these are not really related, formatting could be factored out as
+a separate base
+"""
+
+import sys
+import copy
+
+
+from ...ruamel.yaml.compat import ordereddict  # type: ignore
+from ...ruamel.yaml.compat import PY2, string_types, MutableSliceableSequence
+from ...ruamel.yaml.scalarstring import ScalarString
+from ...ruamel.yaml.anchor import Anchor
+
+if PY2:
+    from collections import MutableSet, Sized, Set, Mapping
+else:
+    from collections.abc import MutableSet, Sized, Set, Mapping
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Optional, Iterator  # NOQA
+
+# fmt: off
+__all__ = ['CommentedSeq', 'CommentedKeySeq',
+           'CommentedMap', 'CommentedOrderedMap',
+           'CommentedSet', 
'comment_attrib', 'merge_attrib'] +# fmt: on + +comment_attrib = '_yaml_comment' +format_attrib = '_yaml_format' +line_col_attrib = '_yaml_line_col' +merge_attrib = '_yaml_merge' +tag_attrib = '_yaml_tag' + + +class Comment(object): + # sys.getsize tested the Comment objects, __slots__ makes them bigger + # and adding self.end did not matter + __slots__ = 'comment', '_items', '_end', '_start' + attrib = comment_attrib + + def __init__(self): + # type: () -> None + self.comment = None # [post, [pre]] + # map key (mapping/omap/dict) or index (sequence/list) to a list of + # dict: post_key, pre_key, post_value, pre_value + # list: pre item, post item + self._items = {} # type: Dict[Any, Any] + # self._start = [] # should not put these on first item + self._end = [] # type: List[Any] # end of document comments + + def __str__(self): + # type: () -> str + if bool(self._end): + end = ',\n end=' + str(self._end) + else: + end = "" + return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end) + + @property + def items(self): + # type: () -> Any + return self._items + + @property + def end(self): + # type: () -> Any + return self._end + + @end.setter + def end(self, value): + # type: (Any) -> None + self._end = value + + @property + def start(self): + # type: () -> Any + return self._start + + @start.setter + def start(self, value): + # type: (Any) -> None + self._start = value + + +# to distinguish key from None +def NoComment(): + # type: () -> None + pass + + +class Format(object): + __slots__ = ('_flow_style',) + attrib = format_attrib + + def __init__(self): + # type: () -> None + self._flow_style = None # type: Any + + def set_flow_style(self): + # type: () -> None + self._flow_style = True + + def set_block_style(self): + # type: () -> None + self._flow_style = False + + def flow_style(self, default=None): + # type: (Optional[Any]) -> Any + """if default (the flow_style) is None, the flow style tacked on to + the object explicitly will be taken. 
If that is None as well the + default flow style rules the format down the line, or the type + of the constituent values (simple -> flow, map/list -> block)""" + if self._flow_style is None: + return default + return self._flow_style + + +class LineCol(object): + attrib = line_col_attrib + + def __init__(self): + # type: () -> None + self.line = None + self.col = None + self.data = None # type: Optional[Dict[Any, Any]] + + def add_kv_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def key(self, k): + # type: (Any) -> Any + return self._kv(k, 0, 1) + + def value(self, k): + # type: (Any) -> Any + return self._kv(k, 2, 3) + + def _kv(self, k, x0, x1): + # type: (Any, Any, Any) -> Any + if self.data is None: + return None + data = self.data[k] + return data[x0], data[x1] + + def item(self, idx): + # type: (Any) -> Any + if self.data is None: + return None + return self.data[idx][0], self.data[idx][1] + + def add_idx_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + +class Tag(object): + """store tag information for roundtripping""" + + __slots__ = ('value',) + attrib = tag_attrib + + def __init__(self): + # type: () -> None + self.value = None + + def __repr__(self): + # type: () -> Any + return '{0.__class__.__name__}({0.value!r})'.format(self) + + +class CommentedBase(object): + @property + def ca(self): + # type: () -> Any + if not hasattr(self, Comment.attrib): + setattr(self, Comment.attrib, Comment()) + return getattr(self, Comment.attrib) + + def yaml_end_comment_extend(self, comment, clear=False): + # type: (Any, bool) -> None + if comment is None: + return + if clear or self.ca.end is None: + self.ca.end = [] + self.ca.end.extend(comment) + + def yaml_key_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[1] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[1] = comment[1] + else: + r[1].extend(comment[0]) + r[0] = comment[0] + + def yaml_value_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[3] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[3] = comment[1] + else: + r[3].extend(comment[0]) + r[2] = comment[0] + + def yaml_set_start_comment(self, comment, indent=0): + # type: (Any, Any) -> None + """overwrites any preceding comment lines on an object + expects comment to be without `#` and possible have multiple lines + """ + from .error import CommentMark + from .tokens import CommentToken + + pre_comments = self._yaml_get_pre_comment() + if comment[-1] == '\n': + comment = comment[:-1] # strip final newline if there + start_mark = CommentMark(indent) + for com in comment.split('\n'): + c = com.strip() + if len(c) > 0 and c[0] != '#': + com = '# ' + com + pre_comments.append(CommentToken(com + '\n', start_mark, None)) + + def yaml_set_comment_before_after_key( + self, key, before=None, indent=0, after=None, after_indent=None + ): + # type: (Any, Any, Any, Any, Any) -> None + """ + expects comment (before/after) to be without `#` and possible have multiple lines + """ + from ...ruamel.yaml.error import CommentMark + from ...ruamel.yaml.tokens import CommentToken + + def comment_token(s, mark): + # type: (Any, Any) -> Any + # handle empty lines as 
having no comment + return CommentToken(('# ' if s else "") + s + '\n', mark, None) + + if after_indent is None: + after_indent = indent + 2 + if before and (len(before) > 1) and before[-1] == '\n': + before = before[:-1] # strip final newline if there + if after and after[-1] == '\n': + after = after[:-1] # strip final newline if there + start_mark = CommentMark(indent) + c = self.ca.items.setdefault(key, [None, [], None, None]) + if before == '\n': + c[1].append(comment_token("", start_mark)) + elif before: + for com in before.split('\n'): + c[1].append(comment_token(com, start_mark)) + if after: + start_mark = CommentMark(after_indent) + if c[3] is None: + c[3] = [] + for com in after.split('\n'): + c[3].append(comment_token(com, start_mark)) # type: ignore + + @property + def fa(self): + # type: () -> Any + """format attribute + + set_flow_style()/set_block_style()""" + if not hasattr(self, Format.attrib): + setattr(self, Format.attrib, Format()) + return getattr(self, Format.attrib) + + def yaml_add_eol_comment(self, comment, key=NoComment, column=None): + # type: (Any, Optional[Any], Optional[Any]) -> None + """ + there is a problem as eol comments should start with ' #' + (but at the beginning of the line the space doesn't have to be before + the #. The column index is for the # mark + """ + from .tokens import CommentToken + from .error import CommentMark + + if column is None: + try: + column = self._yaml_get_column(key) + except AttributeError: + column = 0 + if comment[0] != '#': + comment = '# ' + comment + if column is None: + if comment[0] == '#': + comment = ' ' + comment + column = 0 + start_mark = CommentMark(column) + ct = [CommentToken(comment, start_mark, None), None] + self._yaml_add_eol_comment(ct, key=key) + + @property + def lc(self): + # type: () -> Any + if not hasattr(self, LineCol.attrib): + setattr(self, LineCol.attrib, LineCol()) + return getattr(self, LineCol.attrib) + + def _yaml_set_line_col(self, line, col): + # type: (Any, Any) -> None + self.lc.line = line + self.lc.col = col + + def _yaml_set_kv_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_kv_line_col(key, data) + + def _yaml_set_idx_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_idx_line_col(key, data) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + return None + return self.anchor + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + @property + def tag(self): + # type: () -> Any + if not hasattr(self, Tag.attrib): + setattr(self, Tag.attrib, Tag()) + return getattr(self, Tag.attrib) + + def yaml_set_tag(self, value): + # type: (Any) -> None + self.tag.value = value + + def copy_attributes(self, t, memo=None): + # type: (Any, Any) -> None + # fmt: off + for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib, + Tag.attrib, merge_attrib]: + if hasattr(self, a): + if memo is not None: + setattr(t, a, copy.deepcopy(getattr(self, a, memo))) + else: + setattr(t, a, getattr(self, a)) + # fmt: on + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + raise NotImplementedError + + def _yaml_get_pre_comment(self): + # type: () -> Any + raise NotImplementedError + + def _yaml_get_column(self, key): + # type: 
(Any) -> Any + raise NotImplementedError + + +class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_lst') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + list.__init__(self, *args, **kw) + + def __getsingleitem__(self, idx): + # type: (Any) -> Any + return list.__getitem__(self, idx) + + def __setsingleitem__(self, idx, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if idx < len(self): + if ( + isinstance(value, string_types) + and not isinstance(value, ScalarString) + and isinstance(self[idx], ScalarString) + ): + value = type(self[idx])(value) + list.__setitem__(self, idx, value) + + def __delsingleitem__(self, idx=None): + # type: (Any) -> Any + list.__delitem__(self, idx) + self.ca.items.pop(idx, None) # might not be there -> default value + for list_index in sorted(self.ca.items): + if list_index < idx: + continue + self.ca.items[list_index - 1] = self.ca.items.pop(list_index) + + def __len__(self): + # type: () -> int + return list.__len__(self) + + def insert(self, idx, val): + # type: (Any, Any) -> None + """the comments after the insertion have to move forward""" + list.insert(self, idx, val) + for list_index in sorted(self.ca.items, reverse=True): + if list_index < idx: + break + self.ca.items[list_index + 1] = self.ca.items.pop(list_index) + + def extend(self, val): + # type: (Any) -> None + list.extend(self, val) + + def __eq__(self, other): + # type: (Any) -> bool + return list.__eq__(self, other) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res.append(copy.deepcopy(k, memo)) + self.copy_attributes(res, memo=memo) + return res + + def __add__(self, other): + # type: (Any) -> Any + return list.__add__(self, other) + + def sort(self, key=None, reverse=False): # type: ignore + # type: (Any, bool) -> None + if key is None: + tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse) + list.__init__(self, [x[0] for x in tmp_lst]) + else: + tmp_lst = sorted( + zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse + ) + list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst]) + itm = self.ca.items + self.ca._items = {} + for idx, x in enumerate(tmp_lst): + old_index = x[1] + if old_index in itm: + 
self.ca.items[idx] = itm[old_index] + + def __repr__(self): + # type: () -> Any + return list.__repr__(self) + + +class CommentedKeySeq(tuple, CommentedBase): # type: ignore + """This primarily exists to be able to roundtrip keys that are sequences""" + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedMapView(Sized): + __slots__ = ('_mapping',) + + def __init__(self, mapping): + # type: (Any) -> None + self._mapping = mapping + + def __len__(self): + # type: () -> int + count = len(self._mapping) + return count + + +class CommentedMapKeysView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, key): + # type: (Any) -> Any + return key in self._mapping + + def __iter__(self): + # type: () -> Any # yield from self._mapping # not in py27, pypy + # for x in self._mapping._keys(): + for x in self._mapping: + yield x + + +class CommentedMapItemsView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, item): + # type: (Any) -> Any + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield (key, self._mapping[key]) + + +class CommentedMapValuesView(CommentedMapView): + __slots__ = () + + def __contains__(self, value): + # type: (Any) -> Any + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield self._mapping[key] + + +class CommentedMap(ordereddict, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_ok', '_ref') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._ok = set() # type: MutableSet[Any] # own keys + self._ref = [] # type: List[CommentedMap] + ordereddict.__init__(self, *args, **kw) + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = 
comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][2].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post, last = None, None, None + for x in self: + if pre is not None and x != key: + post = x + break + if x == key: + pre = last + last = x + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for k1 in self: + if k1 >= key: + break + if k1 not in self.ca.items: + continue + sel_idx = k1 + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def update(self, *vals, **kw): + # type: (Any, Any) -> None + try: + ordereddict.update(self, *vals, **kw) + except TypeError: + # probably a dict that is used + for x in vals[0]: + self[x] = vals[0][x] + try: + self._ok.update(vals.keys()) # type: ignore + except AttributeError: + # assume one argument that is a list/tuple of two element lists/tuples + for x in vals[0]: + self._ok.add(x[0]) + if kw: + self._ok.add(*kw.keys()) + + def insert(self, pos, key, value, comment=None): + # type: (Any, Any, Any, Optional[Any]) -> None + """insert key value into given position + attach comment if provided + """ + ordereddict.insert(self, pos, key, value) + self._ok.add(key) + if comment is not None: + self.yaml_add_eol_comment(comment, key=key) + + def mlget(self, key, default=None, list_ok=False): + # type: (Any, Any, Any) -> Any + """multi-level get that expects dicts within dicts""" + if not isinstance(key, list): + return self.get(key, default) + # assume that the key is a list of recursively accessible dicts + + def get_one_level(key_list, level, d): + # type: (Any, Any, Any) -> Any + if not list_ok: + assert isinstance(d, dict) + if level >= len(key_list): + if level > len(key_list): + raise IndexError + return d[key_list[level - 1]] + return get_one_level(key_list, level + 1, d[key_list[level - 1]]) + + try: + return get_one_level(key, 1, self) + except KeyError: + return default + except (TypeError, IndexError): + if not list_ok: + raise + return default + + def __getitem__(self, key): + # type: (Any) -> Any + try: + return ordereddict.__getitem__(self, key) + except KeyError: + for merged in getattr(self, merge_attrib, []): + if key in merged[1]: + return merged[1][key] + raise + + def __setitem__(self, key, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if key in self: + if ( + isinstance(value, string_types) + and not isinstance(value, ScalarString) + and isinstance(self[key], ScalarString) + ): + value = type(self[key])(value) + ordereddict.__setitem__(self, key, value) + self._ok.add(key) + + def _unmerged_contains(self, key): + # type: (Any) -> Any + if key in self._ok: + return True + return None + + def __contains__(self, key): + # type: (Any) -> bool + return bool(ordereddict.__contains__(self, key)) + + def get(self, key, default=None): + # type: (Any, Any) -> Any + try: + return self.__getitem__(key) + except: # NOQA + return 
default + + def __repr__(self): + # type: () -> Any + return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict') + + def non_merged_items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + if x in self._ok: + yield x, ordereddict.__getitem__(self, x) + + def __delitem__(self, key): + # type: (Any) -> None + # for merged in getattr(self, merge_attrib, []): + # if key in merged[1]: + # value = merged[1][key] + # break + # else: + # # not found in merged in stuff + # ordereddict.__delitem__(self, key) + # for referer in self._ref: + # referer.update_key_value(key) + # return + # + # ordereddict.__setitem__(self, key, value) # merge might have different value + # self._ok.discard(key) + self._ok.discard(key) + ordereddict.__delitem__(self, key) + for referer in self._ref: + referer.update_key_value(key) + + def __iter__(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def _keys(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def __len__(self): + # type: () -> int + return int(ordereddict.__len__(self)) + + def __eq__(self, other): + # type: (Any) -> bool + return bool(dict(self) == other) + + if PY2: + + def keys(self): + # type: () -> Any + return list(self._keys()) + + def iterkeys(self): + # type: () -> Any + return self._keys() + + def viewkeys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + else: + + def keys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + if PY2: + + def _values(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield ordereddict.__getitem__(self, x) + + def values(self): + # type: () -> Any + return list(self._values()) + + def itervalues(self): + # type: () -> Any + return self._values() + + def viewvalues(self): + # type: () -> Any + return CommentedMapValuesView(self) + + else: + + def values(self): + # type: () -> Any + return CommentedMapValuesView(self) + + def _items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x, ordereddict.__getitem__(self, x) + + if PY2: + + def items(self): + # type: () -> Any + return list(self._items()) + + def iteritems(self): + # type: () -> Any + return self._items() + + def viewitems(self): + # type: () -> Any + return CommentedMapItemsView(self) + + else: + + def items(self): + # type: () -> Any + return CommentedMapItemsView(self) + + @property + def merge(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + setattr(self, merge_attrib, []) + return getattr(self, merge_attrib) + + def copy(self): + # type: () -> Any + x = type(self)() # update doesn't work + for k, v in self._items(): + x[k] = v + self.copy_attributes(x) + return x + + def add_referent(self, cm): + # type: (Any) -> None + if cm not in self._ref: + self._ref.append(cm) + + def add_yaml_merge(self, value): + # type: (Any) -> None + for v in value: + v[1].add_referent(self) + for k, v in v[1].items(): + if ordereddict.__contains__(self, k): + continue + ordereddict.__setitem__(self, k, v) + self.merge.extend(value) + + def update_key_value(self, key): + # type: (Any) -> None + if key in self._ok: + return + for v in self.merge: + if key in v[1]: + ordereddict.__setitem__(self, key, v[1][key]) + return + ordereddict.__delitem__(self, key) + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res[k] = copy.deepcopy(self[k], memo) + self.copy_attributes(res, memo=memo) + return res + + +# based on brownie mappings 
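+# A minimal usage sketch of the CommentedMap round-trip behaviour above,
+# assuming the upstream ``ruamel.yaml`` package layout (the vendored import
+# path under insights differs):
+#
+#     from ruamel.yaml import YAML
+#     data = YAML().load('a:\n  b:\n    c: 1\n')   # yields a CommentedMap
+#     assert data.mlget(['a', 'b', 'c']) == 1      # multi-level get, see above
+#     data.yaml_add_eol_comment('top-level key', key='a')
+#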
+@classmethod # type: ignore +def raise_immutable(cls, *args, **kwargs): + # type: (Any, *Any, **Any) -> None + raise TypeError('{} objects are immutable'.format(cls.__name__)) + + +class CommentedKeyMap(CommentedBase, Mapping): # type: ignore + __slots__ = Comment.attrib, '_od' + """This primarily exists to be able to roundtrip keys that are mappings""" + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + if hasattr(self, '_od'): + raise_immutable(self) + try: + self._od = ordereddict(*args, **kw) + except TypeError: + if PY2: + self._od = ordereddict(args[0].items()) + else: + raise + + __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable + + # need to implement __getitem__, __iter__ and __len__ + def __getitem__(self, index): + # type: (Any) -> Any + return self._od[index] + + def __iter__(self): + # type: () -> Iterator[Any] + for x in self._od.__iter__(): + yield x + + def __len__(self): + # type: () -> int + return len(self._od) + + def __hash__(self): + # type: () -> Any + return hash(tuple(self.items())) + + def __repr__(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + return self._od.__repr__() + return 'ordereddict(' + repr(list(self._od.items())) + ')' + + @classmethod + def fromkeys(keys, v=None): + # type: (Any, Any) -> Any + return CommentedKeyMap(dict.fromkeys(keys, v)) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedOrderedMap(CommentedMap): + __slots__ = (Comment.attrib,) + + +class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA + __slots__ = Comment.attrib, 'odict' + + def __init__(self, values=None): + # type: (Any) -> None + self.odict = ordereddict() + MutableSet.__init__(self) + if values is not None: + self |= values # type: ignore + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def add(self, value): + # type: (Any) -> None + """Add an element.""" + 
self.odict[value] = None + + def discard(self, value): + # type: (Any) -> None + """Remove an element. Do not raise an exception if absent.""" + del self.odict[value] + + def __contains__(self, x): + # type: (Any) -> Any + return x in self.odict + + def __iter__(self): + # type: () -> Any + for x in self.odict: + yield x + + def __len__(self): + # type: () -> int + return len(self.odict) + + def __repr__(self): + # type: () -> str + return 'set({0!r})'.format(self.odict.keys()) + + +class TaggedScalar(CommentedBase): + # the value and style attributes are set during roundtrip construction + def __init__(self, value=None, style=None, tag=None): + # type: (Any, Any, Any) -> None + self.value = value + self.style = style + if tag is not None: + self.yaml_set_tag(tag) + + def __str__(self): + # type: () -> Any + return self.value + + +def dump_comments(d, name="", sep='.', out=sys.stdout): + # type: (Any, str, str, Any) -> None + """ + recursively dump comments, all but the toplevel preceded by the path + in dotted form x.0.a + """ + if isinstance(d, dict) and hasattr(d, 'ca'): + if name: + sys.stdout.write('{}\n'.format(name)) + out.write('{}\n'.format(d.ca)) # type: ignore + for k in d: + dump_comments(d[k], name=(name + sep + k) if name else k, sep=sep, out=out) + elif isinstance(d, list) and hasattr(d, 'ca'): + if name: + sys.stdout.write('{}\n'.format(name)) + out.write('{}\n'.format(d.ca)) # type: ignore + for idx, k in enumerate(d): + dump_comments( + k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out + ) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py new file mode 100644 index 000000000..95f75b358 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py @@ -0,0 +1,324 @@ +# coding: utf-8 + +from __future__ import print_function + +# partially from package six by Benjamin Peterson + +import sys +import os +import types +import traceback +from abc import abstractmethod + + +# fmt: off +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA + from typing import Optional # NOQA +# fmt: on + +_DEFAULT_YAML_VERSION = (1, 2) + +try: + from ...ruamel.ordereddict import ordereddict +except: # NOQA + try: + from collections import OrderedDict + except ImportError: + from ordereddict import OrderedDict # type: ignore + # to get the right name import ... 
as ordereddict doesn't do that + + class ordereddict(OrderedDict): # type: ignore + if not hasattr(OrderedDict, 'insert'): + + def insert(self, pos, key, value): + # type: (int, Any, Any) -> None + if pos >= len(self): + self[key] = value + return + od = ordereddict() + od.update(self) + for k in od: + del self[k] + for index, old_key in enumerate(od): + if pos == index: + self[key] = value + self[old_key] = od[old_key] + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +if PY3: + + def utf8(s): + # type: (str) -> str + return s + + def to_str(s): + # type: (str) -> str + return s + + def to_unicode(s): + # type: (str) -> str + return s + + +else: + if False: + unicode = str + + def utf8(s): + # type: (unicode) -> str + return s.encode('utf-8') + + def to_str(s): + # type: (str) -> str + return str(s) + + def to_unicode(s): + # type: (str) -> unicode + return unicode(s) # NOQA + + +if PY3: + string_types = str + integer_types = int + class_types = type + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize + unichr = chr + import io + + StringIO = io.StringIO + BytesIO = io.BytesIO + # have unlimited precision + no_limit_int = int + from collections.abc import Hashable, MutableSequence, MutableMapping, Mapping # NOQA + +else: + string_types = basestring # NOQA + integer_types = (int, long) # NOQA + class_types = (type, types.ClassType) + text_type = unicode # NOQA + binary_type = str + + # to allow importing + unichr = unichr + from StringIO import StringIO as _StringIO + + StringIO = _StringIO + import cStringIO + + BytesIO = cStringIO.StringIO + # have unlimited precision + no_limit_int = long # NOQA not available on Python 3 + from collections import Hashable, MutableSequence, MutableMapping, Mapping # NOQA + +if False: # MYPY + # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO] + # StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore + StreamType = Any + + StreamTextType = StreamType # Union[Text, StreamType] + VersionType = Union[List[int], str, Tuple[int, int]] + +if PY3: + builtins_module = 'builtins' +else: + builtins_module = '__builtin__' + +UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2 + + +def with_metaclass(meta, *bases): + # type: (Any, Any) -> Any + """Create a base class with a metaclass.""" + return meta('NewBase', bases, {}) + + +DBG_TOKEN = 1 +DBG_EVENT = 2 +DBG_NODE = 4 + + +_debug = None # type: Optional[int] +if 'RUAMELDEBUG' in os.environ: + _debugx = os.environ.get('RUAMELDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + + +if bool(_debug): + + class ObjectCounter(object): + def __init__(self): + # type: () -> None + self.map = {} # type: Dict[Any, Any] + + def __call__(self, k): + # type: (Any) -> None + self.map[k] = self.map.get(k, 0) + 1 + + def dump(self): + # type: () -> None + for k in sorted(self.map): + sys.stdout.write('{} -> {}'.format(k, self.map[k])) + + object_counter = ObjectCounter() + + +# used from yaml util when testing +def dbg(val=None): + # type: (Any) -> Any + global _debug + if _debug is None: + # set to true or false + _debugx = os.environ.get('YAMLDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + if val is None: + return _debug + return _debug & val + + +class Nprint(object): + def __init__(self, file_name=None): + # type: (Any) -> None + self._max_print = None # type: Any + self._count = None # type: Any + self._file_name = file_name + + def __call__(self, *args, **kw): + # type: (Any, Any) -> None + if not bool(_debug): + return 
+ out = sys.stdout if self._file_name is None else open(self._file_name, 'a') + dbgprint = print # to fool checking for print statements by dv utility + kw1 = kw.copy() + kw1['file'] = out + dbgprint(*args, **kw1) + out.flush() + if self._max_print is not None: + if self._count is None: + self._count = self._max_print + self._count -= 1 + if self._count == 0: + dbgprint('forced exit\n') + traceback.print_stack() + out.flush() + sys.exit(0) + if self._file_name: + out.close() + + def set_max_print(self, i): + # type: (int) -> None + self._max_print = i + self._count = None + + +nprint = Nprint() +nprintf = Nprint('/var/tmp/ruamel.yaml.log') + +# char checkers following production rules + + +def check_namespace_char(ch): + # type: (Any) -> bool + if u'\x21' <= ch <= u'\x7E': # ! to ~ + return True + if u'\xA0' <= ch <= u'\uD7FF': + return True + if (u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': # excl. byte order mark + return True + if u'\U00010000' <= ch <= u'\U0010FFFF': + return True + return False + + +def check_anchorname_char(ch): + # type: (Any) -> bool + if ch in u',[]{}': + return False + return check_namespace_char(ch) + + +def version_tnf(t1, t2=None): + # type: (Any, Any) -> Any + """ + return True if ruamel.yaml version_info < t1, None if t2 is specified and bigger else False + """ + from ...ruamel.yaml import version_info # NOQA + + if version_info < t1: + return True + if t2 is not None and version_info < t2: + return None + return False + + +class MutableSliceableSequence(MutableSequence): # type: ignore + __slots__ = () + + def __getitem__(self, index): + # type: (Any) -> Any + if not isinstance(index, slice): + return self.__getsingleitem__(index) + return type(self)([self[i] for i in range(*index.indices(len(self)))]) # type: ignore + + def __setitem__(self, index, value): + # type: (Any, Any) -> None + if not isinstance(index, slice): + return self.__setsingleitem__(index, value) + assert iter(value) + # nprint(index.start, index.stop, index.step, index.indices(len(self))) + if index.step is None: + del self[index.start : index.stop] + for elem in reversed(value): + self.insert(0 if index.start is None else index.start, elem) + else: + range_parms = index.indices(len(self)) + nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1 + # need to test before changing, in case TypeError is caught + if nr_assigned_items < len(value): + raise TypeError( + 'too many elements in value {} < {}'.format(nr_assigned_items, len(value)) + ) + elif nr_assigned_items > len(value): + raise TypeError( + 'not enough elements in value {} > {}'.format( + nr_assigned_items, len(value) + ) + ) + for idx, i in enumerate(range(*range_parms)): + self[i] = value[idx] + + def __delitem__(self, index): + # type: (Any) -> None + if not isinstance(index, slice): + return self.__delsingleitem__(index) + # nprint(index.start, index.stop, index.step, index.indices(len(self))) + for i in reversed(range(*index.indices(len(self)))): + del self[i] + + @abstractmethod + def __getsingleitem__(self, index): + # type: (Any) -> Any + raise IndexError + + @abstractmethod + def __setsingleitem__(self, index, value): + # type: (Any, Any) -> None + raise IndexError + + @abstractmethod + def __delsingleitem__(self, index): + # type: (Any) -> None + raise IndexError diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py new file mode 100644 index 
000000000..0d830e37d --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py @@ -0,0 +1,238 @@ +# coding: utf-8 + +from __future__ import absolute_import, print_function + +import warnings + +from ...ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning +from ...ruamel.yaml.compat import utf8, nprint, nprintf # NOQA + +from ...ruamel.yaml.events import ( + StreamStartEvent, + StreamEndEvent, + MappingStartEvent, + MappingEndEvent, + SequenceStartEvent, + SequenceEndEvent, + AliasEvent, + ScalarEvent, +) +from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode + +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + +__all__ = ['Composer', 'ComposerError'] + + +class ComposerError(MarkedYAMLError): + pass + + +class Composer(object): + def __init__(self, loader=None): + # type: (Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_composer', None) is None: + self.loader._composer = self + self.anchors = {} # type: Dict[Any, Any] + + @property + def parser(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + self.loader.parser + return self.loader._parser + + @property + def resolver(self): + # type: () -> Any + # assert self.loader._resolver is not None + if hasattr(self.loader, 'typ'): + self.loader.resolver + return self.loader._resolver + + def check_node(self): + # type: () -> Any + # Drop the STREAM-START event. + if self.parser.check_event(StreamStartEvent): + self.parser.get_event() + + # If there are more documents available? + return not self.parser.check_event(StreamEndEvent) + + def get_node(self): + # type: () -> Any + # Get the root node of the next document. + if not self.parser.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # type: () -> Any + # Drop the STREAM-START event. + self.parser.get_event() + + # Compose a document if the stream is not empty. + document = None # type: Any + if not self.parser.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.parser.check_event(StreamEndEvent): + event = self.parser.get_event() + raise ComposerError( + 'expected a single document in the stream', + document.start_mark, + 'but found another document', + event.start_mark, + ) + + # Drop the STREAM-END event. + self.parser.get_event() + + return document + + def compose_document(self): + # type: (Any) -> Any + # Drop the DOCUMENT-START event. + self.parser.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+        self.parser.get_event()
+
+        self.anchors = {}
+        return node
+
+    def compose_node(self, parent, index):
+        # type: (Any, Any) -> Any
+        if self.parser.check_event(AliasEvent):
+            event = self.parser.get_event()
+            alias = event.anchor
+            if alias not in self.anchors:
+                raise ComposerError(
+                    None, None, 'found undefined alias %r' % utf8(alias), event.start_mark
+                )
+            return self.anchors[alias]
+        event = self.parser.peek_event()
+        anchor = event.anchor
+        if anchor is not None:  # have an anchor
+            if anchor in self.anchors:
+                # raise ComposerError(
+                #     "found duplicate anchor %r; first occurrence"
+                #     % utf8(anchor), self.anchors[anchor].start_mark,
+                #     "second occurrence", event.start_mark)
+                ws = (
+                    '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
+                    '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
+                )
+                warnings.warn(ws, ReusedAnchorWarning)
+        self.resolver.descend_resolver(parent, index)
+        if self.parser.check_event(ScalarEvent):
+            node = self.compose_scalar_node(anchor)
+        elif self.parser.check_event(SequenceStartEvent):
+            node = self.compose_sequence_node(anchor)
+        elif self.parser.check_event(MappingStartEvent):
+            node = self.compose_mapping_node(anchor)
+        self.resolver.ascend_resolver()
+        return node
+
+    def compose_scalar_node(self, anchor):
+        # type: (Any) -> Any
+        event = self.parser.get_event()
+        tag = event.tag
+        if tag is None or tag == u'!':
+            tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
+        node = ScalarNode(
+            tag,
+            event.value,
+            event.start_mark,
+            event.end_mark,
+            style=event.style,
+            comment=event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        return node
+
+    def compose_sequence_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == u'!':
+            tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
+        node = SequenceNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        index = 0
+        while not self.parser.check_event(SequenceEndEvent):
+            node.value.append(self.compose_node(node, index))
+            index += 1
+        end_event = self.parser.get_event()
+        if node.flow_style is True and end_event.comment is not None:
+            if node.comment is not None:
+                nprint(
+                    'Warning: unexpected end_event comment in sequence '
+                    'node {}'.format(node.flow_style)
+                )
+            node.comment = end_event.comment
+        node.end_mark = end_event.end_mark
+        self.check_end_doc_comment(end_event, node)
+        return node
+
+    def compose_mapping_node(self, anchor):
+        # type: (Any) -> Any
+        start_event = self.parser.get_event()
+        tag = start_event.tag
+        if tag is None or tag == u'!':
+            tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
+        node = MappingNode(
+            tag,
+            [],
+            start_event.start_mark,
+            None,
+            flow_style=start_event.flow_style,
+            comment=start_event.comment,
+            anchor=anchor,
+        )
+        if anchor is not None:
+            self.anchors[anchor] = node
+        while not self.parser.check_event(MappingEndEvent):
+            # key_event = self.parser.peek_event()
+            item_key = self.compose_node(node, None)
+            # if item_key in node.value:
+            #     raise ComposerError("while composing a mapping",
+            #             start_event.start_mark,
+            #             "found duplicate key", key_event.start_mark)
+            item_value = self.compose_node(node, item_key)
+            # node.value[item_key] = item_value
+            node.value.append((item_key, item_value))
+        end_event =
self.parser.get_event() + if node.flow_style is True and end_event.comment is not None: + node.comment = end_event.comment + node.end_mark = end_event.end_mark + self.check_end_doc_comment(end_event, node) + return node + + def check_end_doc_comment(self, end_event, node): + # type: (Any, Any) -> None + if end_event.comment and end_event.comment[1]: + # pre comments on an end_event, no following to move to + if node.comment is None: + node.comment = [None, None] + assert not isinstance(node, ScalarEvent) + # this is a post comment on a mapping node, add as third element + # in the list + node.comment.append(end_event.comment[1]) + end_event.comment[1] = None diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py new file mode 100644 index 000000000..8c6504f8f --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py @@ -0,0 +1,14 @@ +# coding: utf-8 + +import warnings + +from ...ruamel.yaml.util import configobj_walker as new_configobj_walker + +if False: # MYPY + from typing import Any # NOQA + + +def configobj_walker(cfg): + # type: (Any) -> Any + warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code') + return new_configobj_walker(cfg) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py new file mode 100644 index 000000000..6f827aea1 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py @@ -0,0 +1,1806 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division + +import datetime +import base64 +import binascii +import re +import sys +import types +import warnings + +# fmt: off +from ...ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning, + MantissaNoDotYAML1_1Warning) +from ...ruamel.yaml.nodes import * # NOQA +from ...ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode) +from ...ruamel.yaml.compat import (utf8, builtins_module, to_str, PY2, PY3, # NOQA + text_type, nprint, nprintf, version_tnf) +from ...ruamel.yaml.compat import ordereddict, Hashable, MutableSequence # type: ignore +from ...ruamel.yaml.compat import MutableMapping # type: ignore + +from ...ruamel.yaml.comments import * # NOQA +from ...ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet, + CommentedKeySeq, CommentedSeq, TaggedScalar, + CommentedKeyMap) +from ...ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString, + LiteralScalarString, FoldedScalarString, + PlainScalarString, ScalarString,) +from ...ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from ...ruamel.yaml.scalarfloat import ScalarFloat +from ...ruamel.yaml.scalarbool import ScalarBoolean +from ...ruamel.yaml.timestamp import TimeStamp +from ...ruamel.yaml.util import RegExp + +if False: # MYPY + from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA + + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError', 'RoundTripConstructor'] +# fmt: on + + +class ConstructorError(MarkedYAMLError): + pass + + +class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning): + pass + + +class 
DuplicateKeyError(MarkedYAMLFutureWarning): + pass + + +class BaseConstructor(object): + + yaml_constructors = {} # type: Dict[Any, Any] + yaml_multi_constructors = {} # type: Dict[Any, Any] + + def __init__(self, preserve_quotes=None, loader=None): + # type: (Optional[bool], Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_constructor', None) is None: + self.loader._constructor = self + self.loader = loader + self.yaml_base_dict_type = dict + self.yaml_base_list_type = list + self.constructed_objects = {} # type: Dict[Any, Any] + self.recursive_objects = {} # type: Dict[Any, Any] + self.state_generators = [] # type: List[Any] + self.deep_construct = False + self._preserve_quotes = preserve_quotes + self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16)) + + @property + def composer(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.composer + try: + return self.loader._composer + except AttributeError: + sys.stdout.write('slt {}\n'.format(type(self))) + sys.stdout.write('slc {}\n'.format(self.loader._composer)) + sys.stdout.write('{}\n'.format(dir(self))) + raise + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver + return self.loader._resolver + + def check_data(self): + # type: () -> Any + # If there are more documents available? + return self.composer.check_node() + + def get_data(self): + # type: () -> Any + # Construct and return the next document. + if self.composer.check_node(): + return self.construct_document(self.composer.get_node()) + + def get_single_data(self): + # type: () -> Any + # Ensure that the stream contains a single document and construct it. + node = self.composer.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + # type: (Any) -> Any + data = self.construct_object(node) + while bool(self.state_generators): + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for _dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + return self.recursive_objects[node] + # raise ConstructorError( + # None, None, 'found unconstructable recursive node', node.start_mark + # ) + self.recursive_objects[node] = None + data = self.construct_non_recursive_object(node) + + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_non_recursive_object(self, node, tag=None): + # type: (Any, Optional[str]) -> Any + constructor = None # type: Any + tag_suffix = None + if tag is None: + tag = node.tag + if tag in self.yaml_constructors: + constructor = self.yaml_constructors[tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag.startswith(tag_prefix): + tag_suffix = tag[len(tag_prefix) :] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = tag + 
constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for _dummy in generator: + pass + else: + self.state_generators.append(generator) + return data + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark + ) + return node.value + + def construct_sequence(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark + ) + return [self.construct_object(child, deep=deep) for child in node.value] + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + total_mapping = self.yaml_base_dict_type() + if getattr(node, 'merge', None) is not None: + todo = [(node.merge, False), (node.value, False)] + else: + todo = [(node.value, True)] + for values, check in todo: + mapping = self.yaml_base_dict_type() # type: Dict[Any, Any] + for key_node, value_node in values: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if PY2: + try: + hash(key) + except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + + value = self.construct_object(value_node, deep=deep) + if check: + if self.check_mapping_key(node, key_node, mapping, key, value): + mapping[key] = value + else: + mapping[key] = value + total_mapping.update(mapping) + return total_mapping + + def check_mapping_key(self, node, key_node, mapping, key, value): + # type: (Any, Any, Any, Any, Any) -> bool + """return True if key is unique""" + if key in mapping: + if not self.allow_duplicate_keys: + mk = mapping.get(key) + if PY2: + if isinstance(key, unicode): + key = key.encode('utf-8') + if isinstance(value, unicode): + value = value.encode('utf-8') + if isinstance(mk, unicode): + mk = mk.encode('utf-8') + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}" with value "{}" ' + '(original value: "{}")'.format(key, value, mk), + key_node.start_mark, + """ + To suppress this check see: + 
http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + return False + return True + + def check_set_key(self, node, key_node, setting, key): + # type: (Any, Any, Any, Any, Any) -> None + if key in setting: + if not self.allow_duplicate_keys: + if PY2: + if isinstance(key, unicode): + key = key.encode('utf-8') + args = [ + 'while constructing a set', + node.start_mark, + 'found duplicate key "{}"'.format(key), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + + def construct_pairs(self, node, deep=False): + # type: (Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + # type: (Any, Any) -> None + if 'yaml_constructors' not in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + # type: (Any, Any) -> None + if 'yaml_multi_constructors' not in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + + +class SafeConstructor(BaseConstructor): + def construct_scalar(self, node): + # type: (Any) -> Any + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + merge = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping for merging, but found %s' % subnode.id, + subnode.start_mark, + ) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping or list of mappings for merging, ' + 'but found %s' % value_node.id, + value_node.start_mark, + ) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if bool(merge): + node.merge = merge # separate merge keys to be able to update without duplicate + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + # type: (Any) -> Any + self.construct_scalar(node) + return None + + # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does + bool_values = { + u'yes': True, + u'no': False, + u'y': True, + u'n': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + # type: (Any) -> bool + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + # type: (Any) -> int + value_s = to_str(self.construct_scalar(node)) + value_s = value_s.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + return sign * int(value_s[2:], 2) + elif value_s.startswith('0x'): + return sign * int(value_s[2:], 16) + elif value_s.startswith('0o'): + return sign * int(value_s[2:], 8) + elif self.resolver.processing_version == (1, 1) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version == (1, 1) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + return sign * int(value_s) + + inf_value = 1e300 + while inf_value != inf_value * inf_value: + inf_value *= inf_value + nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99). 
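+
+    # For illustration: under YAML 1.1 resolution the sexagesimal branch of
+    # construct_yaml_int above parses colon-separated integers base-60, e.g.
+    #
+    #     '1:30'       ->  1 * 60 + 30                ==     90
+    #     '190:20:30'  ->  190 * 3600 + 20 * 60 + 30  == 685230
+    #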
+
+    def construct_yaml_float(self, node):
+        # type: (Any) -> float
+        value_so = to_str(self.construct_scalar(node))
+        value_s = value_so.replace('_', "").lower()
+        sign = +1
+        if value_s[0] == '-':
+            sign = -1
+        if value_s[0] in '+-':
+            value_s = value_s[1:]
+        if value_s == '.inf':
+            return sign * self.inf_value
+        elif value_s == '.nan':
+            return self.nan_value
+        elif self.resolver.processing_version != (1, 2) and ':' in value_s:
+            digits = [float(part) for part in value_s.split(':')]
+            digits.reverse()
+            base = 1
+            value = 0.0
+            for digit in digits:
+                value += digit * base
+                base *= 60
+            return sign * value
+        else:
+            if self.resolver.processing_version != (1, 2) and 'e' in value_s:
+                # value_s is lower case independent of input
+                mantissa, exponent = value_s.split('e')
+                if '.' not in mantissa:
+                    warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so))
+            return sign * float(value_s)
+
+    if PY3:
+
+        def construct_yaml_binary(self, node):
+            # type: (Any) -> Any
+            try:
+                value = self.construct_scalar(node).encode('ascii')
+            except UnicodeEncodeError as exc:
+                raise ConstructorError(
+                    None,
+                    None,
+                    'failed to convert base64 data into ascii: %s' % exc,
+                    node.start_mark,
+                )
+            try:
+                if hasattr(base64, 'decodebytes'):
+                    return base64.decodebytes(value)
+                else:
+                    return base64.decodestring(value)
+            except binascii.Error as exc:
+                raise ConstructorError(
+                    None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+                )
+
+    else:
+
+        def construct_yaml_binary(self, node):
+            # type: (Any) -> Any
+            value = self.construct_scalar(node)
+            try:
+                return to_str(value).decode('base64')
+            except (binascii.Error, UnicodeEncodeError) as exc:
+                raise ConstructorError(
+                    None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+                )
+
+    timestamp_regexp = RegExp(
+        u"""^(?P<year>[0-9][0-9][0-9][0-9])
+          -(?P<month>[0-9][0-9]?)
+          -(?P<day>[0-9][0-9]?)
+          (?:((?P<t>[Tt])|[ \\t]+)   # explicitly not retaining extra spaces
+          (?P<hour>[0-9][0-9]?)
+          :(?P<minute>[0-9][0-9])
+          :(?P<second>[0-9][0-9])
+          (?:\\.(?P<fraction>[0-9]*))?
+          (?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+          (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+        re.X,
+    )
+
+    def construct_yaml_timestamp(self, node, values=None):
+        # type: (Any, Any) -> Any
+        if values is None:
+            try:
+                match = self.timestamp_regexp.match(node.value)
+            except TypeError:
+                match = None
+            if match is None:
+                raise ConstructorError(
+                    None,
+                    None,
+                    'failed to construct timestamp from "{}"'.format(node.value),
+                    node.start_mark,
+                )
+            values = match.groupdict()
+        year = int(values['year'])
+        month = int(values['month'])
+        day = int(values['day'])
+        if not values['hour']:
+            return datetime.date(year, month, day)
+        hour = int(values['hour'])
+        minute = int(values['minute'])
+        second = int(values['second'])
+        fraction = 0
+        if values['fraction']:
+            fraction_s = values['fraction'][:6]
+            while len(fraction_s) < 6:
+                fraction_s += '0'
+            fraction = int(fraction_s)
+            if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4:
+                fraction += 1
+        delta = None
+        if values['tz_sign']:
+            tz_hour = int(values['tz_hour'])
+            minutes = values['tz_minute']
+            tz_minute = int(minutes) if minutes else 0
+            delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+            if values['tz_sign'] == '-':
+                delta = -delta
+        # should do something else instead (or hook this up to the preceding if statement
+        # in reverse)
+        # if delta is None:
+        #     return datetime.datetime(year, month, day, hour, minute, second, fraction)
+        # return datetime.datetime(year, month, day, hour, minute, second, fraction,
+        #                          datetime.timezone.utc)
+        # the above is not good enough though, it should provide tzinfo. In Python3 that
+        # is easily doable; drop that kind of support for Python2 as it has no native tzinfo
+        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+        if delta:
+            data -= delta
+        return data
+
+    def construct_yaml_omap(self, node):
+        # type: (Any) -> Any
+        # Note: we do now check for duplicate keys
+        omap = ordereddict()
+        yield omap
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(
+                'while constructing an ordered map',
+                node.start_mark,
+                'expected a sequence, but found %s' % node.id,
+                node.start_mark,
+            )
+        for subnode in node.value:
+            if not isinstance(subnode, MappingNode):
+                raise ConstructorError(
+                    'while constructing an ordered map',
+                    node.start_mark,
+                    'expected a mapping of length 1, but found %s' % subnode.id,
+                    subnode.start_mark,
+                )
+            if len(subnode.value) != 1:
+                raise ConstructorError(
+                    'while constructing an ordered map',
+                    node.start_mark,
+                    'expected a single mapping item, but found %d items' % len(subnode.value),
+                    subnode.start_mark,
+                )
+            key_node, value_node = subnode.value[0]
+            key = self.construct_object(key_node)
+            assert key not in omap
+            value = self.construct_object(value_node)
+            omap[key] = value
+
+    def construct_yaml_pairs(self, node):
+        # type: (Any) -> Any
+        # Note: the same code as `construct_yaml_omap`.
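+        # For illustration, both !!omap and !!pairs expect a sequence of
+        # single-entry mappings:
+        #
+        #     !!pairs
+        #     - a: 1
+        #     - b: 2
+        #     - a: 3    # a duplicate key is accepted here; construct_yaml_omap
+        #               # above would fail its `assert key not in omap`
+        #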
+ pairs = [] # type: List[Any] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a sequence, but found %s' % node.id, + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a mapping of length 1, but found %s' % subnode.id, + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a single mapping item, but found %d items' % len(subnode.value), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = set() # type: Set[Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if PY3: + return value + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = self.yaml_base_list_type() # type: List[Any] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = self.yaml_base_dict_type() # type: Dict[Any, Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + # type: (Any) -> None + raise ConstructorError( + None, + None, + 'could not determine a constructor for the tag %r' % utf8(node.tag), + node.start_mark, + ) + + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float +) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary +) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp +) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs +) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) + +if PY2: + + class classobj: + pass + + +class Constructor(SafeConstructor): + def construct_python_str(self, node): + # type: (Any) -> Any + return 
utf8(self.construct_scalar(node)) + + def construct_python_unicode(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + if PY3: + + def construct_python_bytes(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + 'failed to convert base64 data into ascii: %s' % exc, + node.start_mark, + ) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError( + None, None, 'failed to decode base64 data: %s' % exc, node.start_mark + ) + + def construct_python_long(self, node): + # type: (Any) -> int + val = self.construct_yaml_int(node) + if PY3: + return val + return int(val) + + def construct_python_complex(self, node): + # type: (Any) -> Any + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + # type: (Any) -> Any + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + try: + __import__(name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'cannot find module %r (%s)' % (utf8(name), exc), + mark, + ) + return sys.modules[name] + + def find_python_name(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + if u'.' in name: + lname = name.split('.') + lmodule_name = lname + lobject_name = [] # type: List[Any] + while len(lmodule_name) > 1: + lobject_name.insert(0, lmodule_name.pop()) + module_name = '.'.join(lmodule_name) + try: + __import__(module_name) + # object_name = '.'.join(object_name) + break + except ImportError: + continue + else: + module_name = builtins_module + lobject_name = [name] + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'cannot find module %r (%s)' % (utf8(module_name), exc), + mark, + ) + module = sys.modules[module_name] + object_name = '.'.join(lobject_name) + obj = module + while lobject_name: + if not hasattr(obj, lobject_name[0]): + + raise ConstructorError( + 'while constructing a Python object', + mark, + 'cannot find %r in the module %r' % (utf8(object_name), module.__name__), + mark, + ) + obj = getattr(obj, lobject_name.pop(0)) + return obj + + def construct_python_name(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python name', + node.start_mark, + 'expected the empty value, but found %r' % utf8(value), + node.start_mark, + ) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python module', + node.start_mark, + 'expected the empty value, but found %r' % utf8(value), + node.start_mark, + ) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + # type: (Any, Any, Any, Any, bool) -> Any + if not 
args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if PY3: + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + else: + if newobj and isinstance(cls, type(classobj)) and not args and not kwds: + instance = classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + # type: (Any, Any) -> None + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} # type: Dict[Any, Any] + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # type: (Any, Any) -> Any + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + self.recursive_objects[node] = instance + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # type: (Any, Any, bool) -> Any + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
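+        #
+        # For illustration, a sketch of the long form described above:
+        #
+        #     !!python/object/apply:collections.OrderedDict
+        #     args: [[[a, 1], [b, 2]]]
+        #
+        # which is constructed roughly as OrderedDict([('a', 1), ('b', 2)]).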
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} # type: Dict[Any, Any] + state = {} # type: Dict[Any, Any] + listitems = [] # type: List[Any] + dictitems = {} # type: Dict[Any, Any] + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if bool(state): + self.set_python_instance_state(instance, state) + if bool(listitems): + instance.extend(listitems) + if bool(dictitems): + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + # type: (Any, Any) -> Any + return self.construct_python_object_apply(suffix, node, newobj=True) + + +Constructor.add_constructor(u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/str', Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode +) + +if PY3: + Constructor.add_constructor( + u'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes + ) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', Constructor.construct_python_long +) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float +) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex +) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple +) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', Constructor.construct_python_name +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', Constructor.construct_python_module +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', Constructor.construct_python_object +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new +) + + +class RoundTripConstructor(SafeConstructor): + """need to store the comments on the node itself, + as well as on the items + """ + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark + ) + + if node.style == '|' and isinstance(node.value, text_type): + lss = LiteralScalarString(node.value, anchor=node.anchor) + if node.comment and node.comment[1]: + lss.comment = node.comment[1][0] # type: ignore + return lss + if node.style == '>' and isinstance(node.value, text_type): + fold_positions = [] # type: List[int] + idx = -1 + while True: + idx = node.value.find('\a', idx + 1) + if idx < 0: + break + fold_positions.append(idx - 
len(fold_positions)) + fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor) + if node.comment and node.comment[1]: + fss.comment = node.comment[1][0] # type: ignore + if fold_positions: + fss.fold_pos = fold_positions # type: ignore + return fss + elif bool(self._preserve_quotes) and isinstance(node.value, text_type): + if node.style == "'": + return SingleQuotedScalarString(node.value, anchor=node.anchor) + if node.style == '"': + return DoubleQuotedScalarString(node.value, anchor=node.anchor) + if node.anchor: + return PlainScalarString(node.value, anchor=node.anchor) + return node.value + + def construct_yaml_int(self, node): + # type: (Any) -> Any + width = None # type: Any + value_su = to_str(self.construct_scalar(node)) + try: + sx = value_su.rstrip('_') + underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any + except ValueError: + underscore = None + except IndexError: + underscore = None + value_s = value_su.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return BinaryInt( + sign * int(value_s[2:], 2), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0x'): + # default to lower-case if no a-fA-F in string + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + hex_fun = HexInt # type: Any + for ch in value_s[2:]: + if ch in 'ABCDEF': # first non-digit is capital + hex_fun = HexCapsInt + break + if ch in 'abcdef': + break + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return hex_fun( + sign * int(value_s[2:], 16), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0o'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return OctalInt( + sign * int(value_s[2:], 8), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif self.resolver.processing_version != (1, 2) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + elif self.resolver.processing_version > (1, 1) and value_s[0] == '0': + # not an octal, an integer with leading zero(s) + if underscore is not None: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore) + elif underscore: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt( + sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor + ) + elif node.anchor: + return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor) + else: + return sign * int(value_s) + + def construct_yaml_float(self, node): + # type: (Any) -> Any + 
def leading_zeros(v): + # type: (Any) -> int + lead0 = 0 + idx = 0 + while idx < len(v) and v[idx] in '0.': + if v[idx] == '0': + lead0 += 1 + idx += 1 + return lead0 + + # underscore = None + m_sign = False # type: Any + value_so = to_str(self.construct_scalar(node)) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + m_sign = value_s[0] + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + if value_s == '.nan': + return self.nan_value + if self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + if 'e' in value_s: + try: + mantissa, exponent = value_so.split('e') + exp = 'e' + except ValueError: + mantissa, exponent = value_so.split('E') + exp = 'E' + if self.resolver.processing_version != (1, 2): + # value_s is lower case independent of input + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + lead0 = leading_zeros(mantissa) + width = len(mantissa) + prec = mantissa.find('.') + if m_sign: + width -= 1 + e_width = len(exponent) + e_sign = exponent[0] in '+-' + # nprint('sf', width, prec, m_sign, exp, e_width, e_sign) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + exp=exp, + e_width=e_width, + e_sign=e_sign, + anchor=node.anchor, + ) + width = len(value_so) + prec = value_so.index('.') # you can use index, this would not be float without dot + lead0 = leading_zeros(value_so) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + anchor=node.anchor, + ) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if isinstance(value, ScalarString): + return value + if PY3: + return value + try: + return value.encode('ascii') + except AttributeError: + # in case you replace the node dynamically e.g. with a dict + return value + except UnicodeEncodeError: + return value + + def construct_rt_sequence(self, node, seqtyp, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark + ) + ret_val = [] + if node.comment: + seqtyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + seqtyp.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + seqtyp.yaml_set_anchor(node.anchor) + for idx, child in enumerate(node.value): + if child.comment: + seqtyp._yaml_add_comment(child.comment, key=idx) + child.comment = None # if moved to sequence remove from child + ret_val.append(self.construct_object(child, deep=deep)) + seqtyp._yaml_set_idx_line_col( + idx, [child.start_mark.line, child.start_mark.column] + ) + return ret_val + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + + def constructed(value_node): + # type: (Any) -> Any + # If the contents of a merge are defined within the + # merge marker, then they won't have been constructed + # yet. 
But if they were already constructed, we need to use + # the existing object. + if value_node in self.constructed_objects: + value = self.constructed_objects[value_node] + else: + value = self.construct_object(value_node, deep=False) + return value + + # merge = [] + merge_map_list = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge_map_list: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + merge_map_list.append((index, constructed(value_node))) + # self.flatten_mapping(value_node) + # merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + # submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping for merging, but found %s' % subnode.id, + subnode.start_mark, + ) + merge_map_list.append((index, constructed(subnode))) + # self.flatten_mapping(subnode) + # submerge.append(subnode.value) + # submerge.reverse() + # for value in submerge: + # merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping or list of mappings for merging, ' + 'but found %s' % value_node.id, + value_node.start_mark, + ) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + return merge_map_list + # if merge: + # node.value = merge + node.value + + def _sentinel(self): + # type: () -> None + pass + + def construct_mapping(self, node, maptyp, deep=False): # type: ignore + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + merge_map = self.flatten_mapping(node) + # mapping = {} + if node.comment: + maptyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + maptyp.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + maptyp.yaml_set_anchor(node.anchor) + last_key, last_value = None, self._sentinel + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, MutableSequence): + key_s = CommentedKeySeq(key) + if key_node.flow_style is True: + key_s.fa.set_flow_style() + elif key_node.flow_style is False: + key_s.fa.set_block_style() + key = key_s + elif isinstance(key, MutableMapping): + key_m = CommentedKeyMap(key) + if key_node.flow_style is True: + key_m.fa.set_flow_style() + elif key_node.flow_style is False: + key_m.fa.set_block_style() + key = key_m + if PY2: + try: + hash(key) + 
except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + value = self.construct_object(value_node, deep=deep) + if self.check_mapping_key(node, key_node, maptyp, key, value): + if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]: + if last_value is None: + key_node.comment[0] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, value=last_key) + else: + key_node.comment[2] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, key=key) + key_node.comment = None + if key_node.comment: + maptyp._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + maptyp._yaml_add_comment(value_node.comment, value=key) + maptyp._yaml_set_kv_line_col( + key, + [ + key_node.start_mark.line, + key_node.start_mark.column, + value_node.start_mark.line, + value_node.start_mark.column, + ], + ) + maptyp[key] = value + last_key, last_value = key, value # could use indexing + # do this last, or <<: before a key will prevent insertion in instances + # of collections.OrderedDict (as they have no __contains__ + if merge_map: + maptyp.add_yaml_merge(merge_map) + + def construct_setting(self, node, typ, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + if node.comment: + typ._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + typ.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + typ.yaml_set_anchor(node.anchor) + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if PY2: + try: + hash(key) + except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + # construct but should be null + value = self.construct_object(value_node, deep=deep) # NOQA + self.check_set_key(node, key_node, typ, key) + if key_node.comment: + typ._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + typ._yaml_add_comment(value_node.comment, value=key) + typ.add(key) + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = CommentedSeq() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.comment: + data._yaml_add_comment(node.comment) + yield data + data.extend(self.construct_rt_sequence(node, data)) + self.set_collection_style(data, node) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_mapping(node, data, deep=True) + self.set_collection_style(data, node) + + def set_collection_style(self, data, node): + # type: (Any, Any) -> None + if len(data) 
== 0: + return + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = SafeConstructor.construct_mapping(self, node, deep=True) + data.__setstate__(state) + else: + state = SafeConstructor.construct_mapping(self, node) + data.__dict__.update(state) + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = CommentedOrderedMap() + omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + omap.fa.set_flow_style() + elif node.flow_style is False: + omap.fa.set_block_style() + yield omap + if node.comment: + omap._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + omap.yaml_end_comment_extend(node.comment[2], clear=True) + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a sequence, but found %s' % node.id, + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a mapping of length 1, but found %s' % subnode.id, + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a single mapping item, but found %d items' % len(subnode.value), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + if key_node.comment: + omap._yaml_add_comment(key_node.comment, key=key) + if subnode.comment: + omap._yaml_add_comment(subnode.comment, key=key) + if value_node.comment: + omap._yaml_add_comment(value_node.comment, value=key) + omap[key] = value + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = CommentedSet() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_setting(node, data) + + def construct_undefined(self, node): + # type: (Any) -> Any + try: + if isinstance(node, MappingNode): + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + data.yaml_set_tag(node.tag) + yield data + if node.anchor: + data.yaml_set_anchor(node.anchor) + self.construct_mapping(node, data) + return + elif isinstance(node, ScalarNode): + data2 = TaggedScalar() + data2.value = self.construct_scalar(node) + data2.style = node.style + data2.yaml_set_tag(node.tag) + yield data2 + if node.anchor: + data2.yaml_set_anchor(node.anchor, always_dump=True) + return + elif isinstance(node, SequenceNode): + data3 = CommentedSeq() + data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data3.fa.set_flow_style() + elif node.flow_style is False: + data3.fa.set_block_style() + data3.yaml_set_tag(node.tag) + yield data3 + if node.anchor: + data3.yaml_set_anchor(node.anchor) + data3.extend(self.construct_sequence(node)) + return + except: # NOQA + pass + raise ConstructorError( + None, + None, + 'could not determine a constructor for the tag %r' % utf8(node.tag), + node.start_mark, + ) + + def 
construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + if not values['hour']: + return SafeConstructor.construct_yaml_timestamp(self, node, values) + for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']: + if values[part]: + break + else: + return SafeConstructor.construct_yaml_timestamp(self, node, values) + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction_s = values['fraction'][:6] + while len(fraction_s) < 6: + fraction_s += '0' + fraction = int(fraction_s) + if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4: + fraction += 1 + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + minutes = values['tz_minute'] + tz_minute = int(minutes) if minutes else 0 + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + # shold check for NOne and solve issue 366 should be tzinfo=delta) + if delta: + dt = datetime.datetime(year, month, day, hour, minute) + dt -= delta + data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction) + data._yaml['delta'] = delta + tz = values['tz_sign'] + values['tz_hour'] + if values['tz_minute']: + tz += ':' + values['tz_minute'] + data._yaml['tz'] = tz + else: + data = TimeStamp(year, month, day, hour, minute, second, fraction) + if values['tz']: # no delta + data._yaml['tz'] = values['tz'] + + if values['t']: + data._yaml['t'] = True + return data + + def construct_yaml_bool(self, node): + # type: (Any) -> Any + b = SafeConstructor.construct_yaml_bool(self, node) + if node.anchor: + return ScalarBoolean(b, anchor=node.anchor) + return b + + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map +) + +RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined) diff --git 
a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py new file mode 100644 index 000000000..f8cf47a94 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py @@ -0,0 +1,185 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from _ruamel_yaml import CParser, CEmitter # type: ignore + +from ...ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor +from ...ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter +from ...ruamel.yaml.resolver import Resolver, BaseResolver + +if False: # MYPY + from typing import Any, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper'] + + +# this includes some hacks to solve the usage of resolver by lower level +# parts of the parser + + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + BaseConstructor.__init__(self, loader=self) + BaseResolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + SafeConstructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CLoader(CParser, Constructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + Constructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = 
self._serializer = self._representer = self + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + self._emitter = self._serializer = self._representer = self + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + SafeRepresenter.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) + + +class CDumper(CEmitter, Representer, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + Representer.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py new file mode 100644 index 000000000..80b7f4a97 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py @@ -0,0 +1,221 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from ...ruamel.yaml.emitter import Emitter +from ...ruamel.yaml.serializer import Serializer +from ...ruamel.yaml.representer import ( + Representer, + SafeRepresenter, + BaseRepresenter, + RoundTripRepresenter, +) +from ...ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamType, VersionType # NOQA + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper'] + + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + 
allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + SafeRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class Dumper(Emitter, Serializer, Representer, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + Representer.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + 
block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + RoundTripRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + VersionedResolver.__init__(self, loader=self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py new file mode 100644 index 000000000..efc543826 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py @@ -0,0 +1,1696 @@ +# coding: utf-8 + +from __future__ import absolute_import +from __future__ import print_function + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +import sys +from ...ruamel.yaml.error import YAMLError, YAMLStreamError +from ...ruamel.yaml.events import * # NOQA + +# fmt: off +from ...ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \ + check_anchorname_char +# fmt: on + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA + from ...ruamel.yaml.compat import StreamType # NOQA + +__all__ = ['Emitter', 'EmitterError'] + + +class EmitterError(YAMLError): + pass + + +class ScalarAnalysis(object): + def __init__( + self, + scalar, + empty, + multiline, + allow_flow_plain, + allow_block_plain, + allow_single_quoted, + allow_double_quoted, + allow_block, + ): + # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + + +class Indents(object): + # replacement for the list based stack of None/int + def __init__(self): + # type: () -> None + self.values = [] # type: List[Tuple[int, bool]] + + def append(self, val, seq): + # type: (Any, Any) -> None + self.values.append((val, seq)) + + def pop(self): + # type: () -> Any + return self.values.pop()[0] + + def last_seq(self): + # type: () -> bool + # return the seq(uence) value for the element added before the last one + # in increase_indent() + try: + return self.values[-2][1] + except IndexError: + return False + + def seq_flow_align(self, seq_indent, column): + # type: (int, int) -> int + # extra spaces because of dash + if len(self.values) < 2 or not self.values[-1][1]: + return 0 + # -1 for the dash + base = self.values[-1][0] if self.values[-1][0] is not None else 0 + return base + seq_indent - column - 1 
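+
+    # A worked example of seq_flow_align() (illustrative numbers, not from
+    # the original sources): with the enclosing indent value 0, seq_indent=2
+    # and the cursor at column 0 it returns 0 + 2 - 0 - 1 = 1, i.e. one
+    # extra space before the opening indicator of a flow collection nested
+    # in a block sequence; the final -1 accounts for the dash that was
+    # already written.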
+
+    def __len__(self):
+        # type: () -> int
+        return len(self.values)
+
+
+class Emitter(object):
+    # fmt: off
+    DEFAULT_TAG_PREFIXES = {
+        u'!': u'!',
+        u'tag:yaml.org,2002:': u'!!',
+    }
+    # fmt: on
+
+    MAX_SIMPLE_KEY_LENGTH = 128
+
+    def __init__(
+        self,
+        stream,
+        canonical=None,
+        indent=None,
+        width=None,
+        allow_unicode=None,
+        line_break=None,
+        block_seq_indent=None,
+        top_level_colon_align=None,
+        prefix_colon=None,
+        brace_single_entry_mapping_in_flow_sequence=None,
+        dumper=None,
+    ):
+        # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None  # NOQA
+        self.dumper = dumper
+        if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
+            self.dumper._emitter = self
+        self.stream = stream
+
+        # Encoding can be overridden by STREAM-START.
+        self.encoding = None  # type: Optional[Text]
+        self.allow_space_break = None
+
+        # Emitter is a state machine with a stack of states to handle nested
+        # structures.
+        self.states = []  # type: List[Any]
+        self.state = self.expect_stream_start  # type: Any
+
+        # Current event and the event queue.
+        self.events = []  # type: List[Any]
+        self.event = None  # type: Any
+
+        # The current indentation level and the stack of previous indents.
+        self.indents = Indents()
+        self.indent = None  # type: Optional[int]
+
+        # flow_context is an expanding/shrinking list consisting of '{' and '['
+        # for each unclosed flow context. If empty list that means block context
+        self.flow_context = []  # type: List[Text]
+
+        # Contexts.
+        self.root_context = False
+        self.sequence_context = False
+        self.mapping_context = False
+        self.simple_key_context = False
+
+        # Characteristics of the last emitted character:
+        #  - current position.
+        #  - is it a whitespace?
+        #  - is it an indention character
+        #    (indentation space, '-', '?', or ':')?
+        self.line = 0
+        self.column = 0
+        self.whitespace = True
+        self.indention = True
+        self.compact_seq_seq = True  # dash after dash
+        self.compact_seq_map = True  # key after dash
+        # self.compact_ms = False   # dash after key, only when explicit key with ?
+        self.no_newline = None  # type: Optional[bool]  # set if directly after `- `
+
+        # Whether the document requires an explicit document end indicator
+        self.open_ended = False
+
+        # colon handling
+        self.colon = u':'
+        self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon
+        # single entry mappings in flow sequence
+        self.brace_single_entry_mapping_in_flow_sequence = (
+            brace_single_entry_mapping_in_flow_sequence  # NOQA
+        )
+
+        # Formatting details.
+        self.canonical = canonical
+        self.allow_unicode = allow_unicode
+        # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis
+        self.unicode_supplementary = sys.maxunicode > 0xFFFF
+        self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0
+        self.top_level_colon_align = top_level_colon_align
+        self.best_sequence_indent = 2
+        self.requested_indent = indent  # specific for literal zero indent
+        if indent and 1 < indent < 10:
+            self.best_sequence_indent = indent
+        self.best_map_indent = self.best_sequence_indent
+        # if self.best_sequence_indent < self.sequence_dash_offset + 1:
+        #     self.best_sequence_indent = self.sequence_dash_offset + 1
+        self.best_width = 80
+        if width and width > self.best_sequence_indent * 2:
+            self.best_width = width
+        self.best_line_break = u'\n'  # type: Any
+        if line_break in [u'\r', u'\n', u'\r\n']:
+            self.best_line_break = line_break
+
+        # Tag prefixes.
+        self.tag_prefixes = None  # type: Any
+
+        # Prepared anchor and tag.
+        self.prepared_anchor = None  # type: Any
+        self.prepared_tag = None  # type: Any
+
+        # Scalar analysis and style.
+        self.analysis = None  # type: Any
+        self.style = None  # type: Any
+
+        self.scalar_after_indicator = True  # write a scalar on the same line as `---`
+
+        self.alt_null = 'null'
+
+    @property
+    def stream(self):
+        # type: () -> Any
+        try:
+            return self._stream
+        except AttributeError:
+            raise YAMLStreamError('output stream needs to be specified')
+
+    @stream.setter
+    def stream(self, val):
+        # type: (Any) -> None
+        if val is None:
+            return
+        if not hasattr(val, 'write'):
+            raise YAMLStreamError('stream argument needs to have a write() method')
+        self._stream = val
+
+    @property
+    def serializer(self):
+        # type: () -> Any
+        try:
+            if hasattr(self.dumper, 'typ'):
+                return self.dumper.serializer
+            return self.dumper._serializer
+        except AttributeError:
+            return self  # cyaml
+
+    @property
+    def flow_level(self):
+        # type: () -> int
+        return len(self.flow_context)
+
+    def dispose(self):
+        # type: () -> None
+        # Reset the state attributes (to clear self-references)
+        self.states = []
+        self.state = None
+
+    def emit(self, event):
+        # type: (Any) -> None
+        if dbg(DBG_EVENT):
+            nprint(event)
+        self.events.append(event)
+        while not self.need_more_events():
+            self.event = self.events.pop(0)
+            self.state()
+            self.event = None
+
+    # In some cases, we wait for a few next events before emitting.
+
+    def need_more_events(self):
+        # type: () -> bool
+        if not self.events:
+            return True
+        event = self.events[0]
+        if isinstance(event, DocumentStartEvent):
+            return self.need_events(1)
+        elif isinstance(event, SequenceStartEvent):
+            return self.need_events(2)
+        elif isinstance(event, MappingStartEvent):
+            return self.need_events(3)
+        else:
+            return False
+
+    def need_events(self, count):
+        # type: (int) -> bool
+        level = 0
+        for event in self.events[1:]:
+            if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+                level += 1
+            elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+                level -= 1
+            elif isinstance(event, StreamEndEvent):
+                level = -1
+            if level < 0:
+                return False
+        return len(self.events) < count + 1
+
+    def increase_indent(self, flow=False, sequence=None, indentless=False):
+        # type: (bool, Optional[bool], bool) -> None
+        self.indents.append(self.indent, sequence)
+        if self.indent is None:  # top level
+            if flow:
+                # self.indent = self.best_sequence_indent if self.indents.last_seq() else \
+                #               self.best_map_indent
+                # self.indent = self.best_sequence_indent
+                self.indent = self.requested_indent
+            else:
+                self.indent = 0
+        elif not indentless:
+            self.indent += (
+                self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
+            )
+            # if self.indents.last_seq():
+            #     if self.indent == 0: # top level block sequence
+            #         self.indent = self.best_sequence_indent - self.sequence_dash_offset
+            #     else:
+            #         self.indent += self.best_sequence_indent
+            # else:
+            #     self.indent += self.best_map_indent
+
+    # States.
+
+    # Stream handlers.
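+
+    # A rough sketch of how the state machine is driven (editor-added
+    # illustration; event arguments abbreviated, see events.py for the
+    # exact signatures):
+    #
+    #     emitter = Emitter(sys.stdout)
+    #     emitter.emit(StreamStartEvent())
+    #     emitter.emit(DocumentStartEvent())
+    #     emitter.emit(ScalarEvent(None, None, (True, True, True), u'hello'))
+    #     emitter.emit(DocumentEndEvent())
+    #     emitter.emit(StreamEndEvent())
+    #
+    # Each emit() call buffers the event and runs the current state handler
+    # until more lookahead is required (see need_more_events()).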
+ + def expect_stream_start(self): + # type: () -> None + if isinstance(self.event, StreamStartEvent): + if PY2: + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + else: + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError('expected StreamStartEvent, but got %s' % (self.event,)) + + def expect_nothing(self): + # type: () -> None + raise EmitterError('expected nothing, but got %s' % (self.event,)) + + # Document handlers. + + def expect_first_document_start(self): + # type: () -> Any + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + # type: (bool) -> None + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = ( + first + and not self.event.explicit + and not self.canonical + and not self.event.version + and not self.event.tags + and not self.check_empty_document() + ) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError('expected DocumentStartEvent, but got %s' % (self.event,)) + + def expect_document_end(self): + # type: () -> None + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError('expected DocumentEndEvent, but got %s' % (self.event,)) + + def expect_document_root(self): + # type: () -> None + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
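+
+    # Dispatch summary (editor-added note): expect_node() routes AliasEvent
+    # to expect_alias(), ScalarEvent to expect_scalar(), and collection
+    # starts to a flow or block handler; flow style is chosen when already
+    # inside a flow collection, when canonical output is requested, when the
+    # event itself asks for flow style, or when the collection is empty
+    # (an empty block collection has no representation, so `[]`/`{}` is
+    # used instead).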
+ + def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): + # type: (bool, bool, bool, bool) -> None + self.root_context = root + self.sequence_context = sequence # not used in PyYAML + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + if ( + self.process_anchor(u'&') + and isinstance(self.event, ScalarEvent) + and self.sequence_context + ): + self.sequence_context = False + if ( + root + and isinstance(self.event, ScalarEvent) + and not self.scalar_after_indicator + ): + self.write_indent() + self.process_tag() + if isinstance(self.event, ScalarEvent): + # nprint('@', self.indention, self.no_newline, self.column) + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + # nprint('@', self.indention, self.no_newline, self.column) + i2, n2 = self.indention, self.no_newline # NOQA + if self.event.comment: + if self.event.flow_style is False and self.event.comment: + if self.write_post_comment(self.event): + self.indention = False + self.no_newline = True + if self.write_pre_comment(self.event): + self.indention = i2 + self.no_newline = not self.indention + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_sequence() + ): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.event.flow_style is False and self.event.comment: + self.write_post_comment(self.event) + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_mapping() + ): + self.expect_flow_mapping(single=self.event.nr_items == 1) + else: + self.expect_block_mapping() + else: + raise EmitterError('expected NodeEvent, but got %s' % (self.event,)) + + def expect_alias(self): + # type: () -> None + if self.event.anchor is None: + raise EmitterError('anchor is not specified for alias') + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + # type: () -> None + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
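+
+    # Flow sequences are written as `[a, b, c]`; after each comma the
+    # current column is checked against best_width and, once exceeded, a
+    # line break plus indentation is inserted, giving roughly (editor-added
+    # illustration, exact columns depend on the indent settings):
+    #
+    #     [one, two, three,
+    #       four, five]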
+ + def expect_flow_sequence(self): + # type: () -> None + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column) + self.write_indicator(u' ' * ind + u'[', True, whitespace=True) + self.increase_indent(flow=True, sequence=True) + self.flow_context.append('[') + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + self.write_indicator(u']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty flow sequence + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow sequence + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self, single=False): + # type: (Optional[bool]) -> None + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column) + map_init = u'{' + if ( + single + and self.flow_level + and self.flow_context[-1] == '[' + and not self.canonical + and not self.brace_single_entry_mapping_in_flow_sequence + ): + # single map item with flow context, no curly braces necessary + map_init = u'' + self.write_indicator(u' ' * ind + map_init, True, whitespace=True) + self.flow_context.append(map_init) + self.increase_indent(flow=True, sequence=False) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '{' # empty flow mapping + self.write_indicator(u'}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty mapping + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + # if self.event.comment and self.event.comment[1]: + # self.write_pre_comment(self.event) + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped in [u'{', u''] + if self.canonical: + self.write_indicator(u',', False) + 
self.write_indent() + if popped != u'': + self.write_indicator(u'}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow mapping, never reached on empty mappings + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + # type: () -> None + self.write_indicator(self.prefixed_colon, False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + # type: () -> None + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(self.prefixed_colon, True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. + + def expect_block_sequence(self): + # type: () -> None + if self.mapping_context: + indentless = not self.indention + else: + indentless = False + if not self.compact_seq_seq and self.column != 0: + self.write_line_break() + self.increase_indent(flow=False, sequence=True, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + # type: () -> Any + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + # type: (bool) -> None + if not first and isinstance(self.event, SequenceEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments on a block list e.g. empty line + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + self.no_newline = False + else: + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + nonl = self.no_newline if self.column == 0 else False + self.write_indent() + ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0 + self.write_indicator(u' ' * ind + u'-', True, indention=True) + if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent: + self.no_newline = True + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. 
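+
+    # With top_level_colon_align=8 (an illustrative, editor-chosen value),
+    # top-level keys get their colons padded to a common column:
+    #
+    #     key     : 1
+    #     longkey : 2
+    #
+    # expect_block_mapping_simple_value() pads with
+    # u' ' * (top_level_colon_align - column) before the colon.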
+ + def expect_block_mapping(self): + # type: () -> None + if not self.mapping_context and not (self.compact_seq_map or self.column == 0): + self.write_line_break() + self.increase_indent(flow=False, sequence=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + # type: () -> None + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + # type: (Any) -> None + if not first and isinstance(self.event, MappingEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.write_indent() + if self.check_simple_key(): + if not isinstance( + self.event, (SequenceStartEvent, MappingStartEvent) + ): # sequence keys + try: + if self.event.style == '?': + self.write_indicator(u'?', True, indention=True) + except AttributeError: # aliases have no style + pass + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + if isinstance(self.event, AliasEvent): + self.stream.write(u' ') + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + # type: () -> None + if getattr(self.event, 'style', None) != '?': + # prefix = u'' + if self.indent == 0 and self.top_level_colon_align is not None: + # write non-prefixed colon + c = u' ' * (self.top_level_colon_align - self.column) + self.colon + else: + c = self.prefixed_colon + self.write_indicator(c, False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + # type: () -> None + self.write_indent() + self.write_indicator(self.prefixed_colon, True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
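+
+    # check_simple_key() note (editor-added): the prepared anchor, tag and
+    # scalar together must stay under MAX_SIMPLE_KEY_LENGTH (128) and the
+    # scalar must not be multiline; keys that fail the check are emitted in
+    # the explicit form instead:
+    #
+    #     ? averyverylongkey
+    #     : value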
+ + def check_empty_sequence(self): + # type: () -> bool + return ( + isinstance(self.event, SequenceStartEvent) + and bool(self.events) + and isinstance(self.events[0], SequenceEndEvent) + ) + + def check_empty_mapping(self): + # type: () -> bool + return ( + isinstance(self.event, MappingStartEvent) + and bool(self.events) + and isinstance(self.events[0], MappingEndEvent) + ) + + def check_empty_document(self): + # type: () -> bool + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return ( + isinstance(event, ScalarEvent) + and event.anchor is None + and event.tag is None + and event.implicit + and event.value == "" + ) + + def check_simple_key(self): + # type: () -> bool + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if ( + isinstance(self.event, (ScalarEvent, CollectionStartEvent)) + and self.event.tag is not None + ): + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return length < self.MAX_SIMPLE_KEY_LENGTH and ( + isinstance(self.event, AliasEvent) + or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True) + or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True) + or ( + isinstance(self.event, ScalarEvent) + # if there is an explicit style for an empty string, it is a simple key + and not (self.analysis.empty and self.style and self.style not in '\'"') + and not self.analysis.multiline + ) + or self.check_empty_sequence() + or self.check_empty_mapping() + ) + + # Anchor, Tag, and Scalar processors. + + def process_anchor(self, indicator): + # type: (Any) -> bool + if self.event.anchor is None: + self.prepared_anchor = None + return False + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator + self.prepared_anchor, True) + # issue 288 + self.no_newline = False + self.prepared_anchor = None + return True + + def process_tag(self): + # type: () -> None + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if self.event.value == '' and self.style == "'" and \ + tag == 'tag:yaml.org,2002:null' and self.alt_null is not None: + self.event.value = self.alt_null + self.analysis = None + self.style = self.choose_scalar_style() + if (not self.canonical or tag is None) and ( + (self.style == "" and self.event.implicit[0]) + or (self.style != "" and self.event.implicit[1]) + ): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' 
+ self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError('tag is not specified') + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + if ( + self.sequence_context + and not self.flow_level + and isinstance(self.event, ScalarEvent) + ): + self.no_newline = True + self.prepared_tag = None + + def choose_scalar_style(self): + # type: () -> Any + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if (not self.event.style or self.event.style == '?') and ( + self.event.implicit[0] or not self.event.implicit[2] + ): + if not ( + self.simple_key_context and (self.analysis.empty or self.analysis.multiline) + ) and ( + self.flow_level + and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain) + ): + return "" + self.analysis.allow_block = True + if self.event.style and self.event.style in '|>': + if ( + not self.flow_level + and not self.simple_key_context + and self.analysis.allow_block + ): + return self.event.style + if not self.event.style and self.analysis.allow_double_quoted: + if "'" in self.event.value or '\n' in self.event.value: + return '"' + if not self.event.style or self.event.style == "'": + if self.analysis.allow_single_quoted and not ( + self.simple_key_context and self.analysis.multiline + ): + return "'" + return '"' + + def process_scalar(self): + # type: () -> None + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = not self.simple_key_context + # if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + # nprint('xx', self.sequence_context, self.flow_level) + if self.sequence_context and not self.flow_level: + self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == "'": + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar, self.event.comment) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + if self.event.comment: + self.write_post_comment(self.event) + + # Analyzers. + + def prepare_version(self, version): + # type: (Any) -> Any + major, minor = version + if major != 1: + raise EmitterError('unsupported YAML version: %d.%d' % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + # type: (Any) -> Any + if not handle: + raise EmitterError('tag handle must not be empty') + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle))) + for ch in handle[1:-1]: + if not ( + u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_' + ): + raise EmitterError( + 'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle)) + ) + return handle + + def prepare_tag_prefix(self, prefix): + # type: (Any) -> Any + if not prefix: + raise EmitterError('tag prefix must not be empty') + chunks = [] # type: List[Any] + start = end = 0 + if prefix[0] == u'!': + end = 1 + ch_set = u"-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += u'#' + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in ch_set: + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end + 1 + data = utf8(ch) + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return "".join(chunks) + + def prepare_tag(self, tag): + # type: (Any) -> Any + if not tag: + raise EmitterError('tag must not be empty') + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = sorted(self.tag_prefixes.keys()) + for prefix in prefixes: + if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix) :] + chunks = [] # type: List[Any] + start = end = 0 + ch_set = u"-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += u'#' + while end < len(suffix): + ch = suffix[end] + if ( + u'0' <= ch <= u'9' + or u'A' <= ch <= u'Z' + or u'a' <= ch <= u'z' + or ch in ch_set + or (ch == u'!' and handle != u'!') + ): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end + 1 + data = utf8(ch) + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = "".join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + # type: (Any) -> Any + if not anchor: + raise EmitterError('anchor must not be empty') + for ch in anchor: + if not check_anchorname_char(ch): + raise EmitterError( + 'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor)) + ) + return anchor + + def analyze_scalar(self, scalar): + # type: (Any) -> Any + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis( + scalar=scalar, + empty=True, + multiline=False, + allow_flow_plain=False, + allow_block_plain=True, + allow_single_quoted=True, + allow_double_quoted=True, + allow_block=False, + ) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. 
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029' + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. + if ch in u'#,[]{}&*!|>\'"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': # ToDo + if self.serializer.use_version == (1, 1): + flow_indicators = True + elif len(scalar) == 1: # single character + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859 + flow_indicators = True + if ch == u'?' and self.serializer.use_version == (1, 1): + flow_indicators = True + if ch == u':': + if followed_by_whitespace: + flow_indicators = True + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if ( + ch == u'\x85' + or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD' + or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF')) + ) and ch != u'\uFEFF': + # unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar) - 1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar) - 1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029' + followed_by_whitespace = ( + index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029' + ) + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if leading_space or leading_break or trailing_space or trailing_break: + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if special_characters: + allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False + elif space_break: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + if not self.allow_space_break: + allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. 
+ if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis( + scalar=scalar, + empty=False, + multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block, + ) + + # Writers. + + def flush_stream(self): + # type: () -> None + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # type: () -> None + # Write BOM if needed. + if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + # type: () -> None + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): + # type: (Any, Any, bool, bool) -> None + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' ' + indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + # type: () -> None + indent = self.indent or 0 + if ( + not self.indention + or self.column > indent + or (self.column == indent and not self.whitespace) + ): + if bool(self.no_newline): + self.no_newline = False + else: + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' ' * (indent - self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + # type: (Any) -> None + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + # type: (Any) -> None + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + # type: (Any, Any) -> None + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
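To make the effect of analyze_scalar() and the directive writers concrete, here is a hedged sketch against the upstream ``ruamel.yaml`` API (the vendored module is assumed to behave identically):

    import sys
    from ruamel.yaml import YAML  # assumes the upstream package name

    yaml = YAML()
    # Strings whose analysis forbids the plain style get quoted automatically:
    # a leading '- ' or an embedded ': ' sets block_indicators, and a trailing
    # space clears allow_*_plain, so each falls back to single quotes.
    yaml.dump(['plain', '- leading dash', 'a: colon', 'trailing '], sys.stdout)
    # -> - plain
    #    - '- leading dash'
    #    - 'a: colon'
    #    - 'trailing '

    # Setting an explicit YAML version makes the serializer request
    # write_version_directive() and open the document explicitly.
    yaml.version = (1, 1)
    yaml.dump({'a': 1}, sys.stdout)
    # -> %YAML 1.1
    #    ---
    #    a: 1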
+ + def write_single_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator(u"'", True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if ( + start + 1 == end + and self.column > self.best_width + and split + and start != 0 + and end != len(text) + ): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'": + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u"'": + data = u"''" + self.column += 2 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = ch == u' ' + breaks = ch in u'\n\x85\u2028\u2029' + end += 1 + self.write_indicator(u"'", False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'"': u'"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ( + ch is None + or ch in u'"\\\x85\u2028\u2029\uFEFF' + or not ( + u'\x20' <= ch <= u'\x7E' + or ( + self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD') + ) + ) + ): + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\' + self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ( + 0 < end < len(text) - 1 + and (ch == u' ' or start >= end) + and self.column + (end - start) > self.best_width + and split + ): + data = text[start:end] + u'\\' + if start < end: + start = end + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def 
determine_block_hints(self, text):
+        # type: (Any) -> Any
+        indent = 0
+        indicator = u''
+        hints = u''
+        if text:
+            if text[0] in u' \n\x85\u2028\u2029':
+                indent = self.best_sequence_indent
+                hints += text_type(indent)
+            elif self.root_context:
+                for end in ['\n---', '\n...']:
+                    pos = 0
+                    while True:
+                        pos = text.find(end, pos)
+                        if pos == -1:
+                            break
+                        try:
+                            if text[pos + 4] in ' \r\n':
+                                break
+                        except IndexError:
+                            pass
+                        pos += 1
+                    if pos > -1:
+                        break
+                if pos > 0:
+                    indent = self.best_sequence_indent
+            if text[-1] not in u'\n\x85\u2028\u2029':
+                indicator = u'-'
+            elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+                indicator = u'+'
+        hints += indicator
+        return hints, indent, indicator
+
+    def write_folded(self, text):
+        # type: (Any) -> None
+        hints, _indent, _indicator = self.determine_block_hints(text)
+        self.write_indicator(u'>' + hints, True)
+        if _indicator == u'+':
+            self.open_ended = True
+        self.write_line_break()
+        leading_space = True
+        spaces = False
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in u'\n\x85\u2028\u2029\a':
+                    if (
+                        not leading_space
+                        and ch is not None
+                        and ch != u' '
+                        and text[start] == u'\n'
+                    ):
+                        self.write_line_break()
+                    leading_space = ch == u' '
+                    for br in text[start:end]:
+                        if br == u'\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        self.write_indent()
+                    start = end
+            elif spaces:
+                if ch != u' ':
+                    if start + 1 == end and self.column > self.best_width:
+                        self.write_indent()
+                    else:
+                        data = text[start:end]
+                        self.column += len(data)
+                        if bool(self.encoding):
+                            data = data.encode(self.encoding)
+                        self.stream.write(data)
+                    start = end
+            else:
+                if ch is None or ch in u' \n\x85\u2028\u2029\a':
+                    data = text[start:end]
+                    self.column += len(data)
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch == u'\a':
+                        if end < (len(text) - 1) and not text[end + 2].isspace():
+                            self.write_line_break()
+                            self.write_indent()
+                            end += 2  # \a and the space that is inserted on the fold
+                        else:
+                            raise EmitterError('unexpected fold indicator \\a before space')
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = ch in u'\n\x85\u2028\u2029'
+                spaces = ch == u' '
+            end += 1
+
+    def write_literal(self, text, comment=None):
+        # type: (Any, Any) -> None
+        hints, _indent, _indicator = self.determine_block_hints(text)
+        self.write_indicator(u'|' + hints, True)
+        try:
+            comment = comment[1][0]
+            if comment:
+                self.stream.write(comment)
+        except (TypeError, IndexError):
+            pass
+        if _indicator == u'+':
+            self.open_ended = True
+        self.write_line_break()
+        breaks = True
+        start = end = 0
+        while end <= len(text):
+            ch = None
+            if end < len(text):
+                ch = text[end]
+            if breaks:
+                if ch is None or ch not in u'\n\x85\u2028\u2029':
+                    for br in text[start:end]:
+                        if br == u'\n':
+                            self.write_line_break()
+                        else:
+                            self.write_line_break(br)
+                    if ch is not None:
+                        if self.root_context:
+                            idnx = self.indent if self.indent is not None else 0
+                            self.stream.write(u' ' * (_indent + idnx))
+                        else:
+                            self.write_indent()
+                    start = end
+            else:
+                if ch is None or ch in u'\n\x85\u2028\u2029':
+                    data = text[start:end]
+                    if bool(self.encoding):
+                        data = data.encode(self.encoding)
+                    self.stream.write(data)
+                    if ch is None:
+                        self.write_line_break()
+                    start = end
+            if ch is not None:
+                breaks = ch in u'\n\x85\u2028\u2029'
+            end += 1
+
+    def write_plain(self, text,
split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + else: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start + 1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': # type: ignore + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + try: + self.stream.write(data) + except: # NOQA + sys.stdout.write(repr(data) + '\n') + raise + start = end + if ch is not None: + spaces = ch == u' ' + breaks = ch in u'\n\x85\u2028\u2029' + end += 1 + + def write_comment(self, comment, pre=False): + # type: (Any, bool) -> None + value = comment.value + # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value)) + if not pre and value[-1] == '\n': + value = value[:-1] + try: + # get original column position + col = comment.start_mark.column + if comment.value and comment.value.startswith('\n'): + # never inject extra spaces if the comment starts with a newline + # and not a real comment (e.g. 
if you have an empty line following a key-value)
+                col = self.column
+            elif col < self.column + 1:
+                # comment would land left of the cursor; fall back via the handler
+                raise ValueError
+        except ValueError:
+            col = self.column + 1
+        # nprint('post_comment', self.line, self.column, value)
+        try:
+            # at least one space if the current column >= the start column of the comment
+            # but not at the start of a line
+            nr_spaces = col - self.column
+            if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n':
+                nr_spaces = 1
+            value = ' ' * nr_spaces + value
+            try:
+                if bool(self.encoding):
+                    value = value.encode(self.encoding)
+            except UnicodeDecodeError:
+                pass
+            self.stream.write(value)
+        except TypeError:
+            raise
+        if not pre:
+            self.write_line_break()
+
+    def write_pre_comment(self, event):
+        # type: (Any) -> bool
+        comments = event.comment[1]
+        if comments is None:
+            return False
+        try:
+            start_events = (MappingStartEvent, SequenceStartEvent)
+            for comment in comments:
+                if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
+                    continue
+                if self.column != 0:
+                    self.write_line_break()
+                self.write_comment(comment, pre=True)
+                if isinstance(event, start_events):
+                    comment.pre_done = True
+        except TypeError:
+            sys.stdout.write('eventtt {} {}'.format(type(event), event))
+            raise
+        return True
+
+    def write_post_comment(self, event):
+        # type: (Any) -> bool
+        if self.event.comment[0] is None:
+            return False
+        comment = event.comment[0]
+        self.write_comment(comment)
+        return True
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py
new file mode 100644
index 000000000..cfad4a6f4
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+import warnings
+import textwrap
+
+from ...ruamel.yaml.compat import utf8
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Text  # NOQA
+
+
+__all__ = [
+    'FileMark',
+    'StringMark',
+    'CommentMark',
+    'YAMLError',
+    'MarkedYAMLError',
+    'ReusedAnchorWarning',
+    'UnsafeLoaderWarning',
+    'MarkedYAMLWarning',
+    'MarkedYAMLFutureWarning',
+]
+
+
+class StreamMark(object):
+    __slots__ = 'name', 'index', 'line', 'column'
+
+    def __init__(self, name, index, line, column):
+        # type: (Any, int, int, int) -> None
+        self.name = name
+        self.index = index
+        self.line = line
+        self.column = column
+
+    def __str__(self):
+        # type: () -> Any
+        where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
+        return where
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if self.line != other.line or self.column != other.column:
+            return False
+        if self.name != other.name or self.index != other.index:
+            return False
+        return True
+
+    def __ne__(self, other):
+        # type: (Any) -> bool
+        return not self.__eq__(other)
+
+
+class FileMark(StreamMark):
+    __slots__ = ()
+
+
+class StringMark(StreamMark):
+    __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'
+
+    def __init__(self, name, index, line, column, buffer, pointer):
+        # type: (Any, int, int, int, Any, Any) -> None
+        StreamMark.__init__(self, name, index, line, column)
+        self.buffer = buffer
+        self.pointer = pointer
+
+    def get_snippet(self, indent=4, max_length=75):
+        # type: (int, int) -> Any
+        if self.buffer is None:  # always False
+            return None
+        head = ""
+        start = self.pointer
+        while start > 0 and self.buffer[start - 1] not in
u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer - start > max_length / 2 - 1: + head = ' ... ' + start += 5 + break + tail = "" + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end - self.pointer > max_length / 2 - 1: + tail = ' ... ' + end -= 5 + break + snippet = utf8(self.buffer[start:end]) + caret = '^' + caret = '^ (line: {})'.format(self.line + 1) + return ( + ' ' * indent + + head + + snippet + + tail + + '\n' + + ' ' * (indent + self.pointer - start + len(head)) + + caret + ) + + def __str__(self): + # type: () -> Any + snippet = self.get_snippet() + where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1) + if snippet is not None: + where += ':\n' + snippet + return where + + +class CommentMark(object): + __slots__ = ('column',) + + def __init__(self, column): + # type: (Any) -> None + self.column = column + + +class YAMLError(Exception): + pass + + +class MarkedYAMLError(YAMLError): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + # warn is ignored + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + return '\n'.join(lines) + + +class YAMLStreamError(Exception): + pass + + +class YAMLWarning(Warning): + pass + + +class MarkedYAMLWarning(YAMLWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) + + +class ReusedAnchorWarning(YAMLWarning): + pass + + +class UnsafeLoaderWarning(YAMLWarning): + text = """ +The default 'Loader' for 'load(stream)' without further arguments can be unsafe. 
+Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK. +Alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning) + +In most other cases you should consider using 'safe_load(stream)'""" + pass + + +warnings.simplefilter('once', UnsafeLoaderWarning) + + +class MantissaNoDotYAML1_1Warning(YAMLWarning): + def __init__(self, node, flt_str): + # type: (Any, Any) -> None + self.node = node + self.flt = flt_str + + def __str__(self): + # type: () -> Any + line = self.node.start_mark.line + col = self.node.start_mark.column + return """ +In YAML 1.1 floating point values should have a dot ('.') in their mantissa. +See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification +( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2 + +Correct your float: "{}" on line: {}, column: {} + +or alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning) + +""".format( + self.flt, line, col + ) + + +warnings.simplefilter('once', MantissaNoDotYAML1_1Warning) + + +class YAMLFutureWarning(Warning): + pass + + +class MarkedYAMLFutureWarning(YAMLFutureWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py new file mode 100644 index 000000000..58b212190 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py @@ -0,0 +1,157 @@ +# coding: utf-8 + +# Abstract classes. 
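These event classes are the interchange format between the parser and the emitter. A small sketch of streaming them via ``YAML.parse()`` (defined later in this patch, in ``main.py``), again written against the upstream ``ruamel.yaml`` package name as an assumption:

    import io
    from ruamel.yaml import YAML  # assumes the upstream package; vendored copy is identical

    yaml = YAML(typ='safe', pure=True)
    for event in yaml.parse(io.StringIO('a: [1, 2]\n')):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent,
    # ScalarEvent (a), SequenceStartEvent, ScalarEvent (1), ScalarEvent (2),
    # SequenceEndEvent, MappingEndEvent, DocumentEndEvent, StreamEndEvent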
+ +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + + +def CommentCheck(): + # type: () -> None + pass + + +class Event(object): + __slots__ = 'start_mark', 'end_mark', 'comment' + + def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck): + # type: (Any, Any, Any) -> None + self.start_mark = start_mark + self.end_mark = end_mark + # assert comment is not CommentCheck + if comment is CommentCheck: + comment = None + self.comment = comment + + def __repr__(self): + # type: () -> Any + attributes = [ + key + for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style'] + if hasattr(self, key) + ] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) + if self.comment not in [None, CommentCheck]: + arguments += ', comment={!r}'.format(self.comment) + return '%s(%s)' % (self.__class__.__name__, arguments) + + +class NodeEvent(Event): + __slots__ = ('anchor',) + + def __init__(self, anchor, start_mark=None, end_mark=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.anchor = anchor + + +class CollectionStartEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items' + + def __init__( + self, + anchor, + tag, + implicit, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + nr_items=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.flow_style = flow_style + self.nr_items = nr_items + + +class CollectionEndEvent(Event): + __slots__ = () + + +# Implementations. + + +class StreamStartEvent(Event): + __slots__ = ('encoding',) + + def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.encoding = encoding + + +class StreamEndEvent(Event): + __slots__ = () + + +class DocumentStartEvent(Event): + __slots__ = 'explicit', 'version', 'tags' + + def __init__( + self, + start_mark=None, + end_mark=None, + explicit=None, + version=None, + tags=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + self.version = version + self.tags = tags + + +class DocumentEndEvent(Event): + __slots__ = ('explicit',) + + def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + + +class AliasEvent(NodeEvent): + __slots__ = () + + +class ScalarEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'value', 'style' + + def __init__( + self, + anchor, + tag, + implicit, + value, + start_mark=None, + end_mark=None, + style=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.value = value + self.style = style + + +class SequenceStartEvent(CollectionStartEvent): + __slots__ = () + + +class SequenceEndEvent(CollectionEndEvent): + __slots__ = () + + +class MappingStartEvent(CollectionStartEvent): + __slots__ = () + + +class MappingEndEvent(CollectionEndEvent): + __slots__ = () diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py 
b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py new file mode 100644 index 000000000..ae8c8b8c0 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +from __future__ import absolute_import + + +from ...ruamel.yaml.reader import Reader +from ...ruamel.yaml.scanner import Scanner, RoundTripScanner +from ...ruamel.yaml.parser import Parser, RoundTripParser +from ...ruamel.yaml.composer import Composer +from ...ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from ...ruamel.yaml.resolver import VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamTextType, VersionType # NOQA + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader'] + + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + BaseConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + SafeConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + Constructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class RoundTripLoader( + Reader, + RoundTripScanner, + RoundTripParser, + Composer, + RoundTripConstructor, + VersionedResolver, +): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + # self.reader = Reader.__init__(self, stream) + Reader.__init__(self, stream, loader=self) + RoundTripScanner.__init__(self, loader=self) + RoundTripParser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self) + VersionedResolver.__init__(self, version, loader=self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py new file mode 100644 index 000000000..2abc7c96f --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py @@ -0,0 +1,1534 @@ +# coding: utf-8 + +from __future__ import absolute_import, unicode_literals, print_function + +import sys +import os +import warnings 
+import glob +from importlib import import_module + + +from ... import ruamel +from ...ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA + +from ...ruamel.yaml.tokens import * # NOQA +from ...ruamel.yaml.events import * # NOQA +from ...ruamel.yaml.nodes import * # NOQA + +from ...ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA +from ...ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA +from ...ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3, nprint +from ...ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA +from ...ruamel.yaml.representer import ( + BaseRepresenter, + SafeRepresenter, + Representer, + RoundTripRepresenter, +) +from ...ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from ...ruamel.yaml.loader import Loader as UnsafeLoader + +if False: # MYPY + from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA + from ...ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA + + if PY3: + from pathlib import Path + else: + Path = Any + +try: + from _ruamel_yaml import CParser, CEmitter # type: ignore +except: # NOQA + CParser = CEmitter = None + +# import io + +enforce = object() + + +# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a +# subset of abbreviations, which should be all caps according to PEP8 + + +class YAML(object): + def __init__( + self, _kw=enforce, typ=None, pure=False, output=None, plug_ins=None # input=None, + ): + # type: (Any, Optional[Text], Any, Any, Any) -> None + """ + _kw: not used, forces keyword arguments in 2.7 (in 3 you can do (*, safe_load=..) + typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default) + 'safe' -> SafeLoader/SafeDumper, + 'unsafe' -> normal/unsafe Loader/Dumper + 'base' -> baseloader + pure: if True only use Python modules + input/output: needed to work as context manager + plug_ins: a list of plug-in files + """ + if _kw is not enforce: + raise TypeError( + '{}.__init__() takes no positional argument but at least ' + 'one was given ({!r})'.format(self.__class__.__name__, _kw) + ) + + self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ]) + self.pure = pure + + # self._input = input + self._output = output + self._context_manager = None # type: Any + + self.plug_ins = [] # type: List[Any] + for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins(): + file_name = pu.replace(os.sep, '.') + self.plug_ins.append(import_module(file_name)) + self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any + self.allow_unicode = True + self.Reader = None # type: Any + self.Representer = None # type: Any + self.Constructor = None # type: Any + self.Scanner = None # type: Any + self.Serializer = None # type: Any + self.default_flow_style = None # type: Any + typ_found = 1 + setup_rt = False + if 'rt' in self.typ: + setup_rt = True + elif 'safe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.SafeRepresenter + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.SafeConstructor + elif 'base' in self.typ: + self.Emitter = ruamel.yaml.emitter.Emitter + self.Representer = ruamel.yaml.representer.BaseRepresenter + self.Parser = ruamel.yaml.parser.Parser if 
pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.BaseConstructor + elif 'unsafe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.Representer + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.Constructor + else: + setup_rt = True + typ_found = 0 + if setup_rt: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruamel.yaml.emitter.Emitter + self.Serializer = ruamel.yaml.serializer.Serializer + self.Representer = ruamel.yaml.representer.RoundTripRepresenter + self.Scanner = ruamel.yaml.scanner.RoundTripScanner + # no optimized rt-parser yet + self.Parser = ruamel.yaml.parser.RoundTripParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.RoundTripConstructor + del setup_rt + self.stream = None + self.canonical = None + self.old_indent = None + self.width = None + self.line_break = None + + self.map_indent = None + self.sequence_indent = None + self.sequence_dash_offset = 0 + self.compact_seq_seq = None + self.compact_seq_map = None + self.sort_base_mapping_type_on_output = None # default: sort + + self.top_level_colon_align = None + self.prefix_colon = None + self.version = None + self.preserve_quotes = None + self.allow_duplicate_keys = False # duplicate keys in map, set + self.encoding = 'utf-8' + self.explicit_start = None + self.explicit_end = None + self.tags = None + self.default_style = None + self.top_level_block_style_scalar_no_indent_error_1_1 = False + # directives end indicator with single scalar document + self.scalar_after_indicator = None + # [a, b: 1, c: {d: 2}] vs. 
[a, {b: 1}, {c: {d: 2}}] + self.brace_single_entry_mapping_in_flow_sequence = False + for module in self.plug_ins: + if getattr(module, 'typ', None) in self.typ: + typ_found += 1 + module.init_typ(self) + break + if typ_found == 0: + raise NotImplementedError( + 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ) + ) + + @property + def reader(self): + # type: () -> Any + try: + return self._reader # type: ignore + except AttributeError: + self._reader = self.Reader(None, loader=self) + return self._reader + + @property + def scanner(self): + # type: () -> Any + try: + return self._scanner # type: ignore + except AttributeError: + self._scanner = self.Scanner(loader=self) + return self._scanner + + @property + def parser(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Parser is not CParser: + setattr(self, attr, self.Parser(loader=self)) + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + else: + # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'): + # # pathlib.Path() instance + # setattr(self, attr, CParser(self._stream)) + # else: + setattr(self, attr, CParser(self._stream)) + # self._parser = self._composer = self + # nprint('scanner', self.loader.scanner) + + return getattr(self, attr) + + @property + def composer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Composer(loader=self)) + return getattr(self, attr) + + @property + def constructor(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self) + cnst.allow_duplicate_keys = self.allow_duplicate_keys + setattr(self, attr, cnst) + return getattr(self, attr) + + @property + def resolver(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Resolver(version=self.version, loader=self)) + return getattr(self, attr) + + @property + def emitter(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Emitter is not CEmitter: + _emitter = self.Emitter( + None, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + prefix_colon=self.prefix_colon, + brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA + dumper=self, + ) + setattr(self, attr, _emitter) + if self.map_indent is not None: + _emitter.best_map_indent = self.map_indent + if self.sequence_indent is not None: + _emitter.best_sequence_indent = self.sequence_indent + if self.sequence_dash_offset is not None: + _emitter.sequence_dash_offset = self.sequence_dash_offset + # _emitter.block_seq_indent = self.sequence_dash_offset + if self.compact_seq_seq is not None: + _emitter.compact_seq_seq = self.compact_seq_seq + if self.compact_seq_map is not None: + _emitter.compact_seq_map = self.compact_seq_map + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + return None + return getattr(self, attr) + + @property + def serializer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr( + self, + attr, + self.Serializer( + encoding=self.encoding, + explicit_start=self.explicit_start, + 
explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + dumper=self, + ), + ) + return getattr(self, attr) + + @property + def representer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + repres = self.Representer( + default_style=self.default_style, + default_flow_style=self.default_flow_style, + dumper=self, + ) + if self.sort_base_mapping_type_on_output is not None: + repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output + setattr(self, attr, repres) + return getattr(self, attr) + + # separate output resolver? + + # def load(self, stream=None): + # if self._context_manager: + # if not self._input: + # raise TypeError("Missing input stream while dumping from context manager") + # for data in self._context_manager.load(): + # yield data + # return + # if stream is None: + # raise TypeError("Need a stream argument when not loading from context manager") + # return self.load_one(stream) + + def load(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + at this point you either have the non-pure Parser (which has its own reader and + scanner) or you have the pure Parser. + If the pure Parser is set, then set the Reader and Scanner, if not already set. + If either the Scanner or Reader are set, you cannot use the non-pure Parser, + so reset it to the pure parser and set the Reader resp. Scanner if necessary + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.load(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.get_single_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def load_all(self, stream, _kw=enforce): # , skip=None): + # type: (Union[Path, StreamTextType], Any) -> Any + if _kw is not enforce: + raise TypeError( + '{}.__init__() takes no positional argument but at least ' + 'one was given ({!r})'.format(self.__class__.__name__, _kw) + ) + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('r') as fp: + for d in self.load_all(fp, _kw=enforce): + yield d + return + # if skip is None: + # skip = [] + # elif isinstance(skip, int): + # skip = [skip] + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.check_data(): + yield constructor.get_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def get_constructor_parser(self, stream): + # type: (StreamTextType) -> Any + """ + the old cyaml needs special setup, and therefore the stream + """ + if self.Parser is not CParser: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.reader.stream = stream + else: + if self.Reader is not None: + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + elif self.Scanner is not None: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + else: + # combined C level reader>scanner>parser + # does some calls to the resolver, e.g. 
BaseResolver.descend_resolver + # if you just initialise the CParser, to much of resolver.py + # is actually used + rslvr = self.Resolver + # if rslvr is ruamel.yaml.resolver.VersionedResolver: + # rslvr = ruamel.yaml.resolver.Resolver + + class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore + def __init__(selfx, stream, version=self.version, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA + CParser.__init__(selfx, stream) + selfx._parser = selfx._composer = selfx + self.Constructor.__init__(selfx, loader=selfx) + selfx.allow_duplicate_keys = self.allow_duplicate_keys + rslvr.__init__(selfx, version=version, loadumper=selfx) + + self._stream = stream + loader = XLoader(stream) + return loader, loader + return self.constructor, self.parser + + def dump(self, data, stream=None, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + if not self._output: + raise TypeError('Missing output stream while dumping from context manager') + if _kw is not enforce: + raise TypeError( + '{}.dump() takes one positional argument but at least ' + 'two were given ({!r})'.format(self.__class__.__name__, _kw) + ) + if transform is not None: + raise TypeError( + '{}.dump() in the context manager cannot have transform keyword ' + ''.format(self.__class__.__name__) + ) + self._context_manager.dump(data) + else: # old style + if stream is None: + raise TypeError('Need a stream argument when not dumping from context manager') + return self.dump_all([data], stream, _kw, transform=transform) + + def dump_all(self, documents, stream, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + raise NotImplementedError + if _kw is not enforce: + raise TypeError( + '{}.dump(_all) takes two positional argument but at least ' + 'three were given ({!r})'.format(self.__class__.__name__, _kw) + ) + self._output = stream + self._context_manager = YAMLContextManager(self, transform=transform) + for data in documents: + self._context_manager.dump(data) + self._context_manager.teardown_output() + self._output = None + self._context_manager = None + + def Xdump_all(self, documents, stream, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + """ + Serialize a sequence of Python objects into a YAML stream. + """ + if not hasattr(stream, 'write') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('w') as fp: + return self.dump_all(documents, fp, _kw, transform=transform) + if _kw is not enforce: + raise TypeError( + '{}.dump(_all) takes two positional argument but at least ' + 'three were given ({!r})'.format(self.__class__.__name__, _kw) + ) + # The stream should have the methods `write` and possibly `flush`. 
+ if self.top_level_colon_align is True: + tlca = max([len(str(x)) for x in documents[0]]) # type: Any + else: + tlca = self.top_level_colon_align + if transform is not None: + fstream = stream + if self.encoding is None: + stream = StringIO() + else: + stream = BytesIO() + serializer, representer, emitter = self.get_serializer_representer_emitter( + stream, tlca + ) + try: + self.serializer.open() + for data in documents: + try: + self.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + self.serializer.close() + finally: + try: + self.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + delattr(self, '_serializer') + delattr(self, '_emitter') + if transform: + val = stream.getvalue() + if self.encoding: + val = val.decode(self.encoding) + if fstream is None: + transform(val) + else: + fstream.write(transform(val)) + return None + + def get_serializer_representer_emitter(self, stream, tlca): + # type: (StreamType, Any) -> Any + # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler + if self.Emitter is not CEmitter: + if self.Serializer is None: + self.Serializer = ruamel.yaml.serializer.Serializer + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + if self.Serializer is not None: + # cannot set serializer with CEmitter + self.Emitter = ruamel.yaml.emitter.Emitter + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + # C routines + + rslvr = ( + ruamel.yaml.resolver.BaseResolver + if 'base' in self.typ + else ruamel.yaml.resolver.Resolver + ) + + class XDumper(CEmitter, self.Representer, rslvr): # type: ignore + def __init__( + selfx, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + selfx, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + selfx._emitter = selfx._serializer = selfx._representer = selfx + self.Representer.__init__( + selfx, default_style=default_style, default_flow_style=default_flow_style + ) + rslvr.__init__(selfx) + + self._stream = stream + dumper = XDumper( + stream, + default_style=self.default_style, + default_flow_style=self.default_flow_style, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + ) + self._emitter = self._serializer = dumper + return dumper, dumper, dumper + + # basic types + def map(self, **kw): + # type: (Any) 
-> Any + if 'rt' in self.typ: + from ...ruamel.yaml.comments import CommentedMap + + return CommentedMap(**kw) + else: + return dict(**kw) + + def seq(self, *args): + # type: (Any) -> Any + if 'rt' in self.typ: + from ...ruamel.yaml.comments import CommentedSeq + + return CommentedSeq(*args) + else: + return list(*args) + + # helpers + def official_plug_ins(self): + # type: () -> Any + bd = os.path.dirname(__file__) + gpbd = os.path.dirname(os.path.dirname(bd)) + res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')] + return res + + def register_class(self, cls): + # type:(Any) -> Any + """ + register a class for dumping loading + - if it has attribute yaml_tag use that to register, else use class name + - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes + as mapping + """ + tag = getattr(cls, 'yaml_tag', '!' + cls.__name__) + try: + self.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + self.representer.add_representer(cls, t_y) + try: + self.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + self.constructor.add_constructor(tag, f_y) + return cls + + def parse(self, stream): + # type: (StreamTextType) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + _, parser = self.get_constructor_parser(stream) + try: + while parser.check_event(): + yield parser.get_event() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + # ### context manager + + def __enter__(self): + # type: () -> Any + self._context_manager = YAMLContextManager(self) + return self + + def __exit__(self, typ, value, traceback): + # type: (Any, Any, Any) -> None + if typ: + nprint('typ', typ) + self._context_manager.teardown_output() + # self._context_manager.teardown_input() + self._context_manager = None + + # ### backwards compatibility + def _indent(self, mapping=None, sequence=None, offset=None): + # type: (Any, Any, Any) -> None + if mapping is not None: + self.map_indent = mapping + if sequence is not None: + self.sequence_indent = sequence + if offset is not None: + self.sequence_dash_offset = offset + + @property + def indent(self): + # type: () -> Any + return self._indent + + @indent.setter + def indent(self, val): + # type: (Any) -> None + self.old_indent = val + + @property + def block_seq_indent(self): + # type: () -> Any + return self.sequence_dash_offset + + @block_seq_indent.setter + def block_seq_indent(self, val): + # type: (Any) -> None + self.sequence_dash_offset = val + + def compact(self, seq_seq=None, seq_map=None): + # type: (Any, Any) -> None + self.compact_seq_seq = seq_seq + self.compact_seq_map = seq_map + + +class YAMLContextManager(object): + def __init__(self, yaml, transform=None): + # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None + self._yaml = yaml + self._output_inited = False + self._output_path = None + self._output = self._yaml._output + self._transform = transform + + # self._input_inited = False + # self._input = input + # self._input_path = None + # self._transform = yaml.transform + # self._fstream = None + + if not hasattr(self._output, 
'write') and hasattr(self._output, 'open'): + # pathlib.Path() instance, open with the same mode + self._output_path = self._output + self._output = self._output_path.open('w') + + # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'): + # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'): + # # pathlib.Path() instance, open with the same mode + # self._input_path = self._input + # self._input = self._input_path.open('r') + + if self._transform is not None: + self._fstream = self._output + if self._yaml.encoding is None: + self._output = StringIO() + else: + self._output = BytesIO() + + def teardown_output(self): + # type: () -> None + if self._output_inited: + self._yaml.serializer.close() + else: + return + try: + self._yaml.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + try: + delattr(self._yaml, '_serializer') + delattr(self._yaml, '_emitter') + except AttributeError: + raise + if self._transform: + val = self._output.getvalue() + if self._yaml.encoding: + val = val.decode(self._yaml.encoding) + if self._fstream is None: + self._transform(val) + else: + self._fstream.write(self._transform(val)) + self._fstream.flush() + self._output = self._fstream # maybe not necessary + if self._output_path is not None: + self._output.close() + + def init_output(self, first_data): + # type: (Any) -> None + if self._yaml.top_level_colon_align is True: + tlca = max([len(str(x)) for x in first_data]) # type: Any + else: + tlca = self._yaml.top_level_colon_align + self._yaml.get_serializer_representer_emitter(self._output, tlca) + self._yaml.serializer.open() + self._output_inited = True + + def dump(self, data): + # type: (Any) -> None + if not self._output_inited: + self.init_output(data) + try: + self._yaml.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + + # def teardown_input(self): + # pass + # + # def init_input(self): + # # set the constructor and parser on YAML() instance + # self._yaml.get_constructor_parser(stream) + # + # def load(self): + # if not self._input_inited: + # self.init_input() + # try: + # while self._yaml.constructor.check_data(): + # yield self._yaml.constructor.get_data() + # finally: + # parser.dispose() + # try: + # self._reader.reset_reader() # type: ignore + # except AttributeError: + # pass + # try: + # self._scanner.reset_scanner() # type: ignore + # except AttributeError: + # pass + + +def yaml_object(yml): + # type: (Any) -> Any + """ decorator for classes that needs to dump/load objects + The tag for such objects is taken from the class attribute yaml_tag (or the + class name in lowercase in case unavailable) + If methods to_yaml and/or from_yaml are available, these are called for dumping resp. + loading, default routines (dumping a mapping of the attributes) used otherwise. + """ + + def yo_deco(cls): + # type: (Any) -> Any + tag = getattr(cls, 'yaml_tag', '!' 
+ cls.__name__) + try: + yml.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + yml.representer.add_representer(cls, t_y) + try: + yml.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + yml.constructor.add_constructor(tag, f_y) + return cls + + return yo_deco + + +######################################################################################## + + +def scan(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.scanner.check_token(): + yield loader.scanner.get_token() + finally: + loader._parser.dispose() + + +def parse(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader._parser.check_event(): + yield loader._parser.get_event() + finally: + loader._parser.dispose() + + +def compose(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + + +def compose_all(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader._composer.get_node() + finally: + loader._parser.dispose() + + +def load(stream, Loader=None, version=None, preserve_quotes=None): + # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) + try: + return loader._constructor.get_single_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def load_all(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any # NOQA + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) + try: + while loader._constructor.check_data(): + yield loader._constructor.get_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def safe_load(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. 
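+
+    For example (illustrative; assumes the package is importable as
+    ``ruamel.yaml``)::
+
+        safe_load("a: 1")  # -> {'a': 1}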
+ """ + return load(stream, SafeLoader, version) + + +def safe_load_all(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader, version) + + +def round_trip_load(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def round_trip_load_all(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def emit( + events, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, +): + # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + ) + try: + for event in events: + dumper.emit(event) + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +enc = None if PY3 else 'utf-8' + + +def serialize_all( + nodes, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, +): + # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + version=version, + tags=tags, + explicit_start=explicit_start, + explicit_end=explicit_end, + ) + try: + dumper._serializer.open() + for node in nodes: + dumper.serialize(node) + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + # type: (Any, Optional[StreamType], Any, Any) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. 
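+
+    Illustrative sketch; the node argument would typically come from
+    ``compose()`` defined above::
+
+        serialize(compose("a: 1"))  # -> roughly 'a: 1\n'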
+ """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + + +def dump_all( + documents, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str] # NOQA + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if top_level_colon_align is True: + top_level_colon_align = max([len(str(x)) for x in documents[0]]) + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + try: + dumper._serializer.open() + for data in documents: + try: + dumper._representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + return None + + +def dump( + data, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str] # NOQA + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + + default_style ∈ None, '', '"', "'", '|', '>' + + """ + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + ) + + +def safe_dump_all(documents, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[str] + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + + +def safe_dump(data, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[str] + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. 
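+
+    For example (illustrative)::
+
+        safe_dump({'a': 1})  # -> 'a: 1\n'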
+ """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + + +def round_trip_dump( + data, + stream=None, + Dumper=RoundTripDumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str] # NOQA + allow_unicode = True if allow_unicode is None else allow_unicode + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + + +# Loader/Dumper are no longer composites, to get to the associated +# Resolver()/Representer(), etc., you need to instantiate the class + + +def add_implicit_resolver( + tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver +): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + if Loader is None and Dumper is None: + resolver.add_implicit_resolver(tag, regexp, first) + return + if Loader: + if hasattr(Loader, 'add_implicit_resolver'): + Loader.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + if Dumper: + if hasattr(Dumper, 'add_implicit_resolver'): + Dumper.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + + +# this code currently not tested +def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. 
+    """
+    if Loader is None and Dumper is None:
+        resolver.add_path_resolver(tag, path, kind)
+        return
+    if Loader:
+        if hasattr(Loader, 'add_path_resolver'):
+            Loader.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+    if Dumper:
+        if hasattr(Dumper, 'add_path_resolver'):
+            Dumper.add_path_resolver(tag, path, kind)
+        elif issubclass(
+            Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+        ):
+            Resolver.add_path_resolver(tag, path, kind)
+        else:
+            raise NotImplementedError
+
+
+def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add an object constructor for the given tag.
+    object_constructor is a function that accepts a Loader instance
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_constructor(tag, object_constructor)
+    else:
+        if hasattr(Loader, 'add_constructor'):
+            Loader.add_constructor(tag, object_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, ruamel.yaml.loader.Loader):
+            Constructor.add_constructor(tag, object_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_constructor(tag, object_constructor)
+        else:
+            raise NotImplementedError
+
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a multi-constructor for the given tag prefix.
+    Multi-constructor is called for a node if its tag starts with tag_prefix.
+    Multi-constructor accepts a Loader instance, a tag suffix,
+    and a node object and produces the corresponding Python object.
+    """
+    if Loader is None:
+        constructor.add_multi_constructor(tag_prefix, multi_constructor)
+    else:
+        if False and hasattr(Loader, 'add_multi_constructor'):
+            # this branch is disabled (note the ``if False``); kept for
+            # reference only
+            Loader.add_multi_constructor(tag_prefix, multi_constructor)
+            return
+        if issubclass(Loader, BaseLoader):
+            BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, SafeLoader):
+            SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, ruamel.yaml.loader.Loader):
+            Constructor.add_multi_constructor(tag_prefix, multi_constructor)
+        elif issubclass(Loader, RoundTripLoader):
+            RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor)
+        else:
+            raise NotImplementedError
+
+
+def add_representer(data_type, object_representer, Dumper=None, representer=Representer):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a representer for the given type.
+    object_representer is a function accepting a Dumper instance
+    and an instance of the given data type
+    and producing the corresponding representation node.
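+
+    An illustrative sketch (``Point`` stands for any user-defined class)::
+
+        def point_representer(dumper, data):
+            return dumper.represent_scalar(u'!point', u'%s,%s' % (data.x, data.y))
+
+        add_representer(Point, point_representer)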
+    """
+    if Dumper is None:
+        representer.add_representer(data_type, object_representer)
+    else:
+        if hasattr(Dumper, 'add_representer'):
+            Dumper.add_representer(data_type, object_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+            Representer.add_representer(data_type, object_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_representer(data_type, object_representer)
+        else:
+            raise NotImplementedError
+
+
+# this code currently not tested
+def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer):
+    # type: (Any, Any, Any, Any) -> None
+    """
+    Add a multi-representer for the given type.
+    multi_representer is a function accepting a Dumper instance
+    and an instance of the given data type or subtype
+    and producing the corresponding representation node.
+    """
+    if Dumper is None:
+        representer.add_multi_representer(data_type, multi_representer)
+    else:
+        if hasattr(Dumper, 'add_multi_representer'):
+            Dumper.add_multi_representer(data_type, multi_representer)
+            return
+        if issubclass(Dumper, BaseDumper):
+            BaseRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, SafeDumper):
+            SafeRepresenter.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, ruamel.yaml.dumper.Dumper):
+            Representer.add_multi_representer(data_type, multi_representer)
+        elif issubclass(Dumper, RoundTripDumper):
+            RoundTripRepresenter.add_multi_representer(data_type, multi_representer)
+        else:
+            raise NotImplementedError
+
+
+class YAMLObjectMetaclass(type):
+    """
+    The metaclass for YAMLObject.
+    """
+
+    def __init__(cls, name, bases, kwds):
+        # type: (Any, Any, Any) -> None
+        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+            cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml)  # type: ignore
+            cls.yaml_representer.add_representer(cls, cls.to_yaml)  # type: ignore
+
+
+class YAMLObject(with_metaclass(YAMLObjectMetaclass)):  # type: ignore
+    """
+    An object that can dump itself to a YAML stream
+    and load itself from a YAML stream.
+    """
+
+    __slots__ = ()  # no direct instantiation, so allow immutable subclasses
+
+    yaml_constructor = Constructor
+    yaml_representer = Representer
+
+    yaml_tag = None  # type: Any
+    yaml_flow_style = None  # type: Any
+
+    @classmethod
+    def from_yaml(cls, constructor, node):
+        # type: (Any, Any) -> Any
+        """
+        Convert a representation node to a Python object.
+        """
+        return constructor.construct_yaml_object(node, cls)
+
+    @classmethod
+    def to_yaml(cls, representer, data):
+        # type: (Any, Any) -> Any
+        """
+        Convert a Python object to a representation node.
+ """ + return representer.represent_yaml_object( + cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style + ) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py new file mode 100644 index 000000000..da86e9c85 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py @@ -0,0 +1,131 @@ +# coding: utf-8 + +from __future__ import print_function + +import sys +from .compat import string_types + +if False: # MYPY + from typing import Dict, Any, Text # NOQA + + +class Node(object): + __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor' + + def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.comment = comment + self.anchor = anchor + + def __repr__(self): + # type: () -> str + value = self.value + # if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + # else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + + def dump(self, indent=0): + # type: (int) -> None + if isinstance(self.value, string_types): + sys.stdout.write( + '{}{}(tag={!r}, value={!r})\n'.format( + ' ' * indent, self.__class__.__name__, self.tag, self.value + ) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + return + sys.stdout.write( + '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + for v in self.value: + if isinstance(v, tuple): + for v1 in v: + v1.dump(indent + 1) + elif isinstance(v, Node): + v.dump(indent + 1) + else: + sys.stdout.write('Node value type? {}\n'.format(type(v))) + + +class ScalarNode(Node): + """ + styles: + ? -> set() ? 
key, no value + " -> double quoted + ' -> single quoted + | -> literal style + > -> folding style + """ + + __slots__ = ('style',) + id = 'scalar' + + def __init__( + self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor) + self.style = style + + +class CollectionNode(Node): + __slots__ = ('flow_style',) + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment) + self.flow_style = flow_style + self.anchor = anchor + + +class SequenceNode(CollectionNode): + __slots__ = () + id = 'sequence' + + +class MappingNode(CollectionNode): + __slots__ = ('merge',) + id = 'mapping' + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + CollectionNode.__init__( + self, tag, value, start_mark, end_mark, flow_style, comment, anchor + ) + self.merge = None diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py new file mode 100644 index 000000000..49bfbc09d --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py @@ -0,0 +1,802 @@ +# coding: utf-8 + +from __future__ import absolute_import + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* +# STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | +# indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* +# BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START
+#               BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START
+#                   FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR
+#               BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START
+#               FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                        FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START
+#                       FLOW-MAPPING-START KEY }
+
+# need to use the full path in the import, as pkg_resources tries to load
+# parser.py in __init__.py only to do nothing with the package afterwards;
+# the same is needed for Jython
+
+
+from ...ruamel.yaml.error import MarkedYAMLError
+from ...ruamel.yaml.tokens import *  # NOQA
+from ...ruamel.yaml.events import *  # NOQA
+from ...ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError  # NOQA
+from ...ruamel.yaml.compat import utf8, nprint, nprintf  # NOQA
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+__all__ = ['Parser', 'RoundTripParser', 'ParserError']
+
+
+class ParserError(MarkedYAMLError):
+    pass
+
+
+class Parser(object):
+    # Since writing a recursive-descent parser is a straightforward task, we
+    # do not give many comments here.
+
+    DEFAULT_TAGS = {u'!': u'!', u'!!': u'tag:yaml.org,2002:'}
+
+    def __init__(self, loader):
+        # type: (Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, '_parser', None) is None:
+            self.loader._parser = self
+        self.reset_parser()
+
+    def reset_parser(self):
+        # type: () -> None
+        # Reset the state attributes (to clear self-references)
+        self.current_event = None
+        self.tag_handles = {}  # type: Dict[Any, Any]
+        self.states = []  # type: List[Any]
+        self.marks = []  # type: List[Any]
+        self.state = self.parse_stream_start  # type: Any
+
+    def dispose(self):
+        # type: () -> None
+        self.reset_parser()
+
+    @property
+    def scanner(self):
+        # type: () -> Any
+        if hasattr(self.loader, 'typ'):
+            return self.loader.scanner
+        return self.loader._scanner
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.loader, 'typ'):
+            return self.loader.resolver
+        return self.loader._resolver
+
+    def check_event(self, *choices):
+        # type: (Any) -> bool
+        # Check the type of the next event.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        if self.current_event is not None:
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.current_event, choice):
+                    return True
+        return False
+
+    def peek_event(self):
+        # type: () -> Any
+        # Get the next event.
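+        # Peeking does not consume the event: repeated calls return the same
+        # event until get_event() below retrieves it and resets
+        # self.current_event.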
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        return self.current_event
+
+    def get_event(self):
+        # type: () -> Any
+        # Get the next event and proceed further.
+        if self.current_event is None:
+            if self.state:
+                self.current_event = self.state()
+        value = self.current_event
+        self.current_event = None
+        return value
+
+    # stream    ::= STREAM-START implicit_document? explicit_document*
+    #               STREAM-END
+    # implicit_document ::= block_node DOCUMENT-END*
+    # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+    def parse_stream_start(self):
+        # type: () -> Any
+        # Parse the stream start.
+        token = self.scanner.get_token()
+        token.move_comment(self.scanner.peek_token())
+        event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
+
+        # Prepare the next state.
+        self.state = self.parse_implicit_document_start
+
+        return event
+
+    def parse_implicit_document_start(self):
+        # type: () -> Any
+        # Parse an implicit document.
+        if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
+            self.tag_handles = self.DEFAULT_TAGS
+            token = self.scanner.peek_token()
+            start_mark = end_mark = token.start_mark
+            event = DocumentStartEvent(start_mark, end_mark, explicit=False)
+
+            # Prepare the next state.
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_block_node
+
+            return event
+
+        else:
+            return self.parse_document_start()
+
+    def parse_document_start(self):
+        # type: () -> Any
+        # Parse any extra document end indicators.
+        while self.scanner.check_token(DocumentEndToken):
+            self.scanner.get_token()
+        # Parse an explicit document.
+        if not self.scanner.check_token(StreamEndToken):
+            token = self.scanner.peek_token()
+            start_mark = token.start_mark
+            version, tags = self.process_directives()
+            if not self.scanner.check_token(DocumentStartToken):
+                raise ParserError(
+                    None,
+                    None,
+                    "expected '<document start>', but found %r" % self.scanner.peek_token().id,
+                    self.scanner.peek_token().start_mark,
+                )
+            token = self.scanner.get_token()
+            end_mark = token.end_mark
+            # if self.loader is not None and \
+            #    end_mark.line != self.scanner.peek_token().start_mark.line:
+            #     self.loader.scalar_after_indicator = False
+            event = DocumentStartEvent(
+                start_mark, end_mark, explicit=True, version=version, tags=tags
+            )  # type: Any
+            self.states.append(self.parse_document_end)
+            self.state = self.parse_document_content
+        else:
+            # Parse the end of the stream.
+            token = self.scanner.get_token()
+            event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+            assert not self.states
+            assert not self.marks
+            self.state = None
+        return event
+
+    def parse_document_end(self):
+        # type: () -> Any
+        # Parse the document end.
+        token = self.scanner.peek_token()
+        start_mark = end_mark = token.start_mark
+        explicit = False
+        if self.scanner.check_token(DocumentEndToken):
+            token = self.scanner.get_token()
+            end_mark = token.end_mark
+            explicit = True
+        event = DocumentEndEvent(start_mark, end_mark, explicit=explicit)
+
+        # Prepare the next state.
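+        # Which state follows depends on the YAML version in effect: a 1.1
+        # stream expects an explicit document start next, while later
+        # versions may also begin another implicit document.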
+ if self.resolver.processing_version == (1, 1): + self.state = self.parse_document_start + else: + self.state = self.parse_implicit_document_start + + return event + + def parse_document_content(self): + # type: () -> Any + if self.scanner.check_token( + DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken + ): + event = self.process_empty_scalar(self.scanner.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + # type: () -> Any + yaml_version = None + self.tag_handles = {} + while self.scanner.check_token(DirectiveToken): + token = self.scanner.get_token() + if token.name == u'YAML': + if yaml_version is not None: + raise ParserError( + None, None, 'found duplicate YAML directive', token.start_mark + ) + major, minor = token.value + if major != 1: + raise ParserError( + None, + None, + 'found incompatible YAML document (version 1.* is ' 'required)', + token.start_mark, + ) + yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError( + None, None, 'duplicate tag handle %r' % utf8(handle), token.start_mark + ) + self.tag_handles[handle] = prefix + if bool(self.tag_handles): + value = yaml_version, self.tag_handles.copy() # type: Any + else: + value = yaml_version, None + if self.loader is not None and hasattr(self.loader, 'tags'): + self.loader.version = yaml_version + if self.loader.tags is None: + self.loader.tags = {} + for k in self.tag_handles: + self.loader.tags[k] = self.tag_handles[k] + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + # type: () -> Any + return self.parse_node(block=True) + + def parse_flow_node(self): + # type: () -> Any + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + # type: () -> Any + return self.parse_node(block=True, indentless_sequence=True) + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + return self.tag_handles[handle] + suffix + + def parse_node(self, block=False, indentless_sequence=False): + # type: (bool, bool) -> Any + if self.scanner.check_token(AliasToken): + token = self.scanner.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) # type: Any + self.state = self.states.pop() + return event + + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.scanner.check_token(TagToken): + token = self.scanner.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.scanner.check_token(TagToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError( + 'while parsing a node', + start_mark, + 'found undefined tag handle %r' % utf8(handle), + tag_mark, + ) + tag = self.transform_tag(handle, suffix) + else: + tag = suffix + # if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' + # and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.scanner.peek_token().start_mark + event = None + implicit = tag is None or tag == u'!' 
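+        # Dispatch on the next token: each branch below emits the matching
+        # start event (scalar, flow/block sequence or mapping) and sets the
+        # parser state for the corresponding content.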
+ if indentless_sequence and self.scanner.check_token(BlockEntryToken): + comment = None + pt = self.scanner.peek_token() + if pt.comment and pt.comment[0]: + comment = [pt.comment[0], []] + pt.comment[0] = None + end_mark = self.scanner.peek_token().end_mark + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_indentless_sequence_entry + return event + + if self.scanner.check_token(ScalarToken): + token = self.scanner.get_token() + # self.scanner.peek_token_same_line_comment(token) + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + # nprint('se', token.value, token.comment) + event = ScalarEvent( + anchor, + tag, + implicit, + token.value, + start_mark, + end_mark, + style=token.style, + comment=token.comment, + ) + self.state = self.states.pop() + elif self.scanner.check_token(FlowSequenceStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_sequence_first_entry + elif self.scanner.check_token(FlowMappingStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = MappingStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_mapping_first_key + elif block and self.scanner.check_token(BlockSequenceStartToken): + end_mark = self.scanner.peek_token().start_mark + # should inserting the comment be dependent on the + # indentation? + pt = self.scanner.peek_token() + comment = pt.comment + # nprint('pt0', type(pt)) + if comment is None or comment[1] is None: + comment = pt.split_comment() + # nprint('pt1', comment) + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_sequence_first_entry + elif block and self.scanner.check_token(BlockMappingStartToken): + end_mark = self.scanner.peek_token().start_mark + comment = self.scanner.peek_token().comment + event = MappingStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
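+            # (e.g. ``key: !!str`` or ``key: &anchor`` with no value), so an
+            # empty scalar event is emitted here.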
+            event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
+            self.state = self.states.pop()
+        else:
+            if block:
+                node = 'block'
+            else:
+                node = 'flow'
+            token = self.scanner.peek_token()
+            raise ParserError(
+                'while parsing a %s node' % node,
+                start_mark,
+                'expected the node content, but found %r' % token.id,
+                token.start_mark,
+            )
+        return event
+
+    # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
+    #                    BLOCK-END
+
+    def parse_block_sequence_first_entry(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        # move any comment from start token
+        # token.move_comment(self.scanner.peek_token())
+        self.marks.append(token.start_mark)
+        return self.parse_block_sequence_entry()
+
+    def parse_block_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(BlockEntryToken, BlockEndToken):
+                self.states.append(self.parse_block_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_block_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                'while parsing a block collection',
+                self.marks[-1],
+                'expected <block end>, but found %r' % token.id,
+                token.start_mark,
+            )
+        token = self.scanner.get_token()  # BlockEndToken
+        event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+    # indentless_sequence?
+    # sequence:
+    # - entry
+    # - nested
+
+    def parse_indentless_sequence_entry(self):
+        # type: () -> Any
+        if self.scanner.check_token(BlockEntryToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(
+                BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+            ):
+                self.states.append(self.parse_indentless_sequence_entry)
+                return self.parse_block_node()
+            else:
+                self.state = self.parse_indentless_sequence_entry
+                return self.process_empty_scalar(token.end_mark)
+        token = self.scanner.peek_token()
+        event = SequenceEndEvent(token.start_mark, token.start_mark, comment=token.comment)
+        self.state = self.states.pop()
+        return event
+
+    # block_mapping ::= BLOCK-MAPPING_START
+    #                   ((KEY block_node_or_indentless_sequence?)?
+    #                   (VALUE block_node_or_indentless_sequence?)?)*
+    #                   BLOCK-END
+
+    def parse_block_mapping_first_key(self):
+        # type: () -> Any
+        token = self.scanner.get_token()
+        self.marks.append(token.start_mark)
+        return self.parse_block_mapping_key()
+
+    def parse_block_mapping_key(self):
+        # type: () -> Any
+        if self.scanner.check_token(KeyToken):
+            token = self.scanner.get_token()
+            token.move_comment(self.scanner.peek_token())
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_value)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_value
+                return self.process_empty_scalar(token.end_mark)
+        if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken):
+            self.state = self.parse_block_mapping_value
+            return self.process_empty_scalar(self.scanner.peek_token().start_mark)
+        if not self.scanner.check_token(BlockEndToken):
+            token = self.scanner.peek_token()
+            raise ParserError(
+                'while parsing a block mapping',
+                self.marks[-1],
+                'expected <block end>, but found %r' % token.id,
+                token.start_mark,
+            )
+        token = self.scanner.get_token()
+        token.move_comment(self.scanner.peek_token())
+        event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
+        self.state = self.states.pop()
+        self.marks.pop()
+        return event
+
+    def parse_block_mapping_value(self):
+        # type: () -> Any
+        if self.scanner.check_token(ValueToken):
+            token = self.scanner.get_token()
+            # the value token might carry a post comment; move it to e.g. the
+            # following block
+            if self.scanner.check_token(ValueToken):
+                token.move_comment(self.scanner.peek_token())
+            else:
+                if not self.scanner.check_token(KeyToken):
+                    token.move_comment(self.scanner.peek_token(), empty=True)
+                # else: empty value for this key cannot move token.comment
+            if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken):
+                self.states.append(self.parse_block_mapping_key)
+                return self.parse_block_node_or_indentless_sequence()
+            else:
+                self.state = self.parse_block_mapping_key
+                comment = token.comment
+                if comment is None:
+                    token = self.scanner.peek_token()
+                    comment = token.comment
+                    if comment:
+                        token._comment = [None, comment[1]]
+                        comment = [comment[0], None]
+                return self.process_empty_scalar(token.end_mark, comment=comment)
+        else:
+            self.state = self.parse_block_mapping_key
+            token = self.scanner.peek_token()
+            return self.process_empty_scalar(token.start_mark)
+
+    # flow_sequence     ::= FLOW-SEQUENCE-START
+    #                       (flow_sequence_entry FLOW-ENTRY)*
+    #                       flow_sequence_entry?
+    #                       FLOW-SEQUENCE-END
+    # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+    #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are equal, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
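+    #
+    # For example, the flow sequence ``[a, b: c]`` yields a two-item
+    # sequence whose second item is the single-pair mapping ``{b: c}``.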
+ + def parse_flow_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + # type: (bool) -> Any + if not self.scanner.check_token(FlowSequenceEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow sequence', + self.marks[-1], + "expected ',' or ']', but got %r" % token.id, + token.start_mark, + ) + + if self.scanner.check_token(KeyToken): + token = self.scanner.peek_token() + event = MappingStartEvent( + None, None, True, token.start_mark, token.end_mark, flow_style=True + ) # type: Any + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.scanner.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.scanner.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + # type: () -> Any + token = self.scanner.get_token() + if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + # type: () -> Any + self.state = self.parse_flow_sequence_entry + token = self.scanner.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
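+    #
+    # For example, ``{a: 1, b}`` is accepted: key ``b`` gets an empty value
+    # via parse_flow_mapping_empty_value() below.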
+ + def parse_flow_mapping_first_key(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + # type: (Any) -> Any + if not self.scanner.check_token(FlowMappingEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow mapping', + self.marks[-1], + "expected ',' or '}', but got %r" % token.id, + token.start_mark, + ) + if self.scanner.check_token(KeyToken): + token = self.scanner.get_token() + if not self.scanner.check_token( + ValueToken, FlowEntryToken, FlowMappingEndToken + ): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif self.resolver.processing_version > (1, 1) and self.scanner.check_token( + ValueToken + ): + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(self.scanner.peek_token().end_mark) + elif not self.scanner.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.scanner.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + # type: () -> Any + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.scanner.peek_token().start_mark) + + def process_empty_scalar(self, mark, comment=None): + # type: (Any, Any) -> Any + return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment) + + +class RoundTripParser(Parser): + """roundtrip is a safe loader, that wants to see the unmangled tag""" + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + # return self.tag_handles[handle]+suffix + if handle == '!!' 
and suffix in (
+            u'null',
+            u'bool',
+            u'int',
+            u'float',
+            u'binary',
+            u'timestamp',
+            u'omap',
+            u'pairs',
+            u'set',
+            u'str',
+            u'seq',
+            u'map',
+        ):
+            return Parser.transform_tag(self, handle, suffix)
+        return handle + suffix
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/py.typed b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/py.typed
new file mode 100644
index 000000000..e69de29bb
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py
new file mode 100644
index 000000000..62c7c475b
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py
@@ -0,0 +1,311 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(length=1) - return the next `length` characters
+# reader.forward(length=1) - move the current position `length`
+# characters forward.
+# reader.index - the number of the current character.
+# reader.line, reader.column - the line and the column of the current
+# character.
+
+import codecs
+
+from ...ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError
+from ...ruamel.yaml.compat import text_type, binary_type, PY3, UNICODE_SIZE
+from ...ruamel.yaml.util import RegExp
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Union, Text, Tuple  # NOQA
+# from ruamel.yaml.compat import StreamTextType  # NOQA
+
+__all__ = ['Reader', 'ReaderError']
+
+
+class ReaderError(YAMLError):
+    def __init__(self, name, position, character, encoding, reason):
+        # type: (Any, Any, Any, Any, Any) -> None
+        self.name = name
+        self.character = character
+        self.position = position
+        self.encoding = encoding
+        self.reason = reason
+
+    def __str__(self):
+        # type: () -> str
+        if isinstance(self.character, binary_type):
+            return "'%s' codec can't decode byte #x%02x: %s\n" ' in "%s", position %d' % (
+                self.encoding,
+                ord(self.character),
+                self.reason,
+                self.name,
+                self.position,
+            )
+        else:
+            return 'unacceptable character #x%04x: %s\n' ' in "%s", position %d' % (
+                self.character,
+                self.reason,
+                self.name,
+                self.position,
+            )
+
+
+class Reader(object):
+    # Reader:
+    # - determines the data encoding and converts it to a unicode string,
+    # - checks if characters are in allowed range,
+    # - adds '\0' to the end.
+
+    # Reader accepts
+    #  - a `str` object (PY2) / a `bytes` object (PY3),
+    #  - a `unicode` object (PY2) / a `str` object (PY3),
+    #  - a file-like object with its `read` method returning `str`,
+    #  - a file-like object with its `read` method returning `unicode`.
+
+    # Yeah, it's ugly and slow.
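+
+    # A rough usage sketch (illustrative only):
+    #
+    #     r = Reader(u"a: 1\n")
+    #     while r.peek() != '\0':
+    #         r.forward()
+    #     mark = r.get_mark()  # position info for error messages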
+
+    def __init__(self, stream, loader=None):
+        # type: (Any, Any) -> None
+        self.loader = loader
+        if self.loader is not None and getattr(self.loader, '_reader', None) is None:
+            self.loader._reader = self
+        self.reset_reader()
+        self.stream = stream  # type: Any  # as .read is called
+
+    def reset_reader(self):
+        # type: () -> None
+        self.name = None  # type: Any
+        self.stream_pointer = 0
+        self.eof = True
+        self.buffer = ""
+        self.pointer = 0
+        self.raw_buffer = None  # type: Any
+        self.raw_decode = None
+        self.encoding = None  # type: Optional[Text]
+        self.index = 0
+        self.line = 0
+        self.column = 0
+
+    @property
+    def stream(self):
+        # type: () -> Any
+        try:
+            return self._stream
+        except AttributeError:
+            raise YAMLStreamError('input stream needs to be specified')
+
+    @stream.setter
+    def stream(self, val):
+        # type: (Any) -> None
+        if val is None:
+            return
+        self._stream = None
+        if isinstance(val, text_type):
+            self.name = '<unicode string>'
+            self.check_printable(val)
+            self.buffer = val + u'\0'  # type: ignore
+        elif isinstance(val, binary_type):
+            self.name = '<byte string>'
+            self.raw_buffer = val
+            self.determine_encoding()
+        else:
+            if not hasattr(val, 'read'):
+                raise YAMLStreamError('stream argument needs to have a read() method')
+            self._stream = val
+            self.name = getattr(self.stream, 'name', '<file>')
+            self.eof = False
+            self.raw_buffer = None
+            self.determine_encoding()
+
+    def peek(self, index=0):
+        # type: (int) -> Text
+        try:
+            return self.buffer[self.pointer + index]
+        except IndexError:
+            self.update(index + 1)
+            return self.buffer[self.pointer + index]
+
+    def prefix(self, length=1):
+        # type: (int) -> Any
+        if self.pointer + length >= len(self.buffer):
+            self.update(length)
+        return self.buffer[self.pointer : self.pointer + length]
+
+    def forward_1_1(self, length=1):
+        # type: (int) -> None
+        if self.pointer + length + 1 >= len(self.buffer):
+            self.update(length + 1)
+        while length != 0:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch in u'\n\x85\u2028\u2029' or (
+                ch == u'\r' and self.buffer[self.pointer] != u'\n'
+            ):
+                self.line += 1
+                self.column = 0
+            elif ch != u'\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def forward(self, length=1):
+        # type: (int) -> None
+        if self.pointer + length + 1 >= len(self.buffer):
+            self.update(length + 1)
+        while length != 0:
+            ch = self.buffer[self.pointer]
+            self.pointer += 1
+            self.index += 1
+            if ch == u'\n' or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+                self.line += 1
+                self.column = 0
+            elif ch != u'\uFEFF':
+                self.column += 1
+            length -= 1
+
+    def get_mark(self):
+        # type: () -> Any
+        if self.stream is None:
+            return StringMark(
+                self.name, self.index, self.line, self.column, self.buffer, self.pointer
+            )
+        else:
+            return FileMark(self.name, self.index, self.line, self.column)
+
+    def determine_encoding(self):
+        # type: () -> None
+        while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+            self.update_raw()
+        if isinstance(self.raw_buffer, binary_type):
+            if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+                self.raw_decode = codecs.utf_16_le_decode  # type: ignore
+                self.encoding = 'utf-16-le'
+            elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+                self.raw_decode = codecs.utf_16_be_decode  # type: ignore
+                self.encoding = 'utf-16-be'
+            else:
+                self.raw_decode = codecs.utf_8_decode  # type: ignore
+                self.encoding = 'utf-8'
+        self.update(1)
+
+    if UNICODE_SIZE == 2:
+        NON_PRINTABLE = RegExp(
+            u'[^\x09\x0A\x0D\x20-\x7E\x85' u'\xA0-\uD7FF' u'\uE000-\uFFFD' u']'
+        )
+    else:
+        NON_PRINTABLE = RegExp(
+ u'[^\x09\x0A\x0D\x20-\x7E\x85' + u'\xA0-\uD7FF' + u'\uE000-\uFFFD' + u'\U00010000-\U0010FFFF' + u']' + ) + + _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii') + + @classmethod + def _get_non_printable_ascii(cls, data): # type: ignore + # type: (Text, bytes) -> Optional[Tuple[int, Text]] + ascii_bytes = data.encode('ascii') + non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore + if not non_printables: + return None + non_printable = non_printables[:1] + return ascii_bytes.index(non_printable), non_printable.decode('ascii') + + @classmethod + def _get_non_printable_regex(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + match = cls.NON_PRINTABLE.search(data) + if not bool(match): + return None + return match.start(), match.group() + + @classmethod + def _get_non_printable(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + try: + return cls._get_non_printable_ascii(data) # type: ignore + except UnicodeEncodeError: + return cls._get_non_printable_regex(data) + + def check_printable(self, data): + # type: (Any) -> None + non_printable_match = self._get_non_printable(data) + if non_printable_match is not None: + start, character = non_printable_match + position = self.index + (len(self.buffer) - self.pointer) + start + raise ReaderError( + self.name, + position, + ord(character), + 'unicode', + 'special characters are not allowed', + ) + + def update(self, length): + # type: (int) -> None + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer :] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof) + except UnicodeDecodeError as exc: + if PY3: + character = self.raw_buffer[exc.start] + else: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) + exc.start + elif self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) + exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=None): + # type: (Optional[int]) -> None + if size is None: + size = 4096 if PY3 else 1024 + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True + + +# try: +# import psyco +# psyco.bind(Reader) +# except ImportError: +# pass diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py new file mode 100644 index 000000000..880f4f74e --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py @@ -0,0 +1,1282 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division + + +from ...ruamel.yaml.error import * # NOQA +from ...ruamel.yaml.nodes import * # NOQA +from ...ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3 +from ...ruamel.yaml.compat import ordereddict # type: ignore +from 
...ruamel.yaml.compat import nprint, nprintf # NOQA +from ...ruamel.yaml.scalarstring import ( + LiteralScalarString, + FoldedScalarString, + SingleQuotedScalarString, + DoubleQuotedScalarString, + PlainScalarString, +) +from ...ruamel.yaml.comments import ( + CommentedMap, + CommentedOrderedMap, + CommentedSeq, + CommentedKeySeq, + CommentedKeyMap, + CommentedSet, + comment_attrib, + merge_attrib, + TaggedScalar, +) +from ...ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from ...ruamel.yaml.scalarfloat import ScalarFloat +from ...ruamel.yaml.scalarbool import ScalarBoolean +from ...ruamel.yaml.timestamp import TimeStamp + +import datetime +import sys +import types + +if PY3: + import copyreg + import base64 +else: + import copy_reg as copyreg # type: ignore + +if False: # MYPY + from typing import Dict, List, Any, Union, Text, Optional # NOQA + +# fmt: off +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError', 'RoundTripRepresenter'] +# fmt: on + + +class RepresenterError(YAMLError): + pass + + +if PY2: + + def get_classobj_bases(cls): + # type: (Any) -> Any + bases = [cls] + for base in cls.__bases__: + bases.extend(get_classobj_bases(base)) + return bases + + +class BaseRepresenter(object): + + yaml_representers = {} # type: Dict[Any, Any] + yaml_multi_representers = {} # type: Dict[Any, Any] + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any, Any) -> None + self.dumper = dumper + if self.dumper is not None: + self.dumper._representer = self + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} # type: Dict[Any, Any] + self.object_keeper = [] # type: List[Any] + self.alias_key = None # type: Optional[int] + self.sort_base_mapping_type_on_output = True + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer + return self.dumper._serializer + except AttributeError: + return self # cyaml + + def represent(self, data): + # type: (Any) -> None + node = self.represent_data(data) + self.serializer.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + # type: (Any) -> Any + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + # if node is None: + # raise RepresenterError( + # "recursive objects are not allowed: %r" % data) + return node + # self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if PY2: + # if type(data) is types.InstanceType: + if isinstance(data, types.InstanceType): + data_types = get_classobj_bases(data.__class__) + list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, text_type(data)) + # if alias_key is not None: + # self.represented_objects[alias_key] = node + return 
node + + def represent_key(self, data): + # type: (Any) -> Any + """ + David Fraser: Extract a method to represent keys in mappings, so that + a subclass can choose not to quote them (for example) + used in represent_mapping + https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c + """ + return self.represent_data(data) + + @classmethod + def add_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_representers' not in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + @classmethod + def add_multi_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_multi_representers' not in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if style is None: + style = self.default_style + comment = None + if style and style[0] in '|>': + comment = getattr(value, 'comment', None) + if comment: + comment = [None, [comment]] + node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # if not (isinstance(node_item, ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + if self.sort_base_mapping_type_on_output: + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + 
node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + # type: (Any) -> bool + return False + + +class SafeRepresenter(BaseRepresenter): + def ignore_aliases(self, data): + # type: (Any) -> bool + # https://docs.python.org/3/reference/expressions.html#parenthesized-forms : + # "i.e. two occurrences of the empty tuple may or may not yield the same object" + # so "data is ()" should not be used + if data is None or (isinstance(data, tuple) and data == ()): + return True + if isinstance(data, (binary_type, text_type, bool, int, float)): + return True + return False + + def represent_none(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') + + if PY3: + + def represent_str(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + # type: (Any) -> Any + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + data = base64.encodestring(data).decode('ascii') + return self.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') + + else: + + def represent_str(self, data): + # type: (Any) -> Any + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data, anchor=None): + # type: (Any, Optional[Any]) -> Any + try: + value = self.dumper.boolean_representation[bool(data)] + except AttributeError: + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value, anchor=anchor) + + def represent_int(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data)) + + if PY2: + + def represent_long(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value * inf_value): + inf_value *= inf_value + + def represent_float(self, data): + # type: (Any) -> Any + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = to_unicode(repr(data)).lower() + if getattr(self.serializer, 'use_version', None) == (1, 1): + if u'.' not in value and u'e' in value: + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag in YAML 1.1. We fix + # this by adding '.0' before the 'e' symbol. 
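                # Editorial illustration, not part of the upstream file:
                # the transformation below, assuming the serializer reports
                # YAML version (1, 1), behaves like
                #
                #     repr(1e17)                     -> '1e17'    (no dot: not a valid !!float)
                #     '1e17'.replace('e', '.0e', 1)  -> '1.0e17'  (valid YAML 1.1 float)
                #
                # Only the first 'e' is replaced, so the exponent digits are
                # left untouched.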
+ value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + # type: (Any) -> Any + # pairs = (len(data) > 0 and isinstance(data, list)) + # if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + # if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + + # value = [] + # for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + # return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + # type: (Any) -> Any + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_ordereddict(self, data): + # type: (Any) -> Any + return self.represent_omap(u'tag:yaml.org,2002:omap', data) + + def represent_set(self, data): + # type: (Any) -> Any + value = {} # type: Dict[Any, None] + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + # type: (Any) -> Any + value = to_unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + # type: (Any) -> Any + value = to_unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + # type: (Any, Any, Any, Any) -> Any + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + # type: (Any) -> None + raise RepresenterError('cannot represent an object: %s' % (data,)) + + +SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) + +if PY2: + SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode) +else: + SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) + +if PY2: + SafeRepresenter.add_representer(long, SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict) + +if sys.version_info >= (2, 7): + import collections + + SafeRepresenter.add_representer( + collections.OrderedDict, SafeRepresenter.represent_ordereddict + ) + +SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) + + +class Representer(SafeRepresenter): + if PY2: + + def represent_str(self, data): + # type: (Any) -> Any + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = 
u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + # type: (Any) -> Any + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + # type: (Any) -> Any + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, to_unicode(data)) + + def represent_complex(self, data): + # type: (Any) -> Any + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + # type: (Any) -> Any + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + # type: (Any) -> Any + try: + name = u'%s.%s' % (data.__module__, data.__qualname__) + except AttributeError: + # probably PY2 + name = u'%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:' + name, "") + + def represent_module(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:python/module:' + data.__name__, "") + + if PY2: + + def represent_instance(self, data): + # type: (Any) -> Any + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed + # by calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never + # be called and the class instance is created by instantiating a + # trivial class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, + # we produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:' + class_name, state + ) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:' + class_name, args + ) + value = {} + if bool(args): + value['args'] = args + value['state'] = state # type: ignore + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:' + class_name, value + ) + + def represent_object(self, data): + # type: (Any) -> Any + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: + reduce = copyreg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError('cannot represent object: %r' % (data,)) + reduce = (list(reduce) + [None] * 5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + try: + function_name = u'%s.%s' % (function.__module__, function.__qualname__) + except AttributeError: + # probably PY2 + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:' + function_name, state + ) + if not listitems and not dictitems and isinstance(state, dict) and not state: + return self.represent_sequence(tag + function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag + function_name, value) + + +if PY2: + Representer.add_representer(str, Representer.represent_str) + + Representer.add_representer(unicode, Representer.represent_unicode) + + Representer.add_representer(long, Representer.represent_long) + +Representer.add_representer(complex, Representer.represent_complex) + +Representer.add_representer(tuple, Representer.represent_tuple) + +Representer.add_representer(type, Representer.represent_name) + +if PY2: + Representer.add_representer(types.ClassType, Representer.represent_name) + +Representer.add_representer(types.FunctionType, Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) + +Representer.add_representer(types.ModuleType, Representer.represent_module) + +if PY2: + Representer.add_multi_representer(types.InstanceType, Representer.represent_instance) + +Representer.add_multi_representer(object, Representer.represent_object) + +Representer.add_multi_representer(type, Representer.represent_name) + + +class RoundTripRepresenter(SafeRepresenter): + # need to add type here and write out the .comment + # in serializer and emitter + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any) -> None + if not hasattr(dumper, 'typ') and default_flow_style is None: + default_flow_style = False + SafeRepresenter.__init__( + self, + default_style=default_style, + 
default_flow_style=default_flow_style, + dumper=dumper, + ) + + def ignore_aliases(self, data): + # type: (Any) -> bool + try: + if data.anchor is not None and data.anchor.value is not None: + return False + except AttributeError: + pass + return SafeRepresenter.ignore_aliases(self, data) + + def represent_none(self, data): + # type: (Any) -> Any + if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start: + # this will be open ended (although it is not yet) + return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') + return self.represent_scalar(u'tag:yaml.org,2002:null', "") + + def represent_literal_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '|' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + represent_preserved_scalarstring = represent_literal_scalarstring + + def represent_folded_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '>' + anchor = data.yaml_anchor(any=True) + for fold_pos in reversed(getattr(data, 'fold_pos', [])): + if ( + data[fold_pos] == ' ' + and (fold_pos > 0 and not data[fold_pos - 1].isspace()) + and (fold_pos < len(data) and not data[fold_pos + 1].isspace()) + ): + data = data[:fold_pos] + '\a' + data[fold_pos:] + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_single_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = "'" + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_double_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '"' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_plain_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def insert_underscore(self, prefix, s, underscore, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if underscore is None: + return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor) + if underscore[0]: + sl = list(s) + pos = len(s) - underscore[0] + while pos > 0: + sl.insert(pos, '_') + pos -= underscore[0] + s = "".join(sl) + if underscore[1]: + s = '_' + s + if underscore[2]: + s += '_' + return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor) + + def represent_scalar_int(self, data): + # type: (Any) -> Any + if data._width is not None: + s = '{:0{}d}'.format(data, data._width) + else: + s = format(data, 'd') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore("", s, data._underscore, anchor=anchor) + + def represent_binary_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}b}', that strips the zeros + s = '{:0{}b}'.format(data, data._width) + else: + s = 
format(data, 'b') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0b', s, data._underscore, anchor=anchor) + + def represent_octal_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}o}', that strips the zeros + s = '{:0{}o}'.format(data, data._width) + else: + s = format(data, 'o') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0o', s, data._underscore, anchor=anchor) + + def represent_hex_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}x}', that strips the zeros + s = '{:0{}x}'.format(data, data._width) + else: + s = format(data, 'x') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_hex_caps_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}X}', that strips the zeros + s = '{:0{}X}'.format(data, data._width) + else: + s = format(data, 'X') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_scalar_float(self, data): + # type: (Any) -> Any + """ this is way more complicated """ + value = None + anchor = data.yaml_anchor(any=True) + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + if value: + return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor) + if data._exp is None and data._prec > 0 and data._prec == data._width - 1: + # no exponent, but trailing dot + value = u'{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data))) + elif data._exp is None: + # no exponent, "normal" dot + prec = data._prec + ms = data._m_sign if data._m_sign else "" + # -1 for the dot + value = u'{}{:0{}.{}f}'.format( + ms, abs(data), data._width - len(ms), data._width - prec - 1 + ) + if prec == 0 or (prec == 1 and ms != ""): + value = value.replace(u'0.', u'.') + while len(value) < data._width: + value += u'0' + else: + # exponent + m, es = u'{:{}.{}e}'.format( + # data, data._width, data._width - data._prec + (1 if data._m_sign else 0) + data, + data._width, + data._width + (1 if data._m_sign else 0), + ).split('e') + w = data._width if data._prec > 0 else (data._width + 1) + if data < 0: + w += 1 + m = m[:w] + e = int(es) + m1, m2 = m.split('.') # always second? + while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0): + m2 += u'0' + if data._m_sign and data > 0: + m1 = '+' + m1 + esgn = u'+' if data._e_sign else "" + if data._prec < 0: # mantissa without dot + if m2 != u'0': + e -= len(m2) + else: + m2 = "" + while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width: + m2 += u'0' + e -= 1 + value = m1 + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + elif data._prec == 0: # mantissa with trailing dot + e -= len(m2) + value = ( + m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + ) + else: + if data._m_lead0 > 0: + m2 = u'0' * (data._m_lead0 - 1) + m1 + m2 + m1 = u'0' + m2 = m2[: -data._m_lead0] # these should be zeros + e += data._m_lead0 + while len(m1) < data._prec: + m1 += m2[0] + m2 = m2[1:] + e -= 1 + value = ( + m1 + u'.' 
+ m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + ) + + if value is None: + value = to_unicode(repr(data)).lower() + return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor) + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + # if the flow_style is None, the flow style tacked on to the object + # explicitly will be taken. If that is None as well the default flow + # style rules + try: + flow_style = sequence.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = sequence.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(sequence, comment_attrib) + node.comment = comment.comment + # reset any comment already printed information + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + item_comments = comment.items + node.comment = comment.comment + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for idx, item in enumerate(sequence): + node_item = self.represent_data(item) + self.merge_comments(node_item, item_comments.get(idx)) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if len(sequence) != 0 and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def merge_comments(self, node, comments): + # type: (Any, Any) -> Any + if comments is None: + assert hasattr(node, 'comment') + return node + if getattr(node, 'comment', None) is not None: + for idx, val in enumerate(comments): + if idx >= len(node.comment): + continue + nc = node.comment[idx] + if nc is not None: + assert val is None or val == nc + comments[idx] = nc + node.comment = comments + return node + + def represent_key(self, data): + # type: (Any) -> Any + if isinstance(data, CommentedKeySeq): + self.alias_key = None + return self.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True) + if isinstance(data, CommentedKeyMap): + self.alias_key = None + return self.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True) + return SafeRepresenter.represent_key(self, data) + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = mapping.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = mapping.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! 
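        # Editorial aside, not part of the vendored file: unlike
        # BaseRepresenter.represent_mapping, which may sort keys when
        # sort_base_mapping_type_on_output is set, the round-trip representer
        # keeps insertion order so that a load/dump cycle reproduces the
        # input document. A minimal sketch, assuming a plain ruamel.yaml
        # install (the vendored import path works the same way):
        #
        #     import sys
        #     from ruamel.yaml import YAML
        #
        #     yaml = YAML()                      # typ='rt' (round-trip) is the default
        #     data = yaml.load(u'b: 2\na: 1\n')
        #     yaml.dump(data, sys.stdout)        # emits 'b' before 'a', unsorted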
+ try: + comment = getattr(mapping, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])] + try: + merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0] + except IndexError: + merge_pos = 0 + item_count = 0 + if bool(merge_list): + items = mapping.non_merged_items() + else: + items = mapping.items() + for item_key, item_value in items: + item_count += 1 + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + nvc = getattr(node_value, 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_value.comment = item_comment[2:] + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + if bool(merge_list): + # because of the call to represent_data here, the anchors + # are marked as being used and thereby created + if len(merge_list) == 1: + arg = self.represent_data(merge_list[0]) + else: + arg = self.represent_data(merge_list) + arg.flow_style = True + value.insert(merge_pos, (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg)) + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = omap.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = omap.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(omap, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # node_item.flow_style = False + # node item has two scalars in value: node_key and node_value + item_comment = item_comments.get(item_key) + if item_comment: + if item_comment[1]: + node_item.comment = [None, item_comment[1]] + assert getattr(node_item.value[0][0], 'comment', None) is None + node_item.value[0][0].comment = [item_comment[0], None] + nvc = getattr(node_item.value[0][1], 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_item.value[0][1].comment = item_comment[2:] + # if not (isinstance(node_item, 
ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_set(self, setting): + # type: (Any) -> Any + flow_style = False + tag = u'tag:yaml.org,2002:set' + # return self.represent_mapping(tag, value) + value = [] # type: List[Any] + flow_style = setting.fa.flow_style(flow_style) + try: + anchor = setting.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! + try: + comment = getattr(setting, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in setting.odict: + node_key = self.represent_key(item_key) + node_value = self.represent_data(None) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + node_key.style = node_value.style = '?' + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + best_style = best_style + return node + + def represent_dict(self, data): + # type: (Any) -> Any + """write out tag if saved on loading""" + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = u'tag:yaml.org,2002:map' + return self.represent_mapping(tag, data) + + def represent_list(self, data): + # type: (Any) -> Any + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = u'tag:yaml.org,2002:seq' + return self.represent_sequence(tag, data) + + def represent_datetime(self, data): + # type: (Any) -> Any + inter = 'T' if data._yaml['t'] else ' ' + _yaml = data._yaml + if _yaml['delta']: + data += _yaml['delta'] + value = data.isoformat(inter) + else: + value = data.isoformat(inter) + if _yaml['tz']: + value += _yaml['tz'] + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', to_unicode(value)) + + def represent_tagged_scalar(self, data): + # type: (Any) -> Any + try: + tag = data.tag.value + except AttributeError: + tag = None + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor) + + def represent_scalar_bool(self, data): + # type: (Any) -> Any + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return SafeRepresenter.represent_bool(self, data, anchor=anchor) + + +RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none) + +RoundTripRepresenter.add_representer( + LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring +) + +RoundTripRepresenter.add_representer( + FoldedScalarString, 
RoundTripRepresenter.represent_folded_scalarstring +) + +RoundTripRepresenter.add_representer( + SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring +) + +# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple) + +RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int) + +RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int) + +RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int) + +RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int) + +RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int) + +RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float) + +RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool) + +RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list) + +RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict) + +RoundTripRepresenter.add_representer( + CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict +) + +if sys.version_info >= (2, 7): + import collections + + RoundTripRepresenter.add_representer( + collections.OrderedDict, RoundTripRepresenter.represent_ordereddict + ) + +RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set) + +RoundTripRepresenter.add_representer( + TaggedScalar, RoundTripRepresenter.represent_tagged_scalar +) + +RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py new file mode 100644 index 000000000..28b7767b0 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py @@ -0,0 +1,399 @@ +# coding: utf-8 + +from __future__ import absolute_import + +import re + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Optional # NOQA + from ...ruamel.yaml.compat import VersionType # NOQA + +from ...ruamel.yaml.compat import string_types, _DEFAULT_YAML_VERSION # NOQA +from ...ruamel.yaml.error import * # NOQA +from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA +from ...ruamel.yaml.util import RegExp # NOQA + +__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver'] + + +# fmt: off +# resolvers consist of +# - a list of applicable version +# - a tag +# - a regexp +# - a list of first characters to match +implicit_resolvers = [ + ([(1, 2)], + u'tag:yaml.org,2002:bool', + RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X), + list(u'tTfF')), + ([(1, 1)], + u'tag:yaml.org,2002:bool', + RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')), + ([(1, 2)], + u'tag:yaml.org,2002:float', + RegExp(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)? 
+ |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')), + ([(1, 1)], + u'tag:yaml.org,2002:float', + RegExp(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')), + ([(1, 2)], + u'tag:yaml.org,2002:int', + RegExp(u'''^(?:[-+]?0b[0-1_]+ + |[-+]?0o?[0-7_]+ + |[-+]?[0-9_]+ + |[-+]?0x[0-9a-fA-F_]+)$''', re.X), + list(u'-+0123456789')), + ([(1, 1)], + u'tag:yaml.org,2002:int', + RegExp(u'''^(?:[-+]?0b[0-1_]+ + |[-+]?0?[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int + list(u'-+0123456789')), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:merge', + RegExp(u'^(?:<<)$'), + [u'<']), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:null', + RegExp(u'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:timestamp', + RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \\t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)? + (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:value', + RegExp(u'^(?:=)$'), + [u'=']), + # The following resolver is only for documentation purposes. It cannot work + # because plain scalars cannot start with '!', '&', or '*'. + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:yaml', + RegExp(u'^(?:!|&|\\*)$'), + list(u'!&*')), +] +# fmt: on + + +class ResolverError(YAMLError): + pass + + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} # type: Dict[Any, Any] + yaml_path_resolvers = {} # type: Dict[Any, Any] + + def __init__(self, loadumper=None): + # type: (Any, Any) -> None + self.loadumper = loadumper + if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None: + self.loadumper._resolver = self.loadumper + self._loader_version = None # type: Any + self.resolver_exact_paths = [] # type: List[Any] + self.resolver_prefix_paths = [] # type: List[Any] + + @property + def parser(self): + # type: () -> Any + if self.loadumper is not None: + if hasattr(self.loadumper, 'typ'): + return self.loadumper.parser + return self.loadumper._parser + return None + + @classmethod + def add_implicit_resolver_base(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + implicit_resolvers.append(([(1, 2), (1, 1)], 
tag, regexp, first)) + + # @classmethod + # def add_implicit_resolver(cls, tag, regexp, first): + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # type: (Any, Any, Any) -> None + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if 'yaml_path_resolvers' not in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] # type: List[Any] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError('Invalid path element: %s' % (element,)) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif ( + node_check not in [ScalarNode, SequenceNode, MappingNode] + and not isinstance(node_check, string_types) + and node_check is not None + ): + raise ResolverError('Invalid node checker: %s' % (node_check,)) + if not isinstance(index_check, (string_types, int)) and index_check is not None: + raise ResolverError('Invalid index checker: %s' % (index_check,)) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None: + raise ResolverError('Invalid node kind: %s' % (kind,)) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + # type: (Any, Any) -> None + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + # type: () -> None + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, current_node, current_index): + # type: (int, Text, Any, Any, Any) -> bool + node_check, index_check = path[depth - 1] + if isinstance(node_check, string_types): + if 
current_node.tag != node_check: + return False + elif node_check is not None: + if not isinstance(current_node, node_check): + return False + if index_check is True and current_index is not None: + return False + if (index_check is False or index_check is None) and current_index is None: + return False + if isinstance(index_check, string_types): + if not ( + isinstance(current_index, ScalarNode) and index_check == current_index.value + ): + return False + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return False + return True + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.yaml_implicit_resolvers.get("", []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + return None + + +class Resolver(BaseResolver): + pass + + +for ir in implicit_resolvers: + if (1, 2) in ir[0]: + Resolver.add_implicit_resolver_base(*ir[1:]) + + +class VersionedResolver(BaseResolver): + """ + contrary to the "normal" resolver, the smart resolver delays loading + the pattern matching rules. That way it can decide to load 1.1 rules + or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals + and Yes/No/On/Off booleans. 
+ """ + + def __init__(self, version=None, loader=None, loadumper=None): + # type: (Optional[VersionType], Any, Any) -> None + if loader is None and loadumper is not None: + loader = loadumper + BaseResolver.__init__(self, loader) + self._loader_version = self.get_loader_version(version) + self._version_implicit_resolver = {} # type: Dict[Any, Any] + + def add_version_implicit_resolver(self, version, tag, regexp, first): + # type: (VersionType, Any, Any, Any) -> None + if first is None: + first = [None] + impl_resolver = self._version_implicit_resolver.setdefault(version, {}) + for ch in first: + impl_resolver.setdefault(ch, []).append((tag, regexp)) + + def get_loader_version(self, version): + # type: (Optional[VersionType]) -> Any + if version is None or isinstance(version, tuple): + return version + if isinstance(version, list): + return tuple(version) + # assume string + return tuple(map(int, version.split(u'.'))) + + @property + def versioned_resolver(self): + # type: () -> Any + """ + select the resolver based on the version we are parsing + """ + version = self.processing_version + if version not in self._version_implicit_resolver: + for x in implicit_resolvers: + if version in x[0]: + self.add_version_implicit_resolver(version, x[1], x[2], x[3]) + return self._version_implicit_resolver[version] + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.versioned_resolver.get("", []) + else: + resolvers = self.versioned_resolver.get(value[0], []) + resolvers += self.versioned_resolver.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + try: + version = self.loadumper._scanner.yaml_version + except AttributeError: + try: + if hasattr(self.loadumper, 'typ'): + version = self.loadumper.version + else: + version = self.loadumper._serializer.use_version # dumping + except AttributeError: + version = None + if version is None: + version = self._loader_version + if version is None: + version = _DEFAULT_YAML_VERSION + return version diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py new file mode 100644 index 000000000..627d01dad --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py @@ -0,0 +1,51 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +""" +You cannot subclass bool, and this is necessary for round-tripping anchored +bool values (and also if you want to preserve the original way of writing) + +bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well. 
+ +You can use these in an if statement, but not when testing equivalence +""" + +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarBoolean'] + +# no need for no_limit_int -> int + + +class ScalarBoolean(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + anchor = kw.pop('anchor', None) # type: ignore + b = int.__new__(cls, *args, **kw) # type: ignore + if anchor is not None: + b.yaml_set_anchor(anchor, always_dump=True) + return b + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py new file mode 100644 index 000000000..456fdeb34 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +import sys +from .compat import no_limit_int # NOQA +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat'] + + +class ScalarFloat(float): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) # type: ignore + prec = kw.pop('prec', None) # type: ignore + m_sign = kw.pop('m_sign', None) # type: ignore + m_lead0 = kw.pop('m_lead0', 0) # type: ignore + exp = kw.pop('exp', None) # type: ignore + e_width = kw.pop('e_width', None) # type: ignore + e_sign = kw.pop('e_sign', None) # type: ignore + underscore = kw.pop('underscore', None) # type: ignore + anchor = kw.pop('anchor', None) # type: ignore + v = float.__new__(cls, *args, **kw) # type: ignore + v._width = width + v._prec = prec + v._m_sign = m_sign + v._m_lead0 = m_lead0 + v._exp = exp + v._e_width = e_width + v._e_sign = e_sign + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) + a + x = type(self)(self + a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) // a + x = type(self)(self // a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) * a + x = type(self)(self * a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + x._prec = self._prec # check for others + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) ** a + x = type(self)(self ** a) + x._width = self._width + x._underscore = 
self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) - a + x = type(self)(self - a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + def dump(self, out=sys.stdout): + # type: (Any) -> Any + out.write( + 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format( + self, + self._width, # type: ignore + self._prec, # type: ignore + self._m_sign, # type: ignore + self._m_lead0, # type: ignore + self._underscore, # type: ignore + self._exp, # type: ignore + self._e_width, # type: ignore + self._e_sign, # type: ignore + ) + ) + + +class ExponentialFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) + + +class ExponentialCapsFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py new file mode 100644 index 000000000..01567be89 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py @@ -0,0 +1,130 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +from .compat import no_limit_int # NOQA +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt'] + + +class ScalarInt(no_limit_int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) # type: ignore + underscore = kw.pop('underscore', None) # type: ignore + anchor = kw.pop('anchor', None) # type: ignore + v = no_limit_int.__new__(cls, *args, **kw) # type: ignore + v._width = width + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self + a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self // a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self * a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + 
self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self ** a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self - a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class BinaryInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class OctalInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +# mixed casing of A-F is not supported, when loading the first non digit +# determines the case + + +class HexInt(ScalarInt): + """uses lower case (a-f)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class HexCapsInt(ScalarInt): + """uses upper case (A-F)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class DecimalInt(ScalarInt): + """needed if anchor""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py new file mode 100644 index 000000000..33ddf5e55 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py @@ -0,0 +1,156 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +from ...ruamel.yaml.compat import text_type +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = [ + 'ScalarString', + 'LiteralScalarString', + 'FoldedScalarString', + 'SingleQuotedScalarString', + 'DoubleQuotedScalarString', + 'PlainScalarString', + # PreservedScalarString is the old name, as it was the first to be preserved on rt, + # use LiteralScalarString instead + 'PreservedScalarString', +] + + +class ScalarString(text_type): + __slots__ = Anchor.attrib + + def __new__(cls, *args, **kw): 
+ # type: (Any, Any) -> Any + anchor = kw.pop('anchor', None) # type: ignore + ret_val = text_type.__new__(cls, *args, **kw) # type: ignore + if anchor is not None: + ret_val.yaml_set_anchor(anchor, always_dump=True) + return ret_val + + def replace(self, old, new, maxreplace=-1): + # type: (Any, Any, int) -> Any + return type(self)((text_type.replace(self, old, new, maxreplace))) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class LiteralScalarString(ScalarString): + __slots__ = 'comment' # the comment after the | on the first line + + style = '|' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +PreservedScalarString = LiteralScalarString + + +class FoldedScalarString(ScalarString): + __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line + + style = '>' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class SingleQuotedScalarString(ScalarString): + __slots__ = () + + style = "'" + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class DoubleQuotedScalarString(ScalarString): + __slots__ = () + + style = '"' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class PlainScalarString(ScalarString): + __slots__ = () + + style = '' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +def preserve_literal(s): + # type: (Text) -> Text + return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n')) + + +def walk_tree(base, map=None): + # type: (Any, Any) -> None + """ + the routine here walks over a simple yaml tree (recursing in + dict values and list items) and converts strings that + have multiple lines to literal scalars + + You can also provide an explicit (ordered) mapping for multiple transforms + (first of which is executed): + map = ruamel.yaml.compat.ordereddict + map['\n'] = preserve_literal + map[':'] = SingleQuotedScalarString + walk_tree(data, map=map) + """ + from ...ruamel.yaml.compat import string_types + from ...ruamel.yaml.compat import MutableMapping, MutableSequence # type: ignore + + if map is None: + map = {'\n': preserve_literal} + + if isinstance(base, MutableMapping): + for k in base: + v = base[k] # type: Text + if isinstance(v, string_types): + for ch in map: + if ch in v: + base[k] = map[ch](v) + break + else: + walk_tree(v, map=map) + elif isinstance(base, MutableSequence): + for idx, elem in enumerate(base): + if isinstance(elem, string_types): + for ch in map: + if ch in elem: # type: ignore + base[idx] = map[ch](elem) + break + else: + walk_tree(elem, map=map) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py new file 
mode 100644 index 000000000..084bca4e0 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py @@ -0,0 +1,1980 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# RoundTripScanner +# COMMENT(value) +# +# Read comments in the Scanner code for more details. +# + +from ...ruamel.yaml.error import MarkedYAMLError +from ...ruamel.yaml.tokens import * # NOQA +from ...ruamel.yaml.compat import utf8, unichr, PY3, check_anchorname_char, nprint # NOQA + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Text # NOQA + from ...ruamel.yaml.compat import VersionType # NOQA + +__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError'] + + +_THE_END = '\n\0\r\x85\u2028\u2029' +_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029' +_SPACE_TAB = ' \t' + + +class ScannerError(MarkedYAMLError): + pass + + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + # type: (Any, Any, int, int, int, Any) -> None + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + + +class Scanner(object): + def __init__(self, loader=None): + # type: (Any) -> None + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer + + self.loader = loader + if self.loader is not None and getattr(self.loader, '_scanner', None) is None: + self.loader._scanner = self + self.reset_scanner() + self.first_time = False + self.yaml_version = None # type: Any + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def reset_scanner(self): + # type: () -> None + # Had we reached the end of the stream? + self.done = False + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # List of processed tokens that are not yet emitted. + self.tokens = [] # type: List[Any] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] # type: List[int] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? 
not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} # type: Dict[Any, Any] + + @property + def reader(self): + # type: () -> Any + try: + return self._scanner_reader # type: ignore + except AttributeError: + if hasattr(self.loader, 'typ'): + self._scanner_reader = self.loader.reader + else: + self._scanner_reader = self.loader._reader + return self._scanner_reader + + @property + def scanner_processing_version(self): # prefix until un-composited + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver.processing_version + return self.loader.processing_version + + # Public methods. + + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + return self.tokens[0] + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + # type: () -> bool + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + return False + + def fetch_comment(self, comment): + # type: (Any) -> None + raise NotImplementedError + + def fetch_more_tokens(self): + # type: () -> Any + # Eat whitespaces and comments until we reach the next token. + comment = self.scan_to_next_token() + if comment is not None: # never happens for base scanner + return self.fetch_comment(comment) + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.reader.column) + + # Peek the next character. + ch = self.reader.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? 
+ if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + # if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == "'": + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError( + 'while scanning for the next token', + None, + 'found character %r that cannot start any token' % utf8(ch), + self.reader.get_mark(), + ) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # type: () -> Any + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # type: () -> None + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). 
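# ----------------------------------------------------------------------
# Editorial aside (not part of the vendored upstream file): the simple-key
# bookkeeping above distinguishes implicit single-line keys from explicit
# keys introduced by '?'.  A minimal sketch, assuming the installed
# `ruamel.yaml` package behaves like this vendored copy:
#
#     from ruamel.yaml import YAML
#     yaml = YAML(typ='safe')
#     yaml.load("simple key: 1\n")          # -> {'simple key': 1}
#     yaml.load("? explicit key\n: 1\n")    # -> {'explicit key': 1}
#
# A simple key must stay on one line and under 1024 characters, which is
# exactly what stale_possible_simple_keys() below enforces.
# ----------------------------------------------------------------------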
+ for level in list(self.possible_simple_keys): + key = self.possible_simple_keys[level] + if key.line != self.reader.line or self.reader.index - key.index > 1024: + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # type: () -> None + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.reader.column + + # The next token might be a simple key. Let's save its number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken + len(self.tokens) + key = SimpleKey( + token_number, + required, + self.reader.index, + self.reader.line, + self.reader.column, + self.reader.get_mark(), + ) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # type: () -> None + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + # type: (Any) -> None + # In flow context, tokens should respect indentation. + # Actually the condition should be `self.indent >= column` according to + # the spec. But this condition will prohibit intuitively correct + # constructions such as + # key : { + # } + # #### + # if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid indentation or unclosed '[' or '{'", + # self.reader.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive than the specification requires. + if bool(self.flow_level): + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.reader.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # type: (int) -> bool + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # type: () -> None + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding)) + + def fetch_stream_end(self): + # type: () -> None + # Set the current indentation to -1. + self.unwind_indent(-1) + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + # The stream is finished. + self.done = True + + def fetch_directive(self): + # type: () -> None + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys.
+ self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + # type: () -> None + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + # type: () -> None + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + # type: (Any) -> None + # Set the current indentation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.reader.get_mark() + self.reader.forward(3) + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[') + + def fetch_flow_mapping_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{') + + def fetch_flow_collection_start(self, TokenClass, to_push): + # type: (Any, Text) -> None + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + # Increase the flow level. + self.flow_context.append(to_push) + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + # type: (Any) -> None + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Decrease the flow level. + try: + popped = self.flow_context.pop() # NOQA + except IndexError: + # We must not be in a list or object. + # Defer error handling to the parser. + pass + # No simple keys after ']' or '}'. + self.allow_simple_key = False + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + # type: () -> None + # Simple keys are allowed after ','. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Add FLOW-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError( + None, None, 'sequence entries are not allowed here', self.reader.get_mark() + ) + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + # Simple keys are allowed after '-'.
+ self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not necessarily a simple one)? + if not self.allow_simple_key: + raise ScannerError( + None, None, 'mapping keys are not allowed here', self.reader.get_mark() + ) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + # type: () -> None + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert( + key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark) + ) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert( + key.token_number - self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark), + ) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'mapping values are not allowed here', + self.reader.get_mark(), + ) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + # type: () -> None + # ALIAS could be a simple key. + self.save_possible_simple_key() + # No simple keys after ALIAS. + self.allow_simple_key = False + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + # type: () -> None + # ANCHOR could start a simple key. + self.save_possible_simple_key() + # No simple keys after ANCHOR. + self.allow_simple_key = False + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + # type: () -> None + # TAG could start a simple key.
+ self.save_possible_simple_key() + # No simple keys after TAG. + self.allow_simple_key = False + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + # type: () -> None + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + # type: () -> None + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + # type: (Any) -> None + # A simple key may follow a block scalar. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + # type: () -> None + self.fetch_flow_scalar(style="'") + + def fetch_double(self): + # type: () -> None + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + # type: (Any) -> None + # A flow scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after flow scalars. + self.allow_simple_key = False + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + # type: () -> None + # A plain scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + # type: () -> Any + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.reader.column == 0: + return True + return None + + def check_document_start(self): + # type: () -> Any + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_document_end(self): + # type: () -> Any + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_block_entry(self): + # type: () -> Any + # BLOCK-ENTRY: '-' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_key(self): + # type: () -> Any + # KEY(flow context): '?' + if bool(self.flow_level): + return True + # KEY(block context): '?' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_value(self): + # type: () -> Any + # VALUE(flow context): ':' + if self.scanner_processing_version == (1, 1): + if bool(self.flow_level): + return True + else: + if bool(self.flow_level): + if self.flow_context[-1] == '[': + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + elif self.tokens and isinstance(self.tokens[-1], ValueToken): + # mapping flow context scanning a value token + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + return True + # VALUE(block context): ':' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_plain(self): + # type: () -> Any + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. 
+ # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + srp = self.reader.peek + ch = srp() + if self.scanner_processing_version == (1, 1): + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or ( + srp(1) not in _THE_END_SPACE_TAB + and (ch == '-' or (not self.flow_level and ch in '?:')) + ) + # YAML 1.2 + if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`': + # ################### ^ ??? + return True + ch1 = srp(1) + if ch == '-' and ch1 not in _THE_END_SPACE_TAB: + return True + if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB: + return True + + return srp(1) not in _THE_END_SPACE_TAB and ( + ch == '-' or (not self.flow_level and ch in '?:') + ) + + # Scanners. + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + _the_end = _THE_END + while not found: + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _the_end: + srf() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + return None + + def scan_directive(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + start_mark = self.reader.get_mark() + srf() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.reader.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.reader.get_mark() + else: + end_mark = self.reader.get_mark() + while srp() not in _THE_END: + srf() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
+ length = 0 + srp = self.reader.peek + ch = srp(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.': + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_yaml_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + major = self.scan_yaml_directive_number(start_mark) + if srp() != '.': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected a digit or '.', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + srf() + minor = self.scan_yaml_directive_number(start_mark) + if srp() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected a digit or ' ', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + self.yaml_version = (major, minor) + return self.yaml_version + + def scan_yaml_directive_number(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + ch = srp() + if not ('0' <= ch <= '9'): + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected a digit, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + length = 0 + while '0' <= srp(length) <= '9': + length += 1 + value = int(self.reader.prefix(length)) + srf(length) + return value + + def scan_tag_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + handle = self.scan_tag_directive_handle(start_mark) + while srp() == ' ': + srf() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.reader.peek() + if ch != ' ': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_tag_directive_prefix(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.reader.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_directive_ignored_line(self, start_mark): + # type: (Any) -> None + # See the specification for details. 
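# ----------------------------------------------------------------------
# Editorial aside (not part of the upstream file): the %YAML directive
# scanned above is what later drives VersionedResolver.processing_version.
# A hedged sketch with the round-trip API (the exact mapping type returned
# may differ between ruamel.yaml releases):
#
#     from ruamel.yaml import YAML
#     yaml = YAML()
#     data = yaml.load("%YAML 1.1\n---\nflag: yes\n")
#     # expect data['flag'] is True: under 1.1 rules 'yes' is a boolean,
#     # while the default 1.2 rules would load it as the string 'yes'
# ----------------------------------------------------------------------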
+ srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _THE_END: + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected a comment or a line break, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # type: (Any) -> Any + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpreted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + srp = self.reader.peek + start_mark = self.reader.get_mark() + indicator = srp() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.reader.forward() + length = 0 + ch = srp(length) + # while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + # or ch in u'-_': + while check_anchorname_char(ch): + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning an %s' % (name,), + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + # ch1 = ch + # ch = srp() # no need to peek, ch is already set + # assert ch1 == ch + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`': + raise ScannerError( + 'while scanning an %s' % (name,), + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + end_mark = self.reader.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + start_mark = self.reader.get_mark() + ch = srp(1) + if ch == '<': + handle = None + self.reader.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if srp() != '>': + raise ScannerError( + 'while parsing a tag', + start_mark, + "expected '>', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in _THE_END_SPACE_TAB: + handle = None + suffix = '!' + self.reader.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = srp(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.reader.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a tag', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + value = (handle, suffix) + end_mark = self.reader.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style, rt=False): + # type: (Any, Optional[bool]) -> Any + # See the specification for details. + srp = self.reader.peek + if style == '>': + folded = True + else: + folded = False + + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + + # Scan the header. + self.reader.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + # block scalar comment e.g. : |+ # comment text + block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent + 1 + if increment is None: + # no increment and top level, min_indent could be 0 + if min_indent < 1 and ( + style not in '|>' + or (self.scanner_processing_version == (1, 1)) + and getattr( + self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False + ) + ): + min_indent = 1 + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + if min_indent < 1: + min_indent = 1 + indent = min_indent + increment - 1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = "" + + # Scan the inner part of the block scalar. + while self.reader.column == indent and srp() != '\0': + chunks.extend(breaks) + leading_non_space = srp() not in ' \t' + length = 0 + while srp(length) not in _THE_END: + length += 1 + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if style in '|>' and min_indent == 0: + # at the beginning of a line, if in block style see if + # end of document/start_new_document + if self.check_document_start() or self.check_document_end(): + break + if self.reader.column == indent and srp() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if rt and folded and line_break == '\n': + chunks.append('\a') + if folded and line_break == '\n' and leading_non_space and srp() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + # if folded and line_break == u'\n': + # if not breaks: + # if srp() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + # else: + # chunks.append(line_break) + else: + break + + # Process trailing line breaks. The 'chomping' setting determines + # whether they are included in the value. + trailing = [] # type: List[Any] + if chomping in [None, True]: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + elif chomping in [None, False]: + trailing.extend(breaks) + + # We are done. + token = ScalarToken("".join(chunks), False, start_mark, end_mark, style) + if block_scalar_comment is not None: + token.add_pre_comments([block_scalar_comment]) + if len(trailing) > 0: + # nprint('trailing 1', trailing) # XXXXX + # Eat whitespaces and comments until we reach the next token. + comment = self.scan_to_next_token() + while comment: + trailing.append(' ' * comment[1].column + comment[0]) + comment = self.scan_to_next_token() + + # Keep track of the trailing whitespace and following comments + # as a comment token, if isn't all included in the actual value. + comment_end_mark = self.reader.get_mark() + comment = CommentToken("".join(trailing), end_mark, comment_end_mark) + token.add_post_comment(comment) + return token + + def scan_block_scalar_indicators(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
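# ----------------------------------------------------------------------
# Editorial aside (not part of the upstream file): the chomping ('+'/'-')
# and indentation (1-9) indicators parsed by the method below, sketched
# with the public API (assumption: the installed ruamel.yaml matches this
# vendored copy):
#
#     from ruamel.yaml import YAML
#     yaml = YAML(typ='safe')
#     yaml.load("s: |+\n  text\n\n")  # expect {'s': 'text\n\n'}: '+' keeps
#                                     # trailing line breaks
#     yaml.load("s: |-\n  text\n\n")  # expect {'s': 'text'}: '-' strips them
# ----------------------------------------------------------------------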
+ srp = self.reader.peek + chomping = None + increment = None + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected chomping or indentation indicators, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + prefix = '' + comment = None + while srp() == ' ': + prefix += srp() + srf() + if srp() == '#': + comment = prefix + while srp() not in _THE_END: + comment += srp() + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected a comment or a line break, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + self.scan_line_break() + return comment + + def scan_block_scalar_indentation(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + chunks = [] + max_indent = 0 + end_mark = self.reader.get_mark() + while srp() in ' \r\n\x85\u2028\u2029': + if srp() != ' ': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + else: + srf() + if self.reader.column > max_indent: + max_indent = self.reader.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # type: (int) -> Any + # See the specification for details. + chunks = [] + srp = self.reader.peek + srf = self.reader.forward + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + while srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # type: (Any) -> Any + # See the specification for details. + # Note that we loosen indentation rules for quoted scalars. Quoted + # scalars don't need to adhere to indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive than the specification requires. We only need to check + # that document separators are not included in scalars.
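# ----------------------------------------------------------------------
# Editorial aside (not part of the upstream file): a double-quoted scalar
# exercising both escape tables defined below (ESCAPE_REPLACEMENTS for
# single-character escapes such as \t, ESCAPE_CODES for the fixed-width
# hex forms such as the 4-digit \u escape); assumption: the installed
# ruamel.yaml matches this vendored copy:
#
#     from ruamel.yaml import YAML
#     yaml = YAML(typ='safe')
#     yaml.load('a: "x\\ty\\u00e9"\n')   # -> {'a': 'x\tyé'}
# ----------------------------------------------------------------------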
+        if style == '"':
+            double = True
+        else:
+            double = False
+        srp = self.reader.peek
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        quote = srp()
+        self.reader.forward()
+        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        while srp() != quote:
+            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+        self.reader.forward()
+        end_mark = self.reader.get_mark()
+        return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
+
+    ESCAPE_REPLACEMENTS = {
+        '0': '\0',
+        'a': '\x07',
+        'b': '\x08',
+        't': '\x09',
+        '\t': '\x09',
+        'n': '\x0A',
+        'v': '\x0B',
+        'f': '\x0C',
+        'r': '\x0D',
+        'e': '\x1B',
+        ' ': '\x20',
+        '"': '"',
+        '/': '/',  # as per http://www.json.org/
+        '\\': '\\',
+        'N': '\x85',
+        '_': '\xA0',
+        'L': '\u2028',
+        'P': '\u2029',
+    }
+
+    ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
+
+    def scan_flow_scalar_non_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            length = 0
+            while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029':
+                length += 1
+            if length != 0:
+                chunks.append(self.reader.prefix(length))
+                srf(length)
+            ch = srp()
+            if not double and ch == "'" and srp(1) == "'":
+                chunks.append("'")
+                srf(2)
+            elif (double and ch == "'") or (not double and ch in '"\\'):
+                chunks.append(ch)
+                srf()
+            elif double and ch == '\\':
+                srf()
+                ch = srp()
+                if ch in self.ESCAPE_REPLACEMENTS:
+                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+                    srf()
+                elif ch in self.ESCAPE_CODES:
+                    length = self.ESCAPE_CODES[ch]
+                    srf()
+                    for k in range(length):
+                        if srp(k) not in '0123456789ABCDEFabcdef':
+                            raise ScannerError(
+                                'while scanning a double-quoted scalar',
+                                start_mark,
+                                'expected escape sequence of %d hexadecimal '
+                                'numbers, but found %r' % (length, utf8(srp(k))),
+                                self.reader.get_mark(),
+                            )
+                    code = int(self.reader.prefix(length), 16)
+                    chunks.append(unichr(code))
+                    srf(length)
+                elif ch in '\n\r\x85\u2028\u2029':
+                    self.scan_line_break()
+                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+                else:
+                    raise ScannerError(
+                        'while scanning a double-quoted scalar',
+                        start_mark,
+                        'found unknown escape character %r' % utf8(ch),
+                        self.reader.get_mark(),
+                    )
+            else:
+                return chunks
+
+    def scan_flow_scalar_spaces(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        while srp(length) in ' \t':
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch == '\0':
+            raise ScannerError(
+                'while scanning a quoted scalar',
+                start_mark,
+                'found unexpected end of stream',
+                self.reader.get_mark(),
+            )
+        elif ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            breaks = self.scan_flow_scalar_breaks(double, start_mark)
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        else:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_flow_scalar_breaks(self, double, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        chunks = []  # type: List[Any]
+        srp = self.reader.peek
+        srf = self.reader.forward
+        while True:
+            # Instead of checking indentation, we check for document
+            # separators.
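+            # A '---' or '...' at the start of a line would otherwise be
+            # folded into the scalar's value, hiding a document boundary,
+            # so it is rejected with an explicit error below.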
+            prefix = self.reader.prefix(3)
+            if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                raise ScannerError(
+                    'while scanning a quoted scalar',
+                    start_mark,
+                    'found unexpected document separator',
+                    self.reader.get_mark(),
+                )
+            while srp() in ' \t':
+                srf()
+            if srp() in '\r\n\x85\u2028\u2029':
+                chunks.append(self.scan_line_break())
+            else:
+                return chunks
+
+    def scan_plain(self):
+        # type: () -> Any
+        # See the specification for details.
+        # We add an additional restriction for the flow context:
+        #   plain scalars in the flow context cannot contain ',', ': ' and '?'.
+        # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are loosened for the flow context.
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []  # type: List[Any]
+        start_mark = self.reader.get_mark()
+        end_mark = start_mark
+        indent = self.indent + 1
+        # We allow zero indentation for scalars, but then we need to check for
+        # document separators at the beginning of the line.
+        # if indent == 0:
+        #     indent = 1
+        spaces = []  # type: List[Any]
+        while True:
+            length = 0
+            if srp() == '#':
+                break
+            while True:
+                ch = srp(length)
+                if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB:
+                    pass
+                elif ch == '?' and self.scanner_processing_version != (1, 1):
+                    pass
+                elif (
+                    ch in _THE_END_SPACE_TAB
+                    or (
+                        not self.flow_level
+                        and ch == ':'
+                        and srp(length + 1) in _THE_END_SPACE_TAB
+                    )
+                    or (self.flow_level and ch in ',:?[]{}')
+                ):
+                    break
+                length += 1
+            # It's not clear what we should do with ':' in the flow context.
+            if (
+                self.flow_level
+                and ch == ':'
+                and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
+            ):
+                srf(length)
+                raise ScannerError(
+                    'while scanning a plain scalar',
+                    start_mark,
+                    "found unexpected ':'",
+                    self.reader.get_mark(),
+                    'Please check '
+                    'http://pyyaml.org/wiki/YAMLColonInFlowContext '
+                    'for details.',
+                )
+            if length == 0:
+                break
+            self.allow_simple_key = False
+            chunks.extend(spaces)
+            chunks.append(self.reader.prefix(length))
+            srf(length)
+            end_mark = self.reader.get_mark()
+            spaces = self.scan_plain_spaces(indent, start_mark)
+            if (
+                not spaces
+                or srp() == '#'
+                or (not self.flow_level and self.reader.column < indent)
+            ):
+                break
+
+        token = ScalarToken("".join(chunks), True, start_mark, end_mark)
+        if spaces and spaces[0] == '\n':
+            # Create a comment token to preserve the trailing line breaks.
+            comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
+            token.add_post_comment(comment)
+        return token
+
+    def scan_plain_spaces(self, indent, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # The specification is really confusing about tabs in plain scalars.
+        # We just forbid them completely. Do not use tabs in YAML!
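+        # Only plain spaces are gathered below; a tab ends the whitespace
+        # run, in line with the "no tabs" stance stated above.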
+        srp = self.reader.peek
+        srf = self.reader.forward
+        chunks = []
+        length = 0
+        while srp(length) in ' ':
+            length += 1
+        whitespaces = self.reader.prefix(length)
+        self.reader.forward(length)
+        ch = srp()
+        if ch in '\r\n\x85\u2028\u2029':
+            line_break = self.scan_line_break()
+            self.allow_simple_key = True
+            prefix = self.reader.prefix(3)
+            if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                return
+            breaks = []
+            while srp() in ' \r\n\x85\u2028\u2029':
+                if srp() == ' ':
+                    srf()
+                else:
+                    breaks.append(self.scan_line_break())
+                    prefix = self.reader.prefix(3)
+                    if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB:
+                        return
+            if line_break != '\n':
+                chunks.append(line_break)
+            elif not breaks:
+                chunks.append(' ')
+            chunks.extend(breaks)
+        elif whitespaces:
+            chunks.append(whitespaces)
+        return chunks
+
+    def scan_tag_handle(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+        # tag handles. I have allowed it anyway.
+        srp = self.reader.peek
+        ch = srp()
+        if ch != '!':
+            raise ScannerError(
+                'while scanning a %s' % (name,),
+                start_mark,
+                "expected '!', but found %r" % utf8(ch),
+                self.reader.get_mark(),
+            )
+        length = 1
+        ch = srp(length)
+        if ch != ' ':
+            while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
+                length += 1
+                ch = srp(length)
+            if ch != '!':
+                self.reader.forward(length)
+                raise ScannerError(
+                    'while scanning a %s' % (name,),
+                    start_mark,
+                    "expected '!', but found %r" % utf8(ch),
+                    self.reader.get_mark(),
+                )
+            length += 1
+        value = self.reader.prefix(length)
+        self.reader.forward(length)
+        return value
+
+    def scan_tag_uri(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
+        # Note: we do not check if the URI is well-formed.
+        srp = self.reader.peek
+        chunks = []
+        length = 0
+        ch = srp(length)
+        while (
+            '0' <= ch <= '9'
+            or 'A' <= ch <= 'Z'
+            or 'a' <= ch <= 'z'
+            or ch in "-;/?:@&=+$,_.!~*'()[]%"
+            or ((self.scanner_processing_version > (1, 1)) and ch == '#')
+        ):
+            if ch == '%':
+                chunks.append(self.reader.prefix(length))
+                self.reader.forward(length)
+                length = 0
+                chunks.append(self.scan_uri_escapes(name, start_mark))
+            else:
+                length += 1
+            ch = srp(length)
+        if length != 0:
+            chunks.append(self.reader.prefix(length))
+            self.reader.forward(length)
+            length = 0
+        if not chunks:
+            raise ScannerError(
+                'while parsing a %s' % (name,),
+                start_mark,
+                'expected URI, but found %r' % utf8(ch),
+                self.reader.get_mark(),
+            )
+        return "".join(chunks)
+
+    def scan_uri_escapes(self, name, start_mark):
+        # type: (Any, Any) -> Any
+        # See the specification for details.
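+        # Tag URIs use percent-encoding: each '%' must be followed by
+        # exactly two hex digits (e.g. '%2C' decodes to ','), and the
+        # collected bytes are decoded as UTF-8 once the run ends.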
+        srp = self.reader.peek
+        srf = self.reader.forward
+        code_bytes = []  # type: List[Any]
+        mark = self.reader.get_mark()
+        while srp() == '%':
+            srf()
+            for k in range(2):
+                if srp(k) not in '0123456789ABCDEFabcdef':
+                    raise ScannerError(
+                        'while scanning a %s' % (name,),
+                        start_mark,
+                        'expected URI escape sequence of 2 hexadecimal numbers,'
+                        ' but found %r' % utf8(srp(k)),
+                        self.reader.get_mark(),
+                    )
+            if PY3:
+                code_bytes.append(int(self.reader.prefix(2), 16))
+            else:
+                code_bytes.append(chr(int(self.reader.prefix(2), 16)))
+            srf(2)
+        try:
+            if PY3:
+                value = bytes(code_bytes).decode('utf-8')
+            else:
+                value = unicode(b"".join(code_bytes), 'utf-8')
+        except UnicodeDecodeError as exc:
+            raise ScannerError('while scanning a %s' % (name,), start_mark, str(exc), mark)
+        return value
+
+    def scan_line_break(self):
+        # type: () -> Any
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.reader.peek()
+        if ch in '\r\n\x85':
+            if self.reader.prefix(2) == '\r\n':
+                self.reader.forward(2)
+            else:
+                self.reader.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.reader.forward()
+            return ch
+        return ""
+
+
+class RoundTripScanner(Scanner):
+    def check_token(self, *choices):
+        # type: (Any) -> bool
+        # Check if the next token is one of the given types.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            if not choices:
+                return True
+            for choice in choices:
+                if isinstance(self.tokens[0], choice):
+                    return True
+        return False
+
+    def peek_token(self):
+        # type: () -> Any
+        # Return the next token, but do not delete it from the queue.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            return self.tokens[0]
+        return None
+
+    def _gather_comments(self):
+        # type: () -> Any
+        """combine multiple comment lines"""
+        comments = []  # type: List[Any]
+        if not self.tokens:
+            return comments
+        if isinstance(self.tokens[0], CommentToken):
+            comment = self.tokens.pop(0)
+            self.tokens_taken += 1
+            comments.append(comment)
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+            if not self.tokens:
+                return comments
+            if isinstance(self.tokens[0], CommentToken):
+                self.tokens_taken += 1
+                comment = self.tokens.pop(0)
+                # nprint('dropping2', comment)
+                comments.append(comment)
+        if len(comments) >= 1:
+            self.tokens[0].add_pre_comments(comments)
+        # pull in post comment on e.g. ':'
+        if not self.done and len(self.tokens) < 2:
+            self.fetch_more_tokens()
+
+    def get_token(self):
+        # type: () -> Any
+        # Return the next token.
+        while self.need_more_tokens():
+            self.fetch_more_tokens()
+        self._gather_comments()
+        if bool(self.tokens):
+            # nprint('tk', self.tokens)
+            # only add post comment to single line tokens:
+            # scalar, value token, FlowSequenceEndToken and FlowMappingEndToken;
+            # otherwise hidden stream tokens could get them (leave them and
+            # they will be pre comments for the next map/seq).
+            if (
+                len(self.tokens) > 1
+                and isinstance(
+                    self.tokens[0],
+                    (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
+                )
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+            ):
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+                self.tokens[0].add_post_comment(c)
+            elif (
+                len(self.tokens) > 1
+                and isinstance(self.tokens[0], ScalarToken)
+                and isinstance(self.tokens[1], CommentToken)
+                and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line
+            ):
+                self.tokens_taken += 1
+                c = self.tokens.pop(1)
+                c.value = (
+                    '\n' * (c.start_mark.line - self.tokens[0].end_mark.line)
+                    + (' ' * c.start_mark.column)
+                    + c.value
+                )
+                self.tokens[0].add_post_comment(c)
+                self.fetch_more_tokens()
+                while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken):
+                    self.tokens_taken += 1
+                    c1 = self.tokens.pop(1)
+                    c.value = c.value + (' ' * c1.start_mark.column) + c1.value
+                    self.fetch_more_tokens()
+            self.tokens_taken += 1
+            return self.tokens.pop(0)
+        return None
+
+    def fetch_comment(self, comment):
+        # type: (Any) -> None
+        value, start_mark, end_mark = comment
+        while value and value[-1] == ' ':
+            # empty line within indented key context
+            # no need to update end-mark, that is not used
+            value = value[:-1]
+        self.tokens.append(CommentToken(value, start_mark, end_mark))
+
+    # scanner
+
+    def scan_to_next_token(self):
+        # type: () -> Any
+        # We ignore spaces, line breaks and comments.
+        # If we find a line break in the block context, we set the flag
+        # `allow_simple_key` on.
+        # The byte order mark is stripped if it's the first character in the
+        # stream. We do not yet support BOM inside the stream as the
+        # specification requires. Any such mark will be considered as a part
+        # of the document.
+        #
+        # TODO: We need to make tab handling rules more sane. A good rule is:
+        #   Tabs cannot precede tokens
+        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #   KEY(block), VALUE(block), BLOCK-ENTRY
+        # So the checking code is
+        #   if <TAB>:
+        #       self.allow_simple_keys = False
+        # We also need to add the check for `allow_simple_keys == True` to
+        # `unwind_indent` before issuing BLOCK-END.
+        # Scanners for block, flow, and plain scalars need to be modified.
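+        #
+        # Unlike the base Scanner, this round-trip variant returns the
+        # skipped comment text together with its start and end marks, so
+        # the caller can re-attach it to a nearby token.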
+
+        srp = self.reader.peek
+        srf = self.reader.forward
+        if self.reader.index == 0 and srp() == '\uFEFF':
+            srf()
+        found = False
+        while not found:
+            while srp() == ' ':
+                srf()
+            ch = srp()
+            if ch == '#':
+                start_mark = self.reader.get_mark()
+                comment = ch
+                srf()
+                while ch not in _THE_END:
+                    ch = srp()
+                    if ch == '\0':  # don't gobble the end-of-stream character
+                        # but add an explicit newline, as "YAML processors should
+                        # terminate the stream with an explicit line break"
+                        # https://yaml.org/spec/1.2/spec.html#id2780069
+                        comment += '\n'
+                        break
+                    comment += ch
+                    srf()
+                # gather any blank lines following the comment too
+                ch = self.scan_line_break()
+                while len(ch) > 0:
+                    comment += ch
+                    ch = self.scan_line_break()
+                end_mark = self.reader.get_mark()
+                if not self.flow_level:
+                    self.allow_simple_key = True
+                return comment, start_mark, end_mark
+            if bool(self.scan_line_break()):
+                start_mark = self.reader.get_mark()
+                if not self.flow_level:
+                    self.allow_simple_key = True
+                ch = srp()
+                if ch == '\n':  # empty toplevel lines
+                    start_mark = self.reader.get_mark()
+                    comment = ""
+                    while ch:
+                        ch = self.scan_line_break(empty_line=True)
+                        comment += ch
+                    if srp() == '#':
+                        # empty line followed by indented real comment
+                        comment = comment.rsplit('\n', 1)[0] + '\n'
+                    end_mark = self.reader.get_mark()
+                    return comment, start_mark, end_mark
+            else:
+                found = True
+        return None
+
+    def scan_line_break(self, empty_line=False):
+        # type: (bool) -> Text
+        # Transforms:
+        #   '\r\n'      :   '\n'
+        #   '\r'        :   '\n'
+        #   '\n'        :   '\n'
+        #   '\x85'      :   '\n'
+        #   '\u2028'    :   '\u2028'
+        #   '\u2029'    :   '\u2029'
+        #   default     :   ''
+        ch = self.reader.peek()  # type: Text
+        if ch in '\r\n\x85':
+            if self.reader.prefix(2) == '\r\n':
+                self.reader.forward(2)
+            else:
+                self.reader.forward()
+            return '\n'
+        elif ch in '\u2028\u2029':
+            self.reader.forward()
+            return ch
+        elif empty_line and ch in '\t ':
+            self.reader.forward()
+            return ch
+        return ""
+
+    def scan_block_scalar(self, style, rt=True):
+        # type: (Any, Optional[bool]) -> Any
+        return Scanner.scan_block_scalar(self, style, rt=rt)
+
+
+# try:
+#     import psyco
+#     psyco.bind(Scanner)
+# except ImportError:
+#     pass
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py
new file mode 100644
index 000000000..522e9e9ab
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py
@@ -0,0 +1,240 @@
+# coding: utf-8
+
+from __future__ import absolute_import
+
+from ...ruamel.yaml.error import YAMLError
+from ...ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types, nprintf  # NOQA
+from ...ruamel.yaml.util import RegExp
+
+from ...ruamel.yaml.events import (
+    StreamStartEvent,
+    StreamEndEvent,
+    MappingStartEvent,
+    MappingEndEvent,
+    SequenceStartEvent,
+    SequenceEndEvent,
+    AliasEvent,
+    ScalarEvent,
+    DocumentStartEvent,
+    DocumentEndEvent,
+)
+from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
+
+if False:  # MYPY
+    from typing import Any, Dict, Union, Text, Optional  # NOQA
+    from ...ruamel.yaml.compat import VersionType  # NOQA
+
+__all__ = ['Serializer', 'SerializerError']
+
+
+class SerializerError(YAMLError):
+    pass
+
+
+class Serializer(object):
+
+    # 'id' and 3+ numbers, but not 000
+    ANCHOR_TEMPLATE = u'id%03d'
+    ANCHOR_RE = RegExp(u'id(?!000$)\\d{3,}')
+
+    def __init__(
+        self,
+        encoding=None,
+        explicit_start=None,
+        explicit_end=None,
+        version=None,
+        tags=None,
+        dumper=None,
+    ):
+        # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None  # NOQA
+        self.dumper = dumper
+        if self.dumper is not None:
+            self.dumper._serializer = self
+        self.use_encoding = encoding
+        self.use_explicit_start = explicit_start
+        self.use_explicit_end = explicit_end
+        if isinstance(version, string_types):
+            self.use_version = tuple(map(int, version.split('.')))
+        else:
+            self.use_version = version  # type: ignore
+        self.use_tags = tags
+        self.serialized_nodes = {}  # type: Dict[Any, Any]
+        self.anchors = {}  # type: Dict[Any, Any]
+        self.last_anchor_id = 0
+        self.closed = None  # type: Optional[bool]
+        self._templated_id = None
+
+    @property
+    def emitter(self):
+        # type: () -> Any
+        if hasattr(self.dumper, 'typ'):
+            return self.dumper.emitter
+        return self.dumper._emitter
+
+    @property
+    def resolver(self):
+        # type: () -> Any
+        if hasattr(self.dumper, 'typ'):
+            return self.dumper.resolver
+        return self.dumper._resolver
+
+    def open(self):
+        # type: () -> None
+        if self.closed is None:
+            self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
+            self.closed = False
+        elif self.closed:
+            raise SerializerError('serializer is closed')
+        else:
+            raise SerializerError('serializer is already opened')
+
+    def close(self):
+        # type: () -> None
+        if self.closed is None:
+            raise SerializerError('serializer is not opened')
+        elif not self.closed:
+            self.emitter.emit(StreamEndEvent())
+            self.closed = True
+
+    # def __del__(self):
+    #     self.close()
+
+    def serialize(self, node):
+        # type: (Any) -> None
+        if dbg(DBG_NODE):
+            nprint('Serializing nodes')
+            node.dump()
+        if self.closed is None:
+            raise SerializerError('serializer is not opened')
+        elif self.closed:
+            raise SerializerError('serializer is closed')
+        self.emitter.emit(
+            DocumentStartEvent(
+                explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
+            )
+        )
+        self.anchor_node(node)
+        self.serialize_node(node, None, None)
+        self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+        self.serialized_nodes = {}
+        self.anchors = {}
+        self.last_anchor_id = 0
+
+    def anchor_node(self, node):
+        # type: (Any) -> None
+        if node in self.anchors:
+            if self.anchors[node] is None:
+                self.anchors[node] = self.generate_anchor(node)
+        else:
+            anchor = None
+            try:
+                if node.anchor.always_dump:
+                    anchor = node.anchor.value
+            except:  # NOQA
+                pass
+            self.anchors[node] = anchor
+            if isinstance(node, SequenceNode):
+                for item in node.value:
+                    self.anchor_node(item)
+            elif isinstance(node, MappingNode):
+                for key, value in node.value:
+                    self.anchor_node(key)
+                    self.anchor_node(value)
+
+    def generate_anchor(self, node):
+        # type: (Any) -> Any
+        try:
+            anchor = node.anchor.value
+        except:  # NOQA
+            anchor = None
+        if anchor is None:
+            self.last_anchor_id += 1
+            return self.ANCHOR_TEMPLATE % self.last_anchor_id
+        return anchor
+
+    def serialize_node(self, node, parent, index):
+        # type: (Any, Any, Any) -> None
+        alias = self.anchors[node]
+        if node in self.serialized_nodes:
+            self.emitter.emit(AliasEvent(alias))
+        else:
+            self.serialized_nodes[node] = True
+            self.resolver.descend_resolver(parent, index)
+            if isinstance(node, ScalarNode):
+                # here check if the node.tag equals the one that would result from parsing;
+                # if not equal, quoting is necessary for strings
+                detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
+                default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
+                implicit = (
+                    (node.tag == detected_tag),
+                    (node.tag == default_tag),
+                    node.tag.startswith('tag:yaml.org,2002:'),
+                )
+                self.emitter.emit(
+                    ScalarEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        node.value,
+                        style=node.style,
+                        comment=node.comment,
+                    )
+                )
+            elif isinstance(node, SequenceNode):
+                implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
+                comment = node.comment
+                end_comment = None
+                seq_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style sequence
+                        seq_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                else:
+                    end_comment = None
+                self.emitter.emit(
+                    SequenceStartEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        flow_style=node.flow_style,
+                        comment=node.comment,
+                    )
+                )
+                index = 0
+                for item in node.value:
+                    self.serialize_node(item, node, index)
+                    index += 1
+                self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
+            elif isinstance(node, MappingNode):
+                implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
+                comment = node.comment
+                end_comment = None
+                map_comment = None
+                if node.flow_style is True:
+                    if comment:  # eol comment on flow style mapping
+                        map_comment = comment[0]
+                        # comment[0] = None
+                if comment and len(comment) > 2:
+                    end_comment = comment[2]
+                self.emitter.emit(
+                    MappingStartEvent(
+                        alias,
+                        node.tag,
+                        implicit,
+                        flow_style=node.flow_style,
+                        comment=node.comment,
+                        nr_items=len(node.value),
+                    )
+                )
+                for key, value in node.value:
+                    self.serialize_node(key, node, None)
+                    self.serialize_node(value, node, key)
+                self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment]))
+            self.resolver.ascend_resolver()
+
+
+def templated_id(s):
+    # type: (Text) -> Any
+    return Serializer.ANCHOR_RE.match(s)
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py
new file mode 100644
index 000000000..e44db44d0
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py
@@ -0,0 +1,54 @@
+
+# coding: utf-8
+
+from __future__ import print_function, absolute_import, division, unicode_literals
+
+import datetime
+import copy
+
+# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object;
+# a more complete datetime might be used by safe loading as well
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List  # NOQA
+
+
+class TimeStamp(datetime.datetime):
+    def __init__(self, *args, **kw):
+        # type: (Any, Any) -> None
+        self._yaml = dict(t=False, tz=None, delta=0)  # type: Dict[Any, Any]
+
+    def __new__(cls, *args, **kw):  # datetime is immutable
+        # type: (Any, Any) -> Any
+        return datetime.datetime.__new__(cls, *args, **kw)  # type: ignore
+
+    def __deepcopy__(self, memo):
+        # type: (Any) -> Any
+        ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second)
+        ts._yaml = copy.deepcopy(self._yaml)
+        return ts
+
+    def replace(self, year=None, month=None, day=None, hour=None,
+                minute=None, second=None, microsecond=None, tzinfo=True,
+                fold=None):
+        if year is None:
+            year = self.year
+        if month is None:
+            month = self.month
+        if day is None:
+            day = self.day
+        if hour is None:
+            hour = self.hour
+        if minute is None:
+            minute = self.minute
+        if second is None:
+            second = self.second
+        if microsecond is None:
+            microsecond = self.microsecond
+        if tzinfo is True:
+            tzinfo = self.tzinfo
+        if fold is None:
+            fold = self.fold
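+        # datetime.replace() would return a plain datetime, so build a new
+        # TimeStamp here and carry the private _yaml round-trip state over.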
+        ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold)
+        ts._yaml = copy.deepcopy(self._yaml)
+        return ts
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py
new file mode 100644
index 000000000..5f5a66353
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py
@@ -0,0 +1,286 @@
+# # header
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+if False:  # MYPY
+    from typing import Text, Any, Dict, Optional, List  # NOQA
+    from .error import StreamMark  # NOQA
+
+SHOWLINES = True
+
+
+class Token(object):
+    __slots__ = 'start_mark', 'end_mark', '_comment'
+
+    def __init__(self, start_mark, end_mark):
+        # type: (StreamMark, StreamMark) -> None
+        self.start_mark = start_mark
+        self.end_mark = end_mark
+
+    def __repr__(self):
+        # type: () -> Any
+        # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and
+        #               hasattr('self', key)]
+        attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
+        attributes.sort()
+        arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
+        if SHOWLINES:
+            try:
+                arguments += ', line: ' + str(self.start_mark.line)
+            except:  # NOQA
+                pass
+        try:
+            arguments += ', comment: ' + str(self._comment)
+        except:  # NOQA
+            pass
+        return '{}({})'.format(self.__class__.__name__, arguments)
+
+    def add_post_comment(self, comment):
+        # type: (Any) -> None
+        if not hasattr(self, '_comment'):
+            self._comment = [None, None]
+        self._comment[0] = comment
+
+    def add_pre_comments(self, comments):
+        # type: (Any) -> None
+        if not hasattr(self, '_comment'):
+            self._comment = [None, None]
+        assert self._comment[1] is None
+        self._comment[1] = comments
+
+    def get_comment(self):
+        # type: () -> Any
+        return getattr(self, '_comment', None)
+
+    @property
+    def comment(self):
+        # type: () -> Any
+        return getattr(self, '_comment', None)
+
+    def move_comment(self, target, empty=False):
+        # type: (Any, bool) -> Any
+        """move a comment from this token to target (normally the next token)
+        used to combine e.g. comments before a BlockEntryToken to the
+        ScalarToken that follows it
+        empty is a special case for empty values -> comment after key
+        """
+        c = self.comment
+        if c is None:
+            return
+        # don't push beyond last element
+        if isinstance(target, (StreamEndToken, DocumentStartToken)):
+            return
+        delattr(self, '_comment')
+        tc = target.comment
+        if not tc:  # target comment, just insert
+            # special for empty value in key: value issue 25
+            if empty:
+                c = [c[0], c[1], None, None, c[0]]
+            target._comment = c
+            # nprint('mco2:', self, target, target.comment, empty)
+            return self
+        if c[0] and tc[0] or c[1] and tc[1]:
+            raise NotImplementedError('overlap in comment %r %r' % (c, tc))
+        if c[0]:
+            tc[0] = c[0]
+        if c[1]:
+            tc[1] = c[1]
+        return self
+
+    def split_comment(self):
+        # type: () -> Any
+        """split the post part of a comment, and return it
+        as the comment to be added.
+        Delete the second part if it is [None, None].
+         abc:  # this goes to sequence
+         # this goes to first element
+         - first element
+        """
+        comment = self.comment
+        if comment is None or comment[0] is None:
+            return None  # nothing to do
+        ret_val = [comment[0], None]
+        if comment[1] is None:
+            delattr(self, '_comment')
+        return ret_val
+
+
+# class BOMToken(Token):
+#     id = '<byte order mark>'
+
+
+class DirectiveToken(Token):
+    __slots__ = 'name', 'value'
+    id = '<directive>'
+
+    def __init__(self, name, value, start_mark, end_mark):
+        # type: (Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.name = name
+        self.value = value
+
+
+class DocumentStartToken(Token):
+    __slots__ = ()
+    id = '<document start>'
+
+
+class DocumentEndToken(Token):
+    __slots__ = ()
+    id = '<document end>'
+
+
+class StreamStartToken(Token):
+    __slots__ = ('encoding',)
+    id = '<stream start>'
+
+    def __init__(self, start_mark=None, end_mark=None, encoding=None):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.encoding = encoding
+
+
+class StreamEndToken(Token):
+    __slots__ = ()
+    id = '<stream end>'
+
+
+class BlockSequenceStartToken(Token):
+    __slots__ = ()
+    id = '<block sequence start>'
+
+
+class BlockMappingStartToken(Token):
+    __slots__ = ()
+    id = '<block mapping start>'
+
+
+class BlockEndToken(Token):
+    __slots__ = ()
+    id = '<block end>'
+
+
+class FlowSequenceStartToken(Token):
+    __slots__ = ()
+    id = '['
+
+
+class FlowMappingStartToken(Token):
+    __slots__ = ()
+    id = '{'
+
+
+class FlowSequenceEndToken(Token):
+    __slots__ = ()
+    id = ']'
+
+
+class FlowMappingEndToken(Token):
+    __slots__ = ()
+    id = '}'
+
+
+class KeyToken(Token):
+    __slots__ = ()
+    id = '?'
+
+    # def x__repr__(self):
+    #     return 'KeyToken({})'.format(
+    #         self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0])
+
+
+class ValueToken(Token):
+    __slots__ = ()
+    id = ':'
+
+
+class BlockEntryToken(Token):
+    __slots__ = ()
+    id = '-'
+
+
+class FlowEntryToken(Token):
+    __slots__ = ()
+    id = ','
+
+
+class AliasToken(Token):
+    __slots__ = ('value',)
+    id = '<alias>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class AnchorToken(Token):
+    __slots__ = ('value',)
+    id = '<anchor>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class TagToken(Token):
+    __slots__ = ('value',)
+    id = '<tag>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+
+class ScalarToken(Token):
+    __slots__ = 'value', 'plain', 'style'
+    id = '<scalar>'
+
+    def __init__(self, value, plain, start_mark, end_mark, style=None):
+        # type: (Any, Any, Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+        self.plain = plain
+        self.style = style
+
+
+class CommentToken(Token):
+    __slots__ = 'value', 'pre_done'
+    id = '<comment>'
+
+    def __init__(self, value, start_mark, end_mark):
+        # type: (Any, Any, Any) -> None
+        Token.__init__(self, start_mark, end_mark)
+        self.value = value
+
+    def reset(self):
+        # type: () -> None
+        if hasattr(self, 'pre_done'):
+            delattr(self, 'pre_done')
+
+    def __repr__(self):
+        # type: () -> Any
+        v = '{!r}'.format(self.value)
+        if SHOWLINES:
+            try:
+                v += ', line: ' + str(self.start_mark.line)
+                v += ', col: ' + str(self.start_mark.column)
+            except:  # NOQA
+                pass
+        return 'CommentToken({})'.format(v)
+
+    def __eq__(self, other):
+        # type: (Any) -> bool
+        if self.start_mark != other.start_mark:
+            return False
+        if self.end_mark != other.end_mark:
+            return False
+        if self.value != other.value:
+            return False
+        return True
+
+    def __ne__(self, other):
+        # type: (Any) -> bool
+        return not self.__eq__(other)
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py
new file mode 100644
index 000000000..178825492
--- /dev/null
+++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py
@@ -0,0 +1,190 @@
+# coding: utf-8
+
+"""
+some helper functions that might be generally useful
+"""
+
+from __future__ import absolute_import, print_function
+
+from functools import partial
+import re
+
+from .compat import text_type, binary_type
+
+if False:  # MYPY
+    from typing import Any, Dict, Optional, List, Text  # NOQA
+    from .compat import StreamTextType  # NOQA
+
+
+class LazyEval(object):
+    """
+    Lightweight wrapper around lazily evaluated func(*args, **kwargs).
+
+    func is only evaluated when any attribute of its return value is accessed.
+    Every attribute access is passed through to the wrapped value.
+    (This only excludes special cases like method-wrappers, e.g., __hash__.)
+    The sole additional attribute is the lazy_self function which holds the
+    return value (or, prior to evaluation, func and arguments), in its closure.
+    """
+
+    def __init__(self, func, *args, **kwargs):
+        # type: (Any, Any, Any) -> None
+        def lazy_self():
+            # type: () -> Any
+            return_value = func(*args, **kwargs)
+            object.__setattr__(self, 'lazy_self', lambda: return_value)
+            return return_value
+
+        object.__setattr__(self, 'lazy_self', lazy_self)
+
+    def __getattribute__(self, name):
+        # type: (Any) -> Any
+        lazy_self = object.__getattribute__(self, 'lazy_self')
+        if name == 'lazy_self':
+            return lazy_self
+        return getattr(lazy_self(), name)
+
+    def __setattr__(self, name, value):
+        # type: (Any, Any) -> None
+        setattr(self.lazy_self(), name, value)
+
+
+RegExp = partial(LazyEval, re.compile)
+
+
+# originally as comment
+# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605
+# if you use this in your code, I suggest adding a test in your test suite
+# that checks this routine's output against a known piece of your YAML,
+# before upgrades to this code break your round-tripped YAML
+def load_yaml_guess_indent(stream, **kw):
+    # type: (StreamTextType, Any) -> Any
+    """guess the indent and block sequence indent of yaml stream/string
+
+    returns round_trip_loaded stream, indent level, block sequence indent
+    - block sequence indent is the number of spaces before a dash relative to previous indent
+    - if there are no block sequences, indent is taken from nested mappings, block sequence
+      indent is unset (None) in that case
+    """
+    from .main import round_trip_load
+
+    # load a YAML document, guess the indentation, if you use TABs you're on your own
+    def leading_spaces(line):
+        # type: (Any) -> int
+        idx = 0
+        while idx < len(line) and line[idx] == ' ':
+            idx += 1
+        return idx
+
+    if isinstance(stream, text_type):
+        yaml_str = stream  # type: Any
+    elif isinstance(stream, binary_type):
+        # most likely, but the Reader checks BOM for this
+        yaml_str = stream.decode('utf-8')
+    else:
+        yaml_str = stream.read()
+    map_indent = None
+    indent = None  # default if not found for some reason
+    block_seq_indent = None
+    prev_line_key_only = None
+    key_indent = 0
+    for line in yaml_str.splitlines():
+        rline = line.rstrip()
+        lline = rline.lstrip()
+        if lline.startswith('- '):
+            l_s = leading_spaces(line)
+            block_seq_indent = l_s - key_indent
+            idx = l_s + 1
+            while line[idx] == ' ':  # this will end as we rstripped
+                idx += 1
+            if line[idx] == '#':  # comment after -
+                continue
+            indent = idx - key_indent
+            break
+        if map_indent is None and prev_line_key_only is not None and rline:
+            idx = 0
+            while line[idx] in ' -':
+                idx += 1
+            if idx > prev_line_key_only:
+                map_indent = idx - prev_line_key_only
+        if rline.endswith(':'):
+            key_indent = leading_spaces(line)
+            idx = 0
+            while line[idx] == ' ':  # this will end on ':'
+                idx += 1
+            prev_line_key_only = idx
+            continue
+        prev_line_key_only = None
+    if indent is None and map_indent is not None:
+        indent = map_indent
+    return round_trip_load(yaml_str, **kw), indent, block_seq_indent
+
+
+def configobj_walker(cfg):
+    # type: (Any) -> Any
+    """
+    walks over a ConfigObj (INI file with comments) generating
+    corresponding YAML output (including comments)
+    """
+    from configobj import ConfigObj  # type: ignore
+
+    assert isinstance(cfg, ConfigObj)
+    for c in cfg.initial_comment:
+        if c.strip():
+            yield c
+    for s in _walk_section(cfg):
+        if s.strip():
+            yield s
+    for c in cfg.final_comment:
+        if c.strip():
+            yield c
+
+
+def _walk_section(s, level=0):
+    # type: (Any, int) -> Any
+    from configobj import Section
+
+    assert isinstance(s, Section)
+    indent = u'  ' * level
+    for name in s.scalars:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        x = s[name]
+        if u'\n' in x:
+            i = indent + u'  '
+            x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i)
+        elif ':' in x:
+            x = u"'" + x.replace(u"'", u"''") + u"'"
+        line = u'{0}{1}: {2}'.format(indent, name, x)
+        c = s.inline_comments[name]
+        if c:
+            line += u' ' + c
+        yield line
+    for name in s.sections:
+        for c in s.comments[name]:
+            yield indent + c.strip()
+        line = u'{0}{1}:'.format(indent, name)
+        c = s.inline_comments[name]
+        if c:
+            line += u' ' + c
+        yield line
+        for val in _walk_section(s[name], level=level + 1):
+            yield val
+
+
+# def config_obj_2_rt_yaml(cfg):
+#     from .comments import CommentedMap, CommentedSeq
+#     from configobj import ConfigObj
+#     assert isinstance(cfg, ConfigObj)
+#     #for c in cfg.initial_comment:
+#     #    if c.strip():
+#     #        pass
+#     cm = CommentedMap()
+#     for name in s.sections:
+#         cm[name] = d = CommentedMap()
+#
+#
+#     #for c in cfg.final_comment:
+#     #    if c.strip():
+#     #        yield c
+#     return cm
diff --git a/insights/client/apps/ansible/test_playbook.yml b/insights/client/apps/ansible/test_playbook.yml
index 47e906d19..5669d6108 100644
--- a/insights/client/apps/ansible/test_playbook.yml
+++ b/insights/client/apps/ansible/test_playbook.yml
@@ -6,29 +6,33 @@
 # responsible for any adverse outcomes related to these recommendations or Playbooks.
# # ping -# https://cloud.redhat.com/insights/remediations/44466a02-24a1-47b4-84cb-391aeff4523 +# https://cloud.redhat.com/insights/remediations/44466a02-24a1-47b4-84cb-391aeff4444 # Generated by Red Hat Insights on Thu, 29 Oct 2020 12:24:17 GMT # Created by some-user -- name: run insights - hosts: host1,host2 - become: true - gather_facts: false +- name: ping + hosts: "@@HOSTS@@" vars: insights_signature_exclude: /hosts,/vars/insights_signature insights_signature: !!binary | - TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0NtbFJSWHBDUVVGQ1EwRkJa - RVpwUlVWQmMzVTJTbWsxYkhoUlpqTlRkVTgyT1U0ek1WWXdZMUU1U1RCR1FXMUJkMUkyVVVGRFoy - dFJPVTR6TVZZd1kxRUtPVWt4TnpGUlppOVZkMjRyUWtSdk5EUkZibUlyVWtOSGFVTkdhR2x3WWts - S1dHNUhhakJ1VFdkU1lVNHpZV3BUV1ZCT1NFOUZLMGhoZFVwYVdEbGthZ3B5T0U5dEsyOXJjVGhZ - TkZobGEySk9hV3hqWTBScVdtODNWMWQxWW1SVGNXdDZMMjB3YWpacFNEUlNTMnhJUnpONmNFdGFX - bE13VkRsM2RVbzNTakZGQ21wWmRGaExZMW95Ymk5bmNVOTNhWEIwUVdoNFdHdHhXa2RDYm1OM1JH - NTBLM2xhV0RoWmNqWmtTbk5QUkdGVmFTOHlSRTlVT0c5S1RYaFZZV05oYURrS2FFaFhMM0JrY21o - eGRXSjVMemMzVTNob2VYaEJlV3hvV1dsTFoweDZUMDFFUWpsRFZEaHhTMk5NYVZCVGVXSTBhMFJv - WTBsS015dDRNeTlqUkRWVmVBcDVPRE01VjJKUlNGZEZhRFUxTWs5dWN6YzRXbk5KZEVsaE5XUnpj - REpNUlZkV1EwOXhPVWxLT1VodmFHTjVhRmhpTUZGeVJWbEZZVzlNUTFkNFlXUnRDamMwVGtKYVNF - eGhUbk16WWs5dU1XdG5VM0Z1V2t4cVRXSnlLeklyZHowOUNqMXJhMEpXQ2kwdExTMHRSVTVFSUZC - SFVDQlRTVWRPUVZSVlVrVXRMUzB0TFFvPQ== + TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS1ZtVnljMmx2YmpvZ1IyNTFV + RWNnZGpFS0NtbFJTVlZCZDFWQldVaHBSM0ZqZG5jMU9FUXJhalZ3VGtGUmFrTXpRUzh6VVdwUVow + MXZTM0JYZFZVeWNuaExWVkpJYTI5VVRHVkdTRmczVDFkVU1Ya0tlRzR6WWtOMU1FeHdXRWhDWjBk + Vkt6VndTRFF3ZGswdmMzVlhjblJZYjNJckwydHRja3BFWkZWMU5IWkpaMmt4VW1aQmNsTmxabk5H + TTFCdlIxWnFjUW8yWkhVM1RuQmhOazlQT1cxWFJGWXZPRnBqYW14SVdrVkpUVU5OYlRKamRqQnVk + RmhuWTJwSmJrTmhlbmtyTVhkNmFHaExUMFJNV1RKTE9WZ3dPVUkzQ2xKR01HMWpjR0ZpUVZsclJH + ZHpWVVYyU0RCM2RXUTRkRkpuZW5sWVFXWTJZMmw0TTBabVoyOTVTa2d2ZUdFd01uWkZRbFZGUWxG + dFUzVlhjMEk0ZHpNS2FXaFVVVVpyVEN0NE1uQnliSGxXUWtWd2FqTnlRMmhZY3poMVVsbEJMeTlU + UjNka1owSndkMlpVWm01ckswUk5VVGRuUzJKbFYyVnhabFFyVlRNNUt3cGFaMGhoTW1WbFkzZFJh + MmhsVEUwNFkzVkhha012ZFVFNVpWSnhablJRY1RCcU0yVllNWGxGYjNOR1pHTldOekJuYzFJd1dH + TjBSazlDWTJWSVVXNXRDak4xVjBSVVRqRmllV0kyWVZaYVFqZzVUM1JWTjFCU1preHhSMVkyYjBj + MU5WQkZVRkV6VVRCSVRXMVRlRWt3WlRjNWNIUXhXVmxHUkhad2FtNXBjSGtLTjJ0aUwzbERObU5s + ZFZGTWNraHpLMjVSYkVKQ05VSk5OV0ZQVFN0emMwSkpSM1JJZGtWUFRqWTFOMGw2WlRKaGNqRnBj + bTh5Y1V4TVFXRkJVMHBrVEFwbU9ERmlPSGxFY1hCeVpFOVZaV1ZMYzBncldYazFhekZGVVUwdlRE + QXpjeTkzVm1wa1NqQktiV2xFVlhwd1pXa3dkVXRCYmxGUWRFdElRbFJsYmtsSENtRnBMMU5KYlRO + RWMxcHlNRTV4TnpsYVVWVjBiVEpYY0c5bGVrTkZla3ByVlN0eU4weFhjRWg1Y21SdE5VMVpOVzV4 + TW1sb2JsaEVNRFZHTkhsbGIwVUtObmh4VlRGYWJDdERXSEZOYTNOblJUWklZV0l2VDFscVpHRmFX + VUZwUm5aUlJFOXhPVkU1V1ZVNE5rSnRXSGRwUzNoalltNVRWREJ6VnpZeFVUaFpWd3BWVUVSblpT + dFJMMHhSUFQwS1BVVnpWVWNLTFMwdExTMUZUa1FnVUVkUUlGTkpSMDVCVkZWU1JTMHRMUzB0Q2c9 + PQ== tasks: - - name: run insights - command: insights-client - changed_when: false + - ping: diff --git a/insights/tests/client/apps/test_playbook_verifier.py b/insights/tests/client/apps/test_playbook_verifier.py index 6b63f238f..ac5163755 100644 --- a/insights/tests/client/apps/test_playbook_verifier.py +++ b/insights/tests/client/apps/test_playbook_verifier.py @@ -1,11 +1,13 @@ # -*- coding: UTF-8 -*- import sys import pytest - -from insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError from mock.mock import patch from pytest import raises +# don't even bother on 2.6 +if sys.version_info >= (2, 7): + from 
insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError # noqa + @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') def test_skip_validation(): From a10b85ec0750fb44d5fc0b878cf0692ba946729e Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Fri, 7 May 2021 14:44:47 -0500 Subject: [PATCH 412/892] Fix issues in rhel6/python26 CI (#3051) * This fix uses a new python26 image with required modules included Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- Jenkinsfile | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a6d7113d2..ea42da6d4 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -21,19 +21,18 @@ pipeline { sh """ virtualenv .testenv source .testenv/bin/activate - pip install --upgrade "pip<10" - pip install "idna<=2.7" - pip install "pycparser<=2.18" - pip install -e .[testing] + pip install /pip_packages/pip-9.0.3-py2.py3-none-any.whl + pip install -r /var/lib/jenkins/ci_requirements.txt -f /pip_packages + pip install -e .[testing] -f /pip_packages pytest """ echo "Testing with Linter..." sh """ virtualenv .lintenv source .lintenv/bin/activate - pip install https://github.com/kjd/idna/archive/refs/tags/v2.7.zip - pip install https://github.com/eliben/pycparser/archive/refs/tags/release_v2.18.zip - pip install -e .[linting] + pip install /pip_packages/pip-9.0.3-py2.py3-none-any.whl + pip install -r /var/lib/jenkins/ci_requirements.txt -f /pip_packages + pip install -e .[linting] -f /pip_packages flake8 """ } From 94c1912c3b4730df2c703bda8647fbc8d8b8785a Mon Sep 17 00:00:00 2001 From: Sachin Date: Wed, 12 May 2021 01:00:50 +0530 Subject: [PATCH 413/892] Add combiner for /sys/bus/vmbus/devices/*/{class_id,device_id} (#3038) Signed-off-by: Sachin Patil --- .../sys_vmbus_devices.rst | 3 + docs/shared_parsers_catalog/sys_vmbus.rst | 3 + insights/combiners/sys_vmbus_devices.py | 48 ++++++++++++ .../combiners/tests/test_sys_vmbus_devices.py | 64 ++++++++++++++++ insights/parsers/lsvmbus.py | 9 +++ insights/parsers/sys_vmbus.py | 76 +++++++++++++++++++ insights/parsers/tests/test_sys_vmbus.py | 73 ++++++++++++++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 2 + 9 files changed, 280 insertions(+) create mode 100644 docs/shared_combiners_catalog/sys_vmbus_devices.rst create mode 100644 docs/shared_parsers_catalog/sys_vmbus.rst create mode 100644 insights/combiners/sys_vmbus_devices.py create mode 100644 insights/combiners/tests/test_sys_vmbus_devices.py create mode 100644 insights/parsers/sys_vmbus.py create mode 100644 insights/parsers/tests/test_sys_vmbus.py diff --git a/docs/shared_combiners_catalog/sys_vmbus_devices.rst b/docs/shared_combiners_catalog/sys_vmbus_devices.rst new file mode 100644 index 000000000..f40745f45 --- /dev/null +++ b/docs/shared_combiners_catalog/sys_vmbus_devices.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.sys_vmbus_devices + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_vmbus.rst b/docs/shared_parsers_catalog/sys_vmbus.rst new file mode 100644 index 000000000..92daeafce --- /dev/null +++ b/docs/shared_parsers_catalog/sys_vmbus.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.sys_vmbus
+    :members:
+    :show-inheritance:
diff --git a/insights/combiners/sys_vmbus_devices.py b/insights/combiners/sys_vmbus_devices.py
new file mode 100644
index 000000000..e3b7d133c
--- /dev/null
+++ b/insights/combiners/sys_vmbus_devices.py
@@ -0,0 +1,48 @@
+"""
+VMBus device info
+=================
+"""
+from insights.core.plugins import combiner
+from insights.parsers.sys_vmbus import SysVmbusDeviceID, SysVmbusClassID
+
+
+@combiner(SysVmbusDeviceID, SysVmbusClassID)
+class SysVmBusDeviceInfo(object):
+    '''
+    Combiner to access all the VMBus devices.
+
+    Attributes:
+        devices (list): A list of dicts, one per VMBus device.
+
+    Sample output::
+
+        [
+            {
+                'device_id': '47505500-0001-0000-3130-444531444234',
+                'class_id': '44c4f61d-4444-4400-9d52-802e27ede19f',
+                'description': 'PCI Express pass-through'
+            }
+        ]
+
+    Examples:
+        >>> len(output.devices)
+        2
+        >>> output.devices[0].get('device_id', '')
+        '47505500-0001-0000-3130-444531444234'
+        >>> output.devices[0].get('class_id', '')
+        '44c4f61d-4444-4400-9d52-802e27ede19f'
+        >>> output.devices[0].get('description', '')
+        'PCI Express pass-through'
+    '''
+    def __init__(self, device_id, class_id):
+        self.devices = []
+        for d in device_id:
+            for c in class_id:
+                if d.id in c.file_path:
+                    self.devices.append(
+                        {
+                            'device_id': d.id,
+                            'class_id': c.id,
+                            'description': c.desc
+                        }
+                    )
diff --git a/insights/combiners/tests/test_sys_vmbus_devices.py b/insights/combiners/tests/test_sys_vmbus_devices.py
new file mode 100644
index 000000000..73fbc6e46
--- /dev/null
+++ b/insights/combiners/tests/test_sys_vmbus_devices.py
@@ -0,0 +1,64 @@
+import doctest
+
+from insights.combiners import sys_vmbus_devices
+from insights.parsers.sys_vmbus import SysVmbusDeviceID, SysVmbusClassID
+from insights.tests import context_wrap
+
+
+DEVICE_ID_1 = """
+{47505500-0001-0000-3130-444531444234}
+""".strip()
+
+CLASS_ID_1 = """
+{44c4f61d-4444-4400-9d52-802e27ede19f}
+""".strip()
+
+DEVICE_ID_2 = """
+{4487b255-b88c-403f-bb51-d1f69cf17f87}
+""".strip()
+
+CLASS_ID_2 = """
+{3375baf4-9e15-4b30-b765-67acb10d607b}
+""".strip()
+
+
+def test_sys_vmbus_devices_combiner():
+    result = sys_vmbus_devices.SysVmBusDeviceInfo(
+        [
+            SysVmbusDeviceID(context_wrap(DEVICE_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/device_id')),
+            SysVmbusDeviceID(context_wrap(DEVICE_ID_2, path='/sys/bus/vmbus/devices/4487b255-b88c-403f-bb51-d1f69cf17f87/device_id')),
+        ],
+        [
+            SysVmbusClassID(context_wrap(CLASS_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/class_id')),
+            SysVmbusClassID(context_wrap(CLASS_ID_2, path='/sys/bus/vmbus/devices/4487b255-b88c-403f-bb51-d1f69cf17f87/class_id'))
+        ]
+    )
+
+    assert result.devices[0].get('device_id') == '47505500-0001-0000-3130-444531444234'
+    assert result.devices[0].get('class_id') == '44c4f61d-4444-4400-9d52-802e27ede19f'
+    assert result.devices[0].get('description') == 'PCI Express pass-through'
+
+    assert result.devices[-1].get('device_id') == '4487b255-b88c-403f-bb51-d1f69cf17f87'
+    assert result.devices[-1].get('class_id') == '3375baf4-9e15-4b30-b765-67acb10d607b'
+    assert result.devices[-1].get('description') == 'Unknown'
+
+
+def test_documentation():
+    result = sys_vmbus_devices.SysVmBusDeviceInfo(
+        [
+            SysVmbusDeviceID(context_wrap(DEVICE_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/device_id')),
+            SysVmbusDeviceID(context_wrap(DEVICE_ID_2, path='/sys/bus/vmbus/devices/4487b255-b88c-403f-bb51-d1f69cf17f87/device_id')),
+        ],
+        [
+            SysVmbusClassID(context_wrap(CLASS_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/class_id')),
+            SysVmbusClassID(context_wrap(CLASS_ID_2, path='/sys/bus/vmbus/devices/4487b255-b88c-403f-bb51-d1f69cf17f87/class_id'))
+        ]
+    )
+
+    failed_count, tests = doctest.testmod(
+        sys_vmbus_devices,
+        globs={
+            'output': result
+        }
+    )
+    assert failed_count == 0
diff --git a/insights/parsers/lsvmbus.py b/insights/parsers/lsvmbus.py
index 601e0cb62..ac3141479 100644
--- a/insights/parsers/lsvmbus.py
+++ b/insights/parsers/lsvmbus.py
@@ -6,6 +6,7 @@
 """
 import re
 
+from insights.util import deprecated
 from insights import parser, CommandParser
 from insights.parsers import SkipException
 from insights.specs import Specs
@@ -15,6 +16,10 @@
 class LsvmBus(CommandParser):
     """Parse the output of ``lsvmbus -vv`` as list.
 
+    .. warning::
+        This parser class is deprecated, please use
+        :py:class:`insights.combiner.sys_vmbus_devices.SysVmBusDeviceInfo` instead.
+
     Typical output::
 
         VMBUS ID 18: Class_ID = {44c4f61d-4444-4400-9d52-802e27ede19f} - PCI Express pass-through
@@ -60,6 +65,10 @@ class LsvmBus(CommandParser):
         ]
     """
 
+    def __init__(self, *args, **kwargs):
+        deprecated(LsvmBus, "Use the SysVmBusDeviceInfo combiner instead")
+        super(LsvmBus, self).__init__(*args, **kwargs)
+
     def parse_content(self, content):
         if not content:
             raise SkipException('No content.')
diff --git a/insights/parsers/sys_vmbus.py b/insights/parsers/sys_vmbus.py
new file mode 100644
index 000000000..c88a8ece4
--- /dev/null
+++ b/insights/parsers/sys_vmbus.py
@@ -0,0 +1,76 @@
+"""
+``/sys/bus/vmbus/`` VMBus info
+==============================
+
+SysVmbusDeviceID - file ``/sys/bus/vmbus/devices/*/device_id``
+--------------------------------------------------------------
+
+SysVmbusClassID - file ``/sys/bus/vmbus/devices/*/class_id``
+------------------------------------------------------------
+"""

+from insights import parser, Parser
+from insights.parsers import SkipException
+from insights.specs import Specs
+
+# Please refer to
+# https://github.com/torvalds/linux/blob/master/tools/hv/lsvmbus#L23 for the
+# full list of Class ID mappings.
+VMBUS_DEV_DICT = {
+    '44c4f61d-4444-4400-9d52-802e27ede19f': 'PCI Express pass-through',
+    'da0a7802-e377-4aac-8e77-0558eb1073f8': 'Synthetic framebuffer adapter'
+}
+
+
+@parser(Specs.sys_vmbus_device_id)
+class SysVmbusDeviceID(Parser):
+    """Parse the file ``/sys/bus/vmbus/devices/*/device_id``
+
+    Sample content::
+
+        {47505500-0001-0000-3130-444531444234}
+
+    Raises:
+        SkipException: When there is nothing to parse.
+
+    Attributes:
+        id(str): Device ID
+
+    Examples::
+
+        >>> vmbus_device.id
+        '47505500-0001-0000-3130-444531444234'
+    """
+    def parse_content(self, content):
+        if not content or len(content) != 1:
+            raise SkipException()
+        self.id = content[0].strip('{}\n')
+
+
+@parser(Specs.sys_vmbus_class_id)
+class SysVmbusClassID(Parser):
+    """Parse the file ``/sys/bus/vmbus/devices/*/class_id``
+
+    Sample content::
+
+        {44c4f61d-4444-4400-9d52-802e27ede19f}
+
+    Raises:
+        SkipException: When there is nothing to parse.
+ + Attributes: + id(str): Class ID + desc(str): Description + + Examples:: + + >>> vmbus_class.id + '44c4f61d-4444-4400-9d52-802e27ede19f' + >>> vmbus_class.desc + 'PCI Express pass-through' + """ + def parse_content(self, content): + if not content or len(content) != 1: + raise SkipException() + self.id = content[0].strip('{}\n') + self.desc = VMBUS_DEV_DICT.get(self.id, 'Unknown') diff --git a/insights/parsers/tests/test_sys_vmbus.py b/insights/parsers/tests/test_sys_vmbus.py new file mode 100644 index 000000000..4a35526fb --- /dev/null +++ b/insights/parsers/tests/test_sys_vmbus.py @@ -0,0 +1,73 @@ +import doctest +import pytest +from insights.parsers import sys_vmbus, SkipException +from insights.tests import context_wrap + + +BLANK = """ +""".strip() + +NO_RESULT = """ + Id Name State +---------------------------------------------------- +""".strip() + +DEVICE_ID_1 = """ +{47505500-0001-0000-3130-444531444234} +""".strip() + +CLASS_ID_1 = """ +{44c4f61d-4444-4400-9d52-802e27ede19f} +""".strip() + +CLASS_ID_2 = """ +{3375baf4-9e15-4b30-b765-67acb10d607b} +""".strip() + +CLASS_ID_3 = """ +{da0a7802-e377-4aac-8e77-0558eb1073f8} +""".strip() + + +def test_device_id(): + output = sys_vmbus.SysVmbusDeviceID(context_wrap(DEVICE_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/device_id')) + assert output.file_name == 'device_id' + assert output.id == '47505500-0001-0000-3130-444531444234' + + +def test_class_id(): + output = sys_vmbus.SysVmbusClassID(context_wrap(CLASS_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/class_id')) + assert output.file_name == 'class_id' + assert output.id == '44c4f61d-4444-4400-9d52-802e27ede19f' + assert output.desc == 'PCI Express pass-through' + + output = sys_vmbus.SysVmbusClassID(context_wrap(CLASS_ID_2, path='/sys/bus/vmbus/devices/4487b255-b88c-403f-bb51-d1f69cf17f87/class_id')) + assert output.file_name == 'class_id' + assert output.id == '3375baf4-9e15-4b30-b765-67acb10d607b' + assert output.desc == 'Unknown' + + output = sys_vmbus.SysVmbusClassID(context_wrap(CLASS_ID_3, path='/sys/bus/vmbus/devices/5620e0c7-8062-4dce-aeb7-520c7ef76171/class_id')) + assert output.file_name == 'class_id' + assert output.id == 'da0a7802-e377-4aac-8e77-0558eb1073f8' + assert output.desc == 'Synthetic framebuffer adapter' + + +def test_blank_output(): + with pytest.raises(SkipException): + output = sys_vmbus.SysVmbusDeviceID(context_wrap(BLANK, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/device_id')) + assert output is None + + with pytest.raises(SkipException): + output = sys_vmbus.SysVmbusClassID(context_wrap(BLANK, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/class_id')) + assert output is None + + +def test_documentation(): + failed_count, tests = doctest.testmod( + sys_vmbus, + globs={ + 'vmbus_device': sys_vmbus.SysVmbusDeviceID(context_wrap(DEVICE_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/device_id')), + 'vmbus_class': sys_vmbus.SysVmbusClassID(context_wrap(CLASS_ID_1, path='/sys/bus/vmbus/devices/47505500-0001-0000-3130-444531444234/class_id')) + } + ) + assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index e775853eb..1fb7c2545 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -662,6 +662,8 @@ class Specs(SpecSet): systemd_system_origin_accounting = RegistryPoint() systemid = RegistryPoint() systool_b_scsi_v = RegistryPoint() + sys_vmbus_device_id = 
RegistryPoint(multi_output=True) + sys_vmbus_class_id = RegistryPoint(multi_output=True) testparm_s = RegistryPoint(filterable=True) testparm_v_s = RegistryPoint(filterable=True) tags = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 0f9b47acf..7682431eb 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -983,6 +983,8 @@ def is_mod_loaded_for_ss(broker): simple_file("/conf/rhn/sysconfig/rhn/systemid") ]) systool_b_scsi_v = simple_command("/bin/systool -b scsi -v") + sys_vmbus_device_id = glob_file('/sys/bus/vmbus/devices/*/device_id') + sys_vmbus_class_id = glob_file('/sys/bus/vmbus/devices/*/class_id') testparm_s = simple_command("/usr/bin/testparm -s") testparm_v_s = simple_command("/usr/bin/testparm -v -s") tags = simple_file("/tags.json", kind=RawFileProvider) From 2daf64ceb41dc3303c840780e963c8b2c4ce414b Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 12 May 2021 12:15:53 -0400 Subject: [PATCH 414/892] Fix uname parser for when a debug kernel is being used (#3056) * Debug kernels append .debug to the end of the kernel name in the uname output, so I updated the parser to check and remove the appended .debug because it causes exceptions. * Fixes #2709 Signed-off-by: Ryan Blakley --- insights/parsers/tests/test_uname.py | 9 +++++++++ insights/parsers/uname.py | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/insights/parsers/tests/test_uname.py b/insights/parsers/tests/test_uname.py index c973aa625..0dfe1903a 100644 --- a/insights/parsers/tests/test_uname.py +++ b/insights/parsers/tests/test_uname.py @@ -11,6 +11,7 @@ UNAME2 = "Linux rhel7box 3.10.0-229.el7.x86_64 #1 SMP Mon Mar 3 13:32:45 EST 2014 x86_64 x86_64 x86_64 GNU/Linux" UNAME3 = "Linux map1a 2.6.18-53.el5PAE #1 SMP Wed Oct 10 16:48:18 EDT 2007 i686 i686 i386 GNU/Linux" UNAME4 = "Linux cvlvtsmsrv01 3.10.0-229.el7.x86_64 #1 SMP Thu Jan 29 18:37:38 EST 2015 x86_64 x86_64 x86_64 GNU/Linux" +UNAME4_DEBUG = "Linux cvlvtsmsrv01 3.10.0-229.el7.x86_64.debug #1 SMP Thu Jan 29 18:37:38 EST 2015 x86_64 x86_64 x86_64 GNU/Linux" UNAME5 = "Linux cvlvtsmsrv01 2.6.32-504.8.2.bgq.el6.x86_64 #1 SMP Thu Jan 29 18:37:38 EST 2015 x86_64 x86_64 x86_64 GNU/Linux" UNAME_RT_1 = "Linux localhost.localdomain 2.6.24.7-101.el5rt.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux" UNAME_RT_1pre = "Linux localhost.localdomain 2.6.24.6-101.el5rt.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux" @@ -53,6 +54,7 @@ def test_uname(): uname2 = uname.Uname(context_wrap(UNAME2)) uname3 = uname.Uname(context_wrap(UNAME3)) uname4 = uname.Uname(context_wrap(UNAME4)) + uname4_debug = uname.Uname(context_wrap(UNAME4_DEBUG)) uname5 = uname.Uname(context_wrap(UNAME5)) uname6 = uname.Uname(context_wrap(UNAME_BLANK_LINE)) uname7 = uname.Uname(context_wrap(UNAME_FOREMAN_DEBUG)) @@ -75,18 +77,25 @@ def test_uname(): assert uname1.rhel_release == ['6', '6'] assert uname1.ver_rel == '2.6.32-504.el6' assert uname1.version == '2.6.32' + assert uname1.debug_kernel is False assert uname1._lv_release == LooseVersion('504.0.0.0.el6') assert uname1._lv_version == LooseVersion('2.6.32') assert uname1._rel_maj == '504' assert uname1._sv_version == StrictVersion('2.6.32') + # Test that the debug kernel returns True + assert uname4_debug.debug_kernel is True + # Test the equality and inequality operators assert uname1 != uname2 assert uname2 == uname4 + assert uname2 == uname4_debug assert 
uname2 > uname1
     assert uname4 >= uname3
+    assert uname4_debug >= uname3
     assert uname3 < uname2
     assert uname1 <= uname4
+    assert uname1 <= uname4_debug
 
     # String and repr tests
     assert str(uname1) == 'version: 2.6.32; release: 504.el6; rel_maj: 504; lv_release: 504.0.0.0.el6'
diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py
index aace685f0..d528a2894 100644
--- a/insights/parsers/uname.py
+++ b/insights/parsers/uname.py
@@ -173,6 +173,8 @@ class Uname(CommandParser):
       ``version-release``.
     - `rhel_release`: A list of two elements, the major and minor RHEL product
       release numbers.
+    - `debug_kernel`: A bool that is True when the server is running a
+      debug kernel.
     """
 
     keys = [
@@ -191,6 +193,7 @@ class Uname(CommandParser):
         'arch',
         'ver_rel',
         'rhel_release',
+        'debug_kernel',
         '_lv_release',
         '_rel_maj',
         '_sv_version',
@@ -333,6 +336,14 @@ def parse_nvr(cls, nvr, data=None, arch=True):
             data = dict(cls.defaults)
 
         data['version'], data['release_arch'] = nvr.split('-', 1)
+
+        # Debug kernels have .debug appended to the end, so remove it before continuing.
+        if data['release_arch'].endswith('.debug'):
+            data['debug_kernel'] = True
+            data['release_arch'] = data['release_arch'].rsplit('.', 1)[0]
+        else:
+            data['debug_kernel'] = False
+
         if arch:
             try:
                 data['release'], data['arch'] = data['release_arch'].rsplit('.', 1)

From eba58ed12fad40bcc80cf929eb75f7862a3d2621 Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Wed, 12 May 2021 13:56:49 -0500
Subject: [PATCH 415/892] Fix error created by jinja2 3.0.0 release (#3057)

* Fix error created by jinja2 3.0.0 release

* Update doc build modules to latest versions

* Pinned jinja2 to the last version that worked with core

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Add jedi pin back in since problem isn't fixed

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 setup.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/setup.py b/setup.py
index 5316d6689..3f99b0417 100644
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@
     'cachecontrol[filecache]',
     'defusedxml',
     'lockfile',
-    'jinja2',
+    'jinja2<=2.11.3',
 ])
 
 if (sys.version_info < (2, 7)):
@@ -66,16 +66,16 @@ def maybe_require(pkg):
 ])
 
 docs = set([
-    'docutils==0.16',
-    'Sphinx<=3.0.2',
+    'docutils',
+    'Sphinx',
     'nbsphinx',
     'sphinx_rtd_theme',
     'ipython',
     'colorama',
-    'jinja2',
+    'jinja2<=2.11.3',
     'Pygments',
-    'jedi<0.18.0'  # Open issue with jedi 0.18.0 and iPython <= 7.19
-                   # https://github.com/davidhalter/jedi/issues/1714
+    'jedi<0.18.0',  # Open issue with jedi 0.18.0 and iPython <= 7.19
+                    # https://github.com/davidhalter/jedi/issues/1714
 ])
 
 testing = set([

From 7ee571f9a1a1edc60a408d09ca504f0d4cdd3042 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Thu, 13 May 2021 07:44:50 +0800
Subject: [PATCH 416/892] Update mongod_conf spec (#3054)

* Update mongod_conf spec

* Remove useless spec

* Change glob_file to first_file

Signed-off-by: Huanhuan Li

* Change it back to glob_file

Signed-off-by: Huanhuan Li
---
 insights/specs/default.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index 7682431eb..6ff706ac5 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -605,11 +605,9 @@ def md_device_list(broker):
     modprobe = glob_file(["/etc/modprobe.conf", "/etc/modprobe.d/*.conf"])
     mokutil_sbstate = simple_command("/bin/mokutil --sb-state")
     mongod_conf =
glob_file([ - "/etc/mongod.conf", - "/etc/mongodb.conf", - "/etc/opt/rh/rh-mongodb26/mongod.conf", - "/etc/opt/rh/rh-mongodb34/mongod.conf" - ]) + "/etc/mongod.conf", + "/etc/opt/rh/rh-mongodb34/mongod.conf" + ]) mount = simple_command("/bin/mount") mounts = simple_file("/proc/mounts") mssql_conf = simple_file("/var/opt/mssql/mssql.conf") From e93c4cf47c9d9207578d7200d7041d2c82b54344 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 13 May 2021 13:31:18 -0400 Subject: [PATCH 417/892] update uploader.json map Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 41 +++++--------------------- 1 file changed, 8 insertions(+), 33 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 2f339e64e..03a8bfdc4 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -252,6 +252,7 @@ { "command": "/usr/bin/doveconf", "pattern": [ + "auth_mechanisms", "ssl_min_protocol", "ssl_protocols", "{", @@ -1152,11 +1153,8 @@ "command": "/bin/ps aux", "pattern": [ "/usr/bin/docker", - "/usr/bin/docker daemon", "/usr/bin/docker-current", - "/usr/bin/docker-current daemon", "/usr/bin/dockerd-current", - "/usr/bin/hyperkube kubelet", "/usr/bin/openshift start master", "/usr/bin/openshift start node", "COMMAND", @@ -1672,8 +1670,7 @@ { "file": "/root/.config/openshift/hosts", "pattern": [ - "[", - "openshift_use_crio" + "[" ], "symbolic_name": "openshift_hosts" }, @@ -2513,7 +2510,6 @@ " invoked oom-killer: ", "(enic): transmit queue 0 timed out", ", type vxfs) has no security xattr handler", - "- image is referenced in one or more repositories", "/input/input", "/usr/lib/ocf/resource.d/heartbeat/azure-lb: line 91: kill: Binary: arguments must be process or job IDs", "17763", @@ -2532,10 +2528,8 @@ "Device is still in reset", "Disable lvmetad in lvm.conf. lvmetad should never be enabled in a clustered environment. Set use_lvmetad=0 and kill the lvmetad process", "Error I40E_AQ_RC_EINVAL adding RX filters on PF, promiscuous mode forced on", - "Error deleting EBS Disk volume aws", "Error running DeviceResume dm_task_run failed", "Exception happened during processing of request from", - "Failed to extend thin", "File system is filling up", "High directory name cache miss rate", "High number of saturated processors", @@ -2549,20 +2543,16 @@ "Low random number entropy available", "MDC/MDIO access timeout", "Medium access timeout failure. 
Offlining disk!", - "MountVolume.SetUp succeeded for volume", "NETDEV WATCHDOG", "Neighbour table overflow", "NetworkManager state is now CONNECTED_SITE", "Not scheduled for", - "Orphaned pod", "Out of MCCQ wrbs", "Out of memory: Kill process", "PPM exceeds tolerance 500 PPM", "ProcessExecutionError: Exit code: 1; Stdin: ; Stdout: ; Stderr: setting the network namespace", "Result of start operation for clvmd ", "SCSI error: return code =", - "SDN initialization failed: Error: Existing service with IP: None is not part of service network", - "Scheduled import of stream", "Severe demand for real memory", "Some CPU busy executing in system mode", "Steal time is >", @@ -2572,14 +2562,12 @@ "TX driver issue detected, PF reset issued", "The threshold number of context switches per second per CPU", "This system does not support \"SSSE3\"", - "Throttling request took", "Unit ip6tables.service entered failed state", "Unit iptables.service entered failed state", "Virtualization daemon", "] trap divide error ", "_NET_ACTIVE_WINDOW", "as active slave; either", - "belongs to docker.service", "callbacks suppressed", "canceled DHCP transaction, DHCP client pid", "clearing Tx timestamp hang", @@ -2589,21 +2577,15 @@ "enabling it in", "end_request: I/O error, dev", "error Error on attach: Node not found", - "evicted, waiting for pod to be cleaned up", - "eviction manager: eviction criteria not yet met for threshold", "eviction manager: must evict pod(s) to reclaim nodefsInodes", - "eviction manager: observations: signal=allocatableNodeFs.available, available: -", "ext4_ext_search_left", "failed while handling", "failed with error -110", "failed: Invalid argument", - "failed: rpc error: code = 2 desc = unable to inspect docker image", "fiid_obj_get: 'present_countdown_value': data not available", "firewalld - dynamic firewall daemon", - "fit failure summary on nodes : Insufficient pods", "from image service failed: rpc error: code = Canceled desc = context canceled", "host not found in upstream", - "http2: no cached connection was available", "hv_netvsc vmbus_", "hv_netvsc: probe of vmbus_", "hw csum failure", @@ -2650,9 +2632,7 @@ "start request repeated too quickly for docker.service", "state changed timeout -> done", "swapper: page allocation failure", - "systemd[1]: Received SIGCHLD from PID", "tg3_start_xmit", - "there is a meaningful conflict", "timed out", "timeout before we got a set response", "timing out command, waited", @@ -2675,12 +2655,16 @@ "symbolic_name": "modprobe_d" }, { - "file": "/etc/()*mongod.conf", + "file": "/etc/mongod.conf", "pattern": [ "dbpath" ], "symbolic_name": "mongod_conf" }, + { + "file": "/etc/opt/rh/rh-mongodb34/mongod.conf", + "symbolic_name": "mongod_conf" + }, { "file": "/proc/mounts", "pattern": [], @@ -3244,15 +3228,6 @@ "pattern": [], "symbolic_name": "resolv_conf" }, - { - "file": "/etc/opt/rh/rh-mongodb26/()*mongod.conf", - "pattern": [ - "destination", - "syslog", - "systemLog" - ], - "symbolic_name": "rh_mongodb26_conf" - }, { "file": "/etc/rhn/rhn.conf", "pattern": [], @@ -4326,5 +4301,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-04-22T11:04:17.009396" + "version": "2021-05-06T16:06:41.477806" } \ No newline at end of file From f9f0b49095966efb6ee80cb351eebdd5537b3e03 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Fri, 14 May 2021 02:58:52 -0400 Subject: [PATCH 418/892] Add kernel-alt release mappings (#3060) * Add the mappings for the 3 
kernel-alt releases, I added alt to the release number to distinguish it from the normal kernel release versions. * Fixes #2770 Signed-off-by: Ryan Blakley --- insights/parsers/tests/test_uname.py | 12 ++++++++++++ insights/parsers/uname.py | 6 ++++++ 2 files changed, 18 insertions(+) diff --git a/insights/parsers/tests/test_uname.py b/insights/parsers/tests/test_uname.py index 0dfe1903a..4b38770e8 100644 --- a/insights/parsers/tests/test_uname.py +++ b/insights/parsers/tests/test_uname.py @@ -258,6 +258,18 @@ def test_from_release(): from_nvr = uname.Uname.from_kernel("2.6.32-358") assert str(from_release) == str(from_nvr) + # Test the regular 7.4 version. + release = ("7", "4") + from_release = uname.Uname.from_release(release) + from_nvr = uname.Uname.from_kernel("3.10.0-693") + assert str(from_release) == str(from_nvr) + + # Test the kernel-alt 7.4 version. + release = ("7", "4", "alt") + from_release = uname.Uname.from_release(release) + from_nvr = uname.Uname.from_kernel("4.11.0-44") + assert str(from_release) == str(from_nvr) + unknown_list = ["2.4.21-3", "2.6.9-4", "2.6.18-7", "2.6.32-70", "3.10.0-53"] known_list = [{'version': "2.4.21-4", 'rhel_release': ["3", "0"]}, {'version': "2.6.9-55", 'rhel_release': ["4", "5"]}, diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py index d528a2894..e9851aaed 100644 --- a/insights/parsers/uname.py +++ b/insights/parsers/uname.py @@ -101,6 +101,12 @@ "3.10.0-1062": "7.7", "3.10.0-1127": "7.8", "3.10.0-1160": "7.9", + # Added alt to the below 3 kernel entries since they're part of the + # kernel-alt pkg, if we don't it would create duplicate entries and + # mess with what's returned by from_release. + "4.11.0-44": "7.4.alt", + "4.14.0-49": "7.5.alt", + "4.14.0-115": "7.6.alt", "4.18.0-80": "8.0", "4.18.0-147": "8.1", "4.18.0-193": "8.2", From 340f17894263adff7020a22af3aeb43fa74561dc Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Fri, 14 May 2021 16:22:35 +0800 Subject: [PATCH 419/892] Update spec pmrep metrics (#3061) * Update spec pmrep metrics Signed-off-by: jiazhang * Remove miss lines Signed-off-by: jiazhang * Resort search result Signed-off-by: jiazhang * Update pmrep_doc_obj_search Signed-off-by: jiazhang * Update search test Signed-off-by: jiazhang --- insights/parsers/pmrep.py | 20 ++++++++++---------- insights/parsers/tests/test_pmrep.py | 21 ++++++++++++--------- insights/specs/default.py | 2 +- 3 files changed, 23 insertions(+), 20 deletions(-) diff --git a/insights/parsers/pmrep.py b/insights/parsers/pmrep.py index 29453b9ef..39011943f 100644 --- a/insights/parsers/pmrep.py +++ b/insights/parsers/pmrep.py @@ -1,8 +1,8 @@ """ -Pmrep - command ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` -===================================================================================================================== +Pmrep - command ``pmrep -t 1s -T 1s -o csv`` +====================================================== -Parse the content of the ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command. +Parse the content of the ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv`` command. 
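+
+A short consumption sketch (hypothetical rule code, not part of this module;
+each parsed row is a plain dict with ``name`` and ``value`` keys)::
+
+    # `pmrep` is a PMREPMetrics instance, i.e. a list of row dicts
+    def pages_swapped_out(pmrep):
+        rows = pmrep.search(name='swap.pagesout')
+        return any(float(r['value']) > 0 for r in rows if r['value'])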
Sample ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command output:: @@ -14,11 +14,11 @@ >>> type(pmrep_doc_obj) >>> pmrep_doc_obj = sorted(pmrep_doc_obj, key=lambda x: x['name']) - >>> pmrep_doc_obj[1] + >>> pmrep_doc_obj[3] {'name': 'network.interface.collisions-eth0', 'value': '4.000'} - >>> pmrep_doc_obj[4] + >>> pmrep_doc_obj[6] {'name': 'network.interface.out.packets-lo', 'value': '1.000'} - >>> pmrep_doc_obj[5] + >>> pmrep_doc_obj[7] {'name': 'swap.pagesout', 'value': '5.000'} """ @@ -30,7 +30,7 @@ @parser(Specs.pmrep_metrics) class PMREPMetrics(CommandParser, list): - """Parses output of ``pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv`` command.""" + """Parses output of ``pmrep -t 1s -T 1s -o csv`` command.""" def parse_content(self, content): if not content or len(content) == 1: raise SkipException("There is no data in the table") @@ -53,9 +53,9 @@ def search(self, **kwargs): search criteria. Examples: - >>> pmrep_doc_obj.search(name__endswith='lo') - [{'name': 'network.interface.out.packets-lo', 'value': '1.000'}, {'name': 'network.interface.collisions-lo', 'value': '3.000'}] - >>> pmrep_doc_obj.search(name__endswith='swap.pagesout') + >>> sorted(pmrep_doc_obj_search.search(name__endswith='lo'), key=lambda x: x['name']) + [{'name': 'network.interface.collisions-lo', 'value': '3.000'}, {'name': 'network.interface.out.packets-lo', 'value': '1.000'}] + >>> sorted(pmrep_doc_obj_search.search(name__endswith='swap.pagesout'), key=lambda x: x['name']) [{'name': 'swap.pagesout', 'value': '5.000'}] """ return keyword_search(self, **kwargs) diff --git a/insights/parsers/tests/test_pmrep.py b/insights/parsers/tests/test_pmrep.py index d942bc158..9e82efa85 100644 --- a/insights/parsers/tests/test_pmrep.py +++ b/insights/parsers/tests/test_pmrep.py @@ -6,9 +6,9 @@ from insights.parsers.pmrep import PMREPMetrics PMREPMETRIC_DATA = """ -Time,"network.interface.out.packets-lo","network.interface.out.packets-eth0","network.interface.collisions-lo","network.interface.collisions-eth0","swap.pagesout" +Time,"network.interface.out.packets-lo","network.interface.out.packets-eth0","network.interface.collisions-lo","network.interface.collisions-eth0","swap.pagesout","mssql.memory_manager.stolen_server_memory","mssql.memory_manager.total_server_memory" 2021-04-26 05:42:24,,,,, -2021-04-26 05:42:25,1.000,2.000,3.000,4.000,5.000 +2021-04-26 05:42:25,1.000,2.000,3.000,4.000,5.000,349816,442000 """.strip() PMREPMETRIC_DATA_2 = """ @@ -30,11 +30,13 @@ def test_pmrep_info(): pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA)) pmrep_table = sorted(pmrep_table, key=lambda x: x['name']) assert pmrep_table[0] == {'name': 'Time', 'value': '2021-04-26 05:42:25'} - assert pmrep_table[1] == {'name': 'network.interface.collisions-eth0', 'value': '4.000'} - assert pmrep_table[2] == {'name': 'network.interface.collisions-lo', 'value': '3.000'} - assert pmrep_table[3] == {'name': 'network.interface.out.packets-eth0', 'value': '2.000'} - assert pmrep_table[4] == {'name': 'network.interface.out.packets-lo', 'value': '1.000'} - assert pmrep_table[5] == {'name': 'swap.pagesout', 'value': '5.000'} + assert pmrep_table[1] == {'name': 'mssql.memory_manager.stolen_server_memory', 'value': '349816'} + assert pmrep_table[2] == {'name': 'mssql.memory_manager.total_server_memory', 'value': '442000'} + assert pmrep_table[3] == {'name': 'network.interface.collisions-eth0', 'value': '4.000'} + assert pmrep_table[4] == 
{'name': 'network.interface.collisions-lo', 'value': '3.000'} + assert pmrep_table[5] == {'name': 'network.interface.out.packets-eth0', 'value': '2.000'} + assert pmrep_table[6] == {'name': 'network.interface.out.packets-lo', 'value': '1.000'} + assert pmrep_table[7] == {'name': 'swap.pagesout', 'value': '5.000'} pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA_2)) pmrep_table = sorted(pmrep_table, key=lambda x: x['name']) @@ -44,8 +46,8 @@ def test_pmrep_info(): assert pmrep_table[3] == {'name': 'swap.pagesout', 'value': '3.000'} pmrep_table = PMREPMetrics(context_wrap(PMREPMETRIC_DATA)) - assert pmrep_table.search(name__endswith='lo') == [{'name': 'network.interface.out.packets-lo', 'value': '1.000'}, {'name': 'network.interface.collisions-lo', 'value': '3.000'}] - assert pmrep_table.search(name__endswith='swap.pagesout') == [{'name': 'swap.pagesout', 'value': '5.000'}] + assert sorted(pmrep_table.search(name__endswith='lo'), key=lambda x: x['name']) == [{'name': 'network.interface.collisions-lo', 'value': '3.000'}, {'name': 'network.interface.out.packets-lo', 'value': '1.000'}] + assert sorted(pmrep_table.search(name__endswith='swap.pagesout'), key=lambda x: x['name']) == [{'name': 'swap.pagesout', 'value': '5.000'}] def test_empty(): @@ -63,6 +65,7 @@ def test_wrong_data(): def test_pmrep_doc_examples(): env = { 'pmrep_doc_obj': PMREPMetrics(context_wrap(PMREPMETRIC_DATA)), + 'pmrep_doc_obj_search': PMREPMetrics(context_wrap(PMREPMETRIC_DATA)) } failed, total = doctest.testmod(pmrep, globs=env) assert failed == 0 diff --git a/insights/specs/default.py b/insights/specs/default.py index 6ff706ac5..e330654d9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -753,7 +753,7 @@ def pmlog_summary_file(broker): pmlog_summary = command_with_args( "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free kernel.all.cpu.wait.total", pmlog_summary_file) - pmrep_metrics = simple_command("pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv") + pmrep_metrics = simple_command("pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv") postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") postgresql_conf = first_file([ From 5cd5a0e6a0ef389d8e99200c0a0f14d724b2df96 Mon Sep 17 00:00:00 2001 From: Stephen Date: Fri, 14 May 2021 09:39:09 -0400 Subject: [PATCH 420/892] [testing] start using github actions for tests (#3064) The old jenkins CI that this runs on is far out of date and very unstable. 
Moving to GitHub Actions satisfies all the requirements we have for testing the project.

Signed-off-by: Stephen Adams
---
 .github/workflows/main.yml | 90 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)
 create mode 100644 .github/workflows/main.yml

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 000000000..1c070e40d
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,90 @@
+name: Insights Core Test
+
+on:
+  push:
+    branches: [ master, '3.0']
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  code-test:
+
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-versions: [2.7, 3.6]
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python ${{ matrix.python-versions }}
+      uses: actions/setup-python@v2
+      with:
+        python-version: ${{ matrix.python-versions }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+    - name: flake8
+      run: |
+        pip install -e .[linting]
+        flake8 .
+    - name: pytest
+      run: |
+        pip install -e .[testing]
+        pytest
+
+  python26-test:
+
+    runs-on: ubuntu-18.04
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: apt-install python26
+      run: |
+        sudo add-apt-repository ppa:deadsnakes/ppa
+        sudo apt-get update
+        sudo apt-get install python2.6
+        sudo update-alternatives --install /usr/bin/python python /usr/bin/python2.6 1
+        sudo update-alternatives --set python /usr/bin/python2.6
+    - name: get dependencies
+      run: |
+        git clone https://github.com/SteveHNH/jenkins-s2i-example.git pips
+        CUR_DIR=$(pwd)
+        mkdir ../tools && cd ../tools
+        curl -L -O https://files.pythonhosted.org/packages/b8/04/be569e393006fa9a2c10ef72ea33133c2902baa115dd1d4279dae55c3b3b/setuptools-36.8.0.zip
+        unzip setuptools-36.8.0.zip && cd setuptools-36.8.0
+        python setup.py install --user && cd ..
+        curl -L -O https://github.com/pypa/pip/archive/refs/tags/9.0.3.tar.gz
+        tar -xvzf 9.0.3.tar.gz && cd pip-9.0.3
+        python setup.py install --user && cd ${CUR_DIR}
+        pip install --user -r ./pips/slave26/ci_requirements.txt -f ./pips/slave26/pip_packages
+        mkdir ../collections_module
+        sudo curl -L -o ./../collections_module/collections.py https://raw.githubusercontent.com/RedHatInsights/insights-core/5c8ca0f2fb3de45908e8d931d40758af34a7997a/.collections.py
+    - name: flake8
+      run: |
+        pip install --user -e .[linting] -f ./pips/slave26/pip_packages
+        flake8 .
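+    # The collections.py fetched in "get dependencies" above is placed on
+    # PYTHONPATH by the pytest step below, so imports of `collections` pick up
+    # pieces (presumably OrderedDict/Counter) missing from Python 2.6's stdlib.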
+ - name: pytest + run: | + pip install --user -e .[testing] -f ./pips/slave26/pip_packages + export PYTHONPATH=${PYTHONPATH}:./../collections_module + pytest + + docs-test: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.6 + uses: actions/setup-python@v2 + with: + python-version: 3.6 + - name: install dependencies + run: | + sudo apt-get install pandoc + python -m pip install --upgrade pip + - name: docs Test + run: | + pip install docutils==0.17 + pip install -e .[docs] + sphinx-build -W -b html -qa -E docs docs/_build/html \ No newline at end of file From 4db3823a3f7996e3b313d93f1d6164c8c4ac8b36 Mon Sep 17 00:00:00 2001 From: Ping Qin <30404410+qinpingli@users.noreply.github.com> Date: Mon, 17 May 2021 21:41:38 +0800 Subject: [PATCH 421/892] Correct the `unit_files` format in pydoc (#3070) Signed-off-by: Ping Qin --- insights/core/plugins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/core/plugins.py b/insights/core/plugins.py index 95ce144c0..ef99f6313 100644 --- a/insights/core/plugins.py +++ b/insights/core/plugins.py @@ -243,7 +243,7 @@ def report(sshd_config, installed_rpms, chk_config, unit_files, ip_tables, ip_ad At least one of the arguments to parameters of an "at least one" list will not be ``None``. In the example, either or both of ``chk_config`` - and unit_files will not be ``None``. + and ``unit_files`` will not be ``None``. Any or all arguments for optional parameters may be ``None``. From 1dca26c64c57b1a5d847f49a993aec28beb81a0a Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 18 May 2021 21:41:37 +0800 Subject: [PATCH 422/892] Update pmrep_metrics spec in insights_archive.py (#3068) * Add spec in insights_archive.py Signed-off-by: jiazhang * Use first_file instead of simple_file Signed-off-by: jiazhang --- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index e330654d9..c751204b0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -753,7 +753,7 @@ def pmlog_summary_file(broker): pmlog_summary = command_with_args( "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free kernel.all.cpu.wait.total", pmlog_summary_file) - pmrep_metrics = simple_command("pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv") + pmrep_metrics = simple_command("/usr/bin/pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv") postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") postgresql_conf = first_file([ diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 82f4b84d2..0dcf43c42 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -187,7 +187,7 @@ class InsightsArchiveSpecs(Specs): pcp_metrics = simple_file("insights_commands/curl_-s_http_..127.0.0.1_44322.metrics_--connect-timeout_5") pcs_quorum_status = simple_file("insights_commands/pcs_quorum_status") pcs_status = simple_file("insights_commands/pcs_status") 
-    pmrep_metrics = simple_file("insights_commands/pmrep_-t_1s_-T_1s_network.interface.out.packets_network.interface.collisions_swap.pagesout_-o_csv")
+    pmrep_metrics = first_file(["insights_commands/pmrep_-t_1s_-T_1s_network.interface.out.packets_network.interface.collisions_swap.pagesout_mssql.memory_manager.stolen_server_memory_mssql.memory_manager.total_server_memory_-o_csv", "insights_commands/pmrep_-t_1s_-T_1s_network.interface.out.packets_network.interface.collisions_swap.pagesout_-o_csv"])
     postconf_builtin = simple_file("insights_commands/postconf_-C_builtin")
     postconf = simple_file("insights_commands/postconf")
     ps_alxwww = simple_file("insights_commands/ps_alxwww")

From 670520a1332149adcd96faa645bac6ef56a7591e Mon Sep 17 00:00:00 2001
From: Link Dupont
Date: Tue, 18 May 2021 12:09:58 -0400
Subject: [PATCH 423/892] update uploader_json_map.json

Signed-off-by: Link Dupont
---
 insights/client/uploader_json_map.json | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json
index 03a8bfdc4..e5e71b1f2 100644
--- a/insights/client/uploader_json_map.json
+++ b/insights/client/uploader_json_map.json
@@ -1062,7 +1062,7 @@
         "symbolic_name": "pcs_status"
     },
     {
-        "command": "pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout -o csv",
+        "command": "/usr/bin/pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv",
        "pattern": [],
         "symbolic_name": "pmrep_metrics"
     },
@@ -2531,6 +2531,7 @@
         "Error running DeviceResume dm_task_run failed",
         "Exception happened during processing of request from",
         "File system is filling up",
+        "High collision rate in packet sends",
         "High directory name cache miss rate",
         "High number of saturated processors",
         "High per CPU processor utilization",
@@ -2663,7 +2664,10 @@
     },
     {
         "file": "/etc/opt/rh/rh-mongodb34/mongod.conf",
-        "symbolic_name": "mongod_conf"
+        "symbolic_name": "mongod_conf",
+        "pattern": [
+            "dbpath"
+        ]
     },
     {
         "file": "/proc/mounts",
         "pattern": [],
@@ -4252,6 +4256,16 @@
         ],
         "symbolic_name": "rsyslog_conf"
     },
+    {
+        "glob": "/sys/bus/vmbus/devices/*/class_id",
+        "pattern": [],
+        "symbolic_name": "sys_vmbus_class_id"
+    },
+    {
+        "glob": "/sys/bus/vmbus/devices/*/device_id",
+        "pattern": [],
+        "symbolic_name": "sys_vmbus_device_id"
+    },
     {
         "glob": "/sys/block/*/queue/scheduler",
         "symbolic_name": "scheduler",
@@ -4301,5 +4315,5 @@
     "pre_commands": {
         "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"
     },
-    "version": "2021-05-06T16:06:41.477806"
+    "version": "2021-05-13T13:38:29.298898"
 }
\ No newline at end of file

From 9693ead24a82b268de3ea4f3e6cc13db9e5f3d8c Mon Sep 17 00:00:00 2001
From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com>
Date: Tue, 18 May 2021 15:52:27 -0400
Subject: [PATCH 424/892] Fix IndexError exception in smt combiner (#3074)

* On some systems the /sys/devices/system/cpu/cpu0/online file doesn't exist,
  which causes an IndexError in the combiner. Updated it to handle the
  missing file, since cpu0 will always be online.
* Add test to replicate the missing cpu0 online file.
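* For reference, the sysfs layout the fix tolerates looks roughly like this
  (illustrative paths and values, not taken from the new test)::

      /sys/devices/system/cpu/cpu0/          <- no "online" file on such systems
      /sys/devices/system/cpu/cpu1/online    <- contains "1"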
* Fixes #3073

Signed-off-by: Ryan Blakley
---
 insights/combiners/smt.py            |  8 +++++++-
 insights/combiners/tests/test_smt.py | 17 +++++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/insights/combiners/smt.py b/insights/combiners/smt.py
index 467b9aff2..25e5ef790 100644
--- a/insights/combiners/smt.py
+++ b/insights/combiners/smt.py
@@ -43,7 +43,13 @@ def __init__(self, cpu_online, cpu_siblings):
         max_cpu_core_id = max([core.core_id for core in cpu_online])
         for n in range(max_cpu_core_id + 1):
             online = [core for core in cpu_online if core.core_id == n]
-            online = online[0].on
+            # On some boxes cpu0 doesn't have the online file, since technically cpu0 will always
+            # be online. So check if online returns anything before trying to access online[0].
+            # If it returns nothing and n is 0 set online to True.
+            if online:
+                online = online[0].on
+            elif not online and n == 0:
+                online = True
             siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n]
             if len(siblings) != 0:
                 siblings = siblings[0].siblings
diff --git a/insights/combiners/tests/test_smt.py b/insights/combiners/tests/test_smt.py
index abf5efe31..596780eec 100644
--- a/insights/combiners/tests/test_smt.py
+++ b/insights/combiners/tests/test_smt.py
@@ -142,3 +142,20 @@ def test_doc_examples(cpu_all_online):
     }
     failed, total = doctest.testmod(smt, globs=env)
     assert failed == 0
+
+
+def test_without_hyperthreading_all_online_missing_cpu0_online_file():
+    online = [
+        CpuCoreOnline(context_wrap("1", path=ONLINE_PATH.format(1))),
+    ]
+    siblings = [
+        CpuSiblings(context_wrap("0", path=SIBLINGS_PATH.format(0))),
+        CpuSiblings(context_wrap("1", path=SIBLINGS_PATH.format(1)))
+    ]
+
+    cpu_topology = CpuTopology(online, siblings)
+    assert cpu_topology.online(0)
+    assert cpu_topology.siblings(0) == [0]
+    assert cpu_topology.online(1)
+    assert cpu_topology.siblings(1) == [1]
+    assert cpu_topology.all_solitary

From 26addcd274eaef2ea490cce0f1ff9341ee9d9e87 Mon Sep 17 00:00:00 2001
From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com>
Date: Tue, 18 May 2021 15:56:59 -0400
Subject: [PATCH 425/892] Fix s390x cpuinfo parsing (#3067)

* Added the ability to parse the different cpuinfo output for s390x cpus,
  and added model_ids correctly for PowerPC cpuinfo.
* Added new test cases for s390x on r7 and r8, since r7 and below don't
  display per cpu clock speed, and r8 and above do.
* Fixes #2629

Signed-off-by: Ryan Blakley
---
 insights/parsers/cpuinfo.py            | 23 +++++++++-
 insights/parsers/tests/test_cpuinfo.py | 61 ++++++++++++++++++++++++++
 2 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/insights/parsers/cpuinfo.py b/insights/parsers/cpuinfo.py
index 8abbbaf31..ea4a74298 100644
--- a/insights/parsers/cpuinfo.py
+++ b/insights/parsers/cpuinfo.py
@@ -143,17 +143,36 @@ def parse_content(self, content):
             "revision": "revision",
             "address sizes": "address_sizes",
             "bugs": "bugs",
-            "microcode": "microcode"
+            "microcode": "microcode",
+            "cpu MHz static": "clockspeeds",
+            "features": "features"
         }
 
         for line in get_active_lines(content, comment_char="COMMAND>"):
             key, value = [p.strip() for p in line.split(":", 1)]
+
+            # For s390x the : symbol is after the number instead of before.
+            # So re-split and set the key and value before checking mappings.
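+            # For example, the sample line from the tests,
+            #   "processor 0: version = FF, identification = 0E19C8, machine = 8561",
+            # splits above into key "processor 0", and the re-split below turns
+            # that into key "processor" and value "0" for the existing mapping.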
+ if key.startswith("processor") and key[-1].isdigit(): + key, value = key.split(" ", 1) + if key in mappings: self.data[mappings[key]].append(value) if "cpu" in self.data and "POWER" in self.data["cpu"][0]: # this works differently than on x86 and is not per-cpu - del self.data["model_ids"] + model_id = self.data["model_ids"][0] + cpu_cnt = self.cpu_count + self.data["model_ids"] = [model_id] * cpu_cnt + + # s390x cpuinfo is setup drastically differently than other arches. + # It doesn't print the same information for each cpu, like other arches. + # So any info not repeated per cpu, copy, delete and then add for each cpu entry. + if "vendors" in self.data and "IBM/S390" in self.data["vendors"][0]: + vendor = self.data["vendors"][0] + features = self.data["features"][0] + cpu_cnt = self.cpu_count + self.data["vendors"] = [vendor] * cpu_cnt + self.data["features"] = [features] * cpu_cnt self.data = dict(self.data) diff --git a/insights/parsers/tests/test_cpuinfo.py b/insights/parsers/tests/test_cpuinfo.py index 3a6c89473..88e2e772d 100644 --- a/insights/parsers/tests/test_cpuinfo.py +++ b/insights/parsers/tests/test_cpuinfo.py @@ -1402,6 +1402,48 @@ machine : CHRP IBM,8231-E2D """ +S390X_CPUINFO_R7 = """ +vendor_id : IBM/S390 +# processors : 2 +bogomips per cpu: 24038.00 +max thread id : 0 +features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx vxd vxe gs +facilities : 0 1 2 3 4 6 7 8 9 10 12 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 38 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 57 58 59 60 61 73 74 75 76 77 80 81 82 128 129 130 131 133 134 135 146 147 148 150 151 152 155 156 168 +cache0 : level=1 type=Data scope=Private size=128K line_size=256 associativity=8 +cache1 : level=1 type=Instruction scope=Private size=128K line_size=256 associativity=8 +cache2 : level=2 type=Data scope=Private size=4096K line_size=256 associativity=8 +cache3 : level=2 type=Instruction scope=Private size=4096K line_size=256 associativity=8 +cache4 : level=3 type=Unified scope=Shared size=262144K line_size=256 associativity=32 +cache5 : level=4 type=Unified scope=Shared size=983040K line_size=256 associativity=60 +processor 0: version = FF, identification = 0E19C8, machine = 8561 +processor 1: version = FF, identification = 0E19C8, machine = 8561 +""" + +S390X_CPUINFO_R8 = """ +vendor_id : IBM/S390 +# processors : 2 +bogomips per cpu: 21881.00 +max thread id : 0 +features : esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx vxd vxe gs +facilities : 0 1 2 3 4 6 7 8 9 10 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 30 31 32 33 34 35 36 37 38 40 41 42 43 44 45 47 48 49 50 51 52 53 54 57 58 59 60 64 65 69 71 72 73 74 75 76 77 78 80 81 82 129 130 131 133 134 135 138 139 146 147 +cache0 : level=1 type=Data scope=Private size=128K line_size=256 associativity=8 +cache1 : level=1 type=Instruction scope=Private size=128K line_size=256 associativity=8 +cache2 : level=2 type=Data scope=Private size=4096K line_size=256 associativity=8 +cache3 : level=2 type=Instruction scope=Private size=2048K line_size=256 associativity=8 +cache4 : level=3 type=Unified scope=Shared size=131072K line_size=256 associativity=32 +cache5 : level=4 type=Unified scope=Shared size=688128K line_size=256 associativity=42 +processor 0: version = FF, identification = 1E41E8, machine = 3906 +processor 1: version = FF, identification = 1E41E8, machine = 3906 + +cpu number : 0 +cpu MHz dynamic : 5208 +cpu MHz static : 5208 + +cpu number : 1 +cpu MHz dynamic : 5208 +cpu MHz static : 5208 
+""" + def test_cpuinfo(): cpu_info = CpuInfo(context_wrap(CPUINFO)) @@ -1487,6 +1529,25 @@ def test_power_cpuinfo(): for i, cpu in enumerate(cpu_info): assert cpu["cpu"] == "POWER7 (architected), altivec supported" assert cpu["revision"] == "2.0 (pvr 004a 0200)" + assert cpu["model_ids"] == "IBM,8231-E2D" + + +def test_s390x_cpuinfo(): + def test_common(info): + assert info.cpu_count == 2 + assert info.socket_count == 0 + assert info.vendor == "IBM/S390" + for i, cpu in enumerate(info): + assert cpu["features"] == "esan3 zarch stfle msa ldisp eimm dfp edat etf3eh highgprs te vx vxd vxe gs" + + # Test r7 output. + cpu_info = CpuInfo(context_wrap(S390X_CPUINFO_R7)) + test_common(cpu_info) + + # Test r8 output. + cpu_info = CpuInfo(context_wrap(S390X_CPUINFO_R8)) + test_common(cpu_info) + assert cpu_info.cpu_speed == "5208" def test_cpuinfo_doc_examples(): From cd1aa905f0cda0f7231f160c2fb32b7f5e9ebb85 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 19 May 2021 14:34:34 +0800 Subject: [PATCH 426/892] Support RHEL 8.4 released on 18th May (#3075) Signed-off-by: Xiangce Liu --- insights/parsers/uname.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py index e9851aaed..13ad52862 100644 --- a/insights/parsers/uname.py +++ b/insights/parsers/uname.py @@ -111,6 +111,7 @@ "4.18.0-147": "8.1", "4.18.0-193": "8.2", "4.18.0-240": "8.3", + "4.18.0-305": "8.4", } release_to_kernel_map = dict((v, k) for k, v in rhel_release_map.items()) From 399fe0c9070f29b5a865963194039dd6eb8df0ba Mon Sep 17 00:00:00 2001 From: Ping Qin <30404410+qinpingli@users.noreply.github.com> Date: Thu, 20 May 2021 05:45:44 +0800 Subject: [PATCH 427/892] New parser for /etc/sysconfig/grub (#3072) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * New parser for /etc/sysconfig/grub Signed-off-by: Ping Qin * Fix ci error: E302 expected 2 blank lines, found 1 Signed-off-by: Ping Qin * Bugfix per ci error: NameError: name 'grub_syscfg' is not defined Signed-off-by: Ping Qin * Update the spec per the review comments: 1. doc format 2. add a short explanation for `/etc/sysconfig/grub` file 3. get the default grub from `/etc/default/grub` file 4. 
remove sysconfig_grub from the sos_archive.py Signed-off-by: Ping Qin --- insights/parsers/sysconfig.py | 33 +++++++++++++++++++ .../tests/test_sysconfig_doc_examples.py | 16 ++++++++- insights/parsers/tests/test_sysconfig_grub.py | 23 +++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 73 insertions(+), 1 deletion(-) mode change 100644 => 100755 insights/parsers/tests/test_sysconfig_doc_examples.py create mode 100644 insights/parsers/tests/test_sysconfig_grub.py diff --git a/insights/parsers/sysconfig.py b/insights/parsers/sysconfig.py index 0915856c7..0913638eb 100644 --- a/insights/parsers/sysconfig.py +++ b/insights/parsers/sysconfig.py @@ -71,6 +71,9 @@ IfCFGStaticRoute - files ``/etc/sysconfig/network-scripts/route-*`` ------------------------------------------------------------------- + +GrubSysconfig - files ``/etc/sysconfig/grub`` +--------------------------------------------- """ @@ -630,3 +633,33 @@ class IfCFGStaticRoute(SysconfigOptions): def parse_content(self, content): self.static_route_name = self.file_name.split("route-", 1)[1] super(IfCFGStaticRoute, self).parse_content(content) + + +@parser(Specs.sysconfig_grub) +class GrubSysconfig(SysconfigOptions): + """ + Class to parse the ``/etc/sysconfig/grub`` + + ``/etc/sysconfig/grub`` is a symlink of ``/etc/default/grub`` file + + Typical content example:: + + GRUB_TIMEOUT=1 + GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)" + GRUB_DEFAULT=saved + GRUB_DISABLE_SUBMENU=true + GRUB_TERMINAL_OUTPUT="console" + GRUB_CMDLINE_LINUX="console=ttyS0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto" + GRUB_DISABLE_RECOVERY="true" + GRUB_ENABLE_BLSCFG=true + + Examples: + >>> grub_syscfg.get('GRUB_ENABLE_BLSCFG') + 'true' + >>> 'NONEXISTENT_VAR' in grub_syscfg + False + >>> 'GRUB_ENABLE_BLSCFG' in grub_syscfg + True + + """ + pass diff --git a/insights/parsers/tests/test_sysconfig_doc_examples.py b/insights/parsers/tests/test_sysconfig_doc_examples.py old mode 100644 new mode 100755 index dc05dcc8a..2bea5e305 --- a/insights/parsers/tests/test_sysconfig_doc_examples.py +++ b/insights/parsers/tests/test_sysconfig_doc_examples.py @@ -12,6 +12,7 @@ from insights.parsers.sysconfig import CorosyncSysconfig from insights.parsers.sysconfig import IfCFGStaticRoute from insights.parsers.sysconfig import NetworkSysconfig +from insights.parsers.sysconfig import GrubSysconfig import doctest @@ -156,6 +157,18 @@ NM_BOND_VLAN_ENABLED=no """.strip() +GRUB_SYSCONFIG = """ + +GRUB_TIMEOUT=1 +GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)" +GRUB_DEFAULT=saved +GRUB_DISABLE_SUBMENU=true +GRUB_TERMINAL_OUTPUT="console" +GRUB_CMDLINE_LINUX="console=ttyS0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto" +GRUB_DISABLE_RECOVERY="true" +GRUB_ENABLE_BLSCFG=true +""".strip() + def test_sysconfig_doc(): env = { @@ -179,7 +192,8 @@ def test_sysconfig_doc(): 'dirsrv_syscfg': DirsrvSysconfig(context_wrap(DIRSRVSYSCONFG)), 'cs_syscfg': CorosyncSysconfig(context_wrap(COROSYNCSYSCONFIG)), 'conn_info': IfCFGStaticRoute(context_wrap(STATIC_ROUTE_1, CONTEXT_PATH_DEVICE_1)), - 'net_syscfg': NetworkSysconfig(context_wrap(NETWORK_SYSCONFIG)) + 'net_syscfg': NetworkSysconfig(context_wrap(NETWORK_SYSCONFIG)), + 'grub_syscfg': GrubSysconfig(context_wrap(GRUB_SYSCONFIG)) } failed, total = doctest.testmod(sysconfig, globs=env) assert failed == 0 diff --git a/insights/parsers/tests/test_sysconfig_grub.py b/insights/parsers/tests/test_sysconfig_grub.py new file mode 
100644
index 000000000..36252bd84
--- /dev/null
+++ b/insights/parsers/tests/test_sysconfig_grub.py
@@ -0,0 +1,23 @@
+from insights.tests import context_wrap
+from insights.parsers.sysconfig import GrubSysconfig
+
+GRUB_SYSCONFIG = """
+
+GRUB_TIMEOUT=1
+GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)"
+GRUB_DEFAULT=saved
+GRUB_DISABLE_SUBMENU=true
+GRUB_TERMINAL_OUTPUT="console"
+GRUB_CMDLINE_LINUX="console=ttyS0 console=ttyS0,115200n8 no_timer_check net.ifnames=0 crashkernel=auto"
+GRUB_DISABLE_RECOVERY="true"
+GRUB_ENABLE_BLSCFG=true
+""".strip()
+
+
+def test_sysconfig_grub():
+    result = GrubSysconfig(context_wrap(GRUB_SYSCONFIG))
+    assert result["GRUB_ENABLE_BLSCFG"] == 'true'
+    assert result.get("GRUB_ENABLE_BLSCFG") == 'true'
+    assert result.get("NONEXISTENT_VAR") is None
+    assert "NONEXISTENT_VAR" not in result
+    assert "GRUB_ENABLE_BLSCFG" in result
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 1fb7c2545..4120f0f11 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -625,6 +625,7 @@ class Specs(SpecSet):
     swift_proxy_server_conf = RegistryPoint()
     sys_kernel_sched_features = RegistryPoint()
     sysconfig_chronyd = RegistryPoint()
+    sysconfig_grub = RegistryPoint()
     sysconfig_httpd = RegistryPoint()
     sysconfig_irqbalance = RegistryPoint()
     sysconfig_kdump = RegistryPoint()
diff --git a/insights/specs/default.py b/insights/specs/default.py
index c751204b0..6b229383d 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -950,6 +950,7 @@ def is_mod_loaded_for_ss(broker):
     subscription_manager_installed_product_ids = simple_command("/usr/bin/find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \;")
     swift_object_expirer_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/object-expirer.conf", "/etc/swift/object-expirer.conf"])
     swift_proxy_server_conf = first_file(["/var/lib/config-data/puppet-generated/swift/etc/swift/proxy-server.conf", "/etc/swift/proxy-server.conf"])
+    sysconfig_grub = simple_file("/etc/default/grub")    # This is the file that "/etc/sysconfig/grub" points to
     sysconfig_kdump = simple_file("etc/sysconfig/kdump")
     sysconfig_libvirt_guests = simple_file("etc/sysconfig/libvirt-guests")
     sysconfig_network = simple_file("etc/sysconfig/network")

From 127a9d63f6bd37ff6ccea7d883d3983cf45868c7 Mon Sep 17 00:00:00 2001
From: wushiqinlou
Date: Thu, 20 May 2021 16:13:43 +0800
Subject: [PATCH 428/892] Update sos journal (#3078)

* Update sos journal

Signed-off-by: jiazhang

* Update items

Signed-off-by: jiazhang
---
 insights/specs/sos_archive.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py
index 0010928bb..6e786769d 100644
--- a/insights/specs/sos_archive.py
+++ b/insights/specs/sos_archive.py
@@ -113,9 +113,8 @@ class SosSpecs(Specs):
     ip_s_link = first_of([simple_file("sos_commands/networking/ip_-s_-d_link"), simple_file("sos_commands/networking/ip_-s_link"), simple_file("sos_commands/networking/ip_link")])
     ip6tables_permanent = simple_file("etc/sysconfig/ip6tables")
     iptables = first_file(["/etc/sysconfig/iptables", "/etc/sysconfig/iptables.save"])
-    journal_since_boot = first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot"), simple_file("sos_commands/logs/journalctl_--all_--this-boot_--no-pager")])
+    journal_since_boot =
first_file(["sos_commands/logs/journalctl_--no-pager_--boot", "sos_commands/logs/journalctl_--no-pager_--catalog_--boot", "sos_commands/logs/journalctl_--all_--this-boot_--no-pager"]) ironic_conf = first_file(["/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf", "/etc/ironic/ironic.conf"]) - journal_since_boot = first_of([simple_file("sos_commands/logs/journalctl_--no-pager_--boot"), simple_file("sos_commands/logs/journalctl_--no-pager_--catalog_--boot")]) kerberos_kdc_log = simple_file("var/log/krb5kdc.log") keystone_log = first_file(["/var/log/containers/keystone/keystone.log", "/var/log/keystone/keystone.log"]) kexec_crash_loaded = simple_file("/sys/kernel/kexec_crash_loaded") From 6a651059ed50c59074024376bc785bace007771b Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Fri, 21 May 2021 12:19:28 +0530 Subject: [PATCH 429/892] added vmware_tools_conf spec (#3080) Signed-off-by: rasrivas --- insights/specs/default.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 6b229383d..ca75d603d 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -1010,6 +1010,7 @@ def is_mod_loaded_for_ss(broker): virt_what = simple_command("/usr/sbin/virt-what") virt_who_conf = glob_file([r"etc/virt-who.conf", r"etc/virt-who.d/*.conf"]) virtlogd_conf = simple_file("/etc/libvirt/virtlogd.conf") + vmware_tools_conf = simple_file("/etc/vmware-tools/tools.conf") vsftpd = simple_file("/etc/pam.d/vsftpd") vsftpd_conf = simple_file("/etc/vsftpd/vsftpd.conf") x86_pti_enabled = simple_file("sys/kernel/debug/x86/pti_enabled") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 600473a90..d3a26a0e6 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -75,7 +75,6 @@ def test_get_component_by_symbolic_name(): 'cpu_vulns_spec_store_bypass', 'docker_storage', 'freeipa_healthcheck_log', - 'vmware_tools_conf', 'ironic_conf', 'octavia_conf', 'partitions', From 5bf962dd2395dd1674b8176b04adf0cea3fc311d Mon Sep 17 00:00:00 2001 From: Glutexo Date: Mon, 24 May 2021 20:39:07 +0200 Subject: [PATCH 430/892] Use /var/tmp/insights-client for egg release (#3016) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of fetching the egg release file to a world-writable /tmp directory, use /var/tmp/insights-client instead. This directory is not accessible by any other user than the owner. Tampering with the egg release file by regular users is no longer possible. 
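A minimal sketch of the helper this change relies on (illustrative only; the
real write_data_to_file lives in insights.client.utilities and may differ in
detail)::

    import os

    def write_data_to_file(data, filepath):
        # Create the owner-only parent directory, e.g. /var/tmp/insights-client,
        # before writing the release string into it.
        try:
            os.makedirs(os.path.dirname(filepath), mode=0o700)
        except OSError:
            pass  # the directory already exists
        with open(filepath, 'w') as f:
            f.write(data)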
Signed-off-by: Štěpán Tomsa --- insights/client/__init__.py | 3 ++- insights/client/constants.py | 2 +- insights/tests/client/init/test_fetch.py | 22 +++++++++++----------- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 3df34962f..7686b002b 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -17,6 +17,7 @@ from .auto_config import try_auto_configuration from .utilities import (delete_registered_file, delete_unregistered_file, + write_data_to_file, write_to_disk, generate_machine_id, get_tags, @@ -134,7 +135,7 @@ def fetch(self, force=False): try: # write the release path to temp so we can collect it # in the archive - write_to_disk(constants.egg_release_file, content=egg_release) + write_data_to_file(egg_release, constants.egg_release_file) except (OSError, IOError) as e: logger.debug('Could not write egg release file: %s', str(e)) diff --git a/insights/client/constants.py b/insights/client/constants.py index 7e51c12d4..fe57d5071 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -78,7 +78,7 @@ class InsightsConstants(object): sig_kill_bad = 101 cached_branch_info = os.path.join(default_conf_dir, '.branch_info') pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid') - egg_release_file = os.path.join(os.sep, 'tmp', 'insights-client-egg-release') + egg_release_file = os.path.join(os.sep, 'var', 'tmp', 'insights-client', 'insights-client-egg-release') ppidfile = os.path.join(os.sep, 'tmp', 'insights-client.ppid') valid_compressors = ("gz", "xz", "bz2", "none") # RPM version in which core collection was released diff --git a/insights/tests/client/init/test_fetch.py b/insights/tests/client/init/test_fetch.py index b31e57c8d..d42f66dd0 100644 --- a/insights/tests/client/init/test_fetch.py +++ b/insights/tests/client/init/test_fetch.py @@ -49,34 +49,34 @@ def test_request_forced(insights_client): @patch('insights.client.os.path', Mock()) @patch('insights.client.tempfile', Mock()) @patch('insights.client.InsightsClient.get_egg_url', return_value='/testvalue') -@patch('insights.client.write_to_disk') -def test_egg_release_written(write_to_disk, get_egg_url, insights_client): +@patch('insights.client.write_data_to_file') +def test_egg_release_written(write_data_to_file, get_egg_url, insights_client): ''' Verify egg release file successfully written after request ''' insights_client.fetch(force=False) - write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + write_data_to_file.assert_called_once_with('/testvalue', constants.egg_release_file) @patch('insights.client.InsightsClient._fetch') @patch('insights.client.os.path', Mock()) @patch('insights.client.tempfile', Mock()) @patch('insights.client.InsightsClient.get_egg_url', return_value='/testvalue') -@patch('insights.client.write_to_disk') -def test_egg_release_error(write_to_disk, get_egg_url, _fetch, insights_client): +@patch('insights.client.write_data_to_file') +def test_egg_release_error(write_data_to_file, get_egg_url, _fetch, insights_client): ''' Verify OSError and IOError are caught and process continues on ''' - write_to_disk.side_effect = OSError('test') + write_data_to_file.side_effect = OSError('test') assert insights_client.fetch(force=False) - write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + write_data_to_file.assert_called_once_with('/testvalue', constants.egg_release_file) assert _fetch.call_count == 2 - 
write_to_disk.side_effect = None - write_to_disk.reset_mock() + write_data_to_file.side_effect = None + write_data_to_file.reset_mock() _fetch.reset_mock() - write_to_disk.side_effect = IOError('test') + write_data_to_file.side_effect = IOError('test') assert insights_client.fetch(force=False) - write_to_disk.assert_called_once_with(constants.egg_release_file, content='/testvalue') + write_data_to_file.assert_called_once_with('/testvalue', constants.egg_release_file) assert _fetch.call_count == 2 From add88c7389369768565ee55318ec595c65a4a548 Mon Sep 17 00:00:00 2001 From: Chris Sams Date: Tue, 25 May 2021 14:41:07 -0500 Subject: [PATCH 431/892] Add fallback boot detected to GreenbootStatus (#3081) Signed-off-by: Christopher Sams --- insights/parsers/greenboot_status.py | 4 +++- insights/parsers/tests/test_greenboot_status.py | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/insights/parsers/greenboot_status.py b/insights/parsers/greenboot_status.py index f9f2824cb..5bc144613 100644 --- a/insights/parsers/greenboot_status.py +++ b/insights/parsers/greenboot_status.py @@ -10,7 +10,8 @@ _green = "Boot Status is GREEN" _red = "Boot Status is RED" -add_filter(Specs.greenboot_status, [_green, _red]) +_fallback = "FALLBACK BOOT DETECTED" +add_filter(Specs.greenboot_status, [_green, _red, _fallback]) @parser(Specs.greenboot_status) @@ -31,3 +32,4 @@ class GreenbootStatus(LogFileOutput): GreenbootStatus.token_scan("green", _green) GreenbootStatus.token_scan("red", _red) +GreenbootStatus.token_scan("fallback", _fallback) diff --git a/insights/parsers/tests/test_greenboot_status.py b/insights/parsers/tests/test_greenboot_status.py index 5ce8cb3b6..3690a9f60 100644 --- a/insights/parsers/tests/test_greenboot_status.py +++ b/insights/parsers/tests/test_greenboot_status.py @@ -6,7 +6,6 @@ Boot Status is GREEN - Health Check SUCCESS """ - RED = """ Mar 04 15:47:12 example greenboot[768]: Script 'check-dns.sh' SUCCESS Mar 04 15:47:12 example required-services.sh[999]: active @@ -55,6 +54,13 @@ Mar 04 15:47:12 example systemd[1]: Started greenboot MotD Generator. """ +FALLBACK = """ +Feb 22 22:50:26 example systemd[1]: Starting greenboot MotD Generator... +Feb 22 22:50:26 example greenboot-status[905]: Boot Status is GREEN - Health Check SUCCESS +Feb 22 22:50:26 example greenboot-status[905]: FALLBACK BOOT DETECTED! Default rpm-ostree deployment has been rolled back. +Feb 22 22:50:26 example systemd[1]: Started greenboot MotD Generator. +""" + def test_greenboot_status_green(): green = context_wrap(GREEN) @@ -68,3 +74,10 @@ def test_greenboot_status_red(): p = GreenbootStatus(red) assert p.red assert not p.green + + +def test_greenboot_status_fallback(): + fb = context_wrap(FALLBACK) + p = GreenbootStatus(fb) + assert p.green + assert p.fallback From ec1ef9bc4d3b9754af05228f94cb59d86eb87f16 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 25 May 2021 15:55:15 -0400 Subject: [PATCH 432/892] Add new switch to run to choose the color output (#3084) * Add new switch --color[=WHEN] to allow the choice of color output encoding. The options for when are always, auto, and never. Always sets colorama to not strip the encoding, so the color can be piped. Auto sets colorama to the default, and never disables color encoding. 
* Fixes #2980 Signed-off-by: Ryan Blakley --- docs/manpages/insights-run.rst | 6 ++++++ insights/__init__.py | 6 ++++++ insights/formats/text.py | 22 +++++++++++++++++++--- 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/docs/manpages/insights-run.rst b/docs/manpages/insights-run.rst index d30cb6695..0ad07a79c 100644 --- a/docs/manpages/insights-run.rst +++ b/docs/manpages/insights-run.rst @@ -39,6 +39,12 @@ OPTIONS -c CONFIG --config CONFIG Configure components. + \-\-color [=WHEN] + Choose if and how the color encoding is outputted. When can be 'always', 'auto', or + 'never'. If always the color encoding isn't stripped from the output, so it can be + piped. If auto the color is outputted in the terminal but is stripped if piped. If + never then no color encoding is outputted. + \-\-context CONTEXT Execution Context. Defaults to HostContext if an archive isn't passed. See :ref:`context-label` for additional information. diff --git a/insights/__init__.py b/insights/__init__.py index 87e7d8007..cbb37e94a 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -56,6 +56,8 @@ for name in package_info: package_info[name] = pkgutil.get_data(__name__, name).strip().decode("utf-8") +_COLOR = "auto" + def get_nvr(): return "{0}-{1}-{2}".format(package_info["NAME"], @@ -263,6 +265,8 @@ def run(component=None, root=None, print_summary=False, p.add_argument("--tags", help="Expression to select rules by tag.") p.add_argument("-D", "--debug", help="Verbose debug output.", action="store_true") p.add_argument("--context", help="Execution Context. Defaults to HostContext if an archive isn't passed.") + p.add_argument("--color", default="auto", choices=["always", "auto", "never"], metavar="[=WHEN]", + help="Choose if and how the color encoding is outputted. When is 'always', 'auto', or 'never'.") class Args(object): pass @@ -270,6 +274,8 @@ class Args(object): formatters = [] args = Args() p.parse_known_args(namespace=args) + global _COLOR + _COLOR = args.color p = argparse.ArgumentParser(parents=[p]) args.format = "insights.formats._json" if args.format == "json" else args.format args.format = "insights.formats._yaml" if args.format == "yaml" else args.format diff --git a/insights/formats/text.py b/insights/formats/text.py index c599687ff..7d54a35f6 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -6,14 +6,30 @@ from pprint import pprint from six import StringIO -from insights import dr, datasource, rule, condition, incident, parser +from insights import _COLOR, dr, datasource, rule, condition, incident, parser from insights.core.context import ExecutionContext from insights.formats import Formatter, FormatterAdapter, render try: - from colorama import Fore, Style, init - init() + from colorama import init + + if _COLOR == "always": + from colorama import Fore, Style + init(strip=False) + elif _COLOR == "auto": + from colorama import Fore, Style + init() + elif _COLOR == "never": + class Default(type): + def __getattr__(*args): + return "" + + class Fore(six.with_metaclass(Default)): + pass + + class Style(six.with_metaclass(Default)): + pass except ImportError: print("Install colorama if console colors are preferred.") From bf8dd0da0a77f276c46bfa171aadd66023806d41 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 25 May 2021 15:57:27 -0400 Subject: [PATCH 433/892] Fix ValueError exception in docker_list parser (#3077) * In epel 6 there is a another package named docker, that's a kde system tray. 
So when the docker specs run they just gather the help output. So I added a check for the help output so it can be skipped. * Added a test with the help output. * Fixes #3076 Signed-off-by: Ryan Blakley --- insights/parsers/docker_list.py | 6 ++++++ insights/parsers/tests/test_docker_list.py | 19 +++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/insights/parsers/docker_list.py b/insights/parsers/docker_list.py index dc259e93a..2bbe76e3a 100644 --- a/insights/parsers/docker_list.py +++ b/insights/parsers/docker_list.py @@ -61,6 +61,12 @@ def parse_content(self, content): if not (self.key_field and self.attr_name): raise NotImplementedError("'key_field' or 'attr_name' is not defined") + # There is another application named docker that's a kde system tray, that + # will output help when the spec is run due to incorrect arguments. So check + # the content for any lines starting with Usage: so it can be skipped. + if any(l for l in content if l.startswith("Usage: ")): + raise SkipException('No data only help output.') + self.rows = parse_fixed_table(content, heading_ignore=self.heading_ignore, header_substitute=self.substitutions) diff --git a/insights/parsers/tests/test_docker_list.py b/insights/parsers/tests/test_docker_list.py index 57d213523..091783a1c 100644 --- a/insights/parsers/tests/test_docker_list.py +++ b/insights/parsers/tests/test_docker_list.py @@ -23,6 +23,15 @@ REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE """ +DOCKER_HELP_OUTPUT = """ +/usr/bin/docker - version 1.5 + +Usage: /usr/bin/docker [OPTIONS] + +Options: + -help Show this help. +""" + def test_docker_list_images(): result = docker_list.DockerListImages(context_wrap(DOCKER_LIST_IMAGES)) @@ -74,6 +83,16 @@ def test_docker_list_images_no_data(): assert 'No data.' in str(ex) +def test_docker_list_images_help_output(): + with pytest.raises(SkipException) as ex: + docker_list.DockerListImages(context_wrap(DOCKER_HELP_OUTPUT)) + assert 'No data only help output.' in str(ex) + + with pytest.raises(SkipException) as ex: + docker_list.DockerListContainers(context_wrap(DOCKER_HELP_OUTPUT)) + assert 'No data only help output.' in str(ex) + + def test_undefined_key_field(): with pytest.raises(NotImplementedError): assert docker_list.DockerList(context_wrap(DOCKER_LIST_CONTAINERS)).key_field is None From 392b57d25eb32b76ced486e9542d869af453da9b Mon Sep 17 00:00:00 2001 From: Glutexo Date: Tue, 25 May 2021 22:13:39 +0200 Subject: [PATCH 434/892] Make /tmp/insights-client-egg-release manipulation more secure (#3014) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Don’t fail on egg release read MemoryError Catch MemoryError on Egg Release file read. This file can be maliciously symlinked to a large or even infinite stream, causing the collection to crash. Signed-off-by: Štěpán Tomsa * Don’t fail on add egg release OSError Insights egg release can be too large, either accidentally or maliciously, making adding to archive crash. Added OSError catch, falling back to an empty file to ensure compatibility. 
Signed-off-by: Štěpán Tomsa * Fix error message Signed-off-by: Štěpán Tomsa Co-authored-by: Jeremy Crafts --- insights/client/data_collector.py | 13 +++-- .../data_collector/test_write_metadata.py | 50 +++++++++++++++++++ 2 files changed, 59 insertions(+), 4 deletions(-) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 6b1226f58..c5af189f1 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -132,15 +132,20 @@ def _write_egg_release(self): try: with open(constants.egg_release_file) as fil: egg_release = fil.read() - except IOError as e: - logger.debug('Could not read the egg release file :%s', str(e)) + except (IOError, MemoryError) as e: + logger.debug('Could not read the egg release file: %s', str(e)) try: os.remove(constants.egg_release_file) except OSError as e: logger.debug('Could not remove the egg release file: %s', str(e)) - self.archive.add_metadata_to_archive( - egg_release, '/egg_release') + try: + self.archive.add_metadata_to_archive( + egg_release, '/egg_release') + except OSError as e: + logger.debug('Could not add the egg release file to the archive: %s', str(e)) + self.archive.add_metadata_to_archive( + '', '/egg_release') def _write_collection_stats(self, collection_stats): logger.debug("Writing collection stats to archive...") diff --git a/insights/tests/client/data_collector/test_write_metadata.py b/insights/tests/client/data_collector/test_write_metadata.py index d529e7d22..ba7d9d317 100644 --- a/insights/tests/client/data_collector/test_write_metadata.py +++ b/insights/tests/client/data_collector/test_write_metadata.py @@ -3,6 +3,7 @@ from insights.client.constants import InsightsConstants as constants from insights.client.config import InsightsConfig from insights.client.data_collector import DataCollector +from mock.mock import call from mock.mock import patch @@ -71,3 +72,52 @@ def test_egg_release_file_read_and_written_no_read(archive, remove): d._write_egg_release() remove.assert_called_once_with(constants.egg_release_file) d.archive.add_metadata_to_archive.assert_called_once_with('', '/egg_release') + + +@patch('insights.client.data_collector.os.remove') +@patch('insights.client.data_collector.InsightsArchive') +def test_egg_release_file_read_memory_error(archive, remove): + ''' + Verify that a memory error on the egg release file read is not + fatal. + ''' + if six.PY3: + open_name = 'builtins.open' + else: + open_name = '__builtin__.open' + + with patch(open_name, create=True) as mock_open: + file_mock = mock.mock_open().return_value + file_mock.read.side_effect = MemoryError() + mock_open.side_effect = [file_mock] + c = InsightsConfig() + d = DataCollector(c) + d._write_egg_release() + remove.assert_called_once_with(constants.egg_release_file) + d.archive.add_metadata_to_archive.assert_called_once_with('', '/egg_release') + + +@patch('insights.client.data_collector.os.remove') +@patch( + 'insights.client.data_collector.InsightsArchive', + **{'return_value.add_metadata_to_archive.side_effect': [OSError('[Errno 28] No space left on device'), None]}) +def test_egg_release_file_write_os_error(archive, remove): + ''' + Verify that an OS Error (e.g. no space left) on the egg release file + write is not fatal - an empty file is written instead. 
+    '''
+    if six.PY3:
+        open_name = 'builtins.open'
+    else:
+        open_name = '__builtin__.open'
+
+    with patch(open_name, create=True) as mock_open:
+        mock_open.side_effect = [mock.mock_open(read_data='/testvalue').return_value]
+        c = InsightsConfig()
+        d = DataCollector(c)
+        d._write_egg_release()
+        remove.assert_called_once_with(constants.egg_release_file)
+        failed_call = call('/testvalue', '/egg_release')
+        rescue_call = call('', '/egg_release')
+        expected_calls = [failed_call, rescue_call]
+        d.archive.add_metadata_to_archive.assert_has_calls(expected_calls)

From 20904d23ee6029c0ddf01fe41fc019ed82726865 Mon Sep 17 00:00:00 2001
From: Stanislav Kontar
Date: Wed, 26 May 2021 14:42:08 +0200
Subject: [PATCH 435/892] Update parser for device schedulers (#3083)

* Update parser for device schedulers

Signed-off-by: Stanislav Kontar

* Improve docstring

Signed-off-by: Stanislav Kontar

* Review comment improvements

Signed-off-by: Stanislav Kontar

* Add empty line at the end

Signed-off-by: Stanislav Kontar
---
 docs/shared_parsers_catalog/scheduler.rst |  3 ++
 insights/parsers/scheduler.py             | 60 ++++++++++++++++++++---
 insights/parsers/tests/test_scheduler.py  | 39 +++++++++++++++
 3 files changed, 94 insertions(+), 8 deletions(-)
 create mode 100644 docs/shared_parsers_catalog/scheduler.rst

diff --git a/docs/shared_parsers_catalog/scheduler.rst b/docs/shared_parsers_catalog/scheduler.rst
new file mode 100644
index 000000000..406c2d5f8
--- /dev/null
+++ b/docs/shared_parsers_catalog/scheduler.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.scheduler
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/scheduler.py b/insights/parsers/scheduler.py
index 33205a434..03f10ac9d 100644
--- a/insights/parsers/scheduler.py
+++ b/insights/parsers/scheduler.py
@@ -1,17 +1,61 @@
-from .. import parser, get_active_lines, Parser
+"""
+Scheduler - file ``/sys/block/*/queue/scheduler``
+=================================================
+
+This parser parses the content of the scheduler files. It stores the available
+values and also the current selection for every device.
+
+Sample content from a scheduler file:
+
+    noop deadline [cfq]
+
+Examples:
+    >>> type(scheduler_obj)
+    <class 'insights.parsers.scheduler.Scheduler'>
+    >>> scheduler_obj.data
+    {'sda': '[cfq]'}
+    >>> scheduler_obj.device
+    'sda'
+    >>> scheduler_obj.schedulers
+    ['noop', 'deadline', 'cfq']
+    >>> scheduler_obj.active_scheduler
+    'cfq'
+
+"""
+
+import re
+
+from insights.specs import Specs
+from .. import parser, get_active_lines, Parser


 @parser(Specs.scheduler)
 class Scheduler(Parser):
+    """
+    This class provides parsing for the content of ``/sys/block/*/queue/scheduler``
+    files.
+
+    Attributes:
+        device (str): Block device name
+        schedulers (list): A list of available schedulers
+        active_scheduler (str): The active scheduler
+        data (dict): A dictionary with the block device name as a key and the active scheduler as
+            a value.
+ """ + ACTIVE_SCHEDULER_PATTERN = re.compile(r'\[(.*)]') def parse_content(self, content): - active_scheduler_regex = re.compile(r'\[.*]') - result = {} + self.device = None + self.schedulers = [] + self.active_scheduler = None + + self.device = self.file_path.split('/')[3] for line in get_active_lines(content): - for sched in line.split(): - active_scheduler = active_scheduler_regex.search(sched) - if active_scheduler: - result[self.file_path.split('/')[3]] = active_scheduler.group() - self.data = result + r = self.ACTIVE_SCHEDULER_PATTERN.search(line) + if r: + self.active_scheduler = r.group(1) + + self.schedulers = line.replace('[', '').replace(']', '').split() + + # Legacy values + self.data = {self.device: '[' + self.active_scheduler + ']'} diff --git a/insights/parsers/tests/test_scheduler.py b/insights/parsers/tests/test_scheduler.py index 977c34df7..e3f5c9f38 100644 --- a/insights/parsers/tests/test_scheduler.py +++ b/insights/parsers/tests/test_scheduler.py @@ -1,3 +1,5 @@ +import doctest + from insights.parsers import scheduler from insights.tests import context_wrap @@ -31,3 +33,40 @@ def test_scheduler_deadline(): def test_scheduler_noop(): r = scheduler.Scheduler(context_wrap(VDC_SCHEDULER, VDC_PATH)) assert r.data["vdc"] == '[noop]' + + +def test_schedulers_defaults(): + r = scheduler.Scheduler(context_wrap('[none] mq-deadline kyber bfq', + '/sys/block/nvme0n1/queue/scheduler')) + assert r.device == 'nvme0n1' + assert r.schedulers == ['none', 'mq-deadline', 'kyber', 'bfq'] + assert r.active_scheduler == 'none' + + # RHEL 6 + r = scheduler.Scheduler(context_wrap('noop anticipatory deadline [cfq]', + '/sys/block/vda/queue/scheduler')) + assert r.device == 'vda' + assert r.schedulers == ['noop', 'anticipatory', 'deadline', 'cfq'] + assert r.active_scheduler == 'cfq' + + # RHEL 7 + r = scheduler.Scheduler(context_wrap('[mq-deadline] kyber none', + '/sys/block/vda/queue/scheduler')) + assert r.device == 'vda' + assert r.schedulers == ['mq-deadline', 'kyber', 'none'] + assert r.active_scheduler == 'mq-deadline' + + # RHEL 8 + r = scheduler.Scheduler(context_wrap('[mq-deadline] kyber bfq none', + '/sys/block/vda/queue/scheduler')) + assert r.device == 'vda' + assert r.schedulers == ['mq-deadline', 'kyber', 'bfq', 'none'] + assert r.active_scheduler == 'mq-deadline' + + +def test_docs(): + env = { + 'scheduler_obj': scheduler.Scheduler(context_wrap(SDA_SCHEDULER, SDA_PATH)) + } + failed, total = doctest.testmod(scheduler, globs=env) + assert failed == 0 From 6aa437d041da3cca73db41b38a1f7c03a29dd80c Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Wed, 26 May 2021 13:34:45 -0400 Subject: [PATCH 436/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index e5e71b1f2..6f74da35b 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -2658,7 +2658,8 @@ { "file": "/etc/mongod.conf", "pattern": [ - "dbpath" + "dbPath", + "storage" ], "symbolic_name": "mongod_conf" }, @@ -2666,7 +2667,8 @@ "file": "/etc/opt/rh/rh-mongodb34/mongod.conf", "symbolic_name": "mongod_conf", "pattern": [ - "dbpath" + "dbPath", + "storage" ] }, { @@ -3523,6 +3525,11 @@ "pattern": [], "symbolic_name": "sys_kernel_sched_features" }, + { + "file": "/etc/default/grub", + "pattern": [], + "symbolic_name": "sysconfig_grub" + }, { "file": "/etc/sysconfig/kdump", 
"pattern": [], @@ -4315,5 +4322,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-05-13T13:38:29.298898" + "version": "2021-05-18T12:17:02.852717" } \ No newline at end of file From 077dec92bed81a101f9c72c3bccc4aad89170491 Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Tue, 1 Jun 2021 14:11:15 -0400 Subject: [PATCH 437/892] Update Verifier code to allow for signature validation (#3055) * Update Verifier code to allow for signature validation Signed-off-by: Alec Cohan * Remove Check Version from playbook_verifier code Signed-off-by: Alec Cohan * flake Signed-off-by: Alec Cohan * remove version check tests Signed-off-by: Alec Cohan * remove versioning url Signed-off-by: Alec Cohan Co-authored-by: Jeremy Crafts --- .../ansible/playbook_verifier/__init__.py | 27 +++--------- .../ansible/playbook_verifier/__main__.py | 8 ++-- .../apps/ansible/playbook_verifier/public.gpg | 41 ++++++++++++------- .../client/apps/test_playbook_verifier.py | 26 ++++-------- 4 files changed, 43 insertions(+), 59 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index f02ef74f6..5e0d64f70 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -1,14 +1,11 @@ import os import copy import base64 -import requests import tempfile import pkgutil import hashlib import insights.client.apps.ansible from logging import getLogger -from distutils.version import LooseVersion -from insights.client.utilities import get_version_info from insights.client.apps.ansible.playbook_verifier.contrib import gnupg from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel import yaml from insights.client.constants import InsightsConstants as constants @@ -17,7 +14,6 @@ SIGKEY = 'insights_signature' PUBLIC_KEY_FOLDER = pkgutil.get_data(insights.client.apps.ansible.__name__, 'playbook_verifier/public.gpg') # Update this when we have the key generated -VERSIONING_URL = 'https://cloud.redhat.com/api/v1/static/egg_version' EXCLUDABLE_VARIABLES = ['hosts', 'vars'] logger = getLogger(__name__) @@ -58,18 +54,6 @@ def createSnippetHash(snippet): return snippetHash.digest() -def eggVersioningCheck(checkVersion): - currentVersion = requests.get(VERSIONING_URL) - currentVersion = currentVersion.text - runningVersion = get_version_info()['core_version'] - - if checkVersion: - if LooseVersion(currentVersion.strip()) < LooseVersion(runningVersion): - raise PlaybookVerificationError(message="EGG VERSION ERROR: Current running egg is not the most recent version") - - return currentVersion - - def getPublicKey(gpg): if not PUBLIC_KEY_FOLDER: raise PlaybookVerificationError(message="PUBLIC KEY IMPORT ERROR: Public key file not found") @@ -125,9 +109,11 @@ def executeVerification(snippet, encodedSignature): def verifyPlaybookSnippet(snippet): if ('vars' not in snippet.keys()): - raise PlaybookVerificationError(message='VARS FIELD NOT FOUND: Verification failed') + raise PlaybookVerificationError(message='VERIFICATION FAILED: Vars field not found') + elif (snippet['vars'] is None): + raise PlaybookVerificationError(message='VERIFICATION FAILED: Empty vars field') elif (SIGKEY not in snippet['vars']): - raise PlaybookVerificationError(message='SIGNATURE NOT FOUND: Verification failed') + raise PlaybookVerificationError(message='VERIFICATION FAILED: 
Signature not found') encodedSignature = snippet['vars'][SIGKEY] snippetCopy = copy.deepcopy(snippet) @@ -137,7 +123,7 @@ def verifyPlaybookSnippet(snippet): return executeVerification(snippetCopy, encodedSignature) -def verify(playbook, checkVersion=False, skipVerify=True): +def verify(playbook, skipVerify=True): """ Verify the signed playbook. @@ -147,9 +133,6 @@ def verify(playbook, checkVersion=False, skipVerify=True): """ logger.info('Playbook Verification has started') - # Egg Version Check - eggVersioningCheck(checkVersion) - if not skipVerify: for snippet in playbook: verified = verifyPlaybookSnippet(snippet) diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index 130a46262..c1688f84c 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -1,5 +1,6 @@ import os import sys +from insights.client.constants import InsightsConstants as constants from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml @@ -17,17 +18,14 @@ def read_playbook(): playbook = read_playbook() playbook_yaml = loadPlaybookYaml(playbook) skipVerify = True -checkVersion = False if (os.environ.get('SKIP_VERIFY')): skipVerify = False -if (os.environ.get('CHECK_VERSION')): - checkVersion = True try: - verified_playbook = verify(playbook_yaml, checkVersion, skipVerify) + verified_playbook = verify(playbook_yaml, skipVerify) except Exception as e: sys.stderr.write(e.message) - sys.exit(1) + sys.exit(constants.sig_kill_bad) print(playbook) diff --git a/insights/client/apps/ansible/playbook_verifier/public.gpg b/insights/client/apps/ansible/playbook_verifier/public.gpg index 82bd7cc13..676a90fa9 100644 --- a/insights/client/apps/ansible/playbook_verifier/public.gpg +++ b/insights/client/apps/ansible/playbook_verifier/public.gpg @@ -1,18 +1,29 @@ -----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) -mQENBGAbFTwBCACpkvYKGGck/pmpc5fN7b+BGHIxQujxxxQsm+iGhNiOvvdI15W+ -xkzQH/NdciShqwt5KmgGVWK4OV8MbdT2PQZ89K3RA0Eh+QYZ7GANlpnLTE2oYeO+ -2thNLWf7HyL8y+Bh4R/freAU3Tnncw2n9BkS/3HYs5i7ZWxoYs1uLC54wqmQLnXC -0qRZWO9O8p0qE3sPXQj97PRvqi1vf+fuIk8E7ZqxRzYA2M2YMXCOTwkPsNmUgAcp -vTS5MwKWHwI6TaJRjvQaam37tRjGuNdqFESt/Ve61ax3ggf+krZAvoAEmDNhlvRX -zDizemSZN5KwJGRKUwolmHTWn2LvEg/aKGUjABEBAAG0J0F1dG9nZW5lcmF0ZWQg -S2V5IDxleGFtcGxlQGV4YW1wbGUuY29tPokBTgQTAQgAOBYhBALLuiYuZcUH90rj -uvTd9VdHEPSNBQJgGxU8AhsvBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEPTd -9VdHEPSNwdoH/13591eoYRJ3s5MANTpjbFv7AklOMxXsIhuxXTzyAuNogp3OWR93 -PnNMMo71o2oPcUh/E51EZVa+dhLfvXH2KRokfUzUWWiG7MHHbH9j0chgYtRHR0H9 -gBZ2jOzoew7Yuz1bKyitSb4VR6A+l8ryO7iesUtXUiDtp7ARnI3CJU0NkoRvvzQR -QrXnCii4F6SjnJXgcbQ/ry78toYG3BKTPwNgwRdbmy0ngNkeG2c0LiCZ2ZNJmqqp -0vAx06GasItIZ9WmdS3qZxTgz0vCgpxyrMKkJwRPSeX++jlMp0h2+W4vc75WIXPC -f1h3KZ/Vq8ZmrDCXgeC2TFC0yzijjmWBr4k= -=fNnm +mQINBGBCnLUBEACkdDE/r1VvmVV6ZzLT0B1MjUftIonMSsC5dqMpSNnFg4apxGWw +YtQQrpxLcMbLjG3F3823SfGHBXk8VJRsJNi2lHmCDmuj4KkyWxP6t8a4jfDLoXU/ +DfO2OG7zAhss+OEDmEO6qzQPVnlVIWdQryeUTrkZLmnGtKI0kaDQaOUYWrj9+9cj +Z50svXgMlK7PnFrp+af6Vp3ul312dZKyccVdQ+bVZHVNclddd1ONThAM74+rhnZt +TvmgaWDobdbH7jGi//lXEtrAtOOBB5ohmqMIyNCzonFFG82fhw8U2nwE6CbwRLIN +aZcB0BbkJ2a62JEBs72fZ8ridOSmec6nsPrslT+w/oUU6Xap1XSbkpCBmy/vAHZK +TAYw8jDFJcZjLRJtJBQD/J3ep5/tAJsTcCXRWoisdxq5sOLWCHTDHRd3mleEzdDV +/pzFWK7IWMpaO4kr3nxGujCFgO4u/a0RwqcikbOjh80nIBPQA+D5qBBEYR8RNapn +ZcURGaoWqxm687AyvJJ4x7Ng3KYENl1vk4S45YJMbO4B+GQuNUzHIdJN7l6Fz+qB 
+fGHElMF0fHMaqBMcfMsTM88J6OejgxHyO2f8xsBGdaJDoNOcuXgtHRowwF/DfRue +rNbylIE+WTDn8q+biDcDHBqvcMCv93AMYV/STYGsm5nWD4lVUNLs/jIjYQARAQAB +tDRSZWQgSGF0LCBJbmMuIChQbGF5Ym9vayBLZXkgMSkgPHNlY3VyaXR5QHJlZGhh +dC5jb20+iQI3BBMBAgAhBQJgQpy1AhsDBgsJCAcDAgYVCAIJCgsDFgIBAh4BAheA +AAoJEMvw58D+j5pNgGUP/1dFIjBbiMFVW7yR4I1SZEFAkqrpklzmHz/eCMKPVDvT +TGcTaQSamxYgsbuS+2bny6vIl5lJ4gPYsDJ2ss5kYLx7JfN+rJT/rMVV+t+E60U5 +UsW2zBnLH906kBDhFYr9YgAQ4Svd2WCCV6HHBOJuKxWJY/1QB7DnZOpX59gzb2dc +AyeDKwSeTpgRdDOhNC5T96g14OuaNGlnOwNJ55Hqx2xs/C0O54Zqftu4JDHsjcA5 +Ec0wiheVH8oSB03AY1lDdx9SmOfLg9BiTL99+Zqoggzwc8nLXvqnzL7vrxit9k4E +RZiJkEI6micIzpdnjY8SnsXpayYEI0r+f+4vJcQt4rQnqzgE70mJZtX2c6wDVuiB +lKcmWnu4gnSbA/HdwDVyAIPe59r4ASZ1yX6ylfesr5MGAgsw0n2eLO8MeuuWoqFc +lCTps3I9n0W+9b1mmVrBKjLd2QhwToBIUXnl79HhILRm5IQxNE02owfD2crL8CdF +btzuuxSm7+V35y7bvsWhl2WoVuUT3Cgs8QEveejbZMG5q0m58SXItTr7d9cqpERY +s4iD1gttM/UES8COjp4zM4aOnqqlY+/96LJujN2MVeY6gY12y9ykg5Y9CzQdySTB +7+S7LMgwT6P8Af/xTYCujcUruY93AdcvDFHWM3zjIUVhEOvJCjHoiSwpRJqiqOVQ +=thvC -----END PGP PUBLIC KEY BLOCK----- diff --git a/insights/tests/client/apps/test_playbook_verifier.py b/insights/tests/client/apps/test_playbook_verifier.py index ac5163755..bb922c097 100644 --- a/insights/tests/client/apps/test_playbook_verifier.py +++ b/insights/tests/client/apps/test_playbook_verifier.py @@ -10,36 +10,28 @@ @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') -def test_skip_validation(): - result = verify([{'name': "test playbook", 'vars': {}}], skipVerify=True, checkVersion=False) - assert result == [{'name': "test playbook", 'vars': {}}] - - -@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') -@patch('requests.get') -def test_egg_validation_error(mock_get): - mock_get.return_value.text = '3.0.0' - egg_error = 'EGG VERSION ERROR: Current running egg is not the most recent version' +def test_vars_not_found_error(): + vars_error = 'VERIFICATION FAILED: Vars field not found' fake_playbook = [{'name': "test playbook"}] with raises(PlaybookVerificationError) as error: - verify(fake_playbook, checkVersion=True) - assert egg_error in str(error.value) + verify(fake_playbook, skipVerify=False) + assert vars_error in str(error.value) @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') -def test_vars_not_found_error(): - vars_error = 'VARS FIELD NOT FOUND: Verification failed' - fake_playbook = [{'name': "test playbook"}] +def test_empty_vars_error(): + sig_error = 'VERIFICATION FAILED: Empty vars field' + fake_playbook = [{'name': "test playbook", 'vars': None}] with raises(PlaybookVerificationError) as error: verify(fake_playbook, skipVerify=False) - assert vars_error in str(error.value) + assert sig_error in str(error.value) @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') def test_signature_not_found_error(): - sig_error = 'SIGNATURE NOT FOUND: Verification failed' + sig_error = 'VERIFICATION FAILED: Signature not found' fake_playbook = [{'name': "test playbook", 'vars': {}}] with raises(PlaybookVerificationError) as error: From 1a1a5a7e467575fbf70aaa7532465dea88b6d72e Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 3 Jun 2021 15:57:42 +0800 Subject: [PATCH 438/892] Remove "package_provides_java" (#3090) * package_provides_java is duplicate with package_provides_command Signed-off-by: Huanhuan Li --- 
 insights/specs/default.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index ca75d603d..e3b7853a2 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -705,7 +705,6 @@ def cmd_and_pkg(broker):
         raise SkipComponent

     package_provides_command = command_with_args("/usr/bin/echo '%s'", cmd_and_pkg)
-    package_provides_java = foreach_execute(cmd_and_pkg, "/usr/bin/echo '%s'")
     pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"])
     pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f")

From e3cb53b5c6879568820077cddf31f1653c55893d Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Thu, 3 Jun 2021 09:02:45 -0500
Subject: [PATCH 439/892] Add a pull request template to the project (#3089)

* This template will help ensure that developers are providing the
  necessary information to help maintainers review their PRs.

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 .github/pull_request_template.md | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100644 .github/pull_request_template.md

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 000000000..a916bce57
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,13 @@
+**Please do not include links to any websites that are not publicly accessible. You may include non-link reference numbers to help you and your team identify non-public references.**
+
+### All Pull Requests:
+
+* [ ] Have you followed the guidelines in our Contributing document?
+* [ ] Is this PR to correct an issue?
+* [ ] Is this PR an enhancement?
+
+### Issue/Bug Fix:
+Provide complete details of the issue and how this PR fixes the issue. You can link to one or more existing open, publicly-accessible issue(s) that provide details.
+
+### Enhancement:
+Provide complete details of the enhancement.
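The spec removal in PATCH 438 above is safe because `package_provides_java` and
`package_provides_command` both expand the same "command package" lines yielded by
the `cmd_and_pkg` datasource into identical `echo` invocations. A minimal sketch of
that redundancy in plain Python -- the helper names below are illustrative stand-ins,
not the insights-core spec-factory API:

    # Stand-in helpers: both expansions render one command per discovered line.
    def foreach_execute(items, template):
        return [template % item for item in items]

    def command_with_args(template, items):
        # identical expansion, just declared with the arguments reversed
        return [template % item for item in items]

    # Hypothetical cmd_and_pkg output: "command providing-package"
    found = ["/usr/bin/java java-11-openjdk"]
    assert foreach_execute(found, "/usr/bin/echo '%s'") == \
        command_with_args("/usr/bin/echo '%s'", found)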
From 2fc9f9c0fa7ae50753773e325a9590ab5b0e4a14 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 3 Jun 2021 10:55:13 -0400 Subject: [PATCH 440/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 28 ++++++++++++++++---------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 6f74da35b..cdc4ee2af 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -176,8 +176,6 @@ "L1TF", "L1Tf", "Linux version", - "Machine Check Exception", - "Machine check events logged", "NUMA: ", "Node 0 CPUs: ", "QLogic QLE2692 - QLogic 16Gb FC Dual-port HBA", @@ -1094,7 +1092,6 @@ "COMMAND", "auditd", "avahi", - "bash", "ceilometer-poll", "chronyd", "cinder-volume", @@ -1105,6 +1102,7 @@ "dlm_controld", "dnsmasq", "docker", + "docker-runc-current", "elasticsearch", "gnocchi-metricd", "gnome-shell", @@ -1129,7 +1127,8 @@ "openshift start master api", "openshift start master controllers", "openshift start node", - "ora", + "ora_", + "oracle", "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", @@ -1141,6 +1140,7 @@ "redis-server", "rngd", "sap", + "setup.sh", "smbd", "snmpd", "spausedd", @@ -1160,7 +1160,6 @@ "COMMAND", "STAP/8.2", "auditd", - "bash", "ceilometer-poll", "ceph-osd", "chronyd", @@ -1171,6 +1170,7 @@ "crmd", "dlm_controld", "docker", + "docker-runc-current", "elasticsearch", "gnocchi-metricd", "gnome-shell", @@ -1197,7 +1197,8 @@ "openshift start master api", "openshift start master controllers", "openshift start node", - "ora", + "ora_", + "oracle", "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", @@ -1211,6 +1212,7 @@ "redis-server", "rngd", "sap", + "setup.sh", "smbd", "snmpd", "spausedd", @@ -1236,7 +1238,6 @@ "/usr/sbin/fcoemon --syslog", "COMMAND", "auditd", - "bash", "catalina.base", "ceilometer-poll", "chronyd", @@ -1247,6 +1248,7 @@ "crmd", "dlm_controld", "docker", + "docker-runc-current", "elasticsearch", "gnocchi-metricd", "gnome-shell", @@ -1275,7 +1277,8 @@ "openshift start master api", "openshift start master controllers", "openshift start node", - "ora", + "ora_", + "oracle", "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", @@ -1288,6 +1291,7 @@ "redis-server", "rngd", "sap", + "setup.sh", "smbd", "snmpd", "spausedd", @@ -1305,7 +1309,6 @@ "/usr/bin/openshift start node", "CMD", "auditd", - "bash", "ceilometer-poll", "chronyd", "cinder-volume", @@ -1315,6 +1318,7 @@ "crmd", "dlm_controld", "docker", + "docker-runc-current", "elasticsearch", "gnocchi-metricd", "gnome-shell", @@ -1342,7 +1346,8 @@ "openshift start master api", "openshift start master controllers", "openshift start node", - "ora", + "ora_", + "oracle", "ovs-vswitchd", "pacemaker-controld", "pacemaker_remote", @@ -1354,6 +1359,7 @@ "redis-server", "rngd", "sap", + "setup.sh", "smbd", "snmpd", "spausedd", @@ -4322,5 +4328,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-05-18T12:17:02.852717" + "version": "2021-05-26T13:40:04.964795" } \ No newline at end of file From 92afaa101931f55cf73a53c708148fc2c90b8a55 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 3 Jun 2021 14:26:52 -0500 Subject: [PATCH 441/892] Update CONTRIBUTING to reference the PR template (#3093) * Update CONTRIBUTING to reference the PR template * Update documentation to reference the new PR template that is required to submit a 
new PR. Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Fix incorrect wording Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- CONTRIBUTING.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 81e2fcc46..4f0a52d0f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -133,7 +133,11 @@ from the current master branch of the upstream project. of the topic branch. Again, such manipulations change history and require a `--force` push. -6. When ready, use the github UI to submit a pull request. +6. When ready, use the github UI to submit a pull request. Fill out + the information requested in the PR template. If your PR fixes an + issue make sure to reference the issue using a + [keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) + so that it will be closed once your PR is merged. 7. Repeat steps 4 and 5 as necessary. Note that a forced push to the topic branch will work as expected. The pull request will be From 10326d57dafb5f517b6395d0a0bc5c98353d8a6c Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 8 Jun 2021 15:57:10 -0400 Subject: [PATCH 442/892] feat: add ansible_host option (#3007) * feat: ansible_host config option Signed-off-by: Jeremy Crafts --- insights/client/__init__.py | 7 ++ insights/client/config.py | 40 +++++++++++- insights/client/connection.py | 25 +++++++ insights/client/core_collector.py | 1 + insights/client/data_collector.py | 7 ++ insights/client/phase/v1.py | 8 +++ insights/tests/client/test_ansiblehost.py | 79 +++++++++++++++++++++++ 7 files changed, 166 insertions(+), 1 deletion(-) create mode 100644 insights/tests/client/test_ansiblehost.py diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 7686b002b..96357d380 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -507,6 +507,13 @@ def set_display_name(self, display_name): ''' return self.connection.set_display_name(display_name) + @_net + def set_ansible_host(self, ansible_host): + ''' + returns True on success, False on failure + ''' + return self.connection.set_ansible_host(ansible_host) + @_net def get_diagnosis(self, remediation_id=None): ''' diff --git a/insights/client/config.py b/insights/client/config.py index a2fd13b26..888d6dce7 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -62,6 +62,12 @@ def _core_collect_default(): 'const': True, 'nargs': '?', }, + 'ansible_host': { + 'default': None, + 'opt': ['--ansible-host'], + 'help': 'Set an Ansible hostname for this system. ', + 'action': 'store' + }, 'authmethod': { # non-CLI 'default': 'BASIC' @@ -467,9 +473,9 @@ def __init__(self, *args, **kwargs): if args: self._update_dict(args[0]) self._update_dict(kwargs) + self._cli_opts = None self._imply_options() self._validate_options() - self._cli_opts = None def __str__(self): _str = ' ' @@ -761,6 +767,38 @@ def _imply_options(self): # get full path self.output_file = os.path.abspath(self.output_file) self._determine_filename_and_extension() + if self._cli_opts and "ansible_host" in self._cli_opts and not self.register: + # Specific use case, explained here: + # + # Ansible hostname is, more or less, a second display name. + # However, there is no method in the legacy API to handle + # changes to the ansible hostname. 
So, if a user specifies + # --ansible-hostname on the CLI to change it like they would + # --display-name, in order to actually change it, we need to + # force disable legacy_upload to make the proper HTTP requests. + # + # As of now, registration still needs to be tied to the legacy + # API, so if the user has legacy upload enabled (the default), + # we can't force disable it when registering. Thus, if + # specifying --ansible-hostname alongside --register, all the + # necessary legacy API calls will still be made, the + # ansible-hostname will be packed into the archive, and the + # rest will be handled by ingress. Incidentally, if legacy + # upload *is* disabled, the ansible hostname will also be + # included in the upload metadata. + # + # The reason to explicitly look for ansible_host in the CLI + # parameters *only* is because, due to a customer request from + # long ago, a display_name specified in the config file should + # be applied as part of the upload, and conversely, specifying + # it on the command line (WITHOUT --register) should be a + # "once and done" option that does a single HTTP call to modify + # it. We are going to mimic that behavior with the Ansible + # hostname. + # + # Therefore, only force legacy_upload to False when attempting + # to change Ansible hostname from the CLI, when not registering. + self.legacy_upload = False def _determine_filename_and_extension(self): ''' diff --git a/insights/client/connection.py b/insights/client/connection.py index 448c487b7..364409fcb 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -856,6 +856,9 @@ def upload_archive(self, data_collected, content_type, duration=None): if self.config.display_name: # add display_name to canonical facts c_facts['display_name'] = self.config.display_name + if self.config.ansible_host: + # add ansible_host to canonical facts + c_facts['ansible_host'] = self.config.ansible_host if self.config.branch_info: c_facts["branch_info"] = self.config.branch_info c_facts["satellite_id"] = self.config.branch_info["remote_leaf"] @@ -956,6 +959,28 @@ def set_display_name(self, display_name): logger.info('Display name updated to ' + display_name + '.') return True + def set_ansible_host(self, ansible_host): + ''' + Set Ansible hostname of a system independently of upload. + ''' + system = self._fetch_system_by_machine_id() + if not system: + return system + inventory_id = system[0]['id'] + + req_url = self.inventory_url + '/hosts/' + inventory_id + try: + logger.log(NETWORK, "PATCH %s", req_url) + res = self.session.patch(req_url, json={'ansible_host': ansible_host}) + except REQUEST_FAILED_EXCEPTIONS as e: + _api_request_failed(e) + return False + if (self.handle_fail_rcs(res)): + logger.error('Could not update Ansible hostname.') + return False + logger.info('Ansible hostname updated to ' + ansible_host + '.') + return True + def get_diagnosis(self, remediation_id=None): ''' Reach out to the platform and fetch a diagnosis. 
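For context on the new flow above: outside of an upload, set_ansible_host()
resolves the host's inventory ID via _fetch_system_by_machine_id() and then issues
a single PATCH against the inventory hosts endpoint. A rough standalone sketch of
that request using the requests library -- the base URL, host ID, and token below
are placeholders, not real endpoints or credentials:

    import requests

    inventory_url = "https://console.example.com/api/inventory/v1"  # placeholder base URL
    inventory_id = "00000000-0000-0000-0000-000000000000"  # placeholder inventory host ID

    # Mirrors the PATCH request that set_ansible_host() builds above.
    res = requests.patch(
        inventory_url + "/hosts/" + inventory_id,
        json={"ansible_host": "host.example.com"},
        headers={"Authorization": "Bearer PLACEHOLDER-TOKEN"},  # placeholder auth
    )
    res.raise_for_status()  # any 2xx response means the hostname was updated

This is also why the config change earlier in this patch forces legacy_upload off
for a CLI-only --ansible-host change: per the comment above, the legacy API has no
method for updating the Ansible hostname.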
diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py index ff9d32bd3..9d5b17dc3 100644 --- a/insights/client/core_collector.py +++ b/insights/client/core_collector.py @@ -77,6 +77,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): logger.debug('Collecting metadata...') self._write_branch_info(branch_info) self._write_display_name() + self._write_ansible_host() self._write_version_info() self._write_tags() self._write_blacklist_report(blacklist_report) diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index c5af189f1..006537516 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -91,6 +91,12 @@ def _write_display_name(self): self.archive.add_metadata_to_archive( self.config.display_name, '/display_name') + def _write_ansible_host(self): + if self.config.ansible_host: + logger.debug("Writing ansible_host to archive...") + self.archive.add_metadata_to_archive( + self.config.ansible_host, '/ansible_host') + def _write_version_info(self): logger.debug("Writing version information to archive...") version_info = get_version_info() @@ -329,6 +335,7 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): logger.debug('Collecting metadata...') self._write_branch_info(branch_info) self._write_display_name() + self._write_ansible_host() self._write_version_info() self._write_tags() self._write_blacklist_report(blacklist_report) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 30e8d482a..d87045a57 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -275,6 +275,14 @@ def post_update(client, config): else: sys.exit(constants.sig_kill_bad) + # set --ansible-hostname independent of register + # only do this if set from the CLI. 
normally display_name is sent on upload + if 'ansible_host' in config._cli_opts and not config.register: + if client.set_ansible_host(config.ansible_host): + sys.exit(constants.sig_kill_ok) + else: + sys.exit(constants.sig_kill_bad) + @phase def collect_and_output(client, config): diff --git a/insights/tests/client/test_ansiblehost.py b/insights/tests/client/test_ansiblehost.py new file mode 100644 index 000000000..1046b27bd --- /dev/null +++ b/insights/tests/client/test_ansiblehost.py @@ -0,0 +1,79 @@ +import pytest +from insights.client.config import InsightsConfig +from insights.client.connection import InsightsConnection +from mock.mock import patch + + +class MockSession(object): + def __init__(self): + self.status_code = None + self.text = None + self.content = '{"display_name": "test"}' + + def get(self, url=None, timeout=None, headers=None, data=None): + return MockResponse(self.status_code, self.text, self.content) + + def put(self, url=None, timeout=None, headers=None, data=None): + return MockResponse(self.status_code, self.text, None) + + +class MockResponse(object): + def __init__(self, expected_status, expected_text, expected_content): + self.status_code = expected_status + self.text = expected_text + self.content = expected_content + + +def mock_init_session(obj): + return MockSession() + + +def mock_get_proxies(obj): + return + + +@pytest.mark.skip(reason='No time to fix this for double-API calling') +@patch('insights.client.connection.InsightsConnection._init_session', + mock_init_session) +@patch('insights.client.connection.InsightsConnection.get_proxies', + mock_get_proxies) +@patch('insights.client.utilities.constants.machine_id_file', + '/tmp/machine-id') +def test_set_ansible_host(): + conf = InsightsConfig() + c = InsightsConnection(conf) + c.session.status_code = 200 + assert c.set_ansible_host('GO STICK YOUR HEAD IN A PIG') + c.session.status_code = 404 + assert not c.set_ansible_host('GO STICK YOUR HEAD IN A PIG') + c.session.status_code = 500 + c.session.text = 'oops' + assert not c.set_ansible_host('GO STICK YOUR HEAD IN A PIG') + + +def test_ansible_host_no_reg_forces_legacy_false(): + ''' + When not specifying --register, using --ansible-host on the CLI forces legacy_upload to False + ''' + conf = InsightsConfig(register=False, ansible_host="test", legacy_upload=True) + conf._cli_opts = ["ansible_host"] + conf._imply_options() + assert not conf.legacy_upload + conf = InsightsConfig(register=False, ansible_host="test", legacy_upload=False) + conf._cli_opts = ["ansible_host"] + conf._imply_options() + assert not conf.legacy_upload + + +def test_ansible_host_reg_legacy_no_change(): + ''' + When specifying --register, using --ansible-host on the CLI does not affect legacy_upload + ''' + conf = InsightsConfig(register=True, ansible_host="test", legacy_upload=True) + conf._cli_opts = ["ansible_host"] + conf._imply_options() + assert conf.legacy_upload + conf = InsightsConfig(register=True, ansible_host="test", legacy_upload=False) + conf._cli_opts = ["ansible_host"] + conf._imply_options() + assert not conf.legacy_upload From ea55ee586ae3bd92d3441d43e5162ea446e6a572 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 9 Jun 2021 03:59:39 +0800 Subject: [PATCH 443/892] Fix the command of gcp_license_codes (#3097) Signed-off-by: Xiangce Liu --- insights/specs/default.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index e3b7853a2..f185068a6 100644 --- a/insights/specs/default.py +++ 
b/insights/specs/default.py @@ -433,7 +433,7 @@ def is_gcp(broker): return True raise SkipComponent() - gcp_license_codes = simple_command("/usr/bin/curl -s curl -H Metadata-Flavor: Google http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp]) + gcp_license_codes = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp]) greenboot_status = simple_command("/usr/libexec/greenboot/greenboot-status") grub_conf = simple_file("/boot/grub/grub.conf") grub_config_perms = simple_command("/bin/ls -l /boot/grub2/grub.cfg") # only RHEL7 and updwards From 47d55e5c9cbd98175d2db7a6e7d5f83dce013885 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 8 Jun 2021 15:03:14 -0500 Subject: [PATCH 444/892] Correcting rst syntax in contributing md file (#3096) Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- CONTRIBUTING.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4f0a52d0f..927e34ac6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -16,24 +16,24 @@ phase). ## Rule Development Setup -Clone the project:: +Clone the project: git clone git@github.com:RedHatInsights/insights-core.git -Or, alternatively, using HTTPS:: +Or, alternatively, using HTTPS: git clone https://github.com/RedHatInsights/insights-core.git -Initialize a virtualenv:: +Initialize a virtualenv: cd insights-core virtualenv . -Install the project and its dependencies:: +Install the project and its dependencies: bin/pip install -e . -Install a rule repository:: +Install a rule repository: bin/pip install -e path/to/rule/repo @@ -42,16 +42,16 @@ Install a rule repository:: If you wish to contribute to the insights-core project you'll need to create a fork in github. -1. Clone your fork:: +1. Clone your fork: git clone git@github.com:your-user/insights-core.git -2. Reference the original project as "upstream":: +2. Reference the original project as "upstream": git remote add upstream git@github.com:RedHatInsights/insights-core.git At this point, you would synchronize your fork with the upstream project -using the following commands:: +using the following commands: git pull upstream master git push origin master From 9a06ad99aedb0dc470ad4091349240f9e20f88f3 Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Tue, 8 Jun 2021 22:04:54 +0200 Subject: [PATCH 445/892] Fix exception thrown from Scheduler parser (#3094) When there is no active scheduler, the parser throws an exception. This change fixes the problem. 
Signed-off-by: Stanislav Kontar --- insights/parsers/scheduler.py | 6 ++++-- insights/parsers/tests/test_scheduler.py | 9 +++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/insights/parsers/scheduler.py b/insights/parsers/scheduler.py index 03f10ac9d..1dd6a45fe 100644 --- a/insights/parsers/scheduler.py +++ b/insights/parsers/scheduler.py @@ -48,6 +48,7 @@ def parse_content(self, content): self.device = None self.schedulers = [] self.active_scheduler = None + self.data = {} # Legacy value to keep backwards compatibility self.device = self.file_path.split('/')[3] for line in get_active_lines(content): @@ -57,5 +58,6 @@ def parse_content(self, content): self.schedulers = line.replace('[', '').replace(']', '').split() - # Legacy values - self.data = {self.device: '[' + self.active_scheduler + ']'} + # Set legacy values + if self.active_scheduler: + self.data = {self.device: '[' + self.active_scheduler + ']'} diff --git a/insights/parsers/tests/test_scheduler.py b/insights/parsers/tests/test_scheduler.py index e3f5c9f38..84fe5f51a 100644 --- a/insights/parsers/tests/test_scheduler.py +++ b/insights/parsers/tests/test_scheduler.py @@ -64,6 +64,15 @@ def test_schedulers_defaults(): assert r.active_scheduler == 'mq-deadline' +def test_none(): + r = scheduler.Scheduler(context_wrap('none', + '/sys/block/vda/queue/scheduler')) + assert r.device == 'vda' + assert r.schedulers == ['none'] + assert r.active_scheduler is None + assert r.data == {} + + def test_docs(): env = { 'scheduler_obj': scheduler.Scheduler(context_wrap(SDA_SCHEDULER, SDA_PATH)) From 092dc57f4578a0db445013d437d4f2e0f3623030 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 10 Jun 2021 06:04:30 +0800 Subject: [PATCH 446/892] Update parser "httpd_M" and "httpd_V" to return right binary path (#3092) * Also add args interface for rule testing in InputData and Context Signed-off-by: Huanhuan Li --- insights/core/context.py | 3 ++- insights/parsers/httpd_M.py | 8 +++----- insights/parsers/httpd_V.py | 8 +++----- insights/parsers/tests/test_httpd_M.py | 6 +++--- insights/parsers/tests/test_httpd_V.py | 4 ++-- insights/tests/__init__.py | 4 ++-- 6 files changed, 15 insertions(+), 18 deletions(-) diff --git a/insights/core/context.py b/insights/core/context.py index 47acac36b..f8e828019 100644 --- a/insights/core/context.py +++ b/insights/core/context.py @@ -103,7 +103,8 @@ def __init__(self, **kwargs): self.cmd = None optional_attrs = [ "content", "path", "hostname", "release", - "machine_id", "target", "last_client_run", "relative_path" + "machine_id", "target", "last_client_run", "relative_path", + "args" ] for k in optional_attrs: setattr(self, k, kwargs.pop(k, None)) diff --git a/insights/parsers/httpd_M.py b/insights/parsers/httpd_M.py index e2a7a6aba..78019fc17 100644 --- a/insights/parsers/httpd_M.py +++ b/insights/parsers/httpd_M.py @@ -85,9 +85,7 @@ def parse_content(self, content): @property def httpd_command(self): """ - str: The full path of a running httpd. An Empty string when nothing - is found. It's to identify which httpd binaries the instance run with. + Return the full binary path of a running httpd or None when nothing + is found. It's to identify which httpd binaries the instance run with. 
""" - # Typical `file_path` of HttpdM looks like: '/usr/sbin/httpd_-M' - # Remove the trailing '_-M' - return self.file_path[:-3] if self.file_path else '' + return self.args diff --git a/insights/parsers/httpd_V.py b/insights/parsers/httpd_V.py index dae5c7878..488c13f88 100644 --- a/insights/parsers/httpd_V.py +++ b/insights/parsers/httpd_V.py @@ -97,12 +97,10 @@ def parse_content(self, content): @property def httpd_command(self): """ - str: The full path of a running httpd. An Empty string when nothing - is found. To identify which httpd binaries the instance run with. + Return the full binary path of a running httpd or None when nothing + is found. It's to identify which httpd binaries the instance run with. """ - # Typical `file_path` of HttpdV looks like: '/usr/sbin/httpd_-V' - # Remove the trailing '_-V' - return self.file_path[:-3] if self.file_path else '' + return self.args @property def mpm(self): diff --git a/insights/parsers/tests/test_httpd_M.py b/insights/parsers/tests/test_httpd_M.py index 2682e9825..1b652d0d6 100644 --- a/insights/parsers/tests/test_httpd_M.py +++ b/insights/parsers/tests/test_httpd_M.py @@ -64,13 +64,13 @@ def test_httpd_M(): - result = HttpdM(context_wrap(HTTPD_M_RHEL6, path='/usr/test/httpd_-M')) + result = HttpdM(context_wrap(HTTPD_M_RHEL6, path='/usr/test/httpd_-M', args='/usr/test/httpd')) assert result.httpd_command == '/usr/test/httpd' assert sorted(result.loaded_modules) == sorted(result.shared_modules + result.static_modules) assert 'core_module' in result assert result['core_module'] == 'static' - result = HttpdM(context_wrap(HTTPD_M_RHEL7, path='/usr/tst/httpd_-M')) + result = HttpdM(context_wrap(HTTPD_M_RHEL7, path='/usr/tst/httpd_-M', args='/usr/tst/httpd')) assert result.httpd_command == '/usr/tst/httpd' assert sorted(result.loaded_modules) == sorted(result.shared_modules + result.static_modules) assert 'core_module' not in result @@ -89,7 +89,7 @@ def test_httpd_M_exp(): def test_httpd_M_doc(): env = { 'HttpdM': HttpdM, - 'hm': HttpdM(context_wrap(HTTPD_M_DOC, path='/usr/sbin/httpd_-M')) + 'hm': HttpdM(context_wrap(HTTPD_M_DOC, path='/usr/sbin/httpd_-M', args='/usr/sbin/httpd')) } failed, total = doctest.testmod(httpd_M, globs=env) assert failed == 0 diff --git a/insights/parsers/tests/test_httpd_V.py b/insights/parsers/tests/test_httpd_V.py index 66353d06e..123d3bc6c 100644 --- a/insights/parsers/tests/test_httpd_V.py +++ b/insights/parsers/tests/test_httpd_V.py @@ -81,7 +81,7 @@ def test_httpd_V(): - result = HttpdV(context_wrap(HTTPD_V_22, path='/usr/sbin/httpd_-V')) + result = HttpdV(context_wrap(HTTPD_V_22, path='/usr/sbin/httpd_-V', args='/usr/sbin/httpd')) assert result["Server MPM"] == "prefork" assert result["Server version"] == "apache/2.2.15 (unix)" assert result["forked"] == "yes (variable process count)" @@ -93,7 +93,7 @@ def test_httpd_V(): assert result.mpm == "prefork" assert result.version == "apache/2.2.15 (unix)" - result = HttpdV(context_wrap(HTTPD_V_24, path='/usr/sbin/httpd.worker_-V')) + result = HttpdV(context_wrap(HTTPD_V_24, path='/usr/sbin/httpd.worker_-V', args='/usr/sbin/httpd.worker')) assert result["Server MPM"] == "worker" assert result["Server version"] == "apache/2.4.6 (red hat enterprise linux)" assert result["forked"] == "yes (variable process count)" diff --git a/insights/tests/__init__.py b/insights/tests/__init__.py index 3ec27fa3c..ec4a106c6 100644 --- a/insights/tests/__init__.py +++ b/insights/tests/__init__.py @@ -225,7 +225,7 @@ def add_component(self, comp, obj): """ self.data[comp] = obj - def 
add(self, spec, content, path=None, do_filter=True): + def add(self, spec, content, path=None, do_filter=True, **kwargs): if not path: # path must change to allow parsers to fire path = self._make_path() if not path.startswith("/"): @@ -242,7 +242,7 @@ def add(self, spec, content, path=None, do_filter=True): if do_filter: content_iter = list(apply_filters(spec, content_iter)) - content_provider = context_wrap(content_iter, path=path, split=False) + content_provider = context_wrap(content_iter, path=path, split=False, **kwargs) if dr.get_delegate(spec).multi_output: if spec not in self.data: self.data[spec] = [] From 794498c1583ca1a2140e7f1d0422adbd5ff648e7 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Thu, 10 Jun 2021 13:29:54 +0530 Subject: [PATCH 447/892] Host and datetime parsing for ForemanSSLAccessLog (#3098) Signed-off-by: Rohan Arora --- insights/parsers/foreman_log.py | 29 +++++++++++++++++++++- insights/parsers/tests/test_foreman_log.py | 11 +++++++- 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/insights/parsers/foreman_log.py b/insights/parsers/foreman_log.py index 3117735c9..f18a057de 100644 --- a/insights/parsers/foreman_log.py +++ b/insights/parsers/foreman_log.py @@ -25,6 +25,9 @@ SatelliteLog - file ``/var/log/foreman-installer/satellite.log`` ---------------------------------------------------------------- +ForemanSSLAccessLog - file ``/var/log/httpd/foreman-ssl_access_ssl.log`` +------------------------------------------------------------------------ + """ from datetime import datetime @@ -146,7 +149,7 @@ class CandlepinErrorLog(LogFileOutput): @parser(Specs.foreman_ssl_access_ssl_log) class ForemanSSLAccessLog(LogFileOutput): - """Class for parsing ``var/log/httpd/foreman-ssl_access_ssl.log`` file. + """Class for parsing ``/var/log/httpd/foreman-ssl_access_ssl.log`` file. 
Sample log contents:: @@ -157,9 +160,33 @@ class ForemanSSLAccessLog(LogFileOutput): 10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52 -0400] "GET /rhsm/consumers/385e688f-43ad-41b2-9fc7-593942ddec78/compliance HTTP/1.1" 200 5527 10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52 -0400] "GET /rhsm/consumers/4f8a39d0-38b6-4663-8b7e-03368be4d3ab HTTP/1.1" 200 10695 "-" "-" + Each line is parsed into a dictionary with the following keys: + + * **raw_message(str)** - complete log line + * **host(str)** - remote host's IP or hostname + * **timestamp(datetime)** - date and time of http request, time zone is ignored Examples: >>> foreman_ssl_acess_log.get('consumers/385e688f-43ad-41b2-9fc7-593942ddec78')[0]['raw_message'] '10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52 -0400] "GET /rhsm/consumers/385e688f-43ad-41b2-9fc7-593942ddec78 HTTP/1.1" 200 10736 "-" "-"' + >>> foreman_ssl_acess_log.get('consumers/385e688f-43ad-41b2-9fc7-593942ddec78')[0]['host'] + '10.181.73.211' + >>> foreman_ssl_acess_log.get('consumers/385e688f-43ad-41b2-9fc7-593942ddec78')[0]['timestamp'] + datetime.datetime(2017, 3, 27, 13, 34, 52) """ + # Actual time format - '%d/%b/%Y:%H:%M:%S %z' but %z doesn't work for python 2 time_format = '%d/%b/%Y:%H:%M:%S' + + def _parse_line(self, line): + msg_info = {'raw_message': line} + # Split enough to get datetime and host + line_split = line.split(None, 4) + if len(line_split) == 5: + try: + msg_info['timestamp'] = datetime.strptime( + line_split[3].strip('['), self.time_format + ) + msg_info['host'] = line_split[0] + except ValueError: + pass + return msg_info diff --git a/insights/parsers/tests/test_foreman_log.py b/insights/parsers/tests/test_foreman_log.py index 9154c7a5d..889724da1 100644 --- a/insights/parsers/tests/test_foreman_log.py +++ b/insights/parsers/tests/test_foreman_log.py @@ -172,6 +172,11 @@ 10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52 -0400] "GET /rhsm/consumers/385e688f-43ad-41b2-9fc7-593942ddec78/entitlements?exclude=certificates.key&exclude=certificates.cert HTTP/1.1" 200 9920 "-" "-" """.strip() +FOREMAN_SSL_ACCESS_SSL_LOG_WRONG = """ +"GET /rhsm/consumers/385e688f-43ad-41b2-9fc7-593942ddec78 HTTP/1.1" 200 +10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52:0400] "GET /rhsm/status HTTP/1.1" 200 263 "-" "-" +""".strip() + def test_production_log(): fm_log = ProductionLog(context_wrap(PRODUCTION_LOG)) @@ -225,7 +230,11 @@ def test_foreman_ssl_access_ssl_log(): assert "385e688f-43ad-41b2-9fc7-593942ddec78" in foreman_ssl_access_log assert len(foreman_ssl_access_log.get("GET /rhsm/consumers")) == 5 assert len(foreman_ssl_access_log.get("385e688f-43ad-41b2-9fc7-593942ddec78")) == 3 - assert len(list(foreman_ssl_access_log.get_after(datetime(2017, 3, 27, 13, 34, 0)))) == 7 + assert foreman_ssl_access_log.get('/rhsm/consumers')[0].get('host') == '10.181.73.211' + assert foreman_ssl_access_log.get('/rhsm/consumers')[0].get('timestamp') == datetime(2017, 3, 27, 13, 34, 52) + + foreman_ssl_access_log = ForemanSSLAccessLog(context_wrap(FOREMAN_SSL_ACCESS_SSL_LOG_WRONG)) + assert len(foreman_ssl_access_log.get('GET')) == 2 def test_doc(): From fb56adb9cde2c2b83afbd7db47d48e45bf8753d5 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 15 Jun 2021 13:13:49 +0800 Subject: [PATCH 448/892] New parser for GoogleInstanceType (#2801) * New parser for GoogleInstanceType Signed-off-by: Xiangce Liu * fix flake8 errors Signed-off-by: Xiangce Liu * Update the spec as per is_gcp Signed-off-by: Xiangce Liu * Remove it from 
insights_archive.py Signed-off-by: Xiangce Liu * Rename to GCP* Signed-off-by: Xiangce Liu * fix doc title Signed-off-by: Xiangce Liu --- .../gcp_instance_type.rst | 3 + insights/parsers/gcp_instance_type.py | 70 ++++++++++++++++ .../parsers/tests/test_gcp_instance_type.py | 81 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 156 insertions(+) create mode 100644 docs/shared_parsers_catalog/gcp_instance_type.rst create mode 100644 insights/parsers/gcp_instance_type.py create mode 100644 insights/parsers/tests/test_gcp_instance_type.py diff --git a/docs/shared_parsers_catalog/gcp_instance_type.rst b/docs/shared_parsers_catalog/gcp_instance_type.rst new file mode 100644 index 000000000..5ed223c74 --- /dev/null +++ b/docs/shared_parsers_catalog/gcp_instance_type.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gcp_instance_type + :members: + :show-inheritance: diff --git a/insights/parsers/gcp_instance_type.py b/insights/parsers/gcp_instance_type.py new file mode 100644 index 000000000..5189c101a --- /dev/null +++ b/insights/parsers/gcp_instance_type.py @@ -0,0 +1,70 @@ +""" +GCPInstanceType +=============== + +This parser simply reads the output of command +``curl http://metadata.google.internal/computeMetadata/v1/instance/machine-type -H 'Metadata-Flavor: Google'``, +which is used to check the machine type of the Google instance of the host. + +For more details, See: +- https://cloud.google.com/compute/docs/machine-types +- https://cloud.google.com/compute/docs/storing-retrieving-metadata#api_4 + +""" + +from insights import parser, CommandParser +from insights.specs import Specs +from insights.parsers import SkipException, ParseException + + +@parser(Specs.gcp_instance_type) +class GCPInstanceType(CommandParser): + """ + Class for parsing the GCP Instance type returned by command + ``curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/machine-type``, + + + Typical output of this command is:: + + projects/123456789/machineTypes/n2-highcpu-16 + + + Raises: + SkipException: When content is empty or no parse-able content. + ParseException: When type cannot be recognized. + + Attributes: + type (str): The type of VM instance in GCP, e.g: n2 + size (str): The size of VM instance in GCP, e.g: highcpu-16 + raw (str): The fully type string, e.g. 
'n2-highcpu-16'
+        raw_line (str): The fully type string returned by the ``curl`` command
+
+    Examples:
+        >>> gcp_inst.type
+        'n2'
+        >>> gcp_inst.size
+        'highcpu-16'
+        >>> gcp_inst.raw
+        'n2-highcpu-16'
+    """
+
+    def parse_content(self, content):
+        if not content or 'curl: ' in content[0]:
+            raise SkipException()
+
+        self.raw_line = self.raw = self.type = self.size = None
+        # Ignore any curl stats that may be present in data
+        for l in content:
+            l_strip = l.strip()
+            if ' ' not in l_strip and '-' in l_strip:
+                self.raw_line = l_strip
+                self.raw = l_strip.split('/')[-1]
+                type_sp = self.raw.split('-', 1)
+                self.type, self.size = type_sp[0], type_sp[1]
+
+        if not self.type:
+            raise ParseException('Unrecognized type: "{0}"', content[0])
+
+    def __repr__(self):
+        return "<gcp_type: {t}, size: {s}, raw: {r}>".format(
+            t=self.type, s=self.size, r=self.raw_line)
diff --git a/insights/parsers/tests/test_gcp_instance_type.py b/insights/parsers/tests/test_gcp_instance_type.py
new file mode 100644
index 000000000..7f680558e
--- /dev/null
+++ b/insights/parsers/tests/test_gcp_instance_type.py
@@ -0,0 +1,81 @@
+import pytest
+import doctest
+from insights.parsers import gcp_instance_type
+from insights.parsers.gcp_instance_type import GCPInstanceType
+from insights.tests import context_wrap
+from insights.parsers import SkipException, ParseException
+
+GOOGLE_TYPE_1 = "projects/123456789/machineTypes/n2-highcpu-16"
+GOOGLE_TYPE_2 = "projects/123456789/machineTypes/e2-medium"
+GOOGLE_TYPE_3 = """
+  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
+                                 Dload  Upload   Total   Spent    Left  Speed
+  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
+100  1126  100  1126    0     0  1374k      0 --:--:-- --:--:-- --:--:-- 1099k
+projects/123456789/machineTypes/e2-medium
+"""
+GOOGLE_TYPE_DOC = GOOGLE_TYPE_1
+GOOGLE_TYPE_AB_1 = """
+curl: (7) Failed to connect to 169.254.169.254 port 80: Connection timed out
+""".strip()
+GOOGLE_TYPE_AB_2 = """
+curl: (7) couldn't connect to host
+""".strip()
+GOOGLE_TYPE_AB_3 = """
+curl: (28) connect() timed out!
+""".strip() +GOOGLE_TYPE_AB_4 = """ +.micro +""".strip() + + +def test_gcp_instance_type_ab_other(): + with pytest.raises(SkipException): + GCPInstanceType(context_wrap(GOOGLE_TYPE_AB_1)) + + with pytest.raises(SkipException): + GCPInstanceType(context_wrap(GOOGLE_TYPE_AB_2)) + + with pytest.raises(SkipException): + GCPInstanceType(context_wrap(GOOGLE_TYPE_AB_3)) + + with pytest.raises(ParseException) as pe: + GCPInstanceType(context_wrap(GOOGLE_TYPE_AB_4)) + assert 'Unrecognized type' in str(pe) + + +def test_gcp_instance_type_ab_empty(): + with pytest.raises(SkipException): + GCPInstanceType(context_wrap('')) + + +def test_gcp_instance_type(): + google = GCPInstanceType(context_wrap(GOOGLE_TYPE_1)) + assert google.type == "n2" + assert google.size == "highcpu-16" + assert google.raw == "n2-highcpu-16" + assert google.raw_line == GOOGLE_TYPE_1 + + google = GCPInstanceType(context_wrap(GOOGLE_TYPE_2)) + assert google.type == "e2" + assert google.size == "medium" + assert google.raw == "e2-medium" + assert google.raw_line == GOOGLE_TYPE_2 + assert "e2-medium" in str(google) + + +def test_gcp_instance_type_stats(): + google = GCPInstanceType(context_wrap(GOOGLE_TYPE_3)) + assert google.type == "e2" + assert google.size == "medium" + assert google.raw == "e2-medium" + assert google.raw_line == GOOGLE_TYPE_2 + assert "e2-medium" in str(google) + + +def test_doc_examples(): + env = { + 'gcp_inst': GCPInstanceType(context_wrap(GOOGLE_TYPE_DOC)) + } + failed, total = doctest.testmod(gcp_instance_type, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 4120f0f11..e99fa2352 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -187,6 +187,7 @@ class Specs(SpecSet): freeipa_healthcheck_log = RegistryPoint() fstab = RegistryPoint() galera_cnf = RegistryPoint() + gcp_instance_type = RegistryPoint() gcp_license_codes = RegistryPoint() getcert_list = RegistryPoint() getconf_page_size = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index f185068a6..4ab970ec3 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -433,6 +433,7 @@ def is_gcp(broker): return True raise SkipComponent() + gcp_instance_type = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/machine-type --connect-timeout 5", deps=[is_gcp]) gcp_license_codes = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp]) greenboot_status = simple_command("/usr/libexec/greenboot/greenboot-status") grub_conf = simple_file("/boot/grub/grub.conf") From a7d2d8048f47142a7839e7cdb40ee4728eedc1ec Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 15 Jun 2021 17:10:54 +0800 Subject: [PATCH 449/892] Add sos_spec audit_conf (#3103) Signed-off-by: jiazhang --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 6e786769d..41421df15 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -11,6 +11,7 @@ class SosSpecs(Specs): alternatives_display_python = simple_file("sos_commands/alternatives/alternatives_--display_python") auditctl_status = simple_file("sos_commands/auditd/auditctl_-s") + auditd_conf = simple_file("/etc/audit/auditd.conf") autofs_conf = simple_file("/etc/autofs.conf") blkid = 
first_file(["sos_commands/block/blkid_-c_.dev.null", "sos_commands/filesys/blkid_-c_.dev.null"]) From 2dd0d12e0ccd9d29c85b2d8b21f89a38a62a229c Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 16 Jun 2021 12:33:54 -0400 Subject: [PATCH 450/892] Fix non aws systems from being detected as aws (#3100) * There are systems that end up with uuids that start with ec2 that aren't aws systems. I added an extra check on the asset tag so non aws systems aren't detected. * Added a new test for a non aws system with a uuid starting with ec2. * Fixes #2904 Signed-off-by: Ryan Blakley --- insights/combiners/cloud_provider.py | 4 +++- insights/combiners/tests/test_cloud_provider.py | 13 +++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/insights/combiners/cloud_provider.py b/insights/combiners/cloud_provider.py index 4bdd1bd57..e3947e139 100644 --- a/insights/combiners/cloud_provider.py +++ b/insights/combiners/cloud_provider.py @@ -235,10 +235,12 @@ def __init__(self, *args, **kwargs): self.rpm = 'rh-amazon-rhui-client' self.bios_vendor_version = 'amazon' self.uuid = 'ec2' + self.asset_tag = 'Amazon EC2' self.cp_bios_vendor = self._get_cp_bios_vendor(self.bios_vendor_version) self.cp_bios_version = self._get_cp_bios_version(self.bios_vendor_version) self.cp_rpms = self._get_rpm_cp_info(self.rpm) self.cp_uuid = self._get_cp_from_uuid(self.uuid) + self.cp_asset_tag = self._get_cp_from_asset_tag(self.asset_tag) class AzureCloudProvider(CloudProviderInstance): @@ -387,7 +389,7 @@ def _select_provider(self): if self._cp_objects[self.AZURE].cp_yum or self._cp_objects[self.AZURE].cp_asset_tag: return self.AZURE - if self._cp_objects[self.AWS].cp_uuid: + if self._cp_objects[self.AWS].cp_uuid and self._cp_objects[self.AWS].cp_asset_tag: return self.AWS if self._cp_objects[self.ALIBABA].cp_manufacturer: diff --git a/insights/combiners/tests/test_cloud_provider.py b/insights/combiners/tests/test_cloud_provider.py index de9e41848..b189a20c3 100644 --- a/insights/combiners/tests/test_cloud_provider.py +++ b/insights/combiners/tests/test_cloud_provider.py @@ -209,7 +209,7 @@ \tProduct Name: X9SCL/X9SCM \tVersion: 0123456789 \tSerial Number: 0123456789 -\tUUID: 12345678-1234-1234-1234-123456681234 +\tUUID: EC245678-1234-1234-1234-123456681234 \tWake-up Type: Power Switch \tSKU Number: To be filled by O.E.M. \tFamily: To be filled by O.E.M. 
@@ -313,7 +313,7 @@
 \tLock: Not Present
 \tVersion: Not Specified
 \tSerial Number: Not Specified
-\tAsset Tag: Not Specified
+\tAsset Tag: Amazon EC2
 \tBoot-up State: Safe
 \tPower Supply State: Safe
 \tThermal State: Safe
@@ -666,6 +666,15 @@ def test__uuid():
     assert ret.cp_uuid[CloudProvider.AWS] == 'EC2F58AF-2DAD-C57E-88C0-A81CB6084290'
 
 
+def test__uuid_not_aws():
+    irpms = IRPMS(context_wrap(RPMS))
+    dmi = DMIDecode(context_wrap(DMIDECODE_BARE_METAL))
+    yrl = YumRepoList(context_wrap(YUM_REPOLIST_NOT_AZURE))
+    ret = CloudProvider(irpms, dmi, yrl, None)
+    assert ret.cloud_provider is None
+    assert ret.cp_uuid[CloudProvider.AWS] == 'EC245678-1234-1234-1234-123456681234'
+
+
 def test_dmidecode_alibaba():
     irpms = IRPMS(context_wrap(RPMS))
     dmi = DMIDecode(context_wrap(DMIDECODE_ALIBABA))

From 166edc840b2ed18b3e2479fd96cba3295f89e933 Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Wed, 16 Jun 2021 15:56:00 -0500
Subject: [PATCH 451/892] Update pull request template (#3106)

* Update pull request template

* Improve instructions

* Convert instructions to comments

* Simplify number of sections

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Further simplification/cleanup

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 .github/pull_request_template.md | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index a916bce57..8ff08196c 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,13 +1,20 @@
-**Please do not include links to any websites that are not publicly accessible. You may include non-link reference numbers to help you and your team identify non-public references.**
-
 ### All Pull Requests:
 
+Check all that apply:
+
-* [ ] Have you followed the guidelines in our Contributing document?
+* [ ] Have you followed the guidelines in our Contributing document, including the instructions about commit messages?
 * [ ] Is this PR to correct an issue?
 * [ ] Is this PR an enhancement?
 
-### Issue/Bug Fix:
-Provide complete details of the issue and how this PR fixes the issue. You can link to one or more existing open, publicly-accessible issue(s) that provide details.
+### Complete Description of Additions/Changes:
+
+
+*Add your description here*

From 4b2ddca329393e5201c808168243e67dd5e0fc31 Mon Sep 17 00:00:00 2001
From: Akshay Gaikwad
Date: Thu, 17 Jun 2021 10:16:20 +0530
Subject: [PATCH 452/892] Add partitions to default specs entries (#3104)

The partitions spec is already available for sos archives, but it was
missing from the default specs. This commit adds the partitions spec to
the defaults so that archives generated by insights also gather the
/proc/partitions file.

Since a new rule has been developed to use the partitions spec, it has
been removed from the skipped_specs list.
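For reference, /proc/partitions is a small fixed-width table; the sample
below is illustrative only (device names and sizes vary per system):

   major minor  #blocks  name

    252        0   62914560 vda
    252        1    1048576 vda1
    252        2   61864924 vda2

Each row carries the major/minor device numbers, the partition size in
1 KiB blocks, and the kernel device name.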
Signed-off-by: Akshay Gaikwad --- insights/specs/default.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 4ab970ec3..3abf3b9d7 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -707,6 +707,7 @@ def cmd_and_pkg(broker): package_provides_command = command_with_args("/usr/bin/echo '%s'", cmd_and_pkg) pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) + partitions = simple_file("/proc/partitions") pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") @datasource(Services, HostContext) diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index d3a26a0e6..d00025289 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -77,7 +77,6 @@ def test_get_component_by_symbolic_name(): 'freeipa_healthcheck_log', 'ironic_conf', 'octavia_conf', - 'partitions', 'rhn_entitlement_cert_xml', 'rhn_hibernate_conf', 'rhn_schema_version', From 63eb37076e05f57ddf73ccdc483c27d4f8a7055e Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 17 Jun 2021 16:13:20 +0800 Subject: [PATCH 453/892] New spec to collect running httpd and corresponding pkg (#3091) Signed-off-by: Huanhuan Li --- insights/specs/default.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 3abf3b9d7..4a8764989 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -74,9 +74,10 @@ def _get_running_commands(broker, commands): ctx = broker[HostContext] ret = set() - for cmd in set(p['COMMAND_NAME'] for p in ps_cmds): + for cmd in set(p['COMMAND'] for p in ps_cmds): try: - which = ctx.shell_out("/usr/bin/which {0}".format(cmd)) + cmd_prefix = cmd.split(None, 1)[0] + which = ctx.shell_out("/usr/bin/which {0}".format(cmd_prefix)) except Exception: continue ret.add(which[0]) if which else None @@ -697,7 +698,7 @@ def cmd_and_pkg(broker): Attributes: COMMANDS (list): List of the specified commands that need to check the provider package. 
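        For illustration only (the package NVRs below are invented
        examples), the datasource output holds one ``command package``
        pair per line, e.g.::

            /usr/bin/java java-11-openjdk-11.0.9.11-2.el8_3.x86_64
            /usr/sbin/httpd httpd-2.4.37-30.el8.x86_64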
""" - COMMANDS = ['java'] + COMMANDS = ['java', 'httpd'] pkg_cmd = list() for cmd in _get_running_commands(broker, COMMANDS): pkg_cmd.append("{0} {1}".format(cmd, _get_package(broker, cmd))) From c1fce368bfbe0435da4c12ae134e0008dc314c08 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 17 Jun 2021 11:36:00 -0400 Subject: [PATCH 454/892] fix: disallow unregister in offline mode (#3105) Signed-off-by: Jeremy Crafts --- insights/client/config.py | 2 ++ insights/tests/client/test_config.py | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/insights/client/config.py b/insights/client/config.py index 888d6dce7..147300afc 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -691,6 +691,8 @@ def _validate_options(self): raise ValueError('Cannot run connection test in offline mode.') if self.checkin: raise ValueError('Cannot check in in offline mode.') + if self.unregister: + raise ValueError('Cannot unregister in offline mode.') if self.output_dir and self.output_file: raise ValueError('Specify only one: --output-dir or --output-file.') if self.output_dir == '': diff --git a/insights/tests/client/test_config.py b/insights/tests/client/test_config.py index b09915bdc..c28ec6954 100644 --- a/insights/tests/client/test_config.py +++ b/insights/tests/client/test_config.py @@ -170,6 +170,12 @@ def test_offline_disables_options(): with pytest.raises(ValueError): InsightsConfig(status=True, offline=True) + with pytest.raises(ValueError): + InsightsConfig(checkin=True, offline=True) + + with pytest.raises(ValueError): + InsightsConfig(unregister=True, offline=True) + # empty argv so parse_args isn't polluted with pytest arguments @patch('insights.client.config.sys.argv', [sys.argv[0]]) From da9c862c8379be857b0b1d7bc95607a605a5b956 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 17 Jun 2021 12:49:25 -0400 Subject: [PATCH 455/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index cdc4ee2af..8c00b2355 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1092,6 +1092,7 @@ "COMMAND", "auditd", "avahi", + "catalina.base", "ceilometer-poll", "chronyd", "cinder-volume", @@ -1136,6 +1137,7 @@ "pkla-check-auth", "pmcd", "pmie", + "postmaster", "radosgw", "redis-server", "rngd", @@ -1160,6 +1162,7 @@ "COMMAND", "STAP/8.2", "auditd", + "catalina.base", "ceilometer-poll", "ceph-osd", "chronyd", @@ -1207,6 +1210,7 @@ "pkla-check-auth", "pmcd", "pmie", + "postmaster", "ptp4l", "radosgw", "redis-server", @@ -1287,6 +1291,7 @@ "pmcd", "pmie", "postgres", + "postmaster", "radosgw", "redis-server", "rngd", @@ -1308,7 +1313,9 @@ "/usr/bin/openshift start master", "/usr/bin/openshift start node", "CMD", + "COMMAND", "auditd", + "catalina.base", "ceilometer-poll", "chronyd", "cinder-volume", @@ -1355,6 +1362,7 @@ "pkla-check-auth", "pmcd", "pmie", + "postmaster", "radosgw", "redis-server", "rngd", @@ -4328,5 +4336,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-05-26T13:40:04.964795" + "version": "2021-06-10T14:24:36.700805" } \ No newline at end of file From 60606c335bee19862fbdec13e92b89bfc232a1ff Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 22 Jun 2021 15:50:34 -0400 Subject: [PATCH 456/892] Fix 
attribute error in AllModProbe combiner (#3108) * Added a check that the type of the entry is a list before attempting to append. * Added duplicate blacklist entries to the test. * Fixes #3107 Signed-off-by: Ryan Blakley --- insights/combiners/modprobe.py | 2 +- insights/combiners/tests/test_modprobe.py | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py index a3fb596e4..97c6c6e53 100644 --- a/insights/combiners/modprobe.py +++ b/insights/combiners/modprobe.py @@ -82,7 +82,7 @@ def __init__(self, modprobe): if section not in self.data: self.data[section] = {} for name, value in sectdict.items(): - if name in self.data[section]: + if name in self.data[section] and type(self.data[section][name][0]) == list: # append to this module's value - should only # happen for aliases. self.data[section][name][0].append(value) diff --git a/insights/combiners/tests/test_modprobe.py b/insights/combiners/tests/test_modprobe.py index e15f0bd8e..ba12e03bb 100644 --- a/insights/combiners/tests/test_modprobe.py +++ b/insights/combiners/tests/test_modprobe.py @@ -5,6 +5,9 @@ MOD_OPTION_INFO = """ options ipv6 disable=1 install ipv6 /bin/true + +# Test duplicate entry. +blacklist vfat """ MOD_OPTION_INFO_PATH = "/etc/modprobe.d/ipv6.conf" @@ -38,6 +41,9 @@ alias alias scsi_hostadapter2 ata_piix failed comment balclkist ieee80211 + +# Test duplicate entry. +blacklist vfat """ MOD_COMPLETE_PATH = "/etc/modprobe.conf" @@ -59,6 +65,7 @@ def test_all_modprobe(): assert 'blacklist' in all_data assert all_data['blacklist'] == { 'i8xx_tco': ModProbeValue(value=True, source=MOD_COMPLETE_PATH), + 'vfat': ModProbeValue(value=True, source=MOD_OPTION_INFO_PATH), } assert 'install' in all_data From 3d67e938ae584f451085e70d1ef5d7b883e1f2bc Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 22 Jun 2021 14:52:43 -0500 Subject: [PATCH 457/892] Move cloud_cfg datasource into separate module (#3110) * Move this datasource out of default.py into its own module * Create tests for this datasource * Update documentation for all custom datasources Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- docs/api_index.rst | 8 ++ docs/custom_datasources_index.rst | 12 +++ docs/index.rst | 1 + insights/collect.py | 4 + insights/specs/datasources/__init__.py | 0 insights/specs/datasources/cloud_init.py | 94 +++++++++++++++++++ insights/specs/default.py | 69 +------------- insights/tests/datasources/__init__.py | 0 insights/tests/datasources/test_cloud_init.py | 83 ++++++++++++++++ insights/util/specs_catalog.py | 1 + 10 files changed, 205 insertions(+), 67 deletions(-) create mode 100644 docs/custom_datasources_index.rst create mode 100644 insights/specs/datasources/__init__.py create mode 100644 insights/specs/datasources/cloud_init.py create mode 100644 insights/tests/datasources/__init__.py create mode 100644 insights/tests/datasources/test_cloud_init.py diff --git a/docs/api_index.rst b/docs/api_index.rst index 61187c18a..ce4d689fa 100644 --- a/docs/api_index.rst +++ b/docs/api_index.rst @@ -193,3 +193,11 @@ insights.util :members: :show-inheritance: :undoc-members: + +insights +-------- + +.. 
automodule:: insights.collect + :members: default_manifest, collect + :show-inheritance: + :undoc-members: \ No newline at end of file diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst new file mode 100644 index 000000000..d0f215753 --- /dev/null +++ b/docs/custom_datasources_index.rst @@ -0,0 +1,12 @@ +.. _custom-datasources: + +Custom Datasources Catalog +========================== + +insights.specs.datasources.cloud_init +------------------------------------- + +.. automodule:: insights.specs.datasources.cloud_init + :members: cloud_cfg, LocalSpecs + :show-inheritance: + :undoc-members: \ No newline at end of file diff --git a/docs/index.rst b/docs/index.rst index aac7a31ff..613380abb 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -15,6 +15,7 @@ Contents: parsers_index combiners_index components_index + custom_datasources_index ocp shell docs_guidelines diff --git a/insights/collect.py b/insights/collect.py index e69d69e50..928abca36 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -78,6 +78,7 @@ # packages and modules to load packages: - insights.specs.default + - insights.specs.datasources # configuration of loaded components. names are prefixes, so any component with # a fully qualified name that starts with a key will get the associated @@ -85,6 +86,9 @@ # datasources. Can specify metadata, which must be a dictionary and will be # merged with the components' default metadata. configs: + - name: insights.specs.datasources + enabled: true + - name: insights.specs.Specs enabled: true diff --git a/insights/specs/datasources/__init__.py b/insights/specs/datasources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/specs/datasources/cloud_init.py b/insights/specs/datasources/cloud_init.py new file mode 100644 index 000000000..7065e7abe --- /dev/null +++ b/insights/specs/datasources/cloud_init.py @@ -0,0 +1,94 @@ +""" +Custom datasources for cloud initialization information +""" +import json +import yaml + +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider, simple_file +from insights.specs import Specs + + +class LocalSpecs(Specs): + """ Local specs used only by cloud_init datasources """ + + cloud_cfg_input = simple_file("/etc/cloud/cloud.cfg") + """ Returns the contents of the file ``/etc/cloud/cloud.cfg`` """ + + +@datasource(LocalSpecs.cloud_cfg_input, HostContext) +def cloud_cfg(broker): + """ + This datasource provides the network configuration information collected + from ``/etc/cloud/cloud.cfg``. 
+ + Typical content of ``/etc/cloud/cloud.cfg`` file is:: + + #cloud-config + users: + - name: demo + ssh-authorized-keys: + - key_one + - key_two + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + + network: + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + - type: dhcp6 + + system_info: + default_user: + name: user2 + plain_text_passwd: 'someP@assword' + home: /home/user2 + + debug: + output: /var/log/cloud-init-debug.log + verbose: true + + Note: + This datasource may be executed using the following command: + + ``insights cat --no-header cloud_cfg`` + + Sample data returned includes only the ``network`` portion of the input file in JSON format:: + + { + "version": 1, + "config": [ + { + "type": "physical", + "name": "eth0", + "subnets": [ + {"type": "dhcp"}, + {"type": "dhcp6"} + ] + } + ] + } + + Returns: + str: JSON string when the ``network`` parameter includes content, else `None` is returned. + + Raises: + SkipComponent: When the path does not exist or any exception occurs. + """ + relative_path = '/etc/cloud/cloud.cfg' + try: + content = broker[LocalSpecs.cloud_cfg_input].content + if content: + content = yaml.load('\n'.join(content), Loader=yaml.SafeLoader) + network_config = content.get('network', None) + if network_config: + return DatasourceProvider(content=json.dumps(network_config), relative_path=relative_path) + except Exception as e: + raise SkipComponent("Unexpected exception:{e}".format(e=str(e))) + + raise SkipComponent('No network section in yaml') diff --git a/insights/specs/default.py b/insights/specs/default.py index 4a8764989..de13196ac 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -11,15 +11,12 @@ import logging import os import re -import json import signal from grp import getgrgid from os import stat from pwd import getpwuid -import yaml - from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource @@ -38,6 +35,7 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs +from insights.specs.datasources import cloud_init import datetime @@ -231,70 +229,7 @@ def is_ceph_monitor(broker): cinder_api_log = first_file(["/var/log/containers/cinder/cinder-api.log", "/var/log/cinder/cinder-api.log"]) cinder_conf = first_file(["/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", "/etc/cinder/cinder.conf"]) cinder_volume_log = first_file(["/var/log/containers/cinder/volume.log", "/var/log/containers/cinder/cinder-volume.log", "/var/log/cinder/volume.log"]) - cloud_cfg_input = simple_file("/etc/cloud/cloud.cfg") - - @datasource(cloud_cfg_input, HostContext) - def cloud_cfg(broker): - """This datasource provides the network configuration collected - from ``/etc/cloud/cloud.cfg``. 
- - Typical content of ``/etc/cloud/cloud.cfg`` file is:: - - #cloud-config - users: - - name: demo - ssh-authorized-keys: - - key_one - - key_two - passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ - - network: - version: 1 - config: - - type: physical - name: eth0 - subnets: - - type: dhcp - - type: dhcp6 - - system_info: - default_user: - name: user2 - plain_text_passwd: 'someP@assword' - home: /home/user2 - - debug: - output: /var/log/cloud-init-debug.log - verbose: true - - Note: - This datasource may be executed using the following command: - - ``insights-cat --no-header cloud_cfg`` - - Example: - - ``{"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]}`` - - Returns: - str: JSON string when the ``network`` parameter is configure, else nothing is returned. - - Raises: - SkipComponent: When the path does not exist or any exception occurs. - """ - relative_path = '/etc/cloud/cloud.cfg' - try: - content = broker[DefaultSpecs.cloud_cfg_input].content - if content: - content = yaml.load('\n'.join(content), Loader=yaml.SafeLoader) - network_config = content.get('network', None) - if network_config: - return DatasourceProvider(content=json.dumps(network_config), relative_path=relative_path) - except Exception as e: - raise SkipComponent("Unexpected exception:{e}".format(e=str(e))) - - raise SkipComponent() - + cloud_cfg = cloud_init.cloud_cfg cloud_init_custom_network = simple_file("/etc/cloud/cloud.cfg.d/99-custom-networking.cfg") cloud_init_log = simple_file("/var/log/cloud-init.log") cluster_conf = simple_file("/etc/cluster/cluster.conf") diff --git a/insights/tests/datasources/__init__.py b/insights/tests/datasources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/insights/tests/datasources/test_cloud_init.py b/insights/tests/datasources/test_cloud_init.py new file mode 100644 index 000000000..3e02467b2 --- /dev/null +++ b/insights/tests/datasources/test_cloud_init.py @@ -0,0 +1,83 @@ +import json +import pytest + +from insights.core.dr import SkipComponent +from insights.core.spec_factory import DatasourceProvider, simple_file +from insights.specs.datasources.cloud_init import cloud_cfg, LocalSpecs + +CLOUD_CFG = """ +users: + - name: demo + ssh-authorized-keys: + - key_one + - key_two + +network: + version: 1 + config: + - type: physical + name: eth0 + subnets: + - type: dhcp + - type: dhcp6 +""".strip() + +CLOUD_CFG_NO_NETWORK = """ +users: + - name: demo + ssh-authorized-keys: + - key_one + - key_two +""".strip() + +CLOUD_CFG_BAD = """ +users + name -demo + ssh-authorized-keys + - key_one + - key_two +""".strip() + + +CLOUD_CFG_JSON = { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth0', + 'subnets': [ + {'type': 'dhcp'}, + {'type': 'dhcp6'} + ] + } + ] +} + +RELATIVE_PATH = '/etc/cloud/cloud.cfg' + + +def test_cloud_cfg(): + simple_file.content = CLOUD_CFG.splitlines() + broker = {LocalSpecs.cloud_cfg_input: simple_file} + result = cloud_cfg(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=json.dumps(CLOUD_CFG_JSON), relative_path=RELATIVE_PATH) + assert result.content == expected.content + assert result.relative_path == expected.relative_path + + +def test_cloud_cfg_bad(): + simple_file.content = CLOUD_CFG_BAD.splitlines() + broker = {LocalSpecs.cloud_cfg_input: simple_file} + with pytest.raises(SkipComponent) as e: + cloud_cfg(broker) + assert 
'Unexpected exception' in str(e) + + +def test_cloud_cfg_no_network(): + simple_file.content = CLOUD_CFG_NO_NETWORK.splitlines() + broker = {LocalSpecs.cloud_cfg_input: simple_file} + with pytest.raises(SkipComponent) as e: + cloud_cfg(broker) + assert 'No network section in yaml' in str(e) diff --git a/insights/util/specs_catalog.py b/insights/util/specs_catalog.py index 9c9dc4ae4..ebbec88d1 100644 --- a/insights/util/specs_catalog.py +++ b/insights/util/specs_catalog.py @@ -51,6 +51,7 @@ Some datasources are implemented as functions and each links to the details provided in the function specific documentation. Generally functions are used as a ``provider`` to other datasources to, for instance, get a list of running processes of a particular program. +Functions implemented as custom datasources can be found in :ref:`custom-datasources`. Python code that implements these datasources is located in the module :py:mod:`insights.specs.default`. The datasources each have a unique name in the From d5e480f40506c5e74bfd356b219d489dc1a26f9a Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 22 Jun 2021 20:59:00 -0500 Subject: [PATCH 458/892] Remove futures module from setup.py (#3114) * Futures is included in python3 but a separate module in python2 * Installing the python2 module when using python3 causes problems * Collect.py is the only place that futures was used, and it was only loaded if the client collection was run in parallel. This is not feature we currently use. * The parallel collection feature will still work with python3, but not with python2 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.py b/setup.py index 3f99b0417..22227bd56 100644 --- a/setup.py +++ b/setup.py @@ -61,7 +61,6 @@ def maybe_require(pkg): ]) develop = set([ - 'futures==3.0.5', 'wheel', ]) From 10e5d32535dae1eed4c6daf089f933995319941d Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Wed, 23 Jun 2021 10:01:52 +0800 Subject: [PATCH 459/892] Add parser ansible_tower_custom (#3102) * Add parser ansible_tower_custom Signed-off-by: jiazhang * Update test Signed-off-by: jiazhang * Update to use filter Signed-off-by: jiazhang * Update docstring Signed-off-by: jiazhang * Update content Signed-off-by: jiazhang * Update filter Signed-off-by: jiazhang * Update to use dict Signed-off-by: jiazhang * Update to glob_file Signed-off-by: jiazhang * Rename doc file Signed-off-by: jiazhang --- .../ansible_tower_settings.rst | 3 ++ insights/parsers/ansible_tower_settings.py | 45 +++++++++++++++++++ .../tests/test_ansible_tower_settings.py | 38 ++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 88 insertions(+) create mode 100644 docs/shared_parsers_catalog/ansible_tower_settings.rst create mode 100644 insights/parsers/ansible_tower_settings.py create mode 100644 insights/parsers/tests/test_ansible_tower_settings.py diff --git a/docs/shared_parsers_catalog/ansible_tower_settings.rst b/docs/shared_parsers_catalog/ansible_tower_settings.rst new file mode 100644 index 000000000..11b514d42 --- /dev/null +++ b/docs/shared_parsers_catalog/ansible_tower_settings.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.ansible_tower_settings + :members: + :show-inheritance: diff --git a/insights/parsers/ansible_tower_settings.py b/insights/parsers/ansible_tower_settings.py new file mode 100644 index 000000000..f7c659822 --- /dev/null +++ b/insights/parsers/ansible_tower_settings.py @@ -0,0 +1,45 @@ +""" +AnsibleTowerSettings - file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py`` +===================================================================================== +The AnsibleTowerSettings class parses the file ``/etc/tower/conf.d/*.py`` and +``/etc/tower/settings.py``. +""" +from insights import parser, get_active_lines, Parser +from insights.specs import Specs +from insights.parsers import SkipException + + +@parser(Specs.ansible_tower_settings) +class AnsibleTowerSettings(Parser, dict): + """ + Class for content of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``. + + Sample ``/etc/tower/conf.d/*.py`` file:: + + AWX_CLEANUP_PATHS = False + + Attributes: + data (dict): A dict of "key=value" from configuration file + + Raises: + SkipException: the file is empty or there is no valid content + + Examples:: + >>> type(conf) + + >>> conf['AWX_CLEANUP_PATHS'] + 'False' + """ + + def parse_content(self, content): + """Parse content of of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``""" + if not content: + raise SkipException("No Valid Configuration") + data = {} + for line in get_active_lines(content): + if "=" in line: + key, value = line.split("=") + data[key.strip()] = value.strip() + if not data: + raise SkipException("No Valid Configuration") + self.update(data) diff --git a/insights/parsers/tests/test_ansible_tower_settings.py b/insights/parsers/tests/test_ansible_tower_settings.py new file mode 100644 index 000000000..d0724f179 --- /dev/null +++ b/insights/parsers/tests/test_ansible_tower_settings.py @@ -0,0 +1,38 @@ +import doctest +import pytest +from insights.parsers import ansible_tower_settings, SkipException +from insights.tests import context_wrap + + +ANSIBLE_TOWER_CONFIG_CUSTOM = ''' +AWX_CLEANUP_PATHS = False +LOGGING['handlers']['tower_warnings']['level'] = 'DEBUG' +'''.strip() + +ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1 = ''' +'''.strip() + +ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2 = ''' +AWX_CLEANUP_PATHS +'''.strip() + + +def test_ansible_tower_settings(): + conf = ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM)) + assert conf['AWX_CLEANUP_PATHS'] == 'False' + + with pytest.raises(SkipException) as exc: + ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1)) + assert 'No Valid Configuration' in str(exc) + + with pytest.raises(SkipException) as exc: + ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2)) + assert 'No Valid Configuration' in str(exc) + + +def test_ansible_tower_settings_documentation(): + failed_count, tests = doctest.testmod( + ansible_tower_settings, + globs={'conf': ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))} + ) + assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index e99fa2352..990810365 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -19,6 +19,7 @@ class Specs(SpecSet): abrt_ccpp_conf = RegistryPoint(filterable=True) abrt_status_bare = RegistryPoint() alternatives_display_python = RegistryPoint() + ansible_tower_settings = 
RegistryPoint(filterable=True) amq_broker = RegistryPoint(multi_output=True) ansible_host = RegistryPoint() auditctl_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index de13196ac..68993f0d6 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -132,6 +132,7 @@ class DefaultSpecs(Specs): abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True") alternatives_display_python = simple_command("/usr/sbin/alternatives --display python") amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml") + ansible_tower_settings = glob_file(["/etc/tower/settings.py", "/etc/tower/conf.d/*.py"]) auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") From b49c941b6e553b7ae5b43707c52591a99dd95f82 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 23 Jun 2021 14:39:37 -0500 Subject: [PATCH 460/892] Fix spec that will collect multiple data items (#3116) --- insights/specs/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 990810365..2e64530ba 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -19,7 +19,7 @@ class Specs(SpecSet): abrt_ccpp_conf = RegistryPoint(filterable=True) abrt_status_bare = RegistryPoint() alternatives_display_python = RegistryPoint() - ansible_tower_settings = RegistryPoint(filterable=True) + ansible_tower_settings = RegistryPoint(filterable=True, multi_output=True) amq_broker = RegistryPoint(multi_output=True) ansible_host = RegistryPoint() auditctl_status = RegistryPoint() From c256856b140349cce718bf96a5efd203cf703b95 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 24 Jun 2021 21:20:05 +0800 Subject: [PATCH 461/892] Revert "Add parser ansible_tower_custom (#3102)" (#3117) This reverts commit 10e5d32535dae1eed4c6daf089f933995319941d b49c941b6e553b7ae5b43707c52591a99dd95f82 --- .../ansible_tower_settings.rst | 3 -- insights/parsers/ansible_tower_settings.py | 45 ------------------- .../tests/test_ansible_tower_settings.py | 38 ---------------- insights/specs/__init__.py | 1 - insights/specs/default.py | 1 - 5 files changed, 88 deletions(-) delete mode 100644 docs/shared_parsers_catalog/ansible_tower_settings.rst delete mode 100644 insights/parsers/ansible_tower_settings.py delete mode 100644 insights/parsers/tests/test_ansible_tower_settings.py diff --git a/docs/shared_parsers_catalog/ansible_tower_settings.rst b/docs/shared_parsers_catalog/ansible_tower_settings.rst deleted file mode 100644 index 11b514d42..000000000 --- a/docs/shared_parsers_catalog/ansible_tower_settings.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.ansible_tower_settings - :members: - :show-inheritance: diff --git a/insights/parsers/ansible_tower_settings.py b/insights/parsers/ansible_tower_settings.py deleted file mode 100644 index f7c659822..000000000 --- a/insights/parsers/ansible_tower_settings.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -AnsibleTowerSettings - file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py`` -===================================================================================== -The AnsibleTowerSettings class parses the file ``/etc/tower/conf.d/*.py`` and -``/etc/tower/settings.py``. 
-""" -from insights import parser, get_active_lines, Parser -from insights.specs import Specs -from insights.parsers import SkipException - - -@parser(Specs.ansible_tower_settings) -class AnsibleTowerSettings(Parser, dict): - """ - Class for content of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``. - - Sample ``/etc/tower/conf.d/*.py`` file:: - - AWX_CLEANUP_PATHS = False - - Attributes: - data (dict): A dict of "key=value" from configuration file - - Raises: - SkipException: the file is empty or there is no valid content - - Examples:: - >>> type(conf) - - >>> conf['AWX_CLEANUP_PATHS'] - 'False' - """ - - def parse_content(self, content): - """Parse content of of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``""" - if not content: - raise SkipException("No Valid Configuration") - data = {} - for line in get_active_lines(content): - if "=" in line: - key, value = line.split("=") - data[key.strip()] = value.strip() - if not data: - raise SkipException("No Valid Configuration") - self.update(data) diff --git a/insights/parsers/tests/test_ansible_tower_settings.py b/insights/parsers/tests/test_ansible_tower_settings.py deleted file mode 100644 index d0724f179..000000000 --- a/insights/parsers/tests/test_ansible_tower_settings.py +++ /dev/null @@ -1,38 +0,0 @@ -import doctest -import pytest -from insights.parsers import ansible_tower_settings, SkipException -from insights.tests import context_wrap - - -ANSIBLE_TOWER_CONFIG_CUSTOM = ''' -AWX_CLEANUP_PATHS = False -LOGGING['handlers']['tower_warnings']['level'] = 'DEBUG' -'''.strip() - -ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1 = ''' -'''.strip() - -ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2 = ''' -AWX_CLEANUP_PATHS -'''.strip() - - -def test_ansible_tower_settings(): - conf = ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM)) - assert conf['AWX_CLEANUP_PATHS'] == 'False' - - with pytest.raises(SkipException) as exc: - ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1)) - assert 'No Valid Configuration' in str(exc) - - with pytest.raises(SkipException) as exc: - ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2)) - assert 'No Valid Configuration' in str(exc) - - -def test_ansible_tower_settings_documentation(): - failed_count, tests = doctest.testmod( - ansible_tower_settings, - globs={'conf': ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))} - ) - assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 2e64530ba..e99fa2352 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -19,7 +19,6 @@ class Specs(SpecSet): abrt_ccpp_conf = RegistryPoint(filterable=True) abrt_status_bare = RegistryPoint() alternatives_display_python = RegistryPoint() - ansible_tower_settings = RegistryPoint(filterable=True, multi_output=True) amq_broker = RegistryPoint(multi_output=True) ansible_host = RegistryPoint() auditctl_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 68993f0d6..de13196ac 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -132,7 +132,6 @@ class DefaultSpecs(Specs): abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True") alternatives_display_python = simple_command("/usr/sbin/alternatives --display python") amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml") - 
ansible_tower_settings = glob_file(["/etc/tower/settings.py", "/etc/tower/conf.d/*.py"]) auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") From e29add328d24dd451d5100d6501e6c0d718471c3 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 24 Jun 2021 10:40:58 -0400 Subject: [PATCH 462/892] make copy of options when loading defaults into argparse (#3111) * make copy of options when loading defaults into argparse Signed-off-by: Jeremy Crafts --- insights/client/config.py | 5 ++++- insights/tests/client/test_config.py | 16 ++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/insights/client/config.py b/insights/client/config.py index 147300afc..c0c140229 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -573,7 +573,10 @@ def _load_command_line(self, conf_only=False): } cli_options = dict((k, v) for k, v in DEFAULT_OPTS.items() if ( 'opt' in v)) - for _, o in cli_options.items(): + for _, _o in cli_options.items(): + # cli_options contains references to DEFAULT_OPTS, so + # make a copy so we don't mutate DEFAULT_OPTS + o = copy.copy(_o) group_name = o.pop('group', None) if group_name is None: group = parser diff --git a/insights/tests/client/test_config.py b/insights/tests/client/test_config.py index c28ec6954..ef26c54b4 100644 --- a/insights/tests/client/test_config.py +++ b/insights/tests/client/test_config.py @@ -302,3 +302,19 @@ def test_core_collect_default(get_version_info): assert _core_collect_default() conf = InsightsConfig() assert conf.core_collect + + +@patch('insights.client.config.sys.argv', [sys.argv[0], "--status"]) +def test_command_line_parse_twice(): + ''' + Verify that running _load_command_line() twice does not + raise an argparse error. + + Previously would raise a SystemExit due to argparse not + being loaded with the correct options. + ''' + c = InsightsConfig() + c._load_command_line() + assert c.status + c._load_command_line() + assert c.status From 94dee664a2a7cbc17d59566234f954871d60bc51 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 24 Jun 2021 10:55:14 -0400 Subject: [PATCH 463/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 8c00b2355..0c1d164b8 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -2308,7 +2308,6 @@ "AH00485: scoreboard is full, not at MaxRequestWorkers", "ModSecurity: collections_remove_stale: Failed deleting collection", "Require ServerLimit > 0, setting to 1", - "Resource temporarily unavailable", "The mpm module (prefork.c) is not supported by mod_http2", "[crit] Memory allocation failed, aborting process", "and would exceed the ServerLimit value of ", @@ -4336,5 +4335,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-06-10T14:24:36.700805" + "version": "2021-06-17T12:53:13.800630" } \ No newline at end of file From 2891674d8e3defbad75164901689309a082a8729 Mon Sep 17 00:00:00 2001 From: Viliam Krizan Date: Thu, 24 Jun 2021 21:06:45 +0200 Subject: [PATCH 464/892] Compliance: Improve tmp handling (#3101) Reuse archive's temp dir for the oscap results. Use tempfile for tailoring file. 
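As a sketch of the new behaviour (mirroring the call added in
download_tailoring_file below; ref_id here stands for the profile's
attributes['ref_id'] value):

    import tempfile

    # mkstemp() returns an (fd, path) tuple; only the path is kept
    tailoring_file_path = tempfile.mkstemp(
        prefix='oscap_tailoring_file-{0}.'.format(ref_id),
        suffix='.xml',
        dir='/var/tmp'
    )[1]

Because mkstemp() creates the file with a unique name, concurrent scans
no longer race on a fixed /var/tmp path, and the temporary file is
removed with os.remove() once the scan has finished.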
Signed-off-by: Andrew Kofink Co-authored-by: Jeremy Crafts --- insights/client/apps/compliance/__init__.py | 25 +++++++++++++++---- insights/tests/client/apps/test_compliance.py | 6 ++--- 2 files changed, 22 insertions(+), 9 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 34b4a7c5f..812edfd54 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -6,8 +6,10 @@ from logging import getLogger from re import findall from sys import exit +import tempfile from insights.util.subproc import call import os +import os.path import six NONCOMPLIANT_STATUS = 2 @@ -32,13 +34,18 @@ def oscap_scan(self): if not profiles: logger.error("System is not associated with any profiles. Assign profiles using the Compliance web UI.\n") exit(constants.sig_kill_bad) + + archive_dir = self.archive.create_archive_dir() for profile in profiles: + tailoring_file = self.download_tailoring_file(profile) self.run_scan( profile['attributes']['ref_id'], self.find_scap_policy(profile['attributes']['ref_id']), - '/var/tmp/oscap_results-{0}.xml'.format(profile['attributes']['ref_id']), - tailoring_file_path=self.download_tailoring_file(profile) + self._results_file(archive_dir, profile), + tailoring_file_path=tailoring_file ) + if tailoring_file: + os.remove(tailoring_file) return self.archive.create_tar_file(), COMPLIANCE_CONTENT_TYPE @@ -51,7 +58,11 @@ def download_tailoring_file(self, profile): logger.debug( "Policy {0} is a tailored policy. Starting tailoring file download...".format(profile['attributes']['ref_id']) ) - tailoring_file_path = "/var/tmp/oscap_tailoring_file-{0}.xml".format(profile['attributes']['ref_id']) + tailoring_file_path = tempfile.mkstemp( + prefix='oscap_tailoring_file-{0}.'.format(profile['attributes']['ref_id']), + suffix='.xml', + dir='/var/tmp' + )[1] response = self.conn.session.get( "https://{0}/compliance/profiles/{1}/tailoring_file".format(self.config.base_url, profile['id']) ) @@ -138,8 +149,6 @@ def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path= logger.error('Scan failed') logger.error(oscap) exit(constants.sig_kill_bad) - else: - self.archive.copy_file(output_path) def _assert_oscap_rpms_exist(self): rpmcmd = 'rpm -qa ' + ' '.join(REQUIRED_PACKAGES) @@ -161,3 +170,9 @@ def _get_inventory_id(self): else: logger.error('Failed to find system in Inventory') exit(constants.sig_kill_bad) + + def _results_file(self, archive_dir, profile): + return os.path.join( + archive_dir, + 'oscap_results-{0}.xml'.format(profile['attributes']['ref_id']) + ) diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 708006396..6da8a803a 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -198,8 +198,7 @@ def test_tailored_file_is_not_downloaded_if_tailored_is_missing(config): def test_tailored_file_is_downloaded_from_initial_profile_if_os_minor_version_is_missing(config, call): compliance_client = ComplianceClient(config) compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) - tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" - assert tailoring_file_path == compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa'}}) + assert 'oscap_tailoring_file-aaaaa' in 
compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa'}}) assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa'}}) is None @@ -218,8 +217,7 @@ def test_tailored_file_is_not_downloaded_if_os_minor_version_mismatches(config, def test_tailored_file_is_downloaded_if_needed(config, call, os_release_info_mock): compliance_client = ComplianceClient(config) compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) - tailoring_file_path = "/var/tmp/oscap_tailoring_file-aaaaa.xml" - assert tailoring_file_path == compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa', 'os_minor_version': '5'}}) + assert 'oscap_tailoring_file-aaaaa' in compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': True, 'ref_id': 'aaaaa', 'os_minor_version': '5'}}) assert compliance_client.download_tailoring_file({'id': 'foo', 'attributes': {'tailored': False, 'ref_id': 'aaaaa', 'os_minor_version': '5'}}) is None From 8da5965cacb76c16a4ee86493a367315c9fa3064 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 29 Jun 2021 16:03:03 -0500 Subject: [PATCH 465/892] Fix test for cloud_init datasource (#3125) * Replace test simple_file obj with mock in tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/tests/datasources/test_cloud_init.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/insights/tests/datasources/test_cloud_init.py b/insights/tests/datasources/test_cloud_init.py index 3e02467b2..b3032cee4 100644 --- a/insights/tests/datasources/test_cloud_init.py +++ b/insights/tests/datasources/test_cloud_init.py @@ -1,8 +1,9 @@ import json import pytest +from mock.mock import Mock from insights.core.dr import SkipComponent -from insights.core.spec_factory import DatasourceProvider, simple_file +from insights.core.spec_factory import DatasourceProvider from insights.specs.datasources.cloud_init import cloud_cfg, LocalSpecs CLOUD_CFG = """ @@ -57,8 +58,9 @@ def test_cloud_cfg(): - simple_file.content = CLOUD_CFG.splitlines() - broker = {LocalSpecs.cloud_cfg_input: simple_file} + cloud_init_file = Mock() + cloud_init_file.content = CLOUD_CFG.splitlines() + broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} result = cloud_cfg(broker) assert result is not None assert isinstance(result, DatasourceProvider) @@ -68,16 +70,18 @@ def test_cloud_cfg(): def test_cloud_cfg_bad(): - simple_file.content = CLOUD_CFG_BAD.splitlines() - broker = {LocalSpecs.cloud_cfg_input: simple_file} + cloud_init_file = Mock() + cloud_init_file.content = CLOUD_CFG_BAD.splitlines() + broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} with pytest.raises(SkipComponent) as e: cloud_cfg(broker) assert 'Unexpected exception' in str(e) def test_cloud_cfg_no_network(): - simple_file.content = CLOUD_CFG_NO_NETWORK.splitlines() - broker = {LocalSpecs.cloud_cfg_input: simple_file} + cloud_init_file = Mock() + cloud_init_file.content = CLOUD_CFG_NO_NETWORK.splitlines() + broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} with pytest.raises(SkipComponent) as e: cloud_cfg(broker) assert 'No network section in yaml' in str(e) From 5681a6e7a3edc45983742184bd7916ecac20b9b4 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 30 Jun 2021 02:02:26 
-0500 Subject: [PATCH 466/892] Add new datasource and parser for ps eo pid,args (#3121) * Add new datasource and parser for ps * This new datasource/parser will enhance the ps combiner output by ensuring that we have the full command path but don't collect args that may include PII and sensitive info * Add new datasource for ps eo pid,args that removes args leaving the command only * Add new parser for the datasource * Update ps combiner to use new parser * Add tests and docs for both Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Move new ps command before filtered commands * Filtered ps commands can add args, so the new commmand that removes args needs to be added before the filtered commands Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- docs/custom_datasources_index.rst | 8 +++ insights/combiners/ps.py | 13 +++-- insights/combiners/tests/test_ps.py | 66 ++++++++++++++++++++----- insights/parsers/ps.py | 53 ++++++++++++++++---- insights/parsers/tests/test_ps.py | 44 ++++++++++++++++- insights/specs/__init__.py | 1 + insights/specs/datasources/ps.py | 77 +++++++++++++++++++++++++++++ insights/specs/default.py | 3 +- 8 files changed, 238 insertions(+), 27 deletions(-) create mode 100644 insights/specs/datasources/ps.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index d0f215753..3ab9a3735 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -9,4 +9,12 @@ insights.specs.datasources.cloud_init .. automodule:: insights.specs.datasources.cloud_init :members: cloud_cfg, LocalSpecs :show-inheritance: + :undoc-members: + +insights.specs.datasources.ps +----------------------------- + +.. automodule:: insights.specs.datasources.ps + :members: ps_eo_cmd, LocalSpecs + :show-inheritance: :undoc-members: \ No newline at end of file diff --git a/insights/combiners/ps.py b/insights/combiners/ps.py index fcbae73e2..72cdcd908 100644 --- a/insights/combiners/ps.py +++ b/insights/combiners/ps.py @@ -6,6 +6,7 @@ More specifically this consolidates data from :py:class:`insights.parsers.ps.PsEo`, :py:class:`insights.parsers.ps.PsAuxcww`, +:py:class:`insights.parsers.ps.PsEoCmd`, :py:class:`insights.parsers.ps.PsEf`, :py:class:`insights.parsers.ps.PsAux`, :py:class:`insights.parsers.ps.PsAuxww` and @@ -21,8 +22,8 @@ Examples: - >>> ps_combiner.pids - [1, 2, 3, 8, 9, 10, 11, 12] + >>> sorted(ps_combiner.pids) + [1, 2, 3, 8, 9, 10, 11, 12, 13] >>> '[kthreadd]' in ps_combiner.commands True >>> '[kthreadd]' in ps_combiner @@ -53,10 +54,10 @@ from insights.core.plugins import combiner from insights.parsers import keyword_search -from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf +from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf, PsEoCmd -@combiner([PsAlxwww, PsAuxww, PsAux, PsEf, PsAuxcww, PsEo]) +@combiner([PsAlxwww, PsAuxww, PsAux, PsEf, PsAuxcww, PsEo, PsEoCmd]) class Ps(object): """ ``Ps`` combiner consolidates data from the parsers in ``insights.parsers.ps`` module. 
@@ -97,7 +98,7 @@ class Ps(object): 'WCHAN': None } - def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo): + def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo, ps_eo_cmd): self._pid_data = {} # order of parsers is important here @@ -105,6 +106,8 @@ def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo): self.__update_data(ps_eo) if ps_auxcww: self.__update_data(ps_auxcww) + if ps_eo_cmd: + self.__update_data(ps_eo_cmd) if ps_ef: # mapping configurations to combine PsEf data mapping = { diff --git a/insights/combiners/tests/test_ps.py b/insights/combiners/tests/test_ps.py index 1d5e49b66..a48a44959 100644 --- a/insights/combiners/tests/test_ps.py +++ b/insights/combiners/tests/test_ps.py @@ -1,6 +1,6 @@ from insights.combiners import ps from insights.combiners.ps import Ps -from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf +from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf, PsEoCmd from insights.tests import context_wrap import doctest @@ -15,6 +15,17 @@ 10 2 rcu_sched """ +PS_EO_CMD_LINES = """ + PID COMMAND + 1 /usr/lib/systemd/systemd + 2 [kthreadd] + 3 [ksoftirqd/0] + 8 [migration/0] + 9 [rcu_bh] + 10 [rcu_sched] + 13 /usr/bin/python3.6 + """ + PS_AUXCWW_LINES = """ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND root 1 0.1 0.0 195712 7756 ? Ss 2019 477:10 systemd @@ -65,7 +76,7 @@ def test_pseo_parser(): ps_eo = PsEo(context_wrap(PS_EO_LINES, strip=False)) - ps = Ps(None, None, None, None, None, ps_eo) + ps = Ps(None, None, None, None, None, ps_eo, None) assert len(ps.processes) == 6 proc = ps[1] assert proc['USER'] is None @@ -78,7 +89,7 @@ def test_pseo_parser(): def test_pseo_and_psauxcww_parsers(): ps_eo = PsEo(context_wrap(PS_EO_LINES, strip=False)) ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) - ps = Ps(None, None, None, None, ps_auxcww, ps_eo) + ps = Ps(None, None, None, None, ps_auxcww, ps_eo, None) assert len(ps.processes) == 7 proc9 = ps[9] assert proc9['USER'] == 'root' @@ -96,7 +107,7 @@ def test_pseo_and_psauxcww_parsers(): def test_psef_parser(): ps_ef = PsEf(context_wrap(PS_EF_LINES)) - ps = Ps(None, None, None, ps_ef, None, None) + ps = Ps(None, None, None, ps_ef, None, None, None) len(ps.processes) == 6 proc = ps[1] assert proc.get('UID') is None @@ -112,7 +123,7 @@ def test_psef_parser(): def test_psauxcww_and_ps_ef_parsers(): ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) ps_ef = PsEf(context_wrap(PS_EF_LINES)) - ps = Ps(None, None, None, ps_ef, ps_auxcww, None) + ps = Ps(None, None, None, ps_ef, ps_auxcww, None, None) assert len(ps.processes) == 7 proc1 = ps[1] assert proc1['COMMAND'] == '/usr/lib/systemd/systemd --switched-root --system --deserialize 21' @@ -133,7 +144,7 @@ def test_psalxwww_and_psauxww_and_psaux_parsers(): ps_alxwww = PsAlxwww(context_wrap(PS_ALXWWW_LINES)) ps_auxww = PsAuxww(context_wrap(PS_AUXWW_LINES)) ps_aux = PsAux(context_wrap(PS_AUX_LINES)) - ps = Ps(ps_alxwww, ps_auxww, ps_aux, None, None, None) + ps = Ps(ps_alxwww, ps_auxww, ps_aux, None, None, None, None) len(ps.processes) == 5 ps = ps[1] assert ps['PID'] == 1 @@ -163,7 +174,7 @@ def test_psalxwww_and_psauxww_and_psaux_and_psef_and_psauxcww_and_ps_eo_parsers( ps_ef = PsEf(context_wrap(PS_EF_LINES)) ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) ps_eo = PsEo(context_wrap(PS_EO_LINES, strip=False)) - ps = Ps(ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo) + ps = Ps(ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo, None) len(ps.processes) == 
8 ps = ps[1] assert ps['PID'] == 1 @@ -190,7 +201,7 @@ def test_type_conversion(): ps_alxwww = PsAlxwww(context_wrap(PS_ALXWWW_LINES)) ps_ef = PsEf(context_wrap(PS_EF_LINES)) ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) - ps = Ps(ps_alxwww, None, None, ps_ef, ps_auxcww, None) + ps = Ps(ps_alxwww, None, None, ps_ef, ps_auxcww, None, None) assert all(isinstance(p['PID'], int) for p in ps.processes) assert all(p['UID'] is None or isinstance(p['UID'], int) for p in ps.processes) assert all(p['PID'] is None or isinstance(p['PID'], int) for p in ps.processes) @@ -203,7 +214,7 @@ def test_type_conversion(): def test_combiner_api(): ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) - ps = Ps(None, None, None, None, ps_auxcww, None) + ps = Ps(None, None, None, None, ps_auxcww, None, None) assert ps.pids == [1, 2, 3, 8, 9, 11] assert len(ps.processes) == 6 assert ps.processes[0] @@ -240,7 +251,8 @@ def test_docs(): ps_ef = PsEf(context_wrap(PS_EF_LINES)) ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) ps_eo = PsEo(context_wrap(PS_EO_LINES, strip=False)) - ps_combiner = Ps(ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo) + ps_eo_cmd = PsEoCmd(context_wrap(PS_EO_CMD_LINES, strip=False)) + ps_combiner = Ps(ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo, ps_eo_cmd) env = { 'ps_combiner': ps_combiner } @@ -266,5 +278,37 @@ def test_docs(): def test_search_ps_alxwww_w_grep(): p = PsAlxwww(context_wrap(PS_ALXWWW_W_GREP)) - ps = Ps(p, None, None, None, None, None) + ps = Ps(p, None, None, None, None, None, None) assert len(ps.search(COMMAND_NAME__contains='dbus')) == 1 + + +def test_psalxwww_and_psauxww_and_psaux_and_psef_and_psauxcww_and_ps_eo_cmd_parsers(): + ps_alxwww = PsAlxwww(context_wrap(PS_ALXWWW_LINES)) + ps_auxww = PsAuxww(context_wrap(PS_AUXWW_LINES)) + ps_aux = PsAux(context_wrap(PS_AUX_LINES)) + ps_ef = PsEf(context_wrap(PS_EF_LINES)) + ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_LINES)) + ps_eo_cmd = PsEoCmd(context_wrap(PS_EO_CMD_LINES, strip=False)) + ps_combiner = Ps(ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, None, ps_eo_cmd) + len(ps_combiner.processes) == 9 + ps = ps_combiner[1] + assert ps['PID'] == 1 + assert ps['USER'] == 'root' + assert ps['UID'] == 0 + assert ps['PPID'] == 0 + assert ps['%CPU'] == 0.1 + assert ps['%MEM'] == 0.0 + assert ps['VSZ'] == 195712.0 + assert ps['RSS'] == 7756.0 + assert ps['STAT'] == 'Ss' + assert ps['TTY'] == '?' + assert ps['START'] == '2019' + assert ps['TIME'] == '478:05' + assert ps['COMMAND'] == '/usr/lib/systemd/systemd --switched-root --system --deserialize 21' + assert ps['COMMAND_NAME'] == 'systemd' + assert ps['F'] == '4' + assert ps['PRI'] == 20 + assert ps['NI'] == '0' + assert ps['WCHAN'] == 'ep_pol' + + assert ps_combiner[13]['COMMAND'] == '/usr/bin/python3.6' diff --git a/insights/parsers/ps.py b/insights/parsers/ps.py index 29553efaf..bacc36514 100644 --- a/insights/parsers/ps.py +++ b/insights/parsers/ps.py @@ -33,6 +33,7 @@ class Ps(CommandParser): arguments. services (list): List of tuples in format (cmd names, user/uid/pid, raw_line) for each command. + pid_info (dict): Dictionary indexed by ``pid`` returning dict of process info. 
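+
+    A short sketch of ``pid_info`` access (``ps_obj`` and the PID value are
+    hypothetical; the available keys depend on which ``ps`` outputs were
+    parsed)::
+
+        row = ps_obj.pid_info['1']    # whole ps row for PID 1, as a dict
+        row.get('COMMAND')            # e.g. '/usr/lib/systemd/systemd ...'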
""" command_name = "COMMAND_TEMPLATE" @@ -56,6 +57,7 @@ def __init__(self, *args, **kwargs): self.running = set() self.cmd_names = set() self.services = [] + self.pid_info = {} super(Ps, self).__init__(*args, **kwargs) def parse_content(self, content): @@ -87,6 +89,10 @@ def parse_content(self, content): proc["ARGS"] = cmd.split(" ", 1)[1] if " " in cmd else "" self.services.append((cmd_name, proc[self.user_name], proc[raw_line_key])) del proc[raw_line_key] + + for row in self.data: + self.pid_info[row['PID']] = row + else: raise ParseException( "{0}: Cannot find ps header line containing {1} and {2} in output".format( @@ -353,9 +359,6 @@ class PsEo(Ps): 17259 2 kworker/0:0 18294 3357 sshd - Attributes: - pid_info(dict): Dictionary with PID as key containing ps row as a dict - Examples: >>> type(ps_eo) @@ -373,12 +376,6 @@ class PsEo(Ps): user_name = 'PID' max_splits = 3 - def parse_content(self, content): - super(PsEo, self).parse_content(content) - self.pid_info = {} - for row in self.data: - self.pid_info[row['PID']] = row - def children(self, ppid): """list: Returns a list of dict for all rows with `ppid` as parent PID""" return [row for row in self.data if row['PPID'] == ppid] @@ -426,3 +423,41 @@ class PsAlxwww(Ps): max_splits = 12 pass + + +@parser(Specs.ps_eo_cmd) +class PsEoCmd(Ps): + """ + Class to parse the command `ps -eo pid,args` where the + datasource `ps_eo_cmd` trims off all args leaving only the full + path to the command. + + Sample output from the ``ps -eo pid, args`` command:: + + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31 + 2 [kthreadd] + 11 /usr/bin/python3 /home/user1/pythonapp.py + 12 [kworker/u16:0-kcryptd/253:0] + + Sample data after trimming by the datasource:: + + PID COMMAND + 1 /usr/lib/systemd/systemd + 2 [kthreadd] + 11 /usr/bin/python3 + 12 [kworker/u16:0-kcryptd/253:0] + + Examples: + >>> type(ps_eo_cmd) + + >>> ps_eo_cmd.running_pids() == ['1', '2', '11', '12'] + True + >>> ps_eo_cmd.search(COMMAND__contains='python3') == [ + ... {'PID': '11', 'COMMAND': '/usr/bin/python3', 'COMMAND_NAME': 'python3', 'ARGS': ''} + ... ] + True + """ + command_name = 'COMMAND' + user_name = 'PID' + max_splits = 1 diff --git a/insights/parsers/tests/test_ps.py b/insights/parsers/tests/test_ps.py index bb73951e4..964f4fcd0 100644 --- a/insights/parsers/tests/test_ps.py +++ b/insights/parsers/tests/test_ps.py @@ -60,6 +60,14 @@ 1 0 8 2 20 0 0 0 rcu_gp S ? 
0:00 [rcu_bh] """ +PsEoCmd_TEST_DOC = """ + PID COMMAND + 1 /usr/lib/systemd/systemd + 2 [kthreadd] + 11 /usr/bin/python3 + 12 [kworker/u16:0-kcryptd/253:0] +""" + def test_doc_examples(): env = { @@ -67,7 +75,8 @@ def test_doc_examples(): 'ps_auxww': ps.PsAuxww(context_wrap(PsAuxww_TEST_DOC)), 'ps_ef': ps.PsEf(context_wrap(PsEf_TEST_DOC)), 'ps_eo': ps.PsEo(context_wrap(PsEo_TEST_DOC, strip=False)), - 'ps_alxwww': ps.PsAlxwww(context_wrap(PsAlxwww_TEST_DOC)) + 'ps_alxwww': ps.PsAlxwww(context_wrap(PsAlxwww_TEST_DOC)), + 'ps_eo_cmd': ps.PsEoCmd(context_wrap(PsEoCmd_TEST_DOC)) } failed, total = doctest.testmod(ps, globs=env) assert failed == 0 @@ -428,3 +437,36 @@ def test_ps_alxwww(): assert dbus_proc['COMMAND_NAME'] == 'dbus-daemon' assert dbus_proc['UID'] == '81' assert dbus_proc['ARGS'] == '--system --address=systemd: --nofork --nopidfile --systemd-activation' + +PS_EO_CMD_NORMAL = """ + PID COMMAND + 1 /usr/lib/systemd/systemd + 2 [kthreadd] + 3 [rcu_gp] + 93831 qmgr + 93838 tlsmgr +1221279 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-4.el8.x86_64/jre/bin/java +1221774 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-4.el8.x86_64/jre/bin/java +""" + + +def test_ps_eo_cmd(): + p = ps.PsEoCmd(context_wrap(PS_EO_CMD_NORMAL, strip=False)) + assert p is not None + assert len(p.running_pids()) == 7 + assert '93838' in p.pid_info + assert p.pid_info['1221279'] == { + 'PID': '1221279', 'COMMAND': '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.242.b08-4.el8.x86_64/jre/bin/java', 'COMMAND_NAME': 'java', 'ARGS': '' + } + assert p.pid_info['3'] == { + 'PID': '3', 'COMMAND': '[rcu_gp]', 'COMMAND_NAME': '[rcu_gp]', 'ARGS': '' + } + assert p.pid_info['93831'] == { + 'PID': '93831', 'COMMAND': 'qmgr', 'COMMAND_NAME': 'qmgr', 'ARGS': '' + } + + +def test_ps_eo_cmd_stripped(): + p = ps.PsEo(context_wrap(PS_EO_CMD_NORMAL, strip=True)) + assert p is not None + assert len(p.running_pids()) == 7 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index e99fa2352..f6bc1a892 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -511,6 +511,7 @@ class Specs(SpecSet): ps_auxww = RegistryPoint(filterable=True) ps_ef = RegistryPoint(filterable=True) ps_eo = RegistryPoint() + ps_eo_cmd = RegistryPoint() pulp_worker_defaults = RegistryPoint() puppet_ssl_cert_ca_pem = RegistryPoint() puppetserver_config = RegistryPoint(filterable=True) diff --git a/insights/specs/datasources/ps.py b/insights/specs/datasources/ps.py new file mode 100644 index 000000000..4baf54c8b --- /dev/null +++ b/insights/specs/datasources/ps.py @@ -0,0 +1,77 @@ +""" +Custom datasources for ps information +""" +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider, simple_command +from insights.specs import Specs + + +class LocalSpecs(Specs): + """ Local specs used only by ps datasources """ + + ps_eo_args = simple_command("/bin/ps -eo pid,args") + """ Returns ps output including pid and full args """ + + +@datasource(LocalSpecs.ps_eo_args, HostContext) +def ps_eo_cmd(broker): + """ + Custom datasource to collect the full paths to all running commands on the system + provided by the ``ps -eo pid,args`` command. After collecting the data, all of the + args are trimmed to leave only the command including full path. 
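+
+    Note:
+        This datasource may be executed using the following command:
+
+        ``insights cat --no-header ps_eo_cmd``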
+ + Sample output from the ``ps -eo pid, args`` command:: + + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31 + 2 [kthreadd] + 3 [rcu_gp] + 4 [rcu_par_gp] + 6 [kworker/0:0H-events_highpri] + 9 [mm_percpu_wq] + 10 [rcu_tasks_kthre] + 11 /usr/bin/python3 /home/user1/python_app.py + 12 [kworker/u16:0-kcryptd/253:0] + + This datasource trims off the args to minimize possible PII and sensitive information. + After trimming the data looks like this:: + + PID COMMAND + 1 /usr/lib/systemd/systemd + 2 [kthreadd] + 3 [rcu_gp] + 4 [rcu_par_gp] + 6 [kworker/0:0H-events_highpri] + 9 [mm_percpu_wq] + 10 [rcu_tasks_kthre] + 11 /usr/bin/python3 + 12 [kworker/u16:0-kcryptd/253:0] + + Returns: + str: Returns a multiline string in the same format as ``ps`` output + + Raises: + SkipComponent: Raised if no data is available + """ + content = broker[LocalSpecs.ps_eo_args].content + data = [] + data.append('PID COMMAND') + for l in content: + if 'PID' in l and 'COMMAND' in l: + start = True + continue + if not start: + continue + pid, args = l.strip().split(None, 1) + if ' ' in args: + cmd, _ = args.split(None, 1) + else: + cmd = args + data.append('{0} {1}'.format(pid, cmd)) + + if len(data) > 1: + return DatasourceProvider('\n'.join(data), relative_path='insights_commands/ps_eo_cmd') + + raise SkipComponent() diff --git a/insights/specs/default.py b/insights/specs/default.py index de13196ac..084dc7d81 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -35,7 +35,7 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs -from insights.specs.datasources import cloud_init +from insights.specs.datasources import cloud_init, ps as ps_datasource import datetime @@ -175,6 +175,7 @@ def is_azure(broker): ps_auxww = simple_command("/bin/ps auxww") ps_ef = simple_command("/bin/ps -ef") ps_eo = simple_command("/usr/bin/ps -eo pid,ppid,comm") + ps_eo_cmd = ps_datasource.ps_eo_cmd @datasource(ps_auxww, HostContext) def tomcat_base(broker): From 5c0caaee42bb94c86c853fdeb970d8804b7ec209 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 1 Jul 2021 03:19:38 +0800 Subject: [PATCH 467/892] Add parser ansible_tower_custom (#3102) (#3123) * Revert "Revert "Add parser ansible_tower_custom (#3102)" (#3117)" This reverts commit c256856b140349cce718bf96a5efd203cf703b95. Signed-off-by: jiazhang * Remove dict docstring Signed-off-by: jiazhang --- .../ansible_tower_settings.rst | 3 ++ insights/parsers/ansible_tower_settings.py | 42 +++++++++++++++++++ .../tests/test_ansible_tower_settings.py | 38 +++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 85 insertions(+) create mode 100644 docs/shared_parsers_catalog/ansible_tower_settings.rst create mode 100644 insights/parsers/ansible_tower_settings.py create mode 100644 insights/parsers/tests/test_ansible_tower_settings.py diff --git a/docs/shared_parsers_catalog/ansible_tower_settings.rst b/docs/shared_parsers_catalog/ansible_tower_settings.rst new file mode 100644 index 000000000..11b514d42 --- /dev/null +++ b/docs/shared_parsers_catalog/ansible_tower_settings.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.ansible_tower_settings
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/ansible_tower_settings.py b/insights/parsers/ansible_tower_settings.py
new file mode 100644
index 000000000..74dad34fa
--- /dev/null
+++ b/insights/parsers/ansible_tower_settings.py
@@ -0,0 +1,42 @@
+"""
+AnsibleTowerSettings - files ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``
+======================================================================================
+The AnsibleTowerSettings class parses the files ``/etc/tower/conf.d/*.py`` and
+``/etc/tower/settings.py``.
+"""
+from insights import parser, get_active_lines, Parser
+from insights.specs import Specs
+from insights.parsers import SkipException
+
+
+@parser(Specs.ansible_tower_settings)
+class AnsibleTowerSettings(Parser, dict):
+    """
+    Class for the content of the Ansible Tower configuration files
+    ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``.
+
+    Sample ``/etc/tower/conf.d/*.py`` file::
+
+        AWX_CLEANUP_PATHS = False
+
+    Raises:
+        SkipException: When the file is empty or there is no valid content.
+
+    Examples:
+        >>> type(conf)
+        <class 'insights.parsers.ansible_tower_settings.AnsibleTowerSettings'>
+        >>> conf['AWX_CLEANUP_PATHS']
+        'False'
+    """
+
+    def parse_content(self, content):
+        """Parse the content of the Ansible Tower configuration files
+        ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``."""
+        if not content:
+            raise SkipException("No Valid Configuration")
+        data = {}
+        for line in get_active_lines(content):
+            if "=" in line:
+                # split on the first "=" only, so values may themselves contain "="
+                key, value = line.split("=", 1)
+                data[key.strip()] = value.strip()
+        if not data:
+            raise SkipException("No Valid Configuration")
+        self.update(data)
diff --git a/insights/parsers/tests/test_ansible_tower_settings.py b/insights/parsers/tests/test_ansible_tower_settings.py
new file mode 100644
index 000000000..d0724f179
--- /dev/null
+++ b/insights/parsers/tests/test_ansible_tower_settings.py
@@ -0,0 +1,38 @@
+import doctest
+import pytest
+from insights.parsers import ansible_tower_settings, SkipException
+from insights.tests import context_wrap
+
+
+ANSIBLE_TOWER_CONFIG_CUSTOM = '''
+AWX_CLEANUP_PATHS = False
+LOGGING['handlers']['tower_warnings']['level'] = 'DEBUG'
+'''.strip()
+
+ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1 = '''
+'''.strip()
+
+ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2 = '''
+AWX_CLEANUP_PATHS
+'''.strip()
+
+
+def test_ansible_tower_settings():
+    conf = ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))
+    assert conf['AWX_CLEANUP_PATHS'] == 'False'
+
+    with pytest.raises(SkipException) as exc:
+        ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1))
+    assert 'No Valid Configuration' in str(exc)
+
+    with pytest.raises(SkipException) as exc:
+        ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2))
+    assert 'No Valid Configuration' in str(exc)
+
+
+def test_ansible_tower_settings_documentation():
+    failed_count, tests = doctest.testmod(
+        ansible_tower_settings,
+        globs={'conf': ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))}
+    )
+    assert failed_count == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index f6bc1a892..2c3f8af28 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -19,6 +19,7 @@ class Specs(SpecSet):
     abrt_ccpp_conf = RegistryPoint(filterable=True)
     abrt_status_bare = RegistryPoint()
     alternatives_display_python = RegistryPoint()
+    ansible_tower_settings = RegistryPoint(filterable=True, multi_output=True)
    amq_broker = 
RegistryPoint(multi_output=True) ansible_host = RegistryPoint() auditctl_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 084dc7d81..58003fc7a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -132,6 +132,7 @@ class DefaultSpecs(Specs): abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True") alternatives_display_python = simple_command("/usr/sbin/alternatives --display python") amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml") + ansible_tower_settings = glob_file(["/etc/tower/settings.py", "/etc/tower/conf.d/*.py"]) auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") From 0a772066572c816e69dc3a7e88a432fd55405f8a Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 30 Jun 2021 14:24:38 -0500 Subject: [PATCH 468/892] Add testing for ps datasource (#3129) * Add tests for the new ps datasource * Fix problem with possible leading lines before the headings in ps datasource Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/datasources/ps.py | 1 + insights/tests/datasources/test_ps.py | 70 +++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 insights/tests/datasources/test_ps.py diff --git a/insights/specs/datasources/ps.py b/insights/specs/datasources/ps.py index 4baf54c8b..8daba2a12 100644 --- a/insights/specs/datasources/ps.py +++ b/insights/specs/datasources/ps.py @@ -58,6 +58,7 @@ def ps_eo_cmd(broker): content = broker[LocalSpecs.ps_eo_args].content data = [] data.append('PID COMMAND') + start = False for l in content: if 'PID' in l and 'COMMAND' in l: start = True diff --git a/insights/tests/datasources/test_ps.py b/insights/tests/datasources/test_ps.py new file mode 100644 index 000000000..c28066beb --- /dev/null +++ b/insights/tests/datasources/test_ps.py @@ -0,0 +1,70 @@ +import pytest +from mock.mock import Mock + +from insights.core.dr import SkipComponent +from insights.core.spec_factory import DatasourceProvider +from insights.specs.datasources.ps import ps_eo_cmd, LocalSpecs + +PS_DATA = """ +PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 31 + 2 [kthreadd] + 3 [rcu_gp] + 4 [rcu_par_gp] + 6 [kworker/0:0H-events_highpri] + 9 [mm_percpu_wq] + 10 [rcu_tasks_kthre] + 11 /usr/bin/python3 /home/user1/python_app.py + 12 [kworker/u16:0-kcryptd/253:0] +""" + +PS_EXPECTED = """ +PID COMMAND +1 /usr/lib/systemd/systemd +2 [kthreadd] +3 [rcu_gp] +4 [rcu_par_gp] +6 [kworker/0:0H-events_highpri] +9 [mm_percpu_wq] +10 [rcu_tasks_kthre] +11 /usr/bin/python3 +12 [kworker/u16:0-kcryptd/253:0] +""" + +PS_BAD = "Command not found" + +PS_EMPTY = """ +PID COMMAND +""" + +RELATIVE_PATH = 'insights_commands/ps_eo_cmd' + + +def test_ps_eo_cmd(): + ps_eo_args = Mock() + ps_eo_args.content = PS_DATA.splitlines() + broker = {LocalSpecs.ps_eo_args: ps_eo_args} + result = ps_eo_cmd(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=PS_EXPECTED.strip(), relative_path=RELATIVE_PATH) + assert result.content == expected.content + assert result.relative_path == expected.relative_path + + +def test_ps_eo_cmd_bad(): + ps_eo_args = Mock() + ps_eo_args.content = PS_BAD.splitlines() + broker = {LocalSpecs.ps_eo_args: ps_eo_args} + with pytest.raises(SkipComponent) as e: + ps_eo_cmd(broker) + assert e is not None + + 
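+# A sketch, not part of the original patch: the `start` flag added above is
+# there to skip any noise printed before the PID/COMMAND header, so a case
+# like this should still parse. The test name and junk line are hypothetical.
+def test_ps_eo_cmd_leading_junk():
+    ps_eo_args = Mock()
+    ps_eo_args.content = ['some noise before the header'] + PS_DATA.strip().splitlines()
+    broker = {LocalSpecs.ps_eo_args: ps_eo_args}
+    result = ps_eo_cmd(broker)
+    assert result.content[0] == 'PID COMMAND'
+
+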
+def test_ps_eo_cmd_empty():
+    ps_eo_args = Mock()
+    ps_eo_args.content = PS_EMPTY.splitlines()
+    broker = {LocalSpecs.ps_eo_args: ps_eo_args}
+    with pytest.raises(SkipComponent) as e:
+        ps_eo_cmd(broker)
+    assert e is not None

From 1ac284cb9f7b23cce1bcd2e475b0ffc95069a5b3 Mon Sep 17 00:00:00 2001
From: Lloyd
Date: Wed, 30 Jun 2021 15:32:23 -0500
Subject: [PATCH 469/892] Removed insights.specs.openshift package directory,
 which includes the __init__.py and default.py files (#3120)

- Removed Openshift SpecSet class definition in insights.specs.__init__.py
- Removed OpenShiftContext class from insights.core.context

Signed-off-by: Lloyd Huett
---
 insights/core/context.py             |   6 --
 insights/specs/__init__.py           |  14 ---
 insights/specs/openshift/__init__.py | 132 ---------------------
 insights/specs/openshift/default.py  |  30 ------
 4 files changed, 182 deletions(-)
 delete mode 100644 insights/specs/openshift/__init__.py
 delete mode 100644 insights/specs/openshift/default.py

diff --git a/insights/core/context.py b/insights/core/context.py
index f8e828019..b14110ac7 100644
--- a/insights/core/context.py
+++ b/insights/core/context.py
@@ -281,9 +281,3 @@ class OpenStackContext(ExecutionContext):
     def __init__(self, hostname):
         super(OpenStackContext, self).__init__()
         self.hostname = hostname
-
-
-class OpenShiftContext(ExecutionContext):
-    def __init__(self, hostname):
-        super(OpenShiftContext, self).__init__()
-        self.hostname = hostname
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 2c3f8af28..27e2a45cd 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -1,20 +1,6 @@
 from insights.core.spec_factory import SpecSet, RegistryPoint
 
 
-class Openshift(SpecSet):
-    cluster_operators = RegistryPoint(raw=True)
-    crds = RegistryPoint(raw=True)
-    crs = RegistryPoint(raw=True, multi_output=True)
-    machine_configs = RegistryPoint(raw=True)
-    machines = RegistryPoint(raw=True)
-    machine_id = RegistryPoint(raw=True)  # stand in for system id
-    namespaces = RegistryPoint(raw=True)
-    nodes = RegistryPoint(raw=True)
-    pods = RegistryPoint(raw=True)
-    pvcs = RegistryPoint(raw=True)
-    storage_classes = RegistryPoint(raw=True)
-
-
 class Specs(SpecSet):
diff --git a/insights/specs/openshift/__init__.py b/insights/specs/openshift/__init__.py
deleted file mode 100644
index 3478d0e59..000000000
--- a/insights/specs/openshift/__init__.py
+++ /dev/null
@@ -1,132 +0,0 @@
-import json
-import logging
-import os
-
-from kubernetes import config
-from kubernetes.client import ApiClient, Configuration
-from openshift.dynamic import DynamicClient
-
-from insights.core.context import ExecutionContext
-from insights.core.plugins import datasource
-from insights.core.serde import deserializer, serializer
-from insights.core.spec_factory import ContentProvider, SerializedRawOutputProvider
-from insights.util import fs
-
-log = logging.getLogger(__name__)
-
-
-class GVK(object):
-    def __init__(self, kind, api_version="v1", kwargs=None):
-        self.kind = kind
-        self.api_version = api_version
-        self.kwargs = kwargs or {}
-
-
-class OpenshiftOutputProvider(ContentProvider):
-    def __init__(self, client, kind=None, api_version=None, **kwargs):
-        super(OpenshiftOutputProvider, self).__init__()
-        self.kind = kind
-        self.api_version = api_version
-        name = "%s/%s" % (self.api_version, self.kind)
-        self.gvk = name.split("/")
-        self.relative_path = name
-        self.root = "/"
-        self.kwargs = kwargs
-        self.k8s = client.k8s
-
- def load(self): - return self.k8s.resources.get(kind=self.kind, api_version=self.api_version).get(serialize=False, **self.kwargs).data - - def write(self, dst): - fs.ensure_path(os.path.dirname(dst)) - with open(dst, "wb") as f: - f.write(self.content) - - # we're done with it if we're writing it down. - # reset _content so we don't build up memory - self.loaded = False - self._content = None - - -@serializer(OpenshiftOutputProvider) -def serialize_openshift_output(obj, root): - rel = os.path.join("k8s", *obj.gvk) - dst = os.path.join(root, rel) - fs.ensure_path(os.path.dirname(dst)) - obj.write(dst) - return {"relative_path": rel} - - -@deserializer(OpenshiftOutputProvider) -def deserialize_openshift_output(_type, data, root): - rel = data["relative_path"] - res = SerializedRawOutputProvider(rel, root) - return res - - -class OpenshiftContext(ExecutionContext): - pass - - -class OpenshiftClient(object): - def __init__(self, ctx=None, cfg=None): - cfg = cfg or os.environ.get("KUBECONFIG") - if cfg: - k8s_client = config.new_client_from_config(cfg) - else: - config.load_incluster_config() # makes a singleton config behind the scenes - k8cfg = Configuration() # gets a copy from what was populated in the line above - # NOTE this is required due to https://github.com/openshift/origin/issues/22125 - k8cfg.verify_ssl = False - k8s_client = ApiClient(configuration=k8cfg) # this should use the singleton produced above - self.k8s = DynamicClient(k8s_client) # stole this from config.new_client_from_config - - -client = OpenshiftClient() - - -class resource(object): - client_kwargs = None - timeout = None - - def __init__(self, kind, api_version="v1", **kwargs): - # encode group into the api_version string if necessary - self.static_kwargs = kwargs - self.kind = kind - self.api_version = api_version - self.__name__ = self.__class__.__name__ - datasource(OpenshiftContext)(self) - - def __call__(self, broker): - # allow manifest to override what's in the resource definition - ctx = broker[OpenshiftContext] - timeout = self.timeout or ctx.timeout - kwargs = dict(self.client_kwargs if self.client_kwargs is not None else self.static_kwargs) - if timeout: - kwargs["timeout_seconds"] = timeout - return OpenshiftOutputProvider(client, kind=self.kind, api_version=self.api_version, **kwargs) - - -class foreach_resource(object): - client_kwargs = None - timeout = None - - def __init__(self, dep, func): - self.dep = dep - self.func = func - self.__name__ = self.__class__.__name__ - datasource(OpenshiftContext, dep)(self) - - def __call__(self, broker): - # allow manifest to override what's in the resource definition - ctx = broker[OpenshiftContext] - timeout = self.timeout or ctx.timeout - crds = broker[self.dep] - _lst = [] - for crd in json.loads(crds.content)["items"]: - gvk = self.func(crd) - kwargs = dict(self.client_kwargs if self.client_kwargs is not None else gvk.kwargs) - if timeout: - kwargs["timeout_seconds"] = timeout - _lst.append(OpenshiftOutputProvider(client, kind=gvk.kind, api_version=gvk.api_version, **kwargs)) - return _lst diff --git a/insights/specs/openshift/default.py b/insights/specs/openshift/default.py deleted file mode 100644 index 25d35dc51..000000000 --- a/insights/specs/openshift/default.py +++ /dev/null @@ -1,30 +0,0 @@ -import json -from insights import specs, datasource -from insights.core.spec_factory import DatasourceProvider -from . 
import GVK, foreach_resource, resource - - -def cr_gvk(crd): - group = crd["spec"]["group"] - version = crd["spec"]["version"] - kind = crd["spec"]["names"]["kind"] - return GVK(kind=kind, api_version="%s/%s" % (group, version)) - - -class OpenshiftSpecsImpl(specs.Openshift): - cluster_operators = resource(kind="ClusterOperator", api_version="config.openshift.io/v1") - crds = resource(kind="CustomResourceDefinition", api_version="apiextensions.k8s.io/v1beta1") - crs = foreach_resource(crds, cr_gvk) - machine_configs = resource(kind="MachineConfig", api_version="machineconfiguration.openshift.io/v1") - machines = resource(kind="Machine", api_version="machine.openshift.io/v1beta1") - namespaces = resource(kind="Namespace") - nodes = resource(kind="Node") - pods = resource(kind="Pod", field_selector="status.phase!=Succeeded") - pvcs = resource(kind="PersistentVolumeClaim") - storage_classes = resource(kind="StorageClass") - - @datasource(namespaces) - def machine_id(broker): - doc = json.loads(broker[OpenshiftSpecsImpl.namespaces].content) - v = next(o["metadata"]["uid"] for o in doc["items"] if o["metadata"]["name"] == "kube-system") - return DatasourceProvider(content=v, relative_path="/etc/insights-client/machine-id") From ac0ca88fc3f789949a3d7f0114f998d8322117ae Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 1 Jul 2021 20:28:12 +0800 Subject: [PATCH 470/892] Refactor is_aws, is_azure, is_gcp to Component (#3126) * Refactor is_aws, is_azure, is_gcp to Components Signed-off-by: Xiangce Liu * Fix typo Signed-off-by: Xiangce Liu * simplify the conditions Signed-off-by: Xiangce Liu * Add the components to the default collecting list Signed-off-by: Xiangce Liu --- insights/collect.py | 10 ++++ insights/components/cloud_provider.py | 59 +++++++++++++++++++ .../components/tests/test_cloud_provider.py | 27 +++++++++ insights/specs/default.py | 41 +++---------- 4 files changed, 103 insertions(+), 34 deletions(-) create mode 100644 insights/components/cloud_provider.py create mode 100644 insights/components/tests/test_cloud_provider.py diff --git a/insights/collect.py b/insights/collect.py index 928abca36..568094eb8 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -123,6 +123,16 @@ - name: insights.combiners.cloud_provider enabled: true + # needed for the cloud related specs + - name: insights.components.cloud_provider.IsAWS + enabled: true + + - name: insights.components.cloud_provider.IsAzure + enabled: true + + - name: insights.components.cloud_provider.IsGCP + enabled: true + # needed for the Services combiner - name: insights.parsers.chkconfig enabled: true diff --git a/insights/components/cloud_provider.py b/insights/components/cloud_provider.py new file mode 100644 index 000000000..76eb4039e --- /dev/null +++ b/insights/components/cloud_provider.py @@ -0,0 +1,59 @@ +""" +Components identify Cloud Provider +=================================== + +The ``Is*`` component is valid if the +:py:class:`insights.combiners.cloud_provider.CloudProvider` combiner indicates +the host is from the specific Cloud Provider. Otherwise, it raises a +:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from +executing. + +""" +from insights.core.dr import SkipComponent +from insights.core.plugins import component +from insights.combiners.cloud_provider import CloudProvider + + +@component(CloudProvider) +class IsAWS(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. 
+ It checks if AWS, if not AWS it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from AWS. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.AWS: + raise SkipComponent("Not AWS instance") + + +@component(CloudProvider) +class IsAzure(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. + It checks if Azure, if not Azure it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from Azure. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.AZURE: + raise SkipComponent("Not Azure instance") + + +@component(CloudProvider) +class IsGCP(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. + It checks if Google Cloud Platform (GCP), if not GCP it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from GCP. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.GOOGLE: + raise SkipComponent("Not Google Cloud Platform instance") diff --git a/insights/components/tests/test_cloud_provider.py b/insights/components/tests/test_cloud_provider.py new file mode 100644 index 000000000..450cf1af3 --- /dev/null +++ b/insights/components/tests/test_cloud_provider.py @@ -0,0 +1,27 @@ +from insights.tests import context_wrap +from insights.parsers.dmidecode import DMIDecode +from insights.combiners.cloud_provider import CloudProvider +from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP +from insights.combiners.tests.test_cloud_provider import ( + DMIDECODE_AWS, DMIDECODE_GOOGLE, DMIDECODE_AZURE_ASSET_TAG) + + +def test_is_aws(): + dmi = DMIDecode(context_wrap(DMIDECODE_AWS)) + cp = CloudProvider(None, dmi, None, None) + result = IsAWS(cp) + assert isinstance(result, IsAWS) + + +def test_is_azure(): + dmi = DMIDecode(context_wrap(DMIDECODE_AZURE_ASSET_TAG)) + cp = CloudProvider(None, dmi, None, None) + result = IsAzure(cp) + assert isinstance(result, IsAzure) + + +def test_is_gcp(): + dmi = DMIDecode(context_wrap(DMIDECODE_GOOGLE)) + cp = CloudProvider(None, dmi, None, None) + result = IsGCP(cp) + assert isinstance(result, IsGCP) diff --git a/insights/specs/default.py b/insights/specs/default.py index 58003fc7a..39963060d 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -25,11 +25,11 @@ from insights.core.spec_factory import first_of, command_with_args from insights.core.spec_factory import foreach_collect, foreach_execute from insights.core.spec_factory import first_file, listdir -from insights.combiners.cloud_provider import CloudProvider from insights.combiners.services import Services from insights.combiners.sap import Sap from insights.combiners.ps import Ps from insights.components.rhel_version import IsRhel8, IsRhel7, IsRhel6 +from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion @@ -138,29 +138,11 @@ class DefaultSpecs(Specs): audit_log = simple_file("/var/log/audit/audit.log") avc_hash_stats = simple_file("/sys/fs/selinux/avc/hash_stats") avc_cache_threshold = simple_file("/sys/fs/selinux/avc/cache_threshold") - - @datasource(CloudProvider, HostContext) - def is_aws(broker): - """ bool: Returns True if this node is identified as running in AWS """ - cp = broker[CloudProvider] - if cp 
and cp.cloud_provider == CloudProvider.AWS: - return True - raise SkipComponent() - - aws_instance_id_doc = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/document --connect-timeout 5", deps=[is_aws]) - aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[is_aws]) + aws_instance_id_doc = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/document --connect-timeout 5", deps=[IsAWS]) + aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[IsAWS]) awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license") - - @datasource(CloudProvider, HostContext) - def is_azure(broker): - """ bool: Returns True if this node is identified as running in Azure """ - cp = broker[CloudProvider] - if cp and cp.cloud_provider == CloudProvider.AZURE: - return True - raise SkipComponent() - - azure_instance_type = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2018-10-01&format=text --connect-timeout 5", deps=[is_azure]) - azure_instance_plan = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json --connect-timeout 5", deps=[is_azure]) + azure_instance_type = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2018-10-01&format=text --connect-timeout 5", deps=[IsAzure]) + azure_instance_plan = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json --connect-timeout 5", deps=[IsAzure]) bios_uuid = simple_command("/usr/sbin/dmidecode -s system-uuid") blkid = simple_command("/sbin/blkid -c /dev/null") bond = glob_file("/proc/net/bonding/bond*") @@ -362,17 +344,8 @@ def gfs2_mount_points(broker): gluster_v_info = simple_command("/usr/sbin/gluster volume info") gnocchi_conf = first_file(["/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", "/etc/gnocchi/gnocchi.conf"]) gnocchi_metricd_log = first_file(["/var/log/containers/gnocchi/gnocchi-metricd.log", "/var/log/gnocchi/metricd.log"]) - - @datasource(CloudProvider, HostContext) - def is_gcp(broker): - """ bool: Returns True if this node is identified as running in GCP """ - cp = broker[CloudProvider] - if cp and cp.cloud_provider == CloudProvider.GOOGLE: - return True - raise SkipComponent() - - gcp_instance_type = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/machine-type --connect-timeout 5", deps=[is_gcp]) - gcp_license_codes = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[is_gcp]) + gcp_instance_type = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/machine-type --connect-timeout 5", deps=[IsGCP]) + gcp_license_codes = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[IsGCP]) greenboot_status = simple_command("/usr/libexec/greenboot/greenboot-status") grub_conf = 
simple_file("/boot/grub/grub.conf") grub_config_perms = simple_command("/bin/ls -l /boot/grub2/grub.cfg") # only RHEL7 and updwards From 9d9db53a29e5913ba6d49e9a707849c5fd532732 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 1 Jul 2021 20:58:25 +0800 Subject: [PATCH 471/892] Add parser for spec "/etc/candlepin/broker.xml" (#3099) * Add spec "/etc/candlepin/broker.xml" and the parser Signed-off-by: Huanhuan Li * Update import statement and spec location Signed-off-by: Huanhuan Li * Replace simple_file with Mock Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 10 +- .../candlepin_broker.rst | 3 + insights/parsers/candlepin_broker.py | 29 +++ .../parsers/tests/test_candlepin_broker.py | 134 ++++++++++++++ insights/specs/__init__.py | 1 + .../specs/datasources/candlepin_broker.py | 77 ++++++++ insights/specs/default.py | 3 +- .../datasources/test_candlepin_broker.py | 174 ++++++++++++++++++ 8 files changed, 429 insertions(+), 2 deletions(-) create mode 100644 docs/shared_parsers_catalog/candlepin_broker.rst create mode 100644 insights/parsers/candlepin_broker.py create mode 100644 insights/parsers/tests/test_candlepin_broker.py create mode 100644 insights/specs/datasources/candlepin_broker.py create mode 100644 insights/tests/datasources/test_candlepin_broker.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 3ab9a3735..1b7855e6d 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -17,4 +17,12 @@ insights.specs.datasources.ps .. automodule:: insights.specs.datasources.ps :members: ps_eo_cmd, LocalSpecs :show-inheritance: - :undoc-members: \ No newline at end of file + :undoc-members: + +insights.specs.datasources.candlepin_broker +------------------------------------------- + +.. automodule:: insights.specs.datasources.candlepin_broker + :members: candlepin_broker, LocalSpecs + :show-inheritance: + :undoc-members: diff --git a/docs/shared_parsers_catalog/candlepin_broker.rst b/docs/shared_parsers_catalog/candlepin_broker.rst new file mode 100644 index 000000000..1333d3bd3 --- /dev/null +++ b/docs/shared_parsers_catalog/candlepin_broker.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.candlepin_broker + :members: + :show-inheritance: diff --git a/insights/parsers/candlepin_broker.py b/insights/parsers/candlepin_broker.py new file mode 100644 index 000000000..254c755bf --- /dev/null +++ b/insights/parsers/candlepin_broker.py @@ -0,0 +1,29 @@ +""" +CandlepinBrokerXML - file ``/etc/candlepin/broker.xml`` +======================================================= +""" + +from insights.core import XMLParser +from insights import parser +from insights.specs import Specs + + +@parser(Specs.candlepin_broker) +class CandlepinBrokerXML(XMLParser): + """ + Parse the ``/etc/candlepin/broker.xml`` file. + + .. 
note:: + Please refer to its super-class :class:`insights.core.XMLParser` + + Examples: + >>> type(broker) + + >>> page_dirs = broker.get_elements('.//paging-directory') + >>> page_dirs[0].text + '/var/lib/candlepin/activemq-artemis/paging' + >>> usage_ele = broker.dom.find('.//max-disk-usage') + >>> usage_ele.text + '99' + """ + pass diff --git a/insights/parsers/tests/test_candlepin_broker.py b/insights/parsers/tests/test_candlepin_broker.py new file mode 100644 index 000000000..186e46781 --- /dev/null +++ b/insights/parsers/tests/test_candlepin_broker.py @@ -0,0 +1,134 @@ +from insights.parsers import candlepin_broker +from insights.parsers.candlepin_broker import CandlepinBrokerXML +from insights.tests import context_wrap +import doctest + + +CANDLEPIN_BROKER_1 = """ + + + + + true + + + + + + + + + + + + + + + + + + + + + + true + NIO + 1 + true + true + /var/lib/candlepin/activemq-artemis/bindings + /var/lib/candlepin/activemq-artemis/journal + /var/lib/candlepin/activemq-artemis/largemsgs + /var/lib/candlepin/activemq-artemis/paging + 99 + +
+ + + + +
+ +
+ + + +
+ +
+ + + +
+
+ + + + FORCE + true + 10485760 + + + 1048576 + + + 30000 + 3600000 + 2 + 0 + + + + false + 10485760 + + + 1048576 + + 0 + 1 + + + true + 10485760 + + + 1048576 + + + 30000 + 3600000 + 2 + 0 + + + + + + false +
event.default
+ + katello.candlepin +
+
+
+
+""".strip() + + +def test_candlepin_broker(): + broker = CandlepinBrokerXML(context_wrap(CANDLEPIN_BROKER_1)) + page_dirs = broker.get_elements('.//paging-directory') + assert len(page_dirs) == 1 + assert page_dirs[0].text == '/var/lib/candlepin/activemq-artemis/paging' + usage_eles = broker.get_elements('.//max-disk-usage') + assert len(usage_eles) == 1 + assert usage_eles[0].text == '99' + + +def test_catalina_log_doc_examples(): + env = { + 'broker': CandlepinBrokerXML(context_wrap(CANDLEPIN_BROKER_1)), + } + failed, total = doctest.testmod(candlepin_broker, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 27e2a45cd..0dbd48378 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -27,6 +27,7 @@ class Specs(SpecSet): boot_loader_entries = RegistryPoint(multi_output=True) branch_info = RegistryPoint(raw=True) brctl_show = RegistryPoint() + candlepin_broker = RegistryPoint() candlepin_error_log = RegistryPoint(filterable=True) candlepin_log = RegistryPoint(filterable=True) cdc_wdm = RegistryPoint() diff --git a/insights/specs/datasources/candlepin_broker.py b/insights/specs/datasources/candlepin_broker.py new file mode 100644 index 000000000..c40b1df98 --- /dev/null +++ b/insights/specs/datasources/candlepin_broker.py @@ -0,0 +1,77 @@ +""" +Custom datasources for candlepin broker.xml +""" +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider, simple_file +from insights.specs import Specs +from insights.specs.default import SatelliteVersion +from insights.core import ET + + +class LocalSpecs(Specs): + """ Local specs used only by candlepin_broker datasources """ + + candlepin_broker_input = simple_file("/etc/candlepin/broker.xml") + """ Returns the contents of the file ``/etc/candlepin/broker.xml`` """ + + +@datasource(LocalSpecs.candlepin_broker_input, HostContext, SatelliteVersion) +def candlepin_broker(broker): + """ + This datasource provides the candlepn broker configuration information + collected from ``/etc/candlepin/broker.xml``. + + Typical content of ``/etc/candlepin/broker.xml`` file is:: + + + + + vm://0 + tcp://localhost:61613?protocols=STOMP;useEpoll=false;sslEnabled=true;trustStorePath=/etc/candlepin/certs/truststore;trustStorePassword=CDX9i3K5uPPBzcNtzz5tcycVf5PuXA5w;keyStorePath=/etc/candlepin/certs/keystore;keyStorePassword=4iBpTS45VZjFmVdNzRhRKNXtxbsH5Dij;needClientAuth=true + + true + + + + Note: + This datasource may be executed using the following command: + + ``insights cat --no-header candlepin_broker`` + + Returns: + str: XML string after removeing sensitive information. + + Raises: + SkipComponent: When the path does not exist or any exception occurs. 
+ """ + + relative_path = '/etc/candlepin/broker.xml' + try: + content = broker[LocalSpecs.candlepin_broker_input].content + if content: + root = ET.fromstring('\n'.join(content)) + # remove namespace before save to avoid urgly search + for node in root.getiterator(): + prefix, has_namespace, postfix = node.tag.rpartition('}') + if has_namespace: + node.tag = postfix + # remove sensitive data + core_ele = root.find('core') + passwd_ele = core_ele.find('cluster-password') + if passwd_ele is not None: + core_ele.remove(passwd_ele) + acc_ele = core_ele.find('acceptors') + if acc_ele: + core_ele.remove(acc_ele) + return DatasourceProvider( + content=[line for line in ET.tostring(root).decode('utf-8').splitlines() if line.strip()], + relative_path=relative_path + ) + except Exception as e: + raise SkipComponent("Unexpected exception:{e}".format(e=str(e))) + raise SkipComponent() diff --git a/insights/specs/default.py b/insights/specs/default.py index 39963060d..93bb4dfee 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -35,7 +35,7 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs -from insights.specs.datasources import cloud_init, ps as ps_datasource +from insights.specs.datasources import cloud_init, ps as ps_datasource, candlepin_broker import datetime @@ -150,6 +150,7 @@ class DefaultSpecs(Specs): boot_loader_entries = glob_file("/boot/loader/entries/*.conf") branch_info = simple_file("/branch_info", kind=RawFileProvider) brctl_show = simple_command("/usr/sbin/brctl show") + candlepin_broker = candlepin_broker.candlepin_broker candlepin_log = simple_file("/var/log/candlepin/candlepin.log") cgroups = simple_file("/proc/cgroups") ps_alxwww = simple_command("/bin/ps alxwww") diff --git a/insights/tests/datasources/test_candlepin_broker.py b/insights/tests/datasources/test_candlepin_broker.py new file mode 100644 index 000000000..b34190740 --- /dev/null +++ b/insights/tests/datasources/test_candlepin_broker.py @@ -0,0 +1,174 @@ +import pytest +from mock.mock import Mock + +from insights.core.spec_factory import DatasourceProvider +from insights.core.dr import SkipComponent +from insights.specs.datasources.candlepin_broker import candlepin_broker, LocalSpecs + + +CANDLEPIN_BROKER = """ + + + + + vm://0 + tcp://localhost:61613?protocols=STOMP;useEpoll=false;sslEnabled=true;trustStorePath=/etc/candlepin/certs/truststore;trustStorePassword=CDX9i3K5uPPBzcNtzz5tcycVf5PuXA5w;keyStorePath=/etc/candlepin/certs/keystore;keyStorePassword=4iBpTS45VZjFmVdNzRhRKNXtxbsH5Dij;needClientAuth=true + + + true + + + + + + + + + + + + + + + + + + + + + + + + +""".strip() + +CANDLEPIN_BROKER_NO_SENSITIVE_INFO = """ + + + + true + + + + + + + + + + + + + + + + + + + + + + + +""".strip() + +CANDLEPIN_BROKER_BAD = """ + + + +""".strip() + + +CANDLEPIN_BROKER_XML = """ + + + true + + + + + + + + + + + + + + + + + + + + +""".strip() + + +CANDLE_BROKER_NO_SENTISVE_INFO = """ + + + true + + + + + + + + + + + + + + + + + + + + +""".strip() + +RELATIVE_PATH = '/etc/candlepin/broker.xml' + + +def test_candlepin_broker(): + candlepin_broker_file = Mock() + candlepin_broker_file.content = CANDLEPIN_BROKER.splitlines() + broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} + result = candlepin_broker(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=CANDLEPIN_BROKER_XML.splitlines(), relative_path=RELATIVE_PATH) 
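+    # CANDLEPIN_BROKER_XML is the input with the cluster-password element and
+    # the acceptors block stripped out by the datasource.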
+ assert result.content == expected.content + assert result.relative_path == expected.relative_path + + +def test_candlepin_broker_bad(): + candlepin_broker_file = Mock() + candlepin_broker_file.content = CANDLEPIN_BROKER_BAD.splitlines() + broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} + with pytest.raises(SkipComponent) as e: + candlepin_broker(broker) + assert 'Unexpected exception' in str(e) + + +def test_candlpin_broker_no_sensitive_info(): + candlepin_broker_file = Mock() + candlepin_broker_file.content = CANDLEPIN_BROKER_NO_SENSITIVE_INFO.splitlines() + broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} + result = candlepin_broker(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=CANDLE_BROKER_NO_SENTISVE_INFO.splitlines(), relative_path=RELATIVE_PATH) + assert result.content == expected.content + assert result.relative_path == expected.relative_path From 2a00adc9bf0f66cda907630faaa8b86e9a047808 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 1 Jul 2021 12:52:52 -0400 Subject: [PATCH 472/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 52 +++++++++++++++++++++----- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 0c1d164b8..d60fde106 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -159,6 +159,7 @@ "BIOS Hyper-V UEFI Release", "Brought up ", "CIFS VFS: protocol revalidation - security settings mismatch", + "CQE error - vendor syndrome", "CSUM", "CVE-2017-1000364", "CVE-2018-14634", @@ -198,7 +199,6 @@ "page allocation failure: order:", "resetting", "smpboot: CPU ", - "temperature above threshold", "the DIE domain not a subset of the NUMA domain", "tx hang", "vmxnet3", @@ -1092,6 +1092,7 @@ "COMMAND", "auditd", "avahi", + "backupserver", "catalina.base", "ceilometer-poll", "chronyd", @@ -1100,6 +1101,10 @@ "cmirrord", "corosync", "crmd", + "dataserver", + "diagbs", + "diagserver", + "diagxps", "dlm_controld", "dnsmasq", "docker", @@ -1109,10 +1114,12 @@ "gnome-shell", "haproxy", "heat-engine", + "histserver", "httpd", "libvirtd", "memcached", "mongdb", + "monserver", "multipath", "multipathd", "neutron-dhcp-ag", @@ -1147,7 +1154,8 @@ "snmpd", "spausedd", "swift-proxy-ser", - "tuned" + "tuned", + "xpserver" ], "symbolic_name": "ps_alxwww" }, @@ -1162,6 +1170,7 @@ "COMMAND", "STAP/8.2", "auditd", + "backupserver", "catalina.base", "ceilometer-poll", "ceph-osd", @@ -1171,6 +1180,10 @@ "cmirrord", "corosync", "crmd", + "dataserver", + "diagbs", + "diagserver", + "diagxps", "dlm_controld", "docker", "docker-runc-current", @@ -1179,10 +1192,12 @@ "gnome-shell", "haproxy", "heat-engine", + "histserver", "httpd", "libvirtd", "memcached", "mongdb", + "monserver", "multipath", "multipathd", "mysqld", @@ -1206,12 +1221,10 @@ "pacemaker-controld", "pacemaker_remote", "pacemakerd", - "phc2sys", "pkla-check-auth", "pmcd", "pmie", "postmaster", - "ptp4l", "radosgw", "redis-server", "rngd", @@ -1221,7 +1234,8 @@ "snmpd", "spausedd", "swift-proxy-ser", - "tuned" + "tuned", + "xpserver" ], "symbolic_name": "ps_aux" }, @@ -1242,6 +1256,7 @@ "/usr/sbin/fcoemon --syslog", "COMMAND", "auditd", + "backupserver", "catalina.base", "ceilometer-poll", "chronyd", @@ -1250,6 +1265,10 @@ "cmirrord", "corosync", "crmd", + "dataserver", + "diagbs", + "diagserver", + "diagxps", 
"dlm_controld", "docker", "docker-runc-current", @@ -1260,11 +1279,13 @@ "greenplum", "haproxy", "heat-engine", + "histserver", "httpd", "iscsid", "libvirtd", "memcached", "mongdb", + "monserver", "multipath", "multipathd", "neutron-dhcp-ag", @@ -1303,7 +1324,8 @@ "swift-proxy-ser", "target_completi", "tgtd", - "tuned" + "tuned", + "xpserver" ], "symbolic_name": "ps_auxww" }, @@ -1315,6 +1337,7 @@ "CMD", "COMMAND", "auditd", + "backupserver", "catalina.base", "ceilometer-poll", "chronyd", @@ -1323,6 +1346,10 @@ "cmirrord", "corosync", "crmd", + "dataserver", + "diagbs", + "diagserver", + "diagxps", "dlm_controld", "docker", "docker-runc-current", @@ -1331,10 +1358,12 @@ "gnome-shell", "haproxy", "heat-engine", + "histserver", "httpd", "libvirtd", "memcached", "mongdb", + "monserver", "multipath", "multipathd", "neutron-dhcp-ag", @@ -1372,7 +1401,8 @@ "snmpd", "spausedd", "swift-proxy-ser", - "tuned" + "tuned", + "xpserver" ], "symbolic_name": "ps_ef" }, @@ -2493,6 +2523,7 @@ { "file": "/etc/lvm/lvm.conf", "pattern": [ + "auto_activation_volume_list", "filter", "locking_type", "use_lvmetad", @@ -3088,14 +3119,14 @@ { "file": "/var/log/pacemaker.log", "pattern": [ - "pcmk_dbus_find_error" + "is active on 2 nodes (attempting recovery)" ], "symbolic_name": "pacemaker_log" }, { "file": "/var/log/pacemaker/pacemaker.log", "pattern": [ - "pcmk_dbus_find_error" + "is active on 2 nodes (attempting recovery)" ], "symbolic_name": "pacemaker_log" }, @@ -3771,6 +3802,7 @@ "Ssl_Tlsv1_1", "Ssl_Tlsv1_2", "local_enable", + "session_support", "ssl_enable", "ssl_sslv2", "ssl_sslv3", @@ -4335,5 +4367,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-06-17T12:53:13.800630" + "version": "2021-06-24T11:02:05.835577" } \ No newline at end of file From 98a54b684b3d684bd3e7bdb17e2d29822e58f44e Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 2 Jul 2021 20:49:57 +0800 Subject: [PATCH 473/892] Add doc entry for component cloud_provider (#3131) Signed-off-by: Xiangce Liu --- docs/shared_components_catalog/cloud_provider.rst | 3 +++ insights/components/cloud_provider.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 docs/shared_components_catalog/cloud_provider.rst diff --git a/docs/shared_components_catalog/cloud_provider.rst b/docs/shared_components_catalog/cloud_provider.rst new file mode 100644 index 000000000..a7a276769 --- /dev/null +++ b/docs/shared_components_catalog/cloud_provider.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.cloud_provider + :members: + :show-inheritance: diff --git a/insights/components/cloud_provider.py b/insights/components/cloud_provider.py index 76eb4039e..6b99ea1dd 100644 --- a/insights/components/cloud_provider.py +++ b/insights/components/cloud_provider.py @@ -2,7 +2,7 @@ Components identify Cloud Provider =================================== -The ``Is*`` component is valid if the +The ``Is*`` component in this module is valid if the :py:class:`insights.combiners.cloud_provider.CloudProvider` combiner indicates the host is from the specific Cloud Provider. 
Otherwise, it raises a :py:class:`insights.core.dr.SkipComponent` to prevent dependent components from From 56bed377dc45d3e09ae81b69aee34138a3554f9d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 2 Jul 2021 21:02:02 +0800 Subject: [PATCH 474/892] Change is_ceph_monitor as Component IsCephMonitor (#3132) * Change is_ceph_monitor as Component IsCephMonitor Signed-off-by: Xiangce Liu * Add test and coverage test Signed-off-by: Xiangce Liu --- docs/shared_components_catalog/ceph.rst | 3 +++ insights/collect.py | 4 +++ insights/components/ceph.py | 28 ++++++++++++++++++++ insights/components/tests/test_ceph.py | 35 +++++++++++++++++++++++++ insights/specs/default.py | 12 ++------- 5 files changed, 72 insertions(+), 10 deletions(-) create mode 100644 docs/shared_components_catalog/ceph.rst create mode 100644 insights/components/ceph.py create mode 100644 insights/components/tests/test_ceph.py diff --git a/docs/shared_components_catalog/ceph.rst b/docs/shared_components_catalog/ceph.rst new file mode 100644 index 000000000..59de0ca85 --- /dev/null +++ b/docs/shared_components_catalog/ceph.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.ceph + :members: + :show-inheritance: diff --git a/insights/collect.py b/insights/collect.py index 568094eb8..7451ce237 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -133,6 +133,10 @@ - name: insights.components.cloud_provider.IsGCP enabled: true + # needed for the ceph related specs + - name: insights.components.ceph.IsCephMonitor + enabled: true + # needed for the Services combiner - name: insights.parsers.chkconfig enabled: true diff --git a/insights/components/ceph.py b/insights/components/ceph.py new file mode 100644 index 000000000..246db9a3d --- /dev/null +++ b/insights/components/ceph.py @@ -0,0 +1,28 @@ +""" +Component identifies Ceph Monitor +================================= + +The ``Is*`` component in this module is valid if the +:py:class:`insights.combiners.ps.Ps` combiner indicates +the host is a Ceph monitor. Otherwise, it raises a +:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from +executing. + +""" +from insights.core.dr import SkipComponent +from insights.core.plugins import component +from insights.combiners.ps import Ps + + +@component(Ps) +class IsCephMonitor(object): + """ + This component uses ``Ps`` combiner to determine if the host is a Ceph + monitor or not. If not Ceph monitor, it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not a Ceph monitor. + """ + def __init__(self, ps): + if not ps.search(COMMAND_NAME__contains='ceph-mon'): + raise SkipComponent("Not Ceph Monitor") diff --git a/insights/components/tests/test_ceph.py b/insights/components/tests/test_ceph.py new file mode 100644 index 000000000..e053f4e5e --- /dev/null +++ b/insights/components/tests/test_ceph.py @@ -0,0 +1,35 @@ +import pytest +from insights import SkipComponent +from insights.tests import context_wrap +from insights.parsers.ps import PsAuxcww +from insights.combiners.ps import Ps +from insights.components.ceph import IsCephMonitor + +PsAuxcww_CEPH = """ +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 init +user2 20164 0.0 0.0 108472 1896 pts/5 Ss 10:10 0:00 bash +root 20357 0.0 0.0 9120 832 ? Ss 10:09 0:00 dhclient +root 22673 0.6 10.7 1618556 840452 ? Sl 11:38 1:31 ceph-mon +vdsm 27323 98.0 11.3 9120 987 ? 
Ss 10.01 1:31 vdsm +""".strip() + +PsAuxcww_NO_CEPH = """ +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 init +user2 20164 0.0 0.0 108472 1896 pts/5 Ss 10:10 0:00 bash +root 20357 0.0 0.0 9120 832 ? Ss 10:09 0:00 dhclient +vdsm 27323 98.0 11.3 9120 987 ? Ss 10.01 1:31 vdsm +""".strip() + + +def test_is_ceph_monitor(): + ps_auxcww = PsAuxcww(context_wrap(PsAuxcww_CEPH)) + ps = Ps(None, None, None, None, ps_auxcww, None, None) + result = IsCephMonitor(ps) + assert isinstance(result, IsCephMonitor) + + ps_auxcww = PsAuxcww(context_wrap(PsAuxcww_NO_CEPH)) + ps = Ps(None, None, None, None, ps_auxcww, None, None) + with pytest.raises(SkipComponent): + IsCephMonitor(ps) diff --git a/insights/specs/default.py b/insights/specs/default.py index 93bb4dfee..c8b34032f 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -30,6 +30,7 @@ from insights.combiners.ps import Ps from insights.components.rhel_version import IsRhel8, IsRhel7, IsRhel6 from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP +from insights.components.ceph import IsCephMonitor from insights.parsers.mdstat import Mdstat from insights.parsers.lsmod import LsMod from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion @@ -189,16 +190,7 @@ def tomcat_base(broker): ceph_conf = first_file(["/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", "/etc/ceph/ceph.conf"]) ceph_df_detail = simple_command("/usr/bin/ceph df detail -f json") ceph_health_detail = simple_command("/usr/bin/ceph health detail -f json") - - @datasource(Ps, HostContext) - def is_ceph_monitor(broker): - """ bool: Returns True if ceph monitor process ceph-mon is running on this node """ - ps = broker[Ps] - if ps.search(COMMAND__contains='ceph-mon'): - return True - raise SkipComponent() - - ceph_insights = simple_command("/usr/bin/ceph insights", deps=[is_ceph_monitor]) + ceph_insights = simple_command("/usr/bin/ceph insights", deps=[IsCephMonitor]) ceph_log = glob_file(r"var/log/ceph/ceph.log*") ceph_osd_dump = simple_command("/usr/bin/ceph osd dump -f json") ceph_osd_ec_profile_ls = simple_command("/usr/bin/ceph osd erasure-code-profile ls") From d4bed19a777b132b556858ea325a57c8f521f0de Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Fri, 2 Jul 2021 21:26:23 +0800 Subject: [PATCH 475/892] New spec "ipcs -s -i xxx" (#3127) * New spec "ipcs -s -i xxx" Signed-off-by: Huanhuan Li * Add title, rename file, use spec instead of parser Signed-off-by: Huanhuan Li * Raise SkipComponent if there is no semid Signed-off-by: Huanhuan Li * Use "insights.specs.Specs.ipcs_s" directly Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 8 +++++ insights/specs/datasources/ipcs.py | 40 ++++++++++++++++++++++ insights/specs/default.py | 3 +- insights/tests/datasources/test_ipcs.py | 45 +++++++++++++++++++++++++ 4 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 insights/specs/datasources/ipcs.py create mode 100644 insights/tests/datasources/test_ipcs.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 1b7855e6d..49154a0f4 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -26,3 +26,11 @@ insights.specs.datasources.candlepin_broker :members: candlepin_broker, LocalSpecs :show-inheritance: :undoc-members: + +insights.specs.datasources.ipcs +------------------------------- + +.. 
automodule:: insights.specs.datasources.ipcs + :members: semid + :show-inheritance: + :undoc-members: diff --git a/insights/specs/datasources/ipcs.py b/insights/specs/datasources/ipcs.py new file mode 100644 index 000000000..e4954f283 --- /dev/null +++ b/insights/specs/datasources/ipcs.py @@ -0,0 +1,40 @@ +""" +Custom datasources to get the semid of all the inter-processes. +""" + +from insights.core.context import HostContext +from insights.core.plugins import datasource +from insights.specs import Specs +from insights.core.dr import SkipComponent + + +@datasource(Specs.ipcs_s, HostContext) +def semid(broker): + """ + This datasource provides a list of the semid of all the inter-processes. + + Note: + This datasource may be executed using the following command: + + ``insights cat --no-header ipcs_s_i`` + + Sample output:: + + [ + '65570', '98353', '98354' + ] + + Returns: + list: A list of the semid of all the inter-processes. + """ + content = broker[Specs.ipcs_s].content + results = set() + for s in content: + s_splits = s.split() + # key semid owner perms nsems + # 0x00000000 65536 apache 600 1 + if len(s_splits) == 5 and s_splits[1].isdigit(): + results.add(s_splits[1]) + if results: + return list(results) + raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index c8b34032f..434ec9f95 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -36,7 +36,7 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs -from insights.specs.datasources import cloud_init, ps as ps_datasource, candlepin_broker +from insights.specs.datasources import cloud_init, ps as ps_datasource, candlepin_broker, ipcs import datetime @@ -415,6 +415,7 @@ def httpd_cmd(broker): ipcs_m = simple_command("/usr/bin/ipcs -m") ipcs_m_p = simple_command("/usr/bin/ipcs -m -p") ipcs_s = simple_command("/usr/bin/ipcs -s") + ipcs_s_i = foreach_execute(ipcs.semid, "/usr/bin/ipcs -s -i %s") ipsec_conf = simple_file("/etc/ipsec.conf") iptables = simple_command("/sbin/iptables-save") iptables_permanent = simple_file("etc/sysconfig/iptables") diff --git a/insights/tests/datasources/test_ipcs.py b/insights/tests/datasources/test_ipcs.py new file mode 100644 index 000000000..a3a5f9b06 --- /dev/null +++ b/insights/tests/datasources/test_ipcs.py @@ -0,0 +1,45 @@ +import pytest +from mock.mock import Mock + +from insights.specs.datasources import ipcs +from insights.core.dr import SkipComponent +from insights.specs import Specs + + +IPCS_OUTPUT1 = """ + +------ Semaphore Arrays -------- +key semid owner perms nsems +0x00000000 65570 apache 600 1 +0x00000000 98353 apache 600 1 +0x00000000 98354 apache 600 1 +0x00000000 98355 apache 600 1 +0x00000000 98356 apache 600 1 +0x00000000 98357 apache 600 1 +""".strip() + +IPCS_OUTPUT2 = """ + +------ Semaphore Arrays -------- +key semid owner perms nsems +""".strip() + + +def test_semid(): + ipcs_command = Mock() + ipcs_command.content = IPCS_OUTPUT1.splitlines() + broker = {Specs.ipcs_s: ipcs_command} + result = ipcs.semid(broker) + assert result is not None + assert isinstance(result, list) + assert '65570' in result + assert '98357' in result + assert len(result) == 6 + + +def test_exception(): + ipcs_command = Mock() + ipcs_command.content = IPCS_OUTPUT2.splitlines() + broker = {Specs.ipcs_s: ipcs_command} + with pytest.raises(SkipComponent): + ipcs.semid(broker) From d71fb9173d796cd88740f9586b7ea19c8b03b72b Mon Sep 17 00:00:00 2001 
From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Sun, 4 Jul 2021 20:43:55 -0500 Subject: [PATCH 476/892] Update datasource package_provides_command (#3119) * Update datasource package_provides_command * Move datasource to separate module * Update to fix issues in collecting data * Add testing for datasource and functions * Update docs Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Fix doc build errors Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Fix tests for ps parser and rebase Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- docs/custom_datasources_index.rst | 38 +++-- insights/specs/__init__.py | 2 +- insights/specs/datasources/__init__.py | 48 ++++++ .../specs/datasources/package_provides.py | 80 +++++++++ insights/specs/default.py | 77 +-------- .../datasources/test_get_running_commands.py | 127 +++++++++++++++ .../datasources/test_package_provides.py | 153 ++++++++++++++++++ insights/tests/test_specs.py | 11 -- 8 files changed, 441 insertions(+), 95 deletions(-) create mode 100644 insights/specs/datasources/package_provides.py create mode 100644 insights/tests/datasources/test_get_running_commands.py create mode 100644 insights/tests/datasources/test_package_provides.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 49154a0f4..c74365051 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -3,6 +3,22 @@ Custom Datasources Catalog ========================== +insights.specs.datasources +-------------------------- + +.. automodule:: insights.specs.datasources + :members: + :show-inheritance: + :undoc-members: + +insights.specs.datasources.candlepin_broker +------------------------------------------- + +.. automodule:: insights.specs.datasources.candlepin_broker + :members: candlepin_broker, LocalSpecs + :show-inheritance: + :undoc-members: + insights.specs.datasources.cloud_init ------------------------------------- @@ -11,26 +27,26 @@ insights.specs.datasources.cloud_init :show-inheritance: :undoc-members: -insights.specs.datasources.ps ------------------------------ +insights.specs.datasources.ipcs +------------------------------- -.. automodule:: insights.specs.datasources.ps - :members: ps_eo_cmd, LocalSpecs +.. automodule:: insights.specs.datasources.ipcs + :members: semid :show-inheritance: :undoc-members: -insights.specs.datasources.candlepin_broker +insights.specs.datasources.package_provides ------------------------------------------- -.. automodule:: insights.specs.datasources.candlepin_broker - :members: candlepin_broker, LocalSpecs +.. automodule:: insights.specs.datasources.package_provides + :members: cmd_and_pkg, get_package :show-inheritance: :undoc-members: -insights.specs.datasources.ipcs -------------------------------- +insights.specs.datasources.ps +----------------------------- -.. automodule:: insights.specs.datasources.ipcs - :members: semid +.. 
automodule:: insights.specs.datasources.ps + :members: ps_eo_cmd, LocalSpecs :show-inheritance: :undoc-members: diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 0dbd48378..e5e70a16c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -462,7 +462,7 @@ class Specs(SpecSet): ovs_vsctl_show = RegistryPoint() ovs_vswitchd_limits = RegistryPoint() pacemaker_log = RegistryPoint(filterable=True) - package_provides_command = RegistryPoint() + package_provides_command = RegistryPoint(filterable=True) package_provides_java = RegistryPoint(multi_output=True) package_provides_httpd = RegistryPoint(multi_output=True) pam_conf = RegistryPoint() diff --git a/insights/specs/datasources/__init__.py b/insights/specs/datasources/__init__.py index e69de29bb..4113b50bf 100644 --- a/insights/specs/datasources/__init__.py +++ b/insights/specs/datasources/__init__.py @@ -0,0 +1,48 @@ +""" +Custom datasources provide functionality beyond ``simple_file`` and ``simple_command`` +type datasources. If the custom datasource is short and concise it may be added to +this module. Other datasources should be added as a separate module. +Normally custom datasources are necessary for core-collection. In order for a custom +datasource to execute, all of its dependencies must be explicitly loaded by the client. +The client uses the YAML template :py:data:`insights.collect.default_manifest` and each +parser/combiner/component required by a custom datasource must be included in the YAML +template to ensure it is loaded. +""" +DEFAULT_SHELL_TIMEOUT = 10 +""" int: Default timeout in seconds for ctx.shell_out() commands, must be provided as an arg """ + + +def get_running_commands(ps, ctx, commands): + """ + Search for a list of commands in Ps combiner output and returns the full path + to the command + + Arguments: + ps: Ps combiner object + ctx: Context of the current collection + commands(list): List of commands to search for in ps output + + Returns: + list: List of the full command paths of the all ``command``. + + Raises: + TypeError: Raised when ``commands`` args is not a list + """ + if not commands or not isinstance(commands, list): + raise TypeError('Commands argument must be a list object and contain at least one item') + + ps_list = [ps.search(COMMAND_NAME__contains=c) for c in commands] + if ps_list and isinstance(ps_list[0], list): + ps_cmds = [i for sub_l in ps_list for i in sub_l] + else: + ps_cmds = ps_list + + ret = set() + for cmd in set(p['COMMAND'] for p in ps_cmds): + try: + cmd_prefix = cmd.split(None, 1)[0] + which = ctx.shell_out("/usr/bin/which {0}".format(cmd_prefix), timeout=DEFAULT_SHELL_TIMEOUT) + except Exception: + continue + ret.add(which[0]) if which else None + return sorted(ret) diff --git a/insights/specs/datasources/package_provides.py b/insights/specs/datasources/package_provides.py new file mode 100644 index 000000000..0521b82e0 --- /dev/null +++ b/insights/specs/datasources/package_provides.py @@ -0,0 +1,80 @@ +""" +Custom datasource for package_provides +""" +import logging +import signal + +from insights.combiners.ps import Ps +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.filters import get_filters +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider +from insights.specs import Specs + +from . 
import get_running_commands, DEFAULT_SHELL_TIMEOUT + +logger = logging.getLogger(__name__) + + +def get_package(ctx, file_path): + """ + Get the RPM package that owns the specified filename with path + + Arguments: + ctx: The current execution context + file_path(str): The full path and filename for RPM query + + Returns: + str: The name of the RPM package that provides the ``file`` + or None if file is not associated with an RPM. + """ + rc, resolved = ctx.shell_out( + "/usr/bin/readlink -e {0}".format(file_path), + timeout=DEFAULT_SHELL_TIMEOUT, + keep_rc=True + ) + if rc == 0 and resolved: + rc, pkg = ctx.shell_out( + "/usr/bin/rpm -qf {0}".format(resolved[0]), + timeout=DEFAULT_SHELL_TIMEOUT, + keep_rc=True, + signum=signal.SIGTERM + ) + if rc == 0 and pkg: + return pkg[0] + + +@datasource(Ps, HostContext) +def cmd_and_pkg(broker): + """ + Collect a list of running commands and the associated RPM package providing those commands. + The commands are based on filters so rules must add the desired commands as filters to + enable collection. If a command is not provided by an RPM then it will not be included + in the output. + + In order for the full command line to be present in the Ps combiner a filter must be added + to the spec ``ps_auxww``. A filter must also be added to ``package_provides_command`` so + this datasource will look for the command in Ps. + + Arguments: + broker: the broker object for the current session + + Returns: + DatasourceProvider: Returns the collected information as a file with 1 line per command + + Raises: + SkipComponent: Raised if no data is collected + """ + commands = get_filters(Specs.package_provides_command) + """ list: List of commands to search for, added as filters for the spec """ + + if commands: + pkg_cmd = list() + for cmd in get_running_commands(broker[Ps], broker[HostContext], list(commands)): + pkg = get_package(broker[HostContext], cmd) + if pkg is not None: + pkg_cmd.append("{0} {1}".format(cmd, pkg)) + if pkg_cmd: + return DatasourceProvider('\n'.join(pkg_cmd), relative_path='insights_commands/package_provides_command') + raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index 434ec9f95..72bc25ec6 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -8,6 +8,7 @@ data sources that standard Insights `Parsers` resolve against. """ +import datetime import logging import os import re @@ -36,8 +37,8 @@ from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs -from insights.specs.datasources import cloud_init, ps as ps_datasource, candlepin_broker, ipcs -import datetime +from insights.specs.datasources import ( + cloud_init, candlepin_broker, get_running_commands, ipcs, package_provides, ps as ps_datasource) logger = logging.getLogger(__name__) @@ -51,56 +52,6 @@ def get_owner(filename): return (name, group) -def _get_running_commands(broker, commands): - """ - Search for command in ``ps auxcww`` output and determine RPM providing binary - - Arguments: - broker(dict): Current state of specs collected by Insights - commands(str or list): Command or list of commands to search for in ps output - - Returns: - list: List of the full command paths of the ``command``. 
- - Raises: - Exception: Raises an exception if commands object is not a list or is empty - """ - if not commands or not isinstance(commands, list): - raise Exception('Commands argument must be a list object and contain at least one item') - - ps_list = [broker[Ps].search(COMMAND_NAME__contains=c) for c in commands] - ps_cmds = [i for sub_l in ps_list for i in sub_l] - ctx = broker[HostContext] - - ret = set() - for cmd in set(p['COMMAND'] for p in ps_cmds): - try: - cmd_prefix = cmd.split(None, 1)[0] - which = ctx.shell_out("/usr/bin/which {0}".format(cmd_prefix)) - except Exception: - continue - ret.add(which[0]) if which else None - return sorted(ret) - - -def _get_package(broker, command): - """ - Arguments: - broker(dict): Current state of specs collected by Insights - command(str): The full command name to get the package - - Returns: - str: The package that provides the ``command``. - """ - ctx = broker[HostContext] - resolved = ctx.shell_out("/usr/bin/readlink -e {0}".format(command)) - if resolved: - pkg = ctx.shell_out("/usr/bin/rpm -qf {0}".format(resolved[0]), signum=signal.SIGTERM) - if pkg: - return pkg[0] - raise SkipComponent - - def _make_rpm_formatter(fmt=None): """ function: Returns function that will format output of rpm query command """ if fmt is None: @@ -394,7 +345,7 @@ def httpd_cmd(broker): Returns: list: List of the binary paths to each running process """ - return _get_running_commands(broker, ['httpd', ]) + return get_running_commands(broker[Ps], broker[HostContext], ['httpd', ]) httpd_pid = simple_command("/usr/bin/pgrep -o httpd") httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") @@ -592,25 +543,7 @@ def md_device_list(broker): ovirt_engine_ui_log = simple_file("/var/log/ovirt-engine/ui.log") ovs_vsctl_list_bridge = simple_command("/usr/bin/ovs-vsctl list bridge") ovs_vsctl_show = simple_command("/usr/bin/ovs-vsctl show") - - @datasource(Ps, HostContext) - def cmd_and_pkg(broker): - """ - Returns: - list: List of the command and provider package string of the specified commands. - - Attributes: - COMMANDS (list): List of the specified commands that need to check the provider package. 
- """ - COMMANDS = ['java', 'httpd'] - pkg_cmd = list() - for cmd in _get_running_commands(broker, COMMANDS): - pkg_cmd.append("{0} {1}".format(cmd, _get_package(broker, cmd))) - if pkg_cmd: - return '\n'.join(pkg_cmd) - raise SkipComponent - - package_provides_command = command_with_args("/usr/bin/echo '%s'", cmd_and_pkg) + package_provides_command = package_provides.cmd_and_pkg pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) partitions = simple_file("/proc/partitions") pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") diff --git a/insights/tests/datasources/test_get_running_commands.py b/insights/tests/datasources/test_get_running_commands.py new file mode 100644 index 000000000..94d1e2168 --- /dev/null +++ b/insights/tests/datasources/test_get_running_commands.py @@ -0,0 +1,127 @@ +import pytest + +from insights.combiners.ps import Ps +from insights.parsers.ps import PsEoCmd +from insights.specs.datasources import get_running_commands +from insights.tests import context_wrap + +PS_EO_CMD = """ + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 22 + 2 [kthreadd] + 988 /usr/sbin/httpd -DFOREGROUND + 1036 /usr/sbin/httpd -DFOREGROUND + 1037 /usr/sbin/httpd -DFOREGROUND + 1038 /usr/sbin/httpd -DFOREGROUND + 1039 /usr/sbin/httpd -DFOREGROUND + 1040 /usr/local/sbin/httpd -DFOREGROUND + 28218 /usr/bin/java TestSleepMethod1 + 28219 java TestSleepMethod1 + 28240 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java TestSleepMethod2 +333083 /home/user3/apps/pycharm-2021.1.1/jbr/bin/java -classpath /home/user3/apps/pycharm-2021.1.1/lib/bootstrap.jar:/home/user3/apps/pycharm-2021.1.1/lib/util.jar:/home/user3/apps/pycharm-2021.1.1/lib/jdom.jar:/home/user3/apps/pycharm-2021.1.1/lib/log4j.jar:/home/user3/apps/pycharm-2021.1.1/lib/jna.jar -Xms128m -Xmx2048m -XX:ReservedCodeCacheSize=512m -XX:+UseG1GC -XX:SoftRefLRUPolicyMSPerMB=50 -XX:CICompilerCount=2 -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow -ea -Dsun.io.useCanonCaches=false -Djdk.http.auth.tunneling.disabledSchemes="" -Djdk.attach.allowAttachSelf=true -Djdk.module.illegalAccess.silent=true -Dkotlinx.coroutines.debug=off -Dsun.tools.attach.tmp.only=true -XX:ErrorFile=/home/user3/java_error_in_pycharm_%p.log -XX:HeapDumpPath=/home/user3/java_error_in_pycharm_.hprof -Didea.vendor.name=JetBrains -Didea.paths.selector=PyCharm2021.1 -Djb.vmOptionsFile=/home/user3/.config/JetBrains/PyCharm2021.1/pycharm64.vmoptions -Didea.platform.prefix=Python com.intellij.idea.Main +""" + +PS_EO_CMD_MISSING = """ +PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 22 + 2 [kthreadd] +""" + +PS_EO_CMD_EXCEPTION = """ + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 22 + 2 [kthreadd] +28218 /usr/bin/java TestSleepMethod1 +28219 /exception/java +""" + +PS_EO_CMD_ONE = """ + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 22 + 2 [kthreadd] + 988 /usr/sbin/httpd -DFOREGROUND +""" + + +class FakeContext(object): + def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None, signum=None): + tmp_cmd = cmd.strip().split() + if 'exception' in tmp_cmd[-1]: + raise Exception() + elif tmp_cmd[-1].startswith('/'): + return [tmp_cmd[-1], ] + elif tmp_cmd[-1].endswith('java'): + return ['/usr/bin/java', ] + elif tmp_cmd[-1].endswith('httpd'): + return ['/usr/sbin/httpd', ] + + raise Exception() + + 
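+# A minimal usage sketch (illustrative only; it mirrors the tests below). The
+# helper only needs a Ps combiner built from a single ps parser, plus a context
+# whose shell_out() can answer `which` lookups, so FakeContext is sufficient:
+#
+#   ps = Ps(None, None, None, None, None, None, PsEoCmd(context_wrap(PS_EO_CMD)))
+#   get_running_commands(ps, FakeContext(), ['httpd'])
+#   # -> ['/usr/local/sbin/httpd', '/usr/sbin/httpd']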
+def test_get_running_commands_present(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD)) + ps = Ps(None, None, None, None, None, None, pseo) + assert ps is not None + ctx = FakeContext() + + results = get_running_commands(ps, ctx, ['httpd']) + assert set(results) == set(['/usr/sbin/httpd', '/usr/local/sbin/httpd']) + + results = get_running_commands(ps, ctx, ['java']) + assert set(results) == set([ + '/usr/bin/java', + '/home/user3/apps/pycharm-2021.1.1/jbr/bin/java', + '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java' + ]) + + results = get_running_commands(ps, ctx, ['java', 'httpd']) + assert set(results) == set([ + '/usr/bin/java', + '/home/user3/apps/pycharm-2021.1.1/jbr/bin/java', + '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java', + '/usr/sbin/httpd', + '/usr/local/sbin/httpd' + ]) + + +def test_get_running_commands_missing(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD_MISSING)) + ps = Ps(None, None, None, None, None, None, pseo) + assert ps is not None + ctx = FakeContext() + + results = get_running_commands(ps, ctx, ['httpd', 'java']) + assert set(results) == set() + + +def test_get_running_commands_cmd_exception(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD_EXCEPTION)) + ps = Ps(None, pseo, None, None, None, None, pseo) + assert ps is not None + ctx = FakeContext() + + results = get_running_commands(ps, ctx, ['httpd', 'java']) + assert set(results) == set(['/usr/bin/java', ]) + + +def test_get_running_commands_exception(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD_MISSING)) + ps = Ps(None, None, None, None, None, None, pseo) + assert ps is not None + ctx = FakeContext() + + with pytest.raises(TypeError): + get_running_commands(ps, ctx, 'not_a_list') + + with pytest.raises(TypeError): + get_running_commands(ps, ctx, []) + + +def test_get_running_commands_one(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD_ONE)) + ps = Ps(None, None, None, None, None, None, pseo) + assert ps is not None + ctx = FakeContext() + + results = get_running_commands(ps, ctx, ['httpd']) + assert set(results) == set(['/usr/sbin/httpd', ]) diff --git a/insights/tests/datasources/test_package_provides.py b/insights/tests/datasources/test_package_provides.py new file mode 100644 index 000000000..5bb06d8fa --- /dev/null +++ b/insights/tests/datasources/test_package_provides.py @@ -0,0 +1,153 @@ +import pytest + +from insights import dr, HostContext +from insights.combiners.ps import Ps +from insights.core import filters +from insights.core.dr import SkipComponent +from insights.parsers.ps import PsEoCmd +from insights.specs import Specs +from insights.specs.datasources.package_provides import get_package, cmd_and_pkg +from insights.core.spec_factory import DatasourceProvider +from insights.tests import context_wrap + +JAVA_PATH_1 = '/usr/bin/java' +JAVA_PATH_2 = '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java' +JAVA_PATH_BAD = '/random/java' +JAVA_PATH_ERR = '/error/java' +JAVA_PKG_2 = 'java-1.8.0-openjdk-headless-1.8.0.292.b10-1.el7_9.x86_64' +HTTPD_PATH = '/usr/sbin/httpd' +HTTPD_PKG = 'httpd-2.4.6-97.el7_9.x86_64' + + +class FakeContext(HostContext): + def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None, signum=None): + tmp_cmd = cmd.strip().split() + shell_cmd = tmp_cmd[0] + arg = tmp_cmd[-1] + if 'readlink' in shell_cmd: + if arg == JAVA_PATH_1: + return (0, [JAVA_PATH_2, ]) + elif arg == JAVA_PATH_ERR: + return (1, ['file not found', ]) + elif arg.startswith('/'): + return (0, [arg, ]) + elif 'rpm' in shell_cmd: + if arg 
== JAVA_PATH_2: + return (0, [JAVA_PKG_2, ]) + elif arg == HTTPD_PATH: + return (0, [HTTPD_PKG, ]) + else: + return (1, ['file {0} is not owned by any package'.format(arg), ]) + elif 'which' in shell_cmd: + if 'exception' in arg: + raise Exception() + elif arg.startswith('/'): + return [tmp_cmd[-1], ] + elif arg.endswith('java'): + return ['/usr/bin/java', ] + elif arg.endswith('httpd'): + return ['/usr/sbin/httpd', ] + + raise Exception() + + +def setup_function(func): + if Specs.package_provides_command in filters._CACHE: + del filters._CACHE[Specs.package_provides_command] + if Specs.package_provides_command in filters.FILTERS: + del filters.FILTERS[Specs.package_provides_command] + + if func is test_cmd_and_pkg: + filters.add_filter(Specs.package_provides_command, ['httpd', 'java']) + elif func is test_cmd_and_pkg_not_found: + filters.add_filter(Specs.package_provides_command, ['not_found']) + + +def teardown_function(func): + if func is test_cmd_and_pkg or func is test_cmd_and_pkg_not_found: + del filters.FILTERS[Specs.package_provides_command] + + +def test_get_package(): + ctx = FakeContext() + + result = get_package(ctx, '/usr/bin/java') + print('result:', result) + assert result == 'java-1.8.0-openjdk-headless-1.8.0.292.b10-1.el7_9.x86_64' + + result = get_package(ctx, '/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java') + assert result == 'java-1.8.0-openjdk-headless-1.8.0.292.b10-1.el7_9.x86_64' + + +def test_get_package_bad(): + ctx = FakeContext() + + result = get_package(ctx, JAVA_PATH_BAD) + assert result is None + + +def test_get_package_err(): + ctx = FakeContext() + + result = get_package(ctx, JAVA_PATH_ERR) + assert result is None + + +PS_EO_CMD = """ + PID COMMAND + 1 /usr/lib/systemd/systemd --switched-root --system --deserialize 22 + 2 [kthreadd] + 988 /usr/sbin/httpd -DFOREGROUND + 1036 /usr/sbin/httpd -DFOREGROUND + 1037 /usr/sbin/httpd -DFOREGROUND + 1038 /usr/sbin/httpd -DFOREGROUND + 1039 /usr/sbin/httpd -DFOREGROUND + 1040 /usr/local/sbin/httpd -DFOREGROUND + 28218 /usr/bin/java TestSleepMethod1 + 28219 java TestSleepMethod1 + 28240 /usr/lib/jvm/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64/jre/bin/java TestSleepMethod2 +333083 /home/user3/apps/pycharm-2021.1.1/jbr/bin/java -classpath /home/user3/apps/pycharm-2021.1.1/lib/bootstrap.jar:/home/user3/apps/pycharm-2021.1.1/lib/util.jar:/home/user3/apps/pycharm-2021.1.1/lib/jdom.jar:/home/user3/apps/pycharm-2021.1.1/lib/log4j.jar:/home/user3/apps/pycharm-2021.1.1/lib/jna.jar -Xms128m -Xmx2048m -XX:ReservedCodeCacheSize=512m -XX:+UseG1GC -XX:SoftRefLRUPolicyMSPerMB=50 -XX:CICompilerCount=2 -XX:+HeapDumpOnOutOfMemoryError -XX:-OmitStackTraceInFastThrow -ea -Dsun.io.useCanonCaches=false -Djdk.http.auth.tunneling.disabledSchemes="" -Djdk.attach.allowAttachSelf=true -Djdk.module.illegalAccess.silent=true -Dkotlinx.coroutines.debug=off -Dsun.tools.attach.tmp.only=true -XX:ErrorFile=/home/user3/java_error_in_pycharm_%p.log -XX:HeapDumpPath=/home/user3/java_error_in_pycharm_.hprof -Didea.vendor.name=JetBrains -Didea.paths.selector=PyCharm2021.1 -Djb.vmOptionsFile=/home/user3/.config/JetBrains/PyCharm2021.1/pycharm64.vmoptions -Didea.platform.prefix=Python com.intellij.idea.Main +""" + +EXPECTED = DatasourceProvider( + "\n".join([ + "{0} {1}".format(HTTPD_PATH, HTTPD_PKG), + "{0} {1}".format(JAVA_PATH_1, JAVA_PKG_2), + "{0} {1}".format(JAVA_PATH_2, JAVA_PKG_2) + ]), + relative_path='insights_commands/package_provides_command' +) + + +def test_cmd_and_pkg(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD)) + 
ps = Ps(None, None, None, None, None, None, pseo) + broker = dr.Broker() + broker[HostContext] = FakeContext() + broker[Ps] = ps + + result = cmd_and_pkg(broker) + assert result is not None + assert sorted(result.content) == sorted(EXPECTED.content) + + +def test_cmd_and_pkg_no_filters(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD)) + ps = Ps(None, None, None, None, None, None, pseo) + broker = dr.Broker() + broker[HostContext] = FakeContext() + broker[Ps] = ps + + with pytest.raises(SkipComponent): + cmd_and_pkg(broker) + + +def test_cmd_and_pkg_not_found(): + pseo = PsEoCmd(context_wrap(PS_EO_CMD)) + ps = Ps(None, None, None, None, None, None, pseo) + broker = dr.Broker() + broker[HostContext] = FakeContext() + broker[Ps] = ps + + with pytest.raises(SkipComponent): + cmd_and_pkg(broker) diff --git a/insights/tests/test_specs.py b/insights/tests/test_specs.py index a5815b92d..483cd846f 100644 --- a/insights/tests/test_specs.py +++ b/insights/tests/test_specs.py @@ -6,7 +6,6 @@ from insights.core.plugins import ContentException from insights.core.spec_factory import (DatasourceProvider, simple_file, simple_command, glob_file, SpecSet) -import insights.specs.default as default_specs import tempfile import pytest import glob @@ -103,13 +102,3 @@ def parse_content(self, content): p = MyParser(ds) assert p.content == data.splitlines() assert list(ds.stream()) == data.splitlines() - - -def test_get_running_commands(): - broker = dr.Broker() - broker[HostContext] = HostContext() - with pytest.raises(Exception): - default_specs._get_running_commands(broker, 'not_a_list') - - with pytest.raises(Exception): - default_specs._get_running_commands(broker, []) From ee976d6949f84685e5155e1696f59ac5c9fd08e0 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 6 Jul 2021 22:15:42 -0400 Subject: [PATCH 477/892] Fix ethtool parse exceptions due to a spec issue (#3122) * Add new ethernet interfaces datasource, to properly gather a list of valid ethernet interface names. That way ethtool is ran against the proper interface names. * Fixes #1791 Signed-off-by: Ryan Blakley --- docs/custom_datasources_index.rst | 8 +++ insights/specs/datasources/ethernet.py | 57 +++++++++++++++++++++ insights/specs/default.py | 16 +++--- insights/tests/datasources/test_ethernet.py | 37 +++++++++++++ 4 files changed, 110 insertions(+), 8 deletions(-) create mode 100644 insights/specs/datasources/ethernet.py create mode 100644 insights/tests/datasources/test_ethernet.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index c74365051..d45b77392 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -27,6 +27,14 @@ insights.specs.datasources.cloud_init :show-inheritance: :undoc-members: +insights.specs.datasources.ethernet +------------------------------------- + +.. automodule:: insights.specs.datasources.ethernet + :members: interfaces, LocalSpecs + :show-inheritance: + :undoc-members: + insights.specs.datasources.ipcs ------------------------------- diff --git a/insights/specs/datasources/ethernet.py b/insights/specs/datasources/ethernet.py new file mode 100644 index 000000000..2224f4741 --- /dev/null +++ b/insights/specs/datasources/ethernet.py @@ -0,0 +1,57 @@ +""" +Custom datasource for gathering a list of the ethernet interface names. 
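+
+The list is derived from ``ip -o link`` output: only ``link/ether`` devices
+are kept, and VLAN names such as ``enp1s0.2@enp1s0`` are reduced to
+``enp1s0.2``.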
+""" +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import simple_command +from insights.specs import Specs + + +class LocalSpecs(Specs): + """ Local specs used only by ethernet_interfaces datasource. """ + ip_link = simple_command("/sbin/ip -o link") + + +@datasource(LocalSpecs.ip_link, HostContext) +def interfaces(broker): + """ + This datasource provides a list of the ethernet interfaces available. + + Typical content of the spec is:: + + 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 + 2: enp1s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:13:14:b5 brd ff:ff:ff:ff:ff:ff + 3: enp8s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:e5:11:d4 brd ff:ff:ff:ff:ff:ff + 4: enp1s0.2@enp1s0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:13:14:b5 brd ff:ff:ff:ff:ff:ff + 5: ib0: mtu 4092 qdisc mq state DOWN group default qlen 256\\ link/infiniband 00:01:02:03:fd:90:0:00:00:00:00:00:ef:0d:8b:02:01:d9:82:fd + + Note: + This datasource may be executed using the following command: + + ``insights cat --no-header ethernet_interfaces`` + + Sample data returned:: + + ['enp1s0', 'enp8s0', 'enp1s0.2'] + + Returns: + list: List of the ethernet interfaces available. + + Raises: + SkipComponent: When there is not any content. + """ + content = broker[LocalSpecs.ip_link].content + if content: + ifaces = [] + for x in content: + # Only process lines that have link/ether, this should exclude non ethernet devices. + if "link/ether" in x: + # Split first on : the interface name should be the second entry. + # Then split again on @ since vlans append @ and the parent interface name to the end. 
+ ifaces.append(x.split(':')[1].split('@')[0].strip()) + + if ifaces: + return sorted(ifaces) + + raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index 72bc25ec6..f8bfc3d42 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -38,7 +38,7 @@ from insights.parsers.mount import Mount from insights.specs import Specs from insights.specs.datasources import ( - cloud_init, candlepin_broker, get_running_commands, ipcs, package_provides, ps as ps_datasource) + cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides, ps as ps_datasource) logger = logging.getLogger(__name__) @@ -244,13 +244,13 @@ def du_dirs_list(broker): "/usr/lib/udev/rules.d/40-redhat.rules", "/usr/local/lib/udev/rules.d/40-redhat.rules"]) etcd_conf = simple_file("/etc/etcd/etcd.conf") ethernet_interfaces = listdir("/sys/class/net", context=HostContext) - ethtool = foreach_execute(ethernet_interfaces, "/sbin/ethtool %s") - ethtool_S = foreach_execute(ethernet_interfaces, "/sbin/ethtool -S %s") - ethtool_T = foreach_execute(ethernet_interfaces, "/sbin/ethtool -T %s") - ethtool_c = foreach_execute(ethernet_interfaces, "/sbin/ethtool -c %s") - ethtool_g = foreach_execute(ethernet_interfaces, "/sbin/ethtool -g %s") - ethtool_i = foreach_execute(ethernet_interfaces, "/sbin/ethtool -i %s") - ethtool_k = foreach_execute(ethernet_interfaces, "/sbin/ethtool -k %s") + ethtool = foreach_execute(ethernet.interfaces, "/sbin/ethtool %s") + ethtool_S = foreach_execute(ethernet.interfaces, "/sbin/ethtool -S %s") + ethtool_T = foreach_execute(ethernet.interfaces, "/sbin/ethtool -T %s") + ethtool_c = foreach_execute(ethernet.interfaces, "/sbin/ethtool -c %s") + ethtool_g = foreach_execute(ethernet.interfaces, "/sbin/ethtool -g %s") + ethtool_i = foreach_execute(ethernet.interfaces, "/sbin/ethtool -i %s") + ethtool_k = foreach_execute(ethernet.interfaces, "/sbin/ethtool -k %s") facter = simple_command("/usr/bin/facter") fc_match = simple_command("/bin/fc-match -sv 'sans:regular:roman' family fontformat") fcoeadm_i = simple_command("/usr/sbin/fcoeadm -i") diff --git a/insights/tests/datasources/test_ethernet.py b/insights/tests/datasources/test_ethernet.py new file mode 100644 index 000000000..1d4a35ca9 --- /dev/null +++ b/insights/tests/datasources/test_ethernet.py @@ -0,0 +1,37 @@ +import pytest + +from insights.core.dr import SkipComponent +from insights.specs.datasources.ethernet import interfaces, LocalSpecs +from mock.mock import Mock + +RELATIVE_PATH = "insights_commands/ethernet_interfaces" + +IP_LINK = """ +1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 +2: enp1s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:13:14:b5 brd ff:ff:ff:ff:ff:ff +3: enp8s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:e5:11:d4 brd ff:ff:ff:ff:ff:ff +4: enp1s0.2@enp1s0: mtu 1500 qdisc noqueue state UP mode DEFAULT group default qlen 1000\\ link/ether 52:54:00:13:14:b5 brd ff:ff:ff:ff:ff:ff +5: ib0: mtu 4092 qdisc mq state DOWN group default qlen 256\\ link/infiniband 00:01:02:03:fd:90:0:00:00:00:00:00:ef:0d:8b:02:01:d9:82:fd +""" + +IP_LINK_BAD = "" + +EXPECTED = ['enp1s0', 'enp8s0', 'enp1s0.2'] + + +def test_ethernet_interfaces(): + ip_link_command = Mock() + ip_link_command.content = IP_LINK.splitlines() + broker = {LocalSpecs.ip_link: ip_link_command} + result = interfaces(broker) + assert 
result is not None + assert isinstance(result, list) + assert result == sorted(EXPECTED) + + +def test_ethernet_interfaces_bad(): + ip_link_command = Mock() + ip_link_command.content = IP_LINK_BAD.splitlines() + broker = {LocalSpecs.ip_link: ip_link_command} + with pytest.raises(SkipComponent): + interfaces(broker) From 64cc80bb5db625f5fb4332cce20771637e1c882c Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 8 Jul 2021 03:52:07 +0800 Subject: [PATCH 478/892] Remove "is_satellite_server" and "is_satellite_capsule" (#3136) * Since the combiner "SatelliteVersion" and "CapsuleVersion" already raise SkipComponent if it is not satellite server or capsule, so use it directly Signed-off-by: Huanhuan Li --- insights/specs/default.py | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index f8bfc3d42..0ea94a11e 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -706,32 +706,13 @@ def ld_library_path_of_user(broker): saphostexec_status = simple_command("/usr/sap/hostctrl/exe/saphostexec -status") saphostexec_version = simple_command("/usr/sap/hostctrl/exe/saphostexec -version") sat5_insights_properties = simple_file("/etc/redhat-access/redhat-access-insights.properties") - - @datasource(SatelliteVersion, HostContext) - def is_satellite_server(broker): - """ - bool: Returns True if the host is satellite server. - """ - if broker[SatelliteVersion]: - return True - raise SkipComponent - - @datasource(CapsuleVersion, HostContext) - def is_satellite_capsule(broker): - """ - bool: Returns True if the host is satellite capsule. - """ - if broker[CapsuleVersion]: - return True - raise SkipComponent - satellite_compute_resources = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select name, type from compute_resources' --csv", - deps=[is_satellite_server] + deps=[SatelliteVersion] ) satellite_content_hosts_count = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", - deps=[is_satellite_server] + deps=[SatelliteVersion] ) satellite_custom_ca_chain = simple_command( '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt', @@ -739,11 +720,11 @@ def is_satellite_capsule(broker): satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_non_yum_type_repos = simple_command( "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: \"yum_importer\"}}).count()'", - deps=[[is_satellite_server, is_satellite_capsule]] + deps=[[SatelliteVersion, CapsuleVersion]] ) satellite_settings = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c \"select name, value, \\\"default\\\" from settings where name in ('destroy_vm_on_host_delete', 'unregister_delete_host')\" --csv", - deps=[is_satellite_server] + deps=[SatelliteVersion] ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") From 5f4c154f0ed8db37f52b38b60d49b4282bdae32d Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 8 Jul 2021 08:58:21 +0800 Subject: [PATCH 479/892] Move sap related datasource into new insights.specs.datasource module (#3124) * 
Move sap related datasource into new insights.specs.datasource module Signed-off-by: Xiangce Liu * Fix doc error Signed-off-by: Xiangce Liu * Make sap_sid and etc as public attr of the sap module Signed-off-by: Xiangce Liu * Update as per review - fix the doc entry line - add timeout for shell command Signed-off-by: Xiangce Liu * import sap in default Signed-off-by: Xiangce Liu --- docs/custom_datasources_index.rst | 8 + insights/specs/datasources/sap.py | 93 ++++++++++ insights/specs/default.py | 70 +------- insights/tests/datasources/test_sap.py | 225 +++++++++++++++++++++++++ 4 files changed, 331 insertions(+), 65 deletions(-) create mode 100644 insights/specs/datasources/sap.py create mode 100644 insights/tests/datasources/test_sap.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index d45b77392..08b0b404e 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -58,3 +58,11 @@ insights.specs.datasources.ps :members: ps_eo_cmd, LocalSpecs :show-inheritance: :undoc-members: + +insights.specs.datasources.sap +------------------------------ + +.. automodule:: insights.specs.datasources.sap + :members: sap_sid, sap_hana_sid, sap_hana_sid_SID_nr, ld_library_path_of_user, LocalSpecs + :show-inheritance: + :undoc-members: diff --git a/insights/specs/datasources/sap.py b/insights/specs/datasources/sap.py new file mode 100644 index 000000000..71d70a6f9 --- /dev/null +++ b/insights/specs/datasources/sap.py @@ -0,0 +1,93 @@ +""" +Custom datasources for SAP related specs +""" +from insights.specs import Specs +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider +from insights.combiners.sap import Sap +from insights.specs.datasources import DEFAULT_SHELL_TIMEOUT + + +class LocalSpecs(Specs): + """ Local specs used only by sap datasources """ + + @datasource(Sap, HostContext) + def sap_instance(broker): + """ + list: List of all SAP Instances. + """ + sap = broker[Sap] + return sorted(v for v in sap.values()) + + @datasource(sap_instance, HostContext) + def sap_hana_instance(broker): + """ + list: List of the SAP HANA Instances. + """ + sap = broker[LocalSpecs.sap_instance] + insts = sorted(v for v in sap if v.type == 'HDB') + if insts: + return insts + raise SkipComponent() + + +@datasource(LocalSpecs.sap_instance, HostContext) +def sap_sid(broker): + """ + list: List of the SID of all the SAP Instances. + """ + sap = broker[LocalSpecs.sap_instance] + return sorted(set(h.sid.lower() for h in sap)) + + +@datasource(LocalSpecs.sap_hana_instance, HostContext) +def sap_hana_sid(broker): + """ + list: List of the SID of SAP HANA Instances. """ + hana = broker[LocalSpecs.sap_hana_instance] + sids = sorted(set(h.sid.lower() for h in hana)) + if sids: + return sids + raise SkipComponent() + + +@datasource(LocalSpecs.sap_hana_instance, HostContext) +def sap_hana_sid_SID_nr(broker): + """ + list: List of tuples (sid, SID, Nr) of SAP HANA Instances. + """ + hana = broker[LocalSpecs.sap_hana_instance] + sids = sorted((h.sid.lower(), h.sid, h.number) for h in hana) + if sids: + return sids + raise SkipComponent() + + +@datasource(sap_sid, HostContext) +def ld_library_path_of_user(broker): + """ + list: The list of "Username LD_LIBRARY_PATH", e.g.:: + + [ + 'sr1adm /usr/sap/RH1/SYS/exe/run:/usr/lib/', + 'sr2adm /usr/sap/RH2/SYS/exe/run', + ] + + .. note:: + Currently, only Sap users are supported. 
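+
+    The values are gathered by running ``/bin/su -l <sid>adm -c /bin/env`` for
+    each SAP SID and extracting ``LD_LIBRARY_PATH`` from the output; SIDs whose
+    environment cannot be read are skipped.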
+ """ + ctx = broker[HostContext] + llds = [] + for sid in broker[sap_sid]: + usr = '{0}adm'.format(sid) + ret, vvs = ctx.shell_out("/bin/su -l {0} -c /bin/env".format(usr), keep_rc=True, timeout=DEFAULT_SHELL_TIMEOUT) + if ret != 0: + continue + for v in vvs: + if "LD_LIBRARY_PATH=" in v: + llds.append('{0} {1}'.format(usr, v.split('=', 1)[-1])) + if llds: + return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') + raise SkipComponent('') diff --git a/insights/specs/default.py b/insights/specs/default.py index 0ea94a11e..45be50297 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -21,13 +21,12 @@ from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource -from insights.core.spec_factory import RawFileProvider, DatasourceProvider +from insights.core.spec_factory import RawFileProvider from insights.core.spec_factory import simple_file, simple_command, glob_file from insights.core.spec_factory import first_of, command_with_args from insights.core.spec_factory import foreach_collect, foreach_execute from insights.core.spec_factory import first_file, listdir from insights.combiners.services import Services -from insights.combiners.sap import Sap from insights.combiners.ps import Ps from insights.components.rhel_version import IsRhel8, IsRhel7, IsRhel6 from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP @@ -38,7 +37,9 @@ from insights.parsers.mount import Mount from insights.specs import Specs from insights.specs.datasources import ( - cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides, ps as ps_datasource) + cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides, + ps as ps_datasource, sap) +from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr logger = logging.getLogger(__name__) @@ -385,6 +386,7 @@ def httpd_cmd(broker): kubepods_cpu_quota = glob_file("/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us") last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) + ld_library_path_of_user = sap.ld_library_path_of_user libssh_client_config = simple_file("/etc/libssh/libssh_client.config") libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") @@ -638,68 +640,6 @@ def pmlog_summary_file(broker): rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True, signum=signal.SIGTERM) rsyslog_conf = glob_file(["/etc/rsyslog.conf", "/etc/rsyslog.d/*.conf"]) samba = simple_file("/etc/samba/smb.conf") - - @datasource(Sap, HostContext) - def sap_instance(broker): - """ - list: List of all SAP Instances. - """ - sap = broker[Sap] - return list(v for v in sap.values()) - - @datasource(sap_instance, HostContext) - def sap_hana_instance(broker): - """ - list: List of the SAP HANA Instances. - """ - sap = broker[DefaultSpecs.sap_instance] - return list(v for v in sap if v.type == 'HDB') - - @datasource(sap_instance, HostContext) - def sap_sid(broker): - """ - list: List of the SID of all the SAP Instances. 
- """ - sap = broker[DefaultSpecs.sap_instance] - return list(set(h.sid.lower() for h in sap)) - - @datasource(sap_hana_instance, HostContext) - def sap_hana_sid(broker): - """ - list: List of the SID of SAP HANA Instances. - """ - hana = broker[DefaultSpecs.sap_hana_instance] - return list(set(h.sid.lower() for h in hana)) - - @datasource(sap_hana_instance, HostContext) - def sap_hana_sid_SID_nr(broker): - """ - list: List of tuples (sid, SID, Nr) of SAP HANA Instances. - """ - hana = broker[DefaultSpecs.sap_hana_instance] - return list((h.sid.lower(), h.sid, h.number) for h in hana) - - @datasource(sap_sid, HostContext) - def ld_library_path_of_user(broker): - """ - Returns: The list of LD_LIBRARY_PATH of specified users. - Username is combined from SAP and 'adm' and is also stored. - """ - sids = broker[DefaultSpecs.sap_sid] - ctx = broker[HostContext] - llds = [] - for sid in sids: - usr = '{0}adm'.format(sid) - ret, vvs = ctx.shell_out("/bin/su -l {0} -c /bin/env".format(usr), keep_rc=True) - if ret != 0: - continue - for v in vvs: - if "LD_LIBRARY_PATH=" in v: - llds.append('{0} {1}'.format(usr, v.split('=', 1)[-1])) - if llds: - return DatasourceProvider('\n'.join(llds), relative_path='insights_commands/echo_user_LD_LIBRARY_PATH') - raise SkipComponent - sap_hana_landscape = foreach_execute(sap_hana_sid_SID_nr, "/bin/su -l %sadm -c 'python /usr/sap/%s/HDB%s/exe/python_support/landscapeHostConfiguration.py'", keep_rc=True) sap_hdb_version = foreach_execute(sap_hana_sid, "/bin/su -l %sadm -c 'HDB version'", keep_rc=True) saphostctl_getcimobject_sapinstance = simple_command("/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance") diff --git a/insights/tests/datasources/test_sap.py b/insights/tests/datasources/test_sap.py new file mode 100644 index 000000000..690ec5887 --- /dev/null +++ b/insights/tests/datasources/test_sap.py @@ -0,0 +1,225 @@ +import pytest +from insights.core.dr import SkipComponent +from insights.core.context import HostContext +from insights.core.spec_factory import DatasourceProvider +from insights.tests import context_wrap +from insights.parsers.hostname import Hostname as HostnameParser +from insights.parsers.saphostctrl import SAPHostCtrlInstances +from insights.combiners.sap import Sap +from insights.combiners.hostname import Hostname +from insights.combiners.tests.test_sap import ( + SAPHOSTCTRL_HOSTINSTANCES_R_CASE, + SAPHOSTCTRL_HOSTINSTANCES_GOOD, HOSTNAME1) +from insights.specs.datasources.sap import ( + LocalSpecs, sap_sid, sap_hana_sid, sap_hana_sid_SID_nr, + ld_library_path_of_user) + +SAPHOSTCTRL_HOSTINSTANCES = ''' +********************************************************* + CreationClassName , String , SAPInstance + SID , String , RH1 + SystemNumber , String , 01 + InstanceName , String , ASCS01 + Hostname , String , vm37-39 + FullQualifiedHostname , String , vm37-39.pek2.com + IPAddress , String , 10.72.37.39 + SapVersionInfo , String , 745, patch 100, changelist 1652052 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , RH1 + SystemNumber , String , 00 + InstanceName , String , D00 + Hostname , String , vm37-39 + FullQualifiedHostname , String , vm37-39.pek2.com + IPAddress , String , 10.72.37.39 + SapVersionInfo , String , 745, patch 100, changelist 1652052 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , SR1 + SystemNumber , String , 02 + InstanceName , String , HDB02 + Hostname , 
String , vm37-39 + FullQualifiedHostname , String , vm37-39.pek2.com + IPAddress , String , 10.72.37.39 + SapVersionInfo , String , 749, patch 418, changelist 1816226 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , RH2 + SystemNumber , String , 04 + InstanceName , String , ASCS04 + Hostname , String , vm37-39 + FullQualifiedHostname , String , vm37-39.pek2.com + IPAddress , String , 10.72.37.39 + SapVersionInfo , String , 745, patch 100, changelist 1652052 +********************************************************* + CreationClassName , String , SAPInstance + SID , String , RH2 + SystemNumber , String , 03 + InstanceName , String , D03 + Hostname , String , vm37-39 + FullQualifiedHostname , String , vm37-39.pek2.com + IPAddress , String , 10.72.37.39 + SapVersionInfo , String , 745, patch 100, changelist 1652052 +'''.strip() + +RH1ADM_ENV = ''' +TERM=screen-256color +HOME=/home/rh1adm +SHELL=/bin/csh +USER=rh1adm +LOGNAME=rh1adm +PATH=/sapdb/clients/RH1/bin:/sapdb/programs/bin:/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/usr/sap/RH1/SYS/exe/run:/home/rh1adm:. +XDG_SESSION_ID=6682 +HOSTTYPE=x86_64-linux +VENDOR=unknown +OSTYPE=linux +MACHTYPE=x86_64 +SHLVL=1 +PWD=/home/rh1adm +GROUP=sapsys +HOST=vm37-39 +REMOTEHOST=10.66.136.143 +MAIL=/var/spool/mail/rh1adm +HOSTNAME=vm37-39 +LANG=en_US.UTF-8 +LESSOPEN=||/usr/bin/lesspipe.sh %s +SAPSYSTEMNAME=RH1 +DIR_LIBRARY=/usr/sap/RH1/SYS/exe/run +RSEC_SSFS_DATAPATH=/usr/sap/RH1/SYS/global/security/rsecssfs/data +RSEC_SSFS_KEYPATH=/usr/sap/RH1/SYS/global/security/rsecssfs/key +rsdb_ssfs_connect=0 +LD_LIBRARY_PATH=/usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib +dbms_type=ADA +'''.strip() + +RH2ADM_ENV = ''' +TERM=screen-256color +HOME=/home/rh2adm +SHELL=/bin/csh +USER=rh2adm +LOGNAME=rh2adm +PATH=/sapdb/clients/RH2/bin:/sapdb/programs/bin:/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/usr/sap/RH2/SYS/exe/uc/linuxx86_64:/usr/sap/RH2/SYS/exe/run:/home/rh2adm:. 
+XDG_SESSION_ID=6682 +HOSTTYPE=x86_64-linux +VENDOR=unknown +OSTYPE=linux +MACHTYPE=x86_64 +SHLVL=1 +PWD=/home/rh2adm +GROUP=sapsys +HOST=vm37-39 +REMOTEHOST=10.66.136.143 +MAIL=/var/spool/mail/rh2adm +HOSTNAME=vm37-39 +LANG=en_US.UTF-8 +LESSOPEN=||/usr/bin/lesspipe.sh %s +SAPSYSTEMNAME=RH2 +DIR_LIBRARY=/usr/sap/RH2/SYS/exe/run +RSEC_SSFS_DATAPATH=/usr/sap/RH2/SYS/global/security/rsecssfs/data +RSEC_SSFS_KEYPATH=/usr/sap/RH2/SYS/global/security/rsecssfs/key +rsdb_ssfs_connect=0 +LD_LIBRARY_PATH=/usr/sap/RH2/SYS/exe/run:/usr/sap/RH2/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH2/lib +dbms_type=ADA +'''.strip() + +HOSTNAME = 'vm37-39.pek2.com' + + +class FakeContext(object): + def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None, signum=None): + tmp_cmd = cmd.strip().split() + if 'rh1adm' in tmp_cmd[2]: + return 0, RH1ADM_ENV.splitlines() + if 'rh2adm' in tmp_cmd[2]: + return 0, RH2ADM_ENV.splitlines() + return -1, [] + + +def test_hana_instance_skip(): + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap} + broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + with pytest.raises(SkipComponent): + LocalSpecs.sap_hana_instance(broker) + + +def test_sid(): + # Good + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap} + broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + result = sap_sid(broker) + assert result is not None + assert isinstance(result, list) + assert result == sorted(set(v.sid.lower() for v in sap.values())) + + +def test_hana_sid(): + # Good + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap} + broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + broker.update({LocalSpecs.sap_hana_instance: LocalSpecs.sap_hana_instance(broker)}) + result = sap_hana_sid(broker) + assert result is not None + assert isinstance(result, list) + assert result == list(set(v.sid.lower() for v in sap.values() if v.type == 'HDB')) + + # Bad + broker.update({LocalSpecs.sap_hana_instance: []}) + with pytest.raises(SkipComponent): + sap_hana_sid(broker) + + +def test_hana_sid_SID_nr(): + # Good + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap} + broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + broker.update({LocalSpecs.sap_hana_instance: LocalSpecs.sap_hana_instance(broker)}) + result = sap_hana_sid_SID_nr(broker) + assert result is not None + assert isinstance(result, list) + assert result == list((v.sid.lower(), v.sid, v.number) for v in sap.values() if v.type == 'HDB') + + # Bad + broker.update({LocalSpecs.sap_hana_instance: []}) + with pytest.raises(SkipComponent): + sap_hana_sid_SID_nr(broker) + + +def test_ld_library_path_of_user(): + # Good + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap, HostContext: FakeContext()} + 
broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + broker.update({sap_sid: sap_sid(broker)}) + result = ld_library_path_of_user(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + assert sorted(result.content) == [ + 'rh1adm /usr/sap/RH1/SYS/exe/run:/usr/sap/RH1/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH1/lib', + 'rh2adm /usr/sap/RH2/SYS/exe/run:/usr/sap/RH2/SYS/exe/uc/linuxx86_64:/sapdb/clients/RH2/lib', + ] + assert result.relative_path == 'insights_commands/echo_user_LD_LIBRARY_PATH' + + # Bad + inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_GOOD)) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME1)), None, None, None, None) + sap = Sap(hn, inst, None) + broker = {Sap: sap, HostContext: FakeContext()} + broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) + broker.update({sap_sid: sap_sid(broker)}) + with pytest.raises(SkipComponent): + result = ld_library_path_of_user(broker) + assert result is None From 0a480cb4e61298994a6e4a6ebe4ed4e104453362 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 8 Jul 2021 15:09:27 +0800 Subject: [PATCH 480/892] New spec to get satellite missed queues (#3053) * New spec to get missed pulp agent queues on Satellite server Signed-off-by: Huanhuan Li * Add parser for "satellite_missed_queues" * Update code based on suggestions Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 8 + .../satellite_missed_queues.rst | 3 + insights/parsers/satellite_missed_queues.py | 47 ++++ .../tests/test_satellite_missed_queues.py | 68 +++++ insights/specs/__init__.py | 1 + .../datasources/satellite_missed_queues.py | 106 ++++++++ insights/specs/default.py | 3 +- .../test_satellite_missed_queues.py | 245 ++++++++++++++++++ 8 files changed, 480 insertions(+), 1 deletion(-) create mode 100644 docs/shared_parsers_catalog/satellite_missed_queues.rst create mode 100644 insights/parsers/satellite_missed_queues.py create mode 100644 insights/parsers/tests/test_satellite_missed_queues.py create mode 100644 insights/specs/datasources/satellite_missed_queues.py create mode 100644 insights/tests/datasources/test_satellite_missed_queues.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 08b0b404e..737e2cf53 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -66,3 +66,11 @@ insights.specs.datasources.sap :members: sap_sid, sap_hana_sid, sap_hana_sid_SID_nr, ld_library_path_of_user, LocalSpecs :show-inheritance: :undoc-members: + +insights.specs.datasources.satellite_missed_queues +-------------------------------------------------- + +.. automodule:: insights.specs.datasources.satellite_missed_queues + :members: satellite_missed_pulp_agent_queues, LocalSpecs + :show-inheritance: + :undoc-members: diff --git a/docs/shared_parsers_catalog/satellite_missed_queues.rst b/docs/shared_parsers_catalog/satellite_missed_queues.rst new file mode 100644 index 000000000..74992bcb4 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_missed_queues.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.satellite_missed_queues
+   :members:
+   :show-inheritance:
diff --git a/insights/parsers/satellite_missed_queues.py b/insights/parsers/satellite_missed_queues.py
new file mode 100644
index 000000000..292adfe92
--- /dev/null
+++ b/insights/parsers/satellite_missed_queues.py
@@ -0,0 +1,47 @@
+"""
+SatelliteMissedQueues - datasource ``satellite_missed_pulp_agent_queues``
+=========================================================================
+"""
+from insights import parser, Parser
+from insights.specs import Specs
+from insights import SkipComponent
+
+
+@parser(Specs.satellite_missed_pulp_agent_queues)
+class SatelliteMissedQueues(Parser):
+    """This parser parses the output of the ``satellite_missed_pulp_agent_queues`` datasource.
+
+    Typical output from the datasource is::
+
+        pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13
+        pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16
+        0
+
+    Examples:
+        >>> satellite_queues.truncated
+        False
+        >>> 'pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9' in satellite_queues.missed_queues
+        True
+        >>> satellite_queues.missed_queues['pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9']
+        '2018-01-16 00:06:13'
+
+    Attributes:
+        missed_queues(dict): Satellite missed pulp agent queues.
+        truncated(bool): Whether the list of missed queues was truncated.
+
+    Raises:
+        SkipComponent: when there are no missed queues or the content isn't in the expected format.
+    """
+    def parse_content(self, content):
+        self.missed_queues = {}
+        self.truncated = None
+        if len(content) >= 2:
+            for line in content[:-1]:
+                if ':' not in line:
+                    raise SkipComponent
+                parts = line.split(':', 1)
+                self.missed_queues[parts[0]] = parts[1]
+            if content[-1].strip() in ['0', '1']:
+                self.truncated = True if content[-1].strip() == '1' else False
+        if not self.missed_queues or self.truncated is None:
+            raise SkipComponent
diff --git a/insights/parsers/tests/test_satellite_missed_queues.py b/insights/parsers/tests/test_satellite_missed_queues.py
new file mode 100644
index 000000000..0b67600d0
--- /dev/null
+++ b/insights/parsers/tests/test_satellite_missed_queues.py
@@ -0,0 +1,68 @@
+import doctest
+import pytest
+
+from insights.parsers import satellite_missed_queues
+from insights.tests import context_wrap
+from insights import SkipComponent
+
+
+SATELLITE_MISSED_QUEUES_OUTPUT1 = """
+pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13
+pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16
+0
+""".strip()
+
+SATELLITE_MISSED_QUEUES_OUTPUT2 = """
+pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13
+pulp.agent.iac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16
+pulp.agent.aac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:17
+pulp.agent.bac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:18
+pulp.agent.cac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:19
+pulp.agent.dac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:20
+pulp.agent.eac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:21
+pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:22
+pulp.agent.gac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:23
+pulp.agent.hac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:24
+1
+""".strip()
+
+SATELLITE_MISSED_QUEUES_BAD_OUTPUT_1 = """
+pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13
+""".strip()
+
+SATELLITE_MISSED_QUEUES_BAD_OUTPUT_2 = """
+abc
+def
+""".strip()
+
+SATELLITE_MISSED_QUEUES_BAD_OUTPUT_3 = """
+pulp.agent.eac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:21 +pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:22 +pulp.agent.gac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:23 +pulp.agent.hac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:24 +""".strip() + + +def test_satellite_queue(): + queues = satellite_missed_queues.SatelliteMissedQueues(context_wrap(SATELLITE_MISSED_QUEUES_OUTPUT2)) + assert queues.truncated + assert len(queues.missed_queues) == 10 + assert 'pulp.agent.hac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024' in queues.missed_queues + assert queues.missed_queues['pulp.agent.hac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024'] == '2018-01-16 00:06:24' + + +def test_doc_examples(): + env = { + 'satellite_queues': satellite_missed_queues.SatelliteMissedQueues(context_wrap(SATELLITE_MISSED_QUEUES_OUTPUT1)), + } + failed, total = doctest.testmod(satellite_missed_queues, globs=env) + assert failed == 0 + + +def test_exception(): + with pytest.raises(SkipComponent): + satellite_missed_queues.SatelliteMissedQueues(context_wrap(SATELLITE_MISSED_QUEUES_BAD_OUTPUT_1)) + with pytest.raises(SkipComponent): + satellite_missed_queues.SatelliteMissedQueues(context_wrap(SATELLITE_MISSED_QUEUES_BAD_OUTPUT_2)) + with pytest.raises(SkipComponent): + satellite_missed_queues.SatelliteMissedQueues(context_wrap(SATELLITE_MISSED_QUEUES_BAD_OUTPUT_3)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index e5e70a16c..469607b01 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -348,6 +348,7 @@ class Specs(SpecSet): meminfo = RegistryPoint() messages = RegistryPoint(filterable=True) metadata_json = RegistryPoint(raw=True) + satellite_missed_pulp_agent_queues = RegistryPoint() mistral_executor_log = RegistryPoint(filterable=True) mlx4_port = RegistryPoint(multi_output=True) modinfo_i40e = RegistryPoint() diff --git a/insights/specs/datasources/satellite_missed_queues.py b/insights/specs/datasources/satellite_missed_queues.py new file mode 100644 index 000000000..b38562003 --- /dev/null +++ b/insights/specs/datasources/satellite_missed_queues.py @@ -0,0 +1,106 @@ +import re + +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider, simple_command +from insights.combiners.satellite_version import SatelliteVersion +from insights.specs import Specs +from insights.core.filters import add_filter + + +NODE_NOT_FOUND_ERROR = 'error Error on attach: Node not found' +add_filter(Specs.messages, NODE_NOT_FOUND_ERROR) + + +class LocalSpecs(Specs): + """ Local specs used only by get_satellite_missed_pulp_agent_queues datasources """ + + content_host_uuids = simple_command( + '/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c "select uuid from katello_content_facets where uuid is not null;"', + deps=[SatelliteVersion] + ) + qpid_queues = simple_command( + '/usr/bin/qpid-stat -q --ssl-certificate=/etc/pki/pulp/qpid/client.crt -b amqps://localhost:5671', + deps=[SatelliteVersion] + ) + + +@datasource(LocalSpecs.content_host_uuids, LocalSpecs.qpid_queues, Specs.messages, HostContext, SatelliteVersion) +def satellite_missed_pulp_agent_queues(broker): + """ + This datasource provides the missed pulp agent queues information on satellite server. 
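+
+    It works by correlating three pieces of information: the ``Node not
+    found`` errors that qpidd logs to ``/var/log/messages``, the content host
+    uuids stored in the foreman database, and the queues currently reported
+    by ``qpid-stat``.  A queue is reported only when its uuid belongs to a
+    content host and the queue no longer exists in qpid.  For example, the
+    log line::
+
+        Jan 16 00:06:13 satellite qpidd: 2018-01-16 00:06:13 [Protocol] error Error on attach: Node not found: pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9
+
+    produces the entry ``pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13``.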
+
+    Note:
+        This datasource may be executed using the following command:
+
+        ``insights cat --no-header satellite_missed_pulp_agent_queues``
+
+    Sample output::
+
+        pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13
+        pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16
+        0
+
+    Returns:
+        str: All the missed pulp agent queues, with a 0/1 flag on the last
+            line that indicates whether the data is truncated. If the value
+            of the last line is 0, all the missed queues are returned. If the
+            value of the last line is 1, there are many missed queues, and
+            only the first 10 are returned to avoid a rendering error.
+
+    Raises:
+        SkipComponent: When the error doesn't happen or the missed queues have been recreated.
+
+    """
+    def _parse_non_existing_queues_in_msg():
+        agentq_date_re = re.compile(
+            r'^(?P<date>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}) \[Protocol\] error Error on attach: Node not found: (?P<agentq>pulp.agent.[0-9a-f-]+)$'
+        )
+        agent_queue_last_date = {}
+        ms_obj = broker[Specs.messages]
+        for line in ms_obj.stream():
+            if NODE_NOT_FOUND_ERROR in line:
+                info, msg = [i.strip() for i in line.split(': ', 1)]
+                info_splits = info.rsplit(None, 2)
+                if len(info_splits) >= 3 and info_splits[2].startswith('qpidd'):
+                    # The timestamp from syslog doesn't contain the year, but the
+                    # message itself does - so use that.
+                    match = agentq_date_re.search(msg)
+                    if match:
+                        agent_queue_last_date[match.group('agentq')] = match.group('date')
+        return agent_queue_last_date
+
+    def _get_content_host_uuid():
+        output = broker[LocalSpecs.content_host_uuids].content
+        host_uuids = []
+        if len(output) > 3:
+            for line in output[2:-1]:
+                host_uuids.append(line.strip())
+        return host_uuids
+
+    def _get_qpid_queues():
+        output = broker[LocalSpecs.qpid_queues].content
+        current_queues = []
+        if len(output) > 3:
+            current_queues = [line.split()[0].strip() for line in output[3:] if line.split()[0].startswith('pulp.agent')]
+        return current_queues
+
+    missed_queues_in_log = _parse_non_existing_queues_in_msg()
+    if missed_queues_in_log:
+        host_uuids = _get_content_host_uuid()
+        if host_uuids:
+            qpid_queues = _get_qpid_queues()
+            missed_queues = []
+            too_more_data = 0
+            for queue in missed_queues_in_log:
+                if queue.split('.')[-1] in host_uuids and queue not in qpid_queues:
+                    missed_queues.append('%s:%s' % (queue, missed_queues_in_log[queue]))
+                    # return at most 10 missed queues, in case data that is too long can't be rendered
+                    if len(missed_queues) >= 10:
+                        too_more_data = 1
+                        break
+            if missed_queues:
+                missed_queues.append(str(too_more_data))
+                return DatasourceProvider(missed_queues, relative_path='insights_commands/satellite_missed_qpid_queues')
+    raise SkipComponent
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 45be50297..648f765b1 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -38,7 +38,7 @@
 from insights.specs import Specs
 from insights.specs.datasources import (
     cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides,
-    ps as ps_datasource, sap)
+    ps as ps_datasource, sap, satellite_missed_queues)
 from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr
 
 
@@ -657,6 +657,7 @@ def pmlog_summary_file(broker):
     satellite_custom_ca_chain = simple_command(
         '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt',
     )
+    satellite_missed_pulp_agent_queues = 
satellite_missed_queues.satellite_missed_pulp_agent_queues satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_non_yum_type_repos = simple_command( "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: \"yum_importer\"}}).count()'", diff --git a/insights/tests/datasources/test_satellite_missed_queues.py b/insights/tests/datasources/test_satellite_missed_queues.py new file mode 100644 index 000000000..24fbd5282 --- /dev/null +++ b/insights/tests/datasources/test_satellite_missed_queues.py @@ -0,0 +1,245 @@ +import pytest +from mock.mock import Mock + +from insights.core.dr import SkipComponent +from insights.core.spec_factory import DatasourceProvider +from insights.specs import Specs +from insights.specs.datasources.satellite_missed_queues import LocalSpecs, satellite_missed_pulp_agent_queues + + +MESSAGE_WITH_ERRORS = """ +Jan 16 00:06:13 satellite qpidd: 2018-01-16 00:06:13 [Protocol] error Error on attach: Node not found: pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9 +Jan 16 00:06:13 satellite qpidd[51948]: 2018-01-16 00:06:13 [Protocol] error Error on attach: Node not found: pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9 +Jan 16 00:06:16 satellite qpidd: 2018-01-16 00:06:16 [Protocol] error Error on attach: Node not found: pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024 +Jan 16 00:06:16 satellite qpidd[51948]: 2018-01-16 00:06:16 [Protocol] error Error on attach: Node not found: pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024 +Jan 16 00:06:17 satellite qpidd[51948]: 2018-01-16 00:06:17 [Protocol] error Error on attach: Node not found: pulp.agent.a91bbcad-4310-47d3-b550-57b904ef815d +Jan 16 00:06:17 satellite qpidd: 2018-01-16 00:06:17 [Protocol] error Error on attach: Node not found: pulp.agent.a91bbcad-4310-47d3-b550-57b904ef815d +Jan 16 00:06:21 satellite qpidd: 2018-01-16 00:06:21 [Protocol] error Error on attach: Node not found: pulp.agent.5fc8b4e2-1d2e-47e9-a34f-2017bb3bb417 +Jan 16 00:06:21 satellite qpidd[51948]: 2018-01-16 00:06:21 [Protocol] error Error on attach: Node not found: pulp.agent.5fc8b4e2-1d2e-47e9-a34f-2017bb3bb417 +Jan 16 00:06:23 satellite qpidd: 2018-01-16 00:06:23 [Protocol] error Error on attach: Node not found: pulp.agent.8330f90a-d4f0-41e2-8792-4f1b9960ceef +Jan 16 00:06:23 satellite qpidd[51948]: 2018-01-16 00:06:23 [Protocol] error Error on attach: Node not found: pulp.agent.8330f90a-d4f0-41e2-8792-4f1b9960ceef +Jan 16 00:06:25 satellite qpidd: 2018-01-16 00:06:25 [Protocol] error Error on attach: Node not found: pulp.agent.4caf276f-644a-4327-8975-fb34c83a788c +Jan 16 00:06:25 satellite qpidd[51948]: 2018-01-16 00:06:25 [Protocol] error Error on attach: Node not found: pulp.agent.4caf276f-644a-4327-8975-fb34c83a788c +Jan 16 00:06:32 satellite qpidd: 2018-01-16 00:06:32 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93b0e +Jan 16 00:06:32 satellite qpidd[51948]: 2018-01-16 00:06:32 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93b0e +Jan 16 00:06:34 satellite qpidd: 2018-01-16 00:06:34 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bde +Jan 16 00:06:34 satellite qpidd[51948]: 2018-01-16 00:06:34 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bde +Jan 16 00:06:35 satellite qpidd: 2018-01-16 00:06:35 [Protocol] error Error on attach: Node not found: 
pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bae +Jan 16 00:06:35 satellite qpidd[51948]: 2018-01-16 00:06:35 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bae +Jan 16 00:06:36 satellite qpidd: 2018-01-16 00:06:36 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bfe +Jan 16 00:06:36 satellite qpidd[51948]: 2018-01-16 00:06:36 [Protocol] error Error on attach: Node not found: pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bfe +""".strip() + +MESSAGE_WITHOUT_ERROR = """ +Jun 13 03:28:03 ab.cd pulp: gofer.messaging.adapter.qpid.connection:INFO: closed: qpid+ssl://localhost:5671 +Jun 13 03:28:04 ab.cd pulp: gofer.messaging.adapter.qpid.connection:INFO: closed: qpid+ssl://localhost:5671 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:07 ab.cd pulp: pulp.server.db.connection:INFO: Attempting to connect to localhost:27017 +Jun 13 03:28:08 ab.cd pulp: pulp.server.db.connection:INFO: Write concern for Mongo connection: {} +Jun 13 03:28:08 ab.cd pulp: pulp.server.db.connection:INFO: Write concern for Mongo connection: { +""".strip() + +MESSAGE_WITH_ERROR_BUT_QUEUE_EXISTS = """ +Jan 16 00:06:13 satellite qpidd: 2018-01-16 00:06:13 [Protocol] error Error on attach: Node not found: pulp.agent.49041b8c-cf5b-4ec6-a8db-66ea70d04566 +Jan 16 00:06:13 satellite qpidd[51948]: 2018-01-16 00:06:13 [Protocol] error Error on attach: Node not found: pulp.agent.49041b8c-cf5b-4ec6-a8db-66ea70d04566 +Jan 16 00:06:16 satellite qpidd: 2018-01-16 00:06:16 [Protocol] error Error on attach: Node not found: pulp.agent.39f7b444-1532-43c2-ab1f-dd62a69a3be2 +Jan 16 00:06:16 satellite qpidd[51948]: 2018-01-16 00:06:16 [Protocol] error Error on attach: Node not found: pulp.agent.39f7b444-1532-43c2-ab1f-dd62a69a3be2 +Jan 16 00:06:17 satellite qpidd[51948]: 2018-01-16 00:06:17 [Protocol] error Error on attach: Node not found: pulp.agent.70d37c60-26d5-415d-ae95-722806b802b1 +Jan 16 00:06:17 satellite qpidd: 2018-01-16 00:06:17 [Protocol] error Error on attach: Node not found: pulp.agent.70d37c60-26d5-415d-ae95-722806b802b1 +""".strip() + +HOST_UUIDS = """ + uuid +-------------------------------------- + 49041b8c-cf5b-4ec6-a8db-66ea70d04566 + c274234a-bd93-4868-8267-a5ea3a434c33 + 70d37c60-26d5-415d-ae95-722806b802b1 + 39f7b444-1532-43c2-ab1f-dd62a69a3be2 + 09008eec-aba6-4174-aa9f-e930004ce5c9 + fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024 +(6 rows) +""".strip() + +HOST_UUIDS_1 = """ + uuid +-------------------------------------- +(0 rows) +""".strip() + +HOST_UUIDS_2 = """ + uuid +-------------------------------------- + 49041b8c-cf5b-4ec6-a8db-66ea70d04566 + c274234a-bd93-4868-8267-a5ea3a434c33 + 70d37c60-26d5-415d-ae95-722806b802b1 + 39f7b444-1532-43c2-ab1f-dd62a69a3be2 + 09008eec-aba6-4174-aa9f-e930004ce5c9 + fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024 + 
a91bbcad-4310-47d3-b550-57b904ef815d + 5fc8b4e2-1d2e-47e9-a34f-2017bb3bb417 + 8330f90a-d4f0-41e2-8792-4f1b9960ceef + 4caf276f-644a-4327-8975-fb34c83a788c + 076a1c3f-3dde-4523-b26c-fcfffbd93b0e + 076a1c3f-3dde-4523-b26c-fcfffbd93bde + 076a1c3f-3dde-4523-b26c-fcfffbd93bae + 076a1c3f-3dde-4523-b26c-fcfffbd93bfe +(14 rows) +""".strip() + +QPID_QUEUES = """ +Queues + queue dur autoDel excl msg msgIn msgOut bytes bytesIn bytesOut cons bind + =============================================================================================================================================================== + 1c82aae4-3f19-4739-8799-2140e47d8af6:1.0 Y Y 0 8 8 0 5.04k 5.04k 1 2 + 1c82aae4-3f19-4739-8799-2140e47d8af6:2.0 Y Y 0 4 4 0 2.55k 2.55k 1 2 + 28899b04-2085-4366-ae85-fe70b1e930ff:1.0 Y Y 0 1 1 0 243 243 1 2 + 2dc38953-d515-4f93-92b0-6dd13d872632:1.0 Y Y 0 8 8 0 5.04k 5.04k 1 2 + 2dc38953-d515-4f93-92b0-6dd13d872632:2.0 Y Y 0 4 4 0 2.55k 2.55k 1 2 + 41fa4a8d-7061-4527-b4ec-19f33085cde3:1.0 Y Y 0 4 4 0 2.46k 2.46k 1 2 + 50570ead-86c5-410a-bc82-8d3c84fbfc6e:1.0 Y Y 0 8 8 0 5.04k 5.04k 1 2 + 50570ead-86c5-410a-bc82-8d3c84fbfc6e:2.0 Y Y 0 4 4 0 2.55k 2.55k 1 2 + 6d51a877-f190-4d84-80bd-ee75bd2571ec:1.0 Y Y 0 1 1 0 243 243 1 2 + 8023568e-6343-4010-8f7b-420c4c05bcb9:1.0 Y Y 0 4 4 0 2.42k 2.42k 1 2 + 80841d92-2539-42b2-941d-9ccbf6513be2:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + 828ac1e2-520a-4df1-954c-f219e6ba811f:0.0 Y Y 0 0 0 0 0 0 1 2 + 916ba27c-08f6-4854-9e6f-80db7dee6cfd:1.0 Y Y 0 1 1 0 243 243 1 2 + 927aefc9-ebe6-4548-a52f-83565ab0c75e:1.0 Y Y 0 8 8 0 5.04k 5.04k 1 2 + 927aefc9-ebe6-4548-a52f-83565ab0c75e:2.0 Y Y 0 4 4 0 2.47k 2.47k 1 2 + a3b72515-b58d-4217-a060-bd871fe53418:1.0 Y Y 0 1 1 0 243 243 1 2 + b5e362ac-926a-4a57-a42d-de8dfb747fd1:1.0 Y Y 0 8 8 0 5.04k 5.04k 1 2 + b5e362ac-926a-4a57-a42d-de8dfb747fd1:2.0 Y Y 0 4 4 0 2.55k 2.55k 1 2 + b8995e8a-9d78-4665-86d8-7b3cded8a8f2:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + bcf2f3fc-6b74-4329-8db6-3b90f62c5fd9:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + c0b5d7ab-ef90-404d-90ff-f2623846f375:1.0 Y Y 0 4 4 0 2.42k 2.42k 1 2 + c8ccd59e-4581-4287-b910-6bf1ede38297:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + cebd9c62-42ff-4fa4-b148-6e8339751b4e:1.0 Y Y 0 1 1 0 243 243 1 2 + celery Y 0 15.0k 15.0k 0 12.7m 12.7m 4 2 + e688cf97-9868-4d91-a3b1-9e60655331ac:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + e79c3bd9-ada3-4b0e-86e3-062564e26488:1.0 Y Y 0 8 8 0 4.89k 4.89k 1 2 + pulp.agent.39f7b444-1532-43c2-ab1f-dd62a69a3be2 Y 6 6 0 3.67k 3.67k 0 0 1 + pulp.agent.49041b8c-cf5b-4ec6-a8db-66ea70d04566 Y 2 2 0 1.18k 1.18k 0 0 1 + pulp.agent.70d37c60-26d5-415d-ae95-722806b802b1 Y 0 4 4 0 2.70k 2.70k 1 1 + pulp.task Y 0 12 12 0 8.61k 8.61k 3 1 + reserved_resource_worker-0@ab.cd.def.com.celery.pidbox Y 0 0 0 0 0 0 1 2 + reserved_resource_worker-0@ab.cd.def.com.dq2 Y 0 1.20k 1.20k 0 9.87m 9.87m 1 2 + reserved_resource_worker-1@ab.cd.def.com.celery.pidbox Y 0 0 0 0 0 0 1 2 + reserved_resource_worker-1@ab.cd.def.com.dq2 Y 0 398 398 0 507k 507k 1 2 + reserved_resource_worker-2@ab.cd.def.com.celery.pidbox Y 0 0 0 0 0 0 1 2 + reserved_resource_worker-2@ab.cd.def.com.dq2 Y 0 488 488 0 622k 622k 1 2 + reserved_resource_worker-3@ab.cd.def.com.celery.pidbox Y 0 0 0 0 0 0 1 2 + reserved_resource_worker-3@ab.cd.def.com.dq2 Y 0 574 574 0 732k 732k 1 2 + resource_manager Y 0 1.33k 1.33k 0 10.6m 10.6m 1 2 + resource_manager@ab.cd.def.com.celery.pidbox Y 0 0 0 0 0 0 1 2 + resource_manager@ab.cd.def.com.dq2 Y 0 0 0 0 0 0 1 2 +""".strip() + +MISSED_QUEUES_OUTPUT = """ +pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13 
+pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16 +0 +""".strip() + +MISSED_QUEUES_OUTPUT_2 = """ +pulp.agent.09008eec-aba6-4174-aa9f-e930004ce5c9:2018-01-16 00:06:13 +pulp.agent.fac7ebbc-ee4f-44b4-9fe0-3f4e42c7f024:2018-01-16 00:06:16 +pulp.agent.a91bbcad-4310-47d3-b550-57b904ef815d:2018-01-16 00:06:17 +pulp.agent.5fc8b4e2-1d2e-47e9-a34f-2017bb3bb417:2018-01-16 00:06:21 +pulp.agent.8330f90a-d4f0-41e2-8792-4f1b9960ceef:2018-01-16 00:06:23 +pulp.agent.4caf276f-644a-4327-8975-fb34c83a788c:2018-01-16 00:06:25 +pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93b0e:2018-01-16 00:06:32 +pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bde:2018-01-16 00:06:34 +pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bae:2018-01-16 00:06:35 +pulp.agent.076a1c3f-3dde-4523-b26c-fcfffbd93bfe:2018-01-16 00:06:36 +1 +""".strip() + +RELATIVE_PATH = "insights_commands/satellite_missed_qpid_queues" + + +def mock_stream(): + for line in MESSAGE_WITH_ERRORS.splitlines(): + yield line + + +def mock_stream_without_error(): + for line in MESSAGE_WITHOUT_ERROR.splitlines(): + yield line + + +def mock_stream_with_queue_exists(): + for line in MESSAGE_WITH_ERROR_BUT_QUEUE_EXISTS.splitlines(): + yield line + + +def test_satellite_missed_queues(): + host_uuids = Mock() + host_uuids.content = HOST_UUIDS.splitlines() + qpid_queues = Mock() + qpid_queues.content = QPID_QUEUES.splitlines() + messages = Mock() + messages.stream = mock_stream + broker = { + Specs.messages: messages, + LocalSpecs.content_host_uuids: host_uuids, + LocalSpecs.qpid_queues: qpid_queues, + } + result = satellite_missed_pulp_agent_queues(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=MISSED_QUEUES_OUTPUT.splitlines(), relative_path=RELATIVE_PATH) + assert sorted(result.content) == sorted(expected.content) + assert result.relative_path == expected.relative_path + + +def test_satellite_missed_queues_with_more_data(): + host_uuids = Mock() + host_uuids.content = HOST_UUIDS_2.splitlines() + qpid_queues = Mock() + qpid_queues.content = QPID_QUEUES.splitlines() + messages = Mock() + messages.stream = mock_stream + broker = { + Specs.messages: messages, + LocalSpecs.content_host_uuids: host_uuids, + LocalSpecs.qpid_queues: qpid_queues, + } + result = satellite_missed_pulp_agent_queues(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=MISSED_QUEUES_OUTPUT_2.splitlines(), relative_path=RELATIVE_PATH) + assert sorted(result.content) == sorted(expected.content) + assert result.relative_path == expected.relative_path + + +def test_exception(): + host_uuids = Mock() + host_uuids.content = HOST_UUIDS_2.splitlines() + qpid_queues = Mock() + qpid_queues.content = QPID_QUEUES.splitlines() + messages = Mock() + messages.stream = mock_stream_without_error + broker = { + Specs.messages: messages, + LocalSpecs.content_host_uuids: host_uuids, + LocalSpecs.qpid_queues: qpid_queues, + } + with pytest.raises(SkipComponent): + satellite_missed_pulp_agent_queues(broker) + + new_messages = Mock() + new_messages.stream = mock_stream_with_queue_exists + broker[Specs.messages] = new_messages + with pytest.raises(SkipComponent): + satellite_missed_pulp_agent_queues(broker) + + empty_uuids = Mock() + empty_uuids.content = HOST_UUIDS_1.splitlines() + broker[LocalSpecs.content_host_uuids] = empty_uuids + with pytest.raises(SkipComponent): + satellite_missed_pulp_agent_queues(broker) From be37ee634ef8bf4dae4fbd38f90680325f3ed2e1 Mon Sep 
17 00:00:00 2001 From: Stephen Adams Date: Thu, 8 Jul 2021 13:00:54 -0400 Subject: [PATCH 481/892] update uploader_json_map.json Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 30 +++++++++++++++++--------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index d60fde106..7ed00199d 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1173,7 +1173,6 @@ "backupserver", "catalina.base", "ceilometer-poll", - "ceph-osd", "chronyd", "cinder-volume", "clvmd", @@ -1718,6 +1717,14 @@ ], "symbolic_name": "openshift_hosts" }, + { + "file": "/etc/tower/settings.py", + "pattern": [ + "AWX_CLEANUP_PATHS", + "AWX_PROOT_BASE_PATH" + ], + "symbolic_name": "ansible_tower_settings" + }, { "file": "/etc/redhat-access-insights/machine-id", "pattern": [], @@ -2337,7 +2344,6 @@ "(28)No space left on device: ", "AH00485: scoreboard is full, not at MaxRequestWorkers", "ModSecurity: collections_remove_stale: Failed deleting collection", - "Require ServerLimit > 0, setting to 1", "The mpm module (prefork.c) is not supported by mod_http2", "[crit] Memory allocation failed, aborting process", "and would exceed the ServerLimit value of ", @@ -2345,13 +2351,10 @@ "consider raising the MaxRequestWorkers setting", "exceed ServerLimit of", "exceeds ServerLimit value of", - "exceeds compile time limit of", - "exceeds compile-time limit of", "exit signal Segmentation fault", "manager_handler CONFIG error: MEM: Can't update or insert node", "manager_handler ENABLE-APP error: MEM: Can't update or insert context", - "manager_handler ENABLE-APP error: MEM: Can't update or insert host alias", - "not allowed, increasing to 1" + "manager_handler ENABLE-APP error: MEM: Can't update or insert host alias" ], "symbolic_name": "httpd_error_log" }, @@ -4136,6 +4139,14 @@ } ], "globs": [ + { + "glob": "/etc/tower/conf.d/*.py", + "pattern": [ + "AWX_CLEANUP_PATHS", + "AWX_PROOT_BASE_PATH" + ], + "symbolic_name": "ansible_tower_settings" + }, { "glob": "/sys/devices/system/cpu/cpu[0-9]*/online", "symbolic_name": "cpu_cores", @@ -4342,8 +4353,7 @@ "glob": "/etc/ssh/ssh_config.d/*.conf", "symbolic_name": "ssh_config_d", "pattern": [ - "Include", - "SendEnv" + "Include" ] } ], @@ -4365,7 +4375,7 @@ } }, "pre_commands": { - "iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'" + "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-06-24T11:02:05.835577" + "version": "2021-07-01T12:56:10.617317" } \ No newline at end of file From ea71008beaa66262861ad319bc8e564523c88451 Mon Sep 17 00:00:00 2001 From: Jitka Obselkova <41325380+jobselko@users.noreply.github.com> Date: Thu, 15 Jul 2021 15:05:31 +0200 Subject: [PATCH 482/892] Add haproxy_cfg_scl spec and parser (#3138) Signed-off-by: Jitka Obselkova --- insights/parsers/haproxy_cfg.py | 175 ++++++++++++--------- insights/parsers/tests/test_haproxy_cfg.py | 95 ++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 4 files changed, 196 insertions(+), 76 deletions(-) diff --git a/insights/parsers/haproxy_cfg.py b/insights/parsers/haproxy_cfg.py index 1f8b007c5..973e9e970 100644 --- a/insights/parsers/haproxy_cfg.py +++ b/insights/parsers/haproxy_cfg.py @@ -1,86 +1,111 @@ """ -HaproxyCfg - file ``/etc/haproxy/haproxy.cfg`` -============================================== - -Contents of the `haproxy.cfg` file look like:: - - global - daemon - group haproxy - 
log /dev/log local0 - user haproxy - maxconn 20480 - pidfile /var/run/haproxy.pid - - defaults - retries 3 - maxconn 4096 - log global - timeout http-request 10s - timeout queue 1m - timeout connect 10s - -If there are duplicate key items, merge them in to one. Like:: - - option tcpka - }---> option: ["tcpka","tcplog"] - option tcplog - -Attributes: - data (dict): Dictionary of all parsed sections. - lines (list): List of all non-commented lines. - -Examples: - >>> cfg = shared[HaproxyCfg] - >>> cfg.data['global'] - {"daemon": "", "group": "haproxy", "log": " /dev/log local0", - "user": "haproxy", "maxconn": "20480", "pidfile": "/var/run/haproxy.pid"} - >>> cfg.data['global']['group'] - "haproxy" - >>> 'global' in cfg.data - True - >>> 'user' in cfg.data.get('global') - True +Haproxy configuration files +=========================== + +Parsers provided by this module are: + +HaproxyCfg - file ``/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg`` or ``/etc/haproxy/haproxy.cfg`` +--------------------------------------------------------------------------------------------------------------------------- +HaproxyCfgScl - file ``/etc/opt/rh/rh-haproxy18/haproxy/haproxy.cfg`` +--------------------------------------------------------------------- """ -from .. import Parser, parser + +from insights.core import Parser +from insights.core.plugins import parser from insights.specs import Specs -def _parse_content(content): +class HaproxyFile(Parser): + """ + Base class for ``HaproxyCfg`` and ``HaproxyCfgScl`` classes. + + Attributes: + data (dict): Dictionary of all parsed sections. + lines (list): List of all non-commented lines. + + Content of the `haproxy.cfg` file looks like:: + + global + daemon + group haproxy + log /dev/log local0 + user haproxy + maxconn 20480 + pidfile /var/run/haproxy.pid + + defaults + retries 3 + maxconn 4096 + log global + timeout http-request 10s + timeout queue 1m + timeout connect 10s + + Examples: + >>> type(haproxy) + + >>> haproxy.data['global'] + {'daemon': '', 'group': 'haproxy', 'log': '/dev/log local0', 'user': 'haproxy', 'maxconn': '20480', 'pidfile': '/var/run/haproxy.pid'} + >>> haproxy.data['global']['group'] + 'haproxy' + >>> 'global' in haproxy.data + True + >>> 'user' in haproxy.data.get('global') + True + >>> haproxy.data['defaults'] + {'retries': '3', 'maxconn': '4096', 'log': 'global', 'timeout': ['http-request 10s', 'queue 1m', 'connect 10s']} + """ + SECTION_NAMES = ("global", "defaults", "frontend", "backend", "listen") - haproxy_dict = {} - section_dict = {} - lines = [] - for line in content: - line = line.strip() - if line.startswith("#") or line == "": - continue - lines.append(line) - values = line.split(None, 1) - if values[0] in SECTION_NAMES: - # new section like global:{} or listen mysql: {} - section_dict = {} - i_key = values[0] if len(values) == 1 else values[0] + " " + values[1] - haproxy_dict.update({i_key: section_dict}) - else: - # handle attributes in one section - if len(values) == 1: - section_dict[line] = "" + + def __init__(self, context): + self.data = {} + self.lines = [] + super(HaproxyFile, self).__init__(context) + + def parse_content(self, content): + section_dict = {} + + for line in content: + line = line.strip() + if line.startswith("#") or line == "": + continue + + self.lines.append(line) + split_items = line.split(None, 1) + + # Create a new section, e.g. 
global: {} + if split_items[0] in self.SECTION_NAMES: + section_dict = {} + section = split_items[0] if len(split_items) == 1 else split_items[0] + " " + split_items[1] + self.data.update({section: section_dict}) + # Handle attributes inside section else: - attr_key = values[0] - attr_value = values[1] - if attr_key in section_dict: - # if it is not list, convert it to list - if not isinstance(section_dict[attr_key], list): - section_dict[attr_key] = [section_dict[attr_key]] - section_dict[attr_key].append(attr_value) + if len(split_items) == 1: + section_dict[line] = "" else: - section_dict[attr_key] = attr_value - return haproxy_dict, lines + key = split_items[0] + value = split_items[1] + if key in section_dict: + # Convert value into list in case of duplicate key items + if not isinstance(section_dict[key], list): + section_dict[key] = [section_dict[key]] + section_dict[key].append(value) + else: + section_dict[key] = value @parser(Specs.haproxy_cfg) -class HaproxyCfg(Parser): - """Class to parse file ``haproxy.cfg``.""" - def parse_content(self, content): - self.data, self.lines = _parse_content(content) +class HaproxyCfg(HaproxyFile): + """ + Class to parse file ``/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg`` or ``haproxy.cfg``. + """ + pass + + +@parser(Specs.haproxy_cfg_scl) +class HaproxyCfgScl(HaproxyFile): + """ + Class to parse file ``/etc/opt/rh/rh-haproxy18/haproxy/haproxy.cfg``. + """ + pass diff --git a/insights/parsers/tests/test_haproxy_cfg.py b/insights/parsers/tests/test_haproxy_cfg.py index 5ca6e7269..d42162374 100644 --- a/insights/parsers/tests/test_haproxy_cfg.py +++ b/insights/parsers/tests/test_haproxy_cfg.py @@ -1,7 +1,11 @@ +import doctest + from insights.core.context import OSP -from insights.parsers.haproxy_cfg import HaproxyCfg +from insights.parsers import haproxy_cfg +from insights.parsers.haproxy_cfg import HaproxyFile, HaproxyCfg, HaproxyCfgScl from insights.tests import context_wrap + haproxy_osp = """ # This file managed by Puppet global @@ -241,6 +245,80 @@ osp_c = OSP() osp_c.role = "Controller" +HAPROXY_CFG_SCL = """ +global + log 127.0.0.1 local2 + + chroot /var/opt/rh/rh-haproxy18/lib/haproxy + pidfile /var/run/rh-haproxy18-haproxy.pid + maxconn 4000 + user haproxy + group haproxy + daemon + + # turn on stats unix socket + stats socket /var/opt/rh/rh-haproxy18/lib/haproxy/stats + + # utilize system-wide crypto-policies + ssl-default-bind-ciphers PROFILE=SYSTEM + ssl-default-server-ciphers PROFILE=SYSTEM + +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 3 + timeout http-request 10s + timeout queue 1m + timeout connect 10s + timeout client 1m + timeout server 1m + timeout http-keep-alive 10s + timeout check 10s + maxconn 3000 + +frontend main + bind *:5000 + acl url_static path_beg -i /static /images /javascript /stylesheets + acl url_static path_end -i .jpg .gif .png .css .js + + use_backend static if url_static + default_backend app + +backend static + balance roundrobin + server static 127.0.0.1:4331 check + +backend app + balance roundrobin + server app1 127.0.0.1:5001 check + server app2 127.0.0.1:5002 check + server app3 127.0.0.1:5003 check + server app4 127.0.0.1:5004 check +""" + +HAPROXY_DOCTEST = """ +global + daemon + group haproxy + log /dev/log local0 + user haproxy + maxconn 20480 + pidfile /var/run/haproxy.pid + +defaults + retries 3 + maxconn 4096 + log global + timeout http-request 10s + 
timeout queue 1m + timeout connect 10s +""" + def test_haproxy_cls_1(): r = HaproxyCfg(context_wrap(haproxy_osp, osp=osp_c)) @@ -255,3 +333,18 @@ def test_haproxy_cls_2(): assert "maxconn" in result.data.get("global") assert result.data.get("defaults").get("maxconn") == "4096" assert "queue 1m" in result.data.get("defaults").get("timeout") + + +def test_haproxy_cfg_scl(): + haproxy_cfg_scl = HaproxyCfgScl(context_wrap(HAPROXY_CFG_SCL)) + assert "stats socket /var/opt/rh/rh-haproxy18/lib/haproxy/stats" in haproxy_cfg_scl.lines + assert "/var/opt/rh/rh-haproxy18/lib/haproxy" in haproxy_cfg_scl.data.get("global").get("chroot") + assert len(haproxy_cfg_scl.data.get("global")) == 10 + + +def test_doc_examples(): + env = { + "haproxy": HaproxyFile(context_wrap(HAPROXY_DOCTEST)) + } + failed, total = doctest.testmod(haproxy_cfg, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 469607b01..c0601c2f7 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -204,6 +204,7 @@ class Specs(SpecSet): hammer_task_list = RegistryPoint() satellite_enabled_features = RegistryPoint() haproxy_cfg = RegistryPoint() + haproxy_cfg_scl = RegistryPoint() heat_api_log = RegistryPoint(filterable=True) heat_conf = RegistryPoint() heat_crontab = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 648f765b1..47e81fcb2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -302,6 +302,7 @@ def gfs2_mount_points(broker): grubby_default_kernel = simple_command("/sbin/grubby --default-kernel") hammer_task_list = simple_command("/usr/bin/hammer --config /root/.hammer/cli.modules.d/foreman.yml --output csv task list --search 'state=running AND ( label=Actions::Candlepin::ListenOnCandlepinEvents OR label=Actions::Katello::EventQueue::Monitor )'") haproxy_cfg = first_file(["/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg", "/etc/haproxy/haproxy.cfg"]) + haproxy_cfg_scl = simple_file("/etc/opt/rh/rh-haproxy18/haproxy/haproxy.cfg") heat_api_log = first_file(["/var/log/containers/heat/heat_api.log", "/var/log/heat/heat-api.log", "/var/log/heat/heat_api.log"]) heat_conf = first_file(["/var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf", "/etc/heat/heat.conf"]) hostname = simple_command("/bin/hostname -f") From 1c4596252e0e6c34a93fd7d89372d5509a3f268f Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 15 Jul 2021 08:49:39 -0500 Subject: [PATCH 483/892] Add make_none response for rules (#3135) * Add make_none response for rules * Changes how rules that return none are handled. Instead of raising a SkipComponent and ignoring the rule, they will now be counted in the results. 
* Add a make_none response type * Update formatters to handle the new type * Update tests * Fix #3026 Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Remove none rules from detailed output Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> * Update to add none option to text format Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- docs/manpages/insights-run.rst | 3 +++ insights/__init__.py | 2 +- insights/core/plugins.py | 16 ++++++++++- insights/formats/text.py | 40 ++++++++++++++++++++-------- insights/tests/test_rules_fixture.py | 5 ++-- 5 files changed, 51 insertions(+), 15 deletions(-) diff --git a/docs/manpages/insights-run.rst b/docs/manpages/insights-run.rst index 0ad07a79c..5f1645412 100644 --- a/docs/manpages/insights-run.rst +++ b/docs/manpages/insights-run.rst @@ -75,6 +75,9 @@ OPTIONS -m --missing Show missing requirements. + -n --none + Show rules returning ``None``. + -p PLUGINS --plugins PLUGINS Comma-separated list without spaces of package(s) or module(s) containing plugins. diff --git a/insights/__init__.py b/insights/__init__.py index cbb37e94a..4198f13fb 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -38,7 +38,7 @@ from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 from .core.plugins import datasource, condition, incident # noqa: F401 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 -from .core.plugins import make_pass, make_fail, make_info # noqa: F401 +from .core.plugins import make_pass, make_fail, make_info, make_none # noqa: F401 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 from .formats import get_formatter from .parsers import get_active_lines # noqa: F401 diff --git a/insights/core/plugins.py b/insights/core/plugins.py index ef99f6313..92815a894 100644 --- a/insights/core/plugins.py +++ b/insights/core/plugins.py @@ -302,7 +302,7 @@ def process(self, broker): return _make_skip(dr.get_name(self.component), missing) r = self.invoke(broker) if r is None: - raise dr.SkipComponent() + return make_none() if not isinstance(r, Response): raise Exception("rules must return Response objects.") return r @@ -660,3 +660,17 @@ def __init__(self, rule_fqdn, missing): rule_fqdn=rule_fqdn, reason="MISSING_REQUIREMENTS", details=details) + + +class make_none(Response): + """ + Used to create a response for a rule that returns None + + This is not intended to be used by plugins, only infrastructure + but it not private so that we can easily add it to reporting. 
+ """ + response_type = "none" + key_name = "none_key" + + def __init__(self): + super(make_none, self).__init__(key="NONE_KEY") diff --git a/insights/formats/text.py b/insights/formats/text.py index 7d54a35f6..3096734e1 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -90,9 +90,11 @@ def __init__(self, broker, tracebacks=False, dropped=False, fail_only=False, + none=False, stream=sys.stdout): self.broker = broker self.missing = missing + self.none = none self.tracebacks = tracebacks self.dropped = dropped self.fail_only = fail_only @@ -115,7 +117,8 @@ def preprocess(self): title="Fingerprint : "), 'metadata': response(color=Fore.YELLOW, label="META", intl='M', title="Metadata : "), 'metadata_key': response(color=Fore.MAGENTA, label="META", intl='K', title="Metadata Key: "), - 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions : ") + 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions : "), + 'none': response(color=Fore.BLUE, label="RETURNED NONE", intl='N', title="Ret'd None : ") } self.counts = {} @@ -177,19 +180,26 @@ def printit(c, v): name = "%s%s%s" % (resp.color, name, Style.RESET_ALL) print(name, file=self.stream) print(underline, file=self.stream) - print(render_links(c), file=self.stream) - print(render(c, v), file=self.stream) + if v.get('type') != 'none': + print(render(c, v), file=self.stream) print(file=self.stream) for c in sorted(self.broker.get_by_type(rule), key=dr.get_name): v = self.broker[c] _type = v.get('type') + if _type is None: + continue + if _type in self.responses: self.counts[_type] += 1 - if (_type and ((self.fail_only and _type == 'rule') or - ((self.missing and _type == 'skip') or - (not self.fail_only and _type != 'skip')))): + + if ((self.fail_only and _type == 'rule') or + (self.missing and _type == 'skip') or + (self.none and _type == 'none')): printit(c, v) + elif not self.fail_only and _type not in ['skip', 'none']: + printit(c, v) + print(file=self.stream) self.print_header("Rule Execution Summary", Fore.CYAN) @@ -214,23 +224,31 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + p.add_argument("-F", "--fail-only", help="Show FAIL results only. 
Conflict with '-m' and '-n' or '-f', will be dropped when using them together", action="store_true") def __init__(self, args): self.missing = args.missing + self.none = args.none self.tracebacks = args.tracebacks self.dropped = args.dropped self.fail_only = args.fail_only self.formatter = None - if self.missing and self.fail_only: - print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) + if (self.missing or self.none) and self.fail_only: + print(Fore.YELLOW + 'Options conflict: -m/-n and -F, drops -F', file=sys.stderr) self.fail_only = False def preprocess(self, broker): - self.formatter = HumanReadableFormat(broker, - self.missing, self.tracebacks, self.dropped, self.fail_only) + self.formatter = HumanReadableFormat( + broker, + self.missing, + self.tracebacks, + self.dropped, + self.fail_only, + self.none + ) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/tests/test_rules_fixture.py b/insights/tests/test_rules_fixture.py index c6d266c59..1381bde79 100644 --- a/insights/tests/test_rules_fixture.py +++ b/insights/tests/test_rules_fixture.py @@ -1,4 +1,4 @@ -from insights.core.plugins import make_pass, make_fail +from insights.core.plugins import make_pass, make_fail, make_none from insights.specs import Specs from insights.plugins import rules_fixture_plugin from insights.tests import InputData @@ -37,4 +37,5 @@ def test_rules_fixture(run_rule): input_data = InputData('test_ret_none') results = run_rule(rules_fixture_plugin.report, input_data) - assert results is None + expected = make_none() + assert results == expected From 50b42b757aa544f69d6bb25c82f327080c677f9a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 15 Jul 2021 11:14:06 -0400 Subject: [PATCH 484/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index 7ed00199d..ef709ace6 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -4377,5 +4377,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-07-01T12:56:10.617317" + "version": "2021-07-08T13:34:47.107154" } \ No newline at end of file From 0cd73803d5b561995dd6edf5ba4d7f44c17da85c Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 15 Jul 2021 13:41:36 -0500 Subject: [PATCH 485/892] Revert "Add make_none response for rules (#3135)" (#3146) This reverts commit 1c4596252e0e6c34a93fd7d89372d5509a3f268f. --- docs/manpages/insights-run.rst | 3 --- insights/__init__.py | 2 +- insights/core/plugins.py | 16 +---------- insights/formats/text.py | 40 ++++++++-------------------- insights/tests/test_rules_fixture.py | 5 ++-- 5 files changed, 15 insertions(+), 51 deletions(-) diff --git a/docs/manpages/insights-run.rst b/docs/manpages/insights-run.rst index 5f1645412..0ad07a79c 100644 --- a/docs/manpages/insights-run.rst +++ b/docs/manpages/insights-run.rst @@ -75,9 +75,6 @@ OPTIONS -m --missing Show missing requirements. - -n --none - Show rules returning ``None``. - -p PLUGINS --plugins PLUGINS Comma-separated list without spaces of package(s) or module(s) containing plugins. 
diff --git a/insights/__init__.py b/insights/__init__.py index 4198f13fb..cbb37e94a 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -38,7 +38,7 @@ from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 from .core.plugins import datasource, condition, incident # noqa: F401 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 -from .core.plugins import make_pass, make_fail, make_info, make_none # noqa: F401 +from .core.plugins import make_pass, make_fail, make_info # noqa: F401 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 from .formats import get_formatter from .parsers import get_active_lines # noqa: F401 diff --git a/insights/core/plugins.py b/insights/core/plugins.py index 92815a894..ef99f6313 100644 --- a/insights/core/plugins.py +++ b/insights/core/plugins.py @@ -302,7 +302,7 @@ def process(self, broker): return _make_skip(dr.get_name(self.component), missing) r = self.invoke(broker) if r is None: - return make_none() + raise dr.SkipComponent() if not isinstance(r, Response): raise Exception("rules must return Response objects.") return r @@ -660,17 +660,3 @@ def __init__(self, rule_fqdn, missing): rule_fqdn=rule_fqdn, reason="MISSING_REQUIREMENTS", details=details) - - -class make_none(Response): - """ - Used to create a response for a rule that returns None - - This is not intended to be used by plugins, only infrastructure - but it not private so that we can easily add it to reporting. - """ - response_type = "none" - key_name = "none_key" - - def __init__(self): - super(make_none, self).__init__(key="NONE_KEY") diff --git a/insights/formats/text.py b/insights/formats/text.py index 3096734e1..7d54a35f6 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -90,11 +90,9 @@ def __init__(self, broker, tracebacks=False, dropped=False, fail_only=False, - none=False, stream=sys.stdout): self.broker = broker self.missing = missing - self.none = none self.tracebacks = tracebacks self.dropped = dropped self.fail_only = fail_only @@ -117,8 +115,7 @@ def preprocess(self): title="Fingerprint : "), 'metadata': response(color=Fore.YELLOW, label="META", intl='M', title="Metadata : "), 'metadata_key': response(color=Fore.MAGENTA, label="META", intl='K', title="Metadata Key: "), - 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions : "), - 'none': response(color=Fore.BLUE, label="RETURNED NONE", intl='N', title="Ret'd None : ") + 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions : ") } self.counts = {} @@ -180,26 +177,19 @@ def printit(c, v): name = "%s%s%s" % (resp.color, name, Style.RESET_ALL) print(name, file=self.stream) print(underline, file=self.stream) - if v.get('type') != 'none': - print(render(c, v), file=self.stream) + print(render_links(c), file=self.stream) + print(render(c, v), file=self.stream) print(file=self.stream) for c in sorted(self.broker.get_by_type(rule), key=dr.get_name): v = self.broker[c] _type = v.get('type') - if _type is None: - continue - if _type in self.responses: self.counts[_type] += 1 - - if ((self.fail_only and _type == 'rule') or - (self.missing and _type == 'skip') or - (self.none and _type == 'none')): + if (_type and ((self.fail_only and _type == 'rule') or + ((self.missing and _type == 'skip') or + (not self.fail_only and _type != 'skip')))): printit(c, v) - elif not self.fail_only and _type not in ['skip', 'none']: - printit(c, v) - print(file=self.stream) 
self.print_header("Rule Execution Summary", Fore.CYAN) @@ -224,31 +214,23 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' and '-n' or '-f', will be dropped when using them together", action="store_true") + p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") def __init__(self, args): self.missing = args.missing - self.none = args.none self.tracebacks = args.tracebacks self.dropped = args.dropped self.fail_only = args.fail_only self.formatter = None - if (self.missing or self.none) and self.fail_only: - print(Fore.YELLOW + 'Options conflict: -m/-n and -F, drops -F', file=sys.stderr) + if self.missing and self.fail_only: + print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) self.fail_only = False def preprocess(self, broker): - self.formatter = HumanReadableFormat( - broker, - self.missing, - self.tracebacks, - self.dropped, - self.fail_only, - self.none - ) + self.formatter = HumanReadableFormat(broker, + self.missing, self.tracebacks, self.dropped, self.fail_only) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/tests/test_rules_fixture.py b/insights/tests/test_rules_fixture.py index 1381bde79..c6d266c59 100644 --- a/insights/tests/test_rules_fixture.py +++ b/insights/tests/test_rules_fixture.py @@ -1,4 +1,4 @@ -from insights.core.plugins import make_pass, make_fail, make_none +from insights.core.plugins import make_pass, make_fail from insights.specs import Specs from insights.plugins import rules_fixture_plugin from insights.tests import InputData @@ -37,5 +37,4 @@ def test_rules_fixture(run_rule): input_data = InputData('test_ret_none') results = run_rule(rules_fixture_plugin.report, input_data) - expected = make_none() - assert results == expected + assert results is None From fd3f12fa9ede9f2dc567bd90585925cd46292027 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Fri, 16 Jul 2021 16:06:48 +0800 Subject: [PATCH 486/892] Enhance parser awx manage.py (#3133) * Compliance: Improve tmp handling (#3101) Reuse archive's temp dir for the oscap results. Use tempfile for tailoring file. 
Signed-off-by: Andrew Kofink
Co-authored-by: Jeremy Crafts
Signed-off-by: jiazhang

* Enhance parser awx_manage.py

Signed-off-by: jiazhang

* Add filter

Signed-off-by: jiazhang

* Update test filter

Signed-off-by: jiazhang

* Update datasource test

Signed-off-by: jiazhang

* Update docstring

Signed-off-by: jiazhang

* Update example

Signed-off-by: jiazhang

* Sort filter result

Signed-off-by: jiazhang

* Update docstring format

Signed-off-by: jiazhang

* Update docstring format

Signed-off-by: jiazhang

* Update docstring format

Signed-off-by: jiazhang

* Update default.py

Signed-off-by: jiazhang

* Keep original AnsibleTowerLicenseType, add new class AnsibleTowerLicense

Signed-off-by: jiazhang

* Update default.py

Signed-off-by: jiazhang

Co-authored-by: Viliam Krizan
Co-authored-by: Jeremy Crafts
---
 insights/parsers/awx_manage.py                | 28 +++++++-
 insights/parsers/tests/test_awx_manage.py     | 48 ++++++++++---
 insights/specs/__init__.py                    |  1 +
 insights/specs/datasources/awx_manage.py      | 49 +++++++++++++
 insights/specs/default.py                     |  3 +-
 insights/tests/datasources/test_awx_manage.py | 69 +++++++++++++++++++
 6 files changed, 183 insertions(+), 15 deletions(-)
 create mode 100644 insights/specs/datasources/awx_manage.py
 create mode 100644 insights/tests/datasources/test_awx_manage.py

diff --git a/insights/parsers/awx_manage.py b/insights/parsers/awx_manage.py
index 562580e9d..63a07286d 100644
--- a/insights/parsers/awx_manage.py
+++ b/insights/parsers/awx_manage.py
@@ -4,8 +4,10 @@
 
 Parsers contains in this module are:
 
-AnsibleTowerLicenseType - command ``awx-manage check_license``
---------------------------------------------------------------
+AnsibleTowerLicenseType - command ``/usr/bin/awx-manage check_license``
+-----------------------------------------------------------------------
+AnsibleTowerLicense - command ``/usr/bin/awx-manage check_license --data``
+--------------------------------------------------------------------------
 """
 
 from insights import JSONParser, parser, CommandParser
@@ -16,7 +18,7 @@
 @parser(Specs.awx_manage_check_license)
 class AnsibleTowerLicenseType(CommandParser, JSONParser):
     """
-    Parses the output of command ``awx-manage check_license``
+    Parses the output of command ``/usr/bin/awx-manage check_license``
 
     Sample output of the command::
 
@@ -37,3 +39,23 @@ def parse_content(self, content):
         if len(content) != 1:
             raise ParseException("Invalid output: {0}".format(content))
         self.type = content[0].strip()
+
+
+@parser(Specs.awx_manage_check_license_data)
+class AnsibleTowerLicense(CommandParser, JSONParser):
+    """
+    Parses the output of command ``/usr/bin/awx-manage check_license --data``
+
+    Sample output of the command::
+
+        {"instance_count": 100, "license_date": 1655092799, "license_type": "enterprise", "support_level": "Standard", "time_remaining": 29885220, "trial": false, "grace_period_remaining": 32477220, "compliant": true, "date_warning": false, "date_expired": false}
+
+    Examples:
+        >>> type(awx_manage_license)
+        <class 'insights.parsers.awx_manage.AnsibleTowerLicense'>
+        >>> awx_manage_license.data['license_type'] == "enterprise"
+        True
+        >>> awx_manage_license.data['time_remaining']
+        29885220
+    """
+    pass
diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py
index c01fb1531..01e254107 100644
--- a/insights/parsers/tests/test_awx_manage.py
+++ b/insights/parsers/tests/test_awx_manage.py
@@ -1,20 +1,19 @@
 import doctest
 import pytest
 
-from insights.parsers import awx_manage, SkipException, ParseException
-from insights.core import ContentException
-from insights.parsers.awx_manage import AnsibleTowerLicenseType
+from insights.parsers import awx_manage, 
SkipException +from insights.core import ContentException, ParseException +from insights.parsers.awx_manage import AnsibleTowerLicenseType, AnsibleTowerLicense from insights.tests import context_wrap +GOOD_LICENSE = """ +enterprise +""".strip() NO_LICENSE = """ none """.strip() -STD_LICENSE = """ -enterprise -""".strip() - NG_COMMAND_0 = "" NG_COMMAND_1 = """ @@ -27,15 +26,19 @@ load_entry_point('awx==3.6.4', 'console_scripts', 'awx-manage')() """.strip() +AWX_MANAGE_LICENSE = """ +{"contact_email": "test@redhat.com", "company_name": "test Inc", "instance_count": 100, "license_date": 1655092799, "license_type": "enterprise", "subscription_name": "Red Hat Ansible Automation, Standard (100 Managed Nodes)", "sku": "MCT3691", "support_level": "Standard", "product_name": "Red Hat Ansible Automation Platform", "valid_key": true, "satellite": null, "pool_id": "2c92808179803e530179ea5989a157a4", "current_instances": 1, "available_instances": 100, "free_instances": 99, "time_remaining": 29885220, "trial": false, "grace_period_remaining": 32477220, "compliant": true, "date_warning": false, "date_expired": false} +""".strip() + -def test_ansible_tower_license(): +def test_ansible_tower_license_type(): ret = AnsibleTowerLicenseType(context_wrap(NO_LICENSE)) assert ret.type == 'none' - ret = AnsibleTowerLicenseType(context_wrap(STD_LICENSE)) + ret = AnsibleTowerLicenseType(context_wrap(GOOD_LICENSE)) assert ret.type == 'enterprise' -def test_ansible_tower_license_ab(): +def test_ansible_tower_license_ab_type(): with pytest.raises(SkipException): AnsibleTowerLicenseType(context_wrap(NG_COMMAND_0)) @@ -46,9 +49,32 @@ def test_ansible_tower_license_ab(): AnsibleTowerLicenseType(context_wrap(NG_COMMAND_2)) +def test_ansible_tower_license_data(): + ret = AnsibleTowerLicense(context_wrap(AWX_MANAGE_LICENSE)) + assert ret.get("license_type") == 'enterprise' + assert ret.get("instance_count") == 100 + assert ret.get("time_remaining") == 29885220 + assert ret.get("contact_email") == "test@redhat.com" + + +def test_ansible_tower_license__data_ab_type(): + with pytest.raises(ParseException): + AnsibleTowerLicense(context_wrap(NG_COMMAND_0)) + + with pytest.raises(ContentException): + AnsibleTowerLicense(context_wrap(NG_COMMAND_1)) + + with pytest.raises(ParseException): + AnsibleTowerLicense(context_wrap(NG_COMMAND_2)) + + with pytest.raises(ParseException): + AnsibleTowerLicense(context_wrap(NO_LICENSE)) + + def test_awx_manage_doc_examples(): env = { - 'awx_license': AnsibleTowerLicenseType(context_wrap(STD_LICENSE)), + 'awx_license': AnsibleTowerLicenseType(context_wrap(GOOD_LICENSE)), + 'awx_manage_license': AnsibleTowerLicense(context_wrap(AWX_MANAGE_LICENSE)), } failed, total = doctest.testmod(awx_manage, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index c0601c2f7..5f9cb6245 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -18,6 +18,7 @@ class Specs(SpecSet): aws_instance_id_pkcs7 = RegistryPoint() aws_instance_type = RegistryPoint() awx_manage_check_license = RegistryPoint() + awx_manage_check_license_data = RegistryPoint(filterable=True) azure_instance_type = RegistryPoint() azure_instance_plan = RegistryPoint() bios_uuid = RegistryPoint() diff --git a/insights/specs/datasources/awx_manage.py b/insights/specs/datasources/awx_manage.py new file mode 100644 index 000000000..1a2f343b7 --- /dev/null +++ b/insights/specs/datasources/awx_manage.py @@ -0,0 +1,49 @@ +""" +Custom datasources for awx_manage information +""" +from 
insights.core.context import HostContext
+from insights.core.dr import SkipComponent
+from insights.core.plugins import datasource
+from insights.core.spec_factory import DatasourceProvider, simple_command
+from insights.core.filters import get_filters
+from insights.specs import Specs
+import json
+import collections
+
+
+class LocalSpecs(Specs):
+    """ Local specs used only by awx_manage datasources """
+
+    awx_manage_check_license_data_raw = simple_command("/usr/bin/awx-manage check_license --data")
+    """ Returns the output of command ``/usr/bin/awx-manage check_license --data`` """
+
+
+@datasource(LocalSpecs.awx_manage_check_license_data_raw, HostContext)
+def awx_manage_check_license_data_datasource(broker):
+    """
+    This datasource provides the non-sensitive information collected
+    from ``/usr/bin/awx-manage check_license --data``.
+
+    Typical output of the ``/usr/bin/awx-manage check_license --data`` command is::
+
+        {"contact_email": "test@redhat.com", "company_name": "test Inc", "instance_count": 100, "license_date": 1655092799, "license_type": "enterprise", "subscription_name": "Red Hat Ansible Automation, Standard (100 Managed Nodes)", "sku": "MCT3691", "support_level": "Standard", "product_name": "Red Hat Ansible Automation Platform", "valid_key": true, "satellite": null, "pool_id": "2c92808179803e530179ea5989a157a4", "current_instances": 1, "available_instances": 100, "free_instances": 99, "time_remaining": 29885220, "trial": false, "grace_period_remaining": 32477220, "compliant": true, "date_warning": false, "date_expired": false}
+
+    Returns:
+        str: JSON string containing non-sensitive information.
+
+    Raises:
+        SkipComponent: When the filter/path does not exist or any exception occurs.
+    """
+    try:
+        filters = get_filters(Specs.awx_manage_check_license_data)
+        content = broker[LocalSpecs.awx_manage_check_license_data_raw].content
+        if content and filters:
+            json_data = json.loads(content[0])
+            filter_result = {}
+            for item in filters:
+                filter_result[item] = json_data.get(item)
+            if filter_result:
+                return DatasourceProvider(content=json.dumps(collections.OrderedDict(sorted(filter_result.items()))), relative_path='insights_commands/awx-manage_check_license_--data')
+    except Exception as e:
+        raise SkipComponent("Unexpected exception:{e}".format(e=str(e)))
+    raise SkipComponent
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 47e81fcb2..a86ac858d 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -37,7 +37,7 @@
 from insights.parsers.mount import Mount
 from insights.specs import Specs
 from insights.specs.datasources import (
-    cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides,
+    awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides,
     ps as ps_datasource, sap, satellite_missed_queues)
 from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr
 
@@ -94,6 +94,7 @@ class DefaultSpecs(Specs):
     aws_instance_id_doc = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/document --connect-timeout 5", deps=[IsAWS])
     aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[IsAWS])
     awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license")
+    awx_manage_check_license_data = awx_manage.awx_manage_check_license_data_datasource
     azure_instance_type = simple_command("/usr/bin/curl -s -H Metadata:true
http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2018-10-01&format=text --connect-timeout 5", deps=[IsAzure]) azure_instance_plan = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json --connect-timeout 5", deps=[IsAzure]) bios_uuid = simple_command("/usr/sbin/dmidecode -s system-uuid") diff --git a/insights/tests/datasources/test_awx_manage.py b/insights/tests/datasources/test_awx_manage.py new file mode 100644 index 000000000..50b3b9d37 --- /dev/null +++ b/insights/tests/datasources/test_awx_manage.py @@ -0,0 +1,69 @@ +import json +import collections +import pytest +from mock.mock import Mock +from insights.core.dr import SkipComponent +from insights.core.spec_factory import DatasourceProvider +from insights.specs.datasources.awx_manage import awx_manage_check_license_data_datasource, LocalSpecs +from insights.specs import Specs +from insights.core import filters + + +AWX_MANAGE_LICENSE = """ +{"contact_email": "test@redhat.com", "company_name": "test Inc", "instance_count": 100, "license_date": 1655092799, "license_type": "enterprise", "subscription_name": "Red Hat Ansible Automation, Standard (100 Managed Nodes)", "sku": "MCT3691", "support_level": "Standard", "product_name": "Red Hat Ansible Automation Platform", "valid_key": true, "satellite": null, "pool_id": "2c92808179803e530179ea5989a157a4", "current_instances": 1, "available_instances": 100, "free_instances": 99, "time_remaining": 29885220, "trial": false, "grace_period_remaining": 32477220, "compliant": true, "date_warning": false, "date_expired": false} +""".strip() + +NG_COMMAND = """ +awx-manage: command not found +""".strip() + +AWX_MANAGE_FILTER_JSON = { + "license_type": "enterprise", + "time_remaining": 29885220, + "instance_count": 100, + "support_level": "Standard" +} + +RELATIVE_PATH = 'insights_commands/awx-manage_check_license_--data' + + +def setup_function(func): + if Specs.awx_manage_check_license_data in filters._CACHE: + del filters._CACHE[Specs.awx_manage_check_license_data] + if Specs.awx_manage_check_license_data in filters.FILTERS: + del filters.FILTERS[Specs.awx_manage_check_license_data] + + if func is test_ansible_tower_license_datasource or func is test_ansible_tower_license_datasource_NG_output: + filters.add_filter(Specs.awx_manage_check_license_data, ["license_type", "support_level", "instance_count", "time_remaining"]) + if func is test_ansible_tower_license_datasource_no_filter: + filters.add_filter(Specs.awx_manage_check_license_data, []) + + +def test_ansible_tower_license_datasource(): + awx_manage_data = Mock() + awx_manage_data.content = AWX_MANAGE_LICENSE.splitlines() + broker = {LocalSpecs.awx_manage_check_license_data_raw: awx_manage_data} + result = awx_manage_check_license_data_datasource(broker) + assert result is not None + assert isinstance(result, DatasourceProvider) + expected = DatasourceProvider(content=json.dumps(collections.OrderedDict(sorted(AWX_MANAGE_FILTER_JSON.items()))), relative_path=RELATIVE_PATH) + assert result.content == expected.content + assert result.relative_path == expected.relative_path + + +def test_ansible_tower_license_datasource_no_filter(): + awx_manage_data = Mock() + awx_manage_data.content = AWX_MANAGE_LICENSE.splitlines() + broker = {LocalSpecs.awx_manage_check_license_data_raw: awx_manage_data} + with pytest.raises(SkipComponent) as e: + awx_manage_check_license_data_datasource(broker) + assert 'SkipComponent' in str(e) + + +def 
test_ansible_tower_license_datasource_NG_output():
+    awx_manage_data = Mock()
+    awx_manage_data.content = NG_COMMAND.splitlines()
+    broker = {LocalSpecs.awx_manage_check_license_data_raw: awx_manage_data}
+    with pytest.raises(SkipComponent) as e:
+        awx_manage_check_license_data_datasource(broker)
+    assert 'Unexpected exception' in str(e)

From 2e4e6a01dd7d35a2bbdb1971c4b01043213d01e0 Mon Sep 17 00:00:00 2001
From: huali027 <44796653+huali027@users.noreply.github.com>
Date: Wed, 21 Jul 2021 16:01:35 +0800
Subject: [PATCH 487/892] Fix "KeyError" bug (#3153)

* Fix "KeyError" bug

* When a line ends with "}" in the script part, the parser considers it
the end of the current section.

Signed-off-by: Huanhuan Li

* Add more checking in the test

Signed-off-by: Huanhuan Li
---
 insights/parsers/logrotate_conf.py            |  2 +-
 insights/parsers/tests/test_logrotate_conf.py | 33 +++++++++++++++++++
 2 files changed, 34 insertions(+), 1 deletion(-)

diff --git a/insights/parsers/logrotate_conf.py b/insights/parsers/logrotate_conf.py
index f97fcd335..a970c324a 100644
--- a/insights/parsers/logrotate_conf.py
+++ b/insights/parsers/logrotate_conf.py
@@ -116,7 +116,7 @@ def _parse_opts(line):
                 self.options.append(key1)
             else:
                 # in log_file section
-                if line.endswith('}'):
+                if line.endswith('}') and script is None:
                     # end of the section,
                     # save options for each log_file individually
                     for lf in log_files:
diff --git a/insights/parsers/tests/test_logrotate_conf.py b/insights/parsers/tests/test_logrotate_conf.py
index 8ddd81567..ca6593e93 100644
--- a/insights/parsers/tests/test_logrotate_conf.py
+++ b/insights/parsers/tests/test_logrotate_conf.py
@@ -97,6 +97,31 @@
 """.strip()
 
 
+LOGROTATE_CONF_4 = """
+/var/log/news/olds.crit {
+    monthly
+    rotate 2
+    olddir /var/log/news/old
+    missingok
+    prerotate
+        export LANG=C
+        ACTLOG_RLOG=/var/log/actlog/selfinfo/postrotate
+        {
+        E=/var/log/actlog.exports/eventlog.1
+        C=/var/log/actlog.exports/cpuload.1
+        if [ -e ${C}.gz -a -e $E ] ; then
+            E_backup=eventlog.1-`date -r $E +%F.%H%M%S`
+            echo "WARNING: Both ${C}.gz and $E exist ; move eventlog.1 to sysinfo/${E_backup}"
+            mv -f $E /var/log/actlog/sysinfo/${E_backup}
+        fi
+        } >>${ACTLOG_RLOG} 2>&1
+        exit 0
+    endscript
+    nocompress
+}
+""".strip()
+
+
 def test_web_xml_doc_examples():
     env = {
         'log_rt': LogrotateConf(context_wrap(LOGROTATE_MAN_PAGE_DOC, path='/etc/logrotate.conf')),
@@ -130,3 +155,11 @@ def test_logrotate_conf_3():
     assert log_rt['/var/log/cron']['sharedscripts'] is True
     assert log_rt['/var/log/messages']['postrotate'] == [
         '/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true']
+
+
+def test_logrotate_conf_4():
+    log_rt = LogrotateConf(context_wrap(LOGROTATE_CONF_4, path='/etc/logrotate.d/abc'))
+    assert '/var/log/news/olds.crit' in log_rt.log_files
+    assert 'mv -f $E /var/log/actlog/sysinfo/${E_backup}' in log_rt['/var/log/news/olds.crit']['prerotate']
+    assert '} >>${ACTLOG_RLOG} 2>&1' in log_rt['/var/log/news/olds.crit']['prerotate']
+    assert len(log_rt['/var/log/news/olds.crit']['prerotate']) == 12

From 539f1085e4759cedfdea05da8619c11b95adf804 Mon Sep 17 00:00:00 2001
From: Glutexo
Date: Wed, 21 Jul 2021 15:28:01 +0200
Subject: [PATCH 488/892] Do not compare by is (not) with literals (#3139)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Comparing string literals with "is" yields a SyntaxWarning on newer
Pythons. This happens in insights.client.connection, which uses is/is not
to compare with string and number literals.
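A minimal illustration of the difference (an interpreter session; the
results shown are what CPython typically produces, since only some
literals happen to be interned)::

    >>> method = "".join(["PO", "ST"])  # builds a new "POST" object
    >>> method == "POST"                # equality: always what we mean
    True
    >>> method is "POST"                # identity: depends on object reuse
    False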
Not only does this issue warnings, it is also incorrect behavior that
may lead to bugs. Hence the warnings. Fixed and tested.

Signed-off-by: Štěpán Tomsa
---
 insights/client/connection.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/insights/client/connection.py b/insights/client/connection.py
index 364409fcb..39795f85d 100644
--- a/insights/client/connection.py
+++ b/insights/client/connection.py
@@ -312,10 +312,10 @@ def _legacy_test_urls(self, url, method):
         for ext in paths:
             try:
                 logger.log(NETWORK, "Testing: %s", test_url + ext)
-                if method is "POST":
+                if method == "POST":
                     test_req = self.session.post(
                         test_url + ext, timeout=self.config.http_timeout, data=test_flag)
-                elif method is "GET":
+                elif method == "GET":
                     test_req = self.session.get(test_url + ext, timeout=self.config.http_timeout)
                 logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code)
                 logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason)
@@ -344,14 +344,14 @@ def _test_urls(self, url, method):
             return self._legacy_test_urls(url, method)
         try:
             logger.log(NETWORK, 'Testing %s', url)
-            if method is 'POST':
+            if method == 'POST':
                 test_tar = TemporaryFile(mode='rb', suffix='.tar.gz')
                 test_files = {
                     'file': ('test.tar.gz', test_tar, 'application/vnd.redhat.advisor.collection+tgz'),
                     'metadata': '{\"test\": \"test\"}'
                 }
                 test_req = self.session.post(url, timeout=self.config.http_timeout, files=test_files)
-            elif method is "GET":
+            elif method == "GET":
                 test_req = self.session.get(url, timeout=self.config.http_timeout)
             logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code)
             logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason)
@@ -516,8 +516,8 @@ def get_branch_info(self):
             logger.debug(u'Branch information: %s', json.dumps(branch_info))
 
         # Determine if we are connected to Satellite 5
-        if ((branch_info[u'remote_branch'] is not -1 and
-                branch_info[u'remote_leaf'] is -1)):
+        if ((branch_info[u'remote_branch'] != -1 and
+                branch_info[u'remote_leaf'] == -1)):
             self.get_satellite5_info(branch_info)
 
         # logger.debug(u'Saving branch info to file.')

From cbfaf58fab42f179f9dab6d602cc0b3f6b4785e7 Mon Sep 17 00:00:00 2001
From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com>
Date: Thu, 22 Jul 2021 19:01:07 +0530
Subject: [PATCH 489/892] Added SystemdDnsmasqServiceConf method to the
 systemd_config parser (#3155)

Signed-off-by: rasrivas
Co-authored-by: Sachin
---
 insights/parsers/systemd/config.py            | 27 +++++++++++++++++
 insights/parsers/tests/test_systemd_config.py | 20 +++++++++++++-
 insights/specs/__init__.py                    |  1 +
 insights/specs/default.py                     |  1 +
 4 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/insights/parsers/systemd/config.py b/insights/parsers/systemd/config.py
index 177bbd97f..5983785b7 100644
--- a/insights/parsers/systemd/config.py
+++ b/insights/parsers/systemd/config.py
@@ -16,6 +16,9 @@
 SystemdRpcbindSocketConf - unit file ``rpcbind.socket``
 -------------------------------------------------------
 
+SystemdDnsmasqConf - unit file ``dnsmasq.service``
+--------------------------------------------------
+
 SystemdOpenshiftNode - file ``/usr/lib/systemd/system/atomic-openshift-node.service``
 -------------------------------------------------------------------------------------
 
@@ -208,6 +211,30 @@ class SystemdRpcbindSocketConf(SystemdConf):
     pass
 
 
+@parser(Specs.systemctl_cat_dnsmasq_service)
+class SystemdDnsmasqServiceConf(SystemdConf):
+    """
+    Class for systemd configuration for dnsmasq.service unit.
+ + Typical content of the ``dnsmasq.service`` unit file is:: + + [Unit] + Description=DNS caching server. + After=network.target + + [Service] + ExecStart=/usr/sbin/dnsmasq -k + + [Install] + WantedBy=multi-user.target + + Example: + >>> dnsmasq_service["Unit"]["After"] + 'network.target' + """ + pass + + class MultiOrderedDict(dict): """ .. warning:: diff --git a/insights/parsers/tests/test_systemd_config.py b/insights/parsers/tests/test_systemd_config.py index 24bf7b117..80f6ecb23 100644 --- a/insights/parsers/tests/test_systemd_config.py +++ b/insights/parsers/tests/test_systemd_config.py @@ -137,6 +137,18 @@ WantedBy=sockets.target """.strip() +SYSTEMD_DNSMASQ_SERVICE = """ +[Unit] +Description=DNS caching server. +After=network.target + +[Service] +ExecStart=/usr/sbin/dnsmasq -k + +[Install] +WantedBy=multi-user.target +""".strip() + SYSTEMD_SYSTEM_CONF = """ # This file is part of systemd. # @@ -258,6 +270,11 @@ def test_systemd_rpcbind_socket_conf(): assert rpcbind_socket["Socket"]["ListenDatagram"] == ['0.0.0.0:111', '[::]:111'] +def test_systemd_dnsmasq_conf(): + dnsmasq_service_conf = config.SystemdDnsmasqServiceConf(context_wrap(SYSTEMD_DNSMASQ_SERVICE)) + assert dnsmasq_service_conf["Unit"]["After"] == "network.target" + + def test_systemd_empty(): with pytest.raises(SkipException): assert config.SystemdLogindConf(context_wrap('')) is None @@ -270,7 +287,8 @@ def test_doc_examples(): 'system_origin_accounting': config.SystemdOriginAccounting(context_wrap(SYSTEMD_SYSTEM_ORIGIN_ACCOUNTING)), 'openshift_node_service': config.SystemdOpenshiftNode(context_wrap(SYSTEMD_OPENSHIFT_NODE)), 'logind_conf': config.SystemdLogindConf(context_wrap(SYSTEMD_LOGIND_CONF)), - 'rpcbind_socket': config.SystemdRpcbindSocketConf(context_wrap(SYSTEMD_RPCBIND_SOCKET)) + 'rpcbind_socket': config.SystemdRpcbindSocketConf(context_wrap(SYSTEMD_RPCBIND_SOCKET)), + 'dnsmasq_service': config.SystemdDnsmasqServiceConf(context_wrap(SYSTEMD_DNSMASQ_SERVICE)) } failed, total = doctest.testmod(config, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 5f9cb6245..f60971508 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -633,6 +633,7 @@ class Specs(SpecSet): sysctl_conf_initramfs = RegistryPoint(multi_output=True) sysctl_conf = RegistryPoint() sysctl = RegistryPoint() + systemctl_cat_dnsmasq_service = RegistryPoint() systemctl_cat_rpcbind_socket = RegistryPoint() systemctl_cinder_volume = RegistryPoint() systemctl_httpd = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index a86ac858d..a4fb30c08 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -723,6 +723,7 @@ def is_mod_loaded_for_ss(broker): sysconfig_virt_who = simple_file("/etc/sysconfig/virt-who") sysctl = simple_command("/sbin/sysctl -a") sysctl_conf = simple_file("/etc/sysctl.conf") + systemctl_cat_dnsmasq_service = simple_command("/bin/systemctl cat dnsmasq.service") systemctl_cat_rpcbind_socket = simple_command("/bin/systemctl cat rpcbind.socket") systemctl_cinder_volume = simple_command("/bin/systemctl show openstack-cinder-volume") systemctl_httpd = simple_command("/bin/systemctl show httpd") From 46d54f29bb6ab913064eedc8c0c8a7cb4288ad00 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 22 Jul 2021 21:44:03 +0800 Subject: [PATCH 490/892] Add new option '--show-rules' to insights-run (#3154) * Add new option '--show-rules' to insights-run Signed-off-by: Xiangce Liu * tiny update Signed-off-by: Xiangce Liu * Fix 
the test errors Signed-off-by: Xiangce Liu * tiny update to markdown Signed-off-by: Xiangce Liu --- insights/formats/__init__.py | 46 ++++++++++++++++++++++++++++++----- insights/formats/_json.py | 15 ++++++++++-- insights/formats/_markdown.py | 41 ++++++++++++++++++------------- insights/formats/_yaml.py | 15 ++++++++++-- insights/formats/text.py | 42 +++++++++++++++++++------------- 5 files changed, 115 insertions(+), 44 deletions(-) diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py index 78acdb0ee..38a80490f 100644 --- a/insights/formats/__init__.py +++ b/insights/formats/__init__.py @@ -53,6 +53,7 @@ def postprocess(self, broker): Called after all components have been run. Useful for interrogating the broker for final state. """ + pass class Formatter(object): @@ -84,19 +85,33 @@ class EvaluatorFormatterAdapter(FormatterAdapter): @staticmethod def configure(p): - """ Override to add arguments to the ArgumentParser. """ - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-S", "--show-rules", nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", + action="store_true") def __init__(self, args=None): if args: hn = "insights.combiners.hostname, insights.parsers.branch_info" args.plugins = ",".join([args.plugins, hn]) if args.plugins else hn - if args.fail_only: - print('Options conflict: -f and -F, drops -F', file=sys.stderr) - args.fail_only = False + self.missing = args.missing + fail_only = args.fail_only + if args.missing and fail_only: + # Drops the '-F' silently when specifying '-m' and '-F' together + # --> Do NOT break the Format of the output + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] def preprocess(self, broker): - self.formatter = self.Impl(broker) + self.formatter = self.Impl(broker, self.missing, self.show_rules) self.formatter.preprocess() def postprocess(self, broker): @@ -155,3 +170,22 @@ def render(comp, val): _type = dr.get_component_type(comp) func = RENDERERS.get(_type) return func(comp, val) if func else str(val) + + +def get_response_of_types(response, missing=True, show_rules=None): + if not missing and 'skips' in response: + response.pop('skips') + if not show_rules: + return response + if 'metadata' not in show_rules and 'metadata' in response.get('system', {}): + response['system'].pop('metadata') + if 'rule' not in show_rules and 'reports' in response: + response.pop('reports') + if 'info' not in show_rules and 'info' in response: + response.pop('info') + if 'pass' not in show_rules and 'pass' in response: + response.pop('pass') + if 'fingerprint' not in show_rules and 'fingerprints' in response: + response.pop('fingerprints') + + return response diff --git a/insights/formats/_json.py b/insights/formats/_json.py index bdc8193bd..e3dfde4c4 100644 --- a/insights/formats/_json.py +++ b/insights/formats/_json.py @@ -1,12 +1,23 @@ import json +import sys from 
insights.core.evaluators import SingleEvaluator as Evaluator -from insights.formats import EvaluatorFormatterAdapter +from insights.formats import EvaluatorFormatterAdapter, get_response_of_types class JsonFormat(Evaluator): + def __init__(self, + broker=None, + missing=False, + show_rules=None, + stream=sys.stdout): + super(JsonFormat, self).__init__(broker, stream=stream) + self.missing = missing + self.show_rules = [] if show_rules is None else show_rules + def postprocess(self): - json.dump(self.get_response(), self.stream) + response = get_response_of_types(self.get_response(), self.missing, self.show_rules) + json.dump(response, self.stream) class JsonFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/_markdown.py b/insights/formats/_markdown.py index bc08c2cca..d61349d6e 100644 --- a/insights/formats/_markdown.py +++ b/insights/formats/_markdown.py @@ -40,15 +40,13 @@ def __init__(self, missing=False, tracebacks=False, dropped=False, - fail_only=False, + show_rules=None, stream=sys.stdout): - super(MarkdownFormat, self).__init__(broker, stream) - self.broker = broker + super(MarkdownFormat, self).__init__(broker, stream=stream) self.missing = missing self.tracebacks = tracebacks self.dropped = dropped - self.fail_only = fail_only - self.stream = stream + self.show_rules = [] if show_rules is None else show_rules self.counts = {'skip': 0, 'pass': 0, 'rule': 0, 'info': 0, 'metadata': 0, 'metadata_key': 0, 'fingerprint': 0, 'exception': 0} self.responses = { @@ -150,8 +148,7 @@ def printit(c, v): if _type: if self.missing and _type == 'skip': print_missing(c, v) - elif ((self.fail_only and _type == 'rule') or - (not self.fail_only and _type != 'skip')): + elif self.show_rules == [] or _type in self.show_rules: printit(c, v) print(file=self.stream) @@ -182,24 +179,34 @@ class MarkdownFormatAdapter(FormatterAdapter): @staticmethod def configure(p): - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") - - def __init__(self, args): + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-S", "--show-rules", nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. 
This option is deprecated by '-S fail'", + action="store_true") + + def __init__(self, args=None): self.missing = args.missing + fail_only = args.fail_only + if args.missing and fail_only: + print('Options conflict: -m and -F, drops -F', file=sys.stderr) + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] self.tracebacks = args.tracebacks self.dropped = args.dropped - self.fail_only = args.fail_only - self.formatter = None - if self.missing and self.fail_only: - print('Options conflict: -m and -F, drops -F', file=sys.stderr) - self.fail_only = False def preprocess(self, broker): self.formatter = MarkdownFormat(broker, - self.missing, self.tracebacks, self.dropped, self.fail_only) + self.missing, self.tracebacks, self.dropped, self.show_rules) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/formats/_yaml.py b/insights/formats/_yaml.py index 37772353b..ae1d3c816 100644 --- a/insights/formats/_yaml.py +++ b/insights/formats/_yaml.py @@ -1,7 +1,8 @@ import yaml +import sys from insights.core.evaluators import SingleEvaluator -from insights.formats import EvaluatorFormatterAdapter +from insights.formats import EvaluatorFormatterAdapter, get_response_of_types from yaml.representer import Representer from insights.core import ScanMeta @@ -9,8 +10,18 @@ class YamlFormat(SingleEvaluator): + def __init__(self, + broker=None, + missing=False, + show_rules=None, + stream=sys.stdout): + super(YamlFormat, self).__init__(broker, stream=stream) + self.missing = missing + self.show_rules = [] if show_rules is None else show_rules + def postprocess(self): - yaml.dump(self.get_response(), self.stream) + response = get_response_of_types(self.get_response(), self.missing, self.show_rules) + yaml.dump(response, self.stream) class YamlFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/text.py b/insights/formats/text.py index 7d54a35f6..b3e8bf9a7 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -89,14 +89,13 @@ def __init__(self, broker, missing=False, tracebacks=False, dropped=False, - fail_only=False, + show_rules=None, stream=sys.stdout): - self.broker = broker + super(HumanReadableFormat, self).__init__(broker, stream=stream) self.missing = missing self.tracebacks = tracebacks self.dropped = dropped - self.fail_only = fail_only - self.stream = stream + self.show_rules = [] if show_rules is None else show_rules def print_header(self, header, color): ln = len(header) @@ -186,9 +185,8 @@ def printit(c, v): _type = v.get('type') if _type in self.responses: self.counts[_type] += 1 - if (_type and ((self.fail_only and _type == 'rule') or - ((self.missing and _type == 'skip') or - (not self.fail_only and _type != 'skip')))): + if (self.missing and _type == 'skip' or + (self.show_rules == [] or _type in self.show_rules)): printit(c, v) print(file=self.stream) @@ -213,24 +211,34 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. 
Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") - - def __init__(self, args): + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-S", "--show-rules", default=[], nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", + action="store_true") + + def __init__(self, args=None): self.missing = args.missing + fail_only = args.fail_only + if args.missing and fail_only: + print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] self.tracebacks = args.tracebacks self.dropped = args.dropped - self.fail_only = args.fail_only - self.formatter = None - if self.missing and self.fail_only: - print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) - self.fail_only = False def preprocess(self, broker): self.formatter = HumanReadableFormat(broker, - self.missing, self.tracebacks, self.dropped, self.fail_only) + self.missing, self.tracebacks, self.dropped, self.show_rules) self.formatter.preprocess() def postprocess(self, broker): From 6dab91154f0e63c9e90ab0d24e2398242a0c0ea5 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 22 Jul 2021 21:50:47 +0800 Subject: [PATCH 491/892] Combiner LsPci should not modify Parser LsPci* (#3148) * Combiner LsPci should not modify Parser LsPci* Signed-off-by: Xiangce Liu * Add comments to the fixed lines Signed-off-by: Xiangce Liu * Add more comments to the tests Signed-off-by: Xiangce Liu --- insights/combiners/lspci.py | 7 ++++++- insights/combiners/tests/test_lspci.py | 13 +++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/insights/combiners/lspci.py b/insights/combiners/lspci.py index cf56a97ab..0566092cd 100644 --- a/insights/combiners/lspci.py +++ b/insights/combiners/lspci.py @@ -115,8 +115,11 @@ class LsPci(list): def __init__(self, lspci_k, lspci_vmmkn): if lspci_vmmkn: for dev in lspci_vmmkn: + # use the local copy to prevent from writing back to the parser + dev = dev.copy() if lspci_k and dev['Slot'] in lspci_k: - dev_k = lspci_k.data[dev['Slot']] + # use the local copy to prevent from writing back to the parser + dev_k = lspci_k.data[dev['Slot']].copy() dev_k.pop('Kernel driver in use') if 'Kernel driver in use' in dev_k else None dev_k.pop('Kernel modules') if 'Kernel modules' in dev_k else None dev.update(dev_k) @@ -124,6 +127,8 @@ def __init__(self, lspci_k, lspci_vmmkn): self._pci_dev_list = lspci_vmmkn.pci_dev_list else: for dev in lspci_k.data.values(): + # use the local copy to prevent from writing back to the parser + dev = dev.copy() dev.update(Driver=dev.pop('Kernel driver in use')) if 'Kernel driver in use' in dev else None dev.update(Module=[i.strip() for i in dev.pop('Kernel modules').split(',')]) if 'Kernel modules' in dev else None self.append(dev) diff --git a/insights/combiners/tests/test_lspci.py b/insights/combiners/tests/test_lspci.py index 70fd897cb..9bf9396f4 100644 --- a/insights/combiners/tests/test_lspci.py +++ b/insights/combiners/tests/test_lspci.py @@ -108,6 +108,9 @@ 
def test_lspci_k(): } ] assert lspci.search(Slot__startwith='00:1b.0') == [] + # Make sure the original parser is untouched + assert lspci_k.pci_dev_details('00:00.0').get('Kernel driver in use') == 'hsw_uncore' + assert lspci_k.pci_dev_details('00:1b.0').get('Kernel driver in use') == 'snd_hda_intel' def test_lspci_vmmkn(): @@ -122,6 +125,9 @@ def test_lspci_vmmkn(): } ] assert lspci.search(Dev_Details__contains='I218') == [] + # Make sure the original parser is untouched + assert sorted(lspci_vmmkn[0].keys()) == sorted(['Slot', 'Class', 'Vendor', + 'Device', 'SVendor', 'SDevice', 'Rev', 'Driver']) def test_lspci_both(): @@ -148,6 +154,13 @@ def test_lspci_both(): 'Dev_Details': 'Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04)' } ] + # Make sure the original parsers are untouched + assert lspci_k.pci_dev_details('00:00.0').get('Kernel driver in use') == 'hsw_uncore' + assert lspci_k.pci_dev_details('00:1b.0').get('Kernel driver in use') == 'snd_hda_intel' + assert sorted(lspci_vmmkn[0].keys()) == sorted(['Slot', 'Class', 'Vendor', + 'Device', 'SVendor', 'SDevice', 'Rev', 'Driver']) + assert sorted(lspci_vmmkn[-1].keys()) == sorted(['Class', 'Device', + 'Driver', 'Module', 'Rev', 'SDevice', 'SVendor', 'Slot', 'Vendor']) def test_doc_examples(): From 39e4c712308f0d56e833e61cd6884dba03d3e06c Mon Sep 17 00:00:00 2001 From: Glutexo Date: Thu, 22 Jul 2021 16:26:14 +0200 Subject: [PATCH 492/892] Fix a circular import in Candlepin broker test (#3160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One does not simply import from insights.specs.datasources.candlepin_broker, because it itself imports from insights.specs.default, which imports back from insights.specs.datasources.candlepin_broker. 
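A stripped-down sketch of such a cycle (the module names here are
illustrative only, not the real spec modules)::

    # a.py
    import b             # starts loading b before a is fully initialized

    VALUE = 1

    # b.py
    from a import VALUE  # fails: a is only partially initialized here

The test below therefore imports insights.specs.default, which finishes
initializing both modules, and reaches candlepin_broker as an attribute
instead of importing from it directly.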
Signed-off-by: Štěpán Tomsa --- .../tests/datasources/test_candlepin_broker.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/insights/tests/datasources/test_candlepin_broker.py b/insights/tests/datasources/test_candlepin_broker.py index b34190740..49d3ceda3 100644 --- a/insights/tests/datasources/test_candlepin_broker.py +++ b/insights/tests/datasources/test_candlepin_broker.py @@ -3,7 +3,7 @@ from insights.core.spec_factory import DatasourceProvider from insights.core.dr import SkipComponent -from insights.specs.datasources.candlepin_broker import candlepin_broker, LocalSpecs +from insights.specs.default import candlepin_broker CANDLEPIN_BROKER = """ @@ -144,8 +144,8 @@ def test_candlepin_broker(): candlepin_broker_file = Mock() candlepin_broker_file.content = CANDLEPIN_BROKER.splitlines() - broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} - result = candlepin_broker(broker) + broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file} + result = candlepin_broker.candlepin_broker(broker) assert result is not None assert isinstance(result, DatasourceProvider) expected = DatasourceProvider(content=CANDLEPIN_BROKER_XML.splitlines(), relative_path=RELATIVE_PATH) @@ -156,17 +156,17 @@ def test_candlepin_broker(): def test_candlepin_broker_bad(): candlepin_broker_file = Mock() candlepin_broker_file.content = CANDLEPIN_BROKER_BAD.splitlines() - broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} + broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file} with pytest.raises(SkipComponent) as e: - candlepin_broker(broker) + candlepin_broker.candlepin_broker(broker) assert 'Unexpected exception' in str(e) def test_candlpin_broker_no_sensitive_info(): candlepin_broker_file = Mock() candlepin_broker_file.content = CANDLEPIN_BROKER_NO_SENSITIVE_INFO.splitlines() - broker = {LocalSpecs.candlepin_broker_input: candlepin_broker_file} - result = candlepin_broker(broker) + broker = {candlepin_broker.LocalSpecs.candlepin_broker_input: candlepin_broker_file} + result = candlepin_broker.candlepin_broker(broker) assert result is not None assert isinstance(result, DatasourceProvider) expected = DatasourceProvider(content=CANDLE_BROKER_NO_SENTISVE_INFO.splitlines(), relative_path=RELATIVE_PATH) From d261ba406ca8138367e218f7f40c67ddcf0ed535 Mon Sep 17 00:00:00 2001 From: Stephen Adams Date: Thu, 22 Jul 2021 12:03:46 -0400 Subject: [PATCH 493/892] Update uploader.json map file Signed-off-by: Stephen Adams --- insights/client/uploader_json_map.json | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index ef709ace6..d5b68fec8 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -1534,6 +1534,11 @@ "pattern": [], "symbolic_name": "sysctl" }, + { + "command": "/bin/systemctl cat dnsmasq.service", + "pattern": [], + "symbolic_name": "systemctl_cat_dnsmasq_service" + }, { "command": "/bin/systemctl cat rpcbind.socket", "pattern": [], @@ -2302,6 +2307,11 @@ "pattern": [], "symbolic_name": "haproxy_cfg" }, + { + "file": "/etc/opt/rh/rh-haproxy18/haproxy/haproxy.cfg", + "pattern": [], + "symbolic_name": "haproxy_cfg_scl" + }, { "file": "/etc/heat/heat.conf", "pattern": [], @@ -2343,7 +2353,6 @@ "pattern": [ "(28)No space left on device: ", "AH00485: scoreboard is full, not at MaxRequestWorkers", - "ModSecurity: collections_remove_stale: Failed 
deleting collection", "The mpm module (prefork.c) is not supported by mod_http2", "[crit] Memory allocation failed, aborting process", "and would exceed the ServerLimit value of ", @@ -4377,5 +4386,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-07-08T13:34:47.107154" + "version": "2021-07-15T11:42:35.950625" } \ No newline at end of file From 23759fd8cd386af9be6e92635217e9993e6caad2 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Fri, 23 Jul 2021 09:24:10 -0500 Subject: [PATCH 494/892] Revert "Add new option '--show-rules' to insights-run (#3154)" (#3162) This reverts commit 46d54f29bb6ab913064eedc8c0c8a7cb4288ad00. --- insights/formats/__init__.py | 46 +++++------------------------------ insights/formats/_json.py | 15 ++---------- insights/formats/_markdown.py | 41 +++++++++++++------------------ insights/formats/_yaml.py | 15 ++---------- insights/formats/text.py | 42 +++++++++++++------------------- 5 files changed, 44 insertions(+), 115 deletions(-) diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py index 38a80490f..78acdb0ee 100644 --- a/insights/formats/__init__.py +++ b/insights/formats/__init__.py @@ -53,7 +53,6 @@ def postprocess(self, broker): Called after all components have been run. Useful for interrogating the broker for final state. """ - pass class Formatter(object): @@ -85,33 +84,19 @@ class EvaluatorFormatterAdapter(FormatterAdapter): @staticmethod def configure(p): - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-S", "--show-rules", nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], - metavar="TYPE", - help="Show results per rule type(s).") - p.add_argument("-F", "--fail-only", - help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", - action="store_true") + """ Override to add arguments to the ArgumentParser. """ + p.add_argument("-F", "--fail-only", help="Show FAIL results only. 
Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") def __init__(self, args=None): if args: hn = "insights.combiners.hostname, insights.parsers.branch_info" args.plugins = ",".join([args.plugins, hn]) if args.plugins else hn - self.missing = args.missing - fail_only = args.fail_only - if args.missing and fail_only: - # Drops the '-F' silently when specifying '-m' and '-F' together - # --> Do NOT break the Format of the output - fail_only = None - self.show_rules = [] # Empty by default, means show ALL types - if not args.show_rules and fail_only: - self.show_rules = ['rule'] - elif args.show_rules: - self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] + if args.fail_only: + print('Options conflict: -f and -F, drops -F', file=sys.stderr) + args.fail_only = False def preprocess(self, broker): - self.formatter = self.Impl(broker, self.missing, self.show_rules) + self.formatter = self.Impl(broker) self.formatter.preprocess() def postprocess(self, broker): @@ -170,22 +155,3 @@ def render(comp, val): _type = dr.get_component_type(comp) func = RENDERERS.get(_type) return func(comp, val) if func else str(val) - - -def get_response_of_types(response, missing=True, show_rules=None): - if not missing and 'skips' in response: - response.pop('skips') - if not show_rules: - return response - if 'metadata' not in show_rules and 'metadata' in response.get('system', {}): - response['system'].pop('metadata') - if 'rule' not in show_rules and 'reports' in response: - response.pop('reports') - if 'info' not in show_rules and 'info' in response: - response.pop('info') - if 'pass' not in show_rules and 'pass' in response: - response.pop('pass') - if 'fingerprint' not in show_rules and 'fingerprints' in response: - response.pop('fingerprints') - - return response diff --git a/insights/formats/_json.py b/insights/formats/_json.py index e3dfde4c4..bdc8193bd 100644 --- a/insights/formats/_json.py +++ b/insights/formats/_json.py @@ -1,23 +1,12 @@ import json -import sys from insights.core.evaluators import SingleEvaluator as Evaluator -from insights.formats import EvaluatorFormatterAdapter, get_response_of_types +from insights.formats import EvaluatorFormatterAdapter class JsonFormat(Evaluator): - def __init__(self, - broker=None, - missing=False, - show_rules=None, - stream=sys.stdout): - super(JsonFormat, self).__init__(broker, stream=stream) - self.missing = missing - self.show_rules = [] if show_rules is None else show_rules - def postprocess(self): - response = get_response_of_types(self.get_response(), self.missing, self.show_rules) - json.dump(response, self.stream) + json.dump(self.get_response(), self.stream) class JsonFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/_markdown.py b/insights/formats/_markdown.py index d61349d6e..bc08c2cca 100644 --- a/insights/formats/_markdown.py +++ b/insights/formats/_markdown.py @@ -40,13 +40,15 @@ def __init__(self, missing=False, tracebacks=False, dropped=False, - show_rules=None, + fail_only=False, stream=sys.stdout): - super(MarkdownFormat, self).__init__(broker, stream=stream) + super(MarkdownFormat, self).__init__(broker, stream) + self.broker = broker self.missing = missing self.tracebacks = tracebacks self.dropped = dropped - self.show_rules = [] if show_rules is None else show_rules + self.fail_only = fail_only + self.stream = stream self.counts = {'skip': 0, 'pass': 0, 'rule': 0, 'info': 0, 'metadata': 0, 'metadata_key': 0, 'fingerprint': 0, 'exception': 0} self.responses = 
{ @@ -148,7 +150,8 @@ def printit(c, v): if _type: if self.missing and _type == 'skip': print_missing(c, v) - elif self.show_rules == [] or _type in self.show_rules: + elif ((self.fail_only and _type == 'rule') or + (not self.fail_only and _type != 'skip')): printit(c, v) print(file=self.stream) @@ -179,34 +182,24 @@ class MarkdownFormatAdapter(FormatterAdapter): @staticmethod def configure(p): + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-S", "--show-rules", nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], - metavar="TYPE", - help="Show results per rule type(s).") - p.add_argument("-F", "--fail-only", - help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", - action="store_true") - - def __init__(self, args=None): + p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + + def __init__(self, args): self.missing = args.missing - fail_only = args.fail_only - if args.missing and fail_only: - print('Options conflict: -m and -F, drops -F', file=sys.stderr) - fail_only = None - self.show_rules = [] # Empty by default, means show ALL types - if not args.show_rules and fail_only: - self.show_rules = ['rule'] - elif args.show_rules: - self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] self.tracebacks = args.tracebacks self.dropped = args.dropped + self.fail_only = args.fail_only + self.formatter = None + if self.missing and self.fail_only: + print('Options conflict: -m and -F, drops -F', file=sys.stderr) + self.fail_only = False def preprocess(self, broker): self.formatter = MarkdownFormat(broker, - self.missing, self.tracebacks, self.dropped, self.show_rules) + self.missing, self.tracebacks, self.dropped, self.fail_only) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/formats/_yaml.py b/insights/formats/_yaml.py index ae1d3c816..37772353b 100644 --- a/insights/formats/_yaml.py +++ b/insights/formats/_yaml.py @@ -1,8 +1,7 @@ import yaml -import sys from insights.core.evaluators import SingleEvaluator -from insights.formats import EvaluatorFormatterAdapter, get_response_of_types +from insights.formats import EvaluatorFormatterAdapter from yaml.representer import Representer from insights.core import ScanMeta @@ -10,18 +9,8 @@ class YamlFormat(SingleEvaluator): - def __init__(self, - broker=None, - missing=False, - show_rules=None, - stream=sys.stdout): - super(YamlFormat, self).__init__(broker, stream=stream) - self.missing = missing - self.show_rules = [] if show_rules is None else show_rules - def postprocess(self): - response = get_response_of_types(self.get_response(), self.missing, self.show_rules) - yaml.dump(response, self.stream) + yaml.dump(self.get_response(), self.stream) class YamlFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/text.py b/insights/formats/text.py index b3e8bf9a7..7d54a35f6 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -89,13 +89,14 @@ def __init__(self, broker, missing=False, tracebacks=False, dropped=False, - 
show_rules=None, + fail_only=False, stream=sys.stdout): - super(HumanReadableFormat, self).__init__(broker, stream=stream) + self.broker = broker self.missing = missing self.tracebacks = tracebacks self.dropped = dropped - self.show_rules = [] if show_rules is None else show_rules + self.fail_only = fail_only + self.stream = stream def print_header(self, header, color): ln = len(header) @@ -185,8 +186,9 @@ def printit(c, v): _type = v.get('type') if _type in self.responses: self.counts[_type] += 1 - if (self.missing and _type == 'skip' or - (self.show_rules == [] or _type in self.show_rules)): + if (_type and ((self.fail_only and _type == 'rule') or + ((self.missing and _type == 'skip') or + (not self.fail_only and _type != 'skip')))): printit(c, v) print(file=self.stream) @@ -211,34 +213,24 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-S", "--show-rules", default=[], nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], - metavar="TYPE", - help="Show results per rule type(s).") - p.add_argument("-F", "--fail-only", - help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", - action="store_true") - - def __init__(self, args=None): + p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + + def __init__(self, args): self.missing = args.missing - fail_only = args.fail_only - if args.missing and fail_only: - print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) - fail_only = None - self.show_rules = [] # Empty by default, means show ALL types - if not args.show_rules and fail_only: - self.show_rules = ['rule'] - elif args.show_rules: - self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] self.tracebacks = args.tracebacks self.dropped = args.dropped + self.fail_only = args.fail_only + self.formatter = None + if self.missing and self.fail_only: + print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) + self.fail_only = False def preprocess(self, broker): self.formatter = HumanReadableFormat(broker, - self.missing, self.tracebacks, self.dropped, self.show_rules) + self.missing, self.tracebacks, self.dropped, self.fail_only) self.formatter.preprocess() def postprocess(self, broker): From bdaaeaedfc43cff20dea081e2d992e8dfa4b86b8 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 28 Jul 2021 01:42:52 +0530 Subject: [PATCH 495/892] added systemctl_cat_dnsmasq_service spec to isnights_archive (#3166) Signed-off-by: Rahul --- insights/parsers/systemd/config.py | 4 ++-- insights/specs/insights_archive.py | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/insights/parsers/systemd/config.py b/insights/parsers/systemd/config.py index 5983785b7..ebba7c133 100644 --- a/insights/parsers/systemd/config.py +++ b/insights/parsers/systemd/config.py @@ -16,8 +16,8 @@ SystemdRpcbindSocketConf - unit file ``rpcbind.socket`` 
------------------------------------------------------- -SystemdDnsmasqConf - unit file ``dnsmasq.service`` --------------------------------------------------- +SystemdDnsmasqServiceConf - unit file ``dnsmasq.service`` +--------------------------------------------------------- SystemdOpenshiftNode - file ``/usr/lib/systemd/system/atomic-openshift-node.service`` ------------------------------------------------------------------------------------- diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 0dcf43c42..e6e2ade89 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -227,6 +227,7 @@ class InsightsArchiveSpecs(Specs): subscription_manager_id = simple_file("insights_commands/subscription-manager_identity") subscription_manager_installed_product_ids = simple_file("insights_commands/find_.etc.pki.product-default._.etc.pki.product._-name_pem_-exec_rct_cat-cert_--no-content") sysctl = simple_file("insights_commands/sysctl_-a") + systemctl_cat_dnsmasq_service = simple_file("insights_commands/systemctl_cat_dnsmasq.service") systemctl_cat_rpcbind_socket = simple_file("insights_commands/systemctl_cat_rpcbind.socket") systemctl_cinder_volume = simple_file("insights_commands/systemctl_show_openstack-cinder-volume") systemctl_httpd = simple_file("insights_commands/systemctl_show_httpd") From 19bd419164b6d61e4ef621d5cf2a5790f1f569a3 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Tue, 27 Jul 2021 16:50:02 -0400 Subject: [PATCH 496/892] Fix json parsing of empty content. (#3157) * Add a check for no content in the JSONParser class instead of checking in each individual parser for no content. * Add a test for empty output to each parser's test that uses the JSONParser class. * Fixes #3163 Signed-off-by: Ryan Blakley --- insights/core/__init__.py | 14 ++++--- insights/parsers/engine_db_query.py | 4 -- insights/parsers/tests/__init__.py | 11 +++++ insights/parsers/tests/test_awx_manage.py | 6 +-- .../tests/test_ceph_cmd_json_parsing.py | 40 ++++++++++++++----- insights/parsers/tests/test_cloud_cfg.py | 6 +++ .../tests/test_cni_podman_bridge_conf.py | 7 +++- .../parsers/tests/test_engine_db_query.py | 8 ++-- .../tests/test_freeipa_healthcheck_log.py | 6 +++ insights/parsers/tests/test_httpd_open_nfs.py | 8 +++- insights/parsers/tests/test_ndctl_list.py | 8 +++- .../parsers/tests/test_rhsm_releasever.py | 11 +++-- .../tests/test_rhv_log_collector_analyzer.py | 4 ++ insights/parsers/tests/test_tags.py | 5 +++ .../tests/test_teamdctl_config_dump.py | 10 ++++- .../parsers/tests/test_teamdctl_state_dump.py | 5 +++ insights/parsers/tests/test_version_info.py | 6 +++ .../parsers/tests/test_virt_uuid_facts.py | 5 +++ 18 files changed, 131 insertions(+), 33 deletions(-) diff --git a/insights/core/__init__.py b/insights/core/__init__.py index 61efe9219..001eb5a18 100644 --- a/insights/core/__init__.py +++ b/insights/core/__init__.py @@ -750,11 +750,15 @@ def parse_content(self, content): try: self.data = json.loads(''.join(content)) except: - tb = sys.exc_info()[2] - cls = self.__class__ - name = ".".join([cls.__module__, cls.__name__]) - msg = "%s couldn't parse json." % name - six.reraise(ParseException, ParseException(msg), tb) + # If content is empty then raise a skip exception instead of a parse exception. 
+ if not content: + raise SkipException("Empty output.") + else: + tb = sys.exc_info()[2] + cls = self.__class__ + name = ".".join([cls.__module__, cls.__name__]) + msg = "%s couldn't parse json." % name + six.reraise(ParseException, ParseException(msg), tb) class ScanMeta(type): diff --git a/insights/parsers/engine_db_query.py b/insights/parsers/engine_db_query.py index ff9ba6fe4..7298c2ebd 100644 --- a/insights/parsers/engine_db_query.py +++ b/insights/parsers/engine_db_query.py @@ -5,7 +5,6 @@ Parses the output of the command `engine-db-query` returned in JSON format. """ from insights.core import CommandParser, JSONParser -from insights.parsers import SkipException from insights.core.plugins import parser from insights.specs import Specs @@ -46,9 +45,6 @@ class EngineDBQueryVDSMversion(CommandParser, JSONParser): True """ def parse_content(self, content): - if not content: - raise SkipException("Empty output.") - super(EngineDBQueryVDSMversion, self).parse_content(content) @property diff --git a/insights/parsers/tests/__init__.py b/insights/parsers/tests/__init__.py index 8d933a6d6..3d0f416b8 100644 --- a/insights/parsers/tests/__init__.py +++ b/insights/parsers/tests/__init__.py @@ -1,6 +1,7 @@ import doctest from doctest import (DebugRunner, DocTestFinder, DocTestRunner, OutputChecker) +import pytest import re import sys @@ -35,3 +36,13 @@ def ic_testmod(m, name=None, globs=None, verbose=None, runner.summarize() return doctest.TestResults(runner.failures, runner.tries) + + +@pytest.fixture() +def test_empty_skip(parser_obj): + from insights.parsers import SkipException + from insights.tests import context_wrap + + with pytest.raises(SkipException) as ex: + parser_obj(context_wrap("")) + return str(ex) diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py index 01e254107..7aa162b5b 100644 --- a/insights/parsers/tests/test_awx_manage.py +++ b/insights/parsers/tests/test_awx_manage.py @@ -1,9 +1,10 @@ import doctest import pytest -from insights.parsers import awx_manage, SkipException from insights.core import ContentException, ParseException +from insights.parsers import awx_manage, SkipException from insights.parsers.awx_manage import AnsibleTowerLicenseType, AnsibleTowerLicense +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap GOOD_LICENSE = """ @@ -58,8 +59,7 @@ def test_ansible_tower_license_data(): def test_ansible_tower_license__data_ab_type(): - with pytest.raises(ParseException): - AnsibleTowerLicense(context_wrap(NG_COMMAND_0)) + assert 'Empty output.' 
in test_empty_skip(AnsibleTowerLicense) with pytest.raises(ContentException): AnsibleTowerLicense(context_wrap(NG_COMMAND_1)) diff --git a/insights/parsers/tests/test_ceph_cmd_json_parsing.py b/insights/parsers/tests/test_ceph_cmd_json_parsing.py index 1ee1fdd8d..f22105c51 100644 --- a/insights/parsers/tests/test_ceph_cmd_json_parsing.py +++ b/insights/parsers/tests/test_ceph_cmd_json_parsing.py @@ -1,8 +1,10 @@ import doctest import pytest -from insights.parsers import ceph_cmd_json_parsing, ParseException, SkipException + +from insights.parsers import ceph_cmd_json_parsing, ParseException from insights.parsers.ceph_cmd_json_parsing import CephOsdDump, CephOsdDf, CephS, CephECProfileGet, CephCfgInfo, \ CephHealthDetail, CephDfDetail, CephOsdTree, CephReport +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap CEPH_OSD_DUMP_INFO = """ @@ -163,7 +165,7 @@ { "name": "osd.1", "cluster": "ceph", - "debug_none": "0\/5", + "debug_none": "0/5", "heartbeat_interval": "5", "heartbeat_file": "", "heartbeat_inject_failure": "0", @@ -493,8 +495,6 @@ } """.strip() -CEPH_REPORT_EMPTY = """""".strip() - def test_ceph_doc_examples(): env = { @@ -534,6 +534,9 @@ def test_ceph_osd_dump(self): } assert result['pools'][0]['min_size'] == 2 + def test_ceph_osd_dump_empty(self): + assert 'Empty output.' in test_empty_skip(CephOsdDump) + class TestCephOsdDf(): def test_ceph_osd_df(self): @@ -570,6 +573,9 @@ def test_ceph_osd_df(self): } assert result['nodes'][0]['pgs'] == 945 + def test_ceph_os_df_empty(self): + assert 'Empty output.' in test_empty_skip(CephOsdDf) + class TestCephS(): def test_ceph_s(self): @@ -600,6 +606,9 @@ def test_ceph_s(self): } assert result['pgmap']['pgs_by_state'][0]['state_name'] == 'active+clean' + def test_ceph_s_empty(self): + assert 'Empty output.' in test_empty_skip(CephS) + class TestCephECProfileGet(): def test_ceph_ec_profile_get(self): @@ -614,9 +623,12 @@ def test_ceph_ec_profile_get(self): assert result['k'] == "2" assert result['m'] == "1" + def test_ceph_ec_profile_get_empty(self): + assert 'Empty output.' in test_empty_skip(CephECProfileGet) + class TestCephCfgInfo(): - def test_cephcfginfo(self): + def test_ceph_cfg_info(self): result = CephCfgInfo(context_wrap(CEPHINFO)) assert result.data == { @@ -637,6 +649,9 @@ def test_cephcfginfo(self): assert result.max_open_files == '131072' + def test_ceph_cfg_info_empty(self): + assert 'Empty output.' in test_empty_skip(CephCfgInfo) + class TestCephHealthDetail(): def test_ceph_health_detail(self): @@ -656,6 +671,9 @@ def test_ceph_health_detail(self): } assert result['overall_status'] == 'HEALTH_OK' + def test_ceph_health_detail_empty(self): + assert 'Empty output.' in test_empty_skip(CephHealthDetail) + class TestCephDfDetail(): def test_ceph_df_detail(self): @@ -705,6 +723,9 @@ def test_ceph_df_detail(self): } assert result['stats']['total_avail_bytes'] == 16910123008 + def test_ceph_df_detail_empty(self): + assert 'Empty output.' in test_empty_skip(CephDfDetail) + class TestCephOsdTree(): def test_ceph_osd_tree(self): @@ -836,6 +857,9 @@ def test_ceph_osd_tree(self): assert len(result['nodes'][0]['children']) == 4 + def test_ceph_osd_tree_empty(self): + assert 'Empty output.' in test_empty_skip(CephOsdTree) + class TestCephReport(): def test_ceph_report(self): @@ -852,7 +876,5 @@ def test_invalid_json(self): CephReport(context_wrap(CEPH_REPORT_INVALID_JSON)) assert "Could not parse json." 
in str(e) - def test_invalid_empty(self): - with pytest.raises(SkipException) as e: - CephReport(context_wrap(CEPH_REPORT_EMPTY)) - assert "Empty output." in str(e) + def test_ceph_report_empty(self): + assert 'Empty output.' in test_empty_skip(CephReport) diff --git a/insights/parsers/tests/test_cloud_cfg.py b/insights/parsers/tests/test_cloud_cfg.py index 14ac56657..b683636fa 100644 --- a/insights/parsers/tests/test_cloud_cfg.py +++ b/insights/parsers/tests/test_cloud_cfg.py @@ -1,5 +1,7 @@ import doctest + from insights.parsers import cloud_cfg +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap @@ -20,6 +22,10 @@ def test_cloud_cfg(): assert result.data['config'][0]['name'] == 'eth0' +def test_cloud_cfg_empty(): + assert 'Empty output.' in test_empty_skip(cloud_cfg.CloudCfg) + + def test_doc_examples(): env = { 'cloud_cfg': cloud_cfg.CloudCfg(context_wrap(CONFIG_2)), diff --git a/insights/parsers/tests/test_cni_podman_bridge_conf.py b/insights/parsers/tests/test_cni_podman_bridge_conf.py index fa56ca90a..5d5756a79 100644 --- a/insights/parsers/tests/test_cni_podman_bridge_conf.py +++ b/insights/parsers/tests/test_cni_podman_bridge_conf.py @@ -1,8 +1,9 @@ import doctest -from insights.tests import context_wrap from insights.parsers import cni_podman_bridge_conf from insights.parsers.cni_podman_bridge_conf import CNIPodmanBridgeConf +from insights.parsers.tests import test_empty_skip +from insights.tests import context_wrap PODMAN_CNI_FILE = ''' { @@ -61,3 +62,7 @@ def test_cni_podman_bridge_conf(): conf = CNIPodmanBridgeConf(context_wrap(PODMAN_CNI_FILE)) assert len(conf["plugins"]) == 4 assert conf["plugins"][3]["type"] == "tuning" + + +def test_cni_podman_bridge_conf_empty(): + assert 'Empty output.' in test_empty_skip(CNIPodmanBridgeConf) diff --git a/insights/parsers/tests/test_engine_db_query.py b/insights/parsers/tests/test_engine_db_query.py index 031d13381..a3082bc26 100644 --- a/insights/parsers/tests/test_engine_db_query.py +++ b/insights/parsers/tests/test_engine_db_query.py @@ -1,6 +1,8 @@ import doctest import pytest -from insights.parsers import engine_db_query, ParseException, SkipException + +from insights.parsers import engine_db_query, ParseException +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap @@ -91,9 +93,7 @@ def test_edbq(): assert output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.40.20-33.git1b7dedcf3.fc30'}, {'vds_name': 'hosto2', 'rpm_version': 'vdsm-4.40.13-38.gite9bae3c68.fc30'}] # No content - with pytest.raises(SkipException) as e: - engine_db_query.EngineDBQueryVDSMversion(context_wrap("")) - assert "Empty output." in str(e) + assert 'Empty output.' 
in test_empty_skip(engine_db_query.EngineDBQueryVDSMversion) # Error with pytest.raises(ParseException) as e: diff --git a/insights/parsers/tests/test_freeipa_healthcheck_log.py b/insights/parsers/tests/test_freeipa_healthcheck_log.py index e0ae282e7..5db80b26b 100644 --- a/insights/parsers/tests/test_freeipa_healthcheck_log.py +++ b/insights/parsers/tests/test_freeipa_healthcheck_log.py @@ -1,6 +1,8 @@ import doctest + from insights.parsers import freeipa_healthcheck_log from insights.parsers.freeipa_healthcheck_log import FreeIPAHealthCheckLog +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap LONG_FREEIPA_HEALTHCHECK_LOG_OK = """ @@ -96,6 +98,10 @@ def test_freeipa_healthcheck_get_results_not_ok(): assert result['source'] == 'ipahealthcheck.system.filesystemspace' +def test_freeipa_healthcheck_log_empty(): + assert 'Empty output.' in test_empty_skip(FreeIPAHealthCheckLog) + + def test_freeipa_healthcheck_log__documentation(): env = { 'healthcheck': FreeIPAHealthCheckLog(context_wrap(FREEIPA_HEALTHCHECK_LOG_DOCS_EXAMPLE)), diff --git a/insights/parsers/tests/test_httpd_open_nfs.py b/insights/parsers/tests/test_httpd_open_nfs.py index 4935278af..9f5441a7e 100644 --- a/insights/parsers/tests/test_httpd_open_nfs.py +++ b/insights/parsers/tests/test_httpd_open_nfs.py @@ -1,7 +1,9 @@ +import doctest + from insights.parsers import httpd_open_nfs from insights.parsers.httpd_open_nfs import HttpdOnNFSFilesCount +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap -import doctest http_nfs = """ {"http_ids": [1787, 2399], "nfs_mounts": ["/data", "/www"], "open_nfs_files": 1000} @@ -16,6 +18,10 @@ def test_http_nfs(): assert httpd_nfs_counting.data.get("open_nfs_files") == 1000 +def test_empty(): + assert 'Empty output.' in test_empty_skip(HttpdOnNFSFilesCount) + + def test_http_nfs_documentation(): env = { 'httpon_nfs': HttpdOnNFSFilesCount(context_wrap(http_nfs)) diff --git a/insights/parsers/tests/test_ndctl_list.py b/insights/parsers/tests/test_ndctl_list.py index 671909803..2c62ec881 100644 --- a/insights/parsers/tests/test_ndctl_list.py +++ b/insights/parsers/tests/test_ndctl_list.py @@ -1,6 +1,8 @@ import doctest + from insights.parsers import ndctl_list from insights.parsers.ndctl_list import NdctlListNi +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap NDCTL_OUTPUT = """ @@ -35,7 +37,7 @@ """.strip() -def test_netstat_doc_examples(): +def test_ndctl_list_doc_examples(): env = { 'ndctl_list': NdctlListNi(context_wrap(NDCTL_OUTPUT)) } @@ -49,3 +51,7 @@ def test_get_dev_attr(): assert 'map' in ndctl.get_blockdev('pmem1') assert ndctl.get_blockdev('pmem1').get('map') == 'mem' assert ndctl.get_blockdev('pmem2') == {} + + +def test_empty(): + assert 'Empty output.' 
in test_empty_skip(NdctlListNi) diff --git a/insights/parsers/tests/test_rhsm_releasever.py b/insights/parsers/tests/test_rhsm_releasever.py index fb967ff9c..f9728394c 100644 --- a/insights/parsers/tests/test_rhsm_releasever.py +++ b/insights/parsers/tests/test_rhsm_releasever.py @@ -1,8 +1,9 @@ import doctest import pytest -import insights.parsers.rhsm_releasever as rhsm_releasever_module -from insights.parsers import SkipException + +from insights.parsers import rhsm_releasever as rhsm_releasever_module, SkipException from insights.parsers.rhsm_releasever import RhsmReleaseVer +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap RHEL_MAJ_MIN = '{"releaseVer": "6.10"}' @@ -45,10 +46,14 @@ def test_rhsm_releasever(): assert relver.minor is None with pytest.raises(SkipException) as e_info: - relver = RhsmReleaseVer(context_wrap(RHEL_EMPTY)) + RhsmReleaseVer(context_wrap(RHEL_EMPTY)) assert "releaseVer is not in data" in str(e_info.value) +def test_empty(): + assert 'Empty output.' in test_empty_skip(RhsmReleaseVer) + + def test_doc_examples(): env = { 'rhsm_releasever': RhsmReleaseVer(context_wrap(RHEL_MAJ_MIN)), diff --git a/insights/parsers/tests/test_rhv_log_collector_analyzer.py b/insights/parsers/tests/test_rhv_log_collector_analyzer.py index a72ab5dc6..3fdada667 100644 --- a/insights/parsers/tests/test_rhv_log_collector_analyzer.py +++ b/insights/parsers/tests/test_rhv_log_collector_analyzer.py @@ -1,4 +1,5 @@ from insights.parsers.rhv_log_collector_analyzer import RhvLogCollectorJson +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap RHV_ANALYZER_JSON = """ @@ -136,3 +137,6 @@ def test_rhv_log_collector_json(self): ] } assert result['rhv-log-collector-analyzer'][0]['file'] == 'cluster_query_migration_policy_check_legacy.sql' + + def test_empty(self): + assert 'Empty output.' in test_empty_skip(RhvLogCollectorJson) diff --git a/insights/parsers/tests/test_tags.py b/insights/parsers/tests/test_tags.py index 059ae0e12..efc9b530a 100644 --- a/insights/parsers/tests/test_tags.py +++ b/insights/parsers/tests/test_tags.py @@ -1,4 +1,5 @@ from insights.parsers.tags import Tags +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap tags_json_content = """ @@ -14,3 +15,7 @@ def test_tags_json(): assert result.data['owner'] == "test" assert result.data['exclude'] == "true" assert result.data['group'] == "app-db-01" + + +def test_tags_empty(): + assert 'Empty output.' in test_empty_skip(Tags) diff --git a/insights/parsers/tests/test_teamdctl_config_dump.py b/insights/parsers/tests/test_teamdctl_config_dump.py index 168bde3e5..20b096755 100644 --- a/insights/parsers/tests/test_teamdctl_config_dump.py +++ b/insights/parsers/tests/test_teamdctl_config_dump.py @@ -1,7 +1,9 @@ -from insights.parsers.teamdctl_config_dump import TeamdctlConfigDump +import doctest + from insights.parsers import teamdctl_config_dump +from insights.parsers.teamdctl_config_dump import TeamdctlConfigDump +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap -import doctest TEAMDCTL_CONFIG_DUMP_INFO = """ { @@ -38,6 +40,10 @@ def test_teamdctl_state_dump(): assert result.runner_hwaddr_policy == 'only_active' +def test_teamdctl_state_dump_empty(): + assert 'Empty output.' 
in test_empty_skip(TeamdctlConfigDump) + + def test_nmcli_doc_examples(): env = { 'teamdctl_config_dump': TeamdctlConfigDump(context_wrap(TEAMDCTL_CONFIG_DUMP_INFO)), diff --git a/insights/parsers/tests/test_teamdctl_state_dump.py b/insights/parsers/tests/test_teamdctl_state_dump.py index 44d161c40..c4849bab2 100644 --- a/insights/parsers/tests/test_teamdctl_state_dump.py +++ b/insights/parsers/tests/test_teamdctl_state_dump.py @@ -1,4 +1,5 @@ from insights.parsers.teamdctl_state_dump import TeamdctlStateDump +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap TEAMDCTL_STATE_DUMP_INFO = """ @@ -110,3 +111,7 @@ def test_teamdctl_state_dump_none(): assert result['setup']['runner_name'] == 'activebackup' assert result.runner_type == 'activebackup' assert result.team_ifname is None + + +def test_teamdctl_state_dump_empty(): + assert 'Empty output.' in test_empty_skip(TeamdctlStateDump) diff --git a/insights/parsers/tests/test_version_info.py b/insights/parsers/tests/test_version_info.py index 8c0234d48..288a4a9f6 100644 --- a/insights/parsers/tests/test_version_info.py +++ b/insights/parsers/tests/test_version_info.py @@ -1,5 +1,7 @@ import doctest + from insights.parsers import version_info +from insights.parsers.tests import test_empty_skip from insights.tests import context_wrap @@ -22,6 +24,10 @@ def test_version_info(): assert ret.client_version == '3.1.1' +def test_version_info_empty(): + assert 'Empty output.' in test_empty_skip(version_info.VersionInfo) + + def test_doc_examples(): env = { 'ver': version_info.VersionInfo(context_wrap(VER_INFO_2)), diff --git a/insights/parsers/tests/test_virt_uuid_facts.py b/insights/parsers/tests/test_virt_uuid_facts.py index d097651fe..1624d46c0 100644 --- a/insights/parsers/tests/test_virt_uuid_facts.py +++ b/insights/parsers/tests/test_virt_uuid_facts.py @@ -1,6 +1,7 @@ import doctest from insights.parsers import virt_uuid_facts +from insights.parsers.tests import test_empty_skip from insights.parsers.virt_uuid_facts import VirtUuidFacts from insights.tests import context_wrap @@ -20,6 +21,10 @@ def test_virt_uuid_facts(): assert result.data['virt.uuid'] == '4546B285-6C41-5D6R-86G5-0BFR4B3625FS' +def test_virt_uuid_facts_empty(): + assert 'Empty output.' 
in test_empty_skip(VirtUuidFacts) + + def test_virt_uuid_facts_doc_examples(): env = { 'VirtUuidFacts': VirtUuidFacts, From 62e0e1e3b21389bc8c3abbe91d6d4298c5c15f12 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 28 Jul 2021 23:47:00 +0530 Subject: [PATCH 497/892] Added dnsmasq_config spec (#3164) Signed-off-by: Rahul --- insights/specs/default.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index a4fb30c08..8544f50cf 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -223,6 +223,7 @@ def corosync_cmapctl_cmd_list(broker): dmsetup_status = simple_command("/usr/sbin/dmsetup status") dnf_conf = simple_file("/etc/dnf/dnf.conf") dnf_modules = glob_file("/etc/dnf/modules.d/*.module") + dnsmasq_config = glob_file(["/etc/dnsmasq.conf", "/etc/dnsmasq.d/*.conf"]) docker_info = simple_command("/usr/bin/docker info") docker_list_containers = simple_command("/usr/bin/docker ps --all --no-trunc") docker_list_images = simple_command("/usr/bin/docker images --all --no-trunc --digests") From c77f21a4ffe4855bed40b3251295d3c8eff48f83 Mon Sep 17 00:00:00 2001 From: Link Dupont Date: Thu, 29 Jul 2021 10:52:37 -0400 Subject: [PATCH 498/892] update uploader_json_map.json Signed-off-by: Link Dupont --- insights/client/uploader_json_map.json | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index d5b68fec8..c62306d94 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -2175,6 +2175,11 @@ "pattern": [], "symbolic_name": "dnf_modules" }, + { + "file": "/etc/dnsmasq.conf", + "pattern": [], + "symbolic_name": "dnsmasq_config" + }, { "file": "/etc/sysconfig/docker-storage-setup", "pattern": [], @@ -4171,6 +4176,11 @@ "symbolic_name": "cpu_vulns", "pattern": [] }, + { + "glob": "/etc/dnsmasq.d/*.conf", + "pattern": [], + "symbolic_name": "dnsmasq_config" + }, { "glob": "/sys/class/net/*/address", "symbolic_name": "mac_addresses", @@ -4386,5 +4396,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-07-15T11:42:35.950625" + "version": "2021-07-22T13:39:35.175760" } \ No newline at end of file From a84fe17cb07d83d0f6831df292eee22c93a2e820 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Mon, 2 Aug 2021 10:33:48 +0530 Subject: [PATCH 499/892] Add spec paths for Lsof in sosreport (#3173) Signed-off-by: Rohan Arora --- insights/specs/sos_archive.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 41421df15..9bec8b236 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -128,7 +128,11 @@ class SosSpecs(Specs): ls_sys_firmware = simple_file("sos_commands/boot/ls_-lanR_.sys.firmware") lscpu = simple_file("sos_commands/processor/lscpu") lsinitrd = simple_file("sos_commands/boot/lsinitrd") - lsof = simple_file("sos_commands/process/lsof_-b_M_-n_-l") + lsof = first_file([ + "sos_commands/process/lsof_M_-n_-l_-c", + "sos_commands/process/lsof_-b_M_-n_-l_-c", + "sos_commands/process/lsof_-b_M_-n_-l" + ]) lsmod = simple_file("sos_commands/kernel/lsmod") lspci = first_of([ simple_file("sos_commands/pci/lspci_-nnvv"), From 82835ed4fe67dff9423528c7b024cdefb30b0fe6 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 3 
Aug 2021 08:15:46 -0500
Subject: [PATCH 500/892] Add make none ci fix (#3156)

* Add make_none response for rules

* Changes how rules that return none are handled. Instead of raising
  a SkipComponent and ignoring the rule, they will now be counted in
  the results.
* Add a make_none response type
* Update formatters to handle the new type
* Update tests
* Fix #3026

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Remove none rules from detailed output

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Update to add none option to text format

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Fix legacy CI tests that check for None

* This change allows the legacy tests that check for a None result from
  a rule to work with make_none() without requiring changes to the rules
* The simplified integration testing fixture includes a new flag that
  allows switching to the new functionality on a per-test basis

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>

* Fix text format link rendering

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 conftest.py                                   | 10 ++-
 docs/manpages/insights-run.rst                |  3 +
 insights/__init__.py                          |  2 +-
 insights/core/plugins.py                      | 16 ++++-
 insights/formats/text.py                      | 41 ++++++++---
 insights/plugins/returns_none.py              | 25 +++++++
 insights/tests/__init__.py                    | 20 +++++-
 .../tests/test_plugins/test_returns_none.py   | 72 +++++++++++++++++++
 insights/tests/test_rules_fixture.py          |  7 +-
 9 files changed, 174 insertions(+), 22 deletions(-)
 create mode 100644 insights/plugins/returns_none.py
 create mode 100644 insights/tests/test_plugins/test_returns_none.py

diff --git a/conftest.py b/conftest.py
index c59a44df0..0fbe32038 100644
--- a/conftest.py
+++ b/conftest.py
@@ -28,7 +28,7 @@ def run_rule():
     internal support rules that will not be used in the customer facing
     Insights product.
     """
-    def _run_rule(rule, input_data):
+    def _run_rule(rule, input_data, return_make_none=False):
         """
         Fixture for rule integration testing

@@ -47,11 +47,15 @@ def test_myrule(run_rule):
             rule (object): Your rule function object.
             data (InputData): InputData obj containing all of the necessary
                 data for the test.
+            return_make_none (bool): Set to true if you are testing for ``make_none()``
+                results in your CI tests instead of ``None``.
         """
-        result = run_test(rule, input_data)
+        result = run_test(rule, input_data, return_make_none=return_make_none)
         # Check result for skip to be compatible with archive_provider decorator
         # Return None instead of result indicating missing component(s)
-        if result is not None and 'type' in result and result['type'] == 'skip':
+        if (result is not None and 'type' in result and
+                (result['type'] == 'skip' or
+                 (result['type'] == 'none' and not return_make_none))):
             return None
         else:
             return result
diff --git a/docs/manpages/insights-run.rst b/docs/manpages/insights-run.rst
index 0ad07a79c..5f1645412 100644
--- a/docs/manpages/insights-run.rst
+++ b/docs/manpages/insights-run.rst
@@ -75,6 +75,9 @@ OPTIONS
 -m --missing
    Show missing requirements.

+-n --none
+   Show rules returning ``None``.
+
 -p PLUGINS --plugins PLUGINS
    Comma-separated list without spaces of package(s) or module(s)
    containing plugins.
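
To see what this changes in practice, here is a minimal sketch (the
``none_demo`` module and its rule are hypothetical, written only to
illustrate the new behavior): a rule body that falls through and returns
``None`` is now wrapped in a ``make_none()`` response and counted,
instead of being silently dropped as a skip:

    # none_demo.py -- hypothetical plugin, not part of this patch
    from insights import rule, make_fail
    from insights.parsers.redhat_release import RedhatRelease

    @rule(RedhatRelease)
    def report(rel):
        if rel.major == 6:
            return make_fail("OLD_RELEASE", release=rel.version)
        # Falling through returns None; the engine now returns
        # make_none() here instead of raising SkipComponent.

Running ``insights-run -p none_demo -n`` against a RHEL 7 host then lists
``report`` under the new "Ret'd None" heading instead of omitting it from
the output.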
diff --git a/insights/__init__.py b/insights/__init__.py
index cbb37e94a..4198f13fb 100644
--- a/insights/__init__.py
+++ b/insights/__init__.py
@@ -38,7 +38,7 @@
 from .core.plugins import combiner, fact, metadata, parser, rule  # noqa: F401
 from .core.plugins import datasource, condition, incident  # noqa: F401
 from .core.plugins import make_response, make_metadata, make_fingerprint  # noqa: F401
-from .core.plugins import make_pass, make_fail, make_info  # noqa: F401
+from .core.plugins import make_pass, make_fail, make_info, make_none  # noqa: F401
 from .core.filters import add_filter, apply_filters, get_filters  # noqa: F401
 from .formats import get_formatter
 from .parsers import get_active_lines  # noqa: F401
diff --git a/insights/core/plugins.py b/insights/core/plugins.py
index ef99f6313..92815a894 100644
--- a/insights/core/plugins.py
+++ b/insights/core/plugins.py
@@ -302,7 +302,7 @@ def process(self, broker):
             return _make_skip(dr.get_name(self.component), missing)
         r = self.invoke(broker)
         if r is None:
-            raise dr.SkipComponent()
+            return make_none()
         if not isinstance(r, Response):
             raise Exception("rules must return Response objects.")
         return r
@@ -660,3 +660,17 @@ def __init__(self, rule_fqdn, missing):
             rule_fqdn=rule_fqdn,
             reason="MISSING_REQUIREMENTS",
             details=details)
+
+
+class make_none(Response):
+    """
+    Used to create a response for a rule that returns None.
+
+    This is not intended to be used by plugins, only infrastructure,
+    but it is not private so that we can easily add it to reporting.
+    """
+    response_type = "none"
+    key_name = "none_key"
+
+    def __init__(self):
+        super(make_none, self).__init__(key="NONE_KEY")
diff --git a/insights/formats/text.py b/insights/formats/text.py
index 7d54a35f6..7c0a1b9c3 100644
--- a/insights/formats/text.py
+++ b/insights/formats/text.py
@@ -89,9 +89,11 @@ def __init__(self, broker,
                  tracebacks=False,
                  dropped=False,
                  fail_only=False,
+                 none=False,
                  stream=sys.stdout):
         self.broker = broker
         self.missing = missing
+        self.none = none
         self.tracebacks = tracebacks
         self.dropped = dropped
         self.fail_only = fail_only
@@ -115,7 +117,8 @@ def preprocess(self):
                                 title="Fingerprint : "),
             'metadata': response(color=Fore.YELLOW, label="META", intl='M', title="Metadata    : "),
             'metadata_key': response(color=Fore.MAGENTA, label="META", intl='K', title="Metadata Key: "),
-            'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions  : ")
+            'exception': response(color=Fore.RED, label="EXCEPT", intl='E', title="Exceptions  : "),
+            'none': response(color=Fore.BLUE, label="RETURNED NONE", intl='N', title="Ret'd None  : ")
         }

         self.counts = {}
@@ -177,19 +180,27 @@ def printit(c, v):
             name = "%s%s%s" % (resp.color, name, Style.RESET_ALL)
             print(name, file=self.stream)
             print(underline, file=self.stream)
-            print(render_links(c), file=self.stream)
-            print(render(c, v), file=self.stream)
+            if v.get('type') != 'none':
+                print(render_links(c), file=self.stream)
+                print(render(c, v), file=self.stream)
             print(file=self.stream)

         for c in sorted(self.broker.get_by_type(rule), key=dr.get_name):
             v = self.broker[c]
             _type = v.get('type')
+            if _type is None:
+                continue
+
             if _type in self.responses:
                 self.counts[_type] += 1
-            if (_type and ((self.fail_only and _type == 'rule') or
-                           ((self.missing and _type == 'skip') or
-                            (not self.fail_only and _type != 'skip')))):
+
+            if ((self.fail_only and _type == 'rule') or
+                (self.missing and _type == 'skip') or
+                (self.none and _type == 'none')):
                 printit(c, v)
+            elif not self.fail_only and _type not in ['skip', 'none']:
+                printit(c, v)
+
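+            # Net effect of the branches above: with -F only FAIL (rule)
+            # hits are shown, -m adds the skipped rules, -n adds the rules
+            # that returned None, and with no flags everything except
+            # skips and "none" results is printed.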
print(file=self.stream) self.print_header("Rule Execution Summary", Fore.CYAN) @@ -214,23 +225,31 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' and '-n' or '-f', will be dropped when using them together", action="store_true") def __init__(self, args): self.missing = args.missing + self.none = args.none self.tracebacks = args.tracebacks self.dropped = args.dropped self.fail_only = args.fail_only self.formatter = None - if self.missing and self.fail_only: - print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) + if (self.missing or self.none) and self.fail_only: + print(Fore.YELLOW + 'Options conflict: -m/-n and -F, drops -F', file=sys.stderr) self.fail_only = False def preprocess(self, broker): - self.formatter = HumanReadableFormat(broker, - self.missing, self.tracebacks, self.dropped, self.fail_only) + self.formatter = HumanReadableFormat( + broker, + self.missing, + self.tracebacks, + self.dropped, + self.fail_only, + self.none + ) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/plugins/returns_none.py b/insights/plugins/returns_none.py new file mode 100644 index 000000000..360d68001 --- /dev/null +++ b/insights/plugins/returns_none.py @@ -0,0 +1,25 @@ +from insights.core.dr import SkipComponent +from insights import rule +from insights.parsers.redhat_release import RedhatRelease +from insights.core.plugins import make_none + + +@rule(RedhatRelease) +def report_none(u): + return + + +@rule(RedhatRelease) +def report_make_none(u): + return make_none() + + +@rule(RedhatRelease) +def report_skip_exception(u): + raise SkipComponent() + + +if __name__ == "__main__": + from insights import run + + broker = run([report_none, report_make_none], print_summary=True) diff --git a/insights/tests/__init__.py b/insights/tests/__init__.py index ec4a106c6..871b3a813 100644 --- a/insights/tests/__init__.py +++ b/insights/tests/__init__.py @@ -19,6 +19,7 @@ from insights import apply_filters from insights.core import dr, filters, spec_factory from insights.core.context import Context +from insights.core.plugins import make_none from insights.core.spec_factory import RegistryPoint from insights.specs import Specs @@ -82,6 +83,8 @@ def _get_registry_points(component): DEFAULT_RELEASE = "Red Hat Enterprise Linux Server release 7.2 (Maipo)" DEFAULT_HOSTNAME = "hostname.example.com" +MAKE_NONE_RESULT = make_none() + def deep_compare(result, expected): """ @@ -89,6 +92,12 @@ def deep_compare(result, expected): """ logger.debug("--Comparing-- (%s) %s to (%s) %s", type(result), result, type(expected), expected) + # This case ensures that when rules return a make_none() response, all of the older + # CI tests that are looking for None instead of make_none() will still pass + if result is None or (isinstance(result, dict) and result.get("type") == "none"): + assert (expected is None or expected == 
MAKE_NONE_RESULT), result + return + if isinstance(result, dict) and expected is None: assert result["type"] == "skip", result return @@ -108,7 +117,7 @@ def run_input_data(component, input_data): return broker -def run_test(component, input_data, expected=None): +def run_test(component, input_data, expected=None, return_make_none=False): if filters.ENABLED: mod = component.__module__ sup_mod = '.'.join(mod.split('.')[:-1]) @@ -121,9 +130,14 @@ def run_test(component, input_data, expected=None): raise Exception(msg % (mod, ", ".join(names))) broker = run_input_data(component, input_data) + result = broker.get(component) if expected: - deep_compare(broker.get(component), expected) - return broker.get(component) + deep_compare(result, expected) + elif result == MAKE_NONE_RESULT and not return_make_none: + # Convert make_none() result to None as default unless + # make_none explicitly requested + return None + return result def integrate(input_data, component): diff --git a/insights/tests/test_plugins/test_returns_none.py b/insights/tests/test_plugins/test_returns_none.py new file mode 100644 index 000000000..56f1778a0 --- /dev/null +++ b/insights/tests/test_plugins/test_returns_none.py @@ -0,0 +1,72 @@ +from insights.specs import Specs +from insights.tests import InputData, archive_provider, RHEL7, MAKE_NONE_RESULT +from insights.plugins import returns_none + + +@archive_provider(returns_none.report_make_none) +def integration_tests_1(): + input_data = InputData("test_return_make_none_1") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, None + + input_data = InputData("test_return_make_none_2") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, MAKE_NONE_RESULT + + +@archive_provider(returns_none.report_none) +def integration_tests_2(): + input_data = InputData("test_return_none_1") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, None + + input_data = InputData("test_return_none_2") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, MAKE_NONE_RESULT + + +@archive_provider(returns_none.report_skip_exception) +def integration_tests_3(): + input_data = InputData("test_return_skip_exception_1") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, None + + input_data = InputData("test_return_skip_exception_2") + input_data.add(Specs.redhat_release, RHEL7) + yield input_data, None + + +def test_integration_1(run_rule): + input_data = InputData("test_return_make_none_1_1") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_make_none, input_data) + assert result is None + + input_data = InputData("test_return_make_none_2_2") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_make_none, input_data, return_make_none=True) + assert result == MAKE_NONE_RESULT + + +def test_integration_2(run_rule): + input_data = InputData("test_return_none_2_1") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_none, input_data) + assert result is None + + input_data = InputData("test_return_none_2_2") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_none, input_data, return_make_none=True) + assert result == MAKE_NONE_RESULT + + +def test_integration_3(run_rule): + input_data = InputData("test_return_skip_exception_2_1") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_skip_exception, input_data) + assert result is None + + input_data = 
InputData("test_return_skip_exception_2_2") + input_data.add(Specs.redhat_release, RHEL7) + result = run_rule(returns_none.report_skip_exception, input_data, return_make_none=True) + assert result is None diff --git a/insights/tests/test_rules_fixture.py b/insights/tests/test_rules_fixture.py index c6d266c59..3dce58fbe 100644 --- a/insights/tests/test_rules_fixture.py +++ b/insights/tests/test_rules_fixture.py @@ -1,4 +1,4 @@ -from insights.core.plugins import make_pass, make_fail +from insights.core.plugins import make_pass, make_fail, make_none from insights.specs import Specs from insights.plugins import rules_fixture_plugin from insights.tests import InputData @@ -36,5 +36,6 @@ def test_rules_fixture(run_rule): assert results == expected input_data = InputData('test_ret_none') - results = run_rule(rules_fixture_plugin.report, input_data) - assert results is None + results = run_rule(rules_fixture_plugin.report, input_data, return_make_none=True) + expected = make_none() + assert results == expected From fb4adff221e1ed1cddef7689ef08b673f2d59d0a Mon Sep 17 00:00:00 2001 From: Ping Qin <30404410+qinpingli@users.noreply.github.com> Date: Wed, 4 Aug 2021 03:33:50 +0800 Subject: [PATCH 501/892] New spec for getting oracle asm disks udev rules (#3137) * New spec for getting oracle asm disks udev rules Signed-off-by: Qin Ping * Register etc_udev_oracle_asm_rules as a multi_output spec Signed-off-by: Qin Ping * Move UdevRulesOracleASM from etc_udev_rules to udev_rules Signed-off-by: Qin Ping --- insights/parsers/etc_udev_rules.py | 1 - insights/parsers/tests/test_udev_rules.py | 32 +++++++++++++++-- insights/parsers/udev_rules.py | 43 +++++++++++++++++++++-- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 73 insertions(+), 5 deletions(-) diff --git a/insights/parsers/etc_udev_rules.py b/insights/parsers/etc_udev_rules.py index 84db2685c..03472f377 100644 --- a/insights/parsers/etc_udev_rules.py +++ b/insights/parsers/etc_udev_rules.py @@ -9,7 +9,6 @@ UdevRules40Redhat - file ``/etc/udev/rules.d/40-redhat.rules`` -------------------------------------------------------------- - """ from insights import parser from insights.core import LogFileOutput diff --git a/insights/parsers/tests/test_udev_rules.py b/insights/parsers/tests/test_udev_rules.py index 5116f2fe3..81e03f5e5 100644 --- a/insights/parsers/tests/test_udev_rules.py +++ b/insights/parsers/tests/test_udev_rules.py @@ -1,6 +1,6 @@ import doctest from insights.parsers import udev_rules -from insights.parsers.udev_rules import UdevRulesFCWWPN, UdevRules40Redhat +from insights.parsers.udev_rules import UdevRulesFCWWPN, UdevRules40Redhat, UdevRulesOracleASM from insights.tests import context_wrap UDEV_RULES_FILT_HIT = """ @@ -70,10 +70,28 @@ LABEL="zfcp_scsi_device_end" """.strip() +ORACLE_ASM_UDEV_RULES = """ +KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", \ +RESULT=="360060e80164c210000014c2100007a8f", \ +SYMLINK+="oracleasm/disks/asm_sbe80_7a8f", OWNER="oracle", GROUP="dba", MODE="0660" + +KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", \ +RESULT=="360060e80164c210000014c2100007a90", \ +SYMLINK+="oracleasm/disks/asm_sbe80_7a90", OWNER="oracle", GROUP="dba", MODE="0660" + +KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", \ +RESULT=="360060e80164c210000014c2100007a91", \ +SYMLINK+="oracleasm/disks/asm_sbe80_7a91", OWNER="oracle", GROUP="dba", MODE="0660" + +# NOTE: Insert new Oracle ASM LUN configuration before this 
comment
+ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch"
+""".strip()
+

 def test_documentation():
     env = {'udev_rules': UdevRulesFCWWPN(context_wrap(UDEV_RULES_FILT_HIT)),
-           'udev_40_redhat_rules': UdevRules40Redhat(context_wrap(SAMPLE_40_REDHAT_RULES))}
+           'udev_40_redhat_rules': UdevRules40Redhat(context_wrap(SAMPLE_40_REDHAT_RULES)),
+           'udev_oracle_asm_rules': UdevRulesOracleASM(context_wrap(ORACLE_ASM_UDEV_RULES))}
     failed_count, tests = doctest.testmod(udev_rules, globs=env)
     assert failed_count == 0
@@ -92,3 +110,13 @@ def test_udev_40_redhat_rules():
                  'SUBSYSTEM!="memory", GOTO="memory_hotplug_end"',
                  'ACTION!="add", GOTO="memory_hotplug_end"']:
         assert line in result.lines
+
+
+def test_udev_oracle_asm_rules():
+    result = UdevRulesOracleASM(context_wrap(ORACLE_ASM_UDEV_RULES))
+    for line in ['ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch"',
+                 'KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", RESULT=="360060e80164c210000014c2100007a8f", SYMLINK+="oracleasm/disks/asm_sbe80_7a8f", OWNER="oracle", GROUP="dba", MODE="0660"']:
+        assert line in result.lines
+    actions = result.get('ACTION')
+    assert len(actions) == 1
+    assert actions[0]['raw_message'] == 'ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch"'
diff --git a/insights/parsers/udev_rules.py b/insights/parsers/udev_rules.py
index fdc33ca02..8442fcc20 100644
--- a/insights/parsers/udev_rules.py
+++ b/insights/parsers/udev_rules.py
@@ -1,6 +1,6 @@
 """
-UdevRules - file ``/usr/lib/udev/rules.d/``
-===========================================
+UdevRules - files ``/usr/lib/udev/rules.d/*`` and ``/etc/udev/rules.d/``
+========================================================================

 The parsers included in this module are:

@@ -9,6 +9,9 @@
 UdevRules40Redhat - files ``/etc/udev/rules.d/40-redhat.rules``, ``/run/udev/rules.d/40-redhat.rules``, ``/usr/lib/udev/rules.d/40-redhat.rules``, ``/usr/local/lib/udev/rules.d/40-redhat.rules``
 --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+UdevRulesOracleASM - file ``/etc/udev/rules.d/*asm*.rules``
+-----------------------------------------------------------
 """
 from insights import parser
 from insights.core import LogFileOutput
@@ -65,3 +68,39 @@ class UdevRules40Redhat(LogFileOutput):
     True
     """
     pass
+
+
+@parser(Specs.etc_udev_oracle_asm_rules)
+class UdevRulesOracleASM(LogFileOutput):
+    """
+    Read the content of the ``/etc/udev/rules.d/*asm*.rules`` files.
+
+    .. note::
+
+        The syntax of the `.rules` files is complex, and no rule currently
+        requires the fully serialized parse result. The only existing rule
+        just needs to check the syntax of a few specific lines, so
+        :class:`insights.core.LogFileOutput` is used as the base class.
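+
+        As with any :class:`insights.core.LogFileOutput` subclass, the raw
+        lines are kept in the ``lines`` attribute, and ``get(s)`` returns a
+        list of dicts (one per matching line) whose ``raw_message`` key
+        holds the original line, as the examples below show.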
+ + Sample input:: + + KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", \ + RESULT=="360060e80164c210000014c2100007a8f", \ + SYMLINK+="oracleasm/disks/asm_sbe80_7a8f", OWNER="oracle", GROUP="dba", MODE="0660" + + + KERNEL=="dm*", PROGRAM=="scsi_id --page=0x83 --whitelisted --device=/dev/%k", \ + RESULT=="360060e80164c210000014c2100007a91", \ + SYMLINK+="oracleasm/disks/asm_sbe80_7a91", OWNER="oracle", GROUP="dba", MODE="0660" + + # NOTE: Insert new Oracle ASM LUN configuration before this comment + ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch" + + Examples: + + >>> 'ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch"' in udev_oracle_asm_rules.lines + True + >>> udev_oracle_asm_rules.get('ACTION')[0]['raw_message'] + 'ACTION=="add|change", KERNEL=="sd*", OPTIONS:="nowatch"' + """ + pass diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index f60971508..ce7defd81 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -147,6 +147,7 @@ class Specs(SpecSet): etc_journald_conf = RegistryPoint() etc_machine_id = RegistryPoint() etc_udev_40_redhat_rules = RegistryPoint(filterable=True) + etc_udev_oracle_asm_rules = RegistryPoint(multi_output=True, filterable=True) etcd_conf = RegistryPoint(filterable=True) ethernet_interfaces = RegistryPoint() ethtool_a = RegistryPoint(multi_output=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 8544f50cf..73e3deec2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -245,6 +245,7 @@ def du_dirs_list(broker): etc_machine_id = simple_file("/etc/machine-id") etc_udev_40_redhat_rules = first_file(["/etc/udev/rules.d/40-redhat.rules", "/run/udev/rules.d/40-redhat.rules", "/usr/lib/udev/rules.d/40-redhat.rules", "/usr/local/lib/udev/rules.d/40-redhat.rules"]) + etc_udev_oracle_asm_rules = glob_file(r"/etc/udev/rules.d/*asm*.rules") etcd_conf = simple_file("/etc/etcd/etcd.conf") ethernet_interfaces = listdir("/sys/class/net", context=HostContext) ethtool = foreach_execute(ethernet.interfaces, "/sbin/ethtool %s") From f281f3ebab846d442f444560cf53968f42ecc079 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 4 Aug 2021 11:09:19 +0800 Subject: [PATCH 502/892] Insert the 'Slot' to the parser result of LsPci (#3149) * Insert the 'Slot' to the parser result of LsPci Signed-off-by: Xiangce Liu * Add slot to the doc example Signed-off-by: Xiangce Liu * Fix the variable name Signed-off-by: Xiangce Liu --- insights/combiners/tests/test_lspci.py | 1 + insights/parsers/lspci.py | 18 +++++++++++------- insights/parsers/tests/test_lspci.py | 10 ++++++++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/insights/combiners/tests/test_lspci.py b/insights/combiners/tests/test_lspci.py index 9bf9396f4..178e44971 100644 --- a/insights/combiners/tests/test_lspci.py +++ b/insights/combiners/tests/test_lspci.py @@ -102,6 +102,7 @@ def test_lspci_k(): assert sorted(lspci.pci_dev_list) == ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] assert lspci.search(Dev_Details__contains='I218') == [ { + 'Slot': '00:19.0', 'Driver': 'e1000e', 'Module': ['e1000e'], 'Subsystem': 'Lenovo ThinkPad X240', 'Dev_Details': 'Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04)' diff --git a/insights/parsers/lspci.py b/insights/parsers/lspci.py index db8d19cbe..b3730a4ef 100644 --- a/insights/parsers/lspci.py +++ b/insights/parsers/lspci.py @@ -57,6 +57,8 @@ class LsPci(CommandParser, LogFileOutput): False >>> 
sorted(lspci.pci_dev_list) ['00:00.0', '00:01.0', '00:02.0', '03:00.0', '06:00.0'] + >>> lspci.pci_dev_details('00:00.0')['Slot'] + '00:00.0' >>> lspci.pci_dev_details('00:00.0')['Subsystem'] 'Cisco Systems Inc Device 0101' >>> lspci.pci_dev_details('00:00.0')['Dev_Details'] @@ -77,21 +79,23 @@ def parse_content(self, content): scanner(self) # Parse kernel driver lines self.data = {} - bus_device_function = None - bus_device_function_re = re.compile(r'[0-9a-f]+:[0-9a-f]+.[0-9a-f]+') + slot = None + slot_re = re.compile(r'^[0-9a-f]+:[0-9a-f]+.[0-9a-f]+') fields = ["Subsystem", "Kernel driver in use", "Kernel modules"] for line in get_active_lines(content): parts = line.split() - if bus_device_function_re.match(parts[0]): - bus_device_function = parts[0] + if slot_re.match(parts[0]): + slot = parts[0] device_details = line.split(None, 1)[-1] # keep the raw line - self.data[bus_device_function] = {'Dev_Details': device_details.lstrip()} - elif bus_device_function and (line.split(":")[0].strip() in fields): + self.data[slot] = { + 'Slot': slot, + 'Dev_Details': device_details.lstrip()} + elif slot and (line.split(":")[0].strip() in fields): parts = line.split(':') - self.data[bus_device_function][parts[0]] = parts[1].lstrip() + self.data[slot][parts[0]] = parts[1].lstrip() def pci_dev_details(self, dev_name): """ diff --git a/insights/parsers/tests/test_lspci.py b/insights/parsers/tests/test_lspci.py index 361837d6c..bf5d7a127 100644 --- a/insights/parsers/tests/test_lspci.py +++ b/insights/parsers/tests/test_lspci.py @@ -274,14 +274,16 @@ def test_lspci_driver(): lspci_obj = LsPci(context_wrap(LSPCI_DRIVER_DETAILS)) assert len(lspci_obj.data) == 44 dev_info = lspci_obj.pci_dev_details('00:01.0') - assert len(dev_info) == 3 + assert len(dev_info) == 4 assert dev_info['Kernel driver in use'] == 'pcieport' + assert dev_info['Slot'] == '00:01.0' assert len(lspci_obj.pci_dev_list) == 44 lspci_obj = LsPci(context_wrap(LSPCI_DRIVER_DETAILS_2)) assert len(lspci_obj.data) == 4 dev_info = lspci_obj.pci_dev_details('04:00.0') - assert len(dev_info) == 1 + assert len(dev_info) == 2 + assert dev_info['Slot'] == '04:00.0' assert 'Kernel driver in use' not in dev_info assert len(lspci_obj.pci_dev_list) == 4 @@ -298,10 +300,14 @@ def test_lspci_vmmkn(): lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) assert sorted(lspci_vmmkn.pci_dev_list) == ['00:00.0', '00:01.0', '00:01.1', '00:03.0'] assert lspci_vmmkn[0].get('Driver') is None + assert lspci_vmmkn[0].get('Slot') == '00:00.0' assert lspci_vmmkn[1].get('Vendor') == '8086' + assert lspci_vmmkn[1].get('Slot') == '00:01.0' assert lspci_vmmkn[1].get('Device') == '7010' + assert lspci_vmmkn[2].get('Slot') == '00:01.1' assert lspci_vmmkn[2].get('SVendor') == '1af4' assert lspci_vmmkn[3].get('SDevice') == '0001' + assert lspci_vmmkn[3].get('Slot') == '00:03.0' assert lspci_vmmkn[-1].get('Driver') == 'virtio-pci' assert sorted(lspci_vmmkn[1].get('Module')) == sorted(['ata_piix', 'ata_generic']) assert lspci_vmmkn[-1].get('Module') is None From 80f3fc6e45fe1d09d0213f10ae56e9f2ed8e10db Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Wed, 4 Aug 2021 11:11:51 -0400 Subject: [PATCH 503/892] RHCLOUDTurn playbook validation on (#3167) * Turn playbook validation on Signed-off-by: Alec Cohan * Update silly mistake Signed-off-by: Alec Cohan Co-authored-by: Jeremy Crafts --- insights/client/apps/ansible/playbook_verifier/__init__.py | 2 +- insights/client/apps/ansible/playbook_verifier/__main__.py | 4 ++-- 2 files changed, 3 
insertions(+), 3 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index 5e0d64f70..cc7bdedf2 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -123,7 +123,7 @@ def verifyPlaybookSnippet(snippet): return executeVerification(snippetCopy, encodedSignature) -def verify(playbook, skipVerify=True): +def verify(playbook, skipVerify=False): """ Verify the signed playbook. diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index c1688f84c..d49bb56fb 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -17,10 +17,10 @@ def read_playbook(): playbook = read_playbook() playbook_yaml = loadPlaybookYaml(playbook) -skipVerify = True +skipVerify = False if (os.environ.get('SKIP_VERIFY')): - skipVerify = False + skipVerify = True try: verified_playbook = verify(playbook_yaml, skipVerify) From 261efb31056cd44d5a9d6d0ba00131d6029933af Mon Sep 17 00:00:00 2001 From: rh-tguittet <88336850+rh-tguittet@users.noreply.github.com> Date: Wed, 4 Aug 2021 20:51:09 +0200 Subject: [PATCH 504/892] Docs typo: extra dot at the end of URL points to the wrong page (#3177) Signed-off-by: Thibault Guittet --- docs/intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/intro.rst b/docs/intro.rst index 76b0a2ff8..8f3ec4cbd 100644 --- a/docs/intro.rst +++ b/docs/intro.rst @@ -167,7 +167,7 @@ many customization options to optimize each customer's specific needs. .. Links: .. _Red Hat Customer Portal: https://access.redhat.com -.. _Red Hat Insights Portal: https://access.redhat.com/products/red-hat-insights. +.. _Red Hat Insights Portal: https://access.redhat.com/products/red-hat-insights .. _insights-core Repository: https://github.com/RedHatInsights/insights-core .. _Mozilla OpenSSH Security Guidelines: https://wiki.mozilla.org/Security/Guidelines/OpenSSH .. 
_Red Hat Insights Client GitHub Project: http://github.com/redhataccess/insights-client From a0fce998d6eee0b0d86768e56e5ae4e6aea17457 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 5 Aug 2021 02:56:29 +0800 Subject: [PATCH 505/892] Add new option '--show-rules' to insights-run (#3175) * Add new option '--show-rules' to insights-run Signed-off-by: Xiangce Liu * tiny update Signed-off-by: Xiangce Liu * Fix the test errors Signed-off-by: Xiangce Liu * tiny update to markdown Signed-off-by: Xiangce Liu * Fix the issue mentioned in 3161 Signed-off-by: Xiangce Liu * Add more comments (for _json, html and _yaml) Signed-off-by: Xiangce Liu * update the comments Signed-off-by: Xiangce Liu * update the comments again Signed-off-by: Xiangce Liu * Fix the order of args Signed-off-by: Xiangce Liu * Fix the missed condition in text for self.none Signed-off-by: Xiangce Liu --- insights/formats/__init__.py | 56 +++++++++++++++++++++++++++++++---- insights/formats/_json.py | 15 ++++++++-- insights/formats/_markdown.py | 42 +++++++++++++++----------- insights/formats/_yaml.py | 15 ++++++++-- insights/formats/text.py | 52 ++++++++++++++++++-------------- 5 files changed, 131 insertions(+), 49 deletions(-) diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py index 78acdb0ee..fc325d2ff 100644 --- a/insights/formats/__init__.py +++ b/insights/formats/__init__.py @@ -53,6 +53,7 @@ def postprocess(self, broker): Called after all components have been run. Useful for interrogating the broker for final state. """ + pass class Formatter(object): @@ -84,19 +85,33 @@ class EvaluatorFormatterAdapter(FormatterAdapter): @staticmethod def configure(p): - """ Override to add arguments to the ArgumentParser. """ - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-S", "--show-rules", nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. 
This option is deprecated by '-S fail'", + action="store_true") def __init__(self, args=None): if args: hn = "insights.combiners.hostname, insights.parsers.branch_info" args.plugins = ",".join([args.plugins, hn]) if args.plugins else hn - if args.fail_only: - print('Options conflict: -f and -F, drops -F', file=sys.stderr) - args.fail_only = False + self.missing = args.missing + fail_only = args.fail_only + if args.missing and fail_only: + # Drops the '-F' silently when specifying '-m' and '-F' together + # --> Do NOT break the Format of the output + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] def preprocess(self, broker): - self.formatter = self.Impl(broker) + self.formatter = self.Impl(broker, self.missing, self.show_rules) self.formatter.preprocess() def postprocess(self, broker): @@ -155,3 +170,32 @@ def render(comp, val): _type = dr.get_component_type(comp) func = RENDERERS.get(_type) return func(comp, val) if func else str(val) + + +def get_response_of_types(response, missing=True, show_rules=None): + # Check the "-m" option: + # - When "-m" is specified, show the "skips" rules + # - When "-m" is NOT specified, do not show the "skips" rules + if not missing and 'skips' in response: + response.pop('skips') + # Check the "-S" option: + # - When "-m" is specified but "-S" is NOT specified, show all the loaded rules + # - When neither "-m" nor "-S" is specified, show all the HIT rules (exclude the "skips") + if not show_rules: + return response + # - Discard the "medadata" rules when it's not specified in the "-S" option + if 'metadata' not in show_rules and 'metadata' in response.get('system', {}): + response['system'].pop('metadata') + # - Discard the "make_fail" rules when it's not specified in the "-S" option + if 'rule' not in show_rules and 'reports' in response: + response.pop('reports') + # - Discard the "make_info" rules when it's not specified in the "-S" option + if 'info' not in show_rules and 'info' in response: + response.pop('info') + # - Discard the "make_pass" rules when it's not specified in the "-S" option + if 'pass' not in show_rules and 'pass' in response: + response.pop('pass') + # - Discard the "fingerprint" rules when it's not specified in the "-S" option + if 'fingerprint' not in show_rules and 'fingerprints' in response: + response.pop('fingerprints') + return response diff --git a/insights/formats/_json.py b/insights/formats/_json.py index bdc8193bd..e3dfde4c4 100644 --- a/insights/formats/_json.py +++ b/insights/formats/_json.py @@ -1,12 +1,23 @@ import json +import sys from insights.core.evaluators import SingleEvaluator as Evaluator -from insights.formats import EvaluatorFormatterAdapter +from insights.formats import EvaluatorFormatterAdapter, get_response_of_types class JsonFormat(Evaluator): + def __init__(self, + broker=None, + missing=False, + show_rules=None, + stream=sys.stdout): + super(JsonFormat, self).__init__(broker, stream=stream) + self.missing = missing + self.show_rules = [] if show_rules is None else show_rules + def postprocess(self): - json.dump(self.get_response(), self.stream) + response = get_response_of_types(self.get_response(), self.missing, self.show_rules) + json.dump(response, self.stream) class JsonFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/_markdown.py b/insights/formats/_markdown.py index 
bc08c2cca..90ac235b3 100644 --- a/insights/formats/_markdown.py +++ b/insights/formats/_markdown.py @@ -40,15 +40,13 @@ def __init__(self, missing=False, tracebacks=False, dropped=False, - fail_only=False, + show_rules=None, stream=sys.stdout): - super(MarkdownFormat, self).__init__(broker, stream) - self.broker = broker + super(MarkdownFormat, self).__init__(broker, stream=stream) self.missing = missing self.tracebacks = tracebacks self.dropped = dropped - self.fail_only = fail_only - self.stream = stream + self.show_rules = [] if show_rules is None else show_rules self.counts = {'skip': 0, 'pass': 0, 'rule': 0, 'info': 0, 'metadata': 0, 'metadata_key': 0, 'fingerprint': 0, 'exception': 0} self.responses = { @@ -150,8 +148,8 @@ def printit(c, v): if _type: if self.missing and _type == 'skip': print_missing(c, v) - elif ((self.fail_only and _type == 'rule') or - (not self.fail_only and _type != 'skip')): + elif ((self.show_rules and _type in self.show_rules) or + (not self.show_rules and _type != 'skip')): printit(c, v) print(file=self.stream) @@ -182,24 +180,34 @@ class MarkdownFormatAdapter(FormatterAdapter): @staticmethod def configure(p): - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' or '-f', will be dropped when using them together", action="store_true") - - def __init__(self, args): + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-S", "--show-rules", nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. 
This option is deprecated by '-S fail'", + action="store_true") + + def __init__(self, args=None): self.missing = args.missing + fail_only = args.fail_only + if args.missing and fail_only: + print('Options conflict: -m and -F, drops -F', file=sys.stderr) + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] self.tracebacks = args.tracebacks self.dropped = args.dropped - self.fail_only = args.fail_only - self.formatter = None - if self.missing and self.fail_only: - print('Options conflict: -m and -F, drops -F', file=sys.stderr) - self.fail_only = False def preprocess(self, broker): self.formatter = MarkdownFormat(broker, - self.missing, self.tracebacks, self.dropped, self.fail_only) + self.missing, self.tracebacks, self.dropped, self.show_rules) self.formatter.preprocess() def postprocess(self, broker): diff --git a/insights/formats/_yaml.py b/insights/formats/_yaml.py index 37772353b..ae1d3c816 100644 --- a/insights/formats/_yaml.py +++ b/insights/formats/_yaml.py @@ -1,7 +1,8 @@ import yaml +import sys from insights.core.evaluators import SingleEvaluator -from insights.formats import EvaluatorFormatterAdapter +from insights.formats import EvaluatorFormatterAdapter, get_response_of_types from yaml.representer import Representer from insights.core import ScanMeta @@ -9,8 +10,18 @@ class YamlFormat(SingleEvaluator): + def __init__(self, + broker=None, + missing=False, + show_rules=None, + stream=sys.stdout): + super(YamlFormat, self).__init__(broker, stream=stream) + self.missing = missing + self.show_rules = [] if show_rules is None else show_rules + def postprocess(self): - yaml.dump(self.get_response(), self.stream) + response = get_response_of_types(self.get_response(), self.missing, self.show_rules) + yaml.dump(response, self.stream) class YamlFormatterAdapter(EvaluatorFormatterAdapter): diff --git a/insights/formats/text.py b/insights/formats/text.py index 7c0a1b9c3..e37e8ed3c 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -89,16 +89,15 @@ def __init__(self, broker, missing=False, tracebacks=False, dropped=False, - fail_only=False, none=False, + show_rules=None, stream=sys.stdout): - self.broker = broker + super(HumanReadableFormat, self).__init__(broker, stream=stream) self.missing = missing self.none = none self.tracebacks = tracebacks self.dropped = dropped - self.fail_only = fail_only - self.stream = stream + self.show_rules = [] if show_rules is None else show_rules def print_header(self, header, color): ln = len(header) @@ -194,11 +193,10 @@ def printit(c, v): if _type in self.responses: self.counts[_type] += 1 - if ((self.fail_only and _type == 'rule') or - (self.missing and _type == 'skip') or - (self.none and _type == 'none')): - printit(c, v) - elif not self.fail_only and _type not in ['skip', 'none']: + if ((self.missing and _type == 'skip') or + (self.show_rules and _type in self.show_rules) or + (self.none and _type == 'none') or + (not self.show_rules and _type not in ['skip', 'none'])): printit(c, v) print(file=self.stream) @@ -224,22 +222,32 @@ class HumanReadableFormatAdapter(FormatterAdapter): @staticmethod def configure(p): - p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") p.add_argument("-t", "--tracebacks", help="Show 
stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") - p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m' and '-n' or '-f', will be dropped when using them together", action="store_true") - - def __init__(self, args): - self.missing = args.missing - self.none = args.none + p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") + p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") + p.add_argument("-S", "--show-rules", default=[], nargs="+", + choices=["fail", "info", "pass", "metadata", "fingerprint"], + metavar="TYPE", + help="Show results per rule type(s).") + p.add_argument("-F", "--fail-only", + help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", + action="store_true") + + def __init__(self, args=None): self.tracebacks = args.tracebacks self.dropped = args.dropped - self.fail_only = args.fail_only - self.formatter = None - if (self.missing or self.none) and self.fail_only: + self.missing = args.missing + self.none = args.none + fail_only = args.fail_only + if (self.missing or self.none) and fail_only: print(Fore.YELLOW + 'Options conflict: -m/-n and -F, drops -F', file=sys.stderr) - self.fail_only = False + fail_only = None + self.show_rules = [] # Empty by default, means show ALL types + if not args.show_rules and fail_only: + self.show_rules = ['rule'] + elif args.show_rules: + self.show_rules = [opt.replace('fail', 'rule') for opt in args.show_rules] def preprocess(self, broker): self.formatter = HumanReadableFormat( @@ -247,8 +255,8 @@ def preprocess(self, broker): self.missing, self.tracebacks, self.dropped, - self.fail_only, - self.none + self.none, + self.show_rules, ) self.formatter.preprocess() From ff2202410525c3a3d60b045f4a06f6fe6fa28dcd Mon Sep 17 00:00:00 2001 From: Ping Qin <30404410+qinpingli@users.noreply.github.com> Date: Thu, 5 Aug 2021 03:23:57 +0800 Subject: [PATCH 506/892] New spec for oracle asmlib configuration (#3169) Signed-off-by: Qin Ping --- insights/parsers/sysconfig.py | 47 +++++++++++++++++++ .../tests/test_sysconfig_doc_examples.py | 25 +++++++++- .../parsers/tests/test_sysconfig_oracleasm.py | 38 +++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 111 insertions(+), 1 deletion(-) create mode 100644 insights/parsers/tests/test_sysconfig_oracleasm.py diff --git a/insights/parsers/sysconfig.py b/insights/parsers/sysconfig.py index 0913638eb..4ac0c7bd7 100644 --- a/insights/parsers/sysconfig.py +++ b/insights/parsers/sysconfig.py @@ -74,6 +74,9 @@ GrubSysconfig - files ``/etc/sysconfig/grub`` --------------------------------------------- + +OracleasmSysconfig - files ``/etc/sysconfig/oracleasm`` +------------------------------------------------------- """ @@ -663,3 +666,47 @@ class GrubSysconfig(SysconfigOptions): """ pass + + +@parser(Specs.sysconfig_oracleasm) +class OracleasmSysconfig(SysconfigOptions): + """ + Class to parse the ``/etc/sysconfig/oracleasm`` + + Typical content example:: + + # + # This is a configuration file for automatic loading of the Oracle + # Automatic Storage Management library kernel driver. It is generated + # By running /etc/init.d/oracleasm configure. Please use that method + # to modify this file + # + + # ORACLEASM_ENABELED: 'true' means to load the driver on boot. 
+ ORACLEASM_ENABLED=true + + # ORACLEASM_UID: Default user owning the /dev/oracleasm mount point. + ORACLEASM_UID=oracle + + # ORACLEASM_GID: Default group owning the /dev/oracleasm mount point. + ORACLEASM_GID=oinstall + + # ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot. + ORACLEASM_SCANBOOT=true + + # ORACLEASM_SCANORDER: Matching patterns to order disk scanning + ORACLEASM_SCANORDER="dm" + + # ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan + ORACLEASM_SCANEXCLUDE="sd" + + Examples: + >>> oracleasm_syscfg.get('ORACLEASM_SCANBOOT') + 'true' + >>> 'ORACLEASM_SCANORDER' in oracleasm_syscfg + True + >>> 'ORACLEASM_SCANEXCLUDE_1' in oracleasm_syscfg + False + + """ + pass diff --git a/insights/parsers/tests/test_sysconfig_doc_examples.py b/insights/parsers/tests/test_sysconfig_doc_examples.py index 2bea5e305..b1c22cbbf 100755 --- a/insights/parsers/tests/test_sysconfig_doc_examples.py +++ b/insights/parsers/tests/test_sysconfig_doc_examples.py @@ -13,6 +13,7 @@ from insights.parsers.sysconfig import IfCFGStaticRoute from insights.parsers.sysconfig import NetworkSysconfig from insights.parsers.sysconfig import GrubSysconfig +from insights.parsers.sysconfig import OracleasmSysconfig import doctest @@ -169,6 +170,27 @@ GRUB_ENABLE_BLSCFG=true """.strip() +ORACLEASM_SYSCONFIG = """ +# +# This is a configuration file for automatic loading of the Oracle +# Automatic Storage Management library kernel driver. It is generated +# By running /etc/init.d/oracleasm configure. Please use that method +# to modify this file +# +# ORACLEASM_ENABELED: 'true' means to load the driver on boot. +ORACLEASM_ENABLED=true +# ORACLEASM_UID: Default user owning the /dev/oracleasm mount point. +ORACLEASM_UID=oracle +# ORACLEASM_GID: Default group owning the /dev/oracleasm mount point. +ORACLEASM_GID=oinstall +# ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot. +ORACLEASM_SCANBOOT=true +# ORACLEASM_SCANORDER: Matching patterns to order disk scanning +ORACLEASM_SCANORDER="dm" +# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan +ORACLEASM_SCANEXCLUDE="sd" +""".strip() + def test_sysconfig_doc(): env = { @@ -193,7 +215,8 @@ def test_sysconfig_doc(): 'cs_syscfg': CorosyncSysconfig(context_wrap(COROSYNCSYSCONFIG)), 'conn_info': IfCFGStaticRoute(context_wrap(STATIC_ROUTE_1, CONTEXT_PATH_DEVICE_1)), 'net_syscfg': NetworkSysconfig(context_wrap(NETWORK_SYSCONFIG)), - 'grub_syscfg': GrubSysconfig(context_wrap(GRUB_SYSCONFIG)) + 'grub_syscfg': GrubSysconfig(context_wrap(GRUB_SYSCONFIG)), + 'oracleasm_syscfg': OracleasmSysconfig(context_wrap(ORACLEASM_SYSCONFIG)) } failed, total = doctest.testmod(sysconfig, globs=env) assert failed == 0 diff --git a/insights/parsers/tests/test_sysconfig_oracleasm.py b/insights/parsers/tests/test_sysconfig_oracleasm.py new file mode 100644 index 000000000..bfe261941 --- /dev/null +++ b/insights/parsers/tests/test_sysconfig_oracleasm.py @@ -0,0 +1,38 @@ +from insights.tests import context_wrap +from insights.parsers.sysconfig import OracleasmSysconfig + +ORACLEASM_SYSCONFIG = """ +# +# This is a configuration file for automatic loading of the Oracle +# Automatic Storage Management library kernel driver. It is generated +# By running /etc/init.d/oracleasm configure. Please use that method +# to modify this file +# + +# ORACLEASM_ENABELED: 'true' means to load the driver on boot. +ORACLEASM_ENABLED=true + +# ORACLEASM_UID: Default user owning the /dev/oracleasm mount point. 
+ORACLEASM_UID=oracle + +# ORACLEASM_GID: Default group owning the /dev/oracleasm mount point. +ORACLEASM_GID=oinstall + +# ORACLEASM_SCANBOOT: 'true' means scan for ASM disks on boot. +ORACLEASM_SCANBOOT=true + +# ORACLEASM_SCANORDER: Matching patterns to order disk scanning +ORACLEASM_SCANORDER="dm" + +# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan +ORACLEASM_SCANEXCLUDE="sd" +""".strip() + + +def test_sysconfig_oracleasm(): + result = OracleasmSysconfig(context_wrap(ORACLEASM_SYSCONFIG)) + assert result["ORACLEASM_SCANORDER"] == 'dm' + assert result.get("ORACLEASM_SCANBOOT") == 'true' + assert result.get("NONEXISTENT_VAR") is None + assert "NONEXISTENT_VAR" not in result + assert "ORACLEASM_SCANEXCLUDE" in result diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ce7defd81..776a6801b 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -628,6 +628,7 @@ class Specs(SpecSet): sysconfig_mongod = RegistryPoint(multi_output=True) sysconfig_network = RegistryPoint() sysconfig_ntpd = RegistryPoint() + sysconfig_oracleasm = RegistryPoint() sysconfig_prelink = RegistryPoint() sysconfig_sshd = RegistryPoint() sysconfig_virt_who = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 73e3deec2..82ce7bc6d 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -720,6 +720,7 @@ def is_mod_loaded_for_ss(broker): sysconfig_libvirt_guests = simple_file("etc/sysconfig/libvirt-guests") sysconfig_network = simple_file("etc/sysconfig/network") sysconfig_ntpd = simple_file("/etc/sysconfig/ntpd") + sysconfig_oracleasm = simple_file("/etc/sysconfig/oracleasm") sysconfig_prelink = simple_file("/etc/sysconfig/prelink") sysconfig_sshd = simple_file("/etc/sysconfig/sshd") sysconfig_virt_who = simple_file("/etc/sysconfig/virt-who") From 4b73769be603e047ef2092d59f2eace21b020373 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 5 Aug 2021 10:25:29 -0400 Subject: [PATCH 507/892] always update uploader.json (#3170) Signed-off-by: Jeremy Crafts --- insights/client/phase/v1.py | 3 +-- insights/tests/client/phase/test_update.py | 4 +++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index d87045a57..2bcbf6876 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -130,8 +130,7 @@ def update(client, config): if config.payload: logger.debug('Uploading a payload. 
Bypassing rules update.') return - if not config.core_collect: - client.update_rules() + client.update_rules() @phase diff --git a/insights/tests/client/phase/test_update.py b/insights/tests/client/phase/test_update.py index 45d5b52aa..8d6f87c3a 100644 --- a/insights/tests/client/phase/test_update.py +++ b/insights/tests/client/phase/test_update.py @@ -50,7 +50,9 @@ def test_update_core_collect_on(insights_config, insights_client): except SystemExit: pass insights_client.return_value.update.assert_called_once() - insights_client.return_value.update_rules.assert_not_called() + # we still want to download uploader.json because it's used + # for remove.conf component name resolution + insights_client.return_value.update_rules.assert_called_once() @patch("insights.client.phase.v1.InsightsClient") From bad7df596a326967e7b97504e76ea7df1735ce68 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 5 Aug 2021 10:58:07 -0400 Subject: [PATCH 508/892] Obfuscate hostname using sha1(hostname) rather than "host0" (#2611) * use SHA1 hash instead of host0 for obfuscated hostname * save obfuscated hostname and IPs list to RHSM facts * use filename insights-client.facts * use fqdn as soscleaner hostname * maintain both short hostname and fqdn in soscleaner * compile obfuscated IPs into a JSON string * encode to utf8 before hashing for py3 Signed-off-by: Jeremy Crafts --- insights/client/constants.py | 2 + insights/client/data_collector.py | 50 +++++++++++++++++++ insights/contrib/soscleaner.py | 23 ++++++--- .../tests/client/data_collector/test_done.py | 8 +-- 4 files changed, 73 insertions(+), 10 deletions(-) diff --git a/insights/client/constants.py b/insights/client/constants.py index fe57d5071..2581720fc 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -83,3 +83,5 @@ class InsightsConstants(object): valid_compressors = ("gz", "xz", "bz2", "none") # RPM version in which core collection was released core_collect_rpm_version = '3.1.0' + rhsm_facts_dir = os.path.join(os.sep, 'etc', 'rhsm', 'facts') + rhsm_facts_file = os.path.join(os.sep, 'etc', 'rhsm', 'facts', 'insights-client.facts') diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index 006537516..2688d0f4e 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -158,6 +158,52 @@ def _write_collection_stats(self, collection_stats): self.archive.add_metadata_to_archive( json.dumps(collection_stats), '/collection_stats') + def _write_rhsm_facts(self, hashed_fqdn, ip_csv): + logger.info('Writing RHSM facts to %s...', constants.rhsm_facts_file) + ips_list = '' + with open(ip_csv) as fil: + # create IP list as JSON block with format + # [ + # { + # original: <original IP> + # obfuscated: <obfuscated IP> + # } + # ] + + ips_list = fil.readlines() + headings = ips_list[0].strip().split(',') + # set the indices for the IPs + if 'original' in headings[0].lower(): + # soscleaner 0.4.4, original first + org = 0 + obf = 1 + else: + # soscleaner 0.2.2, obfuscated first + org = 1 + obf = 0 + + ip_block = [] + for line in ips_list[1:]: + ipset = line.strip().split(',') + ip_block.append( + { + 'original': ipset[org], + 'obfuscated': ipset[obf] + }) + + facts = { + 'insights_client.obfuscate_hostname_enabled': self.config.obfuscate_hostname, + 'insights_client.hostname': hashed_fqdn, + 'insights_client.obfuscate_ip_enabled': self.config.obfuscate, + 'insights_client.ips': json.dumps(ip_block) + } + + try: + with open(constants.rhsm_facts_file, 'w') as fil: + json.dump(facts, fil) + except (IOError, 
OSError) as e: + logger.error('Could not write to %s: %s', constants.rhsm_facts_file, str(e)) + def _run_pre_command(self, pre_cmd): ''' Run a pre command to get external args for a command @@ -426,6 +472,10 @@ def done(self, conf, rm_conf): cleaner.clean_report(clean_opts, self.archive.archive_dir) if clean_opts.keyword_file is not None: os.remove(clean_opts.keyword_file.name) + + # generate RHSM facts at this point + self._write_rhsm_facts(cleaner.hashed_fqdn, cleaner.ip_report) + if self.config.output_dir: # return the entire soscleaner dir # see additions to soscleaner.SOSCleaner.clean_report diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 22eeda335..6be99bbec 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -31,6 +31,7 @@ import logging import tarfile import six +import hashlib from insights.util import content_type @@ -62,6 +63,8 @@ def __init__(self, quiet=False): self.hn_db = dict() #hostname database self.hostname_count = 0 self.hostname = None + self.fqdn = None + self.hashed_fqdn = None # addition for insights-client # Domainname obfuscation information self.dn_db = dict() #domainname database @@ -295,7 +298,7 @@ def _sub_hostname(self, line): self.logger.debug("Obfuscating FQDN - %s > %s", hn, new_hn) line = line.replace(hn, new_hn) if self.hostname: - line = line.replace(self.hostname, self._hn2db(self.hostname)) #catch any non-fqdn instances of the system hostname + line = line.replace(self.hostname, self._hn2db(self.fqdn)) #catch any non-fqdn instances of the system hostname return line except Exception as e: # pragma: no cover @@ -454,12 +457,13 @@ def _get_hostname(self, hostname='hostname'): fh = open(hostfile, 'rt') name_list = fh.readline().rstrip().split('.') hostname = name_list[0] + fqdn = '.'.join(name_list) # insights-client needs FQDN if len(name_list) > 1: domainname = '.'.join(name_list[1:len(name_list)]) else: domainname = None - return hostname, domainname + return hostname, domainname, fqdn except IOError as e: #the 'hostname' file doesn't exist or isn't readable for some reason self.logger.warning("Unable to determine system hostname!!!") @@ -471,8 +475,9 @@ def _get_hostname(self, hostname='hostname'): hostname = None domainname = None + fqdn = None - return hostname, domainname + return hostname, domainname, fqdn except Exception as e: # pragma: no cover self.logger.exception(e) @@ -676,15 +681,19 @@ def clean_report(self, options, sosreport): # pragma: no cover self.report = self._extract_sosreport(sosreport) self._make_dest_env() # create the working directory if options.hostname_path: - self.hostname, self.domainname = self._get_hostname(options.hostname_path) + self.hostname, self.domainname, self.fqdn = self._get_hostname(options.hostname_path) else: - self.hostname, self.domainname = self._get_hostname() + self.hostname, self.domainname, self.fqdn = self._get_hostname() if options.files: self._add_extra_files(options.files) - if self.hostname: # if we have a hostname that's not a None type - self.hn_db['host0'] = self.hostname # we'll prime the hostname pump to clear out a ton of useless logic later + if self.fqdn: # if we have a hostname that's not a None type + if six.PY3: + self.hashed_fqdn = hashlib.sha1(self.fqdn.encode('utf-8')).hexdigest() + '.example.com' + else: + self.hashed_fqdn = hashlib.sha1(self.fqdn).hexdigest() + '.example.com' + self.hn_db[self.hashed_fqdn] = self.fqdn # we'll prime the hostname pump to clear out a ton of useless logic later 
self._process_hosts_file(options) # we'll take a dig through the hosts file and make sure it is as scrubbed as possible diff --git a/insights/tests/client/data_collector/test_done.py b/insights/tests/client/data_collector/test_done.py index 1126cd717..61c589a7b 100644 --- a/insights/tests/client/data_collector/test_done.py +++ b/insights/tests/client/data_collector/test_done.py @@ -24,9 +24,10 @@ def test_dir_returned(_): assert ret == d.archive.archive_dir +@patch('insights.client.data_collector.DataCollector._write_rhsm_facts') @patch('insights.client.data_collector.SOSCleaner') @patch('insights.client.data_collector.InsightsArchive') -def test_soscleaner_archive_returned(_, soscleaner): +def test_soscleaner_archive_returned(_, soscleaner, __): ''' Test that SOSCleaner is enabled when obfuscate=True, and returns an archive by default @@ -40,9 +41,10 @@ def test_soscleaner_archive_returned(_, soscleaner): assert ret == soscleaner.return_value.archive_path +@patch('insights.client.data_collector.DataCollector._write_rhsm_facts') @patch('insights.client.data_collector.SOSCleaner') @patch('insights.client.data_collector.InsightsArchive') -def test_soscleaner_dir_returned(_, soscleaner): +def test_soscleaner_dir_returned(_, soscleaner, __): ''' Test that SOSCleaner returns a directory when output_dir is specified. @@ -92,7 +94,7 @@ def test_soscleaner_additions(isdir_, clean_opts): s._clean_files_only = Mock() s._extract_sosreport = Mock() s._make_dest_env = Mock() - s._get_hostname = Mock(return_value=(None, None)) + s._get_hostname = Mock(return_value=(None, None, None)) s._add_extra_files = Mock() s._process_hosts_file = Mock() s._domains2db = Mock() From 58829c97d1807fc98a91594abc8abc67c2690b2d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 5 Aug 2021 11:25:58 -0400 Subject: [PATCH 509/892] update uploader_json_map.json Signed-off-by: Jeremy Crafts --- insights/client/uploader_json_map.json | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json index c62306d94..fc75a47bd 100644 --- a/insights/client/uploader_json_map.json +++ b/insights/client/uploader_json_map.json @@ -2623,7 +2623,6 @@ "TCP request queue full drops", "TX driver issue detected, PF reset issued", "The threshold number of context switches per second per CPU", - "This system does not support \"SSSE3\"", "Unit ip6tables.service entered failed state", "Unit iptables.service entered failed state", "Virtualization daemon", @@ -2657,7 +2656,6 @@ "invalid key/value pair in file /usr/lib/udev/rules.d/59-fc-wwpn-id.rules", "ip_local_port_range: prefer different parity for start/end values", "irq handler for vector (irq -1)", - "is beyond advertised capabilities", "is down or the link is down", "is greater than comparison timestamp", "iscsiadm: iscsiadm: Could not log into all portals", @@ -2689,7 +2687,6 @@ "rhsmd: rhsmd process exceeded runtime and was killed", "shm_open failed, Permission denied", "skb_copy", - "skb_over_panic", "socket error sending to node", "start request repeated too quickly for docker.service", "state changed timeout -> done", @@ -3631,6 +3628,11 @@ "pattern": [], "symbolic_name": "sysconfig_rh_mongodb26" }, + { + "file": "/etc/sysconfig/oracleasm", + "pattern": [], + "symbolic_name": "sysconfig_oracleasm" + }, { "file": "/etc/sysconfig/prelink", "pattern": [], @@ -4161,6 +4163,14 @@ ], "symbolic_name": "ansible_tower_settings" }, + { + "glob": "/etc/udev/rules.d/*asm*.rules", + "pattern": 
[ + "oracleasm", + "ACTION==" + ], + "symbolic_name": "etc_udev_oracle_asm_rules" + }, + { "glob": "/sys/devices/system/cpu/cpu[0-9]*/online", "symbolic_name": "cpu_cores", @@ -4396,5 +4406,5 @@ "pre_commands": { "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'" }, - "version": "2021-07-22T13:39:35.175760" + "version": "2021-07-29T13:51:55.397132" } \ No newline at end of file From 0818ca55108a6f8f505acab610a439a62804c3bc Mon Sep 17 00:00:00 2001 From: Viliam Krizan Date: Tue, 29 Jun 2021 17:34:25 +0200 Subject: [PATCH 510/892] fix(compliance): malformed SSG version in results On certain SSG/Benchmark versions there is a malformed version listed in the results/DS file. This complicates parsing in Compliance, as it determines the version in use from the results file. RHICOMPL-1173 Signed-off-by: Viliam Krizan --- insights/client/apps/compliance/__init__.py | 75 ++++++++++++++++++- insights/tests/client/apps/test_compliance.py | 44 +++++++++++ 2 files changed, 117 insertions(+), 2 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 812edfd54..8a4e21545 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -15,7 +15,13 @@ NONCOMPLIANT_STATUS = 2 COMPLIANCE_CONTENT_TYPE = 'application/vnd.redhat.compliance.something+tgz' POLICY_FILE_LOCATION = '/usr/share/xml/scap/ssg/content/' -REQUIRED_PACKAGES = ['scap-security-guide', 'openscap-scanner', 'openscap'] +SSG_PACKAGE = 'scap-security-guide' +REQUIRED_PACKAGES = [SSG_PACKAGE, 'openscap-scanner', 'openscap'] + +# SSG versions that need the <version> in XML repaired +VERSIONS_FOR_REPAIR = '0.1.18 0.1.19 0.1.21 0.1.25'.split() +SNIPPET_TO_FIX = '<version>0.9</version>' + logger = getLogger(__name__) @@ -24,6 +30,7 @@ def __init__(self, config): self.config = config self.conn = InsightsConnection(config) self.archive = InsightsArchive(config) + self._ssg_version = None def oscap_scan(self): self.inventory_id = self._get_inventory_id() @@ -36,14 +43,19 @@ def oscap_scan(self): exit(constants.sig_kill_bad) archive_dir = self.archive.create_archive_dir() + results_need_repair = self.results_need_repair() + for profile in profiles: tailoring_file = self.download_tailoring_file(profile) + results_file = self._results_file(archive_dir, profile) self.run_scan( profile['attributes']['ref_id'], self.find_scap_policy(profile['attributes']['ref_id']), - self._results_file(archive_dir, profile), + results_file, tailoring_file_path=tailoring_file ) + if results_need_repair: + self.repair_results(results_file) if tailoring_file: os.remove(tailoring_file) @@ -150,6 +162,65 @@ def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path= logger.error(oscap) exit(constants.sig_kill_bad) + @property + def ssg_version(self): + if not self._ssg_version: + self._ssg_version = self.get_ssg_version() + return self._ssg_version + + def get_ssg_version(self): + rpmcmd = 'rpm -qa --qf "%{VERSION}" ' + SSG_PACKAGE + if not six.PY3: + rpmcmd = rpmcmd.encode() + + rc, ssg_version = call(rpmcmd, keep_rc=True) + if rc: + logger.warning('Tried determining SSG version but failed: {0}.\n'.format(ssg_version)) + return + + logger.info('System uses SSG version %s', ssg_version) + return ssg_version + + def results_need_repair(self): + return self.ssg_version in VERSIONS_FOR_REPAIR + + def repair_results(self, results_file): + if not os.path.isfile(results_file): + return + if not self.ssg_version: + logger.warning("Couldn't repair SSG version in results file 
%s", results_file) + return + + results_file_in = '{0}.in'.format(results_file) + os.rename(results_file, results_file_in) + + with open(results_file_in, 'r') as in_file: + with open(results_file, 'w') as out_file: + is_repaired = self._repair_ssg_version_in_results( + in_file, out_file, self.ssg_version + ) + + os.remove(results_file_in) + if is_repaired: + logger.debug('Repaired version in results file %s', results_file) + return is_repaired + + def _repair_ssg_version_in_results(self, in_file, out_file, ssg_version): + replacement = '{0}'.format(ssg_version) + is_repaired = False + for line in in_file: + if is_repaired or SNIPPET_TO_FIX not in line: + out_file.write(line) + else: + out_file.write(line.replace(SNIPPET_TO_FIX, replacement)) + is_repaired = True + logger.debug( + 'Substituted "%s" with "%s" in %s', + SNIPPET_TO_FIX, replacement, out_file.name + ) + + return is_repaired + def _assert_oscap_rpms_exist(self): rpmcmd = 'rpm -qa ' + ' '.join(REQUIRED_PACKAGES) if not six.PY3: diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 6da8a803a..3820d1c96 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -25,6 +25,34 @@ def test_oscap_scan(config, assert_rpms): assert content_type == COMPLIANCE_CONTENT_TYPE +@patch("insights.client.apps.compliance.ComplianceClient._assert_oscap_rpms_exist") +@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None, compressor='gz') +def test_oscap_scan_with_results_repaired(config, assert_rpms, tmpdir): + results_file = tmpdir.mkdir('results').join('result.xml') + results_file.write(""" + + 0.9 + + """) + + compliance_client = ComplianceClient(config) + compliance_client._ssg_version = '0.1.25' + compliance_client._get_inventory_id = lambda: '' + compliance_client.get_initial_profiles = lambda: [{'attributes': {'ref_id': 'foo', 'tailored': False}}] + compliance_client.get_profiles_matching_os = lambda: [] + compliance_client.find_scap_policy = lambda ref_id: '/usr/share/xml/scap/foo.xml' + compliance_client._results_file = lambda archive_dir, profile: str(results_file) + compliance_client.run_scan = lambda ref_id, policy_xml, output_path, tailoring_file_path: None + compliance_client.archive.archive_tmp_dir = '/tmp' + compliance_client.archive.archive_name = 'insights-compliance-test' + archive, content_type = compliance_client.oscap_scan() + assert archive == '/tmp/insights-compliance-test.tar.gz' + assert content_type == COMPLIANCE_CONTENT_TYPE + + repaired_results = open(str(results_file)).read() + assert '0.1.25' in repaired_results + + @patch("insights.client.apps.compliance.call", return_value=(0, '')) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_missing_packages(config, call): @@ -51,6 +79,22 @@ def test_errored_rpm_call(config, call): compliance_client.oscap_scan() +@patch("insights.client.apps.compliance.call", return_value=(0, '1.2.3')) +@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) +def test_get_ssg_version(config, call): + ssg_version = ComplianceClient(config).ssg_version + assert ssg_version == '1.2.3' + call.assert_called_with('rpm -qa --qf "%{VERSION}" scap-security-guide', keep_rc=True) + + +@patch("insights.client.apps.compliance.call", return_value=(1, '0.0.0')) +@patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', 
proxy=None) +def test_get_ssg_version_with_failure(config, call): + ssg_version = ComplianceClient(config).ssg_version + assert not ssg_version + call.assert_called_with('rpm -qa --qf "%{VERSION}" scap-security-guide', keep_rc=True) + + @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) def test_get_profiles(config): compliance_client = ComplianceClient(config) From e4100afdd2a0adc61fe97740065f350b163cbd9d Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Tue, 10 Aug 2021 11:38:24 -0400 Subject: [PATCH 511/892] Compatibility layer legacy upload fix (#3151) * update /platform append qualifier regex * change regex to a positive match on */r/insights Signed-off-by: Jeremy Crafts --- insights/client/auto_config.py | 8 +++++- .../auto_config/test_autoconfig_urls.py | 28 +++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/insights/client/auto_config.py b/insights/client/auto_config.py index 12b044f8f..3b9dd31ac 100644 --- a/insights/client/auto_config.py +++ b/insights/client/auto_config.py @@ -247,6 +247,12 @@ def try_auto_configuration(config): if config.auto_config and not config.offline: if not _try_satellite6_configuration(config): _try_satellite5_configuration(config) - if not config.legacy_upload and not re.match(r'(\w+\.)?cloud\.(\w+\.)?redhat\.com', config.base_url): + if not config.legacy_upload and re.match(r'(.+)?\/r\/insights', config.base_url): + # When to append /platform + # base url ~= cloud.redhat.com/r/insights + # base url ~= cert-api.access.redhat.com/r/insights + # base url ~= satellite.host.example.com/redhat_access/r/insights + # When not to append /platform + # base url ~= cloud.redhat.com/api config.base_url = config.base_url + '/platform' logger.debug('Updated base_url: %s', config.base_url) diff --git a/insights/tests/client/auto_config/test_autoconfig_urls.py b/insights/tests/client/auto_config/test_autoconfig_urls.py index 73518148c..8d595a2fd 100644 --- a/insights/tests/client/auto_config/test_autoconfig_urls.py +++ b/insights/tests/client/auto_config/test_autoconfig_urls.py @@ -165,6 +165,34 @@ def test_platform_path_added(): assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights/platform' +@patch("insights.client.auto_config._try_satellite6_configuration", Mock()) +@patch("insights.client.auto_config._try_satellite5_configuration", Mock()) +def test_platform_path_added_cloud_redhat(): + ''' + Ensure /platform is added when legacy_upload is false + for any base_url ending in /r/insights, otherwise not added + ''' + # classic API + config = Mock(base_url='cert-api.access.redhat.com/r/insights', auto_config=True, legacy_upload=False, offline=False) + try_auto_configuration(config) + assert config.base_url == 'cert-api.access.redhat.com/r/insights/platform' + + # satellite + config = Mock(base_url='test.satellite.com:443/redhat_access/r/insights', auto_config=True, legacy_upload=False, offline=False) + try_auto_configuration(config) + assert config.base_url == 'test.satellite.com:443/redhat_access/r/insights/platform' + + # cloud.redhat.com compatibility layer - classic API hosted on c.rh.c + config = Mock(base_url='cloud.redhat.com/r/insights', auto_config=True, legacy_upload=False, offline=False) + try_auto_configuration(config) + assert config.base_url == 'cloud.redhat.com/r/insights/platform' + + # cloud.redhat.com API directly connected + config = Mock(base_url='cloud.redhat.com/api', auto_config=True, legacy_upload=False, offline=False) + 
try_auto_configuration(config) + assert config.base_url == 'cloud.redhat.com/api' + + @patch("insights.client.auto_config.verify_connectivity", Mock()) def test_rhsm_stage_legacy_base_url_configured(): ''' From 24290eff5079aa1cb29457001da218ca44deeddd Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 11 Aug 2021 03:14:27 +0800 Subject: [PATCH 512/892] Revert the 'ntpq_pn' spec back (#3178) * Revert the 'ntpq_pn' spec back Signed-off-by: Xiangce Liu * Remove it from the test_map_components Signed-off-by: Xiangce Liu --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + insights/tests/client/collection_rules/test_map_components.py | 1 - 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 82ce7bc6d..b0bc921f0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -529,6 +529,7 @@ def md_device_list(broker): nsswitch_conf = simple_file("/etc/nsswitch.conf") ntp_conf = simple_file("/etc/ntp.conf") ntpq_leap = simple_command("/usr/sbin/ntpq -c 'rv 0 leap'") + ntpq_pn = simple_command("/usr/sbin/ntpq -pn") ntptime = simple_command("/usr/sbin/ntptime") numa_cpus = glob_file("/sys/devices/system/node/node[0-9]*/cpulist") numeric_user_group_name = simple_command("/bin/grep -c '^[[:digit:]]' /etc/passwd /etc/group") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index e6e2ade89..28e40fdd3 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -173,6 +173,7 @@ class InsightsArchiveSpecs(Specs): nova_crontab = simple_file("insights_commands/crontab_-l_-u_nova") nova_uid = simple_file("insights_commands/id_-u_nova") ntpq_leap = simple_file("insights_commands/ntpq_-c_rv_0_leap") + ntpq_pn = simple_file("insights_commands/ntpq_-pn") ntptime = simple_file("insights_commands/ntptime") numeric_user_group_name = simple_file("insights_commands/grep_-c_digit_.etc.passwd_.etc.group") oc_get_clusterrole_with_config = simple_file("insights_commands/oc_get_clusterrole_--config_.etc.origin.master.admin.kubeconfig") diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index d00025289..414632a27 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -59,7 +59,6 @@ def test_get_component_by_symbolic_name(): 'ls_usr_sbin', 'lvmconfig', 'nova_migration_uid', - 'ntpq_pn', 'rabbitmq_queues', 'rhev_data_center', 'root_crontab', From 83e46a404e304c26568d43a1ee17b2c3947dd12c Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Wed, 11 Aug 2021 01:03:50 +0530 Subject: [PATCH 513/892] Fix Lsof parser logic (#3172) (#3174) * Do not assume the starting index of values. Instead, check whether the row is empty at a heading's (start, end) index to determine whether that value is empty, as sketched below. 
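A minimal, self-contained sketch of the idea (the names here are illustrative, not the parser's actual API):

    def column_spans(header):
        # Map each heading to the (start, end) span it occupies in the header row.
        spans = {}
        for heading in header.split():
            start = header.index(heading)
            spans[heading] = (start, start + len(heading))
        return spans

    header = "NAME  SIZE  OWNER"
    rows = ["foo   12    root",
            "bar         root"]  # SIZE column is blank in this row

    spans = column_spans(header)
    for row in rows:
        values = iter(row.split())
        parsed = {}
        for heading, (start, end) in spans.items():
            # A heading only consumes a value when the row has text
            # somewhere under that heading's own span.
            parsed[heading] = next(values) if row[start:end].strip() else ''
        print(parsed)
    # {'NAME': 'foo', 'SIZE': '12', 'OWNER': 'root'}
    # {'NAME': 'bar', 'SIZE': '', 'OWNER': 'root'}

This relies on each value overlapping its own heading's columns; the real parser also splits NAME off first, since NAME may contain spaces.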
Signed-off-by: Rohan Arora --- insights/parsers/lsof.py | 43 +++++++++++++-------------- insights/parsers/tests/test_lsof.py | 45 +++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+), 23 deletions(-) diff --git a/insights/parsers/lsof.py b/insights/parsers/lsof.py index 24bcec3ed..6357d6526 100644 --- a/insights/parsers/lsof.py +++ b/insights/parsers/lsof.py @@ -76,24 +76,12 @@ class Lsof(CommandParser, Scannable): """ def _calc_indexes(self, line): - self.header_row = line - self.name_idx = self.header_row.index(" NAME") - self.pid_idx = self.header_row.index(" PID") - _, self.mid_cols = self.header_row[:self.name_idx].split(None, 1) - self.mid_cols = " " + self.mid_cols + self.headings = [c.strip() for c in line.split(None)] self.indexes = {} - for col_name in self.mid_cols.split(): - self.indexes[col_name] = self.mid_cols.index(col_name) + len(col_name) - self.indexes["FD"] += 2 - - def _split_middle(self, middle): - mid_dict = {} - offset = 0 - for col in self.mid_cols.split(): - idx = self.indexes[col] - mid_dict[col] = middle[offset:idx].strip() - offset = idx - return mid_dict + for heading in self.headings: + index = line.index(heading) + # (start, end) + self.indexes[heading] = (index, index + len(heading)) def _start(self, content): """ @@ -114,12 +102,21 @@ def _parse_line(self, line): Given a line, returns a dictionary for that line. Requires _start to be called first. """ - command, rest = line[:self.pid_idx], line[self.pid_idx:] - name = line[self.name_idx:] - middle_dict = self._split_middle(rest[:self.name_idx - len(command)]) - middle_dict["COMMAND"] = command.strip() - middle_dict["NAME"] = name.strip() - return middle_dict + # Split NAME separately as it can contain extra whitespace + command = line[self.indexes['NAME'][0]:].strip() + rest = line[:self.indexes['NAME'][0]] + rdict = dict.fromkeys(self.headings, '') + rowsplit = [i.strip() for i in rest.split(None, len(self.headings) - 2)] + if len(rowsplit) < len(self.headings) - 1: + rowsplit = iter(rowsplit) + for heading in self.headings[:-1]: + # Use the value only if the row is not empty at the heading's (start, end) index + if line[slice(*self.indexes[heading])].strip(): + rdict[heading] = next(rowsplit) + else: + rdict = dict(zip(self.headings, rowsplit)) + rdict['NAME'] = command + return rdict def parse(self, content): """ diff --git a/insights/parsers/tests/test_lsof.py b/insights/parsers/tests/test_lsof.py index 39ba8c298..63e04a5bf 100644 --- a/insights/parsers/tests/test_lsof.py +++ b/insights/parsers/tests/test_lsof.py @@ -37,6 +37,19 @@ JS 642 648 polkitd 2u CHR 1,3 0t0 4674 /dev/null """.strip() +LSOF_GOOD_V2 = """ +lsof: avoiding readlink(/sys): -b was specified. +lsof: avoiding stat(/sys): -b was specified. +lsof: WARNING: can't stat() sysfs file system /sys Output information may be incomplete. 
+COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME +systemd 1 0 cwd DIR 253,0 251 128 / +systemd 1 0 0u CHR 1,3 0t0 1032 /dev/null +systemd 1 0 19u unix 0xffff998f20251f80 0t0 132266239 /run/systemd/journal/stdout type=STREAM +blkcg_pun 36 0 txt unknown /proc/36/exe +rsyslogd 772018 0 5w REG 253,6 151175680 1351 /var/log/messages-20210221 (deleted) +""".strip() + columns = ["COMMAND", "PID", "TID", "USER", "FD", "TYPE", "DEVICE", "SIZE/OFF", "NODE", "NAME"] @@ -116,6 +129,38 @@ def test_lsof_good(): } +def test_lsof_good_v2(): + ctx = context_wrap(LSOF_GOOD_V2) + d = list(lsof.Lsof(ctx).parse(LSOF_GOOD_V2.splitlines())) + assert d[0] == { + 'COMMAND': 'systemd', + 'PID': '1', + 'USER': '0', + 'FD': 'cwd', + 'TYPE': 'DIR', + 'DEVICE': '253,0', + 'SIZE/OFF': '251', + 'NODE': '128', + 'NAME': '/' + } + + # wide and empty values checks + assert d[2]['DEVICE'] == '0xffff998f20251f80' + assert d[3]['DEVICE'] == '' + + assert d[-1] == { + 'COMMAND': 'rsyslogd', + 'PID': '772018', + 'USER': '0', + 'FD': '5w', + 'TYPE': 'REG', + 'DEVICE': '253,6', + 'SIZE/OFF': '151175680', + 'NODE': '1351', + 'NAME': '/var/log/messages-20210221 (deleted)' + } + + def test_lsof_scan(): ctx = context_wrap(LSOF_GOOD_V1) # Scannable provided `any` method From add5c45456effcb6eb88f13b49ad925fa2d3d3bc Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 11 Aug 2021 00:50:05 -0400 Subject: [PATCH 514/892] allow collections without registration w/ --no-upload (#3142) Signed-off-by: Jeremy Crafts --- insights/client/phase/v1.py | 8 ++++++++ .../client/phase/test_LEGACY_post_update.py | 16 ++++++++++++++++ .../tests/client/phase/test_post_update.py | 18 ++++++++++++++++++ 3 files changed, 42 insertions(+) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 2bcbf6876..c22ceaf30 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -182,6 +182,10 @@ def post_update(client, config): logger.debug('Running client in offline mode. Bypassing registration.') return + if config.no_upload: + logger.debug("Running client without uploading. Bypassing registration.") + return + if config.display_name and not config.register: # setting display name independent of registration if client.set_display_name(config.display_name): @@ -211,6 +215,10 @@ def post_update(client, config): logger.debug('Running client in offline mode. Bypassing registration.') return + if config.no_upload: + logger.debug("Running client without uploading. Bypassing registration.") + return + # --payload short circuits registration check if config.payload: logger.debug('Uploading a specified archive. 
Bypassing registration.') diff --git a/insights/tests/client/phase/test_LEGACY_post_update.py b/insights/tests/client/phase/test_LEGACY_post_update.py index 6325b4090..2f0de9180 100644 --- a/insights/tests/client/phase/test_LEGACY_post_update.py +++ b/insights/tests/client/phase/test_LEGACY_post_update.py @@ -19,6 +19,7 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.list_specs": False, "return_value.load_all.return_value.show_results": False, "return_value.load_all.return_value.check_results": False, + "return_value.load_all.return_value.no_upload": False, "return_value.load_all.return_value.core_collect": False}) return patcher(old_function) @@ -63,3 +64,18 @@ def test_exit_ok(insights_config, insights_client): with raises(SystemExit) as exc_info: post_update() assert exc_info.value.code == 0 + + +@patch("insights.client.phase.v1.InsightsClient") +@patch_insights_config +def test_post_update_no_upload(insights_config, insights_client): + """ + No-upload short circuits this phase + """ + insights_config.return_value.load_all.return_value.no_upload = True + try: + post_update() + except SystemExit: + pass + insights_client.return_value.register.assert_not_called() + insights_client.return_value.get_machine_id.assert_called_once() diff --git a/insights/tests/client/phase/test_post_update.py b/insights/tests/client/phase/test_post_update.py index dbd67bf38..77d8f422b 100644 --- a/insights/tests/client/phase/test_post_update.py +++ b/insights/tests/client/phase/test_post_update.py @@ -22,6 +22,7 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.list_specs": False, "return_value.load_all.return_value.show_results": False, "return_value.load_all.return_value.check_results": False, + "return_value.load_all.return_value.no_upload": False, "return_value.load_all.return_value.core_collect": False}) return patcher(old_function) @@ -321,6 +322,23 @@ def test_post_update_offline(insights_config, insights_client): insights_client.return_value.set_display_name.assert_not_called() +@patch("insights.client.phase.v1.InsightsClient") +@patch_insights_config +def test_post_update_no_upload(insights_config, insights_client): + """ + No-upload short circuits this phase + """ + insights_config.return_value.load_all.return_value.no_upload = True + try: + post_update() + except SystemExit: + pass + insights_client.return_value.get_machine_id.assert_called_once() + insights_client.return_value.get_registration_status.assert_not_called() + insights_client.return_value.clear_local_registration.assert_not_called() + insights_client.return_value.set_display_name.assert_not_called() + + @patch("insights.client.phase.v1.InsightsClient") @patch_insights_config # @patch("insights.client.phase.v1.InsightsClient") From cfde234c89463696f5d46331e7f3d164fd305fe4 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 11 Aug 2021 16:36:20 +0800 Subject: [PATCH 515/892] New spec "/var/log/httpd/foreman-ssl_error_ssl.log" (#3180) Signed-off-by: Huanhuan Li --- insights/parsers/foreman_log.py | 22 ++++++++++++++++++ insights/parsers/tests/test_foreman_log.py | 27 +++++++++++++++++++--- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 4 files changed, 48 insertions(+), 3 deletions(-) diff --git a/insights/parsers/foreman_log.py b/insights/parsers/foreman_log.py index f18a057de..d518c9db6 100644 --- a/insights/parsers/foreman_log.py +++ b/insights/parsers/foreman_log.py @@ -28,6 +28,9 @@ ForemanSSLAccessLog - file 
``/var/log/httpd/foreman-ssl_access_ssl.log`` ------------------------------------------------------------------------ +ForemanSSLErrorLog - file ``/var/log/httpd/foreman-ssl_error_ssl.log`` +---------------------------------------------------------------------- + """ from datetime import datetime @@ -190,3 +193,22 @@ def _parse_line(self, line): except ValueError: pass return msg_info + + +@parser(Specs.foreman_ssl_error_ssl_log) +class ForemanSSLErrorLog(LogFileOutput): + """ + Class for parsing ``/var/log/httpd/foreman-ssl_error_ssl.log`` file. + + .. note:: + + Please refer to its super-class :class:`insights.core.LogFileOutput` for examples. + + Sample log contents:: + + [Mon Aug 09 10:01:22.548717 2021] [ssl:warn] [pid 5881] [client 10.72.44.126:47190] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' + [Mon Aug 09 11:02:23.229609 2021] [proxy_http:error] [pid 749] (20014)Internal error: [client 10.72.44.126:47920] AH01102: error reading status line from remote server yyy + [Mon Aug 09 11:17:52.204503 2021] [proxy_http:error] [pid 5854] (20014)Internal error: [client 10.72.44.126:48016] AH01102: error reading status line from remote server yyy + """ + + time_format = '%a %b %d %H:%M:%S.%f %Y' diff --git a/insights/parsers/tests/test_foreman_log.py b/insights/parsers/tests/test_foreman_log.py index 889724da1..c10bc44c1 100644 --- a/insights/parsers/tests/test_foreman_log.py +++ b/insights/parsers/tests/test_foreman_log.py @@ -4,6 +4,7 @@ from insights.parsers.foreman_log import CandlepinLog, ProxyLog from insights.parsers.foreman_log import CandlepinErrorLog from insights.parsers.foreman_log import ForemanSSLAccessLog +from insights.parsers.foreman_log import ForemanSSLErrorLog from datetime import datetime import doctest @@ -177,6 +178,16 @@ 10.181.73.211 - rhcapkdc.example2.com [27/Mar/2017:13:34:52:0400] "GET /rhsm/status HTTP/1.1" 200 263 "-" "-" """.strip() +FOREMAN_SSL_ERROR_SSL_LOG = """ +[Mon Aug 09 09:31:04.075673 2021] [ssl:warn] [pid 746] [client 10.72.44.126:43266] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' +[Mon Aug 09 09:31:10.343254 2021] [ssl:warn] [pid 747] [client 10.72.44.126:43346] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' +[Mon Aug 09 09:31:10.364351 2021] [ssl:warn] [pid 753] [client 10.72.44.126:43350] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' +[Mon Aug 09 10:01:22.512497 2021] [ssl:warn] [pid 747] [client 10.72.44.126:47188] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' +[Mon Aug 09 10:01:22.548717 2021] [ssl:warn] [pid 5881] [client 10.72.44.126:47190] AH02227: Failed to set r->user to 'SSL_CLIENT_S_DN_CN' +[Mon Aug 09 11:02:23.229609 2021] [proxy_http:error] [pid 749] (20014)Internal error: [client 10.72.44.126:47920] AH01102: error reading status line from remote server yyy +[Mon Aug 09 11:17:52.204503 2021] [proxy_http:error] [pid 5854] (20014)Internal error: [client 10.72.44.126:48016] AH01102: error reading status line from remote server yyy +""".strip() + def test_production_log(): fm_log = ProductionLog(context_wrap(PRODUCTION_LOG)) @@ -237,9 +248,19 @@ def test_foreman_ssl_access_ssl_log(): assert len(foreman_ssl_access_log.get('GET')) == 2 +def test_foreman_ssl_error_ssl_log(): + ForemanSSLErrorLog.last_scan('test_error_1', 'error reading status line from remote server') + foreman_ssl_access_log = ForemanSSLErrorLog(context_wrap(FOREMAN_SSL_ERROR_SSL_LOG)) + assert foreman_ssl_access_log.test_error_1 + assert 'error reading status line from remote server yyy' in 
foreman_ssl_access_log.test_error_1.get('raw_message') + + def test_doc(): failed_count, tests = doctest.testmod(foreman_log, - globs={"cp_log": CandlepinLog(context_wrap(CANDLEPIN_LOG)), - "candlepin_log": CandlepinErrorLog(context_wrap(CANDLEPIN_ERROR_LOG)), - "foreman_ssl_acess_log": ForemanSSLAccessLog(context_wrap(FOREMAN_SSL_ACCESS_SSL_LOG))}) + globs={ + "cp_log": CandlepinLog(context_wrap(CANDLEPIN_LOG)), + "candlepin_log": CandlepinErrorLog(context_wrap(CANDLEPIN_ERROR_LOG)), + "foreman_ssl_acess_log": ForemanSSLAccessLog(context_wrap(FOREMAN_SSL_ACCESS_SSL_LOG)) + } + ) assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 776a6801b..216964b2a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -172,6 +172,7 @@ class Specs(SpecSet): foreman_proxy_log = RegistryPoint(filterable=True) foreman_satellite_log = RegistryPoint(filterable=True) foreman_ssl_access_ssl_log = RegistryPoint(filterable=True) + foreman_ssl_error_ssl_log = RegistryPoint(filterable=True) foreman_rake_db_migrate_status = RegistryPoint() foreman_tasks_config = RegistryPoint(filterable=True) freeipa_healthcheck_log = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index b0bc921f0..3c8d3b6a9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -261,6 +261,7 @@ def du_dirs_list(broker): findmnt_lo_propagation = simple_command("/bin/findmnt -lo+PROPAGATION") firewall_cmd_list_all_zones = simple_command("/usr/bin/firewall-cmd --list-all-zones") firewalld_conf = simple_file("/etc/firewalld/firewalld.conf") + foreman_ssl_error_ssl_log = simple_file("/var/log/httpd/foreman-ssl_error_ssl.log") fstab = simple_file("/etc/fstab") galera_cnf = first_file(["/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", "/etc/my.cnf.d/galera.cnf"]) getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") From df66cfcb7bf18141ccd9c600477885663dc2f0d0 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 11 Aug 2021 10:24:17 -0400 Subject: [PATCH 516/892] chmod lastupload files to 644 (#3168) * chmod lastupload files to 644 Signed-off-by: Jeremy Crafts --- insights/client/client.py | 3 +++ insights/tests/client/test_client.py | 12 ++++++++++++ 2 files changed, 15 insertions(+) diff --git a/insights/client/client.py b/insights/client/client.py index 9d4c435f1..cecd42421 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -324,7 +324,9 @@ def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=No handler.write(upload.text) else: handler.write(upload.text.encode('utf-8')) + os.chmod(constants.last_upload_results_file, 0o644) write_to_disk(constants.lastupload_file) + os.chmod(constants.lastupload_file, 0o644) msg_name = determine_hostname(config.display_name) account_number = config.account_number @@ -364,6 +366,7 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None): if upload.status_code in (200, 202): write_to_disk(constants.lastupload_file) + os.chmod(constants.lastupload_file, 0o644) msg_name = determine_hostname(config.display_name) logger.info("Successfully uploaded report for %s.", msg_name) if config.register: diff --git a/insights/tests/client/test_client.py b/insights/tests/client/test_client.py index 415a635bc..e81f6a732 100644 --- a/insights/tests/client/test_client.py +++ b/insights/tests/client/test_client.py @@ -14,6 +14,18 @@ from pytest import raises +@pytest.fixture(autouse=True) +def mock_os_chmod(): + 
with patch('insights.client.client.os.chmod', Mock()) as os_chmod: + yield os_chmod + + +@pytest.fixture(autouse=True) +def mock_os_umask(): + with patch('insights.client.client.os.umask', Mock()) as os_umask: + yield os_umask + + class FakeConnection(object): ''' For stubbing out network calls From 8394df6f1f5317ed9c6231868bf53c883f7fd13a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 11 Aug 2021 10:51:44 -0400 Subject: [PATCH 517/892] don't add messages that say to view logs to the log (#3141) Signed-off-by: Jeremy Crafts --- insights/client/client.py | 4 ++-- insights/client/connection.py | 7 +++---- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/insights/client/client.py b/insights/client/client.py index cecd42421..83c089289 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -352,7 +352,7 @@ def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=No time.sleep(constants.sleep_time) else: logger.error("All attempts to upload have failed!") - logger.error("Please see %s for additional information", config.logging_file) + print("Please see %s for additional information" % config.logging_file) raise RuntimeError('Upload failed.') return api_response @@ -385,5 +385,5 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None): time.sleep(constants.sleep_time) else: logger.error("All attempts to upload have failed!") - logger.error("Please see %s for additional information", config.logging_file) + print("Please see %s for additional information" % config.logging_file) raise RuntimeError('Upload failed.') diff --git a/insights/client/connection.py b/insights/client/connection.py index 39795f85d..5cc1feae5 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -390,17 +390,16 @@ def test_connection(self, rc=0): "SUCCESS" if api_success else "FAILURE") if upload_success and api_success: logger.info("Connectivity tests completed successfully") - logger.info("See %s for more details.", self.config.logging_file) + print("See %s for more details." % self.config.logging_file) else: logger.info("Connectivity tests completed with some errors") - logger.info("See %s for more details.", self.config.logging_file) + print("See %s for more details." % self.config.logging_file) rc = 1 except requests.ConnectionError as exc: print(exc) logger.error('Connectivity test failed! ' 'Please check your network configuration') - logger.error('Additional information may be in' - ' /var/log/' + APP_NAME + "/" + APP_NAME + ".log") + print('Additional information may be in %s' % self.config.logging_file) return 1 return rc From 5b822bd86f6e6befc6fd3c5c290cccc5ab226072 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 11 Aug 2021 15:43:49 -0400 Subject: [PATCH 518/892] Fix fixture error in pytest 4 for test_empty_skip (#3182) * In pytest 4 fixtures can't be called directly, so I changed test_empty_skip to skip_exception_check and removed the fixture decorator. 
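A minimal reproducer of the underlying pytest behavior (illustrative code, not from this repo):

    import pytest

    @pytest.fixture()
    def answer_fixture():
        return 42

    def test_calls_fixture_directly():
        # pytest >= 4 aborts this test with an error along the lines of
        # "Fixtures are not meant to be called directly, but are created
        # automatically when test functions request them as parameters."
        assert answer_fixture() == 42

    # The fix: a plain, undecorated helper that any test can call directly.
    def get_answer():
        return 42

    def test_calls_helper():
        assert get_answer() == 42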
Signed-off-by: Ryan Blakley --- insights/parsers/tests/__init__.py | 5 ++--- insights/parsers/tests/test_awx_manage.py | 4 ++-- .../tests/test_ceph_cmd_json_parsing.py | 20 +++++++++---------- insights/parsers/tests/test_cloud_cfg.py | 4 ++-- .../tests/test_cni_podman_bridge_conf.py | 4 ++-- .../parsers/tests/test_engine_db_query.py | 4 ++-- .../tests/test_freeipa_healthcheck_log.py | 4 ++-- insights/parsers/tests/test_httpd_open_nfs.py | 4 ++-- insights/parsers/tests/test_ndctl_list.py | 4 ++-- .../parsers/tests/test_rhsm_releasever.py | 4 ++-- .../tests/test_rhv_log_collector_analyzer.py | 4 ++-- insights/parsers/tests/test_tags.py | 4 ++-- .../tests/test_teamdctl_config_dump.py | 4 ++-- .../parsers/tests/test_teamdctl_state_dump.py | 4 ++-- insights/parsers/tests/test_version_info.py | 4 ++-- .../parsers/tests/test_virt_uuid_facts.py | 4 ++-- 16 files changed, 40 insertions(+), 41 deletions(-) diff --git a/insights/parsers/tests/__init__.py b/insights/parsers/tests/__init__.py index 3d0f416b8..50eaf0158 100644 --- a/insights/parsers/tests/__init__.py +++ b/insights/parsers/tests/__init__.py @@ -38,11 +38,10 @@ def ic_testmod(m, name=None, globs=None, verbose=None, return doctest.TestResults(runner.failures, runner.tries) -@pytest.fixture() -def test_empty_skip(parser_obj): +def skip_exception_check(parser_obj, output_str=""): from insights.parsers import SkipException from insights.tests import context_wrap with pytest.raises(SkipException) as ex: - parser_obj(context_wrap("")) + parser_obj(context_wrap(output_str)) return str(ex) diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py index 7aa162b5b..23b75f3be 100644 --- a/insights/parsers/tests/test_awx_manage.py +++ b/insights/parsers/tests/test_awx_manage.py @@ -4,7 +4,7 @@ from insights.core import ContentException, ParseException from insights.parsers import awx_manage, SkipException from insights.parsers.awx_manage import AnsibleTowerLicenseType, AnsibleTowerLicense -from insights.parsers.tests import test_empty_skip +from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap GOOD_LICENSE = """ @@ -59,7 +59,7 @@ def test_ansible_tower_license_data(): def test_ansible_tower_license__data_ab_type(): - assert 'Empty output.' in test_empty_skip(AnsibleTowerLicense) + assert 'Empty output.' in skip_exception_check(AnsibleTowerLicense) with pytest.raises(ContentException): AnsibleTowerLicense(context_wrap(NG_COMMAND_1)) diff --git a/insights/parsers/tests/test_ceph_cmd_json_parsing.py b/insights/parsers/tests/test_ceph_cmd_json_parsing.py index f22105c51..f4b410710 100644 --- a/insights/parsers/tests/test_ceph_cmd_json_parsing.py +++ b/insights/parsers/tests/test_ceph_cmd_json_parsing.py @@ -4,7 +4,7 @@ from insights.parsers import ceph_cmd_json_parsing, ParseException from insights.parsers.ceph_cmd_json_parsing import CephOsdDump, CephOsdDf, CephS, CephECProfileGet, CephCfgInfo, \ CephHealthDetail, CephDfDetail, CephOsdTree, CephReport -from insights.parsers.tests import test_empty_skip +from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap CEPH_OSD_DUMP_INFO = """ @@ -535,7 +535,7 @@ def test_ceph_osd_dump(self): assert result['pools'][0]['min_size'] == 2 def test_ceph_osd_dump_empty(self): - assert 'Empty output.' in test_empty_skip(CephOsdDump) + assert 'Empty output.' 
in skip_exception_check(CephOsdDump) class TestCephOsdDf(): @@ -574,7 +574,7 @@ def test_ceph_osd_df(self): assert result['nodes'][0]['pgs'] == 945 def test_ceph_os_df_empty(self): - assert 'Empty output.' in test_empty_skip(CephOsdDf) + assert 'Empty output.' in skip_exception_check(CephOsdDf) class TestCephS(): @@ -607,7 +607,7 @@ def test_ceph_s(self): assert result['pgmap']['pgs_by_state'][0]['state_name'] == 'active+clean' def test_ceph_s_empty(self): - assert 'Empty output.' in test_empty_skip(CephS) + assert 'Empty output.' in skip_exception_check(CephS) class TestCephECProfileGet(): @@ -624,7 +624,7 @@ def test_ceph_ec_profile_get(self): assert result['m'] == "1" def test_ceph_ec_profile_get_empty(self): - assert 'Empty output.' in test_empty_skip(CephECProfileGet) + assert 'Empty output.' in skip_exception_check(CephECProfileGet) class TestCephCfgInfo(): @@ -650,7 +650,7 @@ def test_ceph_cfg_info(self): assert result.max_open_files == '131072' def test_ceph_cfg_info_empty(self): - assert 'Empty output.' in test_empty_skip(CephCfgInfo) + assert 'Empty output.' in skip_exception_check(CephCfgInfo) class TestCephHealthDetail(): @@ -672,7 +672,7 @@ def test_ceph_health_detail(self): assert result['overall_status'] == 'HEALTH_OK' def test_ceph_health_detail_empty(self): - assert 'Empty output.' in test_empty_skip(CephHealthDetail) + assert 'Empty output.' in skip_exception_check(CephHealthDetail) class TestCephDfDetail(): @@ -724,7 +724,7 @@ def test_ceph_df_detail(self): assert result['stats']['total_avail_bytes'] == 16910123008 def test_ceph_df_detail_empty(self): - assert 'Empty output.' in test_empty_skip(CephDfDetail) + assert 'Empty output.' in skip_exception_check(CephDfDetail) class TestCephOsdTree(): @@ -858,7 +858,7 @@ def test_ceph_osd_tree(self): assert len(result['nodes'][0]['children']) == 4 def test_ceph_osd_tree_empty(self): - assert 'Empty output.' in test_empty_skip(CephOsdTree) + assert 'Empty output.' in skip_exception_check(CephOsdTree) class TestCephReport(): @@ -877,4 +877,4 @@ def test_invalid_json(self): assert "Could not parse json." in str(e) def test_ceph_report_empty(self): - assert 'Empty output.' in test_empty_skip(CephReport) + assert 'Empty output.' in skip_exception_check(CephReport) diff --git a/insights/parsers/tests/test_cloud_cfg.py b/insights/parsers/tests/test_cloud_cfg.py index b683636fa..2a4c819d4 100644 --- a/insights/parsers/tests/test_cloud_cfg.py +++ b/insights/parsers/tests/test_cloud_cfg.py @@ -1,7 +1,7 @@ import doctest from insights.parsers import cloud_cfg -from insights.parsers.tests import test_empty_skip +from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap @@ -23,7 +23,7 @@ def test_cloud_cfg(): def test_cloud_cfg_empty(): - assert 'Empty output.' in test_empty_skip(cloud_cfg.CloudCfg) + assert 'Empty output.' 


 def test_doc_examples():
diff --git a/insights/parsers/tests/test_cni_podman_bridge_conf.py b/insights/parsers/tests/test_cni_podman_bridge_conf.py
index 5d5756a79..022a82514 100644
--- a/insights/parsers/tests/test_cni_podman_bridge_conf.py
+++ b/insights/parsers/tests/test_cni_podman_bridge_conf.py
@@ -2,7 +2,7 @@

 from insights.parsers import cni_podman_bridge_conf
 from insights.parsers.cni_podman_bridge_conf import CNIPodmanBridgeConf
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 PODMAN_CNI_FILE = '''
@@ -65,4 +65,4 @@ def test_cni_podman_bridge_conf():


 def test_cni_podman_bridge_conf_empty():
-    assert 'Empty output.' in test_empty_skip(CNIPodmanBridgeConf)
+    assert 'Empty output.' in skip_exception_check(CNIPodmanBridgeConf)
diff --git a/insights/parsers/tests/test_engine_db_query.py b/insights/parsers/tests/test_engine_db_query.py
index a3082bc26..262252178 100644
--- a/insights/parsers/tests/test_engine_db_query.py
+++ b/insights/parsers/tests/test_engine_db_query.py
@@ -2,7 +2,7 @@
 import pytest

 from insights.parsers import engine_db_query, ParseException
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap


@@ -93,7 +93,7 @@ def test_edbq():
     assert output.result == [{'vds_name': 'hosto', 'rpm_version': 'vdsm-4.40.20-33.git1b7dedcf3.fc30'}, {'vds_name': 'hosto2', 'rpm_version': 'vdsm-4.40.13-38.gite9bae3c68.fc30'}]

     # No content
-    assert 'Empty output.' in test_empty_skip(engine_db_query.EngineDBQueryVDSMversion)
+    assert 'Empty output.' in skip_exception_check(engine_db_query.EngineDBQueryVDSMversion)

     # Error
     with pytest.raises(ParseException) as e:
diff --git a/insights/parsers/tests/test_freeipa_healthcheck_log.py b/insights/parsers/tests/test_freeipa_healthcheck_log.py
index 5db80b26b..8e53bce64 100644
--- a/insights/parsers/tests/test_freeipa_healthcheck_log.py
+++ b/insights/parsers/tests/test_freeipa_healthcheck_log.py
@@ -2,7 +2,7 @@

 from insights.parsers import freeipa_healthcheck_log
 from insights.parsers.freeipa_healthcheck_log import FreeIPAHealthCheckLog
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 LONG_FREEIPA_HEALTHCHECK_LOG_OK = """
@@ -99,7 +99,7 @@ def test_freeipa_healthcheck_get_results_not_ok():


 def test_freeipa_healthcheck_log_empty():
-    assert 'Empty output.' in test_empty_skip(FreeIPAHealthCheckLog)
+    assert 'Empty output.' in skip_exception_check(FreeIPAHealthCheckLog)


 def test_freeipa_healthcheck_log__documentation():
diff --git a/insights/parsers/tests/test_httpd_open_nfs.py b/insights/parsers/tests/test_httpd_open_nfs.py
index 9f5441a7e..af2330a33 100644
--- a/insights/parsers/tests/test_httpd_open_nfs.py
+++ b/insights/parsers/tests/test_httpd_open_nfs.py
@@ -2,7 +2,7 @@

 from insights.parsers import httpd_open_nfs
 from insights.parsers.httpd_open_nfs import HttpdOnNFSFilesCount
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 http_nfs = """
@@ -19,7 +19,7 @@ def test_http_nfs():


 def test_empty():
-    assert 'Empty output.' in test_empty_skip(HttpdOnNFSFilesCount)
+    assert 'Empty output.' in skip_exception_check(HttpdOnNFSFilesCount)


 def test_http_nfs_documentation():
diff --git a/insights/parsers/tests/test_ndctl_list.py b/insights/parsers/tests/test_ndctl_list.py
index 2c62ec881..33c0fdebd 100644
--- a/insights/parsers/tests/test_ndctl_list.py
+++ b/insights/parsers/tests/test_ndctl_list.py
@@ -2,7 +2,7 @@

 from insights.parsers import ndctl_list
 from insights.parsers.ndctl_list import NdctlListNi
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 NDCTL_OUTPUT = """
@@ -54,4 +54,4 @@ def test_get_dev_attr():


 def test_empty():
-    assert 'Empty output.' in test_empty_skip(NdctlListNi)
+    assert 'Empty output.' in skip_exception_check(NdctlListNi)
diff --git a/insights/parsers/tests/test_rhsm_releasever.py b/insights/parsers/tests/test_rhsm_releasever.py
index f9728394c..44540758a 100644
--- a/insights/parsers/tests/test_rhsm_releasever.py
+++ b/insights/parsers/tests/test_rhsm_releasever.py
@@ -3,7 +3,7 @@

 from insights.parsers import rhsm_releasever as rhsm_releasever_module, SkipException
 from insights.parsers.rhsm_releasever import RhsmReleaseVer
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 RHEL_MAJ_MIN = '{"releaseVer": "6.10"}'
@@ -51,7 +51,7 @@ def test_rhsm_releasever():


 def test_empty():
-    assert 'Empty output.' in test_empty_skip(RhsmReleaseVer)
+    assert 'Empty output.' in skip_exception_check(RhsmReleaseVer)


 def test_doc_examples():
diff --git a/insights/parsers/tests/test_rhv_log_collector_analyzer.py b/insights/parsers/tests/test_rhv_log_collector_analyzer.py
index 3fdada667..f002019c2 100644
--- a/insights/parsers/tests/test_rhv_log_collector_analyzer.py
+++ b/insights/parsers/tests/test_rhv_log_collector_analyzer.py
@@ -1,5 +1,5 @@
 from insights.parsers.rhv_log_collector_analyzer import RhvLogCollectorJson
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 RHV_ANALYZER_JSON = """
@@ -139,4 +139,4 @@ def test_rhv_log_collector_json(self):
         assert result['rhv-log-collector-analyzer'][0]['file'] == 'cluster_query_migration_policy_check_legacy.sql'

     def test_empty(self):
-        assert 'Empty output.' in test_empty_skip(RhvLogCollectorJson)
+        assert 'Empty output.' in skip_exception_check(RhvLogCollectorJson)
diff --git a/insights/parsers/tests/test_tags.py b/insights/parsers/tests/test_tags.py
index efc9b530a..79efa9485 100644
--- a/insights/parsers/tests/test_tags.py
+++ b/insights/parsers/tests/test_tags.py
@@ -1,5 +1,5 @@
 from insights.parsers.tags import Tags
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 tags_json_content = """
@@ -18,4 +18,4 @@ def test_tags_json():


 def test_tags_empty():
-    assert 'Empty output.' in test_empty_skip(Tags)
+    assert 'Empty output.' in skip_exception_check(Tags)
diff --git a/insights/parsers/tests/test_teamdctl_config_dump.py b/insights/parsers/tests/test_teamdctl_config_dump.py
index 20b096755..2ef2a628b 100644
--- a/insights/parsers/tests/test_teamdctl_config_dump.py
+++ b/insights/parsers/tests/test_teamdctl_config_dump.py
@@ -2,7 +2,7 @@

 from insights.parsers import teamdctl_config_dump
 from insights.parsers.teamdctl_config_dump import TeamdctlConfigDump
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 TEAMDCTL_CONFIG_DUMP_INFO = """
@@ -41,7 +41,7 @@ def test_teamdctl_state_dump():


 def test_teamdctl_state_dump_empty():
-    assert 'Empty output.' in test_empty_skip(TeamdctlConfigDump)
+    assert 'Empty output.' in skip_exception_check(TeamdctlConfigDump)


 def test_nmcli_doc_examples():
diff --git a/insights/parsers/tests/test_teamdctl_state_dump.py b/insights/parsers/tests/test_teamdctl_state_dump.py
index c4849bab2..42e4a3897 100644
--- a/insights/parsers/tests/test_teamdctl_state_dump.py
+++ b/insights/parsers/tests/test_teamdctl_state_dump.py
@@ -1,5 +1,5 @@
 from insights.parsers.teamdctl_state_dump import TeamdctlStateDump
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap

 TEAMDCTL_STATE_DUMP_INFO = """
@@ -114,4 +114,4 @@ def test_teamdctl_state_dump_none():


 def test_teamdctl_state_dump_empty():
-    assert 'Empty output.' in test_empty_skip(TeamdctlStateDump)
+    assert 'Empty output.' in skip_exception_check(TeamdctlStateDump)
diff --git a/insights/parsers/tests/test_version_info.py b/insights/parsers/tests/test_version_info.py
index 288a4a9f6..934450917 100644
--- a/insights/parsers/tests/test_version_info.py
+++ b/insights/parsers/tests/test_version_info.py
@@ -1,7 +1,7 @@
 import doctest

 from insights.parsers import version_info
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.tests import context_wrap


@@ -25,7 +25,7 @@ def test_version_info():


 def test_version_info_empty():
-    assert 'Empty output.' in test_empty_skip(version_info.VersionInfo)
+    assert 'Empty output.' in skip_exception_check(version_info.VersionInfo)


 def test_doc_examples():
diff --git a/insights/parsers/tests/test_virt_uuid_facts.py b/insights/parsers/tests/test_virt_uuid_facts.py
index 1624d46c0..3ffcddbeb 100644
--- a/insights/parsers/tests/test_virt_uuid_facts.py
+++ b/insights/parsers/tests/test_virt_uuid_facts.py
@@ -1,7 +1,7 @@
 import doctest

 from insights.parsers import virt_uuid_facts
-from insights.parsers.tests import test_empty_skip
+from insights.parsers.tests import skip_exception_check
 from insights.parsers.virt_uuid_facts import VirtUuidFacts
 from insights.tests import context_wrap

@@ -22,7 +22,7 @@ def test_virt_uuid_facts():


 def test_virt_uuid_facts_empty():
-    assert 'Empty output.' in test_empty_skip(VirtUuidFacts)
+    assert 'Empty output.' in skip_exception_check(VirtUuidFacts)


 def test_virt_uuid_facts_doc_examples():
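The sixteen per-module changes in this patch are mechanically identical: each empty-input test swaps the fixture for a direct helper call. Because the helper is now a plain function, the same check could equally be written once and parametrized over parser classes; a hypothetical consolidation (not part of this series), using two of the parsers touched above:

    import pytest

    from insights.parsers.tags import Tags
    from insights.parsers.tests import skip_exception_check
    from insights.parsers.version_info import VersionInfo


    # Hypothetical sketch: one parametrized test covering the empty-input
    # behaviour of several parsers, instead of one test per module.
    @pytest.mark.parametrize("parser_class", [Tags, VersionInfo])
    def test_empty_input_raises_skip(parser_class):
        assert 'Empty output.' in skip_exception_check(parser_class)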
From 88e7aa75f2ea11c613e7da1ff69320f0c530f7d8 Mon Sep 17 00:00:00 2001
From: Jeremy Crafts
Date: Wed, 11 Aug 2021 16:49:18 -0400
Subject: [PATCH 519/892] Remove uploader.json map (#3171)

* - remove uploader.json map file
  - cache the contents of uploader.json once loaded in get_conf_file()
  - download uploader.json to use in the unit tests
* remove mapping test, replaced by QE test in core-assets

Signed-off-by: Jeremy Crafts
---
 MANIFEST.in                                   |    1 -
 insights/client/collection_rules.py           |   25 +-
 insights/client/map_components.py             |   26 +-
 insights/client/uploader_json_map.json        | 4410 -----------------
 .../collection_rules/test_get_rm_conf.py      |   26 +
 .../collection_rules/test_map_components.py   |  155 +-
 insights/tests/client/phase/test_update.py    |    2 +-
 insights/tests/client/test_collect.py         |    6 +-
 8 files changed, 84 insertions(+), 4567 deletions(-)
 delete mode 100644 insights/client/uploader_json_map.json

diff --git a/MANIFEST.in b/MANIFEST.in
index 74be9fcbd..a1dde986d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,5 +5,4 @@ include insights/COMMIT
 include insights/RELEASE
 include insights/filters.yaml
 include LICENSE
-include insights/client/uploader_json_map.json
 graft insights/archive/repository/base_archives
diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py
index eec5b3ea8..32441c6e8 100644
--- a/insights/client/collection_rules.py
+++ b/insights/client/collection_rules.py
@@ -114,6 +114,10 @@ def __init__(self, config, conn=None):
         self.collection_rules_url = self.config.collection_rules_url
         self.gpg = self.config.gpg

+        # initialize an attribute to store the content of uploader.json
+        # once it is loaded and verified
+        self.uploader_json = None
+
         # set rm_conf as a class attribute so we can observe it
         # in create_report
         self.rm_conf = None
@@ -129,7 +133,6 @@ def __init__(self, config, conn=None):
                 self.collection_rules_url = conn.base_url + '/v1/static/uploader.v2.json'
             else:
                 self.collection_rules_url = conn.base_url.split('/platform')[0] + '/v1/static/uploader.v2.json'
-            # self.collection_rules_url = conn.base_url + '/static/uploader.v2.json'
             self.conn = conn

     def validate_gpg_sig(self, path, sig=None):
@@ -259,6 +262,9 @@ def get_conf_file(self):
         """
         Get config from local config file, first try cache, then fallback.
         """
+        if self.uploader_json:
+            return self.uploader_json
+
         for conf_file in [self.collection_rules_file, self.fallback_file]:
             logger.debug("trying to read conf from: " + conf_file)
             conf = self.try_disk(conf_file, self.gpg)
@@ -273,13 +279,14 @@ def get_conf_file(self):
             conf['file'] = conf_file
             logger.debug("Success reading config")
             logger.debug(json.dumps(conf))
+            self.uploader_json = conf
             return conf

         raise RuntimeError("ERROR: Unable to download conf or read it from disk!")

     def get_conf_update(self):
         """
-        Get updated config from URL, fallback to local file if download fails.
+        Get updated config from URL.
         """

        dyn_conf = self.get_collection_rules()
@@ -405,14 +412,14 @@ def get_rm_conf(self):
             # try to use remove.conf
             self.rm_conf = self.get_rm_conf_old()
             if self.config.core_collect:
-                self.rm_conf = map_rm_conf_to_components(self.rm_conf)
+                self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file())
             return self.rm_conf

         # remove Nones, empty strings, and empty lists
         filtered_rm_conf = dict((k, v) for k, v in rm_conf.items() if v)
         self.rm_conf = filtered_rm_conf
         if self.config.core_collect:
-            self.rm_conf = map_rm_conf_to_components(self.rm_conf)
+            self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file())
         return self.rm_conf

     def get_tags_conf(self):
@@ -492,13 +499,3 @@ def length(lst):
             'using_new_format': self.using_new_format,
             'using_patterns_regex': using_regex
         }
-
-
-if __name__ == '__main__':
-    from .config import InsightsConfig
-    config = InsightsConfig().load_all()
-    uploadconf = InsightsUploadConf(config)
-    uploadconf.validate()
-    # report = uploadconf.create_report()
-
-    # print(report)
diff --git a/insights/client/map_components.py b/insights/client/map_components.py
index 68c28ecc2..8f4879bb6 100644
--- a/insights/client/map_components.py
+++ b/insights/client/map_components.py
@@ -1,7 +1,4 @@
 from __future__ import absolute_import
-import pkgutil
-import insights
-import json
 import six
 import logging
 import textwrap
@@ -11,11 +8,8 @@
 APP_NAME = constants.app_name
 logger = logging.getLogger(__name__)

-uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json")
-uploader_json = json.loads(uploader_json_file)

-
-def map_rm_conf_to_components(rm_conf):
+def map_rm_conf_to_components(rm_conf, uploader_json):
     '''
     In order to maximize compatibility between "classic" remove.conf
     configurations and core collection, do the following mapping
@@ -56,10 +50,10 @@
             continue
         for key in rm_conf[section]:
             if section == 'commands':
-                symbolic_name = _search_uploader_json(['commands'], key)
+                symbolic_name = _search_uploader_json(uploader_json, ['commands'], key)
             elif section == 'files':
                 # match both files and globs to rm_conf files
-                symbolic_name = _search_uploader_json(['files', 'globs'], key)
+                symbolic_name = _search_uploader_json(uploader_json, ['files', 'globs'], key)

             component = _get_component_by_symbolic_name(symbolic_name)
             if component:
@@ -91,7 +85,7 @@
     return rm_conf


-def _search_uploader_json(headings, key):
+def _search_uploader_json(uploader_json, headings, key):
     '''
     Search an uploader.json block for a command/file from "name"
     and return the symbolic name if it exists
@@ -174,15 +168,3 @@ def _log_conversion_table(conversion_map, longest_key_len):
         # log the conversion on the first line of the "wrap"
         wrapped_spec[0] = '- {0:{1}} => {2}'.format(wrapped_spec[0], log_len, spec_name_no_prefix)
         logger.warning('\n '.join(wrapped_spec))
-
-
-if __name__ == '__main__':
-    from .config import InsightsConfig
-    from .collection_rules import InsightsUploadConf
-    config = InsightsConfig(core_collect=True).load_all()
-    uploadconf = InsightsUploadConf(config)
-    # rm_conf = uploadconf.get_rm_conf()
-    # report = map_rm_conf_to_components(rm_conf)
-    # uploadconf.rm_conf = report
-    uploadconf.validate()
-    # print(report)
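Two things change hands in the hunks above: get_conf_file() now memoizes the verified uploader.json on the instance, and map_rm_conf_to_components() receives that document as an argument instead of parsing a bundled copy at module import time, so the 4,410-line JSON is neither shipped in the package nor loaded unless core collection actually needs it. A condensed sketch of the resulting flow, with simplified stand-ins rather than the real classes:

    import json


    class UploadConfSketch(object):
        # Stand-in for InsightsUploadConf: read uploader.json once,
        # then serve every later call from the cached copy.
        def __init__(self, conf_path):
            self.conf_path = conf_path
            self.uploader_json = None          # filled on first successful load

        def get_conf_file(self):
            if self.uploader_json:             # cache hit: no re-read, no re-verify
                return self.uploader_json
            with open(self.conf_path) as f:    # the real code also tries a
                conf = json.load(f)            # fallback path and GPG-verifies
            self.uploader_json = conf
            return conf


    def search_uploader_json_sketch(uploader_json, key):
        # Mirrors the reworked _search_uploader_json: the document is an
        # argument now, not a module-level global loaded via pkgutil.
        for spec in uploader_json.get('commands', []):
            if spec.get('command') == key:
                return spec.get('symbolic_name')


    # Hypothetical usage (the path is illustrative, not a real default):
    # conf = UploadConfSketch('/etc/insights-client/uploader.json')
    # name = search_uploader_json_sketch(conf.get_conf_file(), '/bin/df -alP -x autofs')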
""" dyn_conf = self.get_collection_rules() @@ -405,14 +412,14 @@ def get_rm_conf(self): # try to use remove.conf self.rm_conf = self.get_rm_conf_old() if self.config.core_collect: - self.rm_conf = map_rm_conf_to_components(self.rm_conf) + self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file()) return self.rm_conf # remove Nones, empty strings, and empty lists filtered_rm_conf = dict((k, v) for k, v in rm_conf.items() if v) self.rm_conf = filtered_rm_conf if self.config.core_collect: - self.rm_conf = map_rm_conf_to_components(self.rm_conf) + self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file()) return self.rm_conf def get_tags_conf(self): @@ -492,13 +499,3 @@ def length(lst): 'using_new_format': self.using_new_format, 'using_patterns_regex': using_regex } - - -if __name__ == '__main__': - from .config import InsightsConfig - config = InsightsConfig().load_all() - uploadconf = InsightsUploadConf(config) - uploadconf.validate() - # report = uploadconf.create_report() - - # print(report) diff --git a/insights/client/map_components.py b/insights/client/map_components.py index 68c28ecc2..8f4879bb6 100644 --- a/insights/client/map_components.py +++ b/insights/client/map_components.py @@ -1,7 +1,4 @@ from __future__ import absolute_import -import pkgutil -import insights -import json import six import logging import textwrap @@ -11,11 +8,8 @@ APP_NAME = constants.app_name logger = logging.getLogger(__name__) -uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json") -uploader_json = json.loads(uploader_json_file) - -def map_rm_conf_to_components(rm_conf): +def map_rm_conf_to_components(rm_conf, uploader_json): ''' In order to maximize compatibility between "classic" remove.conf configurations and core collection, do the following mapping @@ -56,10 +50,10 @@ def map_rm_conf_to_components(rm_conf): continue for key in rm_conf[section]: if section == 'commands': - symbolic_name = _search_uploader_json(['commands'], key) + symbolic_name = _search_uploader_json(uploader_json, ['commands'], key) elif section == 'files': # match both files and globs to rm_conf files - symbolic_name = _search_uploader_json(['files', 'globs'], key) + symbolic_name = _search_uploader_json(uploader_json, ['files', 'globs'], key) component = _get_component_by_symbolic_name(symbolic_name) if component: @@ -91,7 +85,7 @@ def map_rm_conf_to_components(rm_conf): return rm_conf -def _search_uploader_json(headings, key): +def _search_uploader_json(uploader_json, headings, key): ''' Search an uploader.json block for a command/file from "name" and return the symbolic name if it exists @@ -174,15 +168,3 @@ def _log_conversion_table(conversion_map, longest_key_len): # log the conversion on the first line of the "wrap" wrapped_spec[0] = '- {0:{1}} => {2}'.format(wrapped_spec[0], log_len, spec_name_no_prefix) logger.warning('\n '.join(wrapped_spec)) - - -if __name__ == '__main__': - from .config import InsightsConfig - from .collection_rules import InsightsUploadConf - config = InsightsConfig(core_collect=True).load_all() - uploadconf = InsightsUploadConf(config) - # rm_conf = uploadconf.get_rm_conf() - # report = map_rm_conf_to_components(rm_conf) - # uploadconf.rm_conf = report - uploadconf.validate() - # print(report) diff --git a/insights/client/uploader_json_map.json b/insights/client/uploader_json_map.json deleted file mode 100644 index fc75a47bd..000000000 --- a/insights/client/uploader_json_map.json +++ /dev/null @@ -1,4410 +0,0 @@ -{ - "commands": [ - { 
- "command": "/usr/bin/abrt status --bare=True", - "pattern": [], - "symbolic_name": "abrt_status_bare" - }, - { - "command": "/usr/sbin/alternatives --display python", - "pattern": [], - "symbolic_name": "alternatives_display_python" - }, - { - "command": "python -m insights.tools.cat --no-header aws_instance_id_doc", - "pattern": [], - "symbolic_name": "aws_instance_id_doc" - }, - { - "command": "python -m insights.tools.cat --no-header aws_instance_id_pkcs7", - "pattern": [], - "symbolic_name": "aws_instance_id_pkcs7" - }, - { - "command": "/usr/bin/awx-manage check_license", - "pattern": [], - "symbolic_name": "awx_manage_check_license" - }, - { - "command": "python -m insights.tools.cat --no-header azure_instance_type", - "pattern": [], - "symbolic_name": "azure_instance_type" - }, - { - "command": "python -m insights.tools.cat --no-header azure_instance_plan", - "pattern": [], - "symbolic_name": "azure_instance_plan" - }, - { - "command": "/sbin/auditctl -s", - "pattern": [], - "symbolic_name": "auditctl_status" - }, - { - "command": "/sbin/blkid -c /dev/null", - "pattern": [], - "symbolic_name": "blkid" - }, - { - "command": "/usr/sbin/brctl show", - "pattern": [], - "symbolic_name": "brctl_show" - }, - { - "command": "/usr/bin/ceph health detail -f json", - "pattern": [], - "symbolic_name": "ceph_health_detail" - }, - { - "command": "/usr/bin/ceph df detail -f json", - "pattern": [], - "symbolic_name": "ceph_df_detail" - }, - { - "command": "/usr/bin/ceph osd dump -f json", - "pattern": [], - "symbolic_name": "ceph_osd_dump" - }, - { - "command": "/usr/bin/ceph osd df -f json", - "pattern": [], - "symbolic_name": "ceph_osd_df" - }, - { - "command": "/usr/bin/ceph osd tree -f json", - "pattern": [], - "symbolic_name": "ceph_osd_tree" - }, - { - "command": "/usr/bin/ceph -s -f json", - "pattern": [], - "symbolic_name": "ceph_s" - }, - { - "command": "/usr/bin/ceph -v", - "pattern": [], - "symbolic_name": "ceph_v" - }, - { - "command": "python -m insights.tools.cat --no-header ceph_insights", - "pattern": [], - "symbolic_name": "ceph_insights" - }, - { - "command": "/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;", - "pattern": [], - "symbolic_name": "certificates_enddate" - }, - { - "command": "/sbin/chkconfig --list", - "pattern": [], - "symbolic_name": "chkconfig" - }, - { - "command": "/usr/bin/chronyc sources", - "pattern": [], - "symbolic_name": "chronyc_sources" - }, - { - "command": "/usr/bin/cpupower -c all frequency-info", - "pattern": [], - "symbolic_name": "cpupower_frequency_info" - }, - { - "command": "/bin/date", - "pattern": [], - "symbolic_name": "date" - }, - { - "command": "/bin/date --utc", - "pattern": [], - "symbolic_name": "date_utc" - }, - { - "command": "/bin/df -al -x autofs", - "pattern": [], - "symbolic_name": "df__al" - }, - { - "command": "/bin/df -alP -x autofs", - "pattern": [], - "symbolic_name": "df__alP" - }, - { - "command": "/bin/df -li -x autofs", - "pattern": [], - "symbolic_name": "df__li" - }, - { - "command": "/usr/bin/dig +dnssec . SOA", - "pattern": [], - "symbolic_name": "dig_dnssec" - }, - { - "command": "/usr/bin/dig +edns=0 . SOA", - "pattern": [], - "symbolic_name": "dig_edns" - }, - { - "command": "/usr/bin/dig +noedns . 
SOA", - "pattern": [], - "symbolic_name": "dig_noedns" - }, - { - "command": "/bin/dmesg", - "pattern": [ - " is now offline", - "AMD Secure Memory Encryption (SME) active", - "Amazon EC2", - "BIOS Hyper-V UEFI Release", - "Brought up ", - "CIFS VFS: protocol revalidation - security settings mismatch", - "CQE error - vendor syndrome", - "CSUM", - "CVE-2017-1000364", - "CVE-2018-14634", - "Dazed and confused, but trying to continue", - "Device is ineligible for IOMMU domain attach due to platform RMRR requirement", - "Dropping TSO", - "EDAC ", - "Emulex OneConnect OCe10100, FCoE Initiator", - "FEATURE IBPB_SUPPORT", - "FEATURE SPEC_CTRL", - "Hyper-V Host Build", - "Ignoring BGRT: failed to map image header memory", - "Ignoring BGRT: failed to map image memory", - "Kernel page table isolation", - "L1TF", - "L1Tf", - "Linux version", - "NUMA: ", - "Node 0 CPUs: ", - "QLogic QLE2692 - QLogic 16Gb FC Dual-port HBA", - "SMBIOS ", - "SPLXMOD: SPLX 3.0: KHM loaded. Version [30118]", - "SPLXMOD: SPLX 3.0: KHM loaded. Version [30119]", - "Secure boot enabled", - "TECH PREVIEW: NVMe over FC may not be fully supported.", - "Uhhuh. NMI received for unknown reason", - "VPD access disabled", - "Warning: QLogic ISP3XXX Network Driver - this hardware has not undergone testing by Red Hat and might not be certified", - "__cpufreq_add_dev", - "blocked FC remote port time out: removing target and saving binding", - "crashkernel=auto resulted in zero bytes of reserved memory", - "e1000: E1000 MODULE IS NOT SUPPORTED", - "fw=8.08.", - "l1tf", - "mce: ", - "netconsole: network logging started", - "page allocation failure: order:", - "resetting", - "smpboot: CPU ", - "the DIE domain not a subset of the NUMA domain", - "tx hang", - "vmxnet3", - "vpd r/w failed", - "x86/pti" - ], - "symbolic_name": "dmesg" - }, - { - "command": "/usr/sbin/dmidecode", - "pattern": [], - "symbolic_name": "dmidecode" - }, - { - "command": "/usr/sbin/dmidecode -s system-uuid", - "pattern": [], - "symbolic_name": "bios_uuid" - }, - { - "command": "/usr/sbin/dmsetup info -C", - "pattern": [], - "symbolic_name": "dmsetup_info" - }, - { - "command": "/usr/sbin/dmsetup status", - "pattern": [], - "symbolic_name": "dmsetup_status" - }, - { - "command": "/usr/bin/docker info", - "pattern": [], - "symbolic_name": "docker_info" - }, - { - "command": "/usr/bin/docker ps --all --no-trunc", - "pattern": [], - "symbolic_name": "docker_list_containers" - }, - { - "command": "/usr/bin/docker images --all --no-trunc --digests", - "pattern": [], - "symbolic_name": "docker_list_images" - }, - { - "command": "/usr/bin/dotnet --version", - "pattern": [], - "symbolic_name": "dotnet_version" - }, - { - "command": "/usr/bin/doveconf", - "pattern": [ - "auth_mechanisms", - "ssl_min_protocol", - "ssl_protocols", - "{", - "}" - ], - "symbolic_name": "doveconf" - }, - { - "command": "/bin/du -s -k /var/lib/candlepin/activemq-artemis", - "pattern": [], - "symbolic_name": "du_dirs" - }, - { - "command": "/bin/engine-db-query --statement \"SELECT vs.vds_name, rpm_version FROM vds_dynamic vd, vds_static vs WHERE vd.vds_id = vs.vds_id\" --json", - "pattern": [], - "symbolic_name": "engine_db_query_vdsm_version" - }, - { - "command": "/sbin/ethtool", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool" - }, - { - "command": "/sbin/ethtool -c", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_c" - }, - { - "command": "/sbin/ethtool -S", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_S" - }, - { - 
"command": "/sbin/ethtool -T", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_T" - }, - { - "command": "/sbin/ethtool -g", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_g" - }, - { - "command": "/sbin/ethtool -i", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_i" - }, - { - "command": "/sbin/ethtool -k", - "pattern": [], - "pre_command": "iface", - "symbolic_name": "ethtool_k" - }, - { - "command": "/usr/bin/facter", - "pattern": [], - "symbolic_name": "facter" - }, - { - "command": "/bin/fc-match -sv 'sans:regular:roman' family fontformat", - "pattern": [], - "symbolic_name": "fc_match" - }, - { - "command": "/usr/sbin/fcoeadm -i", - "pattern": [], - "symbolic_name": "fcoeadm_i" - }, - { - "command": "/bin/findmnt -lo+PROPAGATION", - "pattern": [], - "symbolic_name": "findmnt_lo_propagation" - }, - { - "command": "/usr/bin/firewall-cmd --list-all-zones", - "pattern": [], - "symbolic_name": "firewall_cmd_list_all_zones" - }, - { - "command": "python -m insights.tools.cat --no-header gcp_license_codes", - "pattern": [], - "symbolic_name": "gcp_license_codes" - }, - { - "command": "/usr/bin/getconf PAGE_SIZE", - "pattern": [], - "symbolic_name": "getconf_pagesize" - }, - { - "command": "/usr/sbin/getenforce", - "pattern": [], - "symbolic_name": "getenforce" - }, - { - "command": "/usr/sbin/getsebool -a", - "pattern": [], - "symbolic_name": "getsebool" - }, - { - "command": "/usr/sbin/gluster volume info", - "pattern": [], - "symbolic_name": "gluster_v_info" - }, - { - "command": "/usr/sbin/gluster peer status", - "pattern": [], - "symbolic_name": "gluster_peer_status" - }, - { - "command": "/usr/sbin/gluster volume status", - "pattern": [], - "symbolic_name": "gluster_v_status" - }, - { - "command": "/bin/ls -l /boot/grub/grub.conf", - "pattern": [], - "symbolic_name": "grub1_config_perms" - }, - { - "command": "/bin/ls -l /boot/grub2/grub.cfg", - "pattern": [], - "symbolic_name": "grub_config_perms" - }, - { - "command": "/usr/sbin/grubby --default-index", - "pattern": [], - "symbolic_name": "grubby_default_index" - }, - { - "command": "/sbin/grubby --default-kernel", - "pattern": [], - "symbolic_name": "grubby_default_kernel" - }, - { - "command": "/usr/bin/crontab -l -u heat", - "pattern": [ - "heat-manage" - ], - "symbolic_name": "heat_crontab" - }, - { - "command": "/bin/hostname", - "pattern": [], - "symbolic_name": "hostname_default" - }, - { - "command": "/bin/hostname -f", - "pattern": [], - "symbolic_name": "hostname" - }, - { - "command": "/bin/hostname -I", - "pattern": [], - "symbolic_name": "ip_addresses" - }, - { - "command": "/bin/hostname -s", - "pattern": [], - "symbolic_name": "hostname_short" - }, - { - "command": "/usr/sbin/httpd -V", - "pattern": [], - "symbolic_name": "httpd_V" - }, - { - "command": "python -m insights.tools.cat --no-header httpd_on_nfs", - "pattern": [], - "symbolic_name": "httpd_on_nfs" - }, - { - "command": "/usr/sbin/httpd -M", - "pattern": [], - "symbolic_name": "httpd_M" - }, - { - "command": "/bin/rpm -qa --root={CONTAINER_MOUNT_POINT} --qf='\\{\"name\":\"%{NAME}\",\"epoch\":\"%{EPOCH}\",\"version\":\"%{VERSION}\",\"release\":\"%{RELEASE}\",\"arch\":\"%{ARCH}\",\"installtime\":\"%{INSTALLTIME:date}\",\"buildtime\":\"%{BUILDTIME}\",\"vendor\":\"%{VENDOR}\",\"buildhost\":\"%{BUILDHOST}\",\"sigpgp\":\"%{SIGPGP:pgpsig}\"\\}\n'", - "pattern": [], - "symbolic_name": "installed_rpms", - "image": true - }, - { - "command": "/sbin/initctl --system list", - "pattern": [], - 
"symbolic_name": "initctl_lst" - }, - { - "command": "/sbin/ip -s -d link", - "pattern": [], - "symbolic_name": "ip_s_link" - }, - { - "command": "/sbin/ip6tables-save", - "pattern": [], - "symbolic_name": "ip6tables" - }, - { - "command": "/sbin/ip addr", - "pattern": [], - "symbolic_name": "ip_addr" - }, - { - "command": "/sbin/ip route show table all", - "pattern": [], - "symbolic_name": "ip_route_show_table_all" - }, - { - "command": "/usr/bin/ipcs -m", - "pattern": [], - "symbolic_name": "ipcs_m" - }, - { - "command": "/usr/bin/ipcs -m -p", - "pattern": [], - "symbolic_name": "ipcs_m_p" - }, - { - "command": "/usr/bin/ipcs -s", - "pattern": [], - "symbolic_name": "ipcs_s" - }, - { - "command": "/sbin/iptables-save", - "pattern": [], - "symbolic_name": "iptables" - }, - { - "command": "/sbin/ip -4 neighbor show nud all", - "pattern": [], - "symbolic_name": "ipv4_neigh" - }, - { - "command": "/sbin/ip -6 neighbor show nud all", - "pattern": [], - "symbolic_name": "ipv6_neigh" - }, - { - "command": "/usr/sbin/iscsiadm -m session", - "pattern": [], - "symbolic_name": "iscsiadm_m_session" - }, - { - "command": "/usr/bin/crontab -l -u keystone", - "pattern": [ - "heat-manage", - "keystone-manage" - ], - "symbolic_name": "keystone_crontab" - }, - { - "command": "/usr/sbin/kpatch list", - "pattern": [], - "symbolic_name": "kpatch_list" - }, - { - "command": "/usr/bin/file -L /etc/localtime", - "pattern": [], - "symbolic_name": "localtime" - }, - { - "command": "/usr/bin/lpstat -p", - "pattern": [], - "symbolic_name": "lpstat_p" - }, - { - "command": "/bin/ls -lanR /boot", - "pattern": [], - "symbolic_name": "ls_boot" - }, - { - "command": "/bin/ls -lanR /dev", - "pattern": [], - "symbolic_name": "ls_dev" - }, - { - "command": "/bin/ls -lanR /dev/disk", - "pattern": [], - "symbolic_name": "ls_disk" - }, - { - "command": "/bin/ls -lan /sys/devices/system/edac/mc", - "pattern": [], - "symbolic_name": "ls_edac_mc" - }, - { - "command": "/bin/ls -lan /etc /etc/cloud/cloud.cfg.d /etc/nova/migration /etc/pki/ovirt-vmconsole /etc/pki/tls/certs /etc/pki/tls/private /etc/rc.d/init.d /etc/sysconfig", - "pattern": [], - "symbolic_name": "ls_etc" - }, - { - "command": "/bin/ls -lan /usr/share/ipa/ui/js/plugins/idoverride-memberof", - "pattern": [], - "symbolic_name": "ls_ipa_idoverride_memberof" - }, - { - "command": "/bin/ls -lanR /lib/firmware", - "pattern": [], - "symbolic_name": "ls_lib_firmware" - }, - { - "command": "/bin/ls -lanR /sys/firmware", - "pattern": [], - "symbolic_name": "ls_sys_firmware" - }, - { - "command": "/bin/ls -lan /usr/bin", - "pattern": [ - "/usr/bin", - "python", - "sudo", - "total" - ], - "symbolic_name": "ls_usr_bin" - }, - { - "command": "/bin/ls -lan /var/cache/pulp", - "pattern": [], - "symbolic_name": "ls_var_cache_pulp" - }, - { - "command": "/bin/ls -la /var/lib/mongodb", - "pattern": [], - "symbolic_name": "ls_var_lib_mongodb" - }, - { - "command": "/bin/ls -laR /var/lib/nova/instances", - "pattern": [], - "symbolic_name": "ls_R_var_lib_nova_instances" - }, - { - "command": "/bin/ls -laRZ /var/lib/nova/instances", - "pattern": [], - "symbolic_name": "ls_var_lib_nova_instances" - }, - { - "command": "/bin/ls -ld /var/opt/mssql", - "pattern": [], - "symbolic_name": "ls_var_opt_mssql" - }, - { - "command": "/bin/ls -lan /usr/lib64", - "pattern": [ - "liblber", - "libldap", - "total" - ], - "symbolic_name": "ls_usr_lib64" - }, - { - "command": "/bin/ls -ln /usr/sbin", - "pattern": [ - "total" - ], - "symbolic_name": "ls_usr_sbin" - }, - { - "command": "/bin/ls -la 
/var/log /var/log/audit", - "pattern": [], - "symbolic_name": "ls_var_log" - }, - { - "command": "/bin/ls -la /var/opt/mssql/log", - "pattern": [], - "symbolic_name": "ls_var_opt_mssql_log" - }, - { - "command": "/bin/ls -la /dev/null /var/www", - "pattern": [], - "symbolic_name": "ls_var_www" - }, - { - "command": "/bin/ls -lnL /var/run", - "pattern": [], - "symbolic_name": "ls_var_run" - }, - { - "command": "/bin/ls -ln /var/spool/postfix/maildrop", - "pattern": [], - "symbolic_name": "ls_var_spool_postfix_maildrop" - }, - { - "command": "/bin/ls -ln /var/spool/clientmqueue", - "pattern": [], - "symbolic_name": "ls_var_spool_clientmq" - }, - { - "command": "/bin/ls -l /var/lib/cni/networks/openshift-sdn", - "pattern": [], - "symbolic_name": "ls_ocp_cni_openshift_sdn" - }, - { - "command": "/bin/ls -l /var/lib/origin/openshift.local.volumes/pods", - "pattern": [], - "symbolic_name": "ls_origin_local_volumes_pods" - }, - { - "command": "/bin/ls -lan /", - "pattern": [], - "symbolic_name": "ls_osroot" - }, - { - "command": "/bin/ls -lan /run/systemd/generator", - "pattern": [], - "symbolic_name": "ls_run_systemd_generator" - }, - { - "command": "/bin/ls -ln /var/tmp", - "pattern": [ - "/var/tmp" - ], - "symbolic_name": "ls_var_tmp" - }, - { - "command": "/bin/ls -la /tmp", - "pattern": [ - ".sap", - ".trex" - ], - "symbolic_name": "ls_tmp" - }, - { - "command": "/bin/lsblk", - "pattern": [], - "symbolic_name": "lsblk" - }, - { - "command": "/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO", - "pattern": [], - "symbolic_name": "lsblk_pairs" - }, - { - "command": "/usr/bin/lscpu", - "pattern": [], - "symbolic_name": "lscpu" - }, - { - "command": "/sbin/lsmod", - "pattern": [], - "symbolic_name": "lsmod" - }, - { - "command": "/usr/sbin/lsof", - "pattern": [ - "(deleted)", - "/var/log/journal", - "COMMAND", - "libcrypto", - "liblvm2cmd.so", - "libssl", - "libssl.so", - "lsnrctl", - "ovs-vswit", - "tnslsnr" - ], - "symbolic_name": "lsof" - }, - { - "command": "/sbin/lspci -k", - "pattern": [], - "symbolic_name": "lspci" - }, - { - "command": "/sbin/lspci -vmmkn", - "pattern": [], - "symbolic_name": "lspci_vmmkn" - }, - { - "command": "/usr/sbin/lvmconfig --type full", - "pattern": [], - "symbolic_name": "lvmconfig" - }, - { - "command": "/usr/sbin/lvm dumpconfig --type full", - "pattern": [], - "symbolic_name": "lvmconfig" - }, - { - "command": "/usr/sap/hostctrl/exe/saphostctrl -function GetCIMObject -enuminstances SAPInstance", - "pattern": [ - "******", - "CreationClassName", - "FullQualifiedHostname", - "Hostname", - "InstanceName", - "SID", - "SapVersionInfo", - "SystemNumber" - ], - "symbolic_name": "saphostctl_getcimobject_sapinstance" - }, - { - "command": "/usr/sap/hostctrl/exe/saphostexec -status", - "pattern": [], - "symbolic_name": "saphostexec_status" - }, - { - "command": "/usr/sap/hostctrl/exe/saphostexec -version", - "pattern": [], - "symbolic_name": "saphostexec_version" - }, - { - "command": "/usr/bin/lsscsi", - "pattern": [], - "symbolic_name": "lsscsi" - }, - { - "command": "/usr/sbin/lsvmbus -vv", - "pattern": [], - "symbolic_name": "lsvmbus" - }, - { - "command": "/sbin/lvs --nameprefixes --noheadings --separator='|' -a -o lv_name,lv_size,lv_attr,mirror_log,vg_name,devices,region_size,data_percent,metadata_percent,segtype,seg_monitor,lv_kernel_major,lv_kernel_minor --config=\"global{locking_type=0}\"", - "pattern": 
[], - "symbolic_name": "lvs_noheadings" - }, - { - "command": "/bin/awk -F':' '{ if($3 > max) max = $3 } END { print max }' /etc/passwd", - "pattern": [], - "symbolic_name": "max_uid" - }, - { - "command": "/usr/bin/md5sum /etc/pki/product/69.pem", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/usr/bin/md5sum /etc/pki/product-default/69.pem", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/usr/bin/md5sum /usr/lib/libsoftokn3.so", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/usr/bin/md5sum /usr/lib64/libsoftokn3.so", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/usr/bin/md5sum /usr/lib/libfreeblpriv3.so", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/usr/bin/md5sum /usr/lib64/libfreeblpriv3.so", - "pattern": [], - "symbolic_name": "md5chk_files" - }, - { - "command": "/bin/mokutil --sb-state", - "pattern": [], - "symbolic_name": "mokutil_sbstate" - }, - { - "command": "/bin/mount", - "pattern": [], - "symbolic_name": "mount" - }, - { - "command": "/sbin/modinfo i40e", - "pattern": [], - "symbolic_name": "modinfo_i40e" - }, - { - "command": "/sbin/modinfo igb", - "pattern": [], - "symbolic_name": "modinfo_igb" - }, - { - "command": "/sbin/modinfo ixgbe", - "pattern": [], - "symbolic_name": "modinfo_ixgbe" - }, - { - "command": "/sbin/modinfo veth", - "pattern": [], - "symbolic_name": "modinfo_veth" - }, - { - "command": "/sbin/modinfo vmxnet3", - "pattern": [], - "symbolic_name": "modinfo_vmxnet3" - }, - { - "command": "/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \\;", - "pattern": [], - "symbolic_name": "multicast_querier" - }, - { - "command": "/sbin/multipath -v4 -ll", - "pattern": [], - "symbolic_name": "multipath__v4__ll" - }, - { - "command": "/bin/lsinitrd -f /etc/multipath.conf", - "pattern": [], - "symbolic_name": "multipath_conf_initramfs" - }, - { - "command": "/bin/mysqladmin variables", - "pattern": [], - "symbolic_name": "mysqladmin_vars" - }, - { - "command": "/usr/sbin/named-checkconf -p", - "pattern": [ - "DISABLE-ALGORITHMS", - "DISABLE-DS-DIGESTS", - "DNSSEC-ENABLE", - "DSA", - "Disable-Algorithms", - "Disable-Ds-Digests", - "Dnssec-Enable", - "Dsa", - "ECCGOST", - "ECDSAP256SHA256", - "ECDSAP384SHA384", - "Eccgost", - "Ecdsap256Sha256", - "Ecdsap384Sha384", - "GOST", - "Gost", - "NSEC3DSA", - "NSEC3RSASHA1", - "Nsec3Dsa", - "Nsec3Rsasha1", - "RSAMD5", - "RSASHA1", - "RSASHA256", - "RSASHA512", - "Rsamd5", - "Rsasha1", - "Rsasha256", - "Rsasha512", - "SHA-1", - "SHA-256", - "SHA-384", - "SHA1", - "SHA256", - "SHA384", - "Sha-1", - "Sha-256", - "Sha-384", - "Sha1", - "Sha256", - "Sha384", - "disable-algorithms", - "disable-ds-digests", - "dnssec-enable", - "dsa", - "eccgost", - "ecdsap256sha256", - "ecdsap384sha384", - "gost", - "nsec3dsa", - "nsec3rsasha1", - "rsamd5", - "rsasha1", - "rsasha256", - "rsasha512", - "sha-1", - "sha-256", - "sha-384", - "sha1", - "sha256", - "sha384", - "}" - ], - "symbolic_name": "named_checkconf_p" - }, - { - "command": "/bin/ls /var/run/netns", - "pattern": [], - "symbolic_name": "namespace" - }, - { - "command": "/usr/bin/ndctl list -Ni", - "pattern": [], - "symbolic_name": "ndctl_list_Ni" - }, - { - "command": "/bin/netstat -neopa", - "pattern": [], - "symbolic_name": "netstat" - }, - { - "command": "/bin/netstat -i", - "pattern": [], - "symbolic_name": "netstat_i" - }, - { - "command": "/bin/netstat -s", - "pattern": [], - "symbolic_name": "netstat_s" - }, - 
{ - "command": "/bin/netstat -agn", - "pattern": [], - "symbolic_name": "netstat__agn" - }, - { - "command": "/usr/bin/nmcli conn show", - "pattern": [], - "symbolic_name": "nmcli_conn_show" - }, - { - "command": "/usr/bin/nmcli dev show", - "pattern": [], - "symbolic_name": "nmcli_dev_show" - }, - { - "command": "/usr/bin/crontab -l -u nova", - "pattern": [], - "symbolic_name": "nova_crontab" - }, - { - "command": "/usr/bin/id -u nova", - "pattern": [], - "symbolic_name": "nova_uid" - }, - { - "command": "/usr/bin/id -u nova_migration", - "pattern": [], - "symbolic_name": "nova_migration_uid" - }, - { - "command": "/usr/bin/hammer --config /root/.hammer/cli.modules.d/foreman.yml --output csv task list --search 'state=running AND ( label=Actions::Candlepin::ListenOnCandlepinEvents OR label=Actions::Katello::EventQueue::Monitor )'", - "pattern": [], - "symbolic_name": "hammer_task_list" - }, - { - "command": "/usr/sbin/ntpq -c 'rv 0 leap'", - "pattern": [], - "symbolic_name": "ntpq_leap" - }, - { - "command": "/usr/sbin/ntpq -pn", - "pattern": [], - "symbolic_name": "ntpq_pn" - }, - { - "command": "/usr/sbin/ntptime", - "pattern": [], - "symbolic_name": "ntptime" - }, - { - "command": "/bin/grep -c '^[[:digit:]]' /etc/passwd /etc/group", - "pattern": [], - "symbolic_name": "numeric_user_group_name" - }, - { - "command": "/usr/bin/oc get clusterrole --config /etc/origin/master/admin.kubeconfig", - "pattern": [], - "symbolic_name": "oc_get_clusterrole_with_config" - }, - { - "command": "/usr/bin/oc get clusterrolebinding --config /etc/origin/master/admin.kubeconfig", - "pattern": [], - "symbolic_name": "oc_get_clusterrolebinding_with_config" - }, - { - "command": "/usr/bin/vmware-toolbox-cmd stat raw text session", - "pattern": [], - "symbolic_name": "open_vm_tools_stat_raw_text_session" - }, - { - "command": "/usr/bin/ovs-vsctl -t 5 get Open_vSwitch . 
other_config", - "pattern": [], - "symbolic_name": "openvswitch_other_config" - }, - { - "command": "/usr/bin/ovs-vsctl list bridge", - "pattern": [], - "symbolic_name": "ovs_vsctl_list_bridge" - }, - { - "command": "/usr/bin/ovs-vsctl show", - "pattern": [], - "symbolic_name": "ovs_vsctl_show" - }, - { - "command": "/usr/bin/passenger-status", - "pattern": [], - "symbolic_name": "passenger_status" - }, - { - "command": "/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f", - "pattern": [], - "symbolic_name": "pci_rport_target_disk_paths" - }, - { - "command": "/usr/sbin/pcs quorum status", - "pattern": [], - "symbolic_name": "pcs_quorum_status" - }, - { - "command": "/usr/sbin/pcs status", - "pattern": [], - "symbolic_name": "pcs_status" - }, - { - "command": "/usr/bin/pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv", - "pattern": [], - "symbolic_name": "pmrep_metrics" - }, - { - "command": "/usr/sbin/postconf -C builtin", - "pattern": [ - "smtp_tls_mandatory_protocols", - "smtp_tls_protocols", - "smtpd_tls_mandatory_protocols", - "smtpd_tls_protocols" - ], - "symbolic_name": "postconf_builtin" - }, - { - "command": "/usr/sbin/postconf", - "pattern": [ - "smtp_tls_mandatory_protocols", - "smtp_tls_protocols", - "smtpd_tls_mandatory_protocols", - "smtpd_tls_protocols" - ], - "symbolic_name": "postconf" - }, - { - "command": "/bin/ps alxwww", - "pattern": [ - "/usr/bin/openshift start master", - "/usr/bin/openshift start node", - "COMMAND", - "auditd", - "avahi", - "backupserver", - "catalina.base", - "ceilometer-poll", - "chronyd", - "cinder-volume", - "clvmd", - "cmirrord", - "corosync", - "crmd", - "dataserver", - "diagbs", - "diagserver", - "diagxps", - "dlm_controld", - "dnsmasq", - "docker", - "docker-runc-current", - "elasticsearch", - "gnocchi-metricd", - "gnome-shell", - "haproxy", - "heat-engine", - "histserver", - "httpd", - "libvirtd", - "memcached", - "mongdb", - "monserver", - "multipath", - "multipathd", - "neutron-dhcp-ag", - "neutron-l3-agen", - "neutron-server", - "nfsd", - "nginx", - "nova-compute", - "nova-conductor", - "nova-scheduler", - "ntpd", - "octavia-worker", - "openshift start master api", - "openshift start master controllers", - "openshift start node", - "ora_", - "oracle", - "ovs-vswitchd", - "pacemaker-controld", - "pacemaker_remote", - "pacemakerd", - "pkla-check-auth", - "pmcd", - "pmie", - "postmaster", - "radosgw", - "redis-server", - "rngd", - "sap", - "setup.sh", - "smbd", - "snmpd", - "spausedd", - "swift-proxy-ser", - "tuned", - "xpserver" - ], - "symbolic_name": "ps_alxwww" - }, - { - "command": "/bin/ps aux", - "pattern": [ - "/usr/bin/docker", - "/usr/bin/docker-current", - "/usr/bin/dockerd-current", - "/usr/bin/openshift start master", - "/usr/bin/openshift start node", - "COMMAND", - "STAP/8.2", - "auditd", - "backupserver", - "catalina.base", - "ceilometer-poll", - "chronyd", - "cinder-volume", - "clvmd", - "cmirrord", - "corosync", - "crmd", - "dataserver", - "diagbs", - "diagserver", - "diagxps", - "dlm_controld", - "docker", - "docker-runc-current", - "elasticsearch", - "gnocchi-metricd", - "gnome-shell", - "haproxy", - "heat-engine", - "histserver", - "httpd", - "libvirtd", - "memcached", - "mongdb", - "monserver", - "multipath", - "multipathd", - "mysqld", - "neutron-dhcp-ag", - "neutron-l3-agen", - "neutron-server", - "nfsd", - "nginx", - "nova-compute", - "nova-conductor", - 
"nova-scheduler", - "ntpd", - "oc observe csr", - "octavia-worker", - "openshift start master api", - "openshift start master controllers", - "openshift start node", - "ora_", - "oracle", - "ovs-vswitchd", - "pacemaker-controld", - "pacemaker_remote", - "pacemakerd", - "pkla-check-auth", - "pmcd", - "pmie", - "postmaster", - "radosgw", - "redis-server", - "rngd", - "sap", - "setup.sh", - "smbd", - "snmpd", - "spausedd", - "swift-proxy-ser", - "tuned", - "xpserver" - ], - "symbolic_name": "ps_aux" - }, - { - "command": "/bin/ps auxcww", - "pattern": [], - "symbolic_name": "ps_auxcww" - }, - { - "command": "/bin/ps auxww", - "pattern": [ - "/opt/perf/bin/midaemon", - "/sbin/multipathd", - "/sbin/rngd", - "/usr/bin/openshift start master", - "/usr/bin/openshift start node", - "/usr/bin/teamd", - "/usr/sbin/fcoemon --syslog", - "COMMAND", - "auditd", - "backupserver", - "catalina.base", - "ceilometer-poll", - "chronyd", - "cinder-volume", - "clvmd", - "cmirrord", - "corosync", - "crmd", - "dataserver", - "diagbs", - "diagserver", - "diagxps", - "dlm_controld", - "docker", - "docker-runc-current", - "elasticsearch", - "gnocchi-metricd", - "gnome-shell", - "goferd", - "greenplum", - "haproxy", - "heat-engine", - "histserver", - "httpd", - "iscsid", - "libvirtd", - "memcached", - "mongdb", - "monserver", - "multipath", - "multipathd", - "neutron-dhcp-ag", - "neutron-l3-agen", - "neutron-server", - "nfs-server", - "nfsd", - "nginx", - "nova-compute", - "nova-conductor", - "nova-scheduler", - "ntpd", - "octavia-worker", - "openshift start master api", - "openshift start master controllers", - "openshift start node", - "ora_", - "oracle", - "ovs-vswitchd", - "pacemaker-controld", - "pacemaker_remote", - "pacemakerd", - "pkla-check-auth", - "pmcd", - "pmie", - "postgres", - "postmaster", - "radosgw", - "redis-server", - "rngd", - "sap", - "setup.sh", - "smbd", - "snmpd", - "spausedd", - "swift-proxy-ser", - "target_completi", - "tgtd", - "tuned", - "xpserver" - ], - "symbolic_name": "ps_auxww" - }, - { - "command": "/bin/ps -ef", - "pattern": [ - "/usr/bin/openshift start master", - "/usr/bin/openshift start node", - "CMD", - "COMMAND", - "auditd", - "backupserver", - "catalina.base", - "ceilometer-poll", - "chronyd", - "cinder-volume", - "clvmd", - "cmirrord", - "corosync", - "crmd", - "dataserver", - "diagbs", - "diagserver", - "diagxps", - "dlm_controld", - "docker", - "docker-runc-current", - "elasticsearch", - "gnocchi-metricd", - "gnome-shell", - "haproxy", - "heat-engine", - "histserver", - "httpd", - "libvirtd", - "memcached", - "mongdb", - "monserver", - "multipath", - "multipathd", - "neutron-dhcp-ag", - "neutron-l3-agen", - "neutron-ns-metadata-proxy", - "neutron-server", - "nfsd", - "nginx", - "nginx: master process", - "nginx: worker process", - "nova-compute", - "nova-conductor", - "nova-scheduler", - "ntpd", - "octavia-worker", - "openshift start master api", - "openshift start master controllers", - "openshift start node", - "ora_", - "oracle", - "ovs-vswitchd", - "pacemaker-controld", - "pacemaker_remote", - "pacemakerd", - "pkla-check-auth", - "pmcd", - "pmie", - "postmaster", - "radosgw", - "redis-server", - "rngd", - "sap", - "setup.sh", - "smbd", - "snmpd", - "spausedd", - "swift-proxy-ser", - "tuned", - "xpserver" - ], - "symbolic_name": "ps_ef" - }, - { - "command": "/bin/ps -eo pid,ppid,comm", - "pattern": [], - "symbolic_name": "ps_eo" - }, - { - "command": "/usr/bin/openssl x509 -in /etc/puppetlabs/puppet/ssl/ca/ca_crt.pem -enddate -noout", - "pattern": [], - 
"symbolic_name": "puppet_ca_cert_expire_date" - }, - { - "command": "/sbin/pvs --nameprefixes --noheadings --separator='|' -a -o pv_all,vg_name --config=\"global{locking_type=0}\"", - "pattern": [], - "symbolic_name": "pvs_noheadings" - }, - { - "command": "/usr/sbin/rabbitmqctl list_queues name messages consumers auto_delete", - "pattern": [], - "symbolic_name": "rabbitmq_queues" - }, - { - "command": "/usr/sbin/rabbitmqctl report", - "pattern": [], - "symbolic_name": "rabbitmq_report" - }, - { - "command": "/usr/sbin/rabbitmqctl list_users", - "pattern": [], - "symbolic_name": "rabbitmq_users" - }, - { - "command": "/usr/bin/readlink -e /etc/mtab", - "pattern": [], - "symbolic_name": "readlink_e_etc_mtab" - }, - { - "command": "/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-client-current.pem", - "pattern": [], - "symbolic_name": "readlink_e_shift_cert_client" - }, - { - "command": "/usr/bin/readlink -e /etc/origin/node/certificates/kubelet-server-current.pem", - "pattern": [], - "symbolic_name": "readlink_e_shift_cert_server" - }, - { - "command": "python -m insights.tools.cat --no-header rhev_data_center", - "pattern": [], - "symbolic_name": "rhev_data_center" - }, - { - "command": "/usr/bin/openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer", - "pattern": [], - "symbolic_name": "rhsm_katello_default_ca_cert" - }, - { - "command": "/usr/sbin/rndc status", - "pattern": [], - "symbolic_name": "rndc_status" - }, - { - "command": "/usr/bin/crontab -l -u root", - "pattern": [ - "heat-manage" - ], - "symbolic_name": "root_crontab" - }, - { - "command": "/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", - "pattern": [], - "symbolic_name": "rpm_V_packages" - }, - { - "command": "/usr/bin/awk 'BEGIN { pipe=\"openssl x509 -noout -subject -enddate\"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf(\"\\n\")}' /etc/pki/katello/certs/katello-server-ca.crt", - "pattern": [], - "symbolic_name": "satellite_custom_ca_chain" - }, - { - "command": "/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'", - "pattern": [], - "symbolic_name": "satellite_mongodb_storage_engine" - }, - { - "command": "/usr/bin/sealert -l \"*\"", - "pattern": [], - "symbolic_name": "sealert" - }, - { - "command": "/usr/sbin/sestatus -b", - "pattern": [], - "symbolic_name": "sestatus" - }, - { - "command": "/usr/bin/smbstatus -p", - "pattern": [], - "symbolic_name": "smbstatus_p" - }, - { - "command": "/usr/bin/scl --list", - "pattern": [], - "symbolic_name": "software_collections_list" - }, - { - "command": "/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d", - "pattern": [], - "symbolic_name": "spamassassin_channels" - }, - { - "command": "/usr/sbin/subscription-manager identity", - "pattern": [], - "symbolic_name": "subscription_manager_id" - }, - { - "command": "/usr/bin/find /etc/pki/product-default/ /etc/pki/product/ -name '*pem' -exec rct cat-cert --no-content '{}' \\;", - "pattern": [ - "ID:" - ], - "symbolic_name": "subscription_manager_installed_product_ids" - }, - { - "command": "/bin/ls -l /etc/ssh/sshd_config", - "pattern": [], - "symbolic_name": "sshd_config_perms" - }, - { - "command": "/sbin/sysctl -a", - "pattern": [], - "symbolic_name": "sysctl" - }, - { - "command": "/bin/systemctl cat dnsmasq.service", - "pattern": [], - "symbolic_name": "systemctl_cat_dnsmasq_service" - }, - { - "command": "/bin/systemctl cat rpcbind.socket", - "pattern": [], - "symbolic_name": "systemctl_cat_rpcbind_socket" - 
}, - { - "command": "/bin/systemctl show openstack-cinder-volume", - "pattern": [], - "symbolic_name": "systemctl_cinder_volume" - }, - { - "command": "/bin/systemctl list-unit-files", - "pattern": [], - "symbolic_name": "systemctl_list_unit_files" - }, - { - "command": "/bin/systemctl list-units", - "pattern": [], - "symbolic_name": "systemctl_list_units" - }, - { - "command": "/bin/systemctl show mariadb", - "pattern": [], - "symbolic_name": "systemctl_mariadb" - }, - { - "command": "/bin/systemctl show qpidd", - "pattern": [], - "symbolic_name": "systemctl_qpidd" - }, - { - "command": "/bin/systemctl show qdrouterd", - "pattern": [], - "symbolic_name": "systemctl_qdrouterd" - }, - { - "command": "/bin/systemctl show httpd", - "pattern": [], - "symbolic_name": "systemctl_httpd" - }, - { - "command": "/bin/systemctl show nginx", - "pattern": [], - "symbolic_name": "systemctl_nginx" - }, - { - "command": "/bin/systemctl show smart_proxy_dynflow_core", - "pattern": [], - "symbolic_name": "systemctl_smartpdc" - }, - { - "command": "/bin/systemctl show *.service", - "pattern": [], - "symbolic_name": "systemctl_show_all_services" - }, - { - "command": "/bin/systemctl show *.target", - "pattern": [], - "symbolic_name": "systemctl_show_target" - }, - { - "command": "/bin/systemd-analyze blame", - "pattern": [], - "symbolic_name": "systemd_analyze_blame" - }, - { - "command": "/usr/bin/systemctl cat docker.service", - "pattern": [], - "symbolic_name": "systemd_docker" - }, - { - "command": "/usr/bin/systemctl cat atomic-openshift-node.service", - "pattern": [], - "symbolic_name": "systemd_openshift_node" - }, - { - "command": "/bin/systool -b scsi -v", - "pattern": [], - "symbolic_name": "systool_b_scsi_v" - }, - { - "command": "/usr/bin/testparm -s", - "pattern": [ - "Server role:", - "[" - ], - "symbolic_name": "testparm_s" - }, - { - "command": "/usr/bin/testparm -v -s", - "pattern": [ - "Server role:", - "[", - "server schannel" - ], - "symbolic_name": "testparm_v_s" - }, - { - "command": "/usr/bin/find /usr/share -maxdepth 1 -name 'tomcat*' -exec /bin/grep -R -s 'VirtualDirContext' --include '*.xml' '{}' +", - "pattern": [], - "symbolic_name": "tomcat_vdc_fallback" - }, - { - "command": "/usr/sbin/tuned-adm list", - "pattern": [], - "symbolic_name": "tuned_adm" - }, - { - "command": "/bin/uname -a", - "pattern": [], - "symbolic_name": "uname" - }, - { - "command": "/usr/bin/uptime", - "pattern": [], - "symbolic_name": "uptime" - }, - { - "command": "/usr/bin/vdo status", - "pattern": [], - "symbolic_name": "vdo_status" - }, - { - "command": "/sbin/vgdisplay -vv", - "pattern": [], - "symbolic_name": "vgdisplay" - }, - { - "command": "/sbin/vgs --nameprefixes --noheadings --separator='|' -a -o vg_all --config=\"global{locking_type=0}\"", - "pattern": [], - "symbolic_name": "vgs_noheadings" - }, - { - "command": "/usr/bin/virsh --readonly list --all", - "pattern": [], - "symbolic_name": "virsh_list_all" - }, - { - "command": "/usr/sbin/virt-what", - "pattern": [], - "symbolic_name": "virt_what" - }, - { - "command": "yum -C --noplugins list available", - "pattern": [], - "symbolic_name": "yum_list_available" - }, - { - "command": "yum -C --noplugins list installed", - "pattern": [], - "symbolic_name": "yum_list_installed" - }, - { - "command": "/usr/bin/yum -C --noplugins repolist", - "pattern": [], - "symbolic_name": "yum_repolist" - }, - { - "command": "/usr/bin/yum -C updateinfo list", - "pattern": [], - "symbolic_name": "yum_updateinfo" - }, - { - "command": "/usr/sbin/zdump -v 
/etc/localtime -c 2019,2039", - "pattern": [], - "symbolic_name": "zdump_v" - } - ], - "files": [ - { - "file": "/etc/abrt/plugins/CCpp.conf", - "pattern": [ - "CreateCoreBacktrace" - ], - "symbolic_name": "abrt_ccpp_conf" - }, - { - "file": "/root/.config/openshift/hosts", - "pattern": [ - "[" - ], - "symbolic_name": "openshift_hosts" - }, - { - "file": "/etc/tower/settings.py", - "pattern": [ - "AWX_CLEANUP_PATHS", - "AWX_PROOT_BASE_PATH" - ], - "symbolic_name": "ansible_tower_settings" - }, - { - "file": "/etc/redhat-access-insights/machine-id", - "pattern": [], - "symbolic_name": "machine_id1" - }, - { - "file": "/etc/redhat_access_proactive/machine-id", - "pattern": [], - "symbolic_name": "machine_id2" - }, - { - "file": "/etc/machine-id", - "pattern": [], - "symbolic_name": "etc_machine_id" - }, - { - "file": "/etc/udev/rules.d/40-redhat.rules", - "pattern": [ - "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" - ], - "symbolic_name": "etc_udev_40_redhat_rules" - }, - { - "file": "/run/udev/rules.d/40-redhat.rules", - "pattern": [ - "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" - ], - "symbolic_name": "etc_udev_40_redhat_rules" - }, - { - "file": "/usr/lib/udev/rules.d/40-redhat.rules", - "pattern": [ - "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" - ], - "symbolic_name": "etc_udev_40_redhat_rules" - }, - { - "file": "/usr/local/lib/udev/rules.d/40-redhat.rules", - "pattern": [ - "SUBSYSTEM!=\"memory\", ACTION!=\"add\", GOTO=\"memory_hotplug_end\"" - ], - "symbolic_name": "etc_udev_40_redhat_rules" - }, - { - "file": "/proc/1/cgroup", - "pattern": [], - "symbolic_name": "init_process_cgroup" - }, - { - "file": "/etc/insights-client/insights-client.conf", - "pattern": [ - "[", - "auto_update", - "core_collect" - ], - "symbolic_name": "insights_client_conf" - }, - { - "file": "/etc/insights-client/machine-id", - "pattern": [], - "symbolic_name": "machine_id3" - }, - { - "file": "/var/log/audit/audit.log", - "pattern": [ - "comm=\"virtlogd\" name=\"console.log\"", - "type=AVC" - ], - "symbolic_name": "audit_log" - }, - { - "file": "/etc/cloud/cloud.cfg.d/99-custom-networking.cfg", - "pattern": [], - "symbolic_name": "cloud_init_custom_network" - }, - { - "file": "/var/log/cloud-init.log", - "pattern": [ - "401", - "Attempting to load yaml from string of length 59 with allowed root types", - "Failed loading yaml blob. 
Invalid format at line 1 column 1", - "Network config is likely broken", - "No available network renderers found", - "Read 59 bytes from /etc/cloud/cloud.cfg.d/99-datasource.cfg", - "Unable to render networking", - "WARNING", - "bad status code", - "failed", - "http://169.254.169.254", - "sysconfig", - "url_helper.py" - ], - "symbolic_name": "cloud_init_log" - }, - { - "file": "/etc/audit/auditd.conf", - "pattern": [], - "symbolic_name": "auditd_conf" - }, - { - "file": "/sys/fs/selinux/avc/hash_stats", - "pattern": [], - "symbolic_name": "avc_hash_stats" - }, - { - "file": "/sys/fs/selinux/avc/cache_threshold", - "pattern": [], - "symbolic_name": "avc_cache_threshold" - }, - { - "file": "/proc/net/bonding/()*bond.*", - "pattern": [], - "symbolic_name": "bond" - }, - { - "file": "/var/log/candlepin/candlepin.log", - "pattern": [ - "Candlepin initializing context", - "No Dead Letter Address configured for queue event.org.candlepin.audit.DatabaseListener in AddressSettings" - ], - "symbolic_name": "candlepin_log" - }, - { - "file": "/var/log/tomcat/()*catalina\\.out", - "pattern": [ - "NoCobblerTokenException: We had an error trying to login." - ], - "symbolic_name": "catalina_out" - }, - { - "file": "/var/log/tomcat6/()*catalina\\.out", - "pattern": [ - "NoCobblerTokenException: We had an error trying to login." - ], - "symbolic_name": "catalina_out" - }, - { - "file": "/tomcat-logs/tomcat/()*catalina\\.out", - "pattern": [ - "NoCobblerTokenException: We had an error trying to login." - ], - "symbolic_name": "catalina_out" - }, - { - "file": "/tomcat-logs/tomcat6/()*catalina\\.out", - "pattern": [ - "NoCobblerTokenException: We had an error trying to login." - ], - "symbolic_name": "catalina_out" - }, - { - "file": "/proc/driver/cciss/()*cciss.*", - "pattern": [], - "symbolic_name": "cciss" - }, - { - "file": "/sys/bus/usb/drivers/cdc_wdm/module/refcnt", - "pattern": [], - "symbolic_name": "cdc_wdm" - }, - { - "file": "/etc/ceilometer/ceilometer.conf", - "pattern": [], - "symbolic_name": "ceilometer_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/ceilometer/etc/ceilometer/ceilometer.conf", - "pattern": [], - "symbolic_name": "ceilometer_conf" - }, - { - "file": "/var/log/ceilometer/compute.log", - "pattern": [ - "Cannot inspect data of" - ], - "symbolic_name": "ceilometer_compute_log" - }, - { - "file": "/var/log/containers/ceilometer/compute.log", - "pattern": [ - "Cannot inspect data of" - ], - "symbolic_name": "ceilometer_compute_log" - }, - { - "file": "/var/log/ceilometer/collector.log", - "pattern": [ - "DBDataError", - "ERROR", - "Out of range value for column", - "pymysql.err.DataError" - ], - "symbolic_name": "ceilometer_collector_log" - }, - { - "file": "/var/log/containers/ceilometer/collector.log", - "pattern": [ - "DBDataError", - "ERROR", - "Out of range value for column", - "pymysql.err.DataError" - ], - "symbolic_name": "ceilometer_collector_log" - }, - { - "file": "/etc/ceph/ceph.conf", - "pattern": [ - "[", - "wait" - ], - "symbolic_name": "ceph_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/ceph/etc/ceph/ceph.conf", - "pattern": [ - "[", - "wait" - ], - "symbolic_name": "ceph_conf" - }, - { - "file": "/var/log/ceph/()*ceph-osd.*\\.log$", - "pattern": [ - "common/Thread.cc" - ], - "symbolic_name": "ceph_osd_log" - }, - { - "file": "/proc/cgroups", - "pattern": [], - "symbolic_name": "cgroups" - }, - { - "file": "/etc/chrony.conf", - "pattern": [], - "symbolic_name": "chrony_conf" - }, - { - "file": "/var/lib/pacemaker/cib/cib.xml", - 
"pattern": [], - "symbolic_name": "cib_xml" - }, - { - "file": "/etc/cinder/cinder.conf", - "pattern": [], - "symbolic_name": "cinder_conf" - }, - { - "file": "/var/log/cinder/cinder-api.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "cinder_api_log" - }, - { - "file": "/var/log/containers/cinder/cinder-api.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "cinder_api_log" - }, - { - "file": "/var/lib/config-data/puppet-generated/cinder/etc/cinder/cinder.conf", - "pattern": [], - "symbolic_name": "cinder_conf" - }, - { - "file": "/var/log/containers/cinder/cinder-volume.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "cinder_volume_log" - }, - { - "file": "/var/log/containers/cinder/volume.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "cinder_volume_log" - }, - { - "file": "/var/log/cinder/volume.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "cinder_volume_log" - }, - { - "file": "/etc/cluster/cluster.conf", - "pattern": [ - "clusternode name=" - ], - "symbolic_name": "cluster_conf" - }, - { - "file": "/proc/cmdline", - "pattern": [], - "symbolic_name": "cmdline" - }, - { - "file": "/etc/cni/net.d/87-podman-bridge.conflist", - "pattern": [], - "symbolic_name": "cni_podman_bridge_conf" - }, - { - "file": "/etc/corosync/corosync.conf", - "pattern": [], - "symbolic_name": "corosync_conf" - }, - { - "file": "/etc/sysconfig/corosync", - "pattern": [], - "symbolic_name": "corosync" - }, - { - "file": "/proc/cpuinfo", - "pattern": [], - "symbolic_name": "cpuinfo" - }, - { - "file": "/sys/devices/system/clocksource/clocksource0/current_clocksource", - "pattern": [], - "symbolic_name": "current_clocksource" - }, - { - "file": "/sys/devices/system/cpu/smt/active", - "pattern": [], - "symbolic_name": "cpu_smt_active" - }, - { - "file": "/sys/devices/system/cpu/smt/control", - "pattern": [], - "symbolic_name": "cpu_smt_control" - }, - { - "file": "/sys/devices/system/cpu/vulnerabilities/meltdown", - "pattern": [], - "symbolic_name": "cpu_vulns_meltdown" - }, - { - "file": "/sys/devices/system/cpu/vulnerabilities/spectre_v1", - "pattern": [], - "symbolic_name": "cpu_vulns_spectre_v1" - }, - { - "file": "/sys/devices/system/cpu/vulnerabilities/spectre_v2", - "pattern": [], - "symbolic_name": "cpu_vulns_spectre_v2" - }, - { - "file": "/sys/devices/system/cpu/vulnerabilities/spec_store_bypass", - "pattern": [], - "symbolic_name": "cpu_vulns_spec_store_bypass" - }, - { - "file": "/sys/fs/cgroup/cpuset/cpuset.cpus", - "pattern": [], - "symbolic_name": "cpuset_cpus" - }, - { - "file": "/etc/cron.daily/rhsmd", - "pattern": [ - "if [ -n $config ]; then" - ], - "symbolic_name": "cron_daily_rhsmd" - }, - { - "file": "/etc/crypto-policies/config", - "pattern": [], - "symbolic_name": "crypto_policies_config" - }, - { - "file": "/etc/crypto-policies/state/current", - "pattern": [], - "symbolic_name": "crypto_policies_state_current" - }, - { - "file": "/etc/crypto-policies/back-ends/opensshserver.config", - "pattern": [], - "symbolic_name": "crypto_policies_opensshserver" - }, - { - "file": "/etc/crypto-policies/back-ends/bind.config", - "pattern": [], - "symbolic_name": "crypto_policies_bind" - }, - { - 
"file": "/sys/kernel/debug/x86/retp_enabled", - "pattern": [], - "symbolic_name": "x86_retp_enabled" - }, - { - "file": "/var/log/dirsrv/.*/()*(errors|errors\\.2.*)", - "pattern": [ - "DSRetroclPlugin - delete_changerecord: could not delete change record", - "We recommend to increase the entry cache size nsslapd-cachememsize" - ], - "symbolic_name": "dirsrv_errors" - }, - { - "file": "/sys/module/dm_mod/parameters/use_blk_mq", - "pattern": [], - "symbolic_name": "dm_mod_use_blk_mq" - }, - { - "file": "/var/log/dmesg", - "pattern": [ - "Amazon EC2", - "CVE-2017-1000364", - "CVE-2018-14634", - "FEATURE IBPB_SUPPORT", - "FEATURE SPEC_CTRL", - "Kernel page table isolation", - "L1TF", - "L1Tf", - "Linux version", - "PM: Creating hibernation image", - "PM: hibernation entry", - "PM: hibernation exit", - "Secure boot enabled", - "__cpufreq_add_dev", - "hv_vmbus: probe failed for device", - "l1tf", - "x86/pti" - ], - "symbolic_name": "dmesg_log" - }, - { - "file": "/etc/dnf/dnf.conf", - "pattern": [ - "[", - "best" - ], - "symbolic_name": "dnf_conf" - }, - { - "file": "/etc/dnf/modules.d/.*\\.module", - "pattern": [], - "symbolic_name": "dnf_modules" - }, - { - "file": "/etc/dnsmasq.conf", - "pattern": [], - "symbolic_name": "dnsmasq_config" - }, - { - "file": "/etc/sysconfig/docker-storage-setup", - "pattern": [], - "symbolic_name": "docker_storage_setup" - }, - { - "file": "/etc/sysconfig/docker-storage", - "pattern": [], - "symbolic_name": "docker_storage" - }, - { - "file": "/etc/sysconfig/docker", - "pattern": [], - "symbolic_name": "docker_sysconfig" - }, - { - "file": "/usr/lib/dracut/modules.d/99kdumpbase/kdump-capture.service", - "pattern": [], - "symbolic_name": "dracut_kdump_capture_service" - }, - { - "file": "/etc/etcd/etcd.conf", - "pattern": [ - "ETCD_DATA_DIR", - "[" - ], - "symbolic_name": "etcd_conf" - }, - { - "file": "/var/log/ovirt-engine/engine.log", - "pattern": [ - "Data Center", - "ERROR", - "INFO", - "Low disk space. 
Host", - "VDS_LOW_DISK_SPACE_ERROR", - "org.ovirt.engine.core.dal.dbbroker.auditloghandling.AuditLogDirector" - ], - "symbolic_name": "engine_log" - }, - { - "file": "/var/log/ovirt-engine/server.log", - "pattern": [ - "INFO [org.wildfly.extension.undertow", - "Registered web context: '/ovirt-engine/api' for server" - ], - "symbolic_name": "ovirt_engine_server_log" - }, - { - "file": "/var/log/ovirt-engine/ui.log", - "pattern": [ - "Uncaught exception: com.google.gwt.event.shared.UmbrellaException" - ], - "symbolic_name": "ovirt_engine_ui_log" - }, - { - "file": "/etc/systemd/()*journald\\.conf", - "pattern": [], - "symbolic_name": "etc_journald_conf" - }, - { - "file": "/etc/systemd/journald.conf.d/()*.+\\.conf", - "pattern": [], - "symbolic_name": "etc_journald_conf_d" - }, - { - "file": "/etc/firewalld/firewalld.conf", - "pattern": [ - "AllowZoneDrifting" - ], - "symbolic_name": "firewalld_conf" - }, - { - "file": "/var/log/ipa/healthcheck/healthcheck.log", - "pattern": [], - "symbolic_name": "freeipa_healthcheck_log" - }, - { - "file": "/etc/fstab", - "pattern": [], - "symbolic_name": "fstab" - }, - { - "file": "/etc/my.cnf.d/galera.cnf", - "pattern": [], - "symbolic_name": "galera_cnf" - }, - { - "file": "/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", - "pattern": [], - "symbolic_name": "galera_cnf" - }, - { - "file": "/boot/efi/EFI/redhat/grub.conf", - "pattern": [], - "symbolic_name": "grub_efi_conf" - }, - { - "file": "/boot/efi/EFI/redhat/grubenv", - "pattern": [], - "symbolic_name": "grub2_efi_grubenv" - }, - { - "file": "/boot/grub/grub.conf", - "pattern": [], - "symbolic_name": "grub_conf" - }, - { - "file": "/boot/efi/EFI/redhat/grub.cfg", - "pattern": [], - "symbolic_name": "grub2_efi_cfg" - }, - { - "file": "/boot/grub2/grub.cfg", - "pattern": [], - "symbolic_name": "grub2_cfg" - }, - { - "file": "/boot/grub2/grubenv", - "pattern": [], - "symbolic_name": "grub2_grubenv" - }, - { - "file": "/etc/haproxy/haproxy.cfg", - "pattern": [], - "symbolic_name": "haproxy_cfg" - }, - { - "file": "/var/lib/config-data/puppet-generated/haproxy/etc/haproxy/haproxy.cfg", - "pattern": [], - "symbolic_name": "haproxy_cfg" - }, - { - "file": "/etc/opt/rh/rh-haproxy18/haproxy/haproxy.cfg", - "pattern": [], - "symbolic_name": "haproxy_cfg_scl" - }, - { - "file": "/etc/heat/heat.conf", - "pattern": [], - "symbolic_name": "heat_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/heat/etc/heat/heat.conf", - "pattern": [], - "symbolic_name": "heat_conf" - }, - { - "file": "/var/log/heat/heat-api.log", - "pattern": [ - "Too many connections" - ], - "symbolic_name": "heat_api_log" - }, - { - "file": "/var/log/heat/heat_api.log", - "pattern": [ - "Too many connections" - ], - "symbolic_name": "heat_api_log" - }, - { - "file": "/var/log/containers/heat/heat_api.log", - "pattern": [ - "Too many connections" - ], - "symbolic_name": "heat_api_log" - }, - { - "file": "/etc/hosts", - "pattern": [], - "symbolic_name": "hosts" - }, - { - "file": "/var/log/httpd/error_log", - "pattern": [ - "(28)No space left on device: ", - "AH00485: scoreboard is full, not at MaxRequestWorkers", - "The mpm module (prefork.c) is not supported by mod_http2", - "[crit] Memory allocation failed, aborting process", - "and would exceed the ServerLimit value of ", - "consider raising the MaxClients setting", - "consider raising the MaxRequestWorkers setting", - "exceed ServerLimit of", - "exceeds ServerLimit value of", - "exit signal Segmentation fault", - "manager_handler CONFIG error: MEM: Can't 
update or insert node", - "manager_handler ENABLE-APP error: MEM: Can't update or insert context", - "manager_handler ENABLE-APP error: MEM: Can't update or insert host alias" - ], - "symbolic_name": "httpd_error_log" - }, - { - "file": "/opt/rh/httpd24/root/etc/httpd/logs/error_log", - "pattern": [ - "The mpm module (prefork.c) is not supported by mod_http2" - ], - "symbolic_name": "httpd24_httpd_error_log" - }, - { - "file": "/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log", - "pattern": [ - "The mpm module (prefork.c) is not supported by mod_http2" - ], - "symbolic_name": "jbcs_httpd24_httpd_error_log" - }, - { - "file": "/etc/sysconfig/network-scripts/()*ifcfg-.*", - "pattern": [], - "symbolic_name": "ifcfg" - }, - { - "file": "/etc/sysconfig/network-scripts/()*route-.*", - "pattern": [], - "symbolic_name": "ifcfg_static_route" - }, - { - "file": "/etc/ImageMagick/()*policy\\.xml", - "pattern": [ - "", - "" - ], - "symbolic_name": "imagemagick_policy" - }, - { - "file": "/usr/lib64/ImageMagick-6.5.4/config/()*policy\\.xml", - "pattern": [ - "", - "" - ], - "symbolic_name": "imagemagick_policy" - }, - { - "file": "/usr/lib/ImageMagick-6.5.4/config/()*policy\\.xml", - "pattern": [ - "", - "" - ], - "symbolic_name": "imagemagick_policy" - }, - { - "file": "/etc/vmware-tools/tools.conf", - "pattern": [], - "symbolic_name": "vmware_tools_conf" - }, - { - "file": "/proc/interrupts", - "pattern": [], - "symbolic_name": "interrupts" - }, - { - "file": "/var/log/ipaupgrade.log", - "pattern": [ - "wait_for_open_ports: localhost" - ], - "symbolic_name": "ipaupgrade_log" - }, - { - "file": "/etc/ipsec.conf", - "pattern": [ - "include" - ], - "symbolic_name": "ipsec_conf" - }, - { - "file": "/etc/sysconfig/iptables", - "pattern": [], - "symbolic_name": "iptables_permanent" - }, - { - "file": "/etc/ironic/ironic.conf", - "pattern": [ - "[" - ], - "symbolic_name": "ironic_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf", - "pattern": [ - "[" - ], - "symbolic_name": "ironic_conf" - }, - { - "file": "/var/log/ironic-inspector/ironic-inspector.log", - "pattern": [ - "Certificate did not match expected hostname", - "ERROR requests.packages.urllib3.connection" - ], - "symbolic_name": "ironic_inspector_log" - }, - { - "file": "/var/log/containers/ironic-inspector/ironic-inspector.log", - "pattern": [ - "Certificate did not match expected hostname", - "ERROR requests.packages.urllib3.connection" - ], - "symbolic_name": "ironic_inspector_log" - }, - { - "file": "/etc/kdump.conf", - "pattern": [], - "symbolic_name": "kdump_conf" - }, - { - "file": "/sys/kernel/kexec_crash_size", - "pattern": [], - "symbolic_name": "kexec_crash_size" - }, - { - "file": "/etc/()*krb5\\.conf", - "pattern": [], - "symbolic_name": "krb5" - }, - { - "file": "/sys/kernel/mm/ksm/run", - "pattern": [], - "symbolic_name": "ksmstate" - }, - { - "file": "/etc/libssh/libssh_client.config", - "pattern": [ - "Include" - ], - "symbolic_name": "libssh_client_config" - }, - { - "file": "/etc/libssh/libssh_server.config", - "pattern": [ - "Include" - ], - "symbolic_name": "libssh_server_config" - }, - { - "file": "/var/log/libvirt/libvirtd.log", - "pattern": [ - "qemuMigrationSrcNBDStorageCopyBlockdev:", - "virDomainBlockCommit:", - "virDomainBlockCopy:", - "virDomainBlockPull:", - "virDomainSnapshotCreateXML:" - ], - "symbolic_name": "libvirtd_log" - }, - { - "file": "/etc/security/()*limits\\.conf", - "pattern": [], - "symbolic_name": "limits_conf" - }, - { - "file": 
"/etc/security/limits.d/()*.*\\.conf", - "pattern": [], - "symbolic_name": "limits_d" - }, - { - "file": "/etc/lvm/lvm.conf", - "pattern": [ - "auto_activation_volume_list", - "filter", - "locking_type", - "use_lvmetad", - "volume_list" - ], - "symbolic_name": "lvm_conf" - }, - { - "file": "/var/log/mariadb/mariadb.log", - "pattern": [ - "Too many open files" - ], - "symbolic_name": "mariadb_log" - }, - { - "file": "/proc/mdstat", - "pattern": [], - "symbolic_name": "mdstat" - }, - { - "file": "/proc/meminfo", - "pattern": [], - "symbolic_name": "meminfo" - }, - { - "file": "/var/log/messages", - "pattern": [ - " invoked oom-killer: ", - "(enic): transmit queue 0 timed out", - ", type vxfs) has no security xattr handler", - "/input/input", - "/usr/lib/ocf/resource.d/heartbeat/azure-lb: line 91: kill: Binary: arguments must be process or job IDs", - "17763", - ": segfault at ", - "Abort command issued", - "Broken pipe", - "Buffer I/O error on device", - "Cannot allocate memory", - "Cannot assign requested address", - "Cannot assign requested address: AH00072", - "Corosync main process was not scheduled (@", - "Could not set", - "DHCPv4 lease renewal requested", - "DMA Status error. Resetting chip", - "Detected Tx Unit Hang", - "Device is still in reset", - "Disable lvmetad in lvm.conf. lvmetad should never be enabled in a clustered environment. Set use_lvmetad=0 and kill the lvmetad process", - "Error I40E_AQ_RC_EINVAL adding RX filters on PF, promiscuous mode forced on", - "Error running DeviceResume dm_task_run failed", - "Exception happened during processing of request from", - "File system is filling up", - "High collision rate in packet sends", - "High directory name cache miss rate", - "High number of saturated processors", - "High per CPU processor utilization", - "High per CPU system call rate", - "Hyper-V Host", - "List /apis/image.openshift.io/v1/images", - "Loop callback failed with: Cannot allocate memory", - "Low buffer cache read hit ratio", - "Low free swap space", - "Low random number entropy available", - "MDC/MDIO access timeout", - "Medium access timeout failure. 
Offlining disk!", - "NETDEV WATCHDOG", - "Neighbour table overflow", - "NetworkManager state is now CONNECTED_SITE", - "Not scheduled for", - "Out of MCCQ wrbs", - "Out of memory: Kill process", - "PPM exceeds tolerance 500 PPM", - "ProcessExecutionError: Exit code: 1; Stdin: ; Stdout: ; Stderr: setting the network namespace", - "Result of start operation for clvmd ", - "SCSI error: return code =", - "Severe demand for real memory", - "Some CPU busy executing in system mode", - "Steal time is >", - "TCP listen overflows", - "TCP request queue full SYN cookie replies", - "TCP request queue full drops", - "TX driver issue detected, PF reset issued", - "The threshold number of context switches per second per CPU", - "Unit ip6tables.service entered failed state", - "Unit iptables.service entered failed state", - "Virtualization daemon", - "] trap divide error ", - "_NET_ACTIVE_WINDOW", - "as active slave; either", - "callbacks suppressed", - "canceled DHCP transaction, DHCP client pid", - "clearing Tx timestamp hang", - "device-mapper: multipath: Failing path", - "does not seem to be present, delaying initialization", - "drivers/input/input-leds.c:115 input_leds_connect", - "enabling it in", - "end_request: I/O error, dev", - "error Error on attach: Node not found", - "eviction manager: must evict pod(s) to reclaim nodefsInodes", - "ext4_ext_search_left", - "failed while handling", - "failed with error -110", - "failed: Invalid argument", - "fiid_obj_get: 'present_countdown_value': data not available", - "firewalld - dynamic firewall daemon", - "from image service failed: rpc error: code = Canceled desc = context canceled", - "host not found in upstream", - "hv_netvsc vmbus_", - "hv_netvsc: probe of vmbus_", - "hw csum failure", - "ill process ", - "in libnl.so.1", - "initiating reset due to tx timeout", - "invalid key/value pair in file /usr/lib/udev/rules.d/59-fc-wwpn-id.rules", - "ip_local_port_range: prefer different parity for start/end values", - "irq handler for vector (irq -1)", - "is down or the link is down", - "is greater than comparison timestamp", - "iscsiadm: iscsiadm: Could not log into all portals", - "kernel: BUG: soft lockup", - "kernel: CIFS VFS: Unexpected SMB signature", - "kernel: INFO: task xfsaild/md", - "kernel: Linux version", - "kernel: Memory cgroup out of memory: Kill process", - "kernel: TCP: out of memory -- consider tuning tcp_mem", - "kernel: bnx2fc: byte_count", - "kernel: lockd: Unknown symbol register_inet6addr_notifier", - "kernel: lockd: Unknown symbol unregister_inet6addr_notifier", - "kernel: megaraid_sas: FW detected to be in faultstate, restarting it", - "kernel: megasas: Found FW in FAULT state, will reset adapter.", - "kernel: nfs: server", - "khash_super_prune_nolock", - "link status up for interface", - "mode:0x20", - "multipathd.service operation timed out. 
Terminating", - "netlink_socket|ERR|fcntl: Too many open file", - "nfs_reap_expired_delegations", - "not responding, timed out", - "page allocation failure", - "per_source_limit from", - "platform microcode: firmware: requesting", - "reservation conflict", - "returned a bad sequence-id error", - "rhsm", - "rhsmd: rhsmd process exceeded runtime and was killed", - "shm_open failed, Permission denied", - "skb_copy", - "socket error sending to node", - "start request repeated too quickly for docker.service", - "state changed timeout -> done", - "swapper: page allocation failure", - "tg3_start_xmit", - "timed out", - "timeout before we got a set response", - "timing out command, waited", - "transmit queue", - "udev: renamed network interface", - "unknown filesystem type 'binfmt_misc'", - "ut of memory: ", - "watch chan error: etcdserver: mvcc: required revision has been compacted" - ], - "symbolic_name": "messages" - }, - { - "file": "/etc/()*modprobe\\.conf", - "pattern": [], - "symbolic_name": "modprobe_conf" - }, - { - "file": "/etc/modprobe.d/()*.*\\.conf", - "pattern": [], - "symbolic_name": "modprobe_d" - }, - { - "file": "/etc/mongod.conf", - "pattern": [ - "dbPath", - "storage" - ], - "symbolic_name": "mongod_conf" - }, - { - "file": "/etc/opt/rh/rh-mongodb34/mongod.conf", - "symbolic_name": "mongod_conf", - "pattern": [ - "dbPath", - "storage" - ] - }, - { - "file": "/proc/mounts", - "pattern": [], - "symbolic_name": "mounts" - }, - { - "file": "/var/opt/mssql/mssql.conf", - "pattern": [], - "symbolic_name": "mssql_conf" - }, - { - "file": "/etc/multipath.conf", - "pattern": [], - "symbolic_name": "multipath_conf" - }, - { - "file": "/var/log/mysql/mysqld.log", - "pattern": [ - "SSL error", - "Too many open files", - "[ERROR]", - "handshake with remote endpoint ssl" - ], - "symbolic_name": "mysql_log" - }, - { - "file": "/var/log/mysql.log", - "pattern": [ - "SSL error", - "Too many open files", - "[ERROR]", - "handshake with remote endpoint ssl" - ], - "symbolic_name": "mysql_log" - }, - { - "file": "/etc/sysconfig/netconsole", - "pattern": [], - "symbolic_name": "netconsole" - }, - { - "file": "/etc/NetworkManager/NetworkManager.conf", - "pattern": [], - "symbolic_name": "networkmanager_conf" - }, - { - "file": "/etc/NetworkManager/dispatcher.d/.*-dhclient", - "pattern": [], - "symbolic_name": "networkmanager_dispatcher_d" - }, - { - "file": "/proc/net/netfilter/nfnetlink_queue", - "pattern": [], - "symbolic_name": "nfnetlink_queue" - }, - { - "file": "/etc/exports", - "pattern": [ - "*", - "no_root_squash" - ], - "symbolic_name": "nfs_exports" - }, - { - "file": "/etc/exports.d/()*.*\\.exports", - "pattern": [ - "*", - "no_root_squash" - ], - "symbolic_name": "nfs_exports_d" - }, - { - "file": "/var/log/nova/nova-api.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "nova_api_log" - }, - { - "file": "/var/log/containers/nova/nova-api.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response" - ], - "symbolic_name": "nova_api_log" - }, - { - "file": "/var/log/nova/nova-compute.log", - "pattern": [ - "/console.log: Permission denied", - ": No such device or address", - "Attempting claim on node", - "Claim successful on node", - "Could not open ", - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Detach volume", - "Disk of instance is too large", - "During sync_power_state the instance has a pending task", - "ERROR nova.virt.libvirt.driver", - "ERROR 
oslo_messaging.rpc.server Command: scp -r", - "ERROR oslo_messaging.rpc.server InvalidSharedStorage", - "ERROR oslo_messaging.rpc.server Stderr: u'Device crypt-dm-uuid-mpath", - "Find Multipath device file for volume WWN", - "FlavorDiskSmallerThanImage: Flavor's disk is too small for requested image.", - "INFO nova.compute.manager", - "INFO os_brick.initiator.linuxscsi", - "Instance shutdown by itself. Calling the stop API.", - "Live Migration failure: internal error: process exited while connecting to monitor", - "Migration pre-check error: Unable to migrate", - "No such device or address", - "Resuming guest", - "Stderr: u'blockdev: cannot open", - "Successfully plugged vif VIFBridge", - "Timed out waiting for RPC response", - "cmt=off: Property '.cmt' not found", - "error: Failed to start domain", - "from mountpoint /dev", - "is not active", - "is not on shared storage", - "libvirt-guests.sh", - "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", - "libvirtError: Unable to delete file /var/lib/nova/instances/" - ], - "symbolic_name": "nova_compute_log" - }, - { - "file": "/var/log/containers/nova/nova-compute.log", - "pattern": [ - "/console.log: Permission denied", - ": No such device or address", - "Attempting claim on node", - "Claim successful on node", - "Could not open ", - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Detach volume", - "Disk of instance is too large", - "During sync_power_state the instance has a pending task", - "ERROR nova.virt.libvirt.driver", - "ERROR oslo_messaging.rpc.server Command: scp -r", - "ERROR oslo_messaging.rpc.server InvalidSharedStorage", - "ERROR oslo_messaging.rpc.server Stderr: u'Device crypt-dm-uuid-mpath", - "Find Multipath device file for volume WWN", - "FlavorDiskSmallerThanImage: Flavor's disk is too small for requested image.", - "INFO nova.compute.manager", - "INFO os_brick.initiator.linuxscsi", - "Instance shutdown by itself. 
Calling the stop API.", - "Live Migration failure: internal error: process exited while connecting to monitor", - "Migration pre-check error: Unable to migrate", - "No such device or address", - "Resuming guest", - "Stderr: u'blockdev: cannot open", - "Successfully plugged vif VIFBridge", - "Timed out waiting for RPC response", - "cmt=off: Property '.cmt' not found", - "error: Failed to start domain", - "from mountpoint /dev", - "is not active", - "is not on shared storage", - "libvirt-guests.sh", - "libvirtError: Requested operation is not valid: domain has assigned non-USB host devices", - "libvirtError: Unable to delete file /var/lib/nova/instances/" - ], - "symbolic_name": "nova_compute_log" - }, - { - "file": "/etc/nova/nova.conf", - "pattern": [], - "symbolic_name": "nova_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/nova/etc/nova/nova.conf", - "pattern": [], - "symbolic_name": "nova_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/nova_libvirt/etc/nova/nova.conf", - "pattern": [], - "symbolic_name": "nova_conf" - }, - { - "file": "/etc/nscd.conf", - "pattern": [ - "enable-cache" - ], - "symbolic_name": "nscd_conf" - }, - { - "file": "/etc/nsswitch.conf", - "pattern": [ - "HOSTS:", - "Hosts:", - "group", - "hosts:", - "passwd", - "shadow" - ], - "symbolic_name": "nsswitch_conf" - }, - { - "file": "/etc/ntp.conf", - "pattern": [], - "symbolic_name": "ntp_conf" - }, - { - "file": "/sys/module/nvme_core/parameters/io_timeout", - "pattern": [], - "symbolic_name": "nvme_core_io_timeout" - }, - { - "file": "/var/lib/config-data/puppet-generated/octavia/etc/octavia/octavia.conf", - "pattern": [ - "[", - "active_connection_max_retries", - "active_connection_rety_interval", - "admin_log_targets", - "administrative_log_facility", - "agent_request_read_timeout", - "agent_server_ca", - "agent_server_cert", - "agent_server_network_dir", - "agent_server_network_file", - "agent_tls_protocol", - "allow_pagination", - "allow_ping_health_monitors", - "allow_sorting", - "allow_tls_terminated_listeners", - "allow_vip_network_id", - "allow_vip_port_id", - "allow_vip_subnet_id", - "amp_active_retries", - "amp_active_wait_sec", - "amp_boot_network_list", - "amp_flavor_id", - "amp_image_id", - "amp_image_owner_id", - "amp_image_tag", - "amp_secgroup_list", - "amp_ssh_access_allowed", - "amphora_driver", - "amphora_expiry_age", - "amphora_udp_driver", - "anti_affinity_policy", - "api_base_uri", - "audit_map_file", - "auth_strategy", - "auth_type", - "availability_zone", - "base_cert_dir", - "base_path", - "bind_host", - "bind_ip", - "bind_port", - "build_active_retries", - "build_rate_limit", - "build_retry_interval", - "ca_certificates_file", - "cafile", - "cert_generator", - "cert_manager", - "cert_validity_time", - "cleanup_interval", - "client_ca", - "client_cert", - "compute_driver", - "connection_logging", - "connection_max_retries", - "connection_retry_interval", - "controller_ip_port_list", - "debug", - "default_health_monitor_quota", - "default_listener_quota", - "default_load_balancer_quota", - "default_member_quota", - "default_pool_quota", - "default_provider_driver", - "disable_local_log_storage", - "disable_revert", - "distributor_driver", - "driver", - "enable_anti_affinity", - "enable_proxy_headers_parsing", - "enabled", - "enabled_provider_agents", - "enabled_provider_drivers", - "endpoint_type", - "engine", - "failover_threads", - "forward_all_logs", - "get_socket_path", - "graceful_shutdown_timeout", - "haproxy_cmd", - "haproxy_stick_size", - 
"haproxy_template", - "health_check_interval", - "health_update_driver", - "health_update_threads", - "heartbeat_interval", - "heartbeat_timeout", - "ignore_req_list", - "insecure", - "lb_network_interface", - "load_balancer_expiry_age", - "loadbalancer_topology", - "log_dir", - "log_file", - "log_protocol", - "log_queue_size", - "log_retry_count", - "log_retry_interval", - "logging_template_override", - "max_process_warning_percent", - "max_retries", - "max_workers", - "memcached_servers", - "network_driver", - "octavia_plugins", - "pagination_max_limit", - "policy_file", - "port_detach_timeout", - "provider_agent_shutdown_timeout", - "random_amphora_name_length", - "region_name", - "respawn_count", - "respawn_interval", - "rest_request_conn_timeout", - "rest_request_read_timeout", - "retry_interval", - "rpc_thread_pool_size", - "server_ca", - "service_name", - "signing_digest", - "sock_rlimit", - "spare_amphora_pool_size", - "spare_check_interval", - "stats_max_processes", - "stats_request_timeout", - "stats_socket_path", - "stats_update_driver", - "stats_update_threads", - "status_max_processes", - "status_request_timeout", - "status_socket_path", - "status_update_threads", - "storage_path", - "tenant_log_targets", - "topic", - "topics", - "udp_connect_min_interval_health_monitor", - "use_oslo_messaging", - "use_upstart", - "user_data_config_drive", - "user_log_facility", - "user_log_format", - "volume_create_max_retries", - "volume_create_retry_interval", - "volume_create_timeout", - "volume_driver", - "volume_size", - "volume_type", - "vrrp_advert_int", - "vrrp_check_interval", - "vrrp_fail_count", - "vrrp_garp_refresh_count", - "vrrp_garp_refresh_interval", - "vrrp_success_count", - "workers" - ], - "symbolic_name": "octavia_conf" - }, - { - "file": "/etc/odbc.ini", - "pattern": [ - "DRIVER", - "Driver", - "NO_SSPS", - "No_ssps", - "SERVER", - "Server", - "[", - "driver", - "no_ssps", - "server" - ], - "symbolic_name": "odbc_ini" - }, - { - "file": "/etc/odbcinst.ini", - "pattern": [], - "symbolic_name": "odbcinst_ini" - }, - { - "file": "/etc/origin/master/master-config.yaml", - "pattern": [], - "symbolic_name": "ose_master_config" - }, - { - "file": "/etc/origin/node/node-config.yaml", - "pattern": [], - "symbolic_name": "ose_node_config" - }, - { - "file": "/var/log/pacemaker.log", - "pattern": [ - "is active on 2 nodes (attempting recovery)" - ], - "symbolic_name": "pacemaker_log" - }, - { - "file": "/var/log/pacemaker/pacemaker.log", - "pattern": [ - "is active on 2 nodes (attempting recovery)" - ], - "symbolic_name": "pacemaker_log" - }, - { - "file": "/proc/partitions", - "pattern": [], - "symbolic_name": "partitions" - }, - { - "file": "/etc/pam.d/password-auth", - "pattern": [], - "symbolic_name": "password_auth" - }, - { - "file": "/etc/opt/rh/php73/php.ini", - "pattern": [ - "[", - "post_max_size" - ], - "symbolic_name": "php_ini" - }, - { - "file": "/etc/opt/rh/php72/php.ini", - "pattern": [ - "[", - "post_max_size" - ], - "symbolic_name": "php_ini" - }, - { - "file": "/etc/php.ini", - "pattern": [ - "[", - "post_max_size" - ], - "symbolic_name": "php_ini" - }, - { - "file": "/etc/yum/pluginconf.d/()*\\w+\\.conf", - "pattern": [], - "symbolic_name": "pluginconf_d" - }, - { - "file": "/var/lib/pgsql/data/postgresql.conf", - "pattern": [], - "symbolic_name": "postgresql_conf" - }, - { - "file": "/var/opt/rh/rh-postgresql12/lib/pgsql/data/postgresql.conf", - "pattern": [], - "symbolic_name": "postgresql_conf" - }, - { - "file": 
"/var/lib/pgsql/data/pg_log/()*postgresql-.+\\.log", - "pattern": [ - "FATAL", - "checkpoints are occurring too frequently", - "connection limit exceeded for non-superusers", - "remaining connection slots are reserved for non-replication superuser connections" - ], - "symbolic_name": "postgresql_log" - }, - { - "file": "/proc/net/netstat", - "pattern": [], - "symbolic_name": "proc_netstat" - }, - { - "file": "/proc/net/snmp", - "pattern": [], - "symbolic_name": "proc_snmp_ipv4" - }, - { - "file": "/proc/net/snmp6", - "pattern": [], - "symbolic_name": "proc_snmp_ipv6" - }, - { - "file": "/proc/slabinfo", - "pattern": [], - "symbolic_name": "proc_slabinfo" - }, - { - "file": "/proc/stat", - "pattern": [], - "symbolic_name": "proc_stat" - }, - { - "file": "/sos_commands/process/ps_auxwww", - "pattern": [], - "symbolic_name": "ps_auxwww" - }, - { - "file": "/etc/default/pulp_workers", - "pattern": [], - "symbolic_name": "pulp_worker_defaults" - }, - { - "file": "/etc/sysconfig/puppetserver", - "pattern": [ - "JAVA_ARGS" - ], - "symbolic_name": "puppetserver_config" - }, - { - "file": "/etc/libvirt/qemu.conf", - "pattern": [], - "symbolic_name": "qemu_conf" - }, - { - "file": "/etc/libvirt/qemu/()*.+\\.xml", - "pattern": [], - "symbolic_name": "qemu_xml" - }, - { - "file": "/etc/rabbitmq/rabbitmq-env.conf", - "pattern": [], - "symbolic_name": "rabbitmq_env" - }, - { - "file": "/var/log/rabbitmq/startup_log", - "pattern": [ - "Event crashed log handler:" - ], - "symbolic_name": "rabbitmq_startup_log" - }, - { - "file": "/etc/rc.d/rc.local", - "pattern": [], - "symbolic_name": "rc_local" - }, - { - "file": "/etc/rdma/rdma.conf", - "pattern": [], - "symbolic_name": "rdma_conf" - }, - { - "file": "/etc/redhat-release", - "pattern": [], - "symbolic_name": "redhat_release" - }, - { - "file": "/etc/os-release", - "pattern": [], - "symbolic_name": "os_release" - }, - { - "file": "/etc/resolv.conf", - "pattern": [], - "symbolic_name": "resolv_conf" - }, - { - "file": "/etc/rhn/rhn.conf", - "pattern": [], - "symbolic_name": "rhn_conf" - }, - { - "file": "/etc/rhosp-release", - "pattern": [], - "symbolic_name": "rhosp_release" - }, - { - "file": "/etc/rhsm/rhsm.conf", - "pattern": [], - "symbolic_name": "rhsm_conf" - }, - { - "file": "/var/lib/rhsm/cache/releasever.json", - "pattern": [], - "symbolic_name": "rhsm_releasever" - }, - { - "file": "/etc/qpid/qpidd.conf", - "pattern": [], - "symbolic_name": "qpidd_conf" - }, - { - "file": "/var/log/rhsm/rhsm.log", - "pattern": [ - "KeyError: 'config.network.dnsConfig.hostName'", - "Validation failed: Name is invalid", - "virt.host_type=hyperv, virt.uuid=Not Settable" - ], - "symbolic_name": "rhsm_log" - }, - { - "file": "/etc/rsyslog.conf", - "pattern": [ - "$ActionQueueFileName", - "(", - ")", - "imjournal", - "imtcp", - "regex", - "{", - "}" - ], - "symbolic_name": "rsyslog_conf" - }, - { - "file": "/etc/samba/smb.conf", - "pattern": [ - "GLOBAL", - "Global", - "KERBEROS METHOD", - "Kerberos Method", - "MAX SMBD PROCESSES", - "Max Smbd Processes", - "NT PIPE SUPPORT", - "Nt Pipe Support", - "PASSDB BACKEND", - "Passdb Backend", - "REALM", - "Realm", - "SECURITY", - "Security", - "[", - "]", - "comment", - "global", - "kerberos method", - "max smbd processes", - "nt pipe support", - "passdb backend", - "path", - "read only", - "realm", - "security", - "server max protocol", - "socket options", - "writable" - ], - "symbolic_name": "samba" - }, - { - "file": "/etc/redhat-access/redhat-access-insights.properties", - "pattern": [], - "symbolic_name": 
"sat5_insights_properties" - }, - { - "file": "/etc/foreman-installer/custom-hiera.yaml", - "pattern": [], - "symbolic_name": "satellite_custom_hiera" - }, - { - "file": "/usr/share/foreman/lib/satellite/version.rb", - "pattern": [], - "symbolic_name": "satellite_version_rb" - }, - { - "file": "/proc/scsi/scsi", - "pattern": [], - "symbolic_name": "scsi" - }, - { - "file": "/sys/module/scsi_mod/parameters/use_blk_mq", - "pattern": [], - "symbolic_name": "scsi_mod_use_blk_mq" - }, - { - "file": "/proc/net/sctp/assocs", - "pattern": [], - "symbolic_name": "sctp_asc" - }, - { - "file": "/proc/net/sctp/eps", - "pattern": [], - "symbolic_name": "sctp_eps" - }, - { - "file": "/proc/net/sctp/snmp", - "pattern": [], - "symbolic_name": "sctp_snmp" - }, - { - "file": "/var/log/secure", - "pattern": [ - "Could not set limit for 'nofile': Operation not permitted" - ], - "symbolic_name": "secure" - }, - { - "file": "/etc/selinux/config", - "pattern": [], - "symbolic_name": "selinux_config" - }, - { - "file": "/etc/sysconfig/foreman-tasks", - "pattern": [ - "EXECUTOR_MEMORY_LIMIT", - "EXECUTOR_MEMORY_MONITOR_DELAY", - "EXECUTOR_MEMORY_MONITOR_INTERVAL" - ], - "symbolic_name": "foreman_tasks_config" - }, - { - "file": "/etc/sysconfig/dynflowd", - "pattern": [ - "EXECUTOR_MEMORY_LIMIT", - "EXECUTOR_MEMORY_MONITOR_DELAY", - "EXECUTOR_MEMORY_MONITOR_INTERVAL" - ], - "symbolic_name": "foreman_tasks_config" - }, - { - "file": "/proc/net/softnet_stat", - "pattern": [], - "symbolic_name": "softnet_stat" - }, - { - "file": "/proc/net/sockstat", - "pattern": [], - "symbolic_name": "sockstat" - }, - { - "file": "/etc/ssh/sshd_config", - "pattern": [ - "ALLOWUSERS", - "AllowUsers", - "Allowusers", - "AuthorizedKeysFile", - "CHALLENGERESPONSEAUTHENTICATION", - "CIPHERS", - "CLIENTALIVECOUNTMAX", - "CLIENTALIVEINTERVAL", - "ChallengeResponseAuthentication", - "Challengeresponseauthentication", - "Ciphers", - "ClientAliveCountMax", - "ClientAliveInterval", - "Clientalivecountmax", - "Clientaliveinterval", - "DENYUSERS", - "DenyUsers", - "Denyusers", - "KBDINTERACTIVEAUTHENTICATION", - "KbdInteractiveAuthentication", - "Kbdinteractiveauthentication", - "LOGINGRACETIME", - "LoginGraceTime", - "Logingracetime", - "MACS", - "MACs", - "MAXAUTHTRIES", - "MAXSTARTUPS", - "Macs", - "MaxAuthTries", - "MaxStartups", - "Maxauthtries", - "Maxstartups", - "PERMITEMPTYPASSWORDS", - "PERMITROOTLOGIN", - "PROTOCOL", - "PermitEmptyPasswords", - "PermitRootLogin", - "Permitemptypasswords", - "Permitrootlogin", - "Protocol", - "USEPAM", - "UsePAM", - "UsePam", - "Usepam", - "allowusers", - "challengeresponseauthentication", - "ciphers", - "clientalivecountmax", - "clientaliveinterval", - "denyusers", - "kbdinteractiveauthentication", - "logingracetime", - "macs", - "maxauthtries", - "maxstartups", - "permitemptypasswords", - "permitrootlogin", - "protocol", - "usepam" - ], - "symbolic_name": "sshd_config" - }, - { - "file": "/etc/ssh/ssh_config", - "pattern": [ - "Include" - ], - "symbolic_name": "ssh_config" - }, - { - "file": "/usr/share/foreman/.ssh/ssh_config", - "pattern": [ - "[CAUTION] This_is_the default_filter_string_for_all_large_files!" 
- ], - "symbolic_name": "ssh_foreman_config" - }, - { - "file": "/usr/share/foreman-proxy/.ssh/ssh_config", - "pattern": [ - "Host", - "ProxyCommand" - ], - "symbolic_name": "ssh_foreman_proxy_config" - }, - { - "file": "/etc/sssd/sssd.conf", - "pattern": [], - "symbolic_name": "sssd_config" - }, - { - "file": "/etc/swift/swift.conf", - "pattern": [], - "symbolic_name": "swift_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/swift.conf", - "pattern": [], - "symbolic_name": "swift_conf" - }, - { - "file": "/etc/swift/object-expirer.conf", - "pattern": [], - "symbolic_name": "swift_object_expirer_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/object-expirer.conf", - "pattern": [], - "symbolic_name": "swift_object_expirer_conf" - }, - { - "file": "/etc/swift/proxy-server.conf", - "pattern": [], - "symbolic_name": "swift_proxy_server_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/swift/etc/swift/proxy-server.conf", - "pattern": [], - "symbolic_name": "swift_proxy_server_conf" - }, - { - "file": "/sys/kernel/debug/sched_features", - "pattern": [], - "symbolic_name": "sys_kernel_sched_features" - }, - { - "file": "/etc/default/grub", - "pattern": [], - "symbolic_name": "sysconfig_grub" - }, - { - "file": "/etc/sysconfig/kdump", - "pattern": [], - "symbolic_name": "sysconfig_kdump" - }, - { - "file": "/etc/sysconfig/libvirt-guests", - "pattern": [], - "symbolic_name": "sysconfig_libvirt_guests" - }, - { - "file": "/etc/sysconfig/memcached", - "pattern": [], - "symbolic_name": "sysconfig_memcached" - }, - { - "file": "/var/lib/config-data/memcached/etc/sysconfig/memcached", - "pattern": [], - "symbolic_name": "sysconfig_memcached" - }, - { - "file": "/etc/sysconfig/()*mongod", - "pattern": [], - "symbolic_name": "sysconfig_mongod" - }, - { - "file": "/etc/sysconfig/ntpd", - "pattern": [], - "symbolic_name": "sysconfig_ntpd" - }, - { - "file": "/etc/sysconfig/network", - "pattern": [], - "symbolic_name": "sysconfig_network" - }, - { - "file": "/etc/opt/rh/rh-mongodb26/sysconfig/()*mongod", - "pattern": [], - "symbolic_name": "sysconfig_rh_mongodb26" - }, - { - "file": "/etc/sysconfig/oracleasm", - "pattern": [], - "symbolic_name": "sysconfig_oracleasm" - }, - { - "file": "/etc/sysconfig/prelink", - "pattern": [], - "symbolic_name": "sysconfig_prelink" - }, - { - "file": "/etc/sysconfig/sshd", - "pattern": [], - "symbolic_name": "sysconfig_sshd" - }, - { - "file": "/etc/sysconfig/virt-who", - "pattern": [], - "symbolic_name": "sysconfig_virt_who" - }, - { - "file": "/etc/sysctl.conf", - "pattern": [], - "symbolic_name": "sysctl_conf" - }, - { - "file": "/etc/systemd/logind.conf", - "pattern": [], - "symbolic_name": "systemd_logind_conf" - }, - { - "file": "/etc/systemd/system.conf.d/origin-accounting.conf", - "pattern": [], - "symbolic_name": "systemd_system_origin_accounting" - }, - { - "file": "/etc/systemd/system.conf", - "pattern": [], - "symbolic_name": "systemd_system_conf" - }, - { - "file": "/etc/sysconfig/rhn/systemid", - "pattern": [], - "symbolic_name": "systemid" - }, - { - "file": "/sys/kernel/mm/transparent_hugepage/enabled", - "pattern": [], - "symbolic_name": "thp_enabled" - }, - { - "file": "/sys/kernel/mm/transparent_hugepage/use_zero_page", - "pattern": [], - "symbolic_name": "thp_use_zero_page" - }, - { - "file": "/etc/tmpfiles.d/()*.*\\.conf", - "pattern": [], - "symbolic_name": "tmpfilesd" - }, - { - "file": "/usr/lib/tmpfiles.d/()*.*\\.conf", - "pattern": [], - "symbolic_name": "tmpfilesd" - }, - 
{ - "file": "/run/tmpfiles.d/()*.*\\.conf", - "pattern": [], - "symbolic_name": "tmpfilesd" - }, - { - "file": "/etc/tuned.conf", - "pattern": [], - "symbolic_name": "tuned_conf" - }, - { - "file": "/etc/sysconfig/rhn/up2date", - "pattern": [], - "symbolic_name": "up2date" - }, - { - "file": "/var/log/up2date", - "pattern": [ - "The certificate /usr/share/rhn/RHNS-CA-CERT is expired" - ], - "symbolic_name": "up2date_log" - }, - { - "file": "/usr/lib/systemd/journald.conf.d/()*.+\\.conf", - "pattern": [], - "symbolic_name": "usr_journald_conf_d" - }, - { - "file": "/etc/vdsm/vdsm.conf", - "pattern": [], - "symbolic_name": "vdsm_conf" - }, - { - "file": "/var/log/vdsm/vdsm.log", - "pattern": [ - "(mailbox-spm) [storage.Misc.excCmd] /usr/bin/taskset --cpu-list", - "Changed state to Down: 'NoneType' object has no attribute 'attrib'", - "Changed state to Down: internal error: Attempted double use of PCI slot", - "ERROR (mailbox-spm) [storage.MailBox.SpmMailMonitor]", - "RPC call Host.setupNetworks failed", - "Stopping connection", - "The name org.fedoraproject.FirewallD1 was not provided by any .service files", - "The vm start process failed" - ], - "symbolic_name": "vdsm_log" - }, - { - "file": "/etc/vdsm/vdsm.id", - "pattern": [], - "symbolic_name": "vdsm_id" - }, - { - "file": "/etc/vdsm/logger.conf", - "pattern": [], - "symbolic_name": "vdsm_logger_conf" - }, - { - "file": "/sys/module/vhost_net/parameters/experimental_zcopytx", - "pattern": [], - "symbolic_name": "vhost_net_zero_copy_tx" - }, - { - "file": "/etc/()*virt-who\\.conf", - "pattern": [ - "[", - "configs", - "debug", - "env", - "interval", - "log_", - "oneshot", - "owner", - "server", - "type" - ], - "symbolic_name": "virt_who_conf" - }, - { - "file": "/etc/virt-who.d/()*.*\\.conf", - "pattern": [ - "[", - "configs", - "debug", - "env", - "interval", - "log_", - "oneshot", - "owner", - "server", - "type" - ], - "symbolic_name": "virt_who_conf" - }, - { - "file": "/etc/libvirt/virtlogd.conf", - "pattern": [ - "max_size" - ], - "symbolic_name": "virtlogd_conf" - }, - { - "file": "/sys/kernel/mm/swap/vma_ra_enabled", - "pattern": [], - "symbolic_name": "vma_ra_enabled" - }, - { - "file": "/etc/pam.d/vsftpd", - "pattern": [], - "symbolic_name": "vsftpd" - }, - { - "file": "/etc/vsftpd/vsftpd.conf", - "pattern": [ - "LOCAL_ENABLE", - "Local_Enable", - "SSL_ENABLE", - "SSL_SSLV2", - "SSL_SSLV3", - "SSL_TLSV1", - "SSL_TLSV1_1", - "SSL_TLSV1_2", - "Ssl_Enable", - "Ssl_Sslv2", - "Ssl_Sslv3", - "Ssl_Tlsv1", - "Ssl_Tlsv1_1", - "Ssl_Tlsv1_2", - "local_enable", - "session_support", - "ssl_enable", - "ssl_sslv2", - "ssl_sslv3", - "ssl_tlsv1", - "ssl_tlsv1_1", - "ssl_tlsv1_2" - ], - "symbolic_name": "vsftpd_conf" - }, - { - "file": "/sys/kernel/debug/x86/ibpb_enabled", - "pattern": [], - "symbolic_name": "x86_ibpb_enabled" - }, - { - "file": "/sys/kernel/debug/x86/ibrs_enabled", - "pattern": [], - "symbolic_name": "x86_ibrs_enabled" - }, - { - "file": "/sys/kernel/debug/x86/pti_enabled", - "pattern": [], - "symbolic_name": "x86_pti_enabled" - }, - { - "file": "/etc/()*xinetd\\.conf", - "pattern": [], - "symbolic_name": "xinetd_conf" - }, - { - "file": "/etc/xinetd.d/()*.*", - "pattern": [], - "symbolic_name": "xinetd_conf" - }, - { - "file": "/etc/yum.conf", - "pattern": [], - "symbolic_name": "yum_conf" - }, - { - "file": "/var/log/yum.log", - "pattern": [], - "symbolic_name": "yum_log" - }, - { - "file": "/var/log/redhat_access_proactive/redhat_access_proactive.log", - "pattern": [], - "symbolic_name": "redhat_access_proactive_log" - }, - { 
- "file": "/etc/logrotate.conf", - "pattern": [], - "symbolic_name": "logrotate_conf" - }, - { - "file": "/etc/logrotate.d/().*", - "pattern": [], - "symbolic_name": "logrotate_conf" - }, - { - "file": "/etc/rhsm/facts/virt_uuid.facts", - "pattern": [], - "symbolic_name": "virt_uuid_facts" - }, - { - "file": "/var/log/containers/gnocchi/gnocchi-metricd.log", - "pattern": [ - "ObjectNotFound: error opening pool 'metrics'" - ], - "symbolic_name": "gnocchi_metricd_log" - }, - { - "file": "/var/log/gnocchi/metricd.log", - "pattern": [ - "ObjectNotFound: error opening pool 'metrics'" - ], - "symbolic_name": "gnocchi_metricd_log" - }, - { - "file": "/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", - "pattern": [ - "[", - "ceph", - "ceph_pool", - "driver", - "storage" - ], - "symbolic_name": "gnocchi_conf" - }, - { - "file": "/etc/gnocchi/gnocchi.conf", - "pattern": [ - "[", - "ceph", - "ceph_pool", - "driver", - "storage" - ], - "symbolic_name": "gnocchi_conf" - }, - { - "file": "/etc/named.conf", - "pattern": [ - "include", - "{", - "}" - ], - "symbolic_name": "named_conf" - }, - { - "file": "/etc/neutron/neutron.conf", - "pattern": [ - "[", - "agent_down_time", - "agent_report_interval", - "allow_automatic_dhcp_failover", - "api_workers", - "debug", - "ipam_driver", - "router_distributed", - "rpc_workers", - "service_plugins" - ], - "symbolic_name": "neutron_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/neutron.conf", - "pattern": [ - "[", - "agent_down_time", - "agent_report_interval", - "allow_automatic_dhcp_failover", - "api_workers", - "debug", - "ipam_driver", - "router_distributed", - "rpc_workers", - "service_plugins" - ], - "symbolic_name": "neutron_conf" - }, - { - "file": "/etc/neutron/dhcp_agent.ini", - "pattern": [ - "[", - "force_metadata" - ], - "symbolic_name": "neutron_dhcp_agent_ini" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/dhcp_agent.ini", - "pattern": [ - "[", - "force_metadata" - ], - "symbolic_name": "neutron_dhcp_agent_ini" - }, - { - "file": "/etc/neutron/plugin.ini", - "pattern": [], - "symbolic_name": "neutron_plugin_ini" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugin.ini", - "pattern": [], - "symbolic_name": "neutron_plugin_ini" - }, - { - "file": "/etc/neutron/plugins/ml2/sriov_agent.ini", - "pattern": [ - "[", - "debug", - "exclude_devices", - "extensions", - "physical_device_mappings" - ], - "symbolic_name": "neutron_sriov_agent" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/sriov_agent.ini", - "pattern": [ - "[", - "debug", - "exclude_devices", - "extensions", - "physical_device_mappings" - ], - "symbolic_name": "neutron_sriov_agent" - }, - { - "file": "/etc/zipl.conf", - "pattern": [], - "symbolic_name": "zipl_conf" - }, - { - "file": "/etc/smart_proxy_dynflow_core/settings.yml", - "pattern": [ - ":database:" - ], - "symbolic_name": "smartpdc_settings" - }, - { - "file": "/etc/neutron/l3_agent.ini", - "pattern": [ - "[", - "agent_mode" - ], - "symbolic_name": "neutron_l3_agent_ini" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/l3_agent.ini", - "pattern": [ - "[", - "agent_mode" - ], - "symbolic_name": "neutron_l3_agent_ini" - }, - { - "file": "/var/log/neutron/l3-agent.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Error while deleting router", - "Stderr: Another app is currently holding the xtables lock", - "Timed out waiting for 
RPC response" - ], - "symbolic_name": "neutron_l3_agent_log" - }, - { - "file": "/var/log/containers/neutron/l3-agent.log", - "pattern": [ - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Error while deleting router", - "Stderr: Another app is currently holding the xtables lock", - "Timed out waiting for RPC response" - ], - "symbolic_name": "neutron_l3_agent_log" - }, - { - "file": "/etc/neutron/metadata_agent.ini", - "pattern": [ - "[" - ], - "symbolic_name": "neutron_metadata_agent_ini" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/metadata_agent.ini", - "pattern": [ - "[" - ], - "symbolic_name": "neutron_metadata_agent_ini" - }, - { - "file": "/var/log/neutron/metadata-agent.log", - "pattern": [ - "Unauthorized: {\"error\": {\"message\": \"The resource could not be found.\", \"code\": 404, \"title\": \"Not Found\"}}" - ], - "symbolic_name": "neutron_metadata_agent_log" - }, - { - "file": "/var/log/containers/neutron/metadata-agent.log", - "pattern": [ - "Unauthorized: {\"error\": {\"message\": \"The resource could not be found.\", \"code\": 404, \"title\": \"Not Found\"}}" - ], - "symbolic_name": "neutron_metadata_agent_log" - }, - { - "file": "/etc/neutron/plugins/ml2/ml2_conf.ini", - "pattern": [ - "[" - ], - "symbolic_name": "neutron_ml2_conf" - }, - { - "file": "/var/lib/config-data/puppet-generated/neutron/etc/neutron/plugins/ml2/ml2_conf.ini", - "pattern": [ - "[" - ], - "symbolic_name": "neutron_ml2_conf" - }, - { - "file": "/var/log/neutron/openvswitch-agent.log", - "pattern": [ - "Agent main thread died of an exception", - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp", - "u'device_owner': u'network:router_interface_distributed'" - ], - "symbolic_name": "neutron_ovs_agent_log" - }, - { - "file": "/var/log/containers/neutron/openvswitch-agent.log", - "pattern": [ - "Agent main thread died of an exception", - "DEBUG oslo.messaging._drivers.impl_rabbit", - "Timed out waiting for RPC response", - "neutron.plugins.ml2.drivers.openvswitch.agent.openflow.native.ovs_ryuapp", - "u'device_owner': u'network:router_interface_distributed'" - ], - "symbolic_name": "neutron_ovs_agent_log" - }, - { - "file": "/usr/libexec/setup-named-chroot.sh", - "pattern": [ - "/", - "ROOTDIR_MOUNT" - ], - "symbolic_name": "setup_named_chroot" - }, - { - "file": "/usr/sap/hostctrl/exe/host_profile", - "pattern": [ - "DIR_", - "SAPSYSTEM" - ], - "symbolic_name": "sap_host_profile" - }, - { - "file": "/proc/sys/kernel/sched_rt_runtime_us", - "pattern": [], - "symbolic_name": "sched_rt_runtime_us" - }, - { - "file": "/usr/lib/udev/rules.d/59-fc-wwpn-id.rules", - "pattern": [ - "ENV{FC_INITIATOR_WWPN}", - "ENV{FC_TARGET_LUN}", - "ENV{FC_TARGET_WWPN}" - ], - "symbolic_name": "udev_fc_wwpn_id_rules" - } - ], - "globs": [ - { - "glob": "/etc/tower/conf.d/*.py", - "pattern": [ - "AWX_CLEANUP_PATHS", - "AWX_PROOT_BASE_PATH" - ], - "symbolic_name": "ansible_tower_settings" - }, - { - "glob": "/etc/udev/rules.d/*asm*.rules", - "pattern": [ - "oracleasm", - "ACTION==" - ], - "symbolic_name": "etc_udev_oracle_asm_rules" - }, - { - "glob": "/sys/devices/system/cpu/cpu[0-9]*/online", - "symbolic_name": "cpu_cores", - "pattern": [] - }, - { - "glob": "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list", - "symbolic_name": "cpu_siblings", - "pattern": [] - }, - { - "glob": "/sys/devices/system/cpu/vulnerabilities/*", - "symbolic_name": "cpu_vulns", - "pattern": [] - 
}, - { - "glob": "/etc/dnsmasq.d/*.conf", - "pattern": [], - "symbolic_name": "dnsmasq_config" - }, - { - "glob": "/sys/class/net/*/address", - "symbolic_name": "mac_addresses", - "pattern": [] - }, - { - "glob": "/etc/httpd/conf.d/*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf" - }, - { - "glob": "/etc/httpd/conf*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf" - }, - { - "glob": "/opt/rh/httpd24/root/etc/httpd/conf.d/*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf_scl_httpd24" - }, - { - "glob": "/opt/rh/httpd24/root/etc/httpd/conf*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf_scl_httpd24" - }, - { - "glob": "/opt/rh/jbcs-httpd24/root/etc/httpd/conf.d/*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf_scl_jbcs_httpd24" - }, - { - "glob": "/opt/rh/jbcs-httpd24/root/etc/httpd/conf*/*.conf", - "pattern": [], - "symbolic_name": "httpd_conf_scl_jbcs_httpd24" - }, - { - "glob": "/sys/class/net/bond[0-9]*/bonding/tlb_dynamic_lb", - "symbolic_name": "bond_dynamic_lb", - "pattern": [] - }, - { - "glob": "/boot/loader/entries/*.conf", - "symbolic_name": "boot_loader_entries", - "pattern": [] - }, - { - "glob": "/var/opt/amq-broker/*/etc/broker.xml", - "symbolic_name": "amq_broker", - "pattern": [] - }, - { - "glob": "/boot/config-*", - "symbolic_name": "kernel_config", - "pattern": [ - "CONFIG_BPF_SYSCALL", - "CONFIG_PREEMPT_RT_FULL", - "CONFIG_SMP" - ] - }, - { - "glob": "/etc/krb5.conf.d/*", - "symbolic_name": "krb5_conf_d", - "pattern": [] - }, - { - "glob": "/etc/nginx/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/etc/nginx/*.d/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/etc/opt/rh/rh-nginx*/nginx/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/etc/opt/rh/rh-nginx*/nginx/*.d/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/opt/rh/nginx*/root/etc/nginx/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/opt/rh/nginx*/root/etc/nginx/*.d/*.conf", - "pattern": [], - "symbolic_name": "nginx_conf" - }, - { - "glob": "/sys/fs/cgroup/cpu/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod[a-f0-9_]*.slice/cpu.cfs_quota_us", - "symbolic_name": "kubepods_cpu_quota", - "pattern": [] - }, - { - "glob": "/var/log/ceph/ceph.log*", - "pattern": [ - "[WRN] slow request" - ], - "symbolic_name": "ceph_log" - }, - { - "glob": "/var/log/libvirt/qemu/*.log", - "pattern": [ - "[CAUTION] This_is_the default_filter_string_for_all_large_files!" 
-      ],
-      "symbolic_name": "libvirtd_qemu_log"
-    },
-    {
-      "glob": "/sys/bus/pci/devices/*/mlx4_port[0-9]",
-      "symbolic_name": "mlx4_port",
-      "pattern": []
-    },
-    {
-      "glob": "/var/opt/rh/rh-mysql*/log/mysql/mysqld.log",
-      "symbolic_name": "mysql_log",
-      "pattern": [
-        "SSL error",
-        "Too many open files",
-        "[ERROR]",
-        "handshake with remote endpoint ssl"
-      ]
-    },
-    {
-      "glob": "/sys/devices/system/node/node[0-9]*/cpulist",
-      "symbolic_name": "numa_cpus",
-      "pattern": []
-    },
-    {
-      "glob": "/var/opt/rh/rh-postgresql12/lib/pgsql/data/log/postgresql-*.log",
-      "symbolic_name": "postgresql_log",
-      "pattern": [
-        "FATAL",
-        "checkpoints are occurring too frequently",
-        "connection limit exceeded for non-superusers",
-        "remaining connection slots are reserved for non-replication superuser connections"
-      ]
-    },
-    {
-      "glob": "/etc/rsyslog.d/*.conf",
-      "pattern": [
-        "$ActionQueueFileName",
-        "(",
-        ")",
-        "imjournal",
-        "imtcp",
-        "regex",
-        "{",
-        "}"
-      ],
-      "symbolic_name": "rsyslog_conf"
-    },
-    {
-      "glob": "/sys/bus/vmbus/devices/*/class_id",
-      "pattern": [],
-      "symbolic_name": "sys_vmbus_class_id"
-    },
-    {
-      "glob": "/sys/bus/vmbus/devices/*/device_id",
-      "pattern": [],
-      "symbolic_name": "sys_vmbus_device_id"
-    },
-    {
-      "glob": "/sys/block/*/queue/scheduler",
-      "symbolic_name": "scheduler",
-      "pattern": []
-    },
-    {
-      "glob": "/sys/class/scsi_host/host[0-9]*/fwrev",
-      "symbolic_name": "scsi_fwver",
-      "pattern": []
-    },
-    {
-      "glob": "/sys/class/scsi_host/host[0-9]*/eh_deadline",
-      "symbolic_name": "scsi_eh_deadline",
-      "pattern": []
-    },
-    {
-      "glob": "/etc/yum.repos.d/*.repo",
-      "symbolic_name": "yum_repos_d",
-      "pattern": []
-    },
-    {
-      "glob": "/etc/ssh/ssh_config.d/*.conf",
-      "symbolic_name": "ssh_config_d",
-      "pattern": [
-        "Include"
-      ]
-    }
-  ],
-  "meta_specs": {
-    "analysis_target": {
-      "archive_file_name": "/insights_data/analysis_target"
-    },
-    "branch_info": {
-      "archive_file_name": "/branch_info"
-    },
-    "machine-id": {
-      "archive_file_name": "/insights_data/machine-id"
-    },
-    "metadata.json": {
-      "archive_file_name": "metadata.json"
-    },
-    "uploader_log": {
-      "archive_file_name": "/insights_data/insights_logs/insights.log"
-    }
-  },
-  "pre_commands": {
-    "iface": "/sbin/ip -o link | awk -F ': |@' '/.*link\\/ether/ {print $2}'"
-  },
-  "version": "2021-07-29T13:51:55.397132"
-}
\ No newline at end of file
diff --git a/insights/tests/client/collection_rules/test_get_rm_conf.py b/insights/tests/client/collection_rules/test_get_rm_conf.py
index 4041a51ea..8efdeef39 100644
--- a/insights/tests/client/collection_rules/test_get_rm_conf.py
+++ b/insights/tests/client/collection_rules/test_get_rm_conf.py
@@ -333,6 +333,32 @@ def test_rm_conf_old_load_ok(isfile, verify):
     assert result == {'commands': ['/bin/ls', 'ethtool_i'], 'files': ['/etc/test'], 'patterns': ['abc123', 'def456'], 'keywords': ['key1', 'key2', 'key3']}
 
 
+@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={"test": "test"}))
+@patch('insights.client.collection_rules.map_rm_conf_to_components', Mock())
+@patch('insights.client.collection_rules.InsightsUploadConf.get_conf_file')
+def test_rm_conf_loads_uploader_json_core_collect(get_conf_file):
+    '''
+    Verify that get_conf_file() is called from
+    get_rm_conf() when core_collect==True
+    '''
+    upload_conf = insights_upload_conf(core_collect=True)
+    upload_conf.get_rm_conf()
+    get_conf_file.assert_called_once()
+
+
+@patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={"test": "test"}))
+@patch('insights.client.collection_rules.map_rm_conf_to_components', Mock()) +@patch('insights.client.collection_rules.InsightsUploadConf.get_conf_file') +def test_rm_conf_no_load_uploader_json_classic_collect(get_conf_file): + ''' + Verify that get_conf_file() is NOT called from + get_rm_conf() when core_collect==False + ''' + upload_conf = insights_upload_conf(core_collect=False) + upload_conf.get_rm_conf() + get_conf_file.assert_not_called() + + # @patch('insights.client.collection_rules.verify_permissions', return_value=True) # @patch_isfile(True) # def test_rm_conf_old_load_bad(isfile, verify): diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 414632a27..5cdf1cf4b 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -1,9 +1,8 @@ -import pkgutil -import insights -import json +import requests -# from insights.client.config import InsightsConfig from insights.client.collection_rules import InsightsUploadConf +from insights.client.connection import InsightsConnection +from insights.client.config import InsightsConfig from mock.mock import patch, Mock from insights.specs.default import DefaultSpecs from insights.specs.sos_archive import SosSpecs @@ -11,14 +10,27 @@ _search_uploader_json, _get_component_by_symbolic_name) -uploader_json_file = pkgutil.get_data(insights.__name__, "client/uploader_json_map.json") -uploader_json = json.loads(uploader_json_file) +config = InsightsConfig() +conn = InsightsConnection(config) default_specs = vars(DefaultSpecs).keys() sos_specs = vars(SosSpecs).keys() +def get_uploader_json(): + ''' + Download latest uploader.json to use for unit tests + ''' + print("Downloading a fresh and hot uploader.json...") + url = "https://api.access.redhat.com/r/insights/v1/static/uploader.v2.json" + uploader_json = requests.get(url).json() + return uploader_json + +uploader_json = get_uploader_json() + + @patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={'test': 'test'})) @patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.InsightsUploadConf.get_conf_file', Mock(return_value={'test': 'test'})) @patch('insights.client.collection_rules.map_rm_conf_to_components') def test_called_when_core_collection_enabled(map_rm_conf_to_components): ''' @@ -26,11 +38,12 @@ def test_called_when_core_collection_enabled(map_rm_conf_to_components): ''' upload_conf = InsightsUploadConf(Mock(core_collect=True)) upload_conf.get_rm_conf() - map_rm_conf_to_components.assert_called_once_with({'test': 'test'}) + map_rm_conf_to_components.assert_called_once_with({'test': 'test'}, {'test': 'test'}) @patch('insights.client.collection_rules.InsightsUploadConf.load_redaction_file', Mock(return_value={'test': 'test'})) @patch('insights.client.collection_rules.InsightsUploadConf.get_rm_conf_old', Mock(return_value={'test': 'test'})) +@patch('insights.client.collection_rules.InsightsUploadConf.get_conf_file', Mock(return_value={'test': 'test'})) @patch('insights.client.collection_rules.map_rm_conf_to_components') def test_not_called_when_core_collection_disabled(map_rm_conf_to_components): ''' @@ -41,117 +54,27 @@ def test_not_called_when_core_collection_disabled(map_rm_conf_to_components): map_rm_conf_to_components.assert_not_called() -def 
test_get_component_by_symbolic_name(): - ''' - Verify that all symbolic names in uploader.json can be mapped - to valid components as prescribed in the conversion function - ''' - # some specs have been removed for core release so because they either - # A) do not appear in uploader.json, or - # B) DO appear in uploader.json, but have no associated rules - # Filter out the (B) specs with this list - skipped_specs = [ - 'ceph_osd_df', - 'gluster_peer_status', - 'gluster_v_status', - 'heat_crontab', - 'httpd_on_nfs', - 'ls_usr_sbin', - 'lvmconfig', - 'nova_migration_uid', - 'rabbitmq_queues', - 'rhev_data_center', - 'root_crontab', - 'yum_list_installed', - 'zdump_v', - 'cni_podman_bridge_conf', - 'cobbler_modules_conf', - 'cobbler_settings', - 'cpu_smt_control', - 'cpu_vulns_meltdown', - 'cpu_vulns_spectre_v1', - 'cpu_vulns_spectre_v2', - 'cpu_vulns_spec_store_bypass', - 'docker_storage', - 'freeipa_healthcheck_log', - 'ironic_conf', - 'octavia_conf', - 'rhn_entitlement_cert_xml', - 'rhn_hibernate_conf', - 'rhn_schema_version', - 'rhn_search_daemon_log', - 'rhn_taskomatic_daemon_log', - 'rhosp_release', - 'secure', - 'foreman_tasks_config', - 'ssh_foreman_config', - 'swift_conf', - 'sys_kernel_sched_features', - 'sysconfig_memcached', - 'sysconfig_mongod', - 'systemd_system_origin_accounting', - 'tuned_conf', - 'vdsm_conf', - 'vdsm_id', - 'neutron_ml2_conf', - 'sap_host_profile', - 'sched_rt_runtime_us', - 'libvirtd_qemu_log', - 'mlx4_port', - 'qpid_stat_g', - 'lsinitrd' - ] - - # first, make sure our list is proper and one of these - # are in the default specs - for s in skipped_specs: - assert s not in default_specs - - for category in ['commands', 'files', 'globs']: - for entry in uploader_json[category]: - full_component = _get_component_by_symbolic_name(entry['symbolic_name']) - - if full_component is None: - # this entry should not be in core, so assert that it's missing - assert entry['symbolic_name'] not in default_specs - continue - - module, shortname = full_component.rsplit('.', 1) - - # filter out specs without associated rules - if shortname in skipped_specs: - continue - - if module == "insights.specs.default.DefaultSpecs": - assert shortname in default_specs - elif module == "insights.specs.sos_archive.SosSpecs": - assert shortname in sos_specs - else: - # invalid module name - assert False - - def test_search_uploader_json(): ''' Verify that all valid input from an uploader.json-based remove.conf will return a symbolic name ''' for cmd in uploader_json['commands']: - assert _search_uploader_json(['commands'], cmd['command']) - assert _search_uploader_json(['commands'], cmd['symbolic_name']) + assert _search_uploader_json(uploader_json, ['commands'], cmd['command']) + assert _search_uploader_json(uploader_json, ['commands'], cmd['symbolic_name']) for fil in uploader_json['files']: - assert _search_uploader_json(['files', 'globs'], fil['file']) - assert _search_uploader_json(['files', 'globs'], fil['symbolic_name']) + assert _search_uploader_json(uploader_json, ['files', 'globs'], fil['file']) + assert _search_uploader_json(uploader_json, ['files', 'globs'], fil['symbolic_name']) for glb in uploader_json['globs']: - assert _search_uploader_json(['files', 'globs'], glb['symbolic_name']) + assert _search_uploader_json(uploader_json, ['files', 'globs'], glb['symbolic_name']) def test_search_uploader_json_invalid(): ''' Verify that invalid input will return None ''' - assert _search_uploader_json(['commands'], 'random value') is None - assert _search_uploader_json(['files', 
'globs'], 'random value') is None + assert _search_uploader_json(uploader_json, ['commands'], 'random value') is None + assert _search_uploader_json(uploader_json, ['files', 'globs'], 'random value') is None def test_search_uploader_json_globs_symbolic_only(): @@ -159,7 +82,7 @@ def test_search_uploader_json_globs_symbolic_only(): Verify that globs are matched by symbolic name only ''' for glb in uploader_json['globs']: - assert _search_uploader_json(['files', 'globs'], glb['glob']) is None + assert _search_uploader_json(uploader_json, ['files', 'globs'], glb['glob']) is None def test_map_rm_conf_to_components_sym_names(): @@ -174,7 +97,7 @@ def test_map_rm_conf_to_components_sym_names(): rm_conf = {'commands': [sym_name]} # figure out the destination name should be spec_name = _get_component_by_symbolic_name(sym_name) - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # commands should be empty, components should have 1 item assert len(new_rm_conf['commands']) == 0 assert len(new_rm_conf['components']) == 1 @@ -187,7 +110,7 @@ def test_map_rm_conf_to_components_sym_names(): rm_conf = {'files': [sym_name]} # figure out the destination name should be spec_name = _get_component_by_symbolic_name(sym_name) - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # files should be empty, components should have 1 item # except for these which cannot be mapped to specs. # in which case, components empty and these remain in files @@ -209,7 +132,7 @@ def test_map_rm_conf_to_components_sym_names(): rm_conf = {'files': [sym_name]} # figure out the destination name should be spec_name = _get_component_by_symbolic_name(sym_name) - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # files should be empty, components should have 1 item assert len(new_rm_conf['files']) == 0 assert len(new_rm_conf['components']) == 1 @@ -228,7 +151,7 @@ def test_map_rm_conf_to_components_raw_cmds_files(): sym_name = cmd['symbolic_name'] # figure out the destination name should be spec_name = _get_component_by_symbolic_name(sym_name) - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # commands should be empty, components should have 1 item assert len(new_rm_conf['commands']) == 0 assert len(new_rm_conf['components']) == 1 @@ -241,7 +164,7 @@ def test_map_rm_conf_to_components_raw_cmds_files(): sym_name = fil['symbolic_name'] # figure out the destination name should be spec_name = _get_component_by_symbolic_name(sym_name) - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # files should be empty, components should have 1 item # except for these which cannot be mapped to specs. 
# in which case, components empty and these remain in files @@ -262,7 +185,7 @@ def test_map_rm_conf_to_components_invalid(): Verify that matching commands/files are mapped to components ''' rm_conf = {'commands': ['random', 'value'], 'files': ['other', 'invalid', 'data']} - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) # rm_conf should be unchanged assert len(new_rm_conf['commands']) == 2 assert len(new_rm_conf['files']) == 3 @@ -278,12 +201,12 @@ def test_rm_conf_empty(_search_uploader_json): with an empty dict or None ''' rm_conf = {} - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) _search_uploader_json.assert_not_called() assert new_rm_conf == {} rm_conf = None - new_rm_conf = map_rm_conf_to_components(rm_conf) + new_rm_conf = map_rm_conf_to_components(rm_conf, uploader_json) _search_uploader_json.assert_not_called() assert new_rm_conf is None @@ -299,7 +222,7 @@ def test_log_long_key(logger_warning): 'files': ["/etc/sysconfig/virt-who", "/etc/yum.repos.d/fedora-cisco-openh264.repo", "krb5_conf_d"]} - map_rm_conf_to_components(rm_conf) + map_rm_conf_to_components(rm_conf, uploader_json) logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n /etc/origin/master /etc/pki /etc/ipa -type f\n -exec /usr/bin/openssl x509 -noout -enddate -in\n '{}' \\; -exec echo 'FileName= {}' \\;") logger_warning.assert_any_call("- /usr/bin/md5sum /etc/pki/product/69.pem => md5chk_files") logger_warning.assert_any_call("- /etc/sysconfig/virt-who => sysconfig_virt_who") @@ -313,7 +236,7 @@ def test_log_short_key(logger_warning): is short ''' rm_conf = {'commands': ["ss_tupna"]} - map_rm_conf_to_components(rm_conf) + map_rm_conf_to_components(rm_conf, uploader_json) logger_warning.assert_any_call("If possible, commands and files specified in the blacklist configuration will be converted to Insights component specs that will be disabled as needed.") @@ -325,7 +248,7 @@ def test_components_added(): ''' rm_conf = {'commands': ["/usr/bin/md5sum /etc/pki/product/69.pem"], 'components': ["insights.specs.default.DefaultSpecs.sysconfig_virt_who"]} - results = map_rm_conf_to_components(rm_conf) + results = map_rm_conf_to_components(rm_conf, uploader_json) assert results == {'commands': [], 'files': [], diff --git a/insights/tests/client/phase/test_update.py b/insights/tests/client/phase/test_update.py index 8d6f87c3a..1602e21b2 100644 --- a/insights/tests/client/phase/test_update.py +++ b/insights/tests/client/phase/test_update.py @@ -41,7 +41,7 @@ def test_update_payload_off(insights_config, insights_client): @patch("insights.client.phase.v1.InsightsConfig") def test_update_core_collect_on(insights_config, insights_client): """ - Rules are not updated when using core collection + Rules ARE updated when using core collection """ insights_config.return_value.load_all.return_value.payload = False insights_config.return_value.load_all.return_value.core_collect = True diff --git a/insights/tests/client/test_collect.py b/insights/tests/client/test_collect.py index d790d37f1..02bbeb08f 100644 --- a/insights/tests/client/test_collect.py +++ b/insights/tests/client/test_collect.py @@ -167,14 +167,14 @@ def test_get_conf_file(get_branch_info, get_conf_file, data_collector): @patch("insights.client.client.CoreCollector") @patch_get_conf_file() @patch_get_branch_info() -def test_get_conf_not_called_core_collection(get_branch_info, get_conf_file, 
core_collector): +def test_get_conf_called_core_collection(get_branch_info, get_conf_file, core_collector): """ - Verify that uploader.json is not loaded when using core collection + Verify that uploader.json IS loaded when using core collection (from get_rm_conf function) """ config, pconn = collect_args(core_collect=True) collect(config, pconn) - get_conf_file.assert_not_called() + get_conf_file.assert_called_once() @patch_data_collector() From 20b9061f55f053ef27e95a282154cface55a011f Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 11 Aug 2021 17:47:06 -0400 Subject: [PATCH 520/892] add additional messaging for 401 response (#3066) Signed-off-by: Jeremy Crafts --- insights/client/connection.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/insights/client/connection.py b/insights/client/connection.py index 5cc1feae5..46e4170b4 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -420,9 +420,9 @@ def handle_fail_rcs(self, req): req.status_code) logger.debug("HTTP Status Text: %s", req.reason) if req.status_code == 401: - logger.error("Authorization Required.") - logger.error("Please ensure correct credentials " - "in " + constants.default_conf_file) + logger.error("Please ensure that the system is registered " + "with RHSM for CERT auth, or that correct " + "credentials are set in %s for BASIC auth.", self.config.conf) logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 402: # failed registration because of entitlement limit hit @@ -633,6 +633,7 @@ def _legacy_api_registration_check(self): # True for registered # False for unregistered # None for system 404 + self.handle_fail_rcs(res) try: # check the 'unregistered_at' key of the response unreg_status = json.loads(res.content).get('unregistered_at', 'undefined') From 21e7d5d7d75112ad66ef6fb04271e33fc9da96fd Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Mon, 16 Aug 2021 05:53:40 -0500 Subject: [PATCH 521/892] Update candlepin_broker tests to work with newer python pytest (#3186) * This fixes testing issues with python 3.8 and the latest pytest where the order of attributes changes in the XML tags. * The new test doesn't compare everything, but compares most things including removal of sensitive tags. 
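* As background: parsing both sides with ElementTree and comparing
  attributes as dicts is what makes the new checks order-insensitive,
  since a dict compares equal regardless of serialization order. A
  minimal sketch of the idea (the helper name is hypothetical and
  illustrative only; the real checks are xml_compare/xml_check_removed
  in the diff below):

      import xml.etree.ElementTree as ET

      def elements_equal(a, b):
          # Attributes compare as dicts, so x="1" y="2" equals y="2" x="1";
          # tag and child count must also match. Child order stays significant.
          if a.tag != b.tag or a.attrib != b.attrib or len(a) != len(b):
              return False
          return all(elements_equal(c, d) for c, d in zip(a, b))

      assert elements_equal(ET.fromstring('<a x="1" y="2"/>'),
                            ET.fromstring('<a y="2" x="1"/>'))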
Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- .../datasources/test_candlepin_broker.py | 54 ++++++++++++++++++- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/insights/tests/datasources/test_candlepin_broker.py b/insights/tests/datasources/test_candlepin_broker.py index 49d3ceda3..0b0776b05 100644 --- a/insights/tests/datasources/test_candlepin_broker.py +++ b/insights/tests/datasources/test_candlepin_broker.py @@ -1,6 +1,7 @@ import pytest from mock.mock import Mock +from insights.core import ET from insights.core.spec_factory import DatasourceProvider from insights.core.dr import SkipComponent from insights.specs.default import candlepin_broker @@ -141,6 +142,53 @@ RELATIVE_PATH = '/etc/candlepin/broker.xml' +def xml_check_removed(result): + root = ET.fromstring('\n'.join(result)) + assert root is not None + core_ele = root.find('core') + assert core_ele is not None + + search_tags = ['cluster-password', 'acceptors'] + for tag in search_tags: + found = core_ele.find(tag) + assert found is None, 'Tag {} should not be in result'.format(tag) + + +def xml_compare(result, expected): + root_result = ET.fromstring('\n'.join(result)) + root_expected = ET.fromstring('\n'.join(expected)) + + re_core_ele = root_result.find('core') + assert re_core_ele is not None + ex_core_ele = root_expected.find('core') + assert ex_core_ele is not None + + search_tags = ['cluster-user', 'security-enabled'] + for tag in search_tags: + ex_found = ex_core_ele.find(tag) + if ex_found is not None: + re_found = re_core_ele.find(tag) + assert re_found is not None, 'Tag {} is in expected but not in result'.format(tag) + assert re_found.text == ex_found.text, 'Tag {} text is different in expected and result'.format(tag) + + ex_settings = ex_core_ele.find('security-settings') + if ex_settings is not None: + re_settings = re_core_ele.find('security-settings') + assert re_found is not None, 'Tag security-settings is in expected but not in result' + assert re_found.text == ex_found.text, 'Tag {} text is different in expected and result'.format(tag) + ex_settings_dict = {} + re_settings_dict = {} + for setting in ex_settings.findall('security-setting'): + ex_settings_dict[setting.get('match')] = [] + for perm in setting.findall('permission'): + ex_settings_dict[setting.get('match')].append((perm.get('roles'), perm.get('type'))) + for setting in re_settings.findall('security-setting'): + re_settings_dict[setting.get('match')] = [] + for perm in setting.findall('permission'): + re_settings_dict[setting.get('match')].append((perm.get('roles'), perm.get('type'))) + assert ex_settings_dict == re_settings_dict + + def test_candlepin_broker(): candlepin_broker_file = Mock() candlepin_broker_file.content = CANDLEPIN_BROKER.splitlines() @@ -149,7 +197,8 @@ def test_candlepin_broker(): assert result is not None assert isinstance(result, DatasourceProvider) expected = DatasourceProvider(content=CANDLEPIN_BROKER_XML.splitlines(), relative_path=RELATIVE_PATH) - assert result.content == expected.content + xml_check_removed(result.content) + xml_compare(result.content, expected.content) assert result.relative_path == expected.relative_path @@ -170,5 +219,6 @@ def test_candlpin_broker_no_sensitive_info(): assert result is not None assert isinstance(result, DatasourceProvider) expected = DatasourceProvider(content=CANDLE_BROKER_NO_SENTISVE_INFO.splitlines(), relative_path=RELATIVE_PATH) - assert result.content == expected.content + xml_check_removed(result.content) + xml_compare(result.content, 
expected.content) assert result.relative_path == expected.relative_path From 8c4e8ab8f2862967cc8f4e30cb05c37f68ba3648 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Mon, 16 Aug 2021 19:30:16 -0500 Subject: [PATCH 522/892] Update setup.py for python version (#3187) Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- setup.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 22227bd56..68ad58985 100644 --- a/setup.py +++ b/setup.py @@ -77,10 +77,15 @@ def maybe_require(pkg): # https://github.com/davidhalter/jedi/issues/1714 ]) +# python 2.6 requires setuptools~=36.8.0 to support this syntax testing = set([ - 'coverage==4.3.4', - 'pytest==3.0.6', - 'pytest-cov==2.4.0', + 'coverage==4.3.4; python_version < "2.7"', + 'coverage; python_version >= "2.7"', + 'pytest==3.0.6; python_version < "2.7"', + 'pytest~=4.6.0; python_version == "2.7"', + 'pytest; python_version >= "3"', + 'pytest-cov==2.4.0; python_version < "2.7"', + 'pytest-cov; python_version >= "2.7"', 'mock==2.0.0', ]) From 81a965f81e487ff06df7f0ecc29ce79679562b20 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 17 Aug 2021 09:10:11 +0800 Subject: [PATCH 523/892] Add parser mssql api assessment (#3189) * Add parser mssql_api_assessment Signed-off-by: jiazhang * Add parser mssql_api_assessment Signed-off-by: jiazhang * Update test Signed-off-by: jiazhang --- .../mssql_api_assessment.rst | 3 + insights/parsers/mssql_api_assessment.py | 56 +++++++++++++++++++ .../tests/test_mssql_api_assessment.py | 52 +++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 113 insertions(+) create mode 100644 docs/shared_parsers_catalog/mssql_api_assessment.rst create mode 100644 insights/parsers/mssql_api_assessment.py create mode 100644 insights/parsers/tests/test_mssql_api_assessment.py diff --git a/docs/shared_parsers_catalog/mssql_api_assessment.rst b/docs/shared_parsers_catalog/mssql_api_assessment.rst new file mode 100644 index 000000000..f731e0d4e --- /dev/null +++ b/docs/shared_parsers_catalog/mssql_api_assessment.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.mssql_api_assessment
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/mssql_api_assessment.py b/insights/parsers/mssql_api_assessment.py
new file mode 100644
index 000000000..8558768ec
--- /dev/null
+++ b/insights/parsers/mssql_api_assessment.py
@@ -0,0 +1,56 @@
+"""
+MssqlApiAssessment - file ``/var/opt/mssql/log/assessments/assessment-latest``
+==============================================================================
+
+Parsers included in this module are:
+
+MssqlApiAssessment - file ``/var/opt/mssql/log/assessments/assessment-latest``
+"""
+
+from insights import JSONParser, parser
+from insights.specs import Specs
+
+
+@parser(Specs.mssql_api_assessment)
+class MssqlApiAssessment(JSONParser):
+    """
+    Parses the file: ``/var/opt/mssql/log/assessments/assessment-latest``
+
+    Sample content of the file::
+
+        [
+          {
+            "Timestamp": "2021-05-05T21:51:55.2317511-04:00",
+            "Severity": "Information",
+            "TargetType": "Server",
+            "TargetName": "ceph4-mon",
+            "TargetPath": "Server[@Name='ceph4-mon']",
+            "CheckId": "TF174",
+            "CheckName": "TF 174 increases plan cache bucket count",
+            "Message": "Enable trace flag 174 to increase plan cache bucket count",
+            "RulesetName": "Microsoft ruleset",
+            "RulesetVersion": "1.0.305",
+            "HelpLink": "https://docs.microsoft.com/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql"
+          },
+          {
+            "Timestamp": "2021-05-05T21:51:55.2323431-04:00",
+            "Severity": "Information",
+            "TargetType": "Server",
+            "TargetName": "ceph4-mon",
+            "TargetPath": "Server[@Name='ceph4-mon']",
+            "CheckId": "TF834",
+            "CheckName": "TF 834 enables large-page allocations",
+            "Message": "Enable trace flag 834 to use large-page allocations to improve analytical and data warehousing workloads",
+            "RulesetName": "Microsoft ruleset",
+            "RulesetVersion": "1.0.305",
+            "HelpLink": "https://support.microsoft.com/kb/3210239"
+          }
+        ]
+
+    Examples:
+        >>> type(mssql_api_assessment_output)
+        <class 'insights.parsers.mssql_api_assessment.MssqlApiAssessment'>
+        >>> mssql_api_assessment_output[0]["Severity"] == 'Information'
+        True
+    """
+    pass
diff --git a/insights/parsers/tests/test_mssql_api_assessment.py b/insights/parsers/tests/test_mssql_api_assessment.py
new file mode 100644
index 000000000..363d9bad3
--- /dev/null
+++ b/insights/parsers/tests/test_mssql_api_assessment.py
@@ -0,0 +1,52 @@
+import doctest
+
+from insights.parsers import mssql_api_assessment
+from insights.parsers.mssql_api_assessment import MssqlApiAssessment
+from insights.tests import context_wrap
+
+API_OUTPUT = """
+[
+  {
+    "Timestamp": "2021-05-05T21:51:55.2317511-04:00",
+    "Severity": "Information",
+    "TargetType": "Server",
+    "TargetName": "ceph4-mon",
+    "TargetPath": "Server[@Name='ceph4-mon']",
+    "CheckId": "TF174",
+    "CheckName": "TF 174 increases plan cache bucket count",
+    "Message": "Enable trace flag 174 to increase plan cache bucket count",
+    "RulesetName": "Microsoft ruleset",
+    "RulesetVersion": "1.0.305",
+    "HelpLink": "https://docs.microsoft.com/sql/t-sql/database-console-commands/dbcc-traceon-trace-flags-transact-sql"
+  },
+  {
+    "Timestamp": "2021-05-05T21:51:55.2323431-04:00",
+    "Severity": "Information",
+    "TargetType": "Server",
+    "TargetName": "ceph4-mon",
+    "TargetPath": "Server[@Name='ceph4-mon']",
+    "CheckId": "TF834",
+    "CheckName": "TF 834 enables large-page allocations",
+    "Message": "Enable trace flag 834 to use large-page allocations to improve analytical and data warehousing workloads",
+    "RulesetName": "Microsoft ruleset",
+    "RulesetVersion": "1.0.305",
+    "HelpLink": 
"https://support.microsoft.com/kb/3210239" + } +] +""".strip() + + +def test_mssql_api_assessment(): + ret = MssqlApiAssessment(context_wrap(API_OUTPUT)) + assert ret[0]["Severity"] == "Information" + assert ret[0]["TargetName"] == "ceph4-mon" + assert ret[0]["CheckId"] == "TF174" + assert ret[0]["Message"] == "Enable trace flag 174 to increase plan cache bucket count" + + +def test_mssql_api_assessment_doc_examples(): + env = { + 'mssql_api_assessment_output': MssqlApiAssessment(context_wrap(API_OUTPUT)) + } + failed, total = doctest.testmod(mssql_api_assessment, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 216964b2a..9e520fd0a 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -368,6 +368,7 @@ class Specs(SpecSet): mount = RegistryPoint() mounts = RegistryPoint() mssql_conf = RegistryPoint() + mssql_api_assessment = RegistryPoint() multicast_querier = RegistryPoint() multipath_conf = RegistryPoint() multipath_conf_initramfs = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 3c8d3b6a9..22be66cc4 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -475,6 +475,7 @@ def md_device_list(broker): mount = simple_command("/bin/mount") mounts = simple_file("/proc/mounts") mssql_conf = simple_file("/var/opt/mssql/mssql.conf") + mssql_api_assessment = simple_file("/var/opt/mssql/log/assessments/assessment-latest") multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;") multipath_conf = simple_file("/etc/multipath.conf") multipath_conf_initramfs = simple_command("/bin/lsinitrd -f /etc/multipath.conf") From 6bf7d3a3243dc36219e73c4941a18200a6a6470c Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 17 Aug 2021 09:31:20 +0800 Subject: [PATCH 524/892] Remove the spec mssql_api_assessment from default (#3191) Signed-off-by: jiazhang --- insights/specs/default.py | 1 - 1 file changed, 1 deletion(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 22be66cc4..3c8d3b6a9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -475,7 +475,6 @@ def md_device_list(broker): mount = simple_command("/bin/mount") mounts = simple_file("/proc/mounts") mssql_conf = simple_file("/var/opt/mssql/mssql.conf") - mssql_api_assessment = simple_file("/var/opt/mssql/log/assessments/assessment-latest") multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;") multipath_conf = simple_file("/etc/multipath.conf") multipath_conf_initramfs = simple_command("/bin/lsinitrd -f /etc/multipath.conf") From bb090b763888719acf1af24865cee1820585b362 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 17 Aug 2021 10:59:29 +0800 Subject: [PATCH 525/892] Add parser awx_manage_print_settings2 (#3181) Signed-off-by: jiazhang --- insights/parsers/awx_manage.py | 38 ++++++++++++++++++++--- insights/parsers/tests/test_awx_manage.py | 21 ++++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 5 files changed, 56 insertions(+), 6 deletions(-) diff --git a/insights/parsers/awx_manage.py b/insights/parsers/awx_manage.py index 63a07286d..5a192c5ce 100644 --- a/insights/parsers/awx_manage.py +++ b/insights/parsers/awx_manage.py @@ -7,7 +7,8 @@ AnsibleTowerLicenseType - command ``/usr/bin/awx-manage check_license`` AnsibleTowerLicense - command ``/usr/bin/awx-manage 
check_license --data`` --------------------------------------------------------------------------- + +AwxManagePrintSettings - command ``/usr/bin/awx-manage print_settings`` """ from insights import JSONParser, parser, CommandParser @@ -28,10 +29,10 @@ class AnsibleTowerLicenseType(CommandParser, JSONParser): type (str): The license type, e.g. "enterprise" Examples: - >>> type(awx_license) - - >>> awx_license.type == "enterprise" - True + >>> type(awx_license) + + >>> awx_license.type == "enterprise" + True """ def parse_content(self, content): if not content: @@ -59,3 +60,30 @@ class AnsibleTowerLicense(CommandParser, JSONParser): 29885220 """ pass + + +@parser(Specs.awx_manage_print_settings) +class AwxManagePrintSettings(CommandParser, JSONParser): + """ + The AwxManagePrintSettings class parses the command ``awx-manage print_settings INSIGHTS_TRACKING_STATE SYSTEM_UUID INSTALL_UUID TOWER_URL_BASE AWX_CLEANUP_PATHS AWX_PROOT_BASE_PATH --format json`` + + Sample command output:: + + { + "AWX_CLEANUP_PATHS": false, + "AWX_PROOT_BASE_PATH": "/opt/tmp", + "INSIGHTS_TRACKING_STATE": true, + "INSTALL_UUID": "c0d38a6a-4449-4e13-a64b-00e0248ad229", + "SYSTEM_UUID": "eecfd8dc-5028-46ef-9868-86f7d595da13", + "TOWER_URL_BASE": "https://10.72.37.79" + } + + Examples: + >>> type(settings) + + >>> settings['AWX_CLEANUP_PATHS'] + False + >>> settings['SYSTEM_UUID'] == 'eecfd8dc-5028-46ef-9868-86f7d595da13' + True + """ + pass diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py index 23b75f3be..e9f5dbc05 100644 --- a/insights/parsers/tests/test_awx_manage.py +++ b/insights/parsers/tests/test_awx_manage.py @@ -3,7 +3,7 @@ from insights.core import ContentException, ParseException from insights.parsers import awx_manage, SkipException -from insights.parsers.awx_manage import AnsibleTowerLicenseType, AnsibleTowerLicense +from insights.parsers.awx_manage import AnsibleTowerLicenseType, AnsibleTowerLicense, AwxManagePrintSettings from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap @@ -31,6 +31,17 @@ {"contact_email": "test@redhat.com", "company_name": "test Inc", "instance_count": 100, "license_date": 1655092799, "license_type": "enterprise", "subscription_name": "Red Hat Ansible Automation, Standard (100 Managed Nodes)", "sku": "MCT3691", "support_level": "Standard", "product_name": "Red Hat Ansible Automation Platform", "valid_key": true, "satellite": null, "pool_id": "2c92808179803e530179ea5989a157a4", "current_instances": 1, "available_instances": 100, "free_instances": 99, "time_remaining": 29885220, "trial": false, "grace_period_remaining": 32477220, "compliant": true, "date_warning": false, "date_expired": false} """.strip() +AWX_MANAGE_PRINT_SETTINGS = ''' +{ + "AWX_CLEANUP_PATHS": false, + "AWX_PROOT_BASE_PATH": "/opt/tmp", + "INSIGHTS_TRACKING_STATE": true, + "INSTALL_UUID": "c0d38a6a-4449-4e13-a64b-00e0248ad229", + "SYSTEM_UUID": "eecfd8dc-5028-46ef-9868-86f7d595da13", + "TOWER_URL_BASE": "https://10.72.37.79" +} +'''.strip() + def test_ansible_tower_license_type(): ret = AnsibleTowerLicenseType(context_wrap(NO_LICENSE)) @@ -71,10 +82,18 @@ def test_ansible_tower_license__data_ab_type(): AnsibleTowerLicense(context_wrap(NO_LICENSE)) +def test_awx_manage_print_settings(): + settings = awx_manage.AwxManagePrintSettings(context_wrap(AWX_MANAGE_PRINT_SETTINGS)) + assert not settings['AWX_CLEANUP_PATHS'] + assert settings['INSIGHTS_TRACKING_STATE'] + assert settings['SYSTEM_UUID'] == 
"eecfd8dc-5028-46ef-9868-86f7d595da13" + + def test_awx_manage_doc_examples(): env = { 'awx_license': AnsibleTowerLicenseType(context_wrap(GOOD_LICENSE)), 'awx_manage_license': AnsibleTowerLicense(context_wrap(AWX_MANAGE_LICENSE)), + 'settings': AwxManagePrintSettings(context_wrap(AWX_MANAGE_PRINT_SETTINGS)) } failed, total = doctest.testmod(awx_manage, globs=env) assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 9e520fd0a..dc8810824 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -19,6 +19,7 @@ class Specs(SpecSet): aws_instance_type = RegistryPoint() awx_manage_check_license = RegistryPoint() awx_manage_check_license_data = RegistryPoint(filterable=True) + awx_manage_print_settings = RegistryPoint() azure_instance_type = RegistryPoint() azure_instance_plan = RegistryPoint() bios_uuid = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 3c8d3b6a9..84c6ed0f0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -95,6 +95,7 @@ class DefaultSpecs(Specs): aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[IsAWS]) awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license") awx_manage_check_license_data = awx_manage.awx_manage_check_license_data_datasource + awx_manage_print_settings = simple_command("/usr/bin/awx-manage print_settings INSIGHTS_TRACKING_STATE SYSTEM_UUID INSTALL_UUID TOWER_URL_BASE AWX_CLEANUP_PATHS AWX_PROOT_BASE_PATH --format json") azure_instance_type = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2018-10-01&format=text --connect-timeout 5", deps=[IsAzure]) azure_instance_plan = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json --connect-timeout 5", deps=[IsAzure]) bios_uuid = simple_command("/usr/sbin/dmidecode -s system-uuid") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 28e40fdd3..b4f814d50 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -18,6 +18,7 @@ class InsightsArchiveSpecs(Specs): aws_instance_id_doc = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc") aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7") awx_manage_check_license = simple_file("insights_commands/awx-manage_check_license") + awx_manage_print_settings = simple_file("insights_commands/awx-manage_print_settings_INSIGHTS_TRACKING_STATE_SYSTEM_UUID_INSTALL_UUID_TOWER_URL_BASE_AWX_CLEANUP_PATHS_AWX_PROOT_BASE_PATH_--format_json") azure_instance_type = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type") azure_instance_plan = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_plan") bios_uuid = simple_file("insights_commands/dmidecode_-s_system-uuid") From a66dac691e3fbadbea53a62579b282c5746b90ae Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 19 Aug 2021 03:26:56 +0800 Subject: [PATCH 526/892] Merge the -N (show make_none rules) to -S (show rules per type) (#3184) * Merge the --none (show make_none rules) to --show-rules (show rules per type) - the 'make_none' is also a type of rules Signed-off-by: Xiangce Liu * Fix the test error 
Signed-off-by: Xiangce Liu * Add 'none' to markdown and resort the summary for both text and markdown Signed-off-by: Xiangce Liu --- insights/formats/__init__.py | 12 +++++++++--- insights/formats/_markdown.py | 15 +++++++++------ insights/formats/text.py | 29 ++++++++++++----------------- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/insights/formats/__init__.py b/insights/formats/__init__.py index fc325d2ff..ba3d55bc4 100644 --- a/insights/formats/__init__.py +++ b/insights/formats/__init__.py @@ -87,9 +87,9 @@ class EvaluatorFormatterAdapter(FormatterAdapter): def configure(p): p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-S", "--show-rules", nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], + choices=["fail", "info", "pass", "none", "metadata", "fingerprint"], metavar="TYPE", - help="Show results per rule type(s).") + help="Show results per rule's type: 'fail', 'info', 'pass', 'none', 'metadata', and 'fingerprint'") p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", action="store_true") @@ -104,7 +104,7 @@ def __init__(self, args=None): # Drops the '-F' silently when specifying '-m' and '-F' together # --> Do NOT break the Format of the output fail_only = None - self.show_rules = [] # Empty by default, means show ALL types + self.show_rules = [] # Empty by default, means show ALL types (exclude "none") if not args.show_rules and fail_only: self.show_rules = ['rule'] elif args.show_rules: @@ -182,6 +182,9 @@ def get_response_of_types(response, missing=True, show_rules=None): # - When "-m" is specified but "-S" is NOT specified, show all the loaded rules # - When neither "-m" nor "-S" is specified, show all the HIT rules (exclude the "skips") if not show_rules: + # - Discard the "make_none" by default when no "-S" + # That means show "make_none" rules only when "none" is specified in "-S" + response.pop('none') if 'none' in response else None return response # - Discard the "medadata" rules when it's not specified in the "-S" option if 'metadata' not in show_rules and 'metadata' in response.get('system', {}): @@ -195,6 +198,9 @@ def get_response_of_types(response, missing=True, show_rules=None): # - Discard the "make_pass" rules when it's not specified in the "-S" option if 'pass' not in show_rules and 'pass' in response: response.pop('pass') + # - Discard the "make_none" rules when it's not specified in the "-S" option + if 'none' not in show_rules and 'none' in response: + response.pop('none') # - Discard the "fingerprint" rules when it's not specified in the "-S" option if 'fingerprint' not in show_rules and 'fingerprints' in response: response.pop('fingerprints') diff --git a/insights/formats/_markdown.py b/insights/formats/_markdown.py index 90ac235b3..c6144908f 100644 --- a/insights/formats/_markdown.py +++ b/insights/formats/_markdown.py @@ -48,12 +48,13 @@ def __init__(self, self.dropped = dropped self.show_rules = [] if show_rules is None else show_rules - self.counts = {'skip': 0, 'pass': 0, 'rule': 0, 'info': 0, 'metadata': 0, 'metadata_key': 0, 'fingerprint': 0, 'exception': 0} + self.counts = {'skip': 0, 'pass': 0, 'rule': 0, 'info': 0, 'none': 0, 'metadata': 0, 'metadata_key': 0, 'fingerprint': 0, 'exception': 0} self.responses = { 'skip': self.response(label="SKIP", title="Missing Deps: "), 'pass': self.response(label="PASS", title="Passed : "), 'rule': 
self.response(label="FAIL", title="Failed : "), 'info': self.response(label="INFO", title="Info : "), + 'none': self.response(label="RETURNED NONE", title="Ret'd None : "), 'metadata': self.response(label="META", title="Metadata : "), 'metadata_key': self.response(label="META", title="Metadata Key: "), 'fingerprint': self.response(label="FINGERPRINT", title="Fingerprint : "), @@ -148,8 +149,10 @@ def printit(c, v): if _type: if self.missing and _type == 'skip': print_missing(c, v) - elif ((self.show_rules and _type in self.show_rules) or - (not self.show_rules and _type != 'skip')): + elif ( + (self.show_rules and _type in self.show_rules) or + (not self.show_rules and _type not in ['skip', 'none']) + ): printit(c, v) print(file=self.stream) @@ -184,9 +187,9 @@ def configure(p): p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") p.add_argument("-S", "--show-rules", nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], + choices=["fail", "info", "pass", "none", "metadata", "fingerprint"], metavar="TYPE", - help="Show results per rule type(s).") + help="Show results per rule's type: 'fail', 'info', 'pass', 'none', 'metadata', and 'fingerprint'") p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", action="store_true") @@ -197,7 +200,7 @@ def __init__(self, args=None): if args.missing and fail_only: print('Options conflict: -m and -F, drops -F', file=sys.stderr) fail_only = None - self.show_rules = [] # Empty by default, means show ALL types + self.show_rules = [] # Empty by default, means show ALL types (exclude "none") if not args.show_rules and fail_only: self.show_rules = ['rule'] elif args.show_rules: diff --git a/insights/formats/text.py b/insights/formats/text.py index e37e8ed3c..f905eb493 100644 --- a/insights/formats/text.py +++ b/insights/formats/text.py @@ -89,12 +89,10 @@ def __init__(self, broker, missing=False, tracebacks=False, dropped=False, - none=False, show_rules=None, stream=sys.stdout): super(HumanReadableFormat, self).__init__(broker, stream=stream) self.missing = missing - self.none = none self.tracebacks = tracebacks self.dropped = dropped self.show_rules = [] if show_rules is None else show_rules @@ -108,16 +106,15 @@ def print_header(self, header, color): def preprocess(self): response = namedtuple('response', 'color label intl title') self.responses = { + 'skip': response(color=Fore.BLUE, label="SKIP", intl='S', title="Missing Deps: "), 'pass': response(color=Fore.GREEN, label="PASS", intl='P', title="Passed : "), 'rule': response(color=Fore.RED, label="FAIL", intl='F', title="Failed : "), 'info': response(color=Fore.WHITE, label="INFO", intl='I', title="Info : "), - 'skip': response(color=Fore.BLUE, label="SKIP", intl='S', title="Missing Deps: "), - 'fingerprint': response(color=Fore.YELLOW, label="FINGERPRINT", intl='P', - title="Fingerprint : "), + 'none': response(color=Fore.BLUE, label="RETURNED NONE", intl='N', title="Ret'd None : "), 'metadata': response(color=Fore.YELLOW, label="META", intl='M', title="Metadata : "), 'metadata_key': response(color=Fore.MAGENTA, label="META", intl='K', title="Metadata Key: "), + 'fingerprint': response(color=Fore.YELLOW, label="FINGERPRINT", intl='P', title="Fingerprint : "), 'exception': response(color=Fore.RED, label="EXCEPT", intl='E', 
title="Exceptions : "), - 'none': response(color=Fore.BLUE, label="RETURNED NONE", intl='N', title="Ret'd None : ") } self.counts = {} @@ -193,10 +190,11 @@ def printit(c, v): if _type in self.responses: self.counts[_type] += 1 - if ((self.missing and _type == 'skip') or + if ( + (self.missing and _type == 'skip') or (self.show_rules and _type in self.show_rules) or - (self.none and _type == 'none') or - (not self.show_rules and _type not in ['skip', 'none'])): + (not self.show_rules and _type not in ['skip', 'none']) + ): printit(c, v) print(file=self.stream) @@ -225,11 +223,10 @@ def configure(p): p.add_argument("-t", "--tracebacks", help="Show stack traces.", action="store_true") p.add_argument("-d", "--dropped", help="Show collected files that weren't processed.", action="store_true") p.add_argument("-m", "--missing", help="Show missing requirements.", action="store_true") - p.add_argument("-n", "--none", help="Show rules returning None", action="store_true") p.add_argument("-S", "--show-rules", default=[], nargs="+", - choices=["fail", "info", "pass", "metadata", "fingerprint"], + choices=["fail", "info", "pass", "none", "metadata", "fingerprint"], metavar="TYPE", - help="Show results per rule type(s).") + help="Show results per rule's type: 'fail', 'info', 'pass', 'none', 'metadata', and 'fingerprint'") p.add_argument("-F", "--fail-only", help="Show FAIL results only. Conflict with '-m', will be dropped when using them together. This option is deprecated by '-S fail'", action="store_true") @@ -238,12 +235,11 @@ def __init__(self, args=None): self.tracebacks = args.tracebacks self.dropped = args.dropped self.missing = args.missing - self.none = args.none fail_only = args.fail_only - if (self.missing or self.none) and fail_only: - print(Fore.YELLOW + 'Options conflict: -m/-n and -F, drops -F', file=sys.stderr) + if self.missing and fail_only: + print(Fore.YELLOW + 'Options conflict: -m and -F, drops -F', file=sys.stderr) fail_only = None - self.show_rules = [] # Empty by default, means show ALL types + self.show_rules = [] # Empty by default, means show ALL types (excludes "none") if not args.show_rules and fail_only: self.show_rules = ['rule'] elif args.show_rules: @@ -255,7 +251,6 @@ def preprocess(self, broker): self.missing, self.tracebacks, self.dropped, - self.none, self.show_rules, ) self.formatter.preprocess() From b8083c81d8d969c8707808fa68545c3ec713ceb4 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 19 Aug 2021 03:38:22 +0800 Subject: [PATCH 527/892] Skip the parsers in ntp_sources when input is empty (#3193) * Skip when input is empty - and refine the parser to dict/list - add the tests of doc Signed-off-by: Xiangce Liu * add docstring Signed-off-by: Xiangce Liu * update as per review feedback Signed-off-by: Xiangce Liu --- insights/parsers/ntp_sources.py | 127 ++++++++++++++------- insights/parsers/tests/test_ntp_sources.py | 48 +++++++- 2 files changed, 135 insertions(+), 40 deletions(-) diff --git a/insights/parsers/ntp_sources.py b/insights/parsers/ntp_sources.py index 8b1e6ed1f..aee9c7412 100644 --- a/insights/parsers/ntp_sources.py +++ b/insights/parsers/ntp_sources.py @@ -14,23 +14,20 @@ ChronycSources - command ``/usr/bin/chronyc sources`` ----------------------------------------------------- -NtpqLeap - command ``/usr/sbin/ntpq -c 'rv 0 leap'`` ----------------------------------------------------- - NtpqPn - command ``/usr/sbin/ntpq -pn`` --------------------------------------- - +NtpqLeap - command ``/usr/sbin/ntpq -c 'rv 0 leap'`` 
+---------------------------------------------------- """ -import re -from .. import parser, CommandParser +from insights import parser, CommandParser from insights.core.dr import SkipComponent from insights.specs import Specs @parser(Specs.chronyc_sources) -class ChronycSources(CommandParser): +class ChronycSources(CommandParser, list): """ Chronyc Sources parser @@ -49,14 +46,15 @@ class ChronycSources(CommandParser): Examples: - >>> sources = shared[ChronycSources].data - >>> len(sources) + >>> type(chrony_sources) + + >>> len(chrony_sources) 4 - >>> sources[0]['source'] + >>> chrony_sources[0]['source'] '10.20.30.40' - >>> sources[0]['mode'] + >>> chrony_sources[0]['mode'] '^' - >>> sources[0]['state'] + >>> chrony_sources[0]['state'] '-' """ @@ -64,15 +62,34 @@ def parse_content(self, content): """ Get source, mode and state for chrony """ - self.data = [] - for row in content[3:]: - if row.strip(): - values = row.split(" ", 2) - self.data.append({"source": values[1], "mode": values[0][0], "state": values[0][1]}) + data = [] + if len(content) > 3: + for row in content[3:]: + if row.strip(): + values = row.split(" ", 2) + data.append( + { + "source": values[1], + "mode": values[0][0], + "state": values[0][1] + } + ) + + if not data: + raise SkipComponent() + + self.extend(data) + + @property + def data(self): + """ + Set data as property to keep compatibility + """ + return self @parser(Specs.ntpq_leap) -class NtpqLeap(CommandParser): +class NtpqLeap(CommandParser, dict): """ Converts the output of ``ntpq -c 'rv 0 leap'`` into a dictionary in the ``data`` property, and sets the ``leap`` property to the value of the @@ -84,26 +101,43 @@ class NtpqLeap(CommandParser): Examples: - >>> print shared[NtpqLeap].leap # same data + >>> type(ntpq) + + >>> ntpq.leap '00' """ def parse_content(self, content): - if "Connection refused" in content[0]: + if content and "Connection refused" in content[0]: raise SkipComponent("NTP service is down and connection refused") - self.data = {} + + leap = None for line in content: - m = re.search(r'leap=(\d*)', line) - if m: - self.data["leap"] = m.group(1) + if 'leap=' in line: + leap = line.split('leap=')[1].rstrip() + + if leap is None: + raise SkipComponent() + + self.update(leap=leap) + + @property + def data(self): + """ + Set data as property to keep compatibility + """ + return self @property def leap(self): - return self.data.get('leap') + """ + Return the value of the 'leap' + """ + return self.get('leap') @parser(Specs.ntpq_pn) -class NtpqPn(CommandParser): +class NtpqPn(CommandParser, list): """ Get source and flag for each NTP time source from the output of ``/usr/sbin/ntpq -pn``. 
@@ -124,21 +158,36 @@ class NtpqPn(CommandParser): Examples: - >>> sources = shared[NtpqPn].data - >>> len(sources) + >>> type(ntp_sources) + + >>> len(ntp_sources) 4 - >>> sources[0] - {'flag': '*', 'source', '10.20.30.40'} + >>> ntp_sources[0]['source'] + '10.20.30.40' """ def parse_content(self, content): - if "Connection refused" in content[0]: + if content and "Connection refused" in content[0]: raise SkipComponent("NTP service is down and connection refused") - self.data = [] - for row in content[2:]: - if row.strip(): - values = row.split(" ", 2) - if row.startswith(" "): - self.data.append({"source": values[1], "flag": " "}) - else: - self.data.append({"source": values[0][1:], "flag": values[0][0]}) + + data = [] + if len(content) > 2: + for row in content[2:]: + if row.strip(): + values = row.split(" ", 2) + if row.startswith(" "): + data.append({"source": values[1], "flag": " "}) + else: + data.append({"source": values[0][1:], "flag": values[0][0]}) + + if not data: + raise SkipComponent() + + self.extend(data) + + @property + def data(self): + """ + Set data as property to keep compatibility + """ + return self diff --git a/insights/parsers/tests/test_ntp_sources.py b/insights/parsers/tests/test_ntp_sources.py index 590e5b937..d1249d0df 100644 --- a/insights/parsers/tests/test_ntp_sources.py +++ b/insights/parsers/tests/test_ntp_sources.py @@ -1,5 +1,7 @@ import pytest +import doctest from insights.core.dr import SkipComponent +from insights.parsers import ntp_sources from insights.parsers.ntp_sources import ChronycSources, NtpqPn, NtpqLeap from insights.tests import context_wrap @@ -12,6 +14,20 @@ ^+ d.e.f 1 6 377 21 -2629us[-2619us] +/- 86ms """.strip() +chrony_output_doc = """ +210 Number of sources = 6 +MS Name/IP address Stratum Poll Reach LastRx Last sample +=============================================================================== +^- 10.20.30.40 2 9 377 95 -1345us[-1345us] +/- 87ms +^- 10.56.72.8 2 10 377 949 -3449us[-3483us] +/- 120ms +^* 10.64.108.95 2 10 377 371 -91us[ -128us] +/- 30ms +^- 10.8.205.17 2 8 377 27 +7161us[+7161us] +/- 52ms +""".strip() + +empty_chrony_source = "" + +empty_ntpq_leap = "" + ntpq_leap_output = """ leap=00 """.strip() @@ -32,7 +48,18 @@ remote refid st t when poll reach delay offset jitter ============================================================================== 202.118.1.81 .INIT. 16 u - 1024 0 0.000 0.000 0.000 -""" +""".strip() + +ntpd_qn_doc = """ + remote refid st t when poll reach delay offset jitter +============================================================================== ++10.20.30.40 192.231.203.132 3 u 638 1024 377 0.242 2.461 1.886 +*2001:388:608c:8 .GPS. 
1 u 371 1024 377 29.323 1.939 1.312 +-2001:44b8:1::1 216.218.254.202 2 u 396 1024 377 37.869 -3.340 6.458 ++150.203.1.10 202.6.131.118 2 u 509 1024 377 20.135 0.800 3.260 +""".strip() + +empty_ntpq_pn = "" ntp_connection_issue = """ /usr/sbin/ntpq: read: Connection refused @@ -45,6 +72,9 @@ def test_get_chrony_sources(): assert parser_result.data[2].get("state") == "+" assert parser_result.data[2].get("mode") == "^" + with pytest.raises(SkipComponent): + NtpqPn(context_wrap(empty_chrony_source)) + def test_get_ntpq_leap(): parser_result = NtpqLeap(context_wrap(ntpq_leap_output)) @@ -57,6 +87,9 @@ def test_get_ntpq_leap(): NtpqLeap(context_wrap(ntp_connection_issue)) assert "NTP service is down" in str(e) + with pytest.raises(SkipComponent): + NtpqLeap(context_wrap(empty_ntpq_leap)) + def test_get_ntpd_sources(): parser_result = NtpqPn(context_wrap(ntpd_output)) @@ -71,3 +104,16 @@ def test_get_ntpd_sources(): with pytest.raises(SkipComponent) as e: NtpqPn(context_wrap(ntp_connection_issue)) assert "NTP service is down" in str(e) + + with pytest.raises(SkipComponent): + NtpqPn(context_wrap(empty_ntpq_pn)) + + +def test_ntp_sources_doc_examples(): + env = { + 'chrony_sources': ChronycSources(context_wrap(chrony_output_doc)), + 'ntpq': NtpqLeap(context_wrap(ntpq_leap_output)), + 'ntp_sources': NtpqPn(context_wrap(ntpd_qn_doc)), + } + failed, total = doctest.testmod(ntp_sources, globs=env) + assert failed == 0 From d038b556415da9d14048dfb0d5672560b981072b Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 18 Aug 2021 15:00:13 -0500 Subject: [PATCH 528/892] Fix deprecated generator code (#3185) * The code utilizing the iter/next functions in the lsof parser changed in python 3.5 based on PEP 479 * This change makes the code compliant with that PEP and removes the deprecation warning for tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/parsers/lsof.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/insights/parsers/lsof.py b/insights/parsers/lsof.py index 6357d6526..1fcdc1357 100644 --- a/insights/parsers/lsof.py +++ b/insights/parsers/lsof.py @@ -92,7 +92,10 @@ def _start(self, content): line = next(content) while 'COMMAND ' not in line: - line = next(content) + line = next(content, '') + + if not line: + return iter([]) self._calc_indexes(line) return content @@ -112,7 +115,7 @@ def _parse_line(self, line): for heading in self.headings[:-1]: # Use value if (start, end) index of heading is not empty if line[slice(*self.indexes[heading])].strip(): - rdict[heading] = next(rowsplit) + rdict[heading] = next(rowsplit, '') else: rdict = dict(zip(self.headings, rowsplit)) rdict['NAME'] = command From 2b37409ff2b30301389ef3e10f0b982e481359b7 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 19 Aug 2021 04:05:36 +0800 Subject: [PATCH 529/892] Fix the LsPci combiner for full format of 'Slot' (#3190) * Fix the LsPci combiner for full format of 'Slot' - In case the `lspci -k` sometimes outputs the `Slot` in the full format of `domain:bus:device.function`, the combiner will take the `Slot` of the `lspci -k` as the key. 
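  - The match itself is a suffix comparison, since the short
    `bus:device.function` form is a trailing substring of the full
    `domain:bus:device.function` form. A simplified sketch (names are
    illustrative; the combiner itself uses
    v['Slot'].endswith(dev['Slot']) as shown in the diff below):

        k_slots = ['0000:00:07.1', '1a06:00:02.0']   # full-format Slots from lspci -k

        def full_slot(short_slot):
            # '00:07.1' matches '0000:00:07.1' because the short form is a suffix.
            return next(s for s in k_slots if s.endswith(short_slot))

        assert full_slot('00:07.1') == '0000:00:07.1'
        assert full_slot('1a06:00:02.0') == '1a06:00:02.0'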
Signed-off-by: Xiangce Liu * Fix the doc Signed-off-by: Xiangce Liu * Fix the doc error Signed-off-by: Xiangce Liu Co-authored-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/combiners/lspci.py | 15 +++- insights/combiners/tests/test_lspci.py | 107 +++++++++++++++++++++++++ 2 files changed, 120 insertions(+), 2 deletions(-) diff --git a/insights/combiners/lspci.py b/insights/combiners/lspci.py index 0566092cd..42d28acc3 100644 --- a/insights/combiners/lspci.py +++ b/insights/combiners/lspci.py @@ -17,6 +17,12 @@ class LsPci(list): Combines the Parser LsPci of ``/sbin/lspci -k`` command and Parser LsPciVmmkn of ``/sbin/lspci -vmmkn`` command. + .. note:: + In case the ``lspci -k`` sometimes outputs the `Slot` in the full + format of ``domain:bus:device.function``, and the ``lspci -k`` is more + common than ``lspci -vmmkn``, so this combiner will take the `Slot` of + the `lspci -k` as the key. + Typical output of the ``lspci -k`` command is:: 00:00.0 Host bridge: Intel Corporation Haswell-ULT DRAM Controller (rev 09) @@ -119,12 +125,17 @@ def __init__(self, lspci_k, lspci_vmmkn): dev = dev.copy() if lspci_k and dev['Slot'] in lspci_k: # use the local copy to prevent from writing back to the parser - dev_k = lspci_k.data[dev['Slot']].copy() + dev_k = [v for v in lspci_k.data.values() if v['Slot'].endswith(dev['Slot'])][0].copy() + # Since the 'lspci -k' is a more common command than the + # 'lspci -vmmkn', the following line should be commented + # out to use the 'Slot' in 'lspci -k' as the 'Slot' in + # this combiner: + # dev_k.pop('Slot') if 'Slot' in dev_k else None dev_k.pop('Kernel driver in use') if 'Kernel driver in use' in dev_k else None dev_k.pop('Kernel modules') if 'Kernel modules' in dev_k else None dev.update(dev_k) self.append(dev) - self._pci_dev_list = lspci_vmmkn.pci_dev_list + self._pci_dev_list = (lspci_k if lspci_k else lspci_vmmkn).pci_dev_list else: for dev in lspci_k.data.values(): # use the local copy to prevent from writing back to the parser diff --git a/insights/combiners/tests/test_lspci.py b/insights/combiners/tests/test_lspci.py index 178e44971..7bc5feace 100644 --- a/insights/combiners/tests/test_lspci.py +++ b/insights/combiners/tests/test_lspci.py @@ -95,6 +95,77 @@ Module: snd_hda_intel """ +LSPCI_K_LONG_SLOT = """ +0000:00:00.0 Host bridge: Intel Corporation 440BX/ZX/DX - 82443BX/ZX/DX Host bridge (AGP disabled) (rev 03) +0000:00:07.0 ISA bridge: Intel Corporation 82371AB/EB/MB PIIX4 ISA (rev 01) + Subsystem: Microsoft Corporation Device 0000 +0000:00:07.1 IDE interface: Intel Corporation 82371AB/EB/MB PIIX4 IDE (rev 01) + Kernel driver in use: ata_piix + Kernel modules: ata_piix, pata_acpi, ata_generic +0000:00:07.3 Bridge: Intel Corporation 82371AB/EB/MB PIIX4 ACPI (rev 02) + Kernel modules: i2c_piix4 +0000:00:08.0 VGA compatible controller: Microsoft Corporation Hyper-V virtual VGA + Kernel driver in use: hyperv_fb + Kernel modules: hyperv_fb +1a06:00:02.0 Ethernet controller: Mellanox Technologies MT27710 Family [ConnectX-4 Lx Virtual Function] (rev 80) + Subsystem: Mellanox Technologies Device 0190 + Kernel driver in use: mlx5_core + Kernel modules: mlx5_core +""" + +LSPCI_VMMKN_LONG_SLOT = """ +Slot: 00:00.0 +Class: 0600 +Vendor: 8086 +Device: 7192 +Rev: 03 + +Slot: 00:07.0 +Class: 0601 +Vendor: 8086 +Device: 7110 +SVendor: 1414 +SDevice: 0000 +Rev: 01 + +Slot: 00:07.1 +Class: 0101 +Vendor: 8086 +Device: 7111 +Rev: 01 +ProgIf: 80 +Driver: ata_piix +Module: ata_piix +Module: pata_acpi +Module: ata_generic + +Slot: 00:07.3 +Class: 
0680 +Vendor: 8086 +Device: 7113 +Rev: 02 +Module: i2c_piix4 + +Slot: 00:08.0 +Class: 0300 +Vendor: 1414 +Device: 5353 +Driver: hyperv_fb +Module: hyperv_fb + +Slot: 1a06:00:02.0 +Class: 0200 +Vendor: 15b3 +Device: 1016 +SVendor: 15b3 +SDevice: 0190 +PhySlot: 1 +Rev: 80 +Driver: mlx5_core +Module: mlx5_core +NUMANode: 0 +""" + def test_lspci_k(): lspci_k = LsPciParser(context_wrap(LSPCI_K)) @@ -164,6 +235,42 @@ def test_lspci_both(): 'Driver', 'Module', 'Rev', 'SDevice', 'SVendor', 'Slot', 'Vendor']) +def test_lspci_both_long_slot(): + lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN_LONG_SLOT)) + lspci_k = LsPciParser(context_wrap(LSPCI_K_LONG_SLOT)) + lspci = LsPci(lspci_k, lspci_vmmkn) + assert sorted(lspci.pci_dev_list) == ['0000:00:00.0', '0000:00:07.0', '0000:00:07.1', '0000:00:07.3', '0000:00:08.0', '1a06:00:02.0'] + assert lspci.search(Dev_Details__contains='PIIX4 ISA') == [ + { + 'Slot': '0000:00:07.0', 'Class': '0601', 'Vendor': '8086', + 'Device': '7110', 'SVendor': '1414', 'SDevice': '0000', + 'Rev': '01', + 'Subsystem': 'Microsoft Corporation Device 0000', + 'Dev_Details': 'ISA bridge: Intel Corporation 82371AB/EB/MB PIIX4 ISA (rev 01)' + } + ] + assert lspci.search(Slot='1a06:00:02.0') == [ + { + 'Slot': '1a06:00:02.0', 'Class': '0200', 'Vendor': '15b3', + 'Device': '1016', 'SVendor': '15b3', 'SDevice': '0190', + 'Rev': '80', 'Driver': 'mlx5_core', 'PhySlot': '1', + 'Module': ['mlx5_core'], 'NUMANode': '0', + 'Subsystem': 'Mellanox Technologies Device 0190', + 'Dev_Details': 'Ethernet controller: Mellanox Technologies MT27710 Family [ConnectX-4 Lx Virtual Function] (rev 80)' + } + ] + # Make sure the original parsers are untouched + assert sorted(lspci_k.pci_dev_list) == ['0000:00:00.0', '0000:00:07.0', + '0000:00:07.1', '0000:00:07.3', '0000:00:08.0', '1a06:00:02.0'] + assert lspci_k.pci_dev_details('0000:00:00.0').get('Module') is None + assert lspci_k.pci_dev_details('0000:00:08.0').get('Kernel driver in use') == 'hyperv_fb' + assert sorted(lspci_vmmkn.pci_dev_list) == ['00:00.0', '00:07.0', '00:07.1', '00:07.3', '00:08.0', '1a06:00:02.0'] + assert sorted(lspci_vmmkn[0].keys()) == sorted(['Class', 'Device', 'Rev', 'Slot', 'Vendor']) + assert sorted(lspci_vmmkn[-1].keys()) == sorted(['Class', 'Device', + 'Driver', 'Module', 'Rev', 'SDevice', 'SVendor', 'Slot', 'Vendor', + 'PhySlot', 'NUMANode']) + + def test_doc_examples(): lspci_vmmkn = LsPciVmmkn(context_wrap(LSPCI_VMMKN)) lspci_k = LsPciParser(context_wrap(LSPCI_K)) From 4860928037242a15282563a2ac1dc3242828aba6 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 19 Aug 2021 08:35:00 -0500 Subject: [PATCH 530/892] Fix issues in canonical facts ip and mac addr (#3194) * Apply validation to IP addresses and MAC addresses to ensure that unexpected values are not present * Add tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/tests/test_canonical_facts.py | 86 +++++++++++++++++++++++++- insights/util/canonical_facts.py | 44 ++++++++++--- 2 files changed, 120 insertions(+), 10 deletions(-) diff --git a/insights/tests/test_canonical_facts.py b/insights/tests/test_canonical_facts.py index 2c7d5c983..ad1bfc1e7 100644 --- a/insights/tests/test_canonical_facts.py +++ b/insights/tests/test_canonical_facts.py @@ -1,4 +1,8 @@ -from insights.util.canonical_facts import _filter_falsy +import uuid +from insights.tests import context_wrap +from insights.util.canonical_facts import ( + _filter_falsy, _safe_parse, IPs, valid_ipv4_address_or_None, 
valid_mac_addresses, + valid_uuid_or_None) def test_identity(): @@ -11,3 +15,83 @@ def test_drops_none(): def test_drops_empty_list(): assert {"foo": "bar"} == _filter_falsy({"foo": "bar", "baz": []}) + + +UUID_VALID = str(uuid.uuid4()) +UUID_INVALID = 'invalid uuid' + + +def test_valid_uuid_or_None(): + result = valid_uuid_or_None(UUID_VALID) + assert result == UUID_VALID + result = valid_uuid_or_None(UUID_INVALID) + assert result is None + + +IPV4_VALID = [ + '192.168.1.1', + '1.1.1.1', + '10.110.1.254', + '0.0.0.0', +] +IPV4_INVALID = [ + '4001:631:51:a066:250:56ff:1ea7:1696', + '0.0', + '192.168', + 'abc', +] + + +def test_valid_ipv4_address_or_None(): + for ip in IPV4_VALID: + result = valid_ipv4_address_or_None(ip) + assert result == ip, 'Failed valid IP address {}'.format(ip) + for ip in IPV4_INVALID: + result = valid_ipv4_address_or_None(ip) + assert result is None, 'Failed invalid IP address {}'.format(ip) + + +class HasContent(): + def __init__(self, content=None): + self.content = [content, ] if content is not None else [] + + +MACS_VALID = [ + HasContent('aa:26:28:12:22:1a'), + HasContent(' aa:26:28:12:22:1a '), + HasContent('aa:26:28:12:22:2a'), + HasContent('aa:26:28:12:22:3a'), + HasContent('00:00:00:00:00:00'), +] +MACS_INVALID = [ + HasContent('aa:26:28:12:22:1a:aa:26:28:12:22:1a'), + HasContent('# aa:26:28:12:22:1a'), + HasContent('00:00:00:00'), +] + + +def test_valid_mac_addresses(): + result = valid_mac_addresses(MACS_VALID) + assert result == [m.content[0].strip() for m in MACS_VALID] + result = valid_mac_addresses(MACS_INVALID) + assert result == [] + + +HOSTNAME_I_VALID = '192.168.1.71 10.88.0.1 172.17.0.1 172.18.0.1 10.10.121.131\n' +HOSTNAME_I_INVALID = '19f.168.1.71 0.0 2307:f1c0:ff13:f036:c0::214 f001:1a98:380:4::1d 2f07:a160:ff01:2092:e32:a5ff:ff37:7114' + + +def test_IPs(): + ips = IPs(context_wrap(HOSTNAME_I_VALID)) + assert ips.data == HOSTNAME_I_VALID.strip().split() + ips = IPs(context_wrap(HOSTNAME_I_INVALID)) + assert ips.data == [] + + +def test_safe_parse(): + result = _safe_parse(HasContent('some content')) + assert result == 'some content' + result = _safe_parse(HasContent()) + assert result is None + result = _safe_parse(None) + assert result is None diff --git a/insights/util/canonical_facts.py b/insights/util/canonical_facts.py index ebe376192..215b71dcd 100644 --- a/insights/util/canonical_facts.py +++ b/insights/util/canonical_facts.py @@ -2,6 +2,9 @@ from __future__ import print_function +import re +import socket + from insights import rule, make_metadata, run from insights.specs import Specs from insights.core import Parser @@ -22,14 +25,41 @@ def valid_uuid_or_None(s): return None +def valid_ipv4_address_or_None(addr): + """ str: Returns the input value if it is a valid IPV4 address """ + try: + socket.inet_pton(socket.AF_INET, addr) + return addr + except socket.error: + return None + + +def valid_mac_addresses(mac_address_datasources): + """ list: Return a list of valid mac addresses from a list of datasources """ + valid_addrs = [] + for ds in mac_address_datasources: + try: + addr = ds.content[0].strip() + except Exception: + continue + # Only look for addresses in the form 00:00:00:00:00:00 + match = re.match("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", addr) + if match is not None: + valid_addrs.append(addr) + + return valid_addrs + + @parser(Specs.ip_addresses) class IPs(Parser): """ - Reads the output of hostname -I and constructs a list of all assigned IP - addresses. 
+ Reads the output of ``hostname -I`` and constructs a list of all assigned IP + addresses. This command should only output IPV4 addresses and should not + include localhost, but sometimes it does. The validation function removes + those from the list. Example output:: - 192.168.1.71 10.88.0.1 172.17.0.1 172.18.0.1 10.10.121.131 2600:1700:720:7e30:e4ef:e9d0:7ea1:c8a7 + 192.168.1.71 10.88.0.1 172.17.0.1 172.18.0.1 10.10.121.131 Resultant data structure:: [ @@ -38,12 +68,10 @@ class IPs(Parser): "172.17.0.1", "172.18.0.1", "10.10.121.131", - "2600:1700:720:7e30:e4ef:e9d0:7ea1:c8a7" ] """ - def parse_content(self, content): - self.data = content[0].rstrip().split() + self.data = list(filter(None, [valid_ipv4_address_or_None(addr) for addr in content[0].rstrip().split()])) @parser(Specs.subscription_manager_id) @@ -99,9 +127,7 @@ def canonical_facts( bios_uuid=valid_uuid_or_None(_safe_parse(bios_uuid)), subscription_manager_id=valid_uuid_or_None(submanid.data if submanid else None), ip_addresses=ips.data if ips else [], - mac_addresses=list( - filter(None, (_safe_parse(c) for c in mac_addresses)) - ) if mac_addresses else [], + mac_addresses=valid_mac_addresses(mac_addresses) if mac_addresses else [], fqdn=_safe_parse(fqdn), ) From 70a2827febfd6f2795f6ab09e43d239c4dc8666b Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Tue, 24 Aug 2021 20:11:38 -0500 Subject: [PATCH 531/892] Adding ss command back in without restrictions (#3196) * Due to problems with netstat output formatting, ss is the best source for network information * The ss command was restricted to only run if certain modules were already loaded since those modules were necessary for ss. This change removes those restrictions. The side-effect is that the modules inet_diag, tcp_diag, and udp_diag may be loaded by execution of the ss command. Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/specs/default.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 84c6ed0f0..7cd565f98 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -32,7 +32,6 @@ from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP from insights.components.ceph import IsCephMonitor from insights.parsers.mdstat import Mdstat -from insights.parsers.lsmod import LsMod from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion from insights.parsers.mount import Mount from insights.specs import Specs @@ -694,20 +693,7 @@ def pmlog_summary_file(broker): softnet_stat = simple_file("proc/net/softnet_stat") software_collections_list = simple_command('/usr/bin/scl --list') spamassassin_channels = simple_command("/bin/grep -r '^\\s*CHANNELURL=' /etc/mail/spamassassin/channel.d") - - @datasource(LsMod, HostContext) - def is_mod_loaded_for_ss(broker): - """ - bool: Returns True if the kernel modules required by ``ss -tupna`` - command are loaded. 
- """ - lsmod = broker[LsMod] - req_mods = ['inet_diag', 'tcp_diag', 'udp_diag'] - if all(mod in lsmod for mod in req_mods): - return True - raise SkipComponent - - ss = simple_command("/usr/sbin/ss -tupna", deps=[is_mod_loaded_for_ss]) + ss = simple_command("/usr/sbin/ss -tupna") ssh_config = simple_file("/etc/ssh/ssh_config") ssh_config_d = glob_file(r"/etc/ssh/ssh_config.d/*.conf") ssh_foreman_proxy_config = simple_file("/usr/share/foreman-proxy/.ssh/ssh_config") From be8acd9fddb0a81b24ad602a7d1871672cd2e5ce Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Thu, 26 Aug 2021 08:15:11 -0500 Subject: [PATCH 532/892] Revert "Fix deprecated generator code (#3185)" (#3200) This reverts commit d038b556415da9d14048dfb0d5672560b981072b. --- insights/parsers/lsof.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/insights/parsers/lsof.py b/insights/parsers/lsof.py index 1fcdc1357..6357d6526 100644 --- a/insights/parsers/lsof.py +++ b/insights/parsers/lsof.py @@ -92,10 +92,7 @@ def _start(self, content): line = next(content) while 'COMMAND ' not in line: - line = next(content, '') - - if not line: - return iter([]) + line = next(content) self._calc_indexes(line) return content @@ -115,7 +112,7 @@ def _parse_line(self, line): for heading in self.headings[:-1]: # Use value if (start, end) index of heading is not empty if line[slice(*self.indexes[heading])].strip(): - rdict[heading] = next(rowsplit, '') + rdict[heading] = next(rowsplit) else: rdict = dict(zip(self.headings, rowsplit)) rdict['NAME'] = command From 990c2285d8b4dbfb8d7c4f6d08f823e8be3e7dd9 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Thu, 26 Aug 2021 09:43:53 -0400 Subject: [PATCH 533/892] Setup the ability to build core as an rpm (#3195) * Initialized tito, and set the props to allow tito tag to update the spec file and the VERSION file so we don't have to manually update each on release. * Added rpm spec file and script to build the rpm locally. * Added a new core rpm specific MANIFEST file that removes all the tests and examples directory. * Updated the readme with instructions on how to build the rpm locally, and then install with dnf/yum. Signed-off-by: Ryan Blakley --- .tito/packages/.readme | 3 +++ .tito/tito.props | 9 +++++++++ .tito/version_template_file | 1 + MANIFEST.in.core | 17 ++++++++++++++++ build_core_rpm.sh | 10 ++++++++++ insights-core.spec | 39 +++++++++++++++++++++++++++++++++++++ 6 files changed, 79 insertions(+) create mode 100644 .tito/packages/.readme create mode 100644 .tito/tito.props create mode 100644 .tito/version_template_file create mode 100644 MANIFEST.in.core create mode 100755 build_core_rpm.sh create mode 100644 insights-core.spec diff --git a/.tito/packages/.readme b/.tito/packages/.readme new file mode 100644 index 000000000..b9411e2d1 --- /dev/null +++ b/.tito/packages/.readme @@ -0,0 +1,3 @@ +the .tito/packages directory contains metadata files +named after their packages. Each file has the latest tagged +version and the project's relative directory. 
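For illustration, after the first ``tito tag`` the metadata file for this
project (assumed to be ``.tito/packages/insights-core``) would hold a single
line pairing the tagged version-release with that relative directory, for
example (version taken from the spec file below)::

    3.0.8-1 ./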
diff --git a/.tito/tito.props b/.tito/tito.props new file mode 100644 index 000000000..e89e1c701 --- /dev/null +++ b/.tito/tito.props @@ -0,0 +1,9 @@ +[buildconfig] +builder = tito.builder.Builder +tagger = tito.tagger.VersionTagger +changelog_do_not_remove_cherrypick = 0 +changelog_format = %s (%ae) +tag_format = {component}-{version} +[version_template] +template_file = ./.tito/version_template_file +destination_file = ./insights/VERSION diff --git a/.tito/version_template_file b/.tito/version_template_file new file mode 100644 index 000000000..f43b9c667 --- /dev/null +++ b/.tito/version_template_file @@ -0,0 +1 @@ +$version diff --git a/MANIFEST.in.core b/MANIFEST.in.core new file mode 100644 index 000000000..bd9dcfee7 --- /dev/null +++ b/MANIFEST.in.core @@ -0,0 +1,17 @@ +include insights/defaults.yaml +include insights/NAME +include insights/VERSION +include insights/COMMIT +include insights/RELEASE +prune examples +prune insights/client +prune insights/combiners/tests +prune insights/components/tests +prune insights/parsers/tests +prune insights/parsr/tests +prune insights/plugins +prune insights/parsr/examples/tests +prune insights/parsr/query/tests +prune insights/tests +include insights/parsers/__init__.py +include insights/combiners/__init__.py diff --git a/build_core_rpm.sh b/build_core_rpm.sh new file mode 100755 index 000000000..d83536c75 --- /dev/null +++ b/build_core_rpm.sh @@ -0,0 +1,10 @@ +#!/bin/bash +PYTHON=${1:-python} + +rm -rf BUILD BUILDROOT RPMS SRPMS +rm -rf insights_core.egg-info +cp MANIFEST.in.core MANIFEST.in +$PYTHON setup.py sdist +rpmbuild -ba -D "_topdir $PWD" -D "_sourcedir $PWD/dist" insights-core.spec +rm -rf dist BUILD BUILDROOT +git checkout MANIFEST.in diff --git a/insights-core.spec b/insights-core.spec new file mode 100644 index 000000000..1b0300bad --- /dev/null +++ b/insights-core.spec @@ -0,0 +1,39 @@ +Name: insights-core +Version: 3.0.8 +Release: 1%{?dist} +Summary: Insights Core is a data collection and analysis framework. + +License: ASL 2.0 +URL: https://github.com/RedHatInsights/insights-core +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch +BuildRequires: python3-devel +BuildRequires: python3-setuptools + +Requires: python3 +Requires: python3-colorama +Requires: python3-defusedxml +Requires: python3-lockfile +Requires: python3-jinja2 +Requires: python3-redis +Requires: python3-requests +Requires: python3-six + +%description +Insights Core is a data collection and analysis framework. + +%prep +%setup -q -n %{name}-%{version} + +%install +rm -rf $RPM_BUILD_ROOT +%{__python3} setup.py install -O1 --root $RPM_BUILD_ROOT +rm -rf $RPM_BUILD_ROOT/usr/bin + +%files +# For noarch packages: sitelib +%{python3_sitelib}/* + +%changelog + From 47a3aa8de1b1f58e9f55a761afb848d39e70ff05 Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Mon, 30 Aug 2021 12:27:15 -0400 Subject: [PATCH 534/892] Consolidate network logging and timeouts (#3112) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * consolidate network logging to core request function * Remove duplicate timeout argument * Use connection.get instead of session.get Session is no longer available in InsightsClient. 
* Mock client.connection.get, not client.session.get * Fix fetch tests * Remove unused variables * Fix _get to _cached_get Signed-off-by: Jeremy Crafts Signed-off-by: Štěpán Tomsa Co-authored-by: Glutexo --- insights/client/__init__.py | 12 +- insights/client/collection_rules.py | 8 +- insights/client/connection.py | 141 ++++++++---------- .../connection/test_LEGACY_reg_check.py | 12 +- .../client/connection/test_branch_info.py | 7 +- .../tests/client/connection/test_checkin.py | 50 ++++--- .../tests/client/connection/test_diagnosis.py | 29 +++- .../tests/client/connection/test_reg_check.py | 11 +- insights/tests/client/init/test_fetch.py | 11 +- insights/tests/client/test_platform.py | 12 +- 10 files changed, 153 insertions(+), 140 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 96357d380..077e56750 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -61,16 +61,14 @@ def __init__(self, config=None, from_phase=True, **kwargs): # setup insights connection placeholder # used for requests - self.session = None self.connection = None self.tmpdir = None def _net(func): def _init_connection(self, *args, **kwargs): # setup a request session - if not self.config.offline and not self.session: + if not self.config.offline and not self.connection: self.connection = client.get_connection(self.config) - self.session = self.connection.session return func(self, *args, **kwargs) return _init_connection @@ -107,8 +105,7 @@ def get_egg_url(self): url = self.connection.base_url + '/platform' + constants.module_router_path else: url = self.connection.base_url + constants.module_router_path - logger.log(NETWORK, "GET %s", url) - response = self.session.get(url, timeout=self.config.http_timeout) + response = self.connection.get(url) if response.status_code == 200: return response.json()["url"] else: @@ -205,10 +202,10 @@ def _fetch(self, path, etag_file, target_path, force): if current_etag and not force: logger.debug('Requesting new file with etag %s', current_etag) etag_headers = {'If-None-Match': current_etag} - response = self.session.get(url, headers=etag_headers, timeout=self.config.http_timeout) + response = self.connection.get(url, headers=etag_headers) else: logger.debug('Found no etag or forcing fetch') - response = self.session.get(url, timeout=self.config.http_timeout) + response = self.connection.get(url) except ConnectionError as e: logger.error(e) logger.error('The Insights API could not be reached.') @@ -256,6 +253,7 @@ def update(self): return True if self.config.auto_update: + logger.debug("Egg update enabled") # fetch the new eggs and gpg egg_paths = self.fetch() diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py index 32441c6e8..3e969e885 100644 --- a/insights/client/collection_rules.py +++ b/insights/client/collection_rules.py @@ -190,9 +190,8 @@ def get_collection_rules(self, raw=False): logger.debug("Attemping to download collection rules from %s", self.collection_rules_url) - logger.log(NETWORK, "GET %s", self.collection_rules_url) try: - req = self.conn.session.get( + req = self.conn.get( self.collection_rules_url, headers=({'accept': 'text/plain'})) if req.status_code == 200: @@ -227,9 +226,8 @@ def fetch_gpg(self): self.collection_rules_url + ".asc") headers = ({'accept': 'text/plain'}) - logger.log(NETWORK, "GET %s", self.collection_rules_url + '.asc') - config_sig = self.conn.session.get(self.collection_rules_url + '.asc', - headers=headers) + config_sig = 
self.conn.get(self.collection_rules_url + '.asc', + headers=headers) if config_sig.status_code == 200: logger.debug("Successfully downloaded GPG signature") return config_sig.text diff --git a/insights/client/connection.py b/insights/client/connection.py index 46e4170b4..7f253bd84 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -175,6 +175,37 @@ def _init_session(self): connection.proxy_headers = auth_map return session + def _http_request(self, url, method, **kwargs): + ''' + Perform an HTTP request, net logging, and error handling + Parameters + url - URL to perform the request against + method - HTTP method, used for logging + kwargs - Rest of the args to pass to the request function + Returns + HTTP response object + ''' + logger.log(NETWORK, "%s %s", method, url) + res = self.session.request(url=url, method=method, timeout=self.config.http_timeout, **kwargs) + logger.log(NETWORK, "HTTP Status: %d %s", res.status_code, res.reason) + logger.log(NETWORK, "HTTP Response Text: %s", res.text) + return res + + def get(self, url, **kwargs): + return self._http_request(url, 'GET', **kwargs) + + def post(self, url, **kwargs): + return self._http_request(url, 'POST', **kwargs) + + def put(self, url, **kwargs): + return self._http_request(url, 'PUT', **kwargs) + + def patch(self, url, **kwargs): + return self._http_request(url, 'PATCH', **kwargs) + + def delete(self, url, **kwargs): + return self._http_request(url, 'DELETE', **kwargs) + @property def user_agent(self): """ @@ -313,13 +344,9 @@ def _legacy_test_urls(self, url, method): try: logger.log(NETWORK, "Testing: %s", test_url + ext) if method == "POST": - test_req = self.session.post( - test_url + ext, timeout=self.config.http_timeout, data=test_flag) + test_req = self.post(test_url + ext, data=test_flag) elif method == "GET": - test_req = self.session.get(test_url + ext, timeout=self.config.http_timeout) - logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code) - logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason) - logger.log(NETWORK, "HTTP Response Text: %s", test_req.text) + test_req = self.get(test_url + ext) # Strata returns 405 on a GET sometimes, this isn't a big deal if test_req.status_code in (200, 201): logger.info( @@ -350,12 +377,9 @@ def _test_urls(self, url, method): 'file': ('test.tar.gz', test_tar, 'application/vnd.redhat.advisor.collection+tgz'), 'metadata': '{\"test\": \"test\"}' } - test_req = self.session.post(url, timeout=self.config.http_timeout, files=test_files) + test_req = self.post(url, files=test_files) elif method == "GET": - test_req = self.session.get(url, timeout=self.config.http_timeout) - logger.log(NETWORK, "HTTP Status Code: %d", test_req.status_code) - logger.log(NETWORK, "HTTP Status Text: %s", test_req.reason) - logger.log(NETWORK, "HTTP Response Text: %s", test_req.text) + test_req = self.get(url) if test_req.status_code in (200, 201, 202): logger.info( "Successfully connected to: %s", url) @@ -501,10 +525,7 @@ def get_branch_info(self): logger.debug(u'Obtaining branch information from %s', self.branch_info_url) - logger.log(NETWORK, u'GET %s', self.branch_info_url) - response = self.session.get(self.branch_info_url, - timeout=self.config.http_timeout) - logger.log(NETWORK, u'GET branch_info status: %s', response.status_code) + response = self.get(self.branch_info_url) if response.status_code != 200: logger.debug("There was an error obtaining branch information.") logger.debug(u'Bad status from server: %s', response.status_code) @@ -552,10 +573,9 
@@ def create_system(self, new_machine_id=False): post_system_url = self.api_url + '/v1/systems' logger.debug("POST System: %s", post_system_url) logger.debug(data) - logger.log(NETWORK, "POST %s", post_system_url) - return self.session.post(post_system_url, - headers={'Content-Type': 'application/json'}, - data=data) + return self.post(post_system_url, + headers={'Content-Type': 'application/json'}, + data=data) # -LEGACY- def group_systems(self, group_name, systems): @@ -571,35 +591,24 @@ def group_systems(self, group_name, systems): group_path = self.api_url + '/v1/groups' group_get_path = group_path + ('?display_name=%s' % quote(group_name)) - logger.debug("GET group: %s", group_get_path) - logger.log(NETWORK, "GET %s", group_get_path) - get_group = self.session.get(group_get_path) - logger.debug("GET group status: %s", get_group.status_code) + get_group = self.get(group_get_path) if get_group.status_code == 200: api_group_id = get_group.json()['id'] if get_group.status_code == 404: # Group does not exist, POST to create - logger.debug("POST group") data = json.dumps({'display_name': group_name}) - logger.log(NETWORK, "POST", group_path) - post_group = self.session.post(group_path, - headers=headers, - data=data) - logger.debug("POST group status: %s", post_group.status_code) - logger.debug("POST Group: %s", post_group.json()) + post_group = self.post(group_path, + headers=headers, + data=data) self.handle_fail_rcs(post_group) api_group_id = post_group.json()['id'] - logger.debug("PUT group") data = json.dumps(systems) - logger.log(NETWORK, "PUT %s", group_path + ('/%s/systems' % api_group_id)) - put_group = self.session.put(group_path + - ('/%s/systems' % api_group_id), - headers=headers, - data=data) - logger.debug("PUT group status: %d", put_group.status_code) - logger.debug("PUT Group: %s", put_group.json()) + self.put(group_path + + ('/%s/systems' % api_group_id), + headers=headers, + data=data) # -LEGACY- # Keeping this function around because it's not private and I don't know if anything else uses it @@ -620,8 +629,7 @@ def _legacy_api_registration_check(self): machine_id = generate_machine_id() try: url = self.api_url + '/v1/systems/' + machine_id - logger.log(NETWORK, "GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) + res = self.get(url) except requests.ConnectionError: # can't connect, run connection test logger.error('Connection timed out. 
Running connection test...') @@ -667,8 +675,7 @@ def _fetch_system_by_machine_id(self): url = self.base_url + '/platform/inventory/v1/hosts?insights_id=' + machine_id else: url = self.inventory_url + '/hosts?insights_id=' + machine_id - logger.log(NETWORK, "GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) + res = self.get(url) except REQUEST_FAILED_EXCEPTIONS as e: _api_request_failed(e) return None @@ -717,8 +724,7 @@ def _legacy_unregister(self): try: logger.debug("Unregistering %s", machine_id) url = self.api_url + "/v1/systems/" + machine_id - logger.log(NETWORK, "DELETE %s", url) - self.session.delete(url) + self.delete(url) logger.info( "Successfully unregistered from the Red Hat Insights Service") return True @@ -741,8 +747,7 @@ def unregister(self): try: logger.debug("Unregistering host...") url = self.inventory_url + "/hosts/" + results[0]['id'] - logger.log(NETWORK, "DELETE %s", url) - response = self.session.delete(url) + response = self.delete(url) response.raise_for_status() logger.info( "Successfully unregistered from the Red Hat Insights Service") @@ -822,11 +827,8 @@ def _legacy_upload_archive(self, data_collected, duration): logger.debug("Uploading %s to %s", data_collected, upload_url) headers = {'x-rh-collection-time': str(duration)} - logger.log(NETWORK, "POST %s", upload_url) - upload = self.session.post(upload_url, files=files, headers=headers) + upload = self.post(upload_url, files=files, headers=headers) - logger.log(NETWORK, "Upload status: %s %s %s", - upload.status_code, upload.reason, upload.text) if upload.status_code in (200, 201): the_json = json.loads(upload.text) else: @@ -870,12 +872,8 @@ def upload_archive(self, data_collected, content_type, duration=None): 'metadata': c_facts } logger.debug("Uploading %s to %s", data_collected, upload_url) + upload = self.post(upload_url, files=files, headers={}) - logger.log(NETWORK, "POST %s", upload_url) - upload = self.session.post(upload_url, files=files, headers={}) - - logger.log(NETWORK, "Upload status: %s %s %s", - upload.status_code, upload.reason, upload.text) logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None)) if upload.status_code in (200, 202): # 202 from platform, no json response @@ -903,19 +901,16 @@ def _legacy_set_display_name(self, display_name): try: url = self.api_url + '/v1/systems/' + machine_id - logger.log(NETWORK, "GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) + res = self.get(url) old_display_name = json.loads(res.content).get('display_name', None) if display_name == old_display_name: logger.debug('Display name unchanged: %s', old_display_name) return True - logger.log(NETWORK, "PUT %s", url) - res = self.session.put(url, - timeout=self.config.http_timeout, - headers={'Content-Type': 'application/json'}, - data=json.dumps( - {'display_name': display_name})) + res = self.put(url, + headers={'Content-Type': 'application/json'}, + data=json.dumps( + {'display_name': display_name})) if res.status_code == 200: logger.info('System display name changed from %s to %s', old_display_name, @@ -948,8 +943,7 @@ def set_display_name(self, display_name): req_url = self.inventory_url + '/hosts/' + inventory_id try: - logger.log(NETWORK, "PATCH %s", req_url) - res = self.session.patch(req_url, json={'display_name': display_name}) + res = self.patch(req_url, json={'display_name': display_name}) except REQUEST_FAILED_EXCEPTIONS as e: _api_request_failed(e) return False @@ -970,8 +964,7 @@ def set_ansible_host(self, 
ansible_host): req_url = self.inventory_url + '/hosts/' + inventory_id try: - logger.log(NETWORK, "PATCH %s", req_url) - res = self.session.patch(req_url, json={'ansible_host': ansible_host}) + res = self.patch(req_url, json={'ansible_host': ansible_host}) except REQUEST_FAILED_EXCEPTIONS as e: _api_request_failed(e) return False @@ -993,8 +986,7 @@ def get_diagnosis(self, remediation_id=None): # validate this? params['remediation'] = remediation_id try: - logger.log(NETWORK, "GET %s", diag_url) - res = self.session.get(diag_url, params=params, timeout=self.config.http_timeout) + res = self.get(diag_url, params=params) except (requests.ConnectionError, requests.Timeout) as e: _api_request_failed(e) return False @@ -1004,7 +996,7 @@ def get_diagnosis(self, remediation_id=None): return None return res.json() - def _get(self, url): + def _cached_get(self, url): ''' Submits a GET request to @url, caching the result, and returning the response body, if any. It makes the response status code opaque @@ -1019,8 +1011,7 @@ def _get(self, url): if item is not None: headers["If-None-Match"] = item.etag - logger.log(NETWORK, "GET %s", url) - res = self.session.get(url, headers=headers) + res = self.get(url, headers=headers) if res.status_code in [requests.codes.OK, requests.codes.NOT_MODIFIED]: if res.status_code == requests.codes.OK: @@ -1040,7 +1031,7 @@ def get_advisor_report(self): Retrieve advisor report ''' url = self.inventory_url + "/hosts?insights_id=%s" % generate_machine_id() - content = self._get(url) + content = self._cached_get(url) if content is None: return None @@ -1059,7 +1050,7 @@ def get_advisor_report(self): host_id = host_details["results"][0]["id"] url = self.base_url + "/insights/v1/system/%s/reports/" % host_id - content = self._get(url) + content = self._cached_get(url) if content is None: return None @@ -1086,7 +1077,7 @@ def checkin(self): url = self.inventory_url + "/hosts/checkin" logger.debug("Sending check-in request to %s with %s" % (url, canonical_facts)) try: - response = self.session.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(canonical_facts)) + response = self.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(canonical_facts)) # Change to POST when the API is fixed. 
except REQUEST_FAILED_EXCEPTIONS as exception: _api_request_failed(exception) diff --git a/insights/tests/client/connection/test_LEGACY_reg_check.py b/insights/tests/client/connection/test_LEGACY_reg_check.py index 467413f8b..6655fb980 100644 --- a/insights/tests/client/connection/test_LEGACY_reg_check.py +++ b/insights/tests/client/connection/test_LEGACY_reg_check.py @@ -19,7 +19,7 @@ def test_registration_check_ok_reg(get_proxies, _init_session, _): res._content = json.dumps({'unregistered_at': None}) res.status_code = 200 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() @@ -38,7 +38,7 @@ def test_registration_check_ok_reg_then_unreg(get_proxies, _init_session, _): res._content = json.dumps({'unregistered_at': '2019-04-10'}) res.status_code = 200 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() == '2019-04-10' @@ -57,7 +57,7 @@ def test_registration_check_ok_unreg(get_proxies, _init_session, _): res._content = json.dumps({}) res.status_code = 404 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() is None @@ -76,7 +76,7 @@ def test_registration_check_bad_res(get_proxies, _init_session, _): res._content = 'zSDFasfghsRGH' res.status_code = 500 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() is False @@ -92,7 +92,7 @@ def test_registration_check_conn_error(test_connection, get_proxies, _init_sessi config = Mock(legacy_upload=True, base_url='example.com') conn = InsightsConnection(config) - conn.session.get = MagicMock() - conn.session.get.side_effect = requests.ConnectionError() + conn.get = MagicMock() + conn.get.side_effect = requests.ConnectionError() assert conn.api_registration_check() is False test_connection.assert_called_once() diff --git a/insights/tests/client/connection/test_branch_info.py b/insights/tests/client/connection/test_branch_info.py index 330df3ee9..1eadb90ca 100644 --- a/insights/tests/client/connection/test_branch_info.py +++ b/insights/tests/client/connection/test_branch_info.py @@ -1,5 +1,5 @@ from insights.client.connection import InsightsConnection -from mock.mock import Mock, patch +from mock.mock import Mock, MagicMock, patch @patch("insights.client.connection.InsightsConnection._init_session") @@ -7,11 +7,12 @@ @patch("insights.client.connection.constants.cached_branch_info", "/tmp/insights-test-cached-branchinfo") def test_request(get_proxies, init_session): """ - The request to get branch info is issued with correct timeout set. + The request to get branch info is issued. 
""" config = Mock(base_url="www.example.com", branch_info_url="https://www.example.com/branch_info") connection = InsightsConnection(config) + connection.get = MagicMock() connection.get_branch_info() - init_session.return_value.get.assert_called_once_with(config.branch_info_url, timeout=config.http_timeout) + connection.get.assert_called_once_with(config.branch_info_url) diff --git a/insights/tests/client/connection/test_checkin.py b/insights/tests/client/connection/test_checkin.py index 7b399db0c..356bc55e2 100644 --- a/insights/tests/client/connection/test_checkin.py +++ b/insights/tests/client/connection/test_checkin.py @@ -18,16 +18,18 @@ def _get_canonical_facts_response(canonical_facts): return d +@patch( + "insights.client.connection.InsightsConnection._init_session", Mock()) @patch( "insights.client.connection.get_canonical_facts", return_value=_get_canonical_facts_response({"subscription_manager_id": str(uuid4())}) ) @patch( - "insights.client.connection.InsightsConnection._init_session", - **{"return_value.post.return_value.status_code": codes.CREATED} + "insights.client.connection.InsightsConnection.post", + **{"return_value.status_code": codes.CREATED} ) @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_canonical_facts_request(get_proxies, init_session, get_canonical_facts): +def test_canonical_facts_request(get_proxies, post, get_canonical_facts): """ A POST requests to the check-in endpoint is issued with correct headers and body containing Canonical Facts. @@ -40,19 +42,21 @@ def test_canonical_facts_request(get_proxies, init_session, get_canonical_facts) expected_url = connection.inventory_url + "/hosts/checkin" expected_headers = {"Content-Type": "application/json"} expected_data = get_canonical_facts.return_value - init_session.return_value.post.assert_called_once_with( + post.assert_called_once_with( expected_url, headers=expected_headers, data=dumps(expected_data) ) +@patch( + "insights.client.connection.InsightsConnection._init_session", Mock()) @patch("insights.client.connection.generate_machine_id", return_value=str(uuid4())) @patch("insights.client.connection.get_canonical_facts", side_effect=RuntimeError()) @patch( - "insights.client.connection.InsightsConnection._init_session", - **{"return_value.post.return_value.status_code": codes.CREATED} + "insights.client.connection.InsightsConnection.post", + **{"return_value.status_code": codes.CREATED} ) @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_insights_id_request(get_proxies, init_session, get_canonical_facts, generate_machine_id): +def test_insights_id_request(get_proxies, post, get_canonical_facts, generate_machine_id): """ A POST requests to the check-in endpoint is issued with correct headers and body containing only an Insights ID if Canonical Facts collection fails. 
@@ -65,23 +69,24 @@ def test_insights_id_request(get_proxies, init_session, get_canonical_facts, gen expected_url = connection.inventory_url + "/hosts/checkin" expected_headers = {"Content-Type": "application/json"} expected_data = {"insights_id": generate_machine_id.return_value} - init_session.return_value.post.assert_called_once_with( + post.assert_called_once_with( expected_url, headers=expected_headers, data=dumps(expected_data) ) @mark.parametrize(("exception",), ((ConnectionError,), (Timeout,))) +@patch("insights.client.connection.InsightsConnection._init_session", Mock()) @patch( "insights.client.connection.get_canonical_facts", return_value=_get_canonical_facts_response({"subscription_manager_id": "notauuid"}) ) -@patch("insights.client.connection.InsightsConnection._init_session") +@patch("insights.client.connection.InsightsConnection.post") @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_request_http_failure(get_proxies, init_session, get_canonical_facts, exception): +def test_request_http_failure(get_proxies, post, get_canonical_facts, exception): """ If the checkin-request fails, None is returned. """ - init_session.return_value.post.side_effect = exception + post.side_effect = exception config = Mock(base_url="www.example.com") @@ -90,31 +95,33 @@ def test_request_http_failure(get_proxies, init_session, get_canonical_facts, ex assert result is None +@patch("insights.client.connection.InsightsConnection._init_session", Mock()) @patch("insights.client.connection.get_canonical_facts", return_value={}) @patch( - "insights.client.connection.InsightsConnection._init_session", - **{"return_value.post.side_effect": RuntimeError()} + "insights.client.connection.InsightsConnection.post", + **{"side_effect": RuntimeError()} ) @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_request_unknown_exception(get_proxies, init_session, get_canonical_facts): +def test_request_unknown_exception(get_proxies, post, get_canonical_facts): """ If an unknown exception occurs, the call crashes. """ config = Mock(base_url="www.example.com") connection = InsightsConnection(config) - expected_exception = type(init_session.return_value.post.side_effect) + expected_exception = type(post.side_effect) with raises(expected_exception): connection.checkin() +@patch("insights.client.connection.InsightsConnection._init_session", Mock()) @patch("insights.client.connection.get_canonical_facts", return_value={}) @patch( - "insights.client.connection.InsightsConnection._init_session", - **{"return_value.post.return_value.status_code": codes.CREATED} + "insights.client.connection.InsightsConnection.post", + **{"return_value.status_code": codes.CREATED} ) @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_response_success(get_proxies, init_session, get_canonical_facts): +def test_response_success(get_proxies, post, get_canonical_facts): """ If a CREATED status code is received, the check-in was successful. 
""" @@ -129,14 +136,15 @@ def test_response_success(get_proxies, init_session, get_canonical_facts): ("status_code",), ((codes.OK,), (codes.BAD_REQUEST,), (codes.NOT_FOUND,), (codes.SERVER_ERROR,)) ) +@patch("insights.client.connection.InsightsConnection._init_session", Mock()) @patch("insights.client.connection.get_canonical_facts", return_value=_get_canonical_facts_response({})) -@patch("insights.client.connection.InsightsConnection._init_session") +@patch("insights.client.connection.InsightsConnection.post") @patch("insights.client.connection.InsightsConnection.get_proxies") -def test_response_failure(get_proxies, init_session, get_canonical_facts, status_code): +def test_response_failure(get_proxies, post, get_canonical_facts, status_code): """ If an unexpected status code is received, the check-in failed and an exception is raised. """ - init_session.return_value.post.return_value.status_code = status_code + post.return_value.status_code = status_code config = Mock(base_url="www.example.com") connection = InsightsConnection(config) diff --git a/insights/tests/client/connection/test_diagnosis.py b/insights/tests/client/connection/test_diagnosis.py index d9ddb5261..c124aa400 100644 --- a/insights/tests/client/connection/test_diagnosis.py +++ b/insights/tests/client/connection/test_diagnosis.py @@ -2,7 +2,8 @@ from insights.client import InsightsClient from insights.client.config import InsightsConfig from insights.client.connection import InsightsConnection -from mock.mock import patch +from mock.mock import patch, Mock +import pytest TEST_REMEDIATION_ID = 123456 @@ -23,11 +24,10 @@ def put(self, url=None, timeout=None, headers=None, data=None): class MockResponse(object): - def __init__(self, expected_status, expected_text, expected_content): - self.status_code = expected_status - self.text = expected_text - self.content = expected_content - self.reason = '' + def __init__(self, status_code, text, content): + self.status_code = status_code + self.text = text + self.content = content def json(self): return json.loads(self.content) @@ -41,6 +41,7 @@ def mock_get_proxies(obj): return +@pytest.mark.skip() @patch('insights.client.connection.InsightsConnection._init_session', mock_init_session) @patch('insights.client.connection.InsightsConnection.get_proxies', @@ -59,6 +60,7 @@ def test_get_diagnosis(): assert c.get_diagnosis() is None +@pytest.mark.skip() @patch('insights.client.connection.InsightsConnection._init_session', mock_init_session) @patch('insights.client.connection.InsightsConnection.get_proxies', @@ -82,3 +84,18 @@ def test_get_diagnosis_offline(): conf.offline = True c = InsightsClient(conf) assert c.get_diagnosis() is None + + +@patch('insights.client.connection.InsightsConnection._init_session', Mock()) +@patch('insights.client.connection.InsightsConnection.get_proxies', Mock()) +@patch('insights.client.utilities.constants.machine_id_file', '/tmp/machine-id') +@patch('insights.client.connection.InsightsConnection.get') +def test_get_diagnosis_success(get): + ''' + Verify that fetching a diagnosis without an ID succeeds and + returns a dict when HTTP response is valid + ''' + conf = InsightsConfig() + c = InsightsConnection(conf) + get.return_value = MockResponse(status_code=200, text="OK", content="{\"test\": \"test\"}") + assert c.get_diagnosis() == {"test": "test"} diff --git a/insights/tests/client/connection/test_reg_check.py b/insights/tests/client/connection/test_reg_check.py index b12ee49c9..c6dbca67e 100644 --- a/insights/tests/client/connection/test_reg_check.py 
+++ b/insights/tests/client/connection/test_reg_check.py @@ -31,7 +31,7 @@ def test_registration_check_ok_reg(get_proxies, _init_session, _): }) res.status_code = 200 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() @@ -57,7 +57,7 @@ def test_registration_check_ok_unreg(get_proxies, _init_session, _): }) res.status_code = 200 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() is False @@ -76,7 +76,7 @@ def test_registration_check_parse_error(get_proxies, _init_session, _): res._content = 'zSDFasfghsRGH' res.status_code = 200 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() is None @@ -95,7 +95,7 @@ def test_registration_check_bad_res(get_proxies, _init_session, _): res._content = 'wakannai' res.status_code = 500 - conn.session.get = MagicMock(return_value=res) + conn.get = MagicMock(return_value=res) assert conn.api_registration_check() is None @@ -109,5 +109,6 @@ def test_registration_check_conn_error(get_proxies, _init_session, _): ''' config = Mock(legacy_upload=False, base_url='example.com') conn = InsightsConnection(config) - conn.session.get.side_effect = requests.ConnectionError() + conn.get = MagicMock() + conn.get.side_effect = requests.ConnectionError() assert conn.api_registration_check() is None diff --git a/insights/tests/client/init/test_fetch.py b/insights/tests/client/init/test_fetch.py index d42f66dd0..2c7662cd2 100644 --- a/insights/tests/client/init/test_fetch.py +++ b/insights/tests/client/init/test_fetch.py @@ -10,8 +10,9 @@ def insights_client(): config = InsightsConfig(http_timeout=123) client = InsightsClient(config) - client.session = Mock(**{"get.return_value.headers.items.return_value": []}) - client.connection = Mock(base_url="http://www.example.com/") + client.connection = Mock(**{ + "base_url": "http://www.example.com/", "get.return_value.headers.items.return_value": [] + }) return client @@ -29,8 +30,7 @@ def test_request_with_etag(insights_client): url = "{0}{1}".format(insights_client.connection.base_url, source_path) headers = {'If-None-Match': etag_value} - timeout = insights_client.config.http_timeout - insights_client.session.get.assert_called_once_with(url, headers=headers, timeout=timeout) + insights_client.connection.get.assert_called_once_with(url, headers=headers) def test_request_forced(insights_client): @@ -41,8 +41,7 @@ def test_request_forced(insights_client): insights_client._fetch(source_path, "", "", force=False) url = "{0}{1}".format(insights_client.connection.base_url, source_path) - timeout = insights_client.config.http_timeout - insights_client.session.get.assert_called_once_with(url, timeout=timeout) + insights_client.connection.get.assert_called_once_with(url) @patch('insights.client.InsightsClient._fetch', Mock()) diff --git a/insights/tests/client/test_platform.py b/insights/tests/client/test_platform.py index a3fd950aa..be873bb9b 100644 --- a/insights/tests/client/test_platform.py +++ b/insights/tests/client/test_platform.py @@ -87,16 +87,16 @@ def test_upload_urls(): @patch("insights.client.connection.InsightsConnection._legacy_upload_archive") @patch("insights.client.connection.get_canonical_facts", return_value={'test': 'facts'}) -@patch('insights.client.connection.requests.Session') +@patch('insights.client.connection.InsightsConnection.post') @patch("insights.client.connection.open", 
new_callable=mock_open) -def test_payload_upload(op, session, c, _legacy_upload_archive): +def test_payload_upload(op, post, c, _legacy_upload_archive): ''' Ensure a payload upload occurs with the right URL and params ''' conf = InsightsConfig(legacy_upload=False) c = InsightsConnection(conf) c.upload_archive('testp', 'testct', None) - c.session.post.assert_called_with( + post.assert_called_with( c.base_url + '/ingress/v1/upload', files={ 'file': ('testp', ANY, 'testct'), # ANY = return call from mocked open(), acts as filepointer here @@ -112,16 +112,16 @@ def test_payload_upload(op, session, c, _legacy_upload_archive): @patch('insights.contrib.magic.open', MockMagic) @patch('insights.client.connection.generate_machine_id', mock_machine_id) @patch("insights.client.connection.get_canonical_facts", return_value={'test': 'facts'}) -@patch('insights.client.connection.requests.Session') +@patch('insights.client.connection.InsightsConnection.post') @patch("insights.client.connection.open", new_callable=mock_open) -def test_legacy_upload(op, session, c): +def test_legacy_upload(op, post, c): ''' Ensure an Insights collected tar upload to legacy occurs with the right URL and params ''' conf = InsightsConfig() c = InsightsConnection(conf) c.upload_archive('testp', 'testct', None) - c.session.post.assert_called_with( + post.assert_called_with( c.base_url + '/uploads/XXXXXXXX', files={ 'file': ('testp', ANY, 'application/gzip')}, # ANY = return call from mocked open(), acts as filepointer here From 5e901297ea21ffa7987da4d6840ca40f98d1a9c0 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 2 Sep 2021 03:27:26 +0800 Subject: [PATCH 535/892] New spec to get if SCA is enabled on satellite (#3204) * New spec to get if SCA is enabled on satellite Signed-off-by: Huanhuan Li * Update docstring of property "sca_enabled" Signed-off-by: Huanhuan Li --- .../parsers/satellite_postgresql_query.py | 37 ++++++++++++++++++- .../tests/test_satellite_postgresql_query.py | 20 ++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 4 ++ insights/specs/insights_archive.py | 5 ++- 5 files changed, 63 insertions(+), 4 deletions(-) diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py index bed986f37..96a75aaa8 100644 --- a/insights/parsers/satellite_postgresql_query.py +++ b/insights/parsers/satellite_postgresql_query.py @@ -6,8 +6,10 @@ SatelliteAdminSettings - command ``psql -d foreman -c 'select name, value, "default" from settings where name in (\'destroy_vm_on_host_delete\', \'unregister_delete_host\') --csv'`` ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- -SatelliteComputeResources - command ``psql -d foreman -c 'select name, type from compute_resources'`` ------------------------------------------------------------------------------------------------------ +SatelliteComputeResources - command ``psql -d foreman -c 'select name, type from compute_resources' --csv`` +----------------------------------------------------------------------------------------------------------- +SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv`` +------------------------------------------------------------------------------------------------------------------- """ import os @@ -203,3 +205,34 @@ class 
SatelliteComputeResources(SatellitePostgreSQLQuery): 'test_compute_resource1' """ pass + + +@parser(Specs.satellite_sca_status) +class SatelliteSCAStatus(SatellitePostgreSQLQuery): + """ + Parse the output of the command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``. + + .. note:: + Please refer to its super-class :class:`insights.parsers.satellite_postgresql_query.SatellitePostgreSQLQuery` for more + details. + + Sample output:: + + displayname,content_access_mode + Default Organization,entitlement + Orgq,org_environment + + Examples: + >>> type(sat_sca_info) + + >>> sat_sca_info.sca_enabled + True + """ + + @property + def sca_enabled(self): + """ + If the value of content_access_mode is "org_environment", it means the SCA is enabled for this organization. + Return True if any organization has SCA enabled on the satellite else False + """ + return bool(len(self.search(content_access_mode='org_environment'))) diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py index a93c0f581..319085c8e 100644 --- a/insights/parsers/tests/test_satellite_postgresql_query.py +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -135,6 +135,19 @@ ''' +SATELLITE_SCA_INFO_1 = ''' +displayname,content_access_mode +Default Organization,entitlement +Orgq,org_environment +''' + +SATELLITE_SCA_INFO_2 = ''' +displayname,content_access_mode +Default Organization,entitlement +Orgq,entitlement +''' + + def test_satellite_postgesql_query_exception(): with pytest.raises(ContentException): satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_1)) @@ -171,10 +184,12 @@ def test_HTL_doc_examples(): query = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_3)) settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) + sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_1)) globs = { 'query': query, 'table': settings, 'resources_table': resources_table, + 'sat_sca_info': sat_sca_info } failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) assert failed == 0 @@ -220,3 +235,8 @@ def test_satellite_compute_resources(): rows = resources_table.search(type='Foreman::Model::RHV') assert len(rows) == 1 assert rows[0]['name'] == 'test_compute_resource2' + + +def test_satellite_sca(): + sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_2)) + assert not sat_sca_info.sca_enabled diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index dc8810824..ba7aa3327 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -575,6 +575,7 @@ class Specs(SpecSet): satellite_custom_ca_chain = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_non_yum_type_repos = RegistryPoint() + satellite_sca_status = RegistryPoint() satellite_settings = RegistryPoint() satellite_version_rb = RegistryPoint() satellite_custom_hiera = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 7cd565f98..899466b61 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -669,6 +669,10 @@ def pmlog_summary_file(broker): "/usr/bin/mongo pulp_database --eval 'db.repo_importers.find({\"importer_type_id\": { $ne: 
\"yum_importer\"}}).count()'", deps=[[SatelliteVersion, CapsuleVersion]] ) + satellite_sca_status = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d candlepin -c \"select displayname,content_access_mode from cp_owner\" --csv", + deps=[SatelliteVersion] + ) satellite_settings = simple_command( "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c \"select name, value, \\\"default\\\" from settings where name in ('destroy_vm_on_host_delete', 'unregister_delete_host')\" --csv", deps=[SatelliteVersion] diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index b4f814d50..8b4c09bed 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -211,14 +211,15 @@ class InsightsArchiveSpecs(Specs): rpm_ostree_status = simple_file("insights_commands/rpm-ostree_status_--json") rpm_V_packages = first_file(["insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo_chrony", "insights_commands/rpm_-V_coreutils_procps_procps-ng_shadow-utils_passwd_sudo"]) saphostctl_getcimobject_sapinstance = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostctrl_-function_GetCIMObject_-enuminstances_SAPInstance") + saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") + saphostexec_version = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-version") satellite_content_hosts_count = first_file([ "insights_commands/sudo_-iu_postgres_.usr.bin.psql_-d_foreman_-c_select_count_from_hosts", "insights_commands/sudo_-iu_postgres_psql_-d_foreman_-c_select_count_from_hosts" ]) - saphostexec_status = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-status") - saphostexec_version = simple_file("insights_commands/usr.sap.hostctrl.exe.saphostexec_-version") satellite_custom_ca_chain = simple_file("insights_commands/awk_BEGIN_pipe_openssl_x509_-noout_-subject_-enddate_._-_BEGIN_CERT._._-_END_CERT._print_pipe_._-_END_CERT._close_pipe_printf_n_.etc.pki.katello.certs.katello-server-ca.crt") satellite_mongodb_storage_engine = simple_file("insights_commands/mongo_pulp_database_--eval_db.serverStatus_.storageEngine") + satellite_sca_status = simple_file("insights_commands/sudo_-iu_postgres_.usr.bin.psql_-d_candlepin_-c_select_displayname_content_access_mode_from_cp_owner_--csv") sealert = simple_file('insights_commands/sealert_-l') sestatus = simple_file("insights_commands/sestatus_-b") smbstatus_p = simple_file("insights_commands/smbstatus_-p") From 43559e2f531a5bc84e5193c22bc4c90b74875c2e Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 2 Sep 2021 03:51:30 +0800 Subject: [PATCH 536/892] Add parser cups_ppd (#3201) * Add parser cups_ppd Signed-off-by: jiazhang * Use dict base Signed-off-by: jiazhang * Add SkipException test Signed-off-by: jiazhang * Update docstring Signed-off-by: jiazhang --- docs/shared_parsers_catalog/cups_ppd.rst | 3 ++ insights/parsers/cups_ppd.py | 60 ++++++++++++++++++++++++ insights/parsers/tests/test_cups_ppd.py | 52 ++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 117 insertions(+) create mode 100644 docs/shared_parsers_catalog/cups_ppd.rst create mode 100644 insights/parsers/cups_ppd.py create mode 100644 insights/parsers/tests/test_cups_ppd.py diff --git a/docs/shared_parsers_catalog/cups_ppd.rst b/docs/shared_parsers_catalog/cups_ppd.rst new file mode 100644 index 000000000..ecb18cc53 --- /dev/null +++ b/docs/shared_parsers_catalog/cups_ppd.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.cups_ppd + :members: + :show-inheritance: diff --git a/insights/parsers/cups_ppd.py b/insights/parsers/cups_ppd.py new file mode 100644 index 000000000..ace98adb3 --- /dev/null +++ b/insights/parsers/cups_ppd.py @@ -0,0 +1,60 @@ +""" +CupsPpd - files ``/etc/cups/ppd/*`` +=================================== + +Parser to parse the content of files ``/etc/cups/ppd/*`` +""" + +from insights import Parser +from insights import parser +from insights.specs import Specs +from insights.parsers import SkipException + + +@parser(Specs.cups_ppd) +class CupsPpd(Parser, dict): + """ + Class to parse ``/etc/cups/ppd/*`` files. + + Sample output for files:: + + *PPD-Adobe: "4.3" + *FormatVersion: "4.3" + *FileVersion: "2.2" + *LanguageVersion: English + *LanguageEncoding: ISOLatin1 + *PCFileName: "ippeve.ppd" + *Manufacturer: "Canon" + *ModelName: "iR-ADV C3525/3530 PPD" + *Product: "(iR-ADV C3525/3530 PPD)" + *NickName: "iR-ADV C3525/3530 PPD" + *ShortNickName: "iR-ADV C3525/3530 PPD" + *cupsFilter2: "application/vnd.cups-pdf application/pdf 10 -" + *cupsFilter2: "application/vnd.cups-postscript application/postscript 10 -" + + Examples: + >>> type(cups_ppd) + + >>> cups_ppd["PCFileName"] + '"ippeve.ppd"' + >>> cups_ppd["cupsFilter2"] + ['"application/vnd.cups-pdf application/pdf 10 -"', '"application/vnd.cups-postscript application/postscript 10 -"'] + """ + def parse_content(self, content): + if not content: + raise SkipException("No Valid Configuration") + data = {} + for line in content: + if "*" in line and ":" in line: + key = line.split(":")[0].split("*")[-1].strip() + value = line.split(":")[-1].strip() + if key in data: + if isinstance(data[key], list): + data[key].append(value) + else: + data[key] = [data[key], value] + else: + data[key] = value + if not data: + raise SkipException("No Valid Configuration") + self.update(data) diff --git a/insights/parsers/tests/test_cups_ppd.py b/insights/parsers/tests/test_cups_ppd.py new file mode 100644 index 000000000..b0fa27022 --- /dev/null +++ b/insights/parsers/tests/test_cups_ppd.py @@ -0,0 +1,52 @@ +from insights.tests import context_wrap +from insights.parsers import cups_ppd +from insights.parsers.cups_ppd import CupsPpd, SkipException +import doctest +import pytest + + +CUPS_PPD = """ +*PPD-Adobe: "4.3" +*FormatVersion: "4.3" +*FileVersion: "2.2" +*LanguageVersion: English +*LanguageEncoding: ISOLatin1 +*PCFileName: "ippeve.ppd" +*Manufacturer: "Canon" +*ModelName: "iR-ADV C3525/3530 PPD" +*Product: "(iR-ADV C3525/3530 PPD)" +*NickName: "iR-ADV C3525/3530 PPD" +*ShortNickName: "iR-ADV C3525/3530 PPD" +*cupsFilter2: "application/vnd.cups-pdf application/pdf 10 -" +*cupsFilter2: "application/vnd.cups-postscript application/postscript 10 -" +""".strip() + +CUPS_PPD_INVALID1 = ''' +'''.strip() + +CUPS_PPD_INVALID2 = ''' +ShortNickName +'''.strip() + + +def test_cups_ppd(): + cups_ppd_result = CupsPpd(context_wrap(CUPS_PPD, path='/etc/cups/ppd/test_printer1.ppd')) + assert cups_ppd_result["PCFileName"] == '"ippeve.ppd"' + assert cups_ppd_result["cupsFilter2"] == ['"application/vnd.cups-pdf application/pdf 10 -"', '"application/vnd.cups-postscript application/postscript 10 -"'] + + with pytest.raises(SkipException) as exc: + CupsPpd(context_wrap(CUPS_PPD_INVALID1, path='/etc/cups/ppd/test_printer1.ppd')) + assert 'No Valid Configuration' in str(exc) + + with pytest.raises(SkipException) as exc: + CupsPpd(context_wrap(CUPS_PPD_INVALID2, path='/etc/cups/ppd/test_printer1.ppd')) + assert 'No Valid Configuration' in str(exc) + 
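The list-folding behaviour asserted above (a key such as ``cupsFilter2`` that occurs more than once collapses into a list) can be tried in isolation. The following is a minimal sketch that mirrors the ``parse_content`` folding logic, not the shipped parser::

    def fold(pairs):
        data = {}
        for key, value in pairs:
            if key in data:
                # Promote a repeated key to a list, then keep appending.
                if isinstance(data[key], list):
                    data[key].append(value)
                else:
                    data[key] = [data[key], value]
            else:
                data[key] = value
        return data

    assert fold([("PCFileName", "x"), ("cupsFilter2", "a"), ("cupsFilter2", "b")]) == {
        "PCFileName": "x",
        "cupsFilter2": ["a", "b"],
    }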
+ +def test_cups_ppd_documentation(): + env = { + 'cups_ppd': CupsPpd(context_wrap(CUPS_PPD, + path='/etc/cups/ppd/test_printer1.ppd')) + } + failed, total = doctest.testmod(cups_ppd, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ba7aa3327..50847a336 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -97,6 +97,7 @@ class Specs(SpecSet): crypto_policies_opensshserver = RegistryPoint() crypto_policies_bind = RegistryPoint() crt = RegistryPoint() + cups_ppd = RegistryPoint(multi_output=True) current_clocksource = RegistryPoint() date_iso = RegistryPoint() date = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 899466b61..97a63ebf2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -203,6 +203,7 @@ def corosync_cmapctl_cmd_list(broker): crypto_policies_state_current = simple_file("/etc/crypto-policies/state/current") crypto_policies_opensshserver = simple_file("/etc/crypto-policies/back-ends/opensshserver.config") crypto_policies_bind = simple_file("/etc/crypto-policies/back-ends/bind.config") + cups_ppd = glob_file("etc/cups/ppd/*") current_clocksource = simple_file("/sys/devices/system/clocksource/clocksource0/current_clocksource") date = simple_command("/bin/date") date_utc = simple_command("/bin/date --utc") From e4885c2e903af22dcdfc57949f005d580bd02e98 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 2 Sep 2021 03:51:41 +0800 Subject: [PATCH 537/892] Add new parser lpstat_v (#3202) * Add new parser lpstat_v Signed-off-by: jiazhang * Add new parser lpstat_v Signed-off-by: jiazhang * Use dict base and update datasource return docstring Signed-off-by: jiazhang * Update docstring Signed-off-by: jiazhang * Update docstring Signed-off-by: jiazhang --- insights/parsers/lpstat.py | 87 ++++++++++++++++------- insights/parsers/tests/test_lpstat.py | 50 ++++++++++--- insights/specs/__init__.py | 1 + insights/specs/datasources/lpstat.py | 45 ++++++++++++ insights/specs/default.py | 3 +- insights/tests/datasources/test_lpstat.py | 43 +++++++++++ 6 files changed, 191 insertions(+), 38 deletions(-) create mode 100644 insights/specs/datasources/lpstat.py create mode 100644 insights/tests/datasources/test_lpstat.py diff --git a/insights/parsers/lpstat.py b/insights/parsers/lpstat.py index 9eb5948a0..65229133c 100644 --- a/insights/parsers/lpstat.py +++ b/insights/parsers/lpstat.py @@ -1,35 +1,17 @@ """ -LpstatPrinters - command ``lpstat -p`` -====================================== - -Parses the output of ``lpstat -p``, to get locally configured printers. - -Current available printer states are: - -* IDLE (``PRINTER_STATUS_IDLE``) -* PROCESSING (``PRINTER_STATUS_PROCESSING``) -- printing -* DISABLED (``PRINTER_STATUS_DISABLED``) -* UNKNOWN (``PRINTER_STATUS_UNKNOWN``) - - -Examples: - >>> from insights.parsers.lpstat import LpstatPrinters, PRINTER_STATUS_DISABLED - >>> from insights.tests import context_wrap - >>> LPSTAT_P_OUTPUT = ''' - ... printer idle_printer is idle. enabled since Fri 20 Jan 2017 09:55:50 PM CET - ... printer disabled_printer disabled since Wed 15 Feb 2017 12:01:11 PM EST - - ... reason unknown - ... 
''' - >>> lpstat = LpstatPrinters(context_wrap(LPSTAT_P_OUTPUT)) - >>> lpstat.printers - [{'status': 'IDLE', 'name': 'idle_printer'}, {'status': 'DISABLED', 'name': 'disabled_printer'}] - >>> lpstat.printer_names_by_status(PRINTER_STATUS_DISABLED) - ['disabled_printer'] +Lpstat - command ``lpstat`` +=========================== + +Parsers contains in this module are: + +LpstatPrinters - command ``/usr/bin/lpstat -p`` +LpstatProtocol - command ``/usr/bin/lpstat -v`` """ from .. import parser, CommandParser from insights.specs import Specs +from insights.parsers import SkipException # Printer states PRINTER_STATUS_IDLE = 'IDLE' @@ -42,10 +24,31 @@ @parser(Specs.lpstat_p) class LpstatPrinters(CommandParser): - """Class to parse ``lpstat -p`` command output. + """ + Class to parse ``lpstat -p`` command output. Parses the output of ``lpstat -p``, to get locally configured printers. + Current available printer states are: + + * IDLE (``PRINTER_STATUS_IDLE``) + * PROCESSING (``PRINTER_STATUS_PROCESSING``) -- printing + * DISABLED (``PRINTER_STATUS_DISABLED``) + * UNKNOWN (``PRINTER_STATUS_UNKNOWN``) + + Sample output of the command: + + printer idle_printer is idle. enabled since Fri 20 Jan 2017 09:55:50 PM CET + printer disabled_printer disabled since Wed 15 Feb 2017 12:01:11 PM EST - + reason unknown Raises: ValueError: Raised if any error occurs parsing the content. + + Examples: + >>> type(lpstat_printers) + + >>> len(lpstat_printers.printers) + 3 + >>> lpstat_printers.printer_names_by_status('DISABLED') + ['disabled_printer'] """ def __init__(self, *args, **kwargs): @@ -87,3 +90,33 @@ def printer_names_by_status(self, status): """ names = [prntr['name'] for prntr in self.printers if prntr['status'] == status] return names + + +@parser(Specs.lpstat_protocol_printers) +class LpstatProtocol(CommandParser, dict): + """ + Class to parse ``lpstat -v`` command output. + + Sample output of the command:: + + device for test_printer1: ipp + device for test_printer2: ipp + + Examples: + >>> type(lpstat_protocol) + + >>> lpstat_protocol['test_printer1'] + 'ipp' + """ + def parse_content(self, content): + if not content: + raise SkipException("No Valid Output") + data = {} + for line in content: + if line.startswith("device for "): + protocol = line.split(":")[-1].strip() + printer = line.split(":")[0].split()[-1].strip() + data[printer] = protocol + if not data: + raise SkipException("No Valid Output") + self.update(data) diff --git a/insights/parsers/tests/test_lpstat.py b/insights/parsers/tests/test_lpstat.py index 96420f773..b93add6b2 100644 --- a/insights/parsers/tests/test_lpstat.py +++ b/insights/parsers/tests/test_lpstat.py @@ -1,15 +1,8 @@ -""" -Tests for lpstat parser -======================= - -Note, that date time is localized (according to LC_TIME). 
-
-
-"""
-
+import doctest
 import pytest
-from insights.parsers.lpstat import LpstatPrinters
+from insights.parsers import lpstat
+from insights.parsers.lpstat import LpstatPrinters, LpstatProtocol, SkipException
 from insights.tests import context_wrap
@@ -25,6 +18,19 @@
 """
+LPSTAT_V_OUTPUT = """
+device for test_printer1: ipp
+device for test_printer2: ipp
+""".strip()
+
+LPSTAT_V_OUTPUT_INVALID_1 = """
+""".strip()
+
+LPSTAT_V_OUTPUT_INVALID_2 = """
+lpstat: Transport endpoint is not connected
+""".strip()
+
+
 def test_lpstat_parse():
     lpstat = LpstatPrinters(context_wrap(LPSTAT_P_OUTPUT))
@@ -71,3 +77,27 @@ def test_lpstat_printer_names_by_status(status, expected_name):
     lpstat = LpstatPrinters(context_wrap(LPSTAT_P_OUTPUT))
     names = lpstat.printer_names_by_status(status)
     assert names == [expected_name]
+
+
+def test_lpstat_protocol():
+    lpstat_protocol = LpstatProtocol(context_wrap(LPSTAT_V_OUTPUT))
+    assert lpstat_protocol["test_printer1"] == "ipp"
+
+
+def test_lpstat_protocol_invalid_state():
+    with pytest.raises(SkipException) as exc:
+        LpstatProtocol(context_wrap(LPSTAT_V_OUTPUT_INVALID_1))
+    assert 'No Valid Output' in str(exc)
+
+    with pytest.raises(SkipException) as exc:
+        LpstatProtocol(context_wrap(LPSTAT_V_OUTPUT_INVALID_2))
+    assert 'No Valid Output' in str(exc)
+
+
+def test_lpstat_doc_examples():
+    env = {
+        'lpstat_printers': LpstatPrinters(context_wrap(LPSTAT_P_OUTPUT)),
+        'lpstat_protocol': LpstatProtocol(context_wrap(LPSTAT_V_OUTPUT))
+    }
+    failed, total = doctest.testmod(lpstat, globs=env)
+    assert failed == 0
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 50847a336..17e30ffde 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -296,6 +296,7 @@ class Specs(SpecSet):
     localtime = RegistryPoint()
     logrotate_conf = RegistryPoint(multi_output=True)
     lpstat_p = RegistryPoint()
+    lpstat_protocol_printers = RegistryPoint()
     ls_boot = RegistryPoint()
     ls_dev = RegistryPoint()
     ls_disk = RegistryPoint()
diff --git a/insights/specs/datasources/lpstat.py b/insights/specs/datasources/lpstat.py
new file mode 100644
index 000000000..77b004277
--- /dev/null
+++ b/insights/specs/datasources/lpstat.py
@@ -0,0 +1,45 @@
+"""
+Custom datasources for lpstat information
+"""
+from insights.core.context import HostContext
+from insights.core.dr import SkipComponent
+from insights.core.plugins import datasource
+from insights.core.spec_factory import DatasourceProvider, simple_command
+from insights.specs import Specs
+
+
+class LocalSpecs(Specs):
+    """ Local specs used only by lpstat datasources """
+
+    lpstat_v = simple_command("/usr/bin/lpstat -v")
+    """ Returns the output of command ``/usr/bin/lpstat -v`` """
+
+
+@datasource(LocalSpecs.lpstat_v, HostContext)
+def lpstat_protocol_printers_info(broker):
+    """
+    This datasource provides the non-sensitive information collected
+    from ``/usr/bin/lpstat -v``.
+
+    Typical output of the ``/usr/bin/lpstat -v`` command is::
+
+        "device for test_printer1: ipp://cups.test.com/printers/test_printer1"
+
+    Returns:
+        DatasourceProvider: Returns the collected content containing non-sensitive information
+
+    Raises:
+        SkipComponent: When the filter/path does not exist or any exception occurs.
+    """
+    try:
+        content = broker[LocalSpecs.lpstat_v].content
+        result = []
+        for line in content:
+            if "device for " in line:
+                # Remove printer address information
+                result.append(line.rsplit(":", 1)[0])
+        if result:
+            return DatasourceProvider(content="\n".join(result), relative_path='insights_commands/lpstat_-v')
+    except Exception as e:
+        raise SkipComponent("Unexpected exception:{e}".format(e=str(e)))
+    raise SkipComponent
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 97a63ebf2..c710934d8 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -36,7 +36,7 @@ from insights.parsers.mount import Mount
 from insights.specs import Specs
 from insights.specs.datasources import (
-    awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, package_provides,
+    awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, lpstat, package_provides,
     ps as ps_datasource, sap, satellite_missed_queues)
 from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr
@@ -400,6 +400,7 @@ def httpd_cmd(broker):
     localtime = simple_command("/usr/bin/file -L /etc/localtime")
     logrotate_conf = glob_file(["/etc/logrotate.conf", "/etc/logrotate.d/*"])
     lpstat_p = simple_command("/usr/bin/lpstat -p")
+    lpstat_protocol_printers = lpstat.lpstat_protocol_printers_info
     ls_boot = simple_command("/bin/ls -lanR /boot")
     ls_dev = simple_command("/bin/ls -lanR /dev")
     ls_disk = simple_command("/bin/ls -lanR /dev/disk")
diff --git a/insights/tests/datasources/test_lpstat.py b/insights/tests/datasources/test_lpstat.py
new file mode 100644
index 000000000..4f1ab5a3e
--- /dev/null
+++ b/insights/tests/datasources/test_lpstat.py
@@ -0,0 +1,43 @@
+import pytest
+from mock.mock import Mock
+from insights.core.dr import SkipComponent
+from insights.core.spec_factory import DatasourceProvider
+from insights.specs.datasources.lpstat import LocalSpecs, lpstat_protocol_printers_info
+
+
+LPSTAT_V = """
+device for test_printer1: ipp://cups.test.com/printers/test_printer1
+device for test_printer2: ipp://cups.test.com/printers/test_printer2
+""".strip()
+
+LPSTAT_V_NOT_GOOD = """
+lpstat: Transport endpoint is not connected
+""".strip()
+
+LPSTAT_V_RESULT = """
+device for test_printer1: ipp
+device for test_printer2: ipp
+""".strip()
+
+RELATIVE_PATH = 'insights_commands/lpstat_-v'
+
+
+def test_lpstat_datasource():
+    lpstat_data = Mock()
+    lpstat_data.content = LPSTAT_V.splitlines()
+    broker = {LocalSpecs.lpstat_v: lpstat_data}
+    result = lpstat_protocol_printers_info(broker)
+    assert result is not None
+    assert isinstance(result, DatasourceProvider)
+    expected = DatasourceProvider(content=LPSTAT_V_RESULT, relative_path=RELATIVE_PATH)
+    assert result.content == expected.content
+    assert result.relative_path == expected.relative_path
+
+
+def test_lpstat_datasource_NG_output():
+    lpstat_data = Mock()
+    lpstat_data.content = LPSTAT_V_NOT_GOOD.splitlines()
+    broker = {LocalSpecs.lpstat_v: lpstat_data}
+    with pytest.raises(SkipComponent) as e:
+        lpstat_protocol_printers_info(broker)
+    assert 'SkipComponent' in str(e)

From a2dcbe37ceb4d673c09c0d4ebfa880443fc1f490 Mon Sep 17 00:00:00 2001
From: Bob Fahr <20520336+bfahr@users.noreply.github.com>
Date: Wed, 1 Sep 2021 15:03:27 -0500
Subject: [PATCH 538/892] Fix iter/next code in lsof (#3203)

* Refactor code using iter/next to avoid functions deprecated by PEP 479

* Add tests

Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com>
---
 insights/parsers/lsof.py | 18 ++++++++++++------
insights/parsers/tests/test_lsof.py | 28 +++++++++++++++++++++++++++- 2 files changed, 39 insertions(+), 7 deletions(-) diff --git a/insights/parsers/lsof.py b/insights/parsers/lsof.py index 6357d6526..11681cb78 100644 --- a/insights/parsers/lsof.py +++ b/insights/parsers/lsof.py @@ -1,6 +1,6 @@ """ Lsof - command ``/usr/sbin/lsof`` -======================================= +================================= This parser reads the output of the ``/usr/sbin/lsof`` command and makes each line available as a dictionary keyed on the fields in the lsof output (with @@ -60,6 +60,7 @@ """ +from insights.core.dr import SkipComponent from .. import add_filter, Scannable, parser, CommandParser from insights.specs import Specs @@ -88,11 +89,16 @@ def _start(self, content): Consumes lines from content until the HEADER is found and processed. Returns an iterator over the remaining lines. """ - content = iter(content) + start = len(content) + for n, line in enumerate(content): + if 'COMMAND ' in line: + start = n + 1 + break - line = next(content) - while 'COMMAND ' not in line: - line = next(content) + if start >= len(content): + raise SkipComponent + + content = iter(content[start:]) self._calc_indexes(line) return content @@ -112,7 +118,7 @@ def _parse_line(self, line): for heading in self.headings[:-1]: # Use value if (start, end) index of heading is not empty if line[slice(*self.indexes[heading])].strip(): - rdict[heading] = next(rowsplit) + rdict[heading] = next(rowsplit, '') else: rdict = dict(zip(self.headings, rowsplit)) rdict['NAME'] = command diff --git a/insights/parsers/tests/test_lsof.py b/insights/parsers/tests/test_lsof.py index 63e04a5bf..a4b44c24d 100644 --- a/insights/parsers/tests/test_lsof.py +++ b/insights/parsers/tests/test_lsof.py @@ -1,3 +1,5 @@ +import pytest +from insights.core.dr import SkipComponent from insights.parsers import lsof from insights.tests import context_wrap @@ -12,6 +14,7 @@ dmeventd 674 688 root mem REG 253,1 20032 134413763 /usr/lib64/libuuid.so.1.3.0 dmeventd 674 688 root mem REG 253,1 248584 135108725 /usr/lib64/libblkid.so.1.1.0 bioset 611 root txt unknown /proc/611/exe +systemd 1 root cwd DIR 253,1 4096 128 / """.strip() LSOF_GOOD_V1 = """ @@ -77,7 +80,7 @@ def test_lsof(): assert d[5]["TYPE"] == "REG" assert d[7]["NAME"] == "/proc/611/exe" - assert d[-1] == { + assert d[-2] == { "COMMAND": "bioset", "PID": "611", "TID": "", @@ -89,6 +92,18 @@ def test_lsof(): "NODE": "", "NAME": "/proc/611/exe" } + assert d[-1] == { + "COMMAND": "systemd", + "PID": "1", + "TID": "", + "USER": "root", + "FD": "cwd", + "TYPE": "DIR", + "DEVICE": "253,1", + "SIZE/OFF": "4096", + "NODE": "128", + "NAME": "/" + } def test_lsof_good(): @@ -197,3 +212,14 @@ def test_lsof_scan(): assert l.root_stdin[1]['SIZE/OFF'] == '0t0' assert l.root_stdin[1]['NODE'] == '4674' assert l.root_stdin[1]['NAME'] == '/dev/null' + + +LSOF_BAD = """ +lsof: WARNING: can't stat() xfs file system /var/lib/origin/openshift.local.volumes/pods/abca2a21-da4c-22eb-b49c-011d3a938eb5/volume-subpaths/config/wh/0 +""" + + +def test_lsof_bad(): + with pytest.raises(SkipComponent) as e: + lsof.Lsof(context_wrap(LSOF_BAD)) + assert e is not None From 25cb106ec3480e8e7d6969a9c31259ff949f71d2 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 1 Sep 2021 16:08:40 -0400 Subject: [PATCH 539/892] Fix lsblk parsing raid1 types (#3205) * Fix the regex to catch the various raid types raid0, raid1, etc. 
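A minimal illustration of the character-class change (this is only the TYPE token class, not the full ``LSBlock`` row pattern)::

    import re

    old = re.compile(r"[a-z]+")     # previous class: stops at the first digit
    new = re.compile(r"[a-z0-9]+")  # fixed class: accepts raid0, raid1, raid10, ...

    assert old.match("raid1").group() == "raid"    # truncated TYPE token
    assert new.match("raid1").group() == "raid1"   # full TYPE token matched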
Signed-off-by: Ryan Blakley --- insights/parsers/lsblk.py | 2 +- insights/parsers/tests/test_lsblk.py | 59 ++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 1 deletion(-) diff --git a/insights/parsers/lsblk.py b/insights/parsers/lsblk.py index 8cf9714f7..4c1eecaf6 100644 --- a/insights/parsers/lsblk.py +++ b/insights/parsers/lsblk.py @@ -224,7 +224,7 @@ class LSBlock(BlockDevices): See the discussion of the key ``PARENT_NAMES`` above. """ def parse_content(self, content): - r = re.compile(r"([\s\|\`\-]*)(\S+.*) (\d+:\d+)\s+(\d)\s+(\d+(\.\d)?[A-Z])\s+(\d)\s+([a-z]+)(.*)") + r = re.compile(r"([\s\|\`\-]*)(\S+.*) (\d+:\d+)\s+(\d)\s+(\d+(\.\d)?[A-Z])\s+(\d)\s+([a-z0-9]+)(.*)") device_list = [] parents = [None] * MAX_GENERATIONS for line in content[1:]: diff --git a/insights/parsers/tests/test_lsblk.py b/insights/parsers/tests/test_lsblk.py index ac2d006c8..4f4f85098 100644 --- a/insights/parsers/tests/test_lsblk.py +++ b/insights/parsers/tests/test_lsblk.py @@ -52,6 +52,24 @@ `-appdg-app (dm-7) 253:7 0 2.8T 0 lvm /splunk """ +LSBLK_DATA3 = """ +NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT +vda 252:0 0 10G 0 disk +|-vda1 252:1 0 501M 0 part +| `-md127 9:127 0 500M 0 raid1 /boot +`-vda2 252:2 0 9.5G 0 part + `-md126 9:126 0 9.5G 0 raid1 + |-rhel-root 253:0 0 9.4G 0 lvm / + `-rhel-swap 253:1 0 100M 0 lvm [SWAP] +vdb 252:16 0 10G 0 disk +|-vdb1 252:17 0 501M 0 part +| `-md127 9:127 0 500M 0 raid1 /boot +`-vdb2 252:18 0 9.5G 0 part + `-md126 9:126 0 9.5G 0 raid1 + |-rhel-root 253:0 0 9.4G 0 lvm / + `-rhel-swap 253:1 0 100M 0 lvm [SWAP] +""" + # lsblk -P -o LSBLK_EXT_DATA = """ ALIGNMENT="0" DISC-ALN="0" DISC-GRAN="0B" DISC-MAX="0B" DISC-ZERO="0" FSTYPE="" GROUP="cdrom" KNAME="sr0" LABEL="" LOG-SEC="512" MAJ:MIN="11:0" MIN-IO="512" MODE="brw-rw----" MODEL="DVD+-RW DVD8801 " MOUNTPOINT="" NAME="sr0" OPT-IO="0" OWNER="root" PHY-SEC="512" RA="128" RM="1" RO="0" ROTA="1" RQ-SIZE="128" SCHED="cfq" SIZE="1024M" STATE="running" TYPE="rom" UUID="" @@ -155,6 +173,47 @@ def test_lsblk(): assert sdf_appdg.mountpoint == "/splunk" assert sdf_appdg.parent_names == ["sdf", "mpatha (dm-1)", "mpathap1 (dm-5)"] + # LSBLK_DATA3 - aka MD raid lsblk output + results = lsblk.LSBlock(context_wrap(LSBLK_DATA3)) + assert results is not None + assert len(results) == 14 + + rhel_root = None + md127 = None + for result in results: + if result.name == 'rhel-root': + rhel_root = result + elif result.name == 'md127': + md127 = result + + assert rhel_root is not None + assert rhel_root.maj_min == "253:0" + assert rhel_root.removable is False + assert rhel_root.size == "9.4G" + assert rhel_root.read_only is False + assert rhel_root.type == "lvm" + assert rhel_root.mountpoint == "/" + assert rhel_root.parent_names == ["vdb", "vdb2", "md126"] + + assert md127 is not None + assert md127.maj_min == "9:127" + assert md127.removable is False + assert md127.size == "500M" + assert md127.read_only is False + assert md127.type == "raid1" + assert md127.mountpoint == "/boot" + assert md127.parent_names == ["vdb", "vdb1"] + + assert hasattr(results, 'device_data') + assert isinstance(results.device_data, dict) + assert sorted(results.device_data.keys()) == sorted([ + 'vda', 'vda1', 'vda2', 'rhel-root', 'rhel-swap', 'vdb', 'vdb1', 'vdb2', 'md126', 'md127' + ]) + assert results.device_data['md127'] == md127 + assert md127.get('MAJ_MIN') == md127.maj_min + assert str(rhel_root) == 'lvm:rhel-root(/)' + assert str(md127) == 'raid1:md127(/boot)' + LSBLK_DATA_BAD = """ NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT From 
9a6cd17ec172109c4db63cd27db2f6afabbff0f3 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 1 Sep 2021 16:15:10 -0400 Subject: [PATCH 540/892] Add MII polling interval attribute to the bond parser (#3206) * Added in the missing attribute that is often set in the bond opts. * Added in testing to confirm the attribute was set properly. Signed-off-by: Ryan Blakley --- insights/parsers/bond.py | 10 ++++++++++ insights/parsers/tests/test_bond.py | 2 ++ 2 files changed, 12 insertions(+) diff --git a/insights/parsers/bond.py b/insights/parsers/bond.py index b03fa991c..a75e41b40 100644 --- a/insights/parsers/bond.py +++ b/insights/parsers/bond.py @@ -96,6 +96,7 @@ def parse_content(self, content): self.xmit_hash_policy = None self._arp_polling_interval = None self._arp_ip_target = None + self._mii_polling_interval = None self._slave_interface = [] self._aggregator_id = [] self._mii_status = [] @@ -162,6 +163,8 @@ def parse_content(self, content): self._arp_polling_interval = line.strip().split(':', 1)[1].strip() elif line.strip().startswith("ARP IP target/s (n.n.n.n form):"): self._arp_ip_target = line.strip().split(':', 1)[1].strip() + elif line.strip().startswith("MII Polling Interval (ms):"): + self._mii_polling_interval = line.strip().split(':', 1)[1].strip() elif line.strip().startswith("Primary Slave"): self._primary_slave = line.split(":", 1)[1].strip() elif line.strip().startswith("Up Delay (ms):"): @@ -255,6 +258,13 @@ def arp_ip_target(self): """ return self._arp_ip_target + @property + def mii_polling_interval(self): + """Returns the mii polling interval as a string. ``None`` is returned + if no "MII Polling Interval (ms)" key is found. + """ + return self._mii_polling_interval + @property def primary_slave(self): """Returns the "Primary Slave" in the bond file if key/value exists. diff --git a/insights/parsers/tests/test_bond.py b/insights/parsers/tests/test_bond.py index afba0e6b6..b539b9ab3 100644 --- a/insights/parsers/tests/test_bond.py +++ b/insights/parsers/tests/test_bond.py @@ -301,11 +301,13 @@ def test_bond_class(): assert bond_obj_3.mii_status == ['up', 'up', 'up'] assert bond_obj_3.arp_polling_interval is None assert bond_obj_3.arp_ip_target is None + assert bond_obj_3.mii_polling_interval == "100" bond_obj_4 = Bond(context_wrap(BONDINFO_MODE_7, CONTEXT_PATH)) assert bond_obj_4.file_name == 'bond0' assert bond_obj_4.arp_polling_interval == "1000" assert bond_obj_4.arp_ip_target == "10.152.1.1" + assert bond_obj_4.mii_polling_interval == "0" assert bond_obj_4.primary_slave == 'em3 (primary_reselect failure)' bond_obj = Bond(context_wrap(BOND_MODE_4, CONTEXT_PATH)) From 6d466c482567b9fee5ddbb7e9912f7d9e517ee41 Mon Sep 17 00:00:00 2001 From: Ryan Blakley <3789184+ryan-blakley@users.noreply.github.com> Date: Wed, 8 Sep 2021 18:01:16 -0400 Subject: [PATCH 541/892] Fix excludes not working in _load_components (#3209) * Moved the do_include and do_exclude to the top and added a check for excludes before importing path. 
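A minimal sketch of the corrected ordering (the dotted path below is illustrative, not a real module): the exclude predicate is built and consulted before any import work is attempted::

    import re

    include, exclude = ".*", "test"
    do_include = re.compile(include).search if include else lambda x: True
    do_exclude = re.compile(exclude).search if exclude else lambda x: False

    path = "insights.parsers.tests.test_dig"  # illustrative dotted module path
    if do_exclude(path):
        num_loaded = 0      # returned immediately; _import() is never reached
    elif do_include(path):
        num_loaded = 1      # only now would _import(path, ...) run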
Signed-off-by: Ryan Blakley --- insights/core/dr.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/insights/core/dr.py b/insights/core/dr.py index d1f4d2006..62b12daed 100644 --- a/insights/core/dr.py +++ b/insights/core/dr.py @@ -394,11 +394,16 @@ def _import(path, continue_on_error): def _load_components(path, include=".*", exclude="test", continue_on_error=True): + do_include = re.compile(include).search if include else lambda x: True + do_exclude = re.compile(exclude).search if exclude else lambda x: False + num_loaded = 0 if path.endswith(".py"): path, _ = os.path.splitext(path) path = path.rstrip("/").replace("/", ".") + if do_exclude(path): + return 0 package = _import(path, continue_on_error) if not package: @@ -406,9 +411,6 @@ def _load_components(path, include=".*", exclude="test", continue_on_error=True) num_loaded += 1 - do_include = re.compile(include).search if include else lambda x: True - do_exclude = re.compile(exclude).search if exclude else lambda x: False - if not hasattr(package, "__path__"): return num_loaded From 9079a723bfeda6446684dfb347bb9e150c55ffd3 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 14 Sep 2021 03:12:16 +0800 Subject: [PATCH 542/892] Add default spec mssql_api_assessment (#3208) Signed-off-by: jiazhang --- insights/specs/default.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index c710934d8..b8d7c04f2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -476,6 +476,7 @@ def md_device_list(broker): ]) mount = simple_command("/bin/mount") mounts = simple_file("/proc/mounts") + mssql_api_assessment = simple_file("/var/opt/mssql/log/assessments/assessment-latest") mssql_conf = simple_file("/var/opt/mssql/mssql.conf") multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;") multipath_conf = simple_file("/etc/multipath.conf") From 2bbef70118f3e9d08ca0513cc518567e757050a0 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Tue, 14 Sep 2021 03:19:34 +0800 Subject: [PATCH 543/892] Remove the unused datasource specs from default.py (#3207) * Remove the unused datasource specs from default.py Signed-off-by: Xiangce Liu * Fix flake8 errors Signed-off-by: Xiangce Liu * Modify the pre-loaded component in collect.py Signed-off-by: Xiangce Liu --- insights/collect.py | 6 +---- insights/specs/default.py | 56 +-------------------------------------- 2 files changed, 2 insertions(+), 60 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 7451ce237..71b84adce 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -179,17 +179,13 @@ - name: insights.combiners.satellite_version.CapsuleVersion enabled: true - # needed for the 'pre-check' of the 'gfs2_mount_points' spec - - name: insights.parsers.mount.Mount - enabled: true + # needed for the 'pre-check' of the 'corosync_cmapctl_cmd_list' spec - name: insights.combiners.redhat_release.RedHatRelease enabled: true - name: insights.parsers.uname.Uname enabled: true - name: insights.parsers.redhat_release.RedhatRelease enabled: true - - name: insights.components.rhel_version.IsRhel6 - enabled: true - name: insights.components.rhel_version.IsRhel7 enabled: true - name: insights.components.rhel_version.IsRhel8 diff --git a/insights/specs/default.py b/insights/specs/default.py index b8d7c04f2..84e2fadef 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -11,7 +11,6 @@ import datetime import logging 
import os -import re import signal from grp import getgrgid @@ -28,12 +27,10 @@ from insights.core.spec_factory import first_file, listdir from insights.combiners.services import Services from insights.combiners.ps import Ps -from insights.components.rhel_version import IsRhel8, IsRhel7, IsRhel6 +from insights.components.rhel_version import IsRhel8, IsRhel7 from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP from insights.components.ceph import IsCephMonitor -from insights.parsers.mdstat import Mdstat from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion -from insights.parsers.mount import Mount from insights.specs import Specs from insights.specs.datasources import ( awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, lpstat, package_provides, @@ -114,27 +111,6 @@ class DefaultSpecs(Specs): ps_ef = simple_command("/bin/ps -ef") ps_eo = simple_command("/usr/bin/ps -eo pid,ppid,comm") ps_eo_cmd = ps_datasource.ps_eo_cmd - - @datasource(ps_auxww, HostContext) - def tomcat_base(broker): - """ - Function to search the output of ``ps auxww`` to find all running tomcat - processes and extract the base path where the process was started. - - Returns: - list: List of the paths to each running process - """ - ps = broker[DefaultSpecs.ps_auxww].content - results = [] - findall = re.compile(r"\-Dcatalina\.base=(\S+)").findall - for p in ps: - found = findall(p) - if found: - # Only get the path which is absolute - results.extend(f for f in found if f[0] == '/') - return list(set(results)) - - catalina_out = foreach_collect(tomcat_base, "%s/catalina.out") cciss = glob_file("/proc/driver/cciss/cciss*") cdc_wdm = simple_file("/sys/bus/usb/drivers/cdc_wdm/module/refcnt") ceilometer_collector_log = first_file(["/var/log/containers/ceilometer/collector.log", "/var/log/ceilometer/collector.log"]) @@ -268,29 +244,6 @@ def du_dirs_list(broker): getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") getenforce = simple_command("/usr/sbin/getenforce") getsebool = simple_command("/usr/sbin/getsebool -a") - - @datasource(Mount, [IsRhel6, IsRhel7, IsRhel8], HostContext) - def gfs2_mount_points(broker): - """ - Function to search the output of ``mount`` to find all the gfs2 file - systems. - And only run the ``stat`` command on RHEL version that's less than - 8.3. With 8.3 and later, the command ``blkid`` will also output the - block size info. 
- - Returns: - list: a list of mount points of which the file system type is gfs2 - """ - gfs2_mount_points = [] - if (broker.get(IsRhel6) or broker.get(IsRhel7) or - (broker.get(IsRhel8) and broker[IsRhel8].minor < 3)): - for mnt in broker[Mount]: - if mnt.mount_type == "gfs2": - gfs2_mount_points.append(mnt.mount_point) - if gfs2_mount_points: - return gfs2_mount_points - raise SkipComponent - gfs2_file_system_block_size = foreach_execute(gfs2_mount_points, "/usr/bin/stat -fc %%s %s") gluster_v_info = simple_command("/usr/sbin/gluster volume info") gnocchi_conf = first_file(["/var/lib/config-data/puppet-generated/gnocchi/etc/gnocchi/gnocchi.conf", "/etc/gnocchi/gnocchi.conf"]) gnocchi_metricd_log = first_file(["/var/log/containers/gnocchi/gnocchi-metricd.log", "/var/log/gnocchi/metricd.log"]) @@ -454,13 +407,6 @@ def md5chk_file_list(broker): md5chk_files = foreach_execute(md5chk_file_list, "/usr/bin/md5sum %s", keep_rc=True) mdstat = simple_file("/proc/mdstat") - @datasource(Mdstat, HostContext) - def md_device_list(broker): - md = broker[Mdstat] - if md.components: - return [dev["device_name"] for dev in md.components if dev["active"]] - raise SkipComponent() - mdadm_E = foreach_execute(md_device_list, "/usr/sbin/mdadm -E %s") meminfo = first_file(["/proc/meminfo", "/meminfo"]) messages = simple_file("/var/log/messages") modinfo_i40e = simple_command("/sbin/modinfo i40e") From ad9b662501da0c18619737b9018764557ecd8058 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adri=C3=A1n=20Moreno?= <52785490+amorenoz@users.noreply.github.com> Date: Tue, 14 Sep 2021 17:25:41 +0200 Subject: [PATCH 544/892] shell: support running shell in kernel mode (#3144) Add a new option to shell "-k" to start an ipython kernel. The ipykernel module is required to be installed separately for this option to work. Signed-off-by: Adrian Moreno --- insights/shell.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/insights/shell.py b/insights/shell.py index 549b676d8..60223c9ac 100644 --- a/insights/shell.py +++ b/insights/shell.py @@ -789,7 +789,7 @@ def _ipython_key_completions_(self): return self.keys() -def start_session(paths, change_directory=False, __coverage=None): +def start_session(paths, change_directory=False, __coverage=None, kernel=False): __cwd = os.path.abspath(os.curdir) def callback(brokers): @@ -815,7 +815,12 @@ def callback(brokers): __ns = {} __ns.update(globals()) __ns.update({"models": models}) - IPython.start_ipython([], user_ns=__ns, config=__cfg) + + if kernel: + from ipykernel import kernelapp + kernelapp.launch_new_instance([], user_ns=__ns, config=__cfg) + else: + IPython.start_ipython([], user_ns=__ns, config=__cfg) with_brokers(paths, callback) if change_directory: @@ -859,6 +864,11 @@ def _parse_args(): p.add_argument( "-v", "--verbose", action="store_true", help="Global debug level logging." ) + p.add_argument( + "-k", "--kernel", action="store_true", default=False, + help="Start an IPython kernel instead of an interactive session." + " Requires ipykernel module" + ) path_desc = "Archives or paths to analyze. Leave off to target the current system." 
p.add_argument("paths", nargs="*", help=path_desc) @@ -884,7 +894,7 @@ def main(): load_packages(parse_plugins(args.plugins)) _handle_config(args.config) - start_session(args.paths, args.cd, __coverage=cov) + start_session(args.paths, args.cd, __coverage=cov, kernel=args.kernel) if cov: cov.stop() cov.erase() From 7a74116f1c48ecb7f00754a5d0d85679e4d8a39c Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Tue, 14 Sep 2021 13:25:19 -0400 Subject: [PATCH 545/892] Update validation code to fix python2.7 issue (#3214) * Update validation code to fix python2 issue Signed-off-by: Alec Cohan * remove prints & linting fixes Signed-off-by: Alec Cohan * flake8 Signed-off-by: Alec Cohan * use six.PY3 Signed-off-by: Alec Cohan * remove non-needed check Signed-off-by: Alec Cohan * remove unicode in favor of six.text_type Signed-off-by: Alec Cohan * Update normalization to include the possibility of a CommentedSeq() being a sequence of strings Signed-off-by: Alec Cohan * remove testing print statement Signed-off-by: Alec Cohan --- .../ansible/playbook_verifier/__init__.py | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index cc7bdedf2..bdbacd29a 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -1,4 +1,5 @@ import os +import six import copy import base64 import tempfile @@ -8,6 +9,7 @@ from logging import getLogger from insights.client.apps.ansible.playbook_verifier.contrib import gnupg from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel import yaml +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.comments import CommentedMap, CommentedSeq from insights.client.constants import InsightsConstants as constants __all__ = ("loadPlaybookYaml", "verify", "PlaybookVerificationError") @@ -48,7 +50,11 @@ def createSnippetHash(snippet): output: snippetHash (bytes) """ snippetHash = hashlib.sha256() - serializedSnippet = str(snippet).encode("UTF-8") + if six.PY2: + normalizedSnippet = normalizeSnippet(snippet) + serializedSnippet = str(normalizedSnippet).encode("UTF-8") + else: + serializedSnippet = str(snippet).encode("UTF-8") snippetHash.update(serializedSnippet) return snippetHash.digest() @@ -150,3 +156,30 @@ def loadPlaybookYaml(playbook): output: playbook yaml """ return yaml.load(playbook) + + +def normalizeSnippet(snippet): + """ + Normalize python2 snippet and get rid of any default unicode values + output: normalized snippet + """ + new = CommentedMap() + for key, value in snippet.iteritems(): + if isinstance(value, CommentedMap): + new[key] = CommentedMap(normalizeSnippet(value)) + elif isinstance(value, CommentedSeq): + new_sequence = CommentedSeq() + for item in value: + if not isinstance(item, CommentedMap): + new_sequence.append(item) + elif isinstance(item, six.text_type): + new_sequence.append(item.encode('ascii', 'ignore')) + else: + new_sequence.append(normalizeSnippet(item)) + new[key] = new_sequence + elif isinstance(value, six.text_type): + new[key] = value.encode('ascii', 'ignore') + else: + new[key] = value + + return new From 630396bd060b72611bf1a89e33b0d8723de2828f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Hornick=C3=BD?= Date: Wed, 15 Sep 2021 21:09:44 +0200 Subject: [PATCH 546/892] Add custom datasource for collecting yum/dnf updates 
(#2993) Dynamically imports os-specific library for handling DNF/YUM interaction. Performs a resolution of updateable packages, and outputs it as JSON. Expected performance is on the order of seconds for both RHEL7 and RHEL8. Signed-off-by: mhornick --- docs/shared_parsers_catalog/yum_updates.rst | 3 + insights/parsers/tests/test_yum_updates.py | 38 ++++++ insights/parsers/yum_updates.py | 54 ++++++++ insights/specs/__init__.py | 1 + insights/specs/datasources/yum_updates.py | 141 ++++++++++++++++++++ insights/specs/default.py | 4 +- 6 files changed, 240 insertions(+), 1 deletion(-) create mode 100644 docs/shared_parsers_catalog/yum_updates.rst create mode 100644 insights/parsers/tests/test_yum_updates.py create mode 100644 insights/parsers/yum_updates.py create mode 100644 insights/specs/datasources/yum_updates.py diff --git a/docs/shared_parsers_catalog/yum_updates.rst b/docs/shared_parsers_catalog/yum_updates.rst new file mode 100644 index 000000000..506382b20 --- /dev/null +++ b/docs/shared_parsers_catalog/yum_updates.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.yum_updates + :members: + :show-inheritance: diff --git a/insights/parsers/tests/test_yum_updates.py b/insights/parsers/tests/test_yum_updates.py new file mode 100644 index 000000000..0b1dc8e7c --- /dev/null +++ b/insights/parsers/tests/test_yum_updates.py @@ -0,0 +1,38 @@ +from insights.parsers import yum_updates +from insights.tests import context_wrap +import doctest + +YUM_UPDATES_INPUT = """{ + "releasever": "8", + "basearch": "x86_64", + "update_list": { + "NetworkManager-1:1.22.8-4.el8.x86_64": { + "available_updates": [ + { + "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64", + "repository": "rhel-8-for-x86_64-baseos-rpms", + "basearch": "x86_64", + "releasever": "8", + "erratum": "RHSA-2020:3011" + } + ] + } + }, + "metadata_time": "2021-01-01T09:39:45Z" + } +""" + + +def test_yum_updates(): + info = yum_updates.YumUpdates(context_wrap(YUM_UPDATES_INPUT)) + assert info is not None + assert len(info.updates) == 1 + assert len(info.updates["NetworkManager-1:1.22.8-4.el8.x86_64"]["available_updates"]) == 1 + + +def test_yum_updates_docs(): + env = { + 'updates': yum_updates.YumUpdates(context_wrap(YUM_UPDATES_INPUT)), + } + failed, total = doctest.testmod(yum_updates, globs=env) + assert failed == 0 diff --git a/insights/parsers/yum_updates.py b/insights/parsers/yum_updates.py new file mode 100644 index 000000000..ea67d1993 --- /dev/null +++ b/insights/parsers/yum_updates.py @@ -0,0 +1,54 @@ +""" +YumUpdates - parser for the `yum_updates` datasource +==================================================== +Provides a list of available package updates, along with related advisories. This information +is collected using DNF/YUM python interface. 
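+The collected output is serialized as JSON, which this parser loads directly.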
+""" + +from insights import JSONParser, parser +from insights.specs import Specs + + +@parser(Specs.yum_updateinfo) +class YumUpdates(JSONParser): + """ + Expected output of the command is:: + + { + "releasever": "8", + "basearch": "x86_64", + "update_list": { + "NetworkManager-1:1.22.8-4.el8.x86_64": { + "available_updates": [ + { + "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64", + "repository": "rhel-8-for-x86_64-baseos-rpms", + "basearch": "x86_64", + "releasever": "8", + "erratum": "RHSA-2020:3011" + } + ] + } + }, + "metadata_time": "2021-01-01T09:39:45Z" + } + + Examples: + >>> len(updates.updates) + 1 + >>> updates.updates['NetworkManager-1:1.22.8-4.el8.x86_64']['available_updates'][0] == { \ + 'basearch': 'x86_64', \ + 'erratum': 'RHSA-2020:3011', \ + 'package': 'NetworkManager-1:1.22.8-5.el8_2.x86_64', \ + 'releasever': '8', \ + 'repository': 'rhel-8-for-x86_64-baseos-rpms'} + True + """ + + @property + def updates(self): + """ + Returns: + dict: Dict(package name -> list of available updates) + """ + return self.data['update_list'] diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 17e30ffde..8021aac48 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -724,6 +724,7 @@ class Specs(SpecSet): yum_repolist = RegistryPoint() yum_repos_d = RegistryPoint(multi_output=True) yum_updateinfo = RegistryPoint() + yum_updates = RegistryPoint() zdump_v = RegistryPoint() zipl_conf = RegistryPoint() sendq_socket_buffer = RegistryPoint() diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py new file mode 100644 index 000000000..19ca6e302 --- /dev/null +++ b/insights/specs/datasources/yum_updates.py @@ -0,0 +1,141 @@ +""" +Custom datasource for collecting yum updates +""" +import json +import time + +from insights import datasource, HostContext, SkipComponent +from insights.components.rhel_version import IsRhel7 +from insights.core.spec_factory import DatasourceProvider + +sorted_cmp = None +try: + # cmp_to_key is not available in python 2.6, but it has sorted function which accepts cmp function + def sorted_cmp(it, cmp): + from functools import cmp_to_key + return sorted(it, key=cmp_to_key(cmp)) +except ImportError: + sorted_cmp = sorted + + +class UpdatesManager: + """ Performs package resolution on yum based systems """ + def __init__(self): + import yum + + self.base = yum.YumBase() + self.base.doGenericSetup(cache=1) + self.releasever = self.base.conf.yumvar['releasever'] + self.basearch = self.base.conf.yumvar['basearch'] + self.packages = [] + self.repos = [] + self.updict = {} + + def __enter__(self): + return self + + def __exit__(self, *args): + pass + + @staticmethod + def pkg_cmp(a, b): + vercmp = a.verCMP(b) + if vercmp != 0: + return vercmp + if a.repoid != b.repoid: + return -1 if a.repoid < b.repoid else 1 + return 0 + + def sorted_pkgs(self, pkgs): + return sorted_cmp(pkgs, self.pkg_cmp) + + def load(self): + self.base.doRepoSetup() + self.base.doSackSetup() + self.packages = self.base.pkgSack.returnPackages() + self.repos = self.base.repos.repos + self._build_updict() + + def _build_updict(self): + self.updict = {} + for pkg in self.packages: + self.updict.setdefault(pkg.na, []).append(pkg) + + def enabled_repos(self): + return [repo.id for repo in self.base.repos.listEnabled()] + + def installed_packages(self): + return self.base.rpmdb.returnPackages() + + def updates(self, pkg): + nevra = pkg.nevra + updates_list = [] + for upg in self.updict[pkg.na]: + if upg.verGT(pkg): + 
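+                # verGT() is yum's strict greater-than on (epoch, version,
+                # release), so only candidates newer than the installed
+                # package are collected.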
updates_list.append(upg) + return nevra, updates_list + + @staticmethod + def pkg_nevra(pkg): + return "{}-{}:{}-{}.{}".format(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch) + + @staticmethod + def pkg_repo(pkg): + return pkg.repoid + + def advisory(self, pkg): + adv = self.base.upinfo.get_notice(pkg.nvr) + if adv: + return adv.get_metadata()['update_id'] + return None + + @staticmethod + def last_update(): + return 0 + + +@datasource(HostContext, [IsRhel7]) +def yum_updates(_broker): + """ + This datasource provides a list of available updates on the system. + It uses the yum python library installed locally, and collects list of + available package updates, along with advisory info where applicable. + """ + + if not _broker.get(IsRhel7): + raise SkipComponent("Yum updates currently only works on RHEL 7") + + with UpdatesManager() as umgr: + umgr.load() + + response = { + "releasever": umgr.releasever, + "basearch": umgr.basearch, + "update_list": {}, + } + + data = {'package_list': umgr.installed_packages()} + updates = {} + for pkg in data["package_list"]: + (nevra, updates_list) = umgr.updates(pkg) + updates[nevra] = updates_list + for (nevra, update_list) in updates.items(): + if update_list: + out_list = [] + for pkg in umgr.sorted_pkgs(update_list): + pkg_dict = { + "package": umgr.pkg_nevra(pkg), + "repository": umgr.pkg_repo(pkg), + "basearch": response["basearch"], + "releasever": response["releasever"], + } + erratum = umgr.advisory(pkg) + if erratum: + pkg_dict["erratum"] = erratum + out_list.append(pkg_dict) + response["update_list"][nevra] = {"available_updates": out_list} + + ts = umgr.last_update() + if ts: + response["metadata_time"] = time.strftime("%FT%TZ", time.gmtime(ts)) + return DatasourceProvider(content=json.dumps(response), relative_path='insights_commands/yum_updates_list') diff --git a/insights/specs/default.py b/insights/specs/default.py index 84e2fadef..1d039e64a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -34,7 +34,7 @@ from insights.specs import Specs from insights.specs.datasources import ( awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, lpstat, package_provides, - ps as ps_datasource, sap, satellite_missed_queues) + ps as ps_datasource, sap, satellite_missed_queues, yum_updates) from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr @@ -733,6 +733,8 @@ def pmlog_summary_file(broker): yum_repolist = simple_command("/usr/bin/yum -C --noplugins repolist", signum=signal.SIGTERM) yum_repos_d = glob_file("/etc/yum.repos.d/*.repo") yum_updateinfo = simple_command("/usr/bin/yum -C updateinfo list", signum=signal.SIGTERM) + yum_updates = yum_updates.yum_updates + zipl_conf = simple_file("/etc/zipl.conf") rpm_format = format_rpm() installed_rpms = simple_command("/bin/rpm -qa --qf '%s'" % rpm_format, context=HostContext, signum=signal.SIGTERM) From 29cb795d69dacc25381610ea594b6d38133fd053 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 16 Sep 2021 03:31:59 +0800 Subject: [PATCH 547/892] Enhance parser cups_ppd (#3220) * Enhance parser cups_ppd Signed-off-by: jiazhang * Update format Signed-off-by: jiazhang --- insights/parsers/cups_ppd.py | 4 +++- insights/parsers/tests/test_cups_ppd.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/insights/parsers/cups_ppd.py b/insights/parsers/cups_ppd.py index ace98adb3..7fed2eb4d 100644 --- a/insights/parsers/cups_ppd.py +++ b/insights/parsers/cups_ppd.py @@ -19,6 +19,8 @@ class CupsPpd(Parser, dict): Sample 
output for files:: *PPD-Adobe: "4.3" + *% Copyright 2007-2014 by Apple Inc. + *% Copyright: 1997-2007 by Easy Software Products. *FormatVersion: "4.3" *FileVersion: "2.2" *LanguageVersion: English @@ -45,7 +47,7 @@ def parse_content(self, content): raise SkipException("No Valid Configuration") data = {} for line in content: - if "*" in line and ":" in line: + if line.startswith("*") and ":" in line and not line.startswith("*%"): key = line.split(":")[0].split("*")[-1].strip() value = line.split(":")[-1].strip() if key in data: diff --git a/insights/parsers/tests/test_cups_ppd.py b/insights/parsers/tests/test_cups_ppd.py index b0fa27022..9fa16deb7 100644 --- a/insights/parsers/tests/test_cups_ppd.py +++ b/insights/parsers/tests/test_cups_ppd.py @@ -7,6 +7,8 @@ CUPS_PPD = """ *PPD-Adobe: "4.3" +*% Copyright 2007-2014 by Apple Inc. +*% test: this line is used to check comment *FormatVersion: "4.3" *FileVersion: "2.2" *LanguageVersion: English @@ -33,6 +35,7 @@ def test_cups_ppd(): cups_ppd_result = CupsPpd(context_wrap(CUPS_PPD, path='/etc/cups/ppd/test_printer1.ppd')) assert cups_ppd_result["PCFileName"] == '"ippeve.ppd"' assert cups_ppd_result["cupsFilter2"] == ['"application/vnd.cups-pdf application/pdf 10 -"', '"application/vnd.cups-postscript application/postscript 10 -"'] + assert "test" not in cups_ppd_result with pytest.raises(SkipException) as exc: CupsPpd(context_wrap(CUPS_PPD_INVALID1, path='/etc/cups/ppd/test_printer1.ppd')) From 51e0370ae67f7ab8cab79a1a3a2e507ad3126020 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 16 Sep 2021 03:51:08 +0800 Subject: [PATCH 548/892] Remove old spec ansible_tower_settings (#3216) Signed-off-by: jiazhang --- .../ansible_tower_settings.rst | 3 -- insights/parsers/ansible_tower_settings.py | 42 ------------------- .../tests/test_ansible_tower_settings.py | 38 ----------------- insights/specs/__init__.py | 1 - insights/specs/default.py | 1 - 5 files changed, 85 deletions(-) delete mode 100644 docs/shared_parsers_catalog/ansible_tower_settings.rst delete mode 100644 insights/parsers/ansible_tower_settings.py delete mode 100644 insights/parsers/tests/test_ansible_tower_settings.py diff --git a/docs/shared_parsers_catalog/ansible_tower_settings.rst b/docs/shared_parsers_catalog/ansible_tower_settings.rst deleted file mode 100644 index 11b514d42..000000000 --- a/docs/shared_parsers_catalog/ansible_tower_settings.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.ansible_tower_settings - :members: - :show-inheritance: diff --git a/insights/parsers/ansible_tower_settings.py b/insights/parsers/ansible_tower_settings.py deleted file mode 100644 index 74dad34fa..000000000 --- a/insights/parsers/ansible_tower_settings.py +++ /dev/null @@ -1,42 +0,0 @@ -""" -AnsibleTowerSettings - file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py`` -===================================================================================== -The AnsibleTowerSettings class parses the file ``/etc/tower/conf.d/*.py`` and -``/etc/tower/settings.py``. -""" -from insights import parser, get_active_lines, Parser -from insights.specs import Specs -from insights.parsers import SkipException - - -@parser(Specs.ansible_tower_settings) -class AnsibleTowerSettings(Parser, dict): - """ - Class for content of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``. 
- - Sample ``/etc/tower/conf.d/*.py`` file:: - - AWX_CLEANUP_PATHS = False - - Raises: - SkipException: the file is empty or there is no valid content - - Examples:: - >>> type(conf) - - >>> conf['AWX_CLEANUP_PATHS'] - 'False' - """ - - def parse_content(self, content): - """Parse content of of ansible tower config file ``/etc/tower/conf.d/*.py`` and ``/etc/tower/settings.py``""" - if not content: - raise SkipException("No Valid Configuration") - data = {} - for line in get_active_lines(content): - if "=" in line: - key, value = line.split("=") - data[key.strip()] = value.strip() - if not data: - raise SkipException("No Valid Configuration") - self.update(data) diff --git a/insights/parsers/tests/test_ansible_tower_settings.py b/insights/parsers/tests/test_ansible_tower_settings.py deleted file mode 100644 index d0724f179..000000000 --- a/insights/parsers/tests/test_ansible_tower_settings.py +++ /dev/null @@ -1,38 +0,0 @@ -import doctest -import pytest -from insights.parsers import ansible_tower_settings, SkipException -from insights.tests import context_wrap - - -ANSIBLE_TOWER_CONFIG_CUSTOM = ''' -AWX_CLEANUP_PATHS = False -LOGGING['handlers']['tower_warnings']['level'] = 'DEBUG' -'''.strip() - -ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1 = ''' -'''.strip() - -ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2 = ''' -AWX_CLEANUP_PATHS -'''.strip() - - -def test_ansible_tower_settings(): - conf = ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM)) - assert conf['AWX_CLEANUP_PATHS'] == 'False' - - with pytest.raises(SkipException) as exc: - ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID1)) - assert 'No Valid Configuration' in str(exc) - - with pytest.raises(SkipException) as exc: - ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM_INVALID2)) - assert 'No Valid Configuration' in str(exc) - - -def test_ansible_tower_settings_documentation(): - failed_count, tests = doctest.testmod( - ansible_tower_settings, - globs={'conf': ansible_tower_settings.AnsibleTowerSettings(context_wrap(ANSIBLE_TOWER_CONFIG_CUSTOM))} - ) - assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 8021aac48..ee663f4bd 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -5,7 +5,6 @@ class Specs(SpecSet): abrt_ccpp_conf = RegistryPoint(filterable=True) abrt_status_bare = RegistryPoint() alternatives_display_python = RegistryPoint() - ansible_tower_settings = RegistryPoint(filterable=True, multi_output=True) amq_broker = RegistryPoint(multi_output=True) ansible_host = RegistryPoint() auditctl_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 1d039e64a..65662f0d0 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -81,7 +81,6 @@ class DefaultSpecs(Specs): abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True") alternatives_display_python = simple_command("/usr/sbin/alternatives --display python") amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml") - ansible_tower_settings = glob_file(["/etc/tower/settings.py", "/etc/tower/conf.d/*.py"]) auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") From 58830c9819b5f7201fa363ecd6c9b0246fc85822 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Fri, 17 Sep 2021 08:48:43 +0800 Subject: [PATCH 549/892] Enhance datasource lpstat 
 (#3219)

* Enhance datasource lpstat

Signed-off-by: jiazhang

* Update logic to check line

Signed-off-by: jiazhang

* Update line filter

Signed-off-by: jiazhang
---
 insights/specs/datasources/lpstat.py      | 2 +-
 insights/tests/datasources/test_lpstat.py | 6 ++++++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/insights/specs/datasources/lpstat.py b/insights/specs/datasources/lpstat.py
index 77b004277..276ef5d67 100644
--- a/insights/specs/datasources/lpstat.py
+++ b/insights/specs/datasources/lpstat.py
@@ -37,7 +37,7 @@ def lpstat_protocol_printers_info(broker):
         for line in content:
             if "device for " in line:
                 # Remove printer address information
-                result.append(line.rsplit(":", 1)[0])
+                result.append(line.split("://", 1)[0] if '://' in line else line)
         if result:
             return DatasourceProvider(content="\n".join(result), relative_path='insights_commands/lpstat_-v')
     except Exception as e:
         raise SkipComponent
diff --git a/insights/tests/datasources/test_lpstat.py b/insights/tests/datasources/test_lpstat.py
index 4f1ab5a3e..b5c3ab343 100644
--- a/insights/tests/datasources/test_lpstat.py
+++ b/insights/tests/datasources/test_lpstat.py
@@ -8,6 +8,9 @@
 LPSTAT_V = """
 device for test_printer1: ipp://cups.test.com/printers/test_printer1
 device for test_printer2: ipp://cups.test.com/printers/test_printer2
+device for test_printer3: socket://192.168.1.5:9100
+device for test_printer4: usb://smth
+device for test_printer5: ///dev/null
 """.strip()

 LPSTAT_V_NOT_GOOD = """
@@ -17,6 +20,9 @@
 LPSTAT_V_RESULT = """
 device for test_printer1: ipp
 device for test_printer2: ipp
+device for test_printer3: socket
+device for test_printer4: usb
+device for test_printer5: ///dev/null
 """.strip()

 RELATIVE_PATH = 'insights_commands/lpstat_-v'

From 42bbb74c52d63685507de888b0e17b009294ed84 Mon Sep 17 00:00:00 2001
From: Ping Qin <30404410+qinpingli@users.noreply.github.com>
Date: Tue, 21 Sep 2021 04:07:54 +0800
Subject: [PATCH 550/892] New specs and parsers for scsi_mod, lpfc driver and
 qla2xxx driver ma… (#3221)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* New specs and parsers for scsi_mod, lpfc driver and qla2xxx driver max luns

Signed-off-by: Qin Ping

* Update the doc of Ql2xMaxLUN, SCSIModMaxReportLUNs and LpfcMaxLUNs parsers

Signed-off-by: Qin Ping
---
 insights/parsers/sys_module.py            | 72 +++++++++++++++++++++++
 insights/parsers/tests/test_sys_module.py | 33 ++++++++++-
 insights/specs/__init__.py                |  3 +
 insights/specs/default.py                 |  3 +
 4 files changed, 110 insertions(+), 1 deletion(-)
 mode change 100644 => 100755 insights/parsers/sys_module.py

diff --git a/insights/parsers/sys_module.py b/insights/parsers/sys_module.py
old mode 100644
new mode 100755
index 700ff4c07..7cb41b5ef
--- a/insights/parsers/sys_module.py
+++ b/insights/parsers/sys_module.py
@@ -8,6 +8,12 @@
 DMModUseBlkMq - file ``/sys/module/dm_mod/parameters/use_blk_mq``
 ------------------------------------------------------------------
+LpfcMaxLUNs - file ``/sys/module/lpfc/parameters/lpfc_max_luns``
+----------------------------------------------------------------
+Ql2xMaxLUN - file ``/sys/module/qla2xxx/parameters/ql2xmaxlun``
+---------------------------------------------------------------
+SCSIModMaxReportLUNs - file ``/sys/module/scsi_mod/parameters/max_report_luns``
+-------------------------------------------------------------------------------
 SCSIModUseBlkMq - file ``/sys/module/scsi_mod/parameters/use_blk_mq``
--------------------------------------------------------------------- VHostNetZeroCopyTx - file ``/sys/module/vhost_net/parameters/experimental_zcopytx`` @@ -55,6 +61,30 @@ def is_on(self): raise ValueError("Unexpected value {0}, please get raw data from attribute 'val' and tell is_on by yourself.".format(self.val)) +class MaxLUNs(Parser): + """ + Parse for file `/sys/module/{scsi_mod, lpfc, ...}/parameters/{max_report_luns, lpfc_max_luns, ...}`. + File content shows the maximum LUN value currently supported. + + Sample Content:: + + 512 + + Raises: + SkipException: When content is empty or no parse-able content. + + Attributes: + val(int): Convert the raw data of the content to int. + """ + + def parse_content(self, content): + if not content or len(content) != 1: + raise SkipException() + if not content[0].strip('').isdigit(): + raise ValueError("Unexpected content: {0}".format(content[0])) + self.val = int(content[0].strip()) + + @parser(Specs.dm_mod_use_blk_mq) class DMModUseBlkMq(XModUseBlkMq): """ @@ -101,3 +131,45 @@ class VHostNetZeroCopyTx(XModUseBlkMq): """ pass + + +@parser(Specs.lpfc_max_luns) +class LpfcMaxLUNs(MaxLUNs): + """ + This file `/sys/module/lpfc/parameters/lpfc_max_luns` shows the max LUN number + supported by lpfc driver. + + Examples: + + >>> lpfc_max_luns.val + 512 + """ + pass + + +@parser(Specs.ql2xmaxlun) +class Ql2xMaxLUN(MaxLUNs): + """ + This file `/sys/module/qla2xxx/parameters/ql2xmaxlun` shows the max LUN number + supported by qla2xxxx driver. + + Examples: + + >>> ql2xmaxlun.val + 512 + """ + pass + + +@parser(Specs.scsi_mod_max_report_luns) +class SCSIModMaxReportLUNs(MaxLUNs): + """ + This file `/sys/module/scsi_mod/parameters/max_report_luns` shows the max LUN number + supported by OS. + + Examples: + + >>> scsi_mod_max_report_luns.val + 512 + """ + pass diff --git a/insights/parsers/tests/test_sys_module.py b/insights/parsers/tests/test_sys_module.py index a4817430c..b0e9043b0 100644 --- a/insights/parsers/tests/test_sys_module.py +++ b/insights/parsers/tests/test_sys_module.py @@ -1,7 +1,7 @@ import doctest import pytest from insights.parsers import sys_module, SkipException -from insights.parsers.sys_module import DMModUseBlkMq, SCSIModUseBlkMq, VHostNetZeroCopyTx +from insights.parsers.sys_module import DMModUseBlkMq, SCSIModUseBlkMq, VHostNetZeroCopyTx, MaxLUNs, LpfcMaxLUNs, Ql2xMaxLUN, SCSIModMaxReportLUNs from insights.tests import context_wrap @@ -24,11 +24,19 @@ """.strip() +MAX_LUNS = """ +512 +""".strip() + + def test_doc_examples(): env = { 'dm_mod_use_blk_mq': DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_Y)), 'scsi_mod_use_blk_mq': SCSIModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_N)), 'vhost_net_zero_copy_tx': VHostNetZeroCopyTx(context_wrap(ZERO_COPY)), + 'lpfc_max_luns': LpfcMaxLUNs(context_wrap(MAX_LUNS)), + 'ql2xmaxlun': Ql2xMaxLUN(context_wrap(MAX_LUNS)), + 'scsi_mod_max_report_luns': SCSIModMaxReportLUNs(context_wrap(MAX_LUNS)), } failed, total = doctest.testmod(sys_module, globs=env) assert failed == 0 @@ -55,6 +63,20 @@ def test_XModUseBlkMq(): assert zero_copy_1.val == '1' +def test_MaxLUNs(): + max_luns = MaxLUNs(context_wrap(MAX_LUNS)) + assert max_luns.val == 512 + + lpfc_max_luns = LpfcMaxLUNs(context_wrap(MAX_LUNS)) + assert lpfc_max_luns.val == 512 + + ql2xmaxlun = Ql2xMaxLUN(context_wrap(MAX_LUNS)) + assert ql2xmaxlun.val == 512 + + scsi_mod_max_luns = SCSIModMaxReportLUNs(context_wrap(MAX_LUNS)) + assert scsi_mod_max_luns.val == 512 + + def test_class_exceptions(): with pytest.raises(SkipException): dm_mod = 
DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_EMPTY)) @@ -64,3 +86,12 @@ def test_class_exceptions(): dm_mod_unknow = DMModUseBlkMq(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE)) dm_mod_unknow.is_on assert "Unexpected value unknow_case, please get raw data from attribute 'val' and tell is_on by yourself." in str(e) + + with pytest.raises(SkipException): + max_luns = MaxLUNs(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_EMPTY)) + assert max_luns is None + + with pytest.raises(ValueError) as e: + max_luns_not_digit = MaxLUNs(context_wrap(SCSI_DM_MOD_USE_BLK_MQ_UNKNOW_CASE)) + max_luns_not_digit.val + assert "Unexpected content: unknow_case" in str(e) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ee663f4bd..59eea896d 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -294,6 +294,7 @@ class Specs(SpecSet): locale = RegistryPoint() localtime = RegistryPoint() logrotate_conf = RegistryPoint(multi_output=True) + lpfc_max_luns = RegistryPoint() lpstat_p = RegistryPoint() lpstat_protocol_printers = RegistryPoint() ls_boot = RegistryPoint() @@ -517,6 +518,7 @@ class Specs(SpecSet): pvs = RegistryPoint() qemu_conf = RegistryPoint() qemu_xml = RegistryPoint(multi_output=True) + ql2xmaxlun = RegistryPoint() qpid_stat_g = RegistryPoint() qpid_stat_q = RegistryPoint() qpid_stat_u = RegistryPoint() @@ -583,6 +585,7 @@ class Specs(SpecSet): scheduler = RegistryPoint(multi_output=True) sched_rt_runtime_us = RegistryPoint() scsi = RegistryPoint() + scsi_mod_max_report_luns = RegistryPoint() scsi_mod_use_blk_mq = RegistryPoint() sctp_asc = RegistryPoint() sctp_eps = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 65662f0d0..d6a19c2de 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -351,6 +351,7 @@ def httpd_cmd(broker): limits_conf = glob_file(["/etc/security/limits.conf", "/etc/security/limits.d/*.conf"]) localtime = simple_command("/usr/bin/file -L /etc/localtime") logrotate_conf = glob_file(["/etc/logrotate.conf", "/etc/logrotate.d/*"]) + lpfc_max_luns = simple_file("/sys/module/lpfc/parameters/lpfc_max_luns") lpstat_p = simple_command("/usr/bin/lpstat -p") lpstat_protocol_printers = lpstat.lpstat_protocol_printers_info ls_boot = simple_command("/bin/ls -lanR /boot") @@ -573,6 +574,7 @@ def pmlog_summary_file(broker): rhsm_katello_default_ca_cert = simple_command("/usr/bin/openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer") qemu_conf = simple_file("/etc/libvirt/qemu.conf") qemu_xml = glob_file(r"/etc/libvirt/qemu/*.xml") + ql2xmaxlun = simple_file("/sys/module/qla2xxx/parameters/ql2xmaxlun") qpidd_conf = simple_file("/etc/qpid/qpidd.conf") rabbitmq_env = simple_file("/etc/rabbitmq/rabbitmq-env.conf") rabbitmq_report = simple_command("/usr/sbin/rabbitmqctl report") @@ -631,6 +633,7 @@ def pmlog_summary_file(broker): scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') scsi_fwver = glob_file('/sys/class/scsi_host/host[0-9]*/fwrev') + scsi_mod_max_report_luns = simple_file("/sys/module/scsi_mod/parameters/max_report_luns") scsi_mod_use_blk_mq = simple_file("/sys/module/scsi_mod/parameters/use_blk_mq") sctp_asc = simple_file('/proc/net/sctp/assocs') sctp_eps = simple_file('/proc/net/sctp/eps') From e5760b754a3be1347a83fa03336571f993f03e85 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Mon, 20 Sep 2021 15:23:54 -0500 Subject: [PATCH 551/892] Stop collection of facter and remove 
dependencies (#3224) * We will no longer collect facter due to issues documented in Bugzilla 1989655 * Update combiners to remove dependency * Remove spec from HostContext collection * Update tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/collect.py | 3 -- insights/combiners/hostname.py | 28 ++++++-------- insights/combiners/tests/test_hostname.py | 46 ++++------------------- insights/combiners/tests/test_sap.py | 18 ++++----- insights/combiners/tests/test_uptime.py | 25 ++---------- insights/combiners/uptime.py | 37 ++++++++---------- insights/specs/default.py | 1 - insights/tests/datasources/test_sap.py | 12 +++--- 8 files changed, 53 insertions(+), 117 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index 71b84adce..5bf5c4c3e 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -98,9 +98,6 @@ - name: insights.parsers.hostname enabled: true - - name: insights.parsers.facter - enabled: true - - name: insights.parsers.systemid enabled: true diff --git a/insights/combiners/hostname.py b/insights/combiners/hostname.py index 186410b38..ac82120f4 100644 --- a/insights/combiners/hostname.py +++ b/insights/combiners/hostname.py @@ -3,7 +3,7 @@ ======== Combiner for ``hostname`` information. It uses the results of all the -``Hostname`` parsers, ``Facter`` and the ``SystemID`` parser to get the fqdn, +``Hostname`` parsers and the ``SystemID`` parser to get the fqdn, hostname and domain information. @@ -12,17 +12,16 @@ from insights.core.plugins import combiner from insights.core.serde import deserializer, serializer from insights.parsers.hostname import Hostname as HnF, HostnameShort as HnS, HostnameDefault as HnD -from insights.parsers.facter import Facter from insights.parsers.systemid import SystemID from insights.util import deprecated -@combiner([HnF, HnD, HnS, Facter, SystemID]) +@combiner([HnF, HnD, HnS, SystemID]) class Hostname(object): """ - Check hostname, facter and systemid to get the fqdn, hostname and domain. + Check hostname and systemid to get the fqdn, hostname and domain. - Prefer hostname to facter and systemid. + Prefer hostname to systemid. Examples: >>> type(hostname) @@ -37,19 +36,16 @@ class Hostname(object): Raises: Exception: If no hostname can be found in any of the source parsers. """ - def __init__(self, hf, hd, hs, ft, sid): + def __init__(self, hf, hd, hs, sid): self.fqdn = self.hostname = self.domain = None - if hf or hs or hd or ft: - hn = hf or hs or hd or ft + if hf or hs or hd: + hn = hf or hs or hd self.hostname = self.fqdn = hn.hostname self.domain = '' if hf and hf.fqdn: self.fqdn = hf.fqdn self.domain = hf.domain - elif ft and ft.fqdn: - self.fqdn = ft.fqdn - self.domain = ft.domain if ft.domain else ".".join(self.fqdn.split(".")[1:]) else: self.fqdn = sid.get("profile_name") if self.fqdn: @@ -60,16 +56,16 @@ def __init__(self, hf, hd, hs, ft, sid): raise Exception("Unable to get hostname.") -@combiner([HnF, HnD, HnS, Facter, SystemID]) -def hostname(hf, hd, hs, ft, sid): +@combiner([HnF, HnD, HnS, SystemID]) +def hostname(hf, hd, hs, sid): """ .. warning:: This combiner methode is deprecated, please use :py:class:`insights.combiners.hostname.Hostname` instead. - Check hostname, facter and systemid to get the fqdn, hostname and domain. + Check hostname and systemid to get the fqdn, hostname and domain. - Prefer hostname to facter and systemid. + Prefer hostname to systemid. 
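    For a concrete sense of the slimmer four-source signature, here is a
    minimal sketch that mirrors the updated combiner tests further down
    (``context_wrap`` is the usual test helper; the hostname value is made
    up)::

        from insights.combiners.hostname import Hostname
        from insights.parsers.hostname import Hostname as HnF
        from insights.tests import context_wrap

        # The full `hostname -f` parser wins; with Facter gone, SystemID is
        # the only remaining fallback for the fqdn.
        hn = Hostname(HnF(context_wrap("rhel7.example.com")), None, None, None)
        assert (hn.fqdn, hn.hostname, hn.domain) == \
            ("rhel7.example.com", "rhel7", "example.com")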
Examples: >>> hn.fqdn @@ -87,7 +83,7 @@ def hostname(hf, hd, hs, ft, sid): Exception: If no hostname can be found in any of the source parsers. """ deprecated(hostname, "Use the `Hostname` class instead.") - return Hostname(hf, hd, hs, ft, sid) + return Hostname(hf, hd, hs, sid) @serializer(Hostname) diff --git a/insights/combiners/tests/test_hostname.py b/insights/combiners/tests/test_hostname.py index 7d02958a0..4528178d1 100644 --- a/insights/combiners/tests/test_hostname.py +++ b/insights/combiners/tests/test_hostname.py @@ -1,7 +1,6 @@ import pytest import doctest from insights.parsers.hostname import Hostname as HnF, HostnameShort as HnS, HostnameDefault as HnD -from insights.parsers.facter import Facter from insights.parsers.systemid import SystemID from insights.combiners import hostname from insights.combiners.hostname import Hostname, hostname as hn_func @@ -10,23 +9,6 @@ HOSTNAME_FULL = "rhel7.example.com" HOSTNAME_SHORT = "rhel7" HOSTNAME_DEF = "rhel7" -FACTS_FQDN = """ -COMMAND> facter - -architecture => x86_64 -augeasversion => 1.1.0 -facterversion => 1.7.6 -filesystems => xfs -fqdn => ewa-satellite.example.com -hostname => ewa-satellite -""".strip() -FACTS_NO_FQDN = """ -COMMAND> facter - -architecture => x86_64 -facterversion => 1.7.6 -filesystems => xfs -""".strip() SYSTEMID_PROFILE_NAME = ''' @@ -72,30 +54,21 @@ def test_get_hostname(): hnf = HnF(context_wrap(HOSTNAME_FULL)) expected = (HOSTNAME_FULL, HOSTNAME_SHORT, 'example.com') - result = Hostname(hnf, None, None, None, None) + result = Hostname(hnf, None, None, None) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] hns = HnS(context_wrap(HOSTNAME_SHORT)) expected = (HOSTNAME_SHORT, HOSTNAME_SHORT, '') - result = Hostname(None, None, hns, None, None) + result = Hostname(None, None, hns, None) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] hnd = HnD(context_wrap(HOSTNAME_DEF)) expected = (HOSTNAME_DEF, HOSTNAME_DEF, '') - result = Hostname(None, hnd, None, None, None) - assert result.fqdn == expected[0] - assert result.hostname == expected[1] - assert result.domain == expected[2] - - -def test_get_facter_hostname(): - ft = Facter(context_wrap(FACTS_FQDN)) - expected = ('ewa-satellite.example.com', 'ewa-satellite', 'example.com') - result = Hostname(None, None, None, ft, None) + result = Hostname(None, hnd, None, None) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] @@ -104,7 +77,7 @@ def test_get_facter_hostname(): def test_get_systemid_hostname(): sid = SystemID(context_wrap(SYSTEMID_PROFILE_NAME)) expected = ('example_profile', 'example_profile', '') - result = Hostname(None, None, None, None, sid) + result = Hostname(None, None, None, sid) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] @@ -114,10 +87,9 @@ def test_get_all_hostname(): hnf = HnF(context_wrap(HOSTNAME_FULL)) hns = HnS(context_wrap(HOSTNAME_SHORT)) hnd = HnD(context_wrap(HOSTNAME_DEF)) - ft = Facter(context_wrap(FACTS_FQDN)) sid = SystemID(context_wrap(SYSTEMID_PROFILE_NAME)) expected = (HOSTNAME_FULL, HOSTNAME_SHORT, 'example.com') - result = Hostname(hnf, hnd, hns, ft, sid) + result = Hostname(hnf, hnd, hns, sid) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] @@ -128,10 +100,9 @@ def test_hostname_function(): hnf = HnF(context_wrap(HOSTNAME_FULL)) hns = 
HnS(context_wrap(HOSTNAME_SHORT)) hnd = HnD(context_wrap(HOSTNAME_DEF)) - ft = Facter(context_wrap(FACTS_FQDN)) sid = SystemID(context_wrap(SYSTEMID_PROFILE_NAME)) expected = (HOSTNAME_FULL, HOSTNAME_SHORT, 'example.com') - result = hn_func(hnf, hnd, hns, ft, sid) + result = hn_func(hnf, hnd, hns, sid) assert result.fqdn == expected[0] assert result.hostname == expected[1] assert result.domain == expected[2] @@ -147,11 +118,10 @@ def test_hostname_doc(): hnf = HnF(context_wrap(HOSTNAME_FULL)) hns = HnS(context_wrap(HOSTNAME_SHORT)) hnd = HnD(context_wrap(HOSTNAME_DEF)) - ft = Facter(context_wrap(FACTS_FQDN)) sid = SystemID(context_wrap(SYSTEMID_PROFILE_NAME)) env = { - 'hostname': Hostname(hnf, hnd, hns, ft, sid), - 'hn': hn_func(hnf, hnd, hns, ft, sid) + 'hostname': Hostname(hnf, hnd, hns, sid), + 'hn': hn_func(hnf, hnd, hns, sid) } failed, total = doctest.testmod(hostname, globs=env) assert failed == 0 diff --git a/insights/combiners/tests/test_sap.py b/insights/combiners/tests/test_sap.py index ac355fa8b..c99b8dc25 100644 --- a/insights/combiners/tests/test_sap.py +++ b/insights/combiners/tests/test_sap.py @@ -232,7 +232,7 @@ def test_lssap_netweaver(): lssap = Lssap(context_wrap(Lssap_nw_TEST)) - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, None, lssap) assert sap['D50'].number == '50' assert 'D16' in sap.local_instances @@ -246,7 +246,7 @@ def test_lssap_netweaver(): def test_saphostcrtl_hana(): lssap = Lssap(context_wrap(Lssap_nw_TEST)) inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, lssap) assert 'D50' not in sap assert sap.local_instances == ['HDB88'] @@ -264,7 +264,7 @@ def test_saphostcrtl_hana(): def test_saphostcrtl_hana_2(): lssap = Lssap(context_wrap(Lssap_all_TEST)) inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_GOOD)) - hn = Hostname(HnF(context_wrap(HOSTNAME1)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME1)), None, None, None) sap = Sap(hn, inst, lssap) assert 'D50' not in sap assert 'HDB00' in sap @@ -289,7 +289,7 @@ def test_saphostcrtl_hana_2(): def test_lssap_hana(): lssap = Lssap(context_wrap(Lssap_hana_TEST)) - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, None, lssap) assert 'D50' not in sap assert sap.is_netweaver is False @@ -299,7 +299,7 @@ def test_lssap_hana(): def test_lssap_ascs(): lssap = Lssap(context_wrap(Lssap_ascs_TEST)) - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, None, lssap) assert sap['ASCS16'].sid == 'HA2' # ASCS is also a kind of NetWeaver @@ -310,7 +310,7 @@ def test_lssap_ascs(): def test_all(): lssap = Lssap(context_wrap(Lssap_all_TEST)) - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, None, lssap) assert sap['D16'].version == '749, patch 10, changelist 1698137' assert sap['ASCS16'].hostname == 'lu0417' @@ -321,7 +321,7 @@ def test_all(): def test_r_case(): saphostctrl = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE)) - hn = Hostname(HnF(context_wrap(HOSTNAME2)), None, None, None, None) + hn = 
Hostname(HnF(context_wrap(HOSTNAME2)), None, None, None) sap = Sap(hn, saphostctrl, None) assert sorted(sap.local_instances) == sorted(['W20', 'SMDA98', 'SMDA97']) assert sap['DVEBMGS12'].version == '753, patch 501, changelist 1967207' @@ -334,7 +334,7 @@ def test_r_case(): def test_doc_examples(): env = { 'saps': Sap( - Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None), + Hostname(HnF(context_wrap(HOSTNAME)), None, None, None), None, Lssap(context_wrap(Lssap_doc_TEST)) ) @@ -344,7 +344,7 @@ def test_doc_examples(): def test_ab(): - hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HnF(context_wrap(HOSTNAME)), None, None, None) with pytest.raises(SkipComponent) as se: Sap(hn, None, None) assert 'No SAP instance.' in str(se) diff --git a/insights/combiners/tests/test_uptime.py b/insights/combiners/tests/test_uptime.py index e9778f369..cfad03c43 100644 --- a/insights/combiners/tests/test_uptime.py +++ b/insights/combiners/tests/test_uptime.py @@ -1,18 +1,10 @@ import datetime from insights.parsers.uptime import Uptime -from insights.parsers.facter import Facter from insights.combiners.uptime import uptime from insights.tests import context_wrap UPTIME1 = " 14:28:24 up 5:55, 4 users, load average: 0.04, 0.03, 0.05" UPTIME2 = " 10:55:22 up 40 days, 3 min, 1 user, load average: 0.49, 0.12, 0.04" -UPTIME3 = """ -COMMAND> facts -uptime => 21 days -uptime_days => 21 -uptime_hours => 525 -uptime_seconds => 1893598 -""".strip() def total_seconds(time_delta): @@ -21,7 +13,7 @@ def total_seconds(time_delta): def test_get_uptime_uptime1(): ut = Uptime(context_wrap(UPTIME1)) - upt = uptime(ut, None) + upt = uptime(ut) assert upt.currtime == '14:28:24' assert upt.updays == "" assert upt.uphhmm == '5:55' @@ -33,7 +25,7 @@ def test_get_uptime_uptime1(): def test_get_uptime_uptime2(): ut = Uptime(context_wrap(UPTIME2)) - upt = uptime(ut, None) + upt = uptime(ut) assert upt.currtime == '10:55:22' assert upt.updays == '40' assert upt.uphhmm == '00:03' @@ -43,20 +35,9 @@ def test_get_uptime_uptime2(): assert total_seconds(upt.uptime) == total_seconds(c) -def test_get_facter_uptime(): - ft = Facter(context_wrap(UPTIME3)) - upt = uptime(None, ft) - assert upt.updays == "21" - assert upt.uphhmm == '21:59' - assert upt.loadavg is None - c = datetime.timedelta(days=0, hours=0, minutes=0, seconds=1893598) - assert total_seconds(upt.uptime) == total_seconds(c) - - def test_get_both_uptime(): ut = Uptime(context_wrap(UPTIME2)) - ft = Facter(context_wrap(UPTIME3)) - upt = uptime(ut, ft) + upt = uptime(ut) assert upt.currtime == '10:55:22' assert upt.updays == '40' assert upt.uphhmm == '00:03' diff --git a/insights/combiners/uptime.py b/insights/combiners/uptime.py index f02859660..5d477c540 100644 --- a/insights/combiners/uptime.py +++ b/insights/combiners/uptime.py @@ -3,7 +3,7 @@ ====== Combiner for uptime information. It uses the results of the ``Uptime`` -parser and the ``Facter`` parser to get the uptime information. ``Uptime`` is +parser to get the uptime information. ``Uptime`` is the preferred source of data. 
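    The removed Facter branch boiled down to integer arithmetic on
    ``uptime_seconds``; for the record, a sketch of that split using the
    figure from the deleted test (1893598 seconds, roughly 21.9 days)::

        secs = 1893598
        up_dd = secs // (3600 * 24)             # 21 whole days
        up_hh = (secs % (3600 * 24)) // 3600    # 21 remaining hours
        up_mm = (secs % 3600) // 60             # 59 remaining minutes
        assert (up_dd, '%02d:%02d' % (up_hh, up_mm)) == (21, '21:59')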
Examples: @@ -16,9 +16,10 @@ """ from __future__ import division from collections import namedtuple +from insights import SkipComponent from insights.core.plugins import combiner from insights.parsers.uptime import Uptime as upt -from insights.parsers.facter import Facter +from insights.util import deprecated Uptime = namedtuple("Uptime", field_names=[ @@ -27,34 +28,26 @@ """namedtuple: Type for storing the uptime information.""" -@combiner([upt, Facter]) -def uptime(ut, facter): - """Check uptime and facts to get the uptime information. +@combiner(upt) +def uptime(ut): + """ + .. warning:: + This combiner method is deprecated, please use + :py:class:`insights.parsers.uptime.Uptime` instead. - Prefer uptime to facts. + Check uptime to get the uptime information. Returns: insights.combiners.uptime.Uptime: A named tuple with `currtime`, `updays`, `uphhmm`, `users`, `loadavg` and `uptime` components. Raises: - Exception: If no data is available from both of the parsers. + SkipComponent: If no data is available or if ``loadavg`` is not available. """ + deprecated(uptime, "Use the `Uptime` parser instead.") - ut = ut - if ut and ut.loadavg: + if ut.loadavg: return Uptime(ut.currtime, ut.updays, ut.uphhmm, ut.users, ut.loadavg, ut.uptime) - ft = facter - if ft and hasattr(ft, 'uptime_seconds'): - import datetime - secs = int(ft.uptime_seconds) - up_dd = secs // (3600 * 24) - up_hh = (secs % (3600 * 24)) // 3600 - up_mm = (secs % 3600) // 60 - updays = str(up_dd) if up_dd > 0 else '' - uphhmm = '%02d:%02d' % (up_hh, up_mm) - up_time = datetime.timedelta(seconds=secs) - return Uptime(None, updays, uphhmm, None, None, up_time) - - raise Exception("Unable to get uptime information.") + + raise SkipComponent("Unable to get uptime information.") diff --git a/insights/specs/default.py b/insights/specs/default.py index d6a19c2de..f29e061d9 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -231,7 +231,6 @@ def du_dirs_list(broker): ethtool_g = foreach_execute(ethernet.interfaces, "/sbin/ethtool -g %s") ethtool_i = foreach_execute(ethernet.interfaces, "/sbin/ethtool -i %s") ethtool_k = foreach_execute(ethernet.interfaces, "/sbin/ethtool -k %s") - facter = simple_command("/usr/bin/facter") fc_match = simple_command("/bin/fc-match -sv 'sans:regular:roman' family fontformat") fcoeadm_i = simple_command("/usr/sbin/fcoeadm -i") findmnt_lo_propagation = simple_command("/bin/findmnt -lo+PROPAGATION") diff --git a/insights/tests/datasources/test_sap.py b/insights/tests/datasources/test_sap.py index 690ec5887..c01391758 100644 --- a/insights/tests/datasources/test_sap.py +++ b/insights/tests/datasources/test_sap.py @@ -137,7 +137,7 @@ def shell_out(self, cmd, split=True, timeout=None, keep_rc=False, env=None, sign def test_hana_instance_skip(): inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_R_CASE)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap} broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) @@ -148,7 +148,7 @@ def test_hana_instance_skip(): def test_sid(): # Good inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap} broker.update({LocalSpecs.sap_instance: 
LocalSpecs.sap_instance(broker)}) @@ -161,7 +161,7 @@ def test_sid(): def test_hana_sid(): # Good inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap} broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) @@ -180,7 +180,7 @@ def test_hana_sid(): def test_hana_sid_SID_nr(): # Good inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap} broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) @@ -199,7 +199,7 @@ def test_hana_sid_SID_nr(): def test_ld_library_path_of_user(): # Good inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap, HostContext: FakeContext()} broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) @@ -215,7 +215,7 @@ def test_ld_library_path_of_user(): # Bad inst = SAPHostCtrlInstances(context_wrap(SAPHOSTCTRL_HOSTINSTANCES_GOOD)) - hn = Hostname(HostnameParser(context_wrap(HOSTNAME1)), None, None, None, None) + hn = Hostname(HostnameParser(context_wrap(HOSTNAME1)), None, None, None) sap = Sap(hn, inst, None) broker = {Sap: sap, HostContext: FakeContext()} broker.update({LocalSpecs.sap_instance: LocalSpecs.sap_instance(broker)}) From 89660e0b952d71eea4efba46940523bd383dc260 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 22 Sep 2021 12:06:35 -0400 Subject: [PATCH 552/892] Fixed flake8 errors for the newest version of flake8 for the client (#3226) * Fixed the few flake8 errors thrown by the latest version of flake8 for the client only. 
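One of the hunks below also removes a duplicated 'help' key from the
--module option dict in config.py. A Python dict literal silently keeps
only the last duplicate, so the descriptive help string was already dead
code (a minimal reproduction, not the real config):

    opts = {'help': 'visible help text', 'action': 'store', 'help': 'SUPPRESS'}
    assert opts['help'] == 'SUPPRESS' and len(opts) == 2  # first value discarded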
Signed-off-by: Ryan Blakley Co-authored-by: Jeremy Crafts --- .../apps/ansible/playbook_verifier/__init__.py | 14 +++++++------- insights/client/config.py | 5 ++--- insights/client/connection.py | 2 +- .../client/collection_rules/test_map_components.py | 1 + 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index bdbacd29a..1d345e638 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -61,15 +61,15 @@ def createSnippetHash(snippet): def getPublicKey(gpg): - if not PUBLIC_KEY_FOLDER: - raise PlaybookVerificationError(message="PUBLIC KEY IMPORT ERROR: Public key file not found") + if not PUBLIC_KEY_FOLDER: + raise PlaybookVerificationError(message="PUBLIC KEY IMPORT ERROR: Public key file not found") - publicKey = PUBLIC_KEY_FOLDER - importResults = gpg.import_keys(publicKey) - if (importResults.count < 1): - raise PlaybookVerificationError(message="PUBLIC KEY NOT IMPORTED: Public key import failed") + publicKey = PUBLIC_KEY_FOLDER + importResults = gpg.import_keys(publicKey) + if (importResults.count < 1): + raise PlaybookVerificationError(message="PUBLIC KEY NOT IMPORTED: Public key import failed") - return importResults + return importResults def excludeDynamicElements(snippet): diff --git a/insights/client/config.py b/insights/client/config.py index c0c140229..92ff97136 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -244,9 +244,8 @@ def _core_collect_default(): 'module': { 'default': None, 'opt': ['--module', '-m'], - 'help': 'Directly run a Python module within the insights-core package', - 'action': 'store', - 'help': argparse.SUPPRESS + 'help': argparse.SUPPRESS, + 'action': 'store' }, 'obfuscate': { # non-CLI diff --git a/insights/client/connection.py b/insights/client/connection.py index 7f253bd84..5b8467ad9 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -379,7 +379,7 @@ def _test_urls(self, url, method): } test_req = self.post(url, files=test_files) elif method == "GET": - test_req = self.get(url) + test_req = self.get(url) if test_req.status_code in (200, 201, 202): logger.info( "Successfully connected to: %s", url) diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index 5cdf1cf4b..e6e303324 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -25,6 +25,7 @@ def get_uploader_json(): uploader_json = requests.get(url).json() return uploader_json + uploader_json = get_uploader_json() From cb9ae054af7e9a8fab6f9341ddf81d74eb9676f3 Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Wed, 22 Sep 2021 12:42:04 -0400 Subject: [PATCH 553/892] Update verifier code to remove long suffix python2 (#3227) * Update verifier code to remove the Long suffix for python2 impl Signed-off-by: Alec Cohan * remove test file Signed-off-by: Alec Cohan Co-authored-by: Jeremy Crafts --- insights/client/apps/ansible/playbook_verifier/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index 1d345e638..e2c95fef1 100644 --- 
a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -10,6 +10,7 @@ from insights.client.apps.ansible.playbook_verifier.contrib import gnupg from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel import yaml from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.comments import CommentedMap, CommentedSeq +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.scalarint import ScalarInt from insights.client.constants import InsightsConstants as constants __all__ = ("loadPlaybookYaml", "verify", "PlaybookVerificationError") @@ -170,15 +171,17 @@ def normalizeSnippet(snippet): elif isinstance(value, CommentedSeq): new_sequence = CommentedSeq() for item in value: + if isinstance(item, six.text_type): + new_sequence.append(item.encode('ascii', 'ignore')) if not isinstance(item, CommentedMap): new_sequence.append(item) - elif isinstance(item, six.text_type): - new_sequence.append(item.encode('ascii', 'ignore')) else: new_sequence.append(normalizeSnippet(item)) new[key] = new_sequence elif isinstance(value, six.text_type): new[key] = value.encode('ascii', 'ignore') + elif isinstance(value, ScalarInt): + new[key] = int(value) else: new[key] = value From c2561e83a49a6877453c1c4639ffd49eb1f8fdb9 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 22 Sep 2021 15:37:25 -0400 Subject: [PATCH 554/892] Fixed flake8 errors for the newest version of flake8 (#3222) * Added W605, and W504 to the ignores since it would require a lot of trivial changes to a lot of files. * Fixed any flake8 errors thrown, most were indention, missing blank lines, and a few print formatting related issues. * Re-ordered imports to be in alphabetical order on any files that I had to fix a flake8 error on. * Bumped the flake8 version in the setup.py file. 
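W605 lands on the ignore list because many parsers write regex patterns as
plain strings, where '\d' is technically an invalid escape sequence; CPython
currently passes such escapes through unchanged, so the warnings are noise
rather than bugs (a two-line illustration, not code from this repo):

    import re
    # both spellings match today -- W605 just asks for the raw-string form
    assert re.findall('\d+', 'abc123') == re.findall(r'\d+', 'abc123') == ['123']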
Signed-off-by: Ryan Blakley --- .flake8 | 2 +- insights/combiners/tests/test_ipv6.py | 135 +++++++++--------- .../combiners/tests/test_user_namespaces.py | 12 +- insights/ocpshell.py | 9 +- insights/parsers/saphostctrl.py | 4 +- insights/parsers/sctp.py | 9 +- insights/parsers/tests/test_df.py | 6 +- insights/parsers/tests/test_ip.py | 3 +- insights/parsers/tests/test_parsers_module.py | 7 +- insights/parsers/tests/test_podman_list.py | 6 +- insights/parsers/tests/test_ps.py | 10 +- insights/parsers/tests/test_rhsm_conf.py | 14 +- insights/parsers/tests/test_sctp.py | 4 +- .../parsers/tests/test_sysconfig_up2date.py | 10 +- insights/tests/test_json_parser.py | 1 + insights/util/autology/datasources.py | 3 +- setup.py | 3 +- 17 files changed, 124 insertions(+), 114 deletions(-) diff --git a/.flake8 b/.flake8 index f15d5b515..170a6256c 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,3 @@ [flake8] -ignore = E501,E126,E127,E128,E722,E741 +ignore = E501,E126,E127,E128,E722,E741,W605,W504 exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py,insights/parsers/tests/lvm_test_data.py,insights/client/apps/ansible/playbook_verifier/contrib diff --git a/insights/combiners/tests/test_ipv6.py b/insights/combiners/tests/test_ipv6.py index f9ffbf022..d4482fd10 100644 --- a/insights/combiners/tests/test_ipv6.py +++ b/insights/combiners/tests/test_ipv6.py @@ -1,12 +1,11 @@ -from ..ipv6 import IPv6 -from ...parsers.modprobe import ModProbe -from ...parsers.lsmod import LsMod -from ...parsers.cmdline import CmdLine -from ...parsers.sysctl import Sysctl -from ...parsers.uname import Uname -from ...tests import context_wrap - from collections import namedtuple +from insights.combiners.ipv6 import IPv6 +from insights.parsers.cmdline import CmdLine +from insights.parsers.lsmod import LsMod +from insights.parsers.modprobe import ModProbe +from insights.parsers.sysctl import Sysctl +from insights.parsers.uname import Uname +from insights.tests import context_wrap Case = namedtuple('Case', ['cmdline', 'lsmod', 'modprobe', 'sysctl']) @@ -96,70 +95,70 @@ ''' # noqa CASES = [ - # noqa - # RHEL7 not disabled - (7, Case(CMDLINE_NOT_DISABLED, None, None, SYSCTL_NOT_DISABLED), - (False, set())), - # RHEL7 disabled via cmdline - (7, Case(CMDLINE_DISABLED, None, None, SYSCTL_NOT_DISABLED), - (True, set(['cmdline']))), - # RHEL7 disabled via sysctl - (7, Case(CMDLINE_NOT_DISABLED, None, None, SYSCTL_DISABLED), - (True, set(['sysctl']))), - # RHEL7 disabled by both cmdline and sysctl - (7, Case(CMDLINE_DISABLED, None, None, SYSCTL_DISABLED), - (True, set(['cmdline', 'sysctl']))), - # RHEL7 with only uname provided - (7, Case(None, None, None, None), - (False, set())), - # RHEL6 loaded not disabled - (6, Case(None, LSMOD_LOADED, MODPROBE_NOT_DISABLED, - SYSCTL_NOT_DISABLED), (False, set())), - # RHEL6 not loaded but not disabled - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_NOT_DISABLED, - SYSCTL_NOT_DISABLED), (False, set())), - # RHEL6 fake installed but loaded - (6, Case(None, LSMOD_LOADED, MODPROBE_FAKE, SYSCTL_NOT_DISABLED), - (False, set())), - # RHEL6 not loaded, fake install commented - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_FAKE_COMMENTED, - SYSCTL_NOT_DISABLED), (False, set())), - # RHEL6 loaded but disabled via modprobe - (6, Case(None, LSMOD_LOADED, MODPROBE_DISABLED, SYSCTL_NOT_DISABLED), - (True, set(['modprobe_disable']))), - # RHEL6 not loaded, disabled via modprobe - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_DISABLED, - SYSCTL_NOT_DISABLED), (True, set(['modprobe_disable']))), - # RHEL6 not 
loaded, disabled via fake install - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_FAKE, SYSCTL_NOT_DISABLED), - (True, set(['fake_install']))), - # RHEL6 loaded but disabled by sysctl - (6, Case(None, LSMOD_LOADED, MODPROBE_NOT_DISABLED, SYSCTL_DISABLED), - (True, set(['sysctl']))), - # RHEL6 not loaded, disabled by sysctl - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_NOT_DISABLED, - SYSCTL_DISABLED), (True, set(['sysctl']))), - # RHEL6 disabled by modprobe and sysctl - (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_DISABLED, SYSCTL_DISABLED), - (True, set(['sysctl', 'modprobe_disable']))), - # RHEL6 with lsmod but no modprobe - (6, Case(None, LSMOD_LOADED, None, None), - (False, set())), - # RHEL6 with modprobe but no lsmod - (6, Case(None, None, MODPROBE_DISABLED, None), - (True, set(['modprobe_disable']))), - # RHEL6 with fake install but no lsmod (insufficient data) - (6, Case(None, None, MODPROBE_FAKE, None), - (False, set())), - # RHEL6 with command line only - (6, Case(CMDLINE_RHEL6_DISABLED, None, None, None), - (True, set(['cmdline']))), - ] + # noqa + # RHEL7 not disabled + (7, Case(CMDLINE_NOT_DISABLED, None, None, SYSCTL_NOT_DISABLED), + (False, set())), + # RHEL7 disabled via cmdline + (7, Case(CMDLINE_DISABLED, None, None, SYSCTL_NOT_DISABLED), + (True, set(['cmdline']))), + # RHEL7 disabled via sysctl + (7, Case(CMDLINE_NOT_DISABLED, None, None, SYSCTL_DISABLED), + (True, set(['sysctl']))), + # RHEL7 disabled by both cmdline and sysctl + (7, Case(CMDLINE_DISABLED, None, None, SYSCTL_DISABLED), + (True, set(['cmdline', 'sysctl']))), + # RHEL7 with only uname provided + (7, Case(None, None, None, None), + (False, set())), + # RHEL6 loaded not disabled + (6, Case(None, LSMOD_LOADED, MODPROBE_NOT_DISABLED, + SYSCTL_NOT_DISABLED), (False, set())), + # RHEL6 not loaded but not disabled + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_NOT_DISABLED, + SYSCTL_NOT_DISABLED), (False, set())), + # RHEL6 fake installed but loaded + (6, Case(None, LSMOD_LOADED, MODPROBE_FAKE, SYSCTL_NOT_DISABLED), + (False, set())), + # RHEL6 not loaded, fake install commented + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_FAKE_COMMENTED, + SYSCTL_NOT_DISABLED), (False, set())), + # RHEL6 loaded but disabled via modprobe + (6, Case(None, LSMOD_LOADED, MODPROBE_DISABLED, SYSCTL_NOT_DISABLED), + (True, set(['modprobe_disable']))), + # RHEL6 not loaded, disabled via modprobe + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_DISABLED, + SYSCTL_NOT_DISABLED), (True, set(['modprobe_disable']))), + # RHEL6 not loaded, disabled via fake install + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_FAKE, SYSCTL_NOT_DISABLED), + (True, set(['fake_install']))), + # RHEL6 loaded but disabled by sysctl + (6, Case(None, LSMOD_LOADED, MODPROBE_NOT_DISABLED, SYSCTL_DISABLED), + (True, set(['sysctl']))), + # RHEL6 not loaded, disabled by sysctl + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_NOT_DISABLED, + SYSCTL_DISABLED), (True, set(['sysctl']))), + # RHEL6 disabled by modprobe and sysctl + (6, Case(None, LSMOD_NOT_LOADED, MODPROBE_DISABLED, SYSCTL_DISABLED), + (True, set(['sysctl', 'modprobe_disable']))), + # RHEL6 with lsmod but no modprobe + (6, Case(None, LSMOD_LOADED, None, None), + (False, set())), + # RHEL6 with modprobe but no lsmod + (6, Case(None, None, MODPROBE_DISABLED, None), + (True, set(['modprobe_disable']))), + # RHEL6 with fake install but no lsmod (insufficient data) + (6, Case(None, None, MODPROBE_FAKE, None), + (False, set())), + # RHEL6 with command line only + (6, Case(CMDLINE_RHEL6_DISABLED, None, None, None), + (True, 
set(['cmdline']))), +] def test_integration(): for rhel, case, result in CASES: - context = {} + context = dict() context[Uname] = Uname(context_wrap( UNAME_RHEL7 if rhel == 7 else UNAME_RHEL6)) diff --git a/insights/combiners/tests/test_user_namespaces.py b/insights/combiners/tests/test_user_namespaces.py index 1fcb69569..39c73ea93 100644 --- a/insights/combiners/tests/test_user_namespaces.py +++ b/insights/combiners/tests/test_user_namespaces.py @@ -1,7 +1,7 @@ -from ..user_namespaces import UserNamespaces -from ...parsers.cmdline import CmdLine -from ...parsers.grub_conf import Grub2Config -from ...tests import context_wrap +from insights.combiners.user_namespaces import UserNamespaces +from insights.parsers.cmdline import CmdLine +from insights.parsers.grub_conf import Grub2Config +from insights.tests import context_wrap ENABLE_TOK_A = ''' user_namespaces.enable=1 @@ -79,12 +79,12 @@ # Dash syntax, rather than underscore ((CMDLINE.format(ENABLE_TOK_B), GRUB2_CONF.format(ENABLE_TOK_B, '')), (True, [MENUENTRY_0])) - ] +] def test_integration(): for case in CASES: - context = {} + context = dict() context[CmdLine] = CmdLine(context_wrap(case[0][0])) if case[0][1] is not None: context[Grub2Config] = Grub2Config(context_wrap(case[0][1])) diff --git a/insights/ocpshell.py b/insights/ocpshell.py index b8702b0a1..40d915f74 100755 --- a/insights/ocpshell.py +++ b/insights/ocpshell.py @@ -1,6 +1,7 @@ #!/usr/bin/env python import argparse import logging + from insights.ocp import analyze @@ -57,10 +58,10 @@ def main(): conf = analyze(archives, excludes) # noqa F841 / unused var # import all the built-in predicates - from insights.parsr.query import (lt, le, eq, gt, ge, isin, contains, # noqa: F403 - startswith, endswith, ieq, icontains, istartswith, iendswith, # noqa: F403 - matches, make_child_query) # noqa: F403 - q = make_child_query # noqa: F405 + from insights.parsr.query import (lt, le, eq, gt, ge, isin, contains, # noqa: F401,F403 + startswith, endswith, ieq, icontains, istartswith, iendswith, + matches, make_child_query) + q = make_child_query import IPython from traitlets.config.loader import Config diff --git a/insights/parsers/saphostctrl.py b/insights/parsers/saphostctrl.py index 532dc3c1a..b401b72ac 100644 --- a/insights/parsers/saphostctrl.py +++ b/insights/parsers/saphostctrl.py @@ -8,9 +8,9 @@ ------------------------------------------------------------------------------------------------ """ from insights import parser, CommandParser +from insights.core.filters import add_filter from insights.parsers import ParseException, SkipException from insights.specs import Specs -from insights.core.filters import add_filter SAP_INST_FILTERS = [ @@ -105,7 +105,7 @@ def _update_instance(inst): if not inst['InstanceName'].endswith(inst['SystemNumber']): raise ParseException( - 'InstanceName: "{0}" missing match with SystemNumber: "{0}"'.format(inst['InstanceName'], inst['SystemNumber'])) + 'InstanceName: "{0}" missing match with SystemNumber: "{1}"'.format(inst['InstanceName'], inst['SystemNumber'])) # InstanceType = The chars in InstanceName before the SystemNumber # subtract len(sysnumber) characters from instance name inst['InstanceType'] = inst['InstanceName'][0:-len(inst['SystemNumber'])] diff --git a/insights/parsers/sctp.py b/insights/parsers/sctp.py index ac9779f7e..957c0449d 100644 --- a/insights/parsers/sctp.py +++ b/insights/parsers/sctp.py @@ -18,10 +18,9 @@ """ from insights import Parser, parser -from insights.parsers import SkipException, ParseException -from . 
import keyword_search -from insights.specs import Specs from insights.components.rhel_version import IsRhel6, IsRhel7 +from insights.parsers import keyword_search, SkipException, ParseException +from insights.specs import Specs @parser(Specs.sctp_eps) @@ -77,7 +76,7 @@ def parse_content(self, content): line = content[0].strip().split() keys_cnt = len(self.COLUMN_IDX) if "LPORT" not in line or len(line) != keys_cnt: - raise ParseException("Contents are not compatible to this parser".format(line)) + raise ParseException("The following line is not compatible with this parser: {0}".format(line)) self.data = [] for line in content[1:]: @@ -153,7 +152,7 @@ def parse_content(self, content): line = content[0].strip().split() keys_cnt = len(self.COLUMN_IDX) if "LPORT" not in line or len(line) != keys_cnt: - raise ParseException("Contents are not compatible to this parser".format(line)) + raise ParseException("The following line is not compatible with this parser: {0}".format(line)) laddr_idx = line.index('LADDRS') raddr_ridx = len(line) - line.index('RADDRS') diff --git a/insights/parsers/tests/test_df.py b/insights/parsers/tests/test_df.py index cc8076bb4..df28deed9 100644 --- a/insights/parsers/tests/test_df.py +++ b/insights/parsers/tests/test_df.py @@ -1,5 +1,5 @@ -import pytest import doctest +import pytest from insights.parsers import df, ParseException from insights.tests import context_wrap @@ -218,9 +218,10 @@ def test_df_al_bad(): assert 'Could not parse line' in str(exc) with pytest.raises(ParseException) as exc: - df_list = df.DiskFree_AL(context_wrap(DF_AL_BAD_BS)) + df.DiskFree_AL(context_wrap(DF_AL_BAD_BS)) assert 'Unknown block size' in str(exc) + DF_AL_BS_2MB = """ Filesystem 2MB-blocks Used Available Use% Mounted on /dev/vda3 62031 49197 9680 84% / @@ -236,6 +237,7 @@ def test_df_al_2MB(): assert df_list.block_size == 2000000 assert int(root.total) * df_list.block_size == 124062000000 # To Bytes + DF_LI_DOC = """ Filesystem Inodes IUsed IFree IUse% Mounted on devtmpfs 242224 359 241865 1% /dev diff --git a/insights/parsers/tests/test_ip.py b/insights/parsers/tests/test_ip.py index f84beacb9..5974f3dee 100644 --- a/insights/parsers/tests/test_ip.py +++ b/insights/parsers/tests/test_ip.py @@ -1,7 +1,7 @@ +from insights.contrib import ipaddress from insights.parsers import ip from insights.tests import context_wrap from insights.util import keys_in -from insights.contrib import ipaddress IP_ADDR_TEST = """ @@ -788,6 +788,7 @@ def test_ipv6_neigh(): 'addr': ipaddress.ip_address(u'ff02::1:ffea:2c00') } + IP_NEIGH_SHOW = """ 2a04:9a00:1:1:ec4:7aff:febb:d3ca dev bond0.104 lladdr 0c:c4:7a:bb:d3:ca REACHABLE fe80::22a:6aff:fe65:c3c2 dev bond0.104 lladdr 00:2a:6a:65:c3:c2 router STALE diff --git a/insights/parsers/tests/test_parsers_module.py b/insights/parsers/tests/test_parsers_module.py index 750c68efe..fc1731202 100644 --- a/insights/parsers/tests/test_parsers_module.py +++ b/insights/parsers/tests/test_parsers_module.py @@ -1,8 +1,8 @@ import pytest + from collections import OrderedDict -from insights.parsers import split_kv_pairs, unsplit_lines, parse_fixed_table -from insights.parsers import calc_offset, optlist_to_dict, keyword_search -from insights.parsers import parse_delimited_table, ParseException, SkipException +from insights.parsers import (calc_offset, keyword_search, optlist_to_dict, parse_delimited_table, parse_fixed_table, + split_kv_pairs, unsplit_lines, ParseException, SkipException) SPLIT_TEST_1 = """ # Comment line @@ -620,6 +620,7 @@ def test_keyword_search(): 
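The ParseException message fixes for saphostctrl and sctp in this patch hinge
on the same quiet behaviour: ``str.format`` ignores surplus arguments, whether
a placeholder is repeated (``{0}`` twice) or missing altogether, so neither
bug ever raised (sketch, using the real saphostctrl message):

    assert 'no placeholders at all'.format('ignored') == 'no placeholders at all'
    msg = 'InstanceName: "{0}" missing match with SystemNumber: "{0}"'
    assert msg.format('HDB88', '88').count('HDB88') == 2  # second argument dropped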
certificate__contains='encryption' ) == [] + PS_LIST = [ {'PID': '692', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 692 2 kdmflush'}, {'PID': '701', 'PPID': '2', 'COMMAND': 'kdmflush', '_line': ' 701 2 kdmflush'}, diff --git a/insights/parsers/tests/test_podman_list.py b/insights/parsers/tests/test_podman_list.py index b93d8cecd..b126f2977 100644 --- a/insights/parsers/tests/test_podman_list.py +++ b/insights/parsers/tests/test_podman_list.py @@ -1,7 +1,7 @@ import doctest import pytest -from insights.parsers import podman_list -from insights.parsers import SkipException + +from insights.parsers import podman_list, SkipException from insights.tests import context_wrap @@ -61,7 +61,7 @@ def test_podman_list_containers(): assert result.rows[1].get("CONTAINER ID") == "95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102" assert result.rows[1].get("COMMAND") == '"/bin/sh -c \'yum install -y vsftpd-2.2.2-6.el6\'"' assert result.rows[1]['STATUS'] == 'Exited (137) 18 hours ago' - assert result.rows[1].get("PORTS") is '' + assert result.rows[1].get("PORTS") == '' assert sorted(result.containers.keys()) == sorted(['angry_saha', 'tender_rosalind']) assert result.containers['angry_saha'] == result.rows[0] diff --git a/insights/parsers/tests/test_ps.py b/insights/parsers/tests/test_ps.py index 964f4fcd0..c52145231 100644 --- a/insights/parsers/tests/test_ps.py +++ b/insights/parsers/tests/test_ps.py @@ -1,8 +1,9 @@ -from ...parsers import ps, ParseException -from ...tests import context_wrap -from ...util import keys_in -import pytest import doctest +import pytest + +from insights.parsers import ps, ParseException +from insights.tests import context_wrap +from insights.util import keys_in PsAuxww_TEST_DOC = """ USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND @@ -438,6 +439,7 @@ def test_ps_alxwww(): assert dbus_proc['UID'] == '81' assert dbus_proc['ARGS'] == '--system --address=systemd: --nofork --nopidfile --systemd-activation' + PS_EO_CMD_NORMAL = """ PID COMMAND 1 /usr/lib/systemd/systemd diff --git a/insights/parsers/tests/test_rhsm_conf.py b/insights/parsers/tests/test_rhsm_conf.py index 268894b8e..4447aab61 100644 --- a/insights/parsers/tests/test_rhsm_conf.py +++ b/insights/parsers/tests/test_rhsm_conf.py @@ -1,5 +1,7 @@ -from insights.tests import context_wrap from insights.parsers import rhsm_conf +from insights.tests import context_wrap + + CONFIG = """ # Red Hat Subscription Manager Configuration File: @@ -78,9 +80,9 @@ def test_rhsm_conf(): - resault = rhsm_conf.RHSMConf(context_wrap(CONFIG)) + resault = rhsm_conf.RHSMConf(context_wrap(CONFIG)) - assert resault.get('rhsm', 'pluginConfDir') == '/etc/rhsm/pluginconf.d' - assert resault.get('rhsm', 'full_refresh_on_yum') == '0' - assert resault.get('rhsm', 'consumerCertDir') == '/etc/pki/consumer' - assert resault.get('rhsm', 'repo_ca_cert') == '%(ca_cert_dir)sredhat-uep.pem' + assert resault.get('rhsm', 'pluginConfDir') == '/etc/rhsm/pluginconf.d' + assert resault.get('rhsm', 'full_refresh_on_yum') == '0' + assert resault.get('rhsm', 'consumerCertDir') == '/etc/pki/consumer' + assert resault.get('rhsm', 'repo_ca_cert') == '%(ca_cert_dir)sredhat-uep.pem' diff --git a/insights/parsers/tests/test_sctp.py b/insights/parsers/tests/test_sctp.py index 4530074df..f725c9a23 100644 --- a/insights/parsers/tests/test_sctp.py +++ b/insights/parsers/tests/test_sctp.py @@ -151,7 +151,7 @@ def test_sctp_eps_exceptions(): with pytest.raises(ParseException) as exc: sctp_obj = SCTPEps(context_wrap(SCTP_EPS_DETAILS_NO)) assert sctp_obj is 
None # Just added to remove flake8 warnings - assert 'Contents are not compatible to this parser' in str(exc) + assert 'The following line is not compatible with this parser' in str(exc) with pytest.raises(SkipException) as exc: sctp_obj = SCTPEps(context_wrap(SCTP_EPS_DETAILS_NO_2)) @@ -163,7 +163,7 @@ def test_sctp_asc_exceptions(): with pytest.raises(ParseException) as exc: sctp_asc = SCTPAsc(context_wrap(SCTP_ASSOC_NO_2)) assert sctp_asc is None - assert 'Contents are not compatible to this parser' in str(exc) + assert 'The following line is not compatible with this parser' in str(exc) with pytest.raises(SkipException) as exc: sctp_asc = SCTPAsc(context_wrap(SCTP_ASSOC_NO)) assert sctp_asc is None diff --git a/insights/parsers/tests/test_sysconfig_up2date.py b/insights/parsers/tests/test_sysconfig_up2date.py index 1be9bcb78..1cd32314e 100644 --- a/insights/parsers/tests/test_sysconfig_up2date.py +++ b/insights/parsers/tests/test_sysconfig_up2date.py @@ -88,19 +88,19 @@ def test_get_up2date(): assert up2date_info['stagingContentWindow'] == '24' assert up2date_info['networkRetries'] == '5' assert up2date_info['enableProxy'] == '0' - assert up2date_info['proxyPassword'] is '' + assert up2date_info['proxyPassword'] == '' assert up2date_info['systemIdPath'] == '/etc/sysconfig/rhn/systemid' assert up2date_info['useNoSSLForPackages'] == '0' assert up2date_info['tmpDir'] == '/tmp' assert up2date_info['skipNetwork'] == '0' assert up2date_info['disallowConfChanges'] == 'noReboot;sslCACert;useNoSSLForPackages;noSSLServerURL;serverURL;disallowConfChanges;' assert up2date_info['enableProxyAuth'] == '0' - assert up2date_info['versionOverride'] is '' + assert up2date_info['versionOverride'] == '' assert up2date_info['stagingContent'] == '1' - assert up2date_info['proxyUser'] is '' - assert up2date_info['hostedWhitelist'] is '' + assert up2date_info['proxyUser'] == '' + assert up2date_info['hostedWhitelist'] == '' assert up2date_info['debug'] == '0' - assert up2date_info['httpProxy'] is '' + assert up2date_info['httpProxy'] == '' assert up2date_info['noReboot'] == '0' assert up2date_info['serverURL'] == 'http://192.168.160.23/XMLRPC' assert up2date_info['noSSLServerURL'] == 'http://192.168.160.23/XMLRPC' diff --git a/insights/tests/test_json_parser.py b/insights/tests/test_json_parser.py index 5211802c2..fc87afa3b 100644 --- a/insights/tests/test_json_parser.py +++ b/insights/tests/test_json_parser.py @@ -7,6 +7,7 @@ class MyJsonParser(JSONParser): pass + json_test_strings = { '{"a": "1", "b": "2"}': {'a': '1', 'b': '2'}, '[{"a": "1", "b": "2"},{"a": "3", "b": "4"}]': diff --git a/insights/util/autology/datasources.py b/insights/util/autology/datasources.py index 8b9264d29..b31b0e17f 100644 --- a/insights/util/autology/datasources.py +++ b/insights/util/autology/datasources.py @@ -424,12 +424,13 @@ class SosSpecs(DefaultSpecs): pass + if __name__ == "__main__": specs = DefaultSpecs() for k, v in specs.items(): try: print(v) - except Exception as e: + except Exception: print('======= Error with spec: ', k) print('repr_str :', v.repr_str) for dk, dv in v.items(): diff --git a/setup.py b/setup.py index 68ad58985..cc5beaf9e 100644 --- a/setup.py +++ b/setup.py @@ -100,7 +100,8 @@ def maybe_require(pkg): ]) linting = set([ - 'flake8==2.6.2', + 'flake8==2.6.2; python_version < "2.7"', + 'flake8; python_version >= "2.7"' ]) optional = set([ From cb4a414d658274f5f2fab3e3e6099403f27c3b2c Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 22 Sep 2021 15:44:52 -0400 Subject: [PATCH 555/892] [master] Update 
requires in core rpm spec for el7 (#3228) * Update the requires entries in the rpm spec file to allow building it on el7. Since a lot of the packages start with python36 on el7. * Added the missing CacheControl and pyyaml requirements. Signed-off-by: Ryan Blakley --- insights-core.spec | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/insights-core.spec b/insights-core.spec index 1b0300bad..35be779a6 100644 --- a/insights-core.spec +++ b/insights-core.spec @@ -12,13 +12,27 @@ BuildRequires: python3-devel BuildRequires: python3-setuptools Requires: python3 +Requires: python3-redis + +%if 0%{?rhel} == 7 +Requires: python36-CacheControl +Requires: python36-colorama +Requires: python36-defusedxml +Requires: python36-jinja2 +Requires: python36-lockfile +Requires: python36-PyYAML +Requires: python36-requests +Requires: python36-six +%else +Requires: python3-CacheControl Requires: python3-colorama Requires: python3-defusedxml -Requires: python3-lockfile Requires: python3-jinja2 -Requires: python3-redis +Requires: python3-lockfile +Requires: python3-pyyaml Requires: python3-requests Requires: python3-six +%endif %description Insights Core is a data collection and analysis framework. From abb94b6325cf5ba5ddb3266aeac4ff037526283a Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Wed, 22 Sep 2021 17:01:48 -0400 Subject: [PATCH 556/892] preserve alignment in netstat -neopa output in obfuscation (#3231) * preserve alignment in netstat -neopa output in obfuscation Signed-off-by: Jeremy Crafts --- insights/contrib/soscleaner.py | 67 +++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 6 deletions(-) diff --git a/insights/contrib/soscleaner.py b/insights/contrib/soscleaner.py index 6be99bbec..62ad3dc4f 100644 --- a/insights/contrib/soscleaner.py +++ b/insights/contrib/soscleaner.py @@ -203,14 +203,66 @@ def _sub_ip(self, line): ips = [each[0] for each in re.findall(pattern, line)] if len(ips) > 0: for ip in ips: - new_ip = self._ip2db(ip) - self.logger.debug("Obfuscating IP - %s > %s", ip, new_ip) - line = line.replace(ip, new_ip) + # skip loopback (https://github.com/RedHatInsights/insights-core/issues/3230#issuecomment-924859845) + if ip != "127.0.0.1": + new_ip = self._ip2db(ip) + self.logger.debug("Obfuscating IP - %s > %s", ip, new_ip) + line = line.replace(ip, new_ip) return line except Exception as e: # pragma: no cover self.logger.exception(e) raise Exception('SubIPError: Unable to Substitute IP Address - %s', ip) + def _sub_ip_netstat(self, line): + ''' + Special version of _sub_ip for netstat to preserve spacing + ''' + try: + pattern = r"(((\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[1-9]))(\.(\b25[0-5]|\b2[0-4][0-9]|\b1[0-9][0-9]|\b[1-9][0-9]|\b[0-9])){3})" + ips = [each[0] for each in re.findall(pattern, line)] + if len(ips) > 0: + for ip in ips: + # skip loopback (https://github.com/RedHatInsights/insights-core/issues/3230#issuecomment-924859845) + if ip != "127.0.0.1": + ip_len = len(ip) + new_ip = self._ip2db(ip) + new_ip_len = len(new_ip) + + self.logger.debug("Obfuscating IP - %s > %s", ip, new_ip) + + # pad or remove spaces to allow for the new length + if ip_len > new_ip_len: + numspaces = ip_len - new_ip_len + line = line.replace(ip, new_ip) + + # shift past port specification to add spaces + idx = line.index(new_ip) + len(new_ip) + c = line[idx] + while c != " ": + idx += 1 + c = line[idx] + line = line[0:idx] + numspaces * " " + line[idx:] + + elif new_ip_len > ip_len: + numspaces = new_ip_len - ip_len + line = 
line.replace(ip, new_ip) + + # shift past port specification to skip spaces + idx = line.index(new_ip) + len(new_ip) + c = line[idx] + while c != " ": + idx += 1 + c = line[idx] + line = line[0:idx] + line[(idx+numspaces):] + + else: + line = line.replace(ip, new_ip) + return line + except Exception as e: # pragma: no cover + self.logger.exception(e) + raise Exception('SubIPError: Unable to Substitute IP Address - %s', ip) + + def _get_disclaimer(self): # pragma: no cover #prints a disclaimer that this isn't an excuse for manual or any other sort of data verification @@ -572,10 +624,13 @@ def _file_list(self, folder): self.file_count = len(rtn) #a count of the files we'll have in the final cleaned sosreport, for reporting return rtn - def _clean_line(self, l): + def _clean_line(self, l, f=None): '''this will return a line with obfuscations for all possible variables, hostname, ip, etc.''' - new_line = self._sub_ip(l) # IP substitution + if f and f.endswith("netstat_-neopa"): + new_line = self._sub_ip_netstat(l) # IP substitution + else: + new_line = self._sub_ip(l) # IP substitution new_line = self._sub_hostname(new_line) # Hostname substitution new_line = self._sub_keywords(new_line) # Keyword Substitution @@ -592,7 +647,7 @@ def _clean_file(self, f): fh.close() if len(data) > 0: #if the file isn't empty: for l in data: - new_l = self._clean_line(l) + new_l = self._clean_line(l, f) if six.PY3: tmp_file.write(new_l.encode('utf-8')) else: From 10e5c2c06eacd409e35b8cc94fe30e2acf400f3b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 23 Sep 2021 21:42:17 +0800 Subject: [PATCH 557/892] Fix bug about some httpd directives may have empty string as attribute (#3218) * Fix bug about some httpd directives may have empty string as attribute Signed-off-by: Huanhuan Li * Move EmptyAttr to httpd_conf.py Signed-off-by: Huanhuan Li --- insights/combiners/httpd_conf.py | 3 ++- .../combiners/tests/test_httpd_conf_tree.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/insights/combiners/httpd_conf.py b/insights/combiners/httpd_conf.py index 2ec8bef55..0ff9d7b3f 100644 --- a/insights/combiners/httpd_conf.py +++ b/insights/combiners/httpd_conf.py @@ -290,7 +290,8 @@ def __init__(self, ctx): OpAttr = (Literal("!=") | Literal("<=") | Literal(">=") | InSet("<>")) & WSChar BareAttr = String(set(string.printable) - (set(string.whitespace) | set("<>'\""))) - Attr = AttrStart >> (Num | QuotedString | OpAttr | BareAttr) << AttrEnd + EmptyAttr = String('"\'', min_length=2) + Attr = AttrStart >> (Num | QuotedString | OpAttr | BareAttr | EmptyAttr) << AttrEnd Attrs = Many(Attr) StartTag = (WS + LT) >> (StartName + Attrs) << (GT + WS) diff --git a/insights/combiners/tests/test_httpd_conf_tree.py b/insights/combiners/tests/test_httpd_conf_tree.py index 6858bb46c..ecaee6a58 100644 --- a/insights/combiners/tests/test_httpd_conf_tree.py +++ b/insights/combiners/tests/test_httpd_conf_tree.py @@ -488,6 +488,15 @@ """ +HTTPD_EMPTY_ATTR = """ + + RequestHeader set X_FORWARDED_PROTO "http" + RequestHeader set SSL_CLIENT_S_DN "" + RequestHeader set SSL_CLIENT_CERT "" + RequestHeader set SSL_CLIENT_VERIFY "" + +""".strip() + def test_mixed_case_tags(): httpd = _HttpdConf(context_wrap(HTTPD_CONF_MIXED, path='/etc/httpd/conf/httpd.conf')) @@ -795,3 +804,11 @@ def test_mixed_name(): httpd1 = _HttpdConf(context_wrap(HTTPD_CONF_MIXED_NAME, path='/etc/httpd/conf/httpd.conf')) result = HttpdConfTree([httpd1]) assert len(result.doc["H2Push"]) == 1 + + +def 
test_empty_attr(): + httpd1 = _HttpdConf(context_wrap(HTTPD_EMPTY_ATTR, path='/etc/httpd/conf/httpd.conf')) + result = HttpdConfTree([httpd1]) + assert len(result['VirtualHost']['RequestHeader']) == 4 + assert result['VirtualHost']['RequestHeader'][0].value == 'set X_FORWARDED_PROTO http' + assert result['VirtualHost']['RequestHeader'][-1].value == 'set SSL_CLIENT_VERIFY ""' From d5184ab055614162b8f7241e2d9ed9bed84a7b06 Mon Sep 17 00:00:00 2001 From: Aditi Puntambekar Date: Thu, 23 Sep 2021 19:19:06 +0530 Subject: [PATCH 558/892] Add config.ros parser (#3197) * Add config.ros parser Signed-off-by: Aditi Puntambekar * Modified parser based on suggestions Signed-off-by: Aditi Puntambekar * Update parsing logic for [access] section Signed-off-by: Aditi Puntambekar * Update ros_config tests Signed-off-by: Aditi Puntambekar * Add @parser decorator Signed-off-by: Aditi Puntambekar * Update parse_content for better access Signed-off-by: Aditi Puntambekar * Update docstring Signed-off-by: Aditi Puntambekar * Add test for examples doc Signed-off-by: Aditi Puntambekar --- docs/shared_parsers_catalog/ros_config.rst | 3 + insights/parsers/ros_config.py | 149 +++++++++++++++++++++ insights/parsers/tests/test_ros_config.py | 69 ++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 223 insertions(+) create mode 100644 docs/shared_parsers_catalog/ros_config.rst create mode 100644 insights/parsers/ros_config.py create mode 100644 insights/parsers/tests/test_ros_config.py diff --git a/docs/shared_parsers_catalog/ros_config.rst b/docs/shared_parsers_catalog/ros_config.rst new file mode 100644 index 000000000..b841e6a26 --- /dev/null +++ b/docs/shared_parsers_catalog/ros_config.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ros_config + :members: + :show-inheritance: diff --git a/insights/parsers/ros_config.py b/insights/parsers/ros_config.py new file mode 100644 index 000000000..f29f63294 --- /dev/null +++ b/insights/parsers/ros_config.py @@ -0,0 +1,149 @@ +""" +RosConfig - file ``/var/lib/pcp/config/pmlogger/config.ros`` +============================================================ +This class provides parsing for the files: + ``/var/lib/pcp/config/pmlogger/config.ros`` +""" + +from insights import parser, Parser +from insights.specs import Specs +from insights.parsr import EOF, EOL, Char, Literal, Many, OneLineComment, Opt, QuotedString, String, WSChar +import string + + +# https://man7.org/linux/man-pages/man1/pmlogger.1.html#CONFIGURATION_FILE_SYNTAX + +WS = Many(WSChar | EOL | OneLineComment("#")) + +Log = WS >> Literal("log") << WS +MandatoryOn = WS >> Literal("mandatory on") << WS +MandatoryOff = WS >> Literal("mandatory off") << WS +MandatoryMaybe = WS >> Literal("mandatory maybe") << WS +AdvisoryOn = WS >> Literal("advisory on") << WS +AdvisoryOff = WS >> Literal("advisory off") << WS + +Once = WS >> Literal("once") << WS +Default = WS >> Literal("default") << WS +Every = WS >> Literal("every") << WS +UnsignedInt = String(string.digits).map(int) +TimeUnits = WS >> String(string.ascii_letters) << WS +Freq = Opt(Every) >> (UnsignedInt + TimeUnits) + +Interval = Once | Default | Freq +OnStates = MandatoryOn | AdvisoryOn +OtherStates = MandatoryMaybe | MandatoryOff | AdvisoryOff + +Preamble = Opt(Log) >> ((OnStates + Interval) | OtherStates) + +LeftBrace = WS >> Char("{") << WS +RightBrace = WS >> Char("}") << WS +Comma = WS >> Char(",") << WS + +Name = WS >> String(string.ascii_letters + string.digits + "-._") << WS + +LeftBracket = WS >> Char('[') << WS 
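+# The bracketed instance-name form assembled just below accepts entries
+# like [one, "two", 3] (an assumed example, not from the pmlogger docs;
+# a comma or bare whitespace both work as separators).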
+RightBracket = WS >> Char(']') << WS +InstanceName = QuotedString | UnsignedInt | Name +InstanceNames = LeftBracket >> InstanceName.sep_by(Comma | WS) << RightBracket +MetricSpec = Name + Opt(InstanceNames, default=[]) + +OneMetricSpec = MetricSpec.map(lambda s: [s]) +MultipleMetricSpecs = LeftBrace >> MetricSpec.sep_by(Comma | WS) << RightBrace +MetricSpecs = (OneMetricSpec | MultipleMetricSpecs).map(dict) + +LogSpec = Preamble + MetricSpecs + +LogSpecs = Many(LogSpec) + +AccessHeader = WS >> Literal("[access]") << WS +Allow = WS >> Literal("allow") << WS +Disallow = WS >> Literal("disallow") << WS +AllowDisallow = Allow | Disallow + + +EnquireOp = WS >> Literal("enquire") << WS +AdvisoryOp = WS >> Literal("advisory") << WS +MandatoryOp = WS >> Literal("mandatory") << WS +AllExceptOp = WS >> Literal("all except") << WS +AllOp = WS >> Literal("all") << WS + +Colon = WS >> Literal(":") << WS + +Host = String(string.ascii_letters + string.digits + ".*:\"") +HostList = WS >> Host.sep_by(Comma) << WS + +Operation = EnquireOp | AdvisoryOp | MandatoryOp | AllExceptOp | AllOp +OperationList = WS >> Operation.sep_by(Comma | WS) << WS + +Semicolon = WS >> Char(";") << WS + +AccessRule = AllowDisallow + HostList + Colon + OperationList << Semicolon +AccessRules = Many(AccessRule) +AccessControl = AccessHeader + AccessRules +Doc = LogSpecs + Opt(AccessControl) +parse = Doc << EOF + + +@parser(Specs.ros_config) +class RosConfig(Parser): + """ + Sample input data is in the format:: + + log mandatory on default { + mem.util.used + mem.physmem + kernel.all.cpu.user + kernel.all.cpu.sys + kernel.all.cpu.nice + kernel.all.cpu.steal + kernel.all.cpu.idle + kernel.all.cpu.wait.total + disk.all.total + mem.util.cached + mem.util.bufmem + mem.util.free + } + [access] + disallow .* : all; + disallow :* : all; + allow local:* : enquire; + + Examples: + >>> type(ros_input) + + >>> ros_input.rules[0]['allow_disallow'] + 'disallow' + >>> ros_input.rules[0]['hostlist'] + ['.*'] + >>> ros_input.rules[0]['operationlist'] + ['all'] + >>> ros_input.specs[0].get('state') + 'mandatory on' + >>> ros_input.specs[0].get('metrics')['mem.util.used'] + [] + >>> ros_input.specs[0].get('metrics')['kernel.all.cpu.user'] + [] + >>> ros_input.specs[0].get('logging_interval') + 'default' + + Attributes: + data(list): All parsed options and log files are stored in this + list. + specs(list of dicts): List of the ROS specifications present in + config.ros file. + rules(list of dicts): List of access control rules applied for + config.ros file. 
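The doctest examples above assume a pre-built ``ros_input`` object; standing the
parser up directly is a one-liner. A minimal sketch, mirroring the patch's own
test input (the content string here is illustrative)::

    from insights.parsers.ros_config import RosConfig
    from insights.tests import context_wrap

    CONTENT = """
    log mandatory on default {
        mem.util.used
        mem.physmem
    }
    [access]
    disallow .* : all;
    """

    ros = RosConfig(context_wrap(CONTENT))
    print(ros.specs[0]['state'])       # 'mandatory on'
    print(ros.rules[0]['hostlist'])    # ['.*']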
+ + """ + def parse_content(self, content): + self.data = parse("\n".join(content)) + self.specs = [] + specifications = self.data[0] + for spec in specifications: + state = spec[0][0] + logging_interval = spec[0][1] if state.endswith('on') else None + self.specs.append({'state': state, 'logging_interval': logging_interval, 'metrics': spec[1]}) + access_rules = self.data[1][1] + self.rules = [] + for rule in access_rules: + self.rules.append({'allow_disallow': rule[0], 'hostlist': rule[1], 'operationlist': rule[3]}) diff --git a/insights/parsers/tests/test_ros_config.py b/insights/parsers/tests/test_ros_config.py new file mode 100644 index 000000000..e3c41a37f --- /dev/null +++ b/insights/parsers/tests/test_ros_config.py @@ -0,0 +1,69 @@ +from insights.parsers import ros_config +from insights.tests import context_wrap +import doctest + + +ROS_CONFIG_INPUT = """ +log mandatory on default { + mem.util.used + mem.physmem + kernel.all.cpu.user + kernel.all.cpu.sys + kernel.all.cpu.nice + kernel.all.cpu.steal + kernel.all.cpu.idle + kernel.all.cpu.wait.total + disk.all.total + mem.util.cached + mem.util.bufmem + mem.util.free +} +[access] +disallow .* : all; +disallow :* : all; +allow local:* : enquire; +""" + + +def test_ros_config(): + ros_input = ros_config.RosConfig(context_wrap(ROS_CONFIG_INPUT)) + assert ros_input.data is not None + assert ros_input.specs[0].get('state') == 'mandatory on' + assert ros_input.specs[0].get('metrics') == {'mem.util.used': [], + 'mem.physmem': [], + 'kernel.all.cpu.user': [], + 'kernel.all.cpu.sys': [], + 'kernel.all.cpu.nice': [], + 'kernel.all.cpu.steal': [], + 'kernel.all.cpu.idle': [], + 'kernel.all.cpu.wait.total': [], + 'disk.all.total': [], + 'mem.util.cached': [], + 'mem.util.bufmem': [], + 'mem.util.free': []} + assert ros_input.specs[0].get('logging_interval') == 'default' + assert ros_input.rules == [ + { + 'allow_disallow': 'disallow', + 'hostlist': ['.*'], + 'operationlist': ['all'] + }, + { + 'allow_disallow': 'disallow', + 'hostlist': [':*'], + 'operationlist': ['all'] + }, + { + 'allow_disallow': 'allow', + 'hostlist': ['local:*'], + 'operationlist': ['enquire'] + } + ] + + +def test_ros_config_documentation(): + env = { + 'ros_input': ros_config.RosConfig(context_wrap(ROS_CONFIG_INPUT, path='/var/lib/pcp/config/pmlogger/config.ros')), + } + failed, total = doctest.testmod(ros_config, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 59eea896d..90222509c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -558,6 +558,7 @@ class Specs(SpecSet): rhsm_releasever = RegistryPoint() rndc_status = RegistryPoint() root_crontab = RegistryPoint() + ros_config = RegistryPoint() route = RegistryPoint() rpm_ostree_status = RegistryPoint() rpm_V_packages = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index f29e061d9..787105ea1 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -591,6 +591,7 @@ def pmlog_summary_file(broker): rhsm_log = simple_file("/var/log/rhsm/rhsm.log") rhsm_releasever = simple_file('/var/lib/rhsm/cache/releasever.json') rndc_status = simple_command("/usr/sbin/rndc status") + ros_config = simple_file("/var/lib/pcp/config/pmlogger/config.ros") rpm_ostree_status = simple_command("/usr/bin/rpm-ostree status --json") rpm_V_packages = simple_command("/bin/rpm -V coreutils procps procps-ng shadow-utils passwd sudo chrony", keep_rc=True, signum=signal.SIGTERM) rsyslog_conf = glob_file(["/etc/rsyslog.conf", 
"/etc/rsyslog.d/*.conf"]) From 8855f41b1bcc779f7c623deab1498f755d04b176 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 29 Sep 2021 12:44:55 -0500 Subject: [PATCH 559/892] Add combiner for ansible information (#3232) * Add new combiner to collect ansible info for fingerprinting an ansible system * Add documentation and tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- .../shared_combiners_catalog/ansible_info.rst | 3 + insights/combiners/ansible_info.py | 98 +++++++++++++++++++ insights/combiners/tests/test_ansible_info.py | 74 ++++++++++++++ 3 files changed, 175 insertions(+) create mode 100644 docs/shared_combiners_catalog/ansible_info.rst create mode 100644 insights/combiners/ansible_info.py create mode 100644 insights/combiners/tests/test_ansible_info.py diff --git a/docs/shared_combiners_catalog/ansible_info.rst b/docs/shared_combiners_catalog/ansible_info.rst new file mode 100644 index 000000000..061a1b24e --- /dev/null +++ b/docs/shared_combiners_catalog/ansible_info.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.ansible_info + :members: + :show-inheritance: diff --git a/insights/combiners/ansible_info.py b/insights/combiners/ansible_info.py new file mode 100644 index 000000000..256a3532f --- /dev/null +++ b/insights/combiners/ansible_info.py @@ -0,0 +1,98 @@ +""" +Ansible Info +============ +Provide information about the Ansible packages installed on a system. +""" +from insights.core.plugins import combiner +from insights.parsers.installed_rpms import InstalledRpms + +ANSIBLE_TOWER_PKG = "ansible-tower" +ANSIBLE_AUTOMATION_HUB_PKG = "automation-hub" +ANSIBLE_CATALOG_WORKER_PKG = "catalog-worker" +ANSIBLE_AUTOMATION_CONTROLLER_PKG = "automation-controller" +ANSIBLE_PACKAGES = [ + ANSIBLE_TOWER_PKG, + ANSIBLE_AUTOMATION_HUB_PKG, + ANSIBLE_CATALOG_WORKER_PKG, + ANSIBLE_AUTOMATION_CONTROLLER_PKG, +] + + +@combiner(InstalledRpms) +class AnsibleInfo(dict): + """ + Provides information related to Ansible based on the RPMs installed. + + Provides properties to determine the Ansible specific system characteristics. The + base class of the combiner is ``dict`` with dictionary keys being the Ansible + package names, and data values being + :py:class:`insights.parsers.installed_rpms.InstalledRpm` objects. + See the :py:class:`insights.parsers.installed_rpms.InstalledRpm` + class for more information on object methods and values. + + Properties are provided to aid in fingerprinting of the system. 
+ + Examples: + >>> type(info) + + >>> info.is_tower + True + >>> info.tower_version + '1.0.0' + >>> info.is_controller + True + >>> info.controller_version + '1.0.0' + """ + def __init__(self, rpms): + pkg_versions = dict([(pkg, rpms.get_max(pkg)) for pkg in ANSIBLE_PACKAGES if rpms.get_max(pkg) is not None]) + self.update(pkg_versions) + + @property + def is_tower(self): + """ bool: Whether or not this system has ``ansible-tower`` installed """ + return ANSIBLE_TOWER_PKG in self + + @property + def tower_version(self): + """ str: Version of ansible-tower installed or ``None``""" + return self[ANSIBLE_TOWER_PKG].version if ANSIBLE_TOWER_PKG in self else None + + @property + def is_controller(self): + """ + bool: Whether or not this system has ``ansible-tower`` or + ``automation-controller`` installed + """ + return ANSIBLE_TOWER_PKG in self or ANSIBLE_AUTOMATION_CONTROLLER_PKG in self + + @property + def controller_version(self): + """ + str: Version of ansible-tower installed, or if it's not installed + the version of automation-controller installed or ``None`` + """ + if ANSIBLE_TOWER_PKG in self: + return self[ANSIBLE_TOWER_PKG].version + elif ANSIBLE_AUTOMATION_CONTROLLER_PKG in self: + return self[ANSIBLE_AUTOMATION_CONTROLLER_PKG].version + + @property + def is_hub(self): + """ bool: Whether or not this system has ``automation-hub`` installed """ + return ANSIBLE_AUTOMATION_HUB_PKG in self + + @property + def hub_version(self): + """ str: Version of automation-hub installed or ``None``""" + return self[ANSIBLE_AUTOMATION_HUB_PKG].version if ANSIBLE_AUTOMATION_HUB_PKG in self else None + + @property + def is_catalog_worker(self): + """ bool: Whether or not this system has ``catalog-worker`` installed """ + return ANSIBLE_CATALOG_WORKER_PKG in self + + @property + def catalog_worker_version(self): + """ str: Version of catalog-worker installed or ``None``""" + return self[ANSIBLE_CATALOG_WORKER_PKG].version if ANSIBLE_CATALOG_WORKER_PKG in self else None diff --git a/insights/combiners/tests/test_ansible_info.py b/insights/combiners/tests/test_ansible_info.py new file mode 100644 index 000000000..0faf26d1d --- /dev/null +++ b/insights/combiners/tests/test_ansible_info.py @@ -0,0 +1,74 @@ +from insights.parsers.installed_rpms import InstalledRpms +from insights.combiners import ansible_info +from insights.combiners.ansible_info import ( + AnsibleInfo, ANSIBLE_AUTOMATION_CONTROLLER_PKG, ANSIBLE_CATALOG_WORKER_PKG, ANSIBLE_TOWER_PKG, + ANSIBLE_AUTOMATION_HUB_PKG) +from insights.tests import context_wrap +import doctest + +TOWER_RPM = ANSIBLE_TOWER_PKG + "-1.0.0-1" +AUTO_CONTROLLER_RPM = ANSIBLE_AUTOMATION_CONTROLLER_PKG + "-1.0.1-1" +CATALOG_WORKER_RPM = ANSIBLE_CATALOG_WORKER_PKG + "-1.0.2-1" +HUB_RPM = ANSIBLE_AUTOMATION_HUB_PKG + "-1.0.3-1" +ALL_RPMS = ''' +{controller} +{cworker} +{tower} +{hub} +'''.format( + controller=AUTO_CONTROLLER_RPM, + cworker=CATALOG_WORKER_RPM, + tower=TOWER_RPM, + hub=HUB_RPM).strip() + + +def test_ansible_info_all(): + rpms = InstalledRpms(context_wrap(ALL_RPMS)) + comb = AnsibleInfo(rpms) + assert comb is not None + assert comb.is_tower + assert comb.tower_version == '1.0.0' + assert comb[ANSIBLE_TOWER_PKG].nvr == TOWER_RPM + assert comb[ANSIBLE_AUTOMATION_CONTROLLER_PKG].nvr == AUTO_CONTROLLER_RPM + assert comb.is_controller + assert comb.controller_version == '1.0.0' + assert comb.is_hub + assert comb.hub_version == '1.0.3' + assert comb[ANSIBLE_AUTOMATION_HUB_PKG].nvr == HUB_RPM + assert comb.is_catalog_worker + assert comb.catalog_worker_version == 
'1.0.2' + assert comb[ANSIBLE_CATALOG_WORKER_PKG].nvr == CATALOG_WORKER_RPM + + +def test_ansible_info_tower(): + rpms = InstalledRpms(context_wrap(TOWER_RPM)) + comb = AnsibleInfo(rpms) + assert comb.is_tower + assert comb.is_controller + assert not comb.is_hub + assert not comb.is_catalog_worker + assert comb.tower_version == '1.0.0' + assert comb.controller_version == '1.0.0' + assert comb.hub_version is None + assert comb.catalog_worker_version is None + + +def test_ansible_info_auto_controller(): + rpms = InstalledRpms(context_wrap(AUTO_CONTROLLER_RPM)) + comb = AnsibleInfo(rpms) + assert not comb.is_tower + assert comb.is_controller + assert not comb.is_hub + assert not comb.is_catalog_worker + assert comb.tower_version is None + assert comb.controller_version == '1.0.1' + assert comb.hub_version is None + assert comb.catalog_worker_version is None + + +def test_ansible_info_docs(): + rpms = InstalledRpms(context_wrap(TOWER_RPM)) + comb = AnsibleInfo(rpms) + env = {'info': comb} + failed, total = doctest.testmod(ansible_info, globs=env) + assert failed == 0 From 35df5b51904dfe79b3ee91f9f93b60f0eaba4852 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michal=20Hornick=C3=BD?= Date: Wed, 29 Sep 2021 19:50:42 +0200 Subject: [PATCH 560/892] Add yum_updates to documentation (#3225) Signed-off-by: mhornick --- docs/custom_datasources_index.rst | 8 +++++++ insights/specs/datasources/yum_updates.py | 28 ++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 737e2cf53..027a6ed07 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -74,3 +74,11 @@ insights.specs.datasources.satellite_missed_queues :members: satellite_missed_pulp_agent_queues, LocalSpecs :show-inheritance: :undoc-members: + +insights.specs.datasources.yum_updates +-------------------------------------- + +.. automodule:: insights.specs.datasources.yum_updates + :members: yum_updates + :show-inheritance: + :undoc-members: diff --git a/insights/specs/datasources/yum_updates.py b/insights/specs/datasources/yum_updates.py index 19ca6e302..4283dc9a9 100644 --- a/insights/specs/datasources/yum_updates.py +++ b/insights/specs/datasources/yum_updates.py @@ -70,7 +70,7 @@ def installed_packages(self): def updates(self, pkg): nevra = pkg.nevra updates_list = [] - for upg in self.updict[pkg.na]: + for upg in self.updict.get(pkg.na, []): if upg.verGT(pkg): updates_list.append(upg) return nevra, updates_list @@ -100,6 +100,32 @@ def yum_updates(_broker): This datasource provides a list of available updates on the system. It uses the yum python library installed locally, and collects list of available package updates, along with advisory info where applicable. 
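The ``updict.get(pkg.na, [])`` change in the hunk above is the functional fix:
a package name/arch with no update candidates now yields an empty list instead
of a ``KeyError``. The difference in isolation (dictionary contents assumed)::

    updict = {'bash.x86_64': ['bash-5.1.8']}

    # updict['kernel.x86_64']               # before: raises KeyError
    print(updict.get('kernel.x86_64', []))  # after: [] -- the caller skips it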
+ + Sample data returned:: + + { + "releasever": "8", + "basearch": "x86_64", + "update_list": { + "NetworkManager-1:1.22.8-4.el8.x86_64": { + "available_updates": [ + { + "package": "NetworkManager-1:1.22.8-5.el8_2.x86_64", + "repository": "rhel-8-for-x86_64-baseos-rpms", + "basearch": "x86_64", + "releasever": "8", + "erratum": "RHSA-2020:3011" + } + ] + } + }, + "metadata_time": "2021-01-01T09:39:45Z" + } + + Returns: + list: List of available updates + Raises: + SkipComponent: Raised on systems different than RHEL 7 """ if not _broker.get(IsRhel7): From 74f88dd84ae303563db9fcdb8e67c42673911ea6 Mon Sep 17 00:00:00 2001 From: gkamathe <73747618+gkamathe@users.noreply.github.com> Date: Fri, 1 Oct 2021 05:38:26 +0530 Subject: [PATCH 561/892] Fixing broken sosreport link (#3243) Signed-off-by: Gaurav Kamathe --- docs/api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/api.rst b/docs/api.rst index 814aa866d..e32cb9a7a 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -14,7 +14,7 @@ SOSReports A SOSReport_ is a command-line tool for Red Hat Enterprise Linux (and other systems) to collect configuration and diagnostic information from the system. -.. _SOSReport: https://github.com/sos/sosreport +.. _SOSReport: https://github.com/sosreport/sos Insights Archives ----------------- From a8e64de161395fd8e299a837ae4e315ae82d7ce1 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Tue, 5 Oct 2021 01:03:26 +0530 Subject: [PATCH 562/892] Enhance awx_manage parser (#3242) Signed-off-by: Rahul --- insights/parsers/awx_manage.py | 9 ++++++++- insights/parsers/tests/test_awx_manage.py | 5 ++++- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 2 +- 4 files changed, 14 insertions(+), 4 deletions(-) diff --git a/insights/parsers/awx_manage.py b/insights/parsers/awx_manage.py index 5a192c5ce..267068c09 100644 --- a/insights/parsers/awx_manage.py +++ b/insights/parsers/awx_manage.py @@ -75,7 +75,10 @@ class AwxManagePrintSettings(CommandParser, JSONParser): "INSIGHTS_TRACKING_STATE": true, "INSTALL_UUID": "c0d38a6a-4449-4e13-a64b-00e0248ad229", "SYSTEM_UUID": "eecfd8dc-5028-46ef-9868-86f7d595da13", - "TOWER_URL_BASE": "https://10.72.37.79" + "TOWER_URL_BASE": "https://10.72.37.79", + "LOG_AGGREGATOR_ENABLED": true, + "LOG_AGGREGATOR_LEVEL": "DEBUG" + } Examples: @@ -85,5 +88,9 @@ class AwxManagePrintSettings(CommandParser, JSONParser): False >>> settings['SYSTEM_UUID'] == 'eecfd8dc-5028-46ef-9868-86f7d595da13' True + >>> settings['LOG_AGGREGATOR_ENABLED'] + True + >>> settings['LOG_AGGREGATOR_LEVEL'] == 'DEBUG' + True """ pass diff --git a/insights/parsers/tests/test_awx_manage.py b/insights/parsers/tests/test_awx_manage.py index e9f5dbc05..113962115 100644 --- a/insights/parsers/tests/test_awx_manage.py +++ b/insights/parsers/tests/test_awx_manage.py @@ -38,7 +38,9 @@ "INSIGHTS_TRACKING_STATE": true, "INSTALL_UUID": "c0d38a6a-4449-4e13-a64b-00e0248ad229", "SYSTEM_UUID": "eecfd8dc-5028-46ef-9868-86f7d595da13", - "TOWER_URL_BASE": "https://10.72.37.79" + "TOWER_URL_BASE": "https://10.72.37.79", + "LOG_AGGREGATOR_ENABLED": true, + "LOG_AGGREGATOR_LEVEL": "DEBUG" } '''.strip() @@ -87,6 +89,7 @@ def test_awx_manage_print_settings(): assert not settings['AWX_CLEANUP_PATHS'] assert settings['INSIGHTS_TRACKING_STATE'] assert settings['SYSTEM_UUID'] == "eecfd8dc-5028-46ef-9868-86f7d595da13" + assert settings['LOG_AGGREGATOR_LEVEL'] == "DEBUG" def test_awx_manage_doc_examples(): diff --git a/insights/specs/default.py 
b/insights/specs/default.py index 787105ea1..99714f0ca 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -90,7 +90,7 @@ class DefaultSpecs(Specs): aws_instance_id_pkcs7 = simple_command("/usr/bin/curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7 --connect-timeout 5", deps=[IsAWS]) awx_manage_check_license = simple_command("/usr/bin/awx-manage check_license") awx_manage_check_license_data = awx_manage.awx_manage_check_license_data_datasource - awx_manage_print_settings = simple_command("/usr/bin/awx-manage print_settings INSIGHTS_TRACKING_STATE SYSTEM_UUID INSTALL_UUID TOWER_URL_BASE AWX_CLEANUP_PATHS AWX_PROOT_BASE_PATH --format json") + awx_manage_print_settings = simple_command("/usr/bin/awx-manage print_settings INSIGHTS_TRACKING_STATE SYSTEM_UUID INSTALL_UUID TOWER_URL_BASE AWX_CLEANUP_PATHS AWX_PROOT_BASE_PATH LOG_AGGREGATOR_ENABLED LOG_AGGREGATOR_LEVEL --format json") azure_instance_type = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2018-10-01&format=text --connect-timeout 5", deps=[IsAzure]) azure_instance_plan = simple_command("/usr/bin/curl -s -H Metadata:true http://169.254.169.254/metadata/instance/compute/plan?api-version=2018-10-01&format=json --connect-timeout 5", deps=[IsAzure]) bios_uuid = simple_command("/usr/sbin/dmidecode -s system-uuid") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 8b4c09bed..b8d25f7b8 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -18,7 +18,7 @@ class InsightsArchiveSpecs(Specs): aws_instance_id_doc = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_doc") aws_instance_id_pkcs7 = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_id_pkcs7") awx_manage_check_license = simple_file("insights_commands/awx-manage_check_license") - awx_manage_print_settings = simple_file("insights_commands/awx-manage_print_settings_INSIGHTS_TRACKING_STATE_SYSTEM_UUID_INSTALL_UUID_TOWER_URL_BASE_AWX_CLEANUP_PATHS_AWX_PROOT_BASE_PATH_--format_json") + awx_manage_print_settings = simple_file("insights_commands/awx-manage_print_settings_INSIGHTS_TRACKING_STATE_SYSTEM_UUID_INSTALL_UUID_TOWER_URL_BASE_AWX_CLEANUP_PATHS_AWX_PROOT_BASE_PATH_LOG_AGGREGATOR_ENABLED_LOG_AGGREGATOR_LEVEL_--format_json") azure_instance_type = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_type") azure_instance_plan = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_azure_instance_plan") bios_uuid = simple_file("insights_commands/dmidecode_-s_system-uuid") From a0f476a1e8cde55f6fdc5fc7759f178434e55c2a Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Mon, 4 Oct 2021 15:43:34 -0400 Subject: [PATCH 563/892] Replace assert with parse exception in netstat parser (#3238) * The netstat parser previously used an assert to make sure self.name was in the NETSTAT_SECTION_ID dictionary. Instead use an if statement and raise a parser exception if not met. 
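The pattern in isolation (the section names below are illustrative; only the
control flow mirrors the patch)::

    from insights.parsers import ParseException

    SECTIONS = {'Active Internet connections (servers and established)',
                'Active UNIX domain sockets (servers and established)'}

    def check_name(name):
        # assert name in SECTIONS           # before: an opaque AssertionError
        if name not in SECTIONS:            # after: an explicit, informative failure
            raise ParseException("The name '{name}' isn't a valid name.".format(name=name))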
Signed-off-by: Ryan Blakley --- insights/parsers/netstat.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/insights/parsers/netstat.py b/insights/parsers/netstat.py index 9cc288d4d..6e52006f9 100644 --- a/insights/parsers/netstat.py +++ b/insights/parsers/netstat.py @@ -272,7 +272,9 @@ class NetstatSection(object): def __init__(self, name): self.name = name.strip() - assert self.name in NETSTAT_SECTION_ID + if self.name not in NETSTAT_SECTION_ID: + raise ParseException("The name '{name}' isn't a valid name.".format(name=self.name)) + self.meta = NETSTAT_SECTION_ID[self.name] self.data = {} for m in self.meta: From 042b5076e6123c6b2da4f29a7144c2a599f439f5 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Mon, 4 Oct 2021 15:47:12 -0400 Subject: [PATCH 564/892] Update the nfnetlink parser (#3239) * Replace the assert check with if statement and raise a parse exception if met. * Updated the parse to inherit from a list, and added the data property for backwards compatibility. * Moved the docstring from the top to within the class. * Updated the test to look for parse and skip exceptions instead of assertion errors. Signed-off-by: Ryan Blakley --- insights/parsers/nfnetlink_queue.py | 113 ++++++++++-------- .../parsers/tests/test_nfnetlink_queue.py | 29 +++-- 2 files changed, 82 insertions(+), 60 deletions(-) diff --git a/insights/parsers/nfnetlink_queue.py b/insights/parsers/nfnetlink_queue.py index db6d45b13..e913be26d 100644 --- a/insights/parsers/nfnetlink_queue.py +++ b/insights/parsers/nfnetlink_queue.py @@ -1,66 +1,75 @@ """ NfnetLinkQueue - file ``/proc/net/netfilter/nfnetlink_queue`` ============================================================= - -Reads the ``/proc/net/netfilter/nfnetlink_queue`` file and creates a list of -dictionaries, one dictionary per row in the file. - -The keys of the dictionary are (see -https://home.regit.org/netfilter-en/using-nfqueue-and-libnetfilter_queue/): - -- ``queue_number`` -- ``peer_portid``: good chance it is process ID of software listening to - the queue -- ``queue_total``: current number of packets waiting in the queue -- ``copy_mode``: 0 and 1 only message only provide meta data. If 2, the - message provides a part of packet of size copy range. -- ``copy_range``: length of packet data to put in message -- ``queue_dropped``: number of packets dropped because queue was full -- ``user_dropped``: number of packets dropped because netlink message - could not be sent to userspace. If this counter is not zero, try - to increase netlink buffer size. On the application side, you will - see gap in packet id if netlink message are lost. -- ``id_sequence``: packet id of last packet -- The last field is always '1' and is ignored. - -Example Input:: - - 0 -4423 0 2 65535 0 0 22 1 - 1 -4424 0 2 65535 0 0 27 1 - -Examples: - - >>> # Set up of the environment - ignore this bit: - >>> nfnetlink_queue_data = ''' - ... 0 -4423 0 2 65535 0 0 22 1 - ... 1 -4424 0 2 65535 0 0 27 1 - ... ''' - >>> from insights.tests import context_wrap - >>> from insights.parsers.nfnetlink_queue import NfnetLinkQueue - >>> nf = NfnetLinkQueue(context_wrap(nfnetlink_queue_data)) - - >>> # Usual usage in a rule, using the argument name `nf` - >>> 'copy_mode' in nf.data[0] - True - >>> nf.data[0]['copy_mode'] # Note: values as integers - 2 - """ -from .. 
import Parser, parser +from insights import parser, Parser +from insights.parsers import ParseException, SkipException from insights.specs import Specs @parser(Specs.nfnetlink_queue) -class NfnetLinkQueue(Parser): - """Reads the ``/proc/net/netfilter/nfnetlink_queue`` file and - creates a list of dictionaries, one dictionary per row in the file. +class NfnetLinkQueue(Parser, list): + """ + Reads the ``/proc/net/netfilter/nfnetlink_queue`` file and creates a list of + dictionaries, one dictionary per row in the file. + + The keys of the dictionary are (see + https://home.regit.org/netfilter-en/using-nfqueue-and-libnetfilter_queue/): + + - ``queue_number`` + - ``peer_portid``: good chance it is process ID of software listening to + the queue + - ``queue_total``: current number of packets waiting in the queue + - ``copy_mode``: 0 and 1 only message only provide meta data. If 2, the + message provides a part of packet of size copy range. + - ``copy_range``: length of packet data to put in message + - ``queue_dropped``: number of packets dropped because queue was full + - ``user_dropped``: number of packets dropped because netlink message + could not be sent to userspace. If this counter is not zero, try + to increase netlink buffer size. On the application side, you will + see gap in packet id if netlink message are lost. + - ``id_sequence``: packet id of last packet + - The last field is always '1' and is ignored. + + Example Input:: + + 0 -4423 0 2 65535 0 0 22 1 + 1 -4424 0 2 65535 0 0 27 1 + + Examples: + + >>> type(nf) + + >>> 'copy_mode' in nf.data[0] + True + >>> nf.data[0]['copy_mode'] + 2 """ def parse_content(self, content): - self.data = [] + if not content: + raise SkipException("Empty output.") + + data = [] fields = ['queue_number', 'peer_portid', 'queue_total', 'copy_mode', - 'copy_range', 'queue_dropped', 'user_dropped', 'id_sequence'] + 'copy_range', 'queue_dropped', 'user_dropped', 'id_sequence'] + for line in content: parts = [int(p.strip()) for p in line.split()] - assert len(parts) == 9 - self.data.append(dict(zip(fields, parts))) + + if len(parts) != 9: + raise ParseException("Can't parse content because there isn't 9 columns provided.") + + data.append(dict(zip(fields, parts))) + + if not data: + raise SkipException("No parsed data.") + + self.extend(data) + + @property + def data(self): + """ + Set data as property to keep compatibility + """ + return self diff --git a/insights/parsers/tests/test_nfnetlink_queue.py b/insights/parsers/tests/test_nfnetlink_queue.py index bafb8d7e9..9eaa397f3 100644 --- a/insights/parsers/tests/test_nfnetlink_queue.py +++ b/insights/parsers/tests/test_nfnetlink_queue.py @@ -1,13 +1,9 @@ +import doctest import pytest -from insights.parsers import nfnetlink_queue +from insights.parsers import nfnetlink_queue, ParseException +from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap -import doctest - - -def test_nfnetlink_doc_examples(): - failed, total = doctest.testmod(nfnetlink_queue) - assert failed == 0 NFNETLINK_QUEUE = """ @@ -37,6 +33,19 @@ def test_nfnetlink_doc_examples(): 5 -4428 0 2 65535 0 0 16 1 """.strip() +NFNETLINK_DOC = """ +0 -4423 0 2 65535 0 0 22 1 +1 -4424 0 2 65535 0 0 27 1 +""".strip() + + +def test_nfnetlink_doc_examples(): + env = { + 'nf': nfnetlink_queue.NfnetLinkQueue(context_wrap(NFNETLINK_DOC)) + } + failed, total = doctest.testmod(nfnetlink_queue, globs=env) + assert failed == 0 + def test_parse_content(): nfnet_link_queue = 
nfnetlink_queue.NfnetLinkQueue(context_wrap(NFNETLINK_QUEUE)) @@ -62,10 +71,14 @@ def test_parse_content(): def test_missing_columns(): - with pytest.raises(AssertionError): + with pytest.raises(ParseException): nfnetlink_queue.NfnetLinkQueue(context_wrap(CORRUPT_NFNETLINK_QUEUE_1)) def test_wrong_type(): with pytest.raises(ValueError): nfnetlink_queue.NfnetLinkQueue(context_wrap(CORRUPT_NFNETLINK_QUEUE_2)) + + +def test_empty_content(): + skip_exception_check(nfnetlink_queue.NfnetLinkQueue) From b34bd70ba9e6640b6d15f5f4a4afcd0cee01a26f Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Mon, 4 Oct 2021 16:00:58 -0400 Subject: [PATCH 565/892] Update mdstat parser to remove asserts (#3240) * Went through and replaced all asserts with if statements that raise parse exceptions if not met. This required several logic updates due to how the asserts were used. * Updated the docstrings of each function so they were uniform. * Updated the test to check for parse exceptions instead of assertion errors, also added a couple additional test. Signed-off-by: Ryan Blakley --- insights/parsers/mdstat.py | 385 +++++++++++++------------- insights/parsers/tests/test_mdstat.py | 54 +++- 2 files changed, 244 insertions(+), 195 deletions(-) diff --git a/insights/parsers/mdstat.py b/insights/parsers/mdstat.py index 7027e990f..caed2e5d9 100644 --- a/insights/parsers/mdstat.py +++ b/insights/parsers/mdstat.py @@ -1,106 +1,103 @@ """ Mdstat - file ``/proc/mdstat`` ============================== - -Represents the information in the ``/proc/mdstat`` file. Several -examples of possible data containe in the file can be found on the -`MDstat kernel.org wiki page `_. - -In particular, the discussion here will focus on initial extraction of information -form lines such as:: - - Personalities : [raid1] [raid6] [raid5] [raid4] - md1 : active raid1 sdb2[1] sda2[0] - 136448 blocks [2/2] [UU] - - md2 : active raid1 sdb3[1] sda3[0] - 129596288 blocks [2/2] [UU] - - md3 : active raid5 sdl1[9] sdk1[8] sdj1[7] sdi1[6] sdh1[5] sdg1[4] sdf1[3] sde1[2] sdd1[1] sdc1[0] - 1318680576 blocks level 5, 1024k chunk, algorithm 2 [10/10] [UUUUUUUUUU] - -The data contained in ``mdstat`` is represented with three top level members - -``personalities``, ``components`` and ``mds``. - -Examples: - - >>> mdstat = shared[Mdstat] - >>> mdstat.personalities - ['raid1', 'raid6', 'raid5', 'raid4'] - >>> len(mdstat.components) # The individual component devices - 14 - >>> mdstat.components[0]['device_name'] - 'md1' - >>> sdb2 = mdstat.components[0] - >>> sdb2['component_name'] - 'sdb2' - >>> sdb2['active'] - True - >>> sdb2['raid'] - 'raid1' - >>> sdb2['role'] - 1 - >>> sdb2['up'] - True - >>> sorted(mdstat.mds.keys()) # dictionary of MD devices by device name - ['md1', 'md2', 'md3'] - >>> mdstat.mds['md1']['active'] - True - >>> len(mdstat.mds['md1']['devices']) # list of devices in this MD - 2 - >>> mdstat.mds['md1']['devices'][0]['component_name'] # device information - 'sdb2' """ import re -from .. import parser, CommandParser + +from insights import parser, CommandParser +from insights.parsers import ParseException, SkipException from insights.specs import Specs @parser(Specs.mdstat) class Mdstat(CommandParser): """ - Represents the information in the ``/proc/mdstat`` file. 
-
-    Attributes
-    ----------
-
-    personalities : list
-        A list of RAID levels the kernel currently supports
-
-    components : list of dicts
-        A list containing a dict of md component device information
-        Each of these dicts contains the following keys
-
-        - ``device_name`` : string - name of the array device
-        - ``active`` : boolean - ``True`` if the array is active, ``False``
-          if it is inactive.
-        - ``component_name`` : string - name of the component device
-        - ``raid`` : string - with the raid level, e.g., "raid1" for "md1"
-        - ``role`` : int - raid role number
-        - ``device_flag`` : str - device component status flag. Known values
-          include 'F' (failed device), 'S', and 'W'
-        - ``up`` : boolean - ``True`` if the component device is up
-        - ``auto_read_only`` : boolean - ``True`` if the array device is
-          "auto-read-only"
-        - ``blocks`` : the number of blocks in the device
-        - ``level`` : the current RAID level, if found in the status line
-        - ``chunk`` : the device chunk size, if found in the status line
-        - ``algorithm`` : the current conflict resolution algorithm, if found
-          in the status line
-
-    mds : dict of dicts
-        A dictionary keyed on the MD device name, with the following keys
-
-        - ``name``: Name of the MD device
-        - ``active``: Whether the MD device is active
-        - ``raid``: The RAID type string
-        - ``devices``: a list of the devices in this
-        - ``blocks``, ``level``, ``chunk`` and ``algorithm`` - the same
-        information given above per component device (if found)
+    Represents the information in the ``/proc/mdstat`` file. Several
+    examples of possible data contained in the file can be found on the
+    `MDstat kernel.org wiki page `_.
 
-    """
+    In particular, the discussion here will focus on initial extraction of information
+    from lines such as::
+ Each dict contains the following keys + + - ``name``: Name of the MD device + - ``active``: Whether the MD device is active + - ``raid``: The RAID type string + - ``devices``: a list of the devices in this + - ``blocks``, ``level``, ``chunk`` and ``algorithm`` - the same + information given above per component device (if found) + + Examples: + >>> type(mdstat) + + >>> mdstat.personalities + ['raid1', 'raid6', 'raid5', 'raid4'] + >>> len(mdstat.components) + 14 + >>> mdstat.components[0]['device_name'] + 'md1' + >>> sdb2 = mdstat.components[0] + >>> sdb2['component_name'] + 'sdb2' + >>> sdb2['active'] + True + >>> sdb2['raid'] + 'raid1' + >>> sdb2['role'] + 1 + >>> sdb2['up'] + True + >>> sorted(mdstat.mds.keys()) + ['md1', 'md2', 'md3'] + >>> mdstat.mds['md1']['active'] + True + >>> len(mdstat.mds['md1']['devices']) + 2 + >>> mdstat.mds['md1']['devices'][0]['component_name'] + 'sdb2' + """ def parse_content(self, content): + if not content: + raise SkipException("Empty output.") + self.mds = {} self.components = [] self.personalities = [] @@ -111,12 +108,19 @@ def parse_content(self, content): for line in content: line = line.strip() if line.startswith('Personalities'): + # If the line doesn't have any raid types then md raid isn't active. + if line == "Personalities :": + raise SkipException("No parseable md devices present.") + in_component = False self.personalities = parse_personalities(line) - elif line.startswith("md"): # Starting a component array stanza + # Starting a component array stanza. + elif line.startswith("md"): in_component = True current_components = parse_array_start(line) - elif not line: # blank line, ending a component array stanza + # Catch any blank lines, this signals + # the end of the component array stanza. + elif not line: if in_component: self.components.extend(current_components) current_components = None @@ -128,7 +132,7 @@ def parse_content(self, content): if upstring: apply_upstring(upstring, current_components) - # Map component devices into MDs dictionary by device name + # Map component devices into MDs dictionary by device name. for comp in self.components: devname = comp['device_name'] if devname not in self.mds: @@ -146,7 +150,7 @@ def parse_content(self, content): (k, comp[k]) for k in comp if k in ['component_name', 'role', 'up'] )) - # Keep self.data just for backwards compat + # Keep self.data just for backwards compatibility. self.data = { 'personalities': self.personalities, 'components': self.components @@ -154,107 +158,115 @@ def parse_content(self, content): def parse_personalities(personalities_line): - """Parse the "personalities" line of ``/proc/mdstat``. + """ + Parse the "personalities" line of ``/proc/mdstat``. - Lines are expected to be like: + Sample of personalities_line:: Personalities : [linear] [raid0] [raid1] [raid5] [raid4] [raid6] - If they do not have this format, an error will be raised since it - would be considered an unexpected parsing error. - - Parameters - ---------- + Args: + personalities_line (str): A single "Personalities" line from an + ``/proc/mdstat`` files. - personalities_line : str - A single "Personalities" line from an ``/proc/mdstat`` files. + Returns: + list: A list of raid "personalities" listed on the line. - Returns - ------- - A list of raid "personalities" listed on the line. + Raises: + ParseException: If the format isn't like the sample above. 
""" - tokens = personalities_line.split() - assert tokens.pop(0) == "Personalities" - assert tokens.pop(0) == ":" - personalities = [] - for token in tokens: - assert token.startswith('[') and token.endswith(']') - personalities.append(token.strip('[]')) + + if "Personalities :" not in personalities_line: + raise ParseException("Incorrectly formatted personalities line.") + + tokens = personalities_line.split() + for token in tokens[2:]: + if token.startswith('[') and token.endswith(']'): + personalities.append(token.strip('[]')) + else: + raise ParseException("Incorrectly formatted personalities line.") return personalities def parse_array_start(md_line): - """Parse the initial line of a device array stanza in - ``/proc/mdstat``. + """ + Parse the initial line of a device array stanza in ``/proc/mdstat``. - Lines are expected to be like: + Sample of md_line:: md2 : active raid1 sdb3[1] sda3[0] - If they do not have this format, an error will be raised since it - would be considered an unexpected parsing error. + Args: + md_line (str): A single line from the start of a device array stanza. - Parameters - ---------- + Returns: + list: A list of dictionaries, one dictionary for each component + device making up the array. - md_line : str - A single line from the start of a device array stanza from a - ``/proc/mdstat`` file. - - Returns - ------- - A list of dictionaries, one dictionrary for each component - device making up the array. + Raises: + ParseException: If the format isn't like the sample above. """ + auto_read_only = False components = [] + device_flag = None + raid = None + + # Split the line to create tokens, and + # set device_name to the first token. tokens = md_line.split() device_name = tokens.pop(0) - assert device_name.startswith("md") - assert tokens.pop(0) == ":" + + if not device_name.startswith("md") or ":" not in tokens: + raise ParseException("The md line isn't as expected.") + + # Remove the : symbol. + tokens.pop(0) active_string = tokens.pop(0) - active = False if active_string == "active": active = True + elif active_string == "inactive": + active = False else: - assert active_string == "inactive" - - raid_read_only_token = tokens.pop(0) - auto_read_only = False - raid = None - if raid_read_only_token == "(auto-read-only)": - auto_read_only = True - raid = tokens.pop(0) - elif active: - raid = raid_read_only_token - else: # Inactive devices don't list the raid type - raid = None - tokens.insert(0, raid_read_only_token) + raise ParseException("The raid isn't marked as active or inactive.") + + # Only active raids have the auto-read-only + # entry or the raid level. + if active: + raid_read_only_token = tokens.pop(0) + if raid_read_only_token == "(auto-read-only)": + auto_read_only = True + raid = tokens.pop(0) + else: + raid = raid_read_only_token for token in tokens: - subtokens = re.split(r"\[|]", token) - assert len(subtokens) > 1 - comp_name = subtokens[0] - assert comp_name + # Token should be sda1[0] or sda1[0](S) for + # example, and subtokens should be + # ['sda1', '0'] or ['sda1', '0', '(S)']. 
+ subtokens = re.split(r"[\[\]]", token) + + if len(subtokens) <= 1: + raise ParseException("The len of subtokens '{s_tokens}' is incorrect.".format(s_tokens=subtokens)) + comp_name = subtokens[0] role = int(subtokens[1]) - device_flag = None if len(subtokens) > 2: - device_flag = subtokens[2] - if device_flag: - device_flag = device_flag.strip('()') - - component_row = {"device_name": device_name, - "raid": raid, - "active": active, - "auto_read_only": auto_read_only, - "component_name": comp_name, - "role": role, - "device_flag": device_flag} - components.append(component_row) + device_flag = subtokens[2].strip('()') + + components.append({ + "device_name": device_name, + "raid": raid, + "active": active, + "auto_read_only": auto_read_only, + "component_name": comp_name, + "role": role, + "device_flag": device_flag + }) + return components @@ -281,6 +293,10 @@ def parse_array_status(line, components): 1465151808 blocks level 5, 64k chunk, algorithm 2 [4/3] [UUU_] 136448 blocks [2/2] [UU] 6306 blocks super external:imsm + + Args: + line (str): The array status line to parse. + components (list): A list of component dicts. """ status_line_re = r'(?P\d+) blocks' + \ r'(?: super (?P\S+))?' + \ @@ -307,30 +323,20 @@ def parse_array_status(line, components): def parse_upstring(line): """ Parse the subsequent lines of a device array stanza in ``/proc/mdstat`` - for the "up" indictor string. + for the "up" indicator string. The up indicator is "U" and down indicator + is "_". - Lines are expected to be like: + Samples of line:: 129596288 blocks [2/2] [UU] - - or - 1318680576 blocks level 5, 1024k chunk, algorithm 2 [10/10] [UUU_UUUUUU] - In particular, this method searchs for the string like ``[UU]`` which - indicates whether component devices or up, ``U`` or down, ``_``. - Parameters - ---------- - - line : str - A single line from a device array stanza. + line (str): A single line from a device array stanza. - Returns - ------- - - The string containing a series of ``U`` and ``\_`` characters if - found in the string, and ``None`` if the uptime string is not found. + Returns: + str: The string containing a series of ``U`` and ``_`` characters if + found in the string, and ``None`` if the up string is not found. """ UP_STRING_REGEX = r"\[(U|_)+]" @@ -357,21 +363,20 @@ def apply_upstring(upstring, component_list): If there the number of rows in component_list does not match the number of characters in upstring, an ``AssertionError`` is raised. - Parameters - ---------- - - upstring : str - String sequence of ``U``s and ``_``s as determined by the - ``parse_upstring`` method - - component_list : list - List of dictionaries output from the ``parse_array_start`` method. + Args: + upstring (str): String sequence of ``U``s and ``_``s as determined + by the ``parse_upstring`` method. + component_list (list): List of dictionaries output from the + ``parse_array_start`` method. 
""" - assert len(upstring) == len(component_list) + def add_up_key(comp, up_char): + if up_char not in ['U', '_']: + raise ParseException("Invalid character for up_indicator '{word}'.".format(word=up_char)) + + comp['up'] = up_char == 'U' - def add_up_key(comp_dict, up_indicator): - assert up_indicator == 'U' or up_indicator == "_" - comp_dict['up'] = up_indicator == 'U' + if len(upstring) != len(component_list): + raise ParseException("Length of upstring and component_list doesn't match.") for comp_dict, up_indicator in zip(component_list, upstring): add_up_key(comp_dict, up_indicator) diff --git a/insights/parsers/tests/test_mdstat.py b/insights/parsers/tests/test_mdstat.py index d4f9ebe9b..0aa3a670c 100644 --- a/insights/parsers/tests/test_mdstat.py +++ b/insights/parsers/tests/test_mdstat.py @@ -1,5 +1,8 @@ +import doctest import pytest -from insights.parsers import mdstat + +from insights.parsers import mdstat, ParseException +from insights.parsers.tests import skip_exception_check from insights.tests import context_wrap MDSTAT_TEST_1 = """ @@ -98,6 +101,12 @@ {"device_name": "md0", "active": True, "auto_read_only": True, "raid": "raid6", "component_name": "sdd1", "role": 2, "device_flag": 'S'} ] +MD_TEST_3 = "md0 : inactive sdb[1](S) sda[0](S)" +MD_RESULT_3 = [ + {'device_flag': 'S', 'raid': None, 'device_name': 'md0', 'role': 1, 'active': False, 'auto_read_only': False, 'component_name': 'sdb'}, + {'device_flag': 'S', 'raid': None, 'device_name': 'md0', 'role': 0, 'active': False, 'auto_read_only': False, 'component_name': 'sda'} +] + MD_FAIL = [ "what? : active raid5 sdh1[6] sdg1[4] sdf1[3] sde1[2] sdd1[1] sdc1[0]", "md124 active raid5 sdh1[6] sdg1[4] sdf1[3] sde1[2] sdd1[1] sdc1[0]", @@ -112,13 +121,40 @@ UPSTRING_TEST_2 = "1318680576 blocks level 5, 1024k chunk, algorithm 2 [10/10] [UUUUUUUUUU]" UPSTRING_TEST_3 = "[==>..................] 
recovery = 12.6% (37043392/292945152) finish=127.5min speed=33440K/sec" +NO_MD_DEVICES = """ +Personalities : +unused devices: +""".strip() + +MD_DOC = """ +Personalities : [raid1] [raid6] [raid5] [raid4] +md1 : active raid1 sdb2[1] sda2[0] + 136448 blocks [2/2] [UU] + +md2 : active raid1 sdb3[1] sda3[0] + 129596288 blocks [2/2] [UU] + +md3 : active raid5 sdl1[9] sdk1[8] sdj1[7] sdi1[6] sdh1[5] sdg1[4] sdf1[3] sde1[2] sdd1[1] sdc1[0] + 1318680576 blocks level 5, 1024k chunk, algorithm 2 [10/10] [UUUUUUUUUU] + +unused devices: +""".strip() + + +def test_doc_examples(): + env = { + 'mdstat': mdstat.Mdstat(context_wrap(MD_DOC)) + } + failed, total = doctest.testmod(mdstat, globs=env) + assert failed == 0 + def test_parse_personalities(): result = mdstat.parse_personalities(PERSONALITIES_TEST) assert ["linear", "raid0", "raid1", "raid5", "raid4", "raid6"] == result for line in PERSONALITIES_FAIL: - with pytest.raises(AssertionError): + with pytest.raises(ParseException): mdstat.parse_personalities(line) @@ -129,8 +165,11 @@ def test_parse_array_start(): result = mdstat.parse_array_start(MD_TEST_2) assert MD_RESULT_2 == result + result = mdstat.parse_array_start(MD_TEST_3) + assert MD_RESULT_3 == result + for md_line in MD_FAIL: - with pytest.raises(AssertionError): + with pytest.raises(ParseException): mdstat.parse_array_start(md_line) @@ -153,10 +192,10 @@ def test_apply_upstring(): assert test_dict[2]['up'] assert not test_dict[3]['up'] - with pytest.raises(AssertionError): + with pytest.raises(ParseException): mdstat.apply_upstring('U?_U', test_dict) - with pytest.raises(AssertionError): + with pytest.raises(ParseException): mdstat.apply_upstring('U_U', test_dict) @@ -201,3 +240,8 @@ def compare_mdstat_data(test_data, parser_obj): result = mdstat.Mdstat(context_wrap(MDSTAT_TEST_4)) compare_mdstat_data(MDSTAT_RESULT_4, result) + + +def test_skip(): + skip_exception_check(mdstat.Mdstat, output_str=NO_MD_DEVICES) + skip_exception_check(mdstat.Mdstat) From e2cc2fd0b06b90dd681d6bdde436359d935641c4 Mon Sep 17 00:00:00 2001 From: Rohan Arora Date: Mon, 4 Oct 2021 20:22:54 +0530 Subject: [PATCH 566/892] Add Spec path of chronyc_sources for sos_archive Signed-off-by: Rohan Arora --- insights/specs/sos_archive.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 9bec8b236..427f595ca 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -31,6 +31,7 @@ class SosSpecs(Specs): ceph_health_detail = simple_file("sos_commands/ceph/ceph_health_detail_--format_json-pretty") checkin_conf = simple_file("/etc/splice/checkin.conf") chkconfig = first_file(["sos_commands/startup/chkconfig_--list", "sos_commands/services/chkconfig_--list"]) + chronyc_sources = simple_file("sos_commands/chrony/chronyc_-n_sources") cib_xml = first_of( [ simple_file("/var/lib/pacemaker/cib/cib.xml"), From 09b1a0546df29cf667e85fca2cf24c7894ebf6b7 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 6 Oct 2021 15:47:16 -0400 Subject: [PATCH 567/892] Add doctest to messages parser (#3248) * Updated the examples in the docstring. * Added doctest to the messages test file. 
* Fixes #3029 Signed-off-by: Ryan Blakley --- insights/parsers/messages.py | 13 +++++-------- insights/parsers/tests/test_messages.py | 15 +++++++++++++-- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/insights/parsers/messages.py b/insights/parsers/messages.py index cf2b6f7a9..8033f68d7 100644 --- a/insights/parsers/messages.py +++ b/insights/parsers/messages.py @@ -30,17 +30,14 @@ class Messages(Syslog): This will also work around December/January crossovers. Examples: - >>> Messages.filters.append('wrapper') >>> Messages.token_scan('daemon_start', 'Wrapper Started as Daemon') - >>> msgs = shared[Messages] + >>> type(msgs) + >>> len(msgs.lines) + 9 >>> wrapper_msgs = msgs.get('wrapper') # Can only rely on lines filtered being present - >>> wrapper_msgs[0] - {'timestamp': 'May 18 15:13:36', 'hostname': 'lxc-rhel68-sat56', - 'procname': wrapper[11375]', 'message': '--> Wrapper Started as Daemon', - 'raw_message': 'May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon' - } - >>> msgs.daemon_start # Token set if matching lines present in logs + >>> result = {'raw_message': 'May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon', 'message': '--> Wrapper Started as Daemon', 'timestamp': 'May 18 15:13:36', 'hostname': 'lxc-rhel68-sat56', 'procname': 'wrapper[11375]'} + >>> wrapper_msgs[0] == result True """ pass diff --git a/insights/parsers/tests/test_messages.py b/insights/parsers/tests/test_messages.py index f7831d84b..0372097d9 100644 --- a/insights/parsers/tests/test_messages.py +++ b/insights/parsers/tests/test_messages.py @@ -1,5 +1,7 @@ +import doctest + from insights import add_filter -from insights.parsers.messages import Messages +from insights.parsers import messages from insights.specs import Specs from insights.tests import context_wrap @@ -25,8 +27,17 @@ ]) +def test_doc_examples(): + env = { + 'msgs': messages.Messages(context_wrap(MSGINFO)), + 'Messages': messages.Messages + } + failed, total = doctest.testmod(messages, globs=env) + assert failed == 0 + + def test_messages(): - msg_info = Messages(context_wrap(MSGINFO)) + msg_info = messages.Messages(context_wrap(MSGINFO)) bona_list = msg_info.get('(root) LIST (root)') assert 2 == len(bona_list) assert bona_list[0].get('timestamp') == "Apr 22 10:37:32" From 507f4227c001d01f1c91f893c9790a1ea2cf64ba Mon Sep 17 00:00:00 2001 From: Alec Cohan Date: Tue, 12 Oct 2021 11:31:46 -0400 Subject: [PATCH 568/892] RHCLOUD-16475: Investigate error handling issue found by sat team Signed-off-by: Alec Cohan --- .../apps/ansible/playbook_verifier/__init__.py | 13 +++++++++++-- .../apps/ansible/playbook_verifier/__main__.py | 17 +++++++++-------- 2 files changed, 20 insertions(+), 10 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index e2c95fef1..59083e798 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -74,6 +74,9 @@ def getPublicKey(gpg): def excludeDynamicElements(snippet): + if 'insights_signature_exclude' not in snippet['vars']: + raise PlaybookVerificationError(message='EXCLUDE MISSING: the insights_signature_exclude var does not exist.') + exclusions = snippet['vars']['insights_signature_exclude'].split(',') for element in exclusions: @@ -82,7 +85,7 @@ def excludeDynamicElements(snippet): # remove empty strings element = [string for string in element if string != ''] - if (len(element) 
== 1 and element[0] in EXCLUDABLE_VARIABLES): + if (len(element) == 1 and element[0] in EXCLUDABLE_VARIABLES and element[0] in snippet.keys()): del snippet[element[0]] elif (len(element) == 2 and element[0] in EXCLUDABLE_VARIABLES): try: @@ -145,6 +148,8 @@ def verify(playbook, skipVerify=False): verified = verifyPlaybookSnippet(snippet) if not verified: + if 'name' not in snippet.keys(): + raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [NAME UNAVAILABLE] has invalid signature") raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(snippet['name'])) logger.info('All templates successfully validated') @@ -156,7 +161,11 @@ def loadPlaybookYaml(playbook): Load playbook yaml using current yaml library implementation output: playbook yaml """ - return yaml.load(playbook) + try: + playbookYaml = yaml.load(playbook) + return playbookYaml + except: + raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Failed to load yaml") def normalizeSnippet(snippet): diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index d49bb56fb..0ea41cd30 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -1,8 +1,9 @@ import os import sys from insights.client.constants import InsightsConstants as constants -from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml +from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml, PlaybookVerificationError +skipVerify = False def read_playbook(): """ @@ -14,18 +15,18 @@ def read_playbook(): return unverified_playbook - -playbook = read_playbook() -playbook_yaml = loadPlaybookYaml(playbook) -skipVerify = False - if (os.environ.get('SKIP_VERIFY')): skipVerify = True try: + playbook = read_playbook() + playbook_yaml = loadPlaybookYaml(playbook) verified_playbook = verify(playbook_yaml, skipVerify) -except Exception as e: - sys.stderr.write(e.message) +except TypeError: + sys.stderr.write("VERIFICATION FAILURE: Playbook failed to be loaded in") + sys.exit(constants.sig_kill_bad) +except PlaybookVerificationError as err: + sys.stderr.write(err.message) sys.exit(constants.sig_kill_bad) print(playbook) From 92bf4ebe443ae0078b57a896775dc01ae37de8c6 Mon Sep 17 00:00:00 2001 From: Alec Cohan Date: Tue, 12 Oct 2021 11:41:57 -0400 Subject: [PATCH 569/892] flake8 fixes Signed-off-by: Alec Cohan --- insights/client/apps/ansible/playbook_verifier/__main__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index 0ea41cd30..b9dc68265 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -5,6 +5,7 @@ skipVerify = False + def read_playbook(): """ Read in the stringified playbook yaml from stdin @@ -15,6 +16,7 @@ def read_playbook(): return unverified_playbook + if (os.environ.get('SKIP_VERIFY')): skipVerify = True From 04fcc7258eb0f7529f92d69ecf062cd34e686576 Mon Sep 17 00:00:00 2001 From: Alec Cohan Date: Tue, 12 Oct 2021 15:15:05 -0400 Subject: [PATCH 570/892] Update changes based on comments Signed-off-by: Alec Cohan --- insights/client/apps/ansible/playbook_verifier/__init__.py | 7 +++---- insights/client/apps/ansible/playbook_verifier/__main__.py | 3 --- 
insights/client/apps/ansible/test_playbook.yml | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index 59083e798..cb174b1b9 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -148,9 +148,8 @@ def verify(playbook, skipVerify=False): verified = verifyPlaybookSnippet(snippet) if not verified: - if 'name' not in snippet.keys(): - raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [NAME UNAVAILABLE] has invalid signature") - raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(snippet['name'])) + name = snippet.get('name', 'NAME UNAVAILABLE') + raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(name)) logger.info('All templates successfully validated') return playbook @@ -165,7 +164,7 @@ def loadPlaybookYaml(playbook): playbookYaml = yaml.load(playbook) return playbookYaml except: - raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Failed to load yaml") + raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Failed to load playbook yaml because yaml is not valid") def normalizeSnippet(snippet): diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py index b9dc68265..e76f19833 100644 --- a/insights/client/apps/ansible/playbook_verifier/__main__.py +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -24,9 +24,6 @@ def read_playbook(): playbook = read_playbook() playbook_yaml = loadPlaybookYaml(playbook) verified_playbook = verify(playbook_yaml, skipVerify) -except TypeError: - sys.stderr.write("VERIFICATION FAILURE: Playbook failed to be loaded in") - sys.exit(constants.sig_kill_bad) except PlaybookVerificationError as err: sys.stderr.write(err.message) sys.exit(constants.sig_kill_bad) diff --git a/insights/client/apps/ansible/test_playbook.yml b/insights/client/apps/ansible/test_playbook.yml index 5669d6108..ea48c1733 100644 --- a/insights/client/apps/ansible/test_playbook.yml +++ b/insights/client/apps/ansible/test_playbook.yml @@ -33,6 +33,6 @@ TW1sb2JsaEVNRFZHTkhsbGIwVUtObmh4VlRGYWJDdERXSEZOYTNOblJUWklZV0l2VDFscVpHRmFX VUZwUm5aUlJFOXhPVkU1V1ZVNE5rSnRXSGRwUzNoalltNVRWREJ6VnpZeFVUaFpWd3BWVUVSblpT dFJMMHhSUFQwS1BVVnpWVWNLTFMwdExTMUZUa1FnVUVkUUlGTkpSMDVCVkZWU1JTMHRMUzB0Q2c9 - PQ== + sqdlslsdkfja;sldfs== tasks: - ping: From d6457832093007ea5a631c93f7b6ea611a9c8545 Mon Sep 17 00:00:00 2001 From: Alec Cohan Date: Wed, 13 Oct 2021 09:53:48 -0400 Subject: [PATCH 571/892] remove test playbook from insights-core folder Signed-off-by: Alec Cohan --- .../client/apps/ansible/test_playbook.yml | 38 ------------------- 1 file changed, 38 deletions(-) delete mode 100644 insights/client/apps/ansible/test_playbook.yml diff --git a/insights/client/apps/ansible/test_playbook.yml b/insights/client/apps/ansible/test_playbook.yml deleted file mode 100644 index ea48c1733..000000000 --- a/insights/client/apps/ansible/test_playbook.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -# Red Hat Insights has recommended one or more actions for you, a system administrator, to review and if you -# deem appropriate, deploy on your systems running Red Hat software. 
Based on the analysis, we have automatically -# generated an Ansible Playbook for you. Please review and test the recommended actions and the Playbook as -# they may contain configuration changes, updates, reboots and/or other changes to your systems. Red Hat is not -# responsible for any adverse outcomes related to these recommendations or Playbooks. -# -# ping -# https://cloud.redhat.com/insights/remediations/44466a02-24a1-47b4-84cb-391aeff4444 -# Generated by Red Hat Insights on Thu, 29 Oct 2020 12:24:17 GMT -# Created by some-user -- name: ping - hosts: "@@HOSTS@@" - vars: - insights_signature_exclude: /hosts,/vars/insights_signature - insights_signature: !!binary | - TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS1ZtVnljMmx2YmpvZ1IyNTFV - RWNnZGpFS0NtbFJTVlZCZDFWQldVaHBSM0ZqZG5jMU9FUXJhalZ3VGtGUmFrTXpRUzh6VVdwUVow - MXZTM0JYZFZVeWNuaExWVkpJYTI5VVRHVkdTRmczVDFkVU1Ya0tlRzR6WWtOMU1FeHdXRWhDWjBk - Vkt6VndTRFF3ZGswdmMzVlhjblJZYjNJckwydHRja3BFWkZWMU5IWkpaMmt4VW1aQmNsTmxabk5H - TTFCdlIxWnFjUW8yWkhVM1RuQmhOazlQT1cxWFJGWXZPRnBqYW14SVdrVkpUVU5OYlRKamRqQnVk - RmhuWTJwSmJrTmhlbmtyTVhkNmFHaExUMFJNV1RKTE9WZ3dPVUkzQ2xKR01HMWpjR0ZpUVZsclJH - ZHpWVVYyU0RCM2RXUTRkRkpuZW5sWVFXWTJZMmw0TTBabVoyOTVTa2d2ZUdFd01uWkZRbFZGUWxG - dFUzVlhjMEk0ZHpNS2FXaFVVVVpyVEN0NE1uQnliSGxXUWtWd2FqTnlRMmhZY3poMVVsbEJMeTlU - UjNka1owSndkMlpVWm01ckswUk5VVGRuUzJKbFYyVnhabFFyVlRNNUt3cGFaMGhoTW1WbFkzZFJh - MmhsVEUwNFkzVkhha012ZFVFNVpWSnhablJRY1RCcU0yVllNWGxGYjNOR1pHTldOekJuYzFJd1dH - TjBSazlDWTJWSVVXNXRDak4xVjBSVVRqRmllV0kyWVZaYVFqZzVUM1JWTjFCU1preHhSMVkyYjBj - MU5WQkZVRkV6VVRCSVRXMVRlRWt3WlRjNWNIUXhXVmxHUkhad2FtNXBjSGtLTjJ0aUwzbERObU5s - ZFZGTWNraHpLMjVSYkVKQ05VSk5OV0ZQVFN0emMwSkpSM1JJZGtWUFRqWTFOMGw2WlRKaGNqRnBj - bTh5Y1V4TVFXRkJVMHBrVEFwbU9ERmlPSGxFY1hCeVpFOVZaV1ZMYzBncldYazFhekZGVVUwdlRE - QXpjeTkzVm1wa1NqQktiV2xFVlhwd1pXa3dkVXRCYmxGUWRFdElRbFJsYmtsSENtRnBMMU5KYlRO - RWMxcHlNRTV4TnpsYVVWVjBiVEpYY0c5bGVrTkZla3ByVlN0eU4weFhjRWg1Y21SdE5VMVpOVzV4 - TW1sb2JsaEVNRFZHTkhsbGIwVUtObmh4VlRGYWJDdERXSEZOYTNOblJUWklZV0l2VDFscVpHRmFX - VUZwUm5aUlJFOXhPVkU1V1ZVNE5rSnRXSGRwUzNoalltNVRWREJ6VnpZeFVUaFpWd3BWVUVSblpT - dFJMMHhSUFQwS1BVVnpWVWNLTFMwdExTMUZUa1FnVUVkUUlGTkpSMDVCVkZWU1JTMHRMUzB0Q2c9 - sqdlslsdkfja;sldfs== - tasks: - - ping: From ac0f8f2fa9a8c71a1a795e554235a4614b2d6cbb Mon Sep 17 00:00:00 2001 From: Stanislav Kontar Date: Wed, 13 Oct 2021 21:57:29 +0200 Subject: [PATCH 572/892] Add parsers and combiners for data from fwupdagent (#3253) * Add parsers and combiners for data from fwupdagent Signed-off-by: Stanislav Kontar * Add tests for command issues Signed-off-by: Stanislav Kontar * Use component instead of combiner Signed-off-by: Stanislav Kontar * Add component to the default manifest Signed-off-by: Stanislav Kontar --- .../virtualization.rst | 3 + docs/shared_parsers_catalog/fwupdagent.rst | 3 + insights/collect.py | 13 ++ .../components/tests/test_virtualization.py | 17 ++ insights/components/virtualization.py | 25 +++ insights/parsers/fwupdagent.py | 167 ++++++++++++++++ insights/parsers/tests/test_fwupdagent.py | 186 ++++++++++++++++++ insights/specs/__init__.py | 2 + insights/specs/default.py | 3 + insights/specs/insights_archive.py | 2 + 10 files changed, 421 insertions(+) create mode 100644 docs/shared_components_catalog/virtualization.rst create mode 100644 docs/shared_parsers_catalog/fwupdagent.rst create mode 100644 insights/components/tests/test_virtualization.py create mode 100644 insights/components/virtualization.py create mode 100644 insights/parsers/fwupdagent.py create mode 100644 
insights/parsers/tests/test_fwupdagent.py diff --git a/docs/shared_components_catalog/virtualization.rst b/docs/shared_components_catalog/virtualization.rst new file mode 100644 index 000000000..a3929d7df --- /dev/null +++ b/docs/shared_components_catalog/virtualization.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.virtualization + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/fwupdagent.rst b/docs/shared_parsers_catalog/fwupdagent.rst new file mode 100644 index 000000000..49501c632 --- /dev/null +++ b/docs/shared_parsers_catalog/fwupdagent.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.fwupdagent + :members: + :show-inheritance: diff --git a/insights/collect.py b/insights/collect.py index 5bf5c4c3e..8113e1a71 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -164,6 +164,19 @@ - name: insights.combiners.sap enabled: true + # needed for fw_devices and fw_security specs + - name: insights.parsers.dmidecode.DMIDecode + enabled: true + + - name: insights.parsers.virt_what.VirtWhat + enabled: true + + - name: insights.combiners.virt_what.VirtWhat + enabled: true + + - name: insights.components.virtualization.IsBareMetal + enabled: true + # needed for the 'pre-check' of the 'ss' spec - name: insights.parsers.lsmod enabled: true diff --git a/insights/components/tests/test_virtualization.py b/insights/components/tests/test_virtualization.py new file mode 100644 index 000000000..b02358340 --- /dev/null +++ b/insights/components/tests/test_virtualization.py @@ -0,0 +1,17 @@ +import pytest + +from insights import SkipComponent +from insights.combiners.virt_what import VirtWhat +from insights.components.virtualization import IsBareMetal +from insights.parsers.virt_what import VirtWhat as VWP +from insights.tests import context_wrap + + +def test_is_bare_metal(): + virt_what = VirtWhat(None, VWP(context_wrap(""))) + result = IsBareMetal(virt_what) + assert isinstance(result, IsBareMetal) + + virt_what = VirtWhat(None, VWP(context_wrap("kvm"))) + with pytest.raises(SkipComponent): + IsBareMetal(virt_what) diff --git a/insights/components/virtualization.py b/insights/components/virtualization.py new file mode 100644 index 000000000..831e953c4 --- /dev/null +++ b/insights/components/virtualization.py @@ -0,0 +1,25 @@ +""" +Components identify system type with regard to virtualization +============================================================= + +The ``IsBareMetal`` component in this module is valid if the +:py:class:`insights.combiners.virt_what.VirtWhat` combiner indicates +the host is bare metal. +""" +from insights.combiners.virt_what import VirtWhat +from insights.core.dr import SkipComponent +from insights.core.plugins import component + + +@component(VirtWhat) +class IsBareMetal(object): + """ + This component uses ``VirtWhat`` combiner to determine the virtualization type. + It checks if the system is bare metal, otherwise it raises ``SkipComponent``. + + Raises: + SkipComponent: When system is a virtual machine. 
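+
+    A minimal usage sketch (illustrative only; ``virt_what`` is assumed to be a ``VirtWhat`` combiner built from empty ``virt-what`` output, i.e. a bare-metal host, as in the tests above)::
+
+        bare_metal = IsBareMetal(virt_what)  # an instance is returned on bare metal;
+                                             # a virtual machine raises SkipComponent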
+ """ + def __init__(self, virt): + if virt.is_virtual: + raise SkipComponent("Not a bare metal system.") diff --git a/insights/parsers/fwupdagent.py b/insights/parsers/fwupdagent.py new file mode 100644 index 000000000..7a6695b52 --- /dev/null +++ b/insights/parsers/fwupdagent.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +""" +fwupdagent - Commands +===================== + +FwupdagentDevices - command ``/bin/fwupdagent get-devices`` +FwupdagentSecurity - command ``/bin/fwupdagent security --force`` + +Both commands produce JSON data representing current state of the HW firmware. +""" + +from .. import parser, CommandParser, JSONParser +from insights.specs import Specs + + +@parser(Specs.fw_devices) +class FwupdagentDevices(CommandParser, JSONParser): + """ + Class ``FwupdagentDevices`` parses the output of the ``/bin/fwupdagent get-devices`` command. + + Attributes: + data (dict): The parsed output of the command. + + Sample input data:: + + { + "Devices" : [ + { + "Name" : "Thunderbolt host controller", + "DeviceId" : "113494429de0754484e6b93d70e879a913311dd5", + "Guid" : [ + "ea3c7ac8-d937-568a-aab5-e053bd6af9ce", + "3d3f953a-3408-56fa-81f3-042d73c59c16", + "e773c51e-a20c-5b29-9f09-6bb0e0ef7560", + "f1793149-6c12-5a7f-95db-9fefd116748d" + ], + "Summary" : "Unmatched performance for high-speed I/O", + "Plugin" : "thunderbolt", + "Protocol" : "com.intel.thunderbolt", + "Flags" : [ + "internal", + "updatable", + "require-ac", + "supported", + "registered", + "dual-image" + ], + "Vendor" : "Lenovo", + "VendorId" : "THUNDERBOLT:0x0109|TBT:0x0109", + "VendorIds" : [ + "THUNDERBOLT:0x0109", + "TBT:0x0109" + ], + "Version" : "20.00", + "VersionFormat" : "pair", + "Icons" : [ + "thunderbolt" + ], + "Created" : 1633947746, + "Releases" : [ + { + "AppstreamId" : "com.lenovo.ThinkPadN2JTF.firmware", + "RemoteId" : "lvfs", + "Summary" : "Lenovo ThinkPad X390/ThinkPad T490s Thunderbolt Firmware", + "Description" : "
<p>Lenovo ThinkPad X390/ThinkPad T490s Thunderbolt Firmware</p><p>Fix Thunderbolt SPI ROM Wear out issue.</p>
", + "Version" : "20.00", + "Filename" : "fc93d9ff3f7ced7eb11d9bf7d8cd009ce408d7d9", + "Protocol" : "com.intel.thunderbolt", + "Checksum" : [ + "915a6657f6972937bcb88868e5d8ce1f1ec9fb85" + ], + "License" : "LicenseRef-proprietary", + "Size" : 253952, + "Created" : 1583253000, + "Locations" : [ + "https://fwupd.org/downloads/8eef957c95cb6f534448be1faa7bbfc8702d620f64b757d40ee5e0b6b7094c0e-Lenovo-ThinkPad-X390-SystemFirmware-01.cab" + ], + "Uri" : "https://fwupd.org/downloads/8eef957c95cb6f534448be1faa7bbfc8702d620f64b757d40ee5e0b6b7094c0e-Lenovo-ThinkPad-X390-SystemFirmware-01.cab", + "Homepage" : "http://www.lenovo.com", + "Vendor" : "Lenovo Ltd." + } + ] + }, + { + "Name" : "USB3.0 Hub", + "DeviceId" : "54f0d9041b6c5438c7ff825f5139559c5ca1b222", + "Guid" : [ + "9429e4c7-f053-51d7-9289-75c4ddb14a97", + "26f33695-3a3e-5c08-badb-f6141390ebd9", + "10eb3a15-c177-5810-af53-1963e9200e65", + "60e0a85e-b245-5c84-ba3b-d5bdb540cd47", + "d91a45a0-4435-59d6-bb6b-9499f4a793c2", + "022d2f73-4826-546a-ba0f-62579ea848ea" + ], + "Summary" : "USB 3.x Hub", + "Plugin" : "vli", + "Protocol" : "com.vli.usbhub", + "Flags" : [ + "updatable", + "registered", + "can-verify", + "can-verify-image", + "dual-image", + "self-recovery", + "add-counterpart-guids" + ], + "Vendor" : "VIA Labs, Inc.", + "VendorId" : "USB:0x2109", + "Version" : "3.114", + "VersionFormat" : "bcd", + "Icons" : [ + "audio-card" + ], + "InstallDuration" : 15, + "Created" : 1633947743 + } + ] + } + + Examples: + >>> type(devices) + + >>> len(devices["Devices"]) + 2 + """ + pass + + +@parser(Specs.fw_security) +class FwupdagentSecurity(CommandParser, JSONParser): + """ + Class ``FwupdagentSecurity`` parses the output of the ``/bin/fwupdagent get-devices`` command. + + Attributes: + data (dict): The parsed output of the command. 
+ + Sample input data:: + + { + "HostSecurityAttributes" : [ + { + "AppstreamId" : "org.fwupd.hsi.Kernel.Tainted", + "HsiResult" : "not-tainted", + "Name" : "Linux kernel", + "Uri" : "https://fwupd.github.io/hsi.html#org.fwupd.hsi.Kernel.Tainted", + "Flags" : [ + "success", + "runtime-issue" + ] + }, + { + "AppstreamId" : "org.fwupd.hsi.EncryptedRam", + "HsiLevel" : 4, + "HsiResult" : "not-supported", + "Name" : "Encrypted RAM", + "Uri" : "https://fwupd.github.io/hsi.html#org.fwupd.hsi.EncryptedRam" + } + ] + } + + Examples: + >>> type(security) + + >>> len(security["HostSecurityAttributes"]) + 2 + """ + pass diff --git a/insights/parsers/tests/test_fwupdagent.py b/insights/parsers/tests/test_fwupdagent.py new file mode 100644 index 000000000..affc04ef4 --- /dev/null +++ b/insights/parsers/tests/test_fwupdagent.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- + +import doctest + +import pytest + +from insights.parsers import fwupdagent, ParseException +from insights.parsers.fwupdagent import FwupdagentDevices, FwupdagentSecurity +from insights.tests import context_wrap + +DEVICES = """ +{ + "Devices" : [ + { + "Name" : "Thunderbolt host controller", + "DeviceId" : "113494429de0754484e6b93d70e879a913311dd5", + "Guid" : [ + "ea3c7ac8-d937-568a-aab5-e053bd6af9ce", + "3d3f953a-3408-56fa-81f3-042d73c59c16", + "e773c51e-a20c-5b29-9f09-6bb0e0ef7560", + "f1793149-6c12-5a7f-95db-9fefd116748d" + ], + "Summary" : "Unmatched performance for high-speed I/O", + "Plugin" : "thunderbolt", + "Protocol" : "com.intel.thunderbolt", + "Flags" : [ + "internal", + "updatable", + "require-ac", + "supported", + "registered", + "dual-image" + ], + "Vendor" : "Lenovo", + "VendorId" : "THUNDERBOLT:0x0109|TBT:0x0109", + "VendorIds" : [ + "THUNDERBOLT:0x0109", + "TBT:0x0109" + ], + "Version" : "20.00", + "VersionFormat" : "pair", + "Icons" : [ + "thunderbolt" + ], + "Created" : 1633947746, + "Releases" : [ + { + "AppstreamId" : "com.lenovo.ThinkPadN2JTF.firmware", + "RemoteId" : "lvfs", + "Summary" : "Lenovo ThinkPad X390/ThinkPad T490s Thunderbolt Firmware", + "Description" : "
<p>Lenovo ThinkPad X390/ThinkPad T490s Thunderbolt Firmware</p><p>Fix Thunderbolt SPI ROM Wear out issue.</p>
", + "Version" : "20.00", + "Filename" : "fc93d9ff3f7ced7eb11d9bf7d8cd009ce408d7d9", + "Protocol" : "com.intel.thunderbolt", + "Checksum" : [ + "915a6657f6972937bcb88868e5d8ce1f1ec9fb85" + ], + "License" : "LicenseRef-proprietary", + "Size" : 253952, + "Created" : 1583253000, + "Locations" : [ + "https://fwupd.org/downloads/8eef957c95cb6f534448be1faa7bbfc8702d620f64b757d40ee5e0b6b7094c0e-Lenovo-ThinkPad-X390-SystemFirmware-01.cab" + ], + "Uri" : "https://fwupd.org/downloads/8eef957c95cb6f534448be1faa7bbfc8702d620f64b757d40ee5e0b6b7094c0e-Lenovo-ThinkPad-X390-SystemFirmware-01.cab", + "Homepage" : "http://www.lenovo.com", + "Vendor" : "Lenovo Ltd." + } + ] + }, + { + "Name" : "USB3.0 Hub", + "DeviceId" : "54f0d9041b6c5438c7ff825f5139559c5ca1b222", + "Guid" : [ + "9429e4c7-f053-51d7-9289-75c4ddb14a97", + "26f33695-3a3e-5c08-badb-f6141390ebd9", + "10eb3a15-c177-5810-af53-1963e9200e65", + "60e0a85e-b245-5c84-ba3b-d5bdb540cd47", + "d91a45a0-4435-59d6-bb6b-9499f4a793c2", + "022d2f73-4826-546a-ba0f-62579ea848ea" + ], + "Summary" : "USB 3.x Hub", + "Plugin" : "vli", + "Protocol" : "com.vli.usbhub", + "Flags" : [ + "updatable", + "registered", + "can-verify", + "can-verify-image", + "dual-image", + "self-recovery", + "add-counterpart-guids" + ], + "Vendor" : "VIA Labs, Inc.", + "VendorId" : "USB:0x2109", + "Version" : "3.114", + "VersionFormat" : "bcd", + "Icons" : [ + "audio-card" + ], + "InstallDuration" : 15, + "Created" : 1633947743 + } + ] +} +""" + +SECURITY = """ +{ + "HostSecurityAttributes" : [ + { + "AppstreamId" : "org.fwupd.hsi.Kernel.Tainted", + "HsiResult" : "not-tainted", + "Name" : "Linux kernel", + "Uri" : "https://fwupd.github.io/hsi.html#org.fwupd.hsi.Kernel.Tainted", + "Flags" : [ + "success", + "runtime-issue" + ] + }, + { + "AppstreamId" : "org.fwupd.hsi.EncryptedRam", + "HsiLevel" : 4, + "HsiResult" : "not-supported", + "Name" : "Encrypted RAM", + "Uri" : "https://fwupd.github.io/hsi.html#org.fwupd.hsi.EncryptedRam" + } + ] +} +""" + +SECURITY_ERROR_1 = """ +Failed to parse arguments: Unknown option --force +""" + +SECURITY_ERROR_2 = """ +Command not found + +Usage: + fwupdagent [OPTION…] + + get-devices Get all devices and possible releases + get-updates Gets the list of updates for connected hardware + get-upgrades Alias to get-updates + +Help Options: + -h, --help Show help options + +Application Options: + -v, --verbose Show extra debugging information + +This tool can be used from other tools and from shell scripts. 
+""" + + +def test_devices(): + devices = FwupdagentDevices(context_wrap(DEVICES)) + + assert len(devices["Devices"]) == 2 + assert devices["Devices"][0]["Name"] == "Thunderbolt host controller" + assert devices["Devices"][0]["Version"] == "20.00" + assert devices["Devices"][1]["Name"] == "USB3.0 Hub" + assert devices["Devices"][1]["Version"] == "3.114" + + +def test_security(): + security = FwupdagentSecurity(context_wrap(SECURITY)) + assert len(security["HostSecurityAttributes"]) == 2 + assert security["HostSecurityAttributes"][0]["Name"] == "Linux kernel" + assert security["HostSecurityAttributes"][0]["HsiResult"] == "not-tainted" + assert security["HostSecurityAttributes"][1]["Name"] == "Encrypted RAM" + assert security["HostSecurityAttributes"][1]["HsiLevel"] == 4 + + with pytest.raises(ParseException): + FwupdagentSecurity(context_wrap(SECURITY_ERROR_1)) + + with pytest.raises(ParseException): + FwupdagentSecurity(context_wrap(SECURITY_ERROR_2)) + + +def test_doc_examples(): + env = { + "devices": FwupdagentDevices(context_wrap(DEVICES)), + "security": FwupdagentSecurity(context_wrap(SECURITY)), + } + failed, total = doctest.testmod(fwupdagent, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 90222509c..0ea5a063f 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -178,6 +178,8 @@ class Specs(SpecSet): foreman_tasks_config = RegistryPoint(filterable=True) freeipa_healthcheck_log = RegistryPoint() fstab = RegistryPoint() + fw_devices = RegistryPoint() + fw_security = RegistryPoint() galera_cnf = RegistryPoint() gcp_instance_type = RegistryPoint() gcp_license_codes = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 99714f0ca..7b5b83187 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -17,6 +17,7 @@ from os import stat from pwd import getpwuid +from insights.components.virtualization import IsBareMetal from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource @@ -238,6 +239,8 @@ def du_dirs_list(broker): firewalld_conf = simple_file("/etc/firewalld/firewalld.conf") foreman_ssl_error_ssl_log = simple_file("/var/log/httpd/foreman-ssl_error_ssl.log") fstab = simple_file("/etc/fstab") + fw_devices = simple_command("/bin/fwupdagent get-devices", deps=[IsBareMetal]) + fw_security = simple_command("/bin/fwupdagent security --force", deps=[IsBareMetal]) galera_cnf = first_file(["/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", "/etc/my.cnf.d/galera.cnf"]) getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") getenforce = simple_command("/usr/sbin/getenforce") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index b8d25f7b8..5e533c99e 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -68,6 +68,8 @@ class InsightsArchiveSpecs(Specs): fcoeadm_i = simple_file("insights_commands/fcoeadm_-i") findmnt_lo_propagation = simple_file("insights_commands/findmnt_-lo_PROPAGATION") firewall_cmd_list_all_zones = simple_file("insights_commands/firewall-cmd_--list-all-zones") + fw_devices = simple_file("insights_commands/fwupdagent_get-devices") + fw_security = simple_file("insights_commands/fwupdagent_security_--force") gcp_license_codes = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_gcp_license_codes") getconf_page_size = 
simple_file("insights_commands/getconf_PAGE_SIZE") getenforce = simple_file("insights_commands/getenforce") From 87b2eda66f57c9fb72ea08373c04a49343ddde9b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 19 Oct 2021 03:17:34 +0800 Subject: [PATCH 573/892] New spec and parser to check httpd ssl certificate expire date (#3212) * New spec and parser to check httpd ssl certificate expire date Signed-off-by: Huanhuan Li * Add test for datasource * Restructure the code to put ssl certificate together Signed-off-by: Huanhuan Li * Reorder the import statement Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 8 ++ insights/collect.py | 7 ++ insights/parsers/ssl_certificate.py | 24 ++++++ .../parsers/tests/test_ssl_certificate.py | 15 +++- insights/specs/__init__.py | 1 + insights/specs/datasources/ssl_certificate.py | 29 ++++++++ insights/specs/default.py | 3 +- .../tests/datasources/test_ssl_certificate.py | 73 +++++++++++++++++++ 8 files changed, 157 insertions(+), 3 deletions(-) create mode 100644 insights/specs/datasources/ssl_certificate.py create mode 100644 insights/tests/datasources/test_ssl_certificate.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 027a6ed07..90331ce9f 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -75,6 +75,14 @@ insights.specs.datasources.satellite_missed_queues :show-inheritance: :undoc-members: +insights.specs.datasources.ssl_certificate +------------------------------------------ + +.. automodule:: insights.specs.datasources.ssl_certificate + :members: httpd_ssl_certificate_file + :show-inheritance: + :undoc-members: + insights.specs.datasources.yum_updates -------------------------------------- diff --git a/insights/collect.py b/insights/collect.py index 8113e1a71..4ec59bb8f 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -154,6 +154,13 @@ - name: insights.combiners.ps enabled: true + # needed for httpd_certificate + - name: insights.combiners.httpd_conf.HttpdConfTree + enabled: true + + - name: insights.combiners.httpd_conf._HttpdConf + enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner - name: insights.parsers.lssap enabled: true diff --git a/insights/parsers/ssl_certificate.py b/insights/parsers/ssl_certificate.py index 5d8c2a1a7..70846c2fb 100644 --- a/insights/parsers/ssl_certificate.py +++ b/insights/parsers/ssl_certificate.py @@ -8,6 +8,8 @@ ======================================================================================================================================================================================================================================== RhsmKatelloDefaultCACert - command ``openssl x509 -in /etc/rhsm/ca/katello-default-ca.pem -noout -issuer`` ========================================================================================================== +HttpdSSLCertExpireDate - command ``openssl x509 -in httpd_certificate_path -enddate -noout`` +============================================================================================ """ from insights import parser, CommandParser @@ -196,3 +198,25 @@ class RhsmKatelloDefaultCACert(CertificateInfo): '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' """ pass + + +@parser(Specs.httpd_ssl_cert_enddate) +class HttpdSSLCertExpireDate(CertificateInfo): + """ + .. 
note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more + details. + + It parses the output of ``openssl x509 -in httpd_ssl_certificate_path -enddate -noout``. + + Sample output of ``openssl x509 -in httpd_certificate_path -enddate -noout``:: + + notAfter=Dec 4 07:04:05 2035 GMT + + Examples: + >>> type(date_info) + <class 'insights.parsers.ssl_certificate.HttpdSSLCertExpireDate'> + >>> date_info['notAfter'].datetime + datetime.datetime(2038, 1, 18, 7, 2, 43) + """ + pass diff --git a/insights/parsers/tests/test_ssl_certificate.py b/insights/parsers/tests/test_ssl_certificate.py index f8bb7e336..c588c0232 100644 --- a/insights/parsers/tests/test_ssl_certificate.py +++ b/insights/parsers/tests/test_ssl_certificate.py @@ -76,6 +76,10 @@ """ +HTTPD_CERT_EXPIRE_INFO = ''' +notAfter=Jan 18 07:02:43 2038 GMT +''' + def test_certificate_info_exception(): with pytest.raises(ParseException): @@ -142,12 +146,19 @@ def test_doc(): ca_cert = ssl_certificate.CertificateChain(context_wrap(CERTIFICATE_CHAIN_OUTPUT1)) satellite_ca_certs = ssl_certificate.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT2)) rhsm_katello_default_ca = ssl_certificate.RhsmKatelloDefaultCACert(context_wrap(RHSM_KATELLO_CERT_OUTPUT1)) - + date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO)) globs = { 'cert': cert, 'certs': ca_cert, 'satellite_ca_certs': satellite_ca_certs, - 'rhsm_katello_default_ca': rhsm_katello_default_ca + 'rhsm_katello_default_ca': rhsm_katello_default_ca, + 'date_info': date_info } failed, tested = doctest.testmod(ssl_certificate, globs=globs) assert failed == 0 + + +def test_httpd_ssl_cert_parser(): + date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO)) + assert 'notAfter' in date_info + assert date_info['notAfter'].str == 'Jan 18 07:02:43 2038' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 0ea5a063f..43e1d4647 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -232,6 +232,7 @@ class Specs(SpecSet): httpd_M = RegistryPoint(multi_output=True) httpd_on_nfs = RegistryPoint() httpd_ssl_access_log = RegistryPoint(filterable=True) + httpd_ssl_cert_enddate = RegistryPoint() httpd_ssl_error_log = RegistryPoint(filterable=True) httpd_V = RegistryPoint(multi_output=True) virt_uuid_facts = RegistryPoint() diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py new file mode 100644 index 000000000..3b9c01319 --- /dev/null +++ b/insights/specs/datasources/ssl_certificate.py @@ -0,0 +1,29 @@ +""" +Custom datasource to get ssl certificate file path.
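+
+A rough usage sketch (illustrative; in practice the insights engine resolves this datasource from a broker rather than it being called directly)::
+
+    broker = {HttpdConfTree: conf_tree}
+    path = httpd_ssl_certificate_file(broker)  # e.g. "/etc/pki/katello/certs/katello-apache.crt"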
+""" + +from insights.combiners.httpd_conf import HttpdConfTree +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.core.plugins import datasource + + +@datasource(HttpdConfTree, HostContext) +def httpd_ssl_certificate_file(broker): + """ + Get the httpd SSL certificate file path configured by "SSLCertificateFile" + + Arguments: + broker: the broker object for the current session + + Returns: + str: Returns the SSL certificate file path configured by "SSLCertificateFile" + + Raises: + SkipComponent: Raised if "SSLCertificateFile" directive isn't found + """ + conf = broker[HttpdConfTree] + ssl_cert = conf.find('SSLCertificateFile') + if ssl_cert and ssl_cert[0].value: + return str(ssl_cert[0].value) + raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index 7b5b83187..0f85acda1 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -35,7 +35,7 @@ from insights.specs import Specs from insights.specs.datasources import ( awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, lpstat, package_provides, - ps as ps_datasource, sap, satellite_missed_queues, yum_updates) + ps as ps_datasource, sap, satellite_missed_queues, ssl_certificate, yum_updates) from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr @@ -311,6 +311,7 @@ def httpd_cmd(broker): httpd_pid = simple_command("/usr/bin/pgrep -o httpd") httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") httpd_M = foreach_execute(httpd_cmd, "%s -M") + httpd_ssl_cert_enddate = command_with_args('/usr/bin/openssl x509 -in %s -enddate -noout', ssl_certificate.httpd_ssl_certificate_file) httpd_V = foreach_execute(httpd_cmd, "%s -V") ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*") ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*") diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py new file mode 100644 index 000000000..9e90f92e5 --- /dev/null +++ b/insights/tests/datasources/test_ssl_certificate.py @@ -0,0 +1,73 @@ +import pytest + +from insights.core.dr import SkipComponent +from insights.tests import context_wrap +from insights.combiners.httpd_conf import _HttpdConf, HttpdConfTree +from insights.specs.datasources.ssl_certificate import httpd_ssl_certificate_file + + +HTTPD_CONF = """ +listen 80 +listen 443 +IncludeOptional "/etc/httpd/conf.d/*.conf" +""".strip() + +HTTPD_SSL_CONF = """ + + ## SSL directives + SSLEngine on + SSLCertificateFile "/etc/pki/katello/certs/katello-apache.crt" + SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache.key" + SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca.crt" + SSLVerifyClient optional + SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca.crt" + SSLVerifyDepth 3 + SSLOptions +StdEnvVars +ExportCertData + +""".strip() + +HTTPD_CONF_WITHOUT_SSL = """ + + ServerName a.b.c.com + +""".strip() + +HTTPD_SSL_CONF_NO_VALUE = """ + + ## SSL directives + SSLEngine off + SSLCertificateFile "" + SSLCertificateKeyFile "" + SSLCertificateChainFile "" + +""".strip() + + +def test_httpd_certificate(): + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF, path='/etc/httpd/conf.d/ssl.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + + broker = { + HttpdConfTree: conf_tree + } + result = httpd_ssl_certificate_file(broker) + assert result == 
'/etc/pki/katello/certs/katello-apache.crt' + + +def test_exception(): + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_CONF_WITHOUT_SSL, path='/etc/httpd/conf.d/no_ssl.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + broker1 = { + HttpdConfTree: conf_tree + } + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF_NO_VALUE, path='/etc/httpd/conf.d/no_ssl.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + broker2 = { + HttpdConfTree: conf_tree + } + with pytest.raises(SkipComponent): + httpd_ssl_certificate_file(broker1) + httpd_ssl_certificate_file(broker2) From 99bbe299c5b01d9ddf70d0e5301f5e0e6a7a120e Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Mon, 18 Oct 2021 16:05:37 -0400 Subject: [PATCH 574/892] Update _load_component's default exclude (#3252) * Set the default exclude to insights.*.tests for all of the paths and names that are processed by _load_components. That way it only excludes insights core test files, and not every file that has test in its name. * Fixes #3250 Signed-off-by: Ryan Blakley --- insights/core/dr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/core/dr.py b/insights/core/dr.py index 62b12daed..54eda7055 100644 --- a/insights/core/dr.py +++ b/insights/core/dr.py @@ -393,7 +393,7 @@ def _import(path, continue_on_error): raise -def _load_components(path, include=".*", exclude="test", continue_on_error=True): +def _load_components(path, include=".*", exclude="insights\\..+\\.tests", continue_on_error=True): do_include = re.compile(include).search if include else lambda x: True do_exclude = re.compile(exclude).search if exclude else lambda x: False From 3ee02b5d667e89766a0d1d762ddeacba1e07aa72 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 20 Oct 2021 12:47:11 -0400 Subject: [PATCH 575/892] Add new GrubEnv spec and parser (#3244) * Add in new spec and parser for the /boot/grub2/grubenv file. * Update the BootLoaderEntries combiner to expand any grub environment variables in the cmdline from the GrubEnv parser. * Add in GrubEnv test, and update the grub_conf combiner test. * Fixes #3237 Signed-off-by: Ryan Blakley --- docs/shared_parsers_catalog/grubenv.rst | 3 + insights/combiners/grub_conf.py | 96 +++++++++++++--------- insights/combiners/tests/test_grub_conf.py | 86 ++++++++++++++++--- insights/parsers/grubenv.py | 92 +++++++++++++++++++++ insights/parsers/tests/test_grubenv.py | 79 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 7 files changed, 307 insertions(+), 51 deletions(-) create mode 100644 docs/shared_parsers_catalog/grubenv.rst create mode 100644 insights/parsers/grubenv.py create mode 100644 insights/parsers/tests/test_grubenv.py diff --git a/docs/shared_parsers_catalog/grubenv.rst b/docs/shared_parsers_catalog/grubenv.rst new file mode 100644 index 000000000..9905f5c4e --- /dev/null +++ b/docs/shared_parsers_catalog/grubenv.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.grubenv + :members: + :show-inheritance: diff --git a/insights/combiners/grub_conf.py b/insights/combiners/grub_conf.py index ed6a06c38..3db3978a4 100644 --- a/insights/combiners/grub_conf.py +++ b/insights/combiners/grub_conf.py @@ -1,7 +1,7 @@ """ GrubConf - The valid GRUB configuration ======================================= -Combiner for Red Hat Grub v1 and Grub v2 information. +Combiner for Red Hat Grub v1, Grub v2, and BLS information.
This combiner uses the parsers: @@ -9,38 +9,34 @@ :class:`insights.parsers.grub_conf.Grub1Config`, :class:`insights.parsers.grub_conf.Grub1EFIConfig`, :class:`insights.parsers.grub_conf.Grub2Config`, :class:`insights.parsers.grub_conf.Grub2EFIConfig`, and :class:`insights.parsers.grub_conf.BootLoaderEntries`. +:class:`insights.parsers.grubenv.GrubEnv`. -It determines which parser was used by checking one of the follwing +It determines which parser was used by checking one of the following parsers/combiners: :class:`insights.parsers.installed_rpms.InstalledRpms`, :class:`insights.parsers.cmdline.CmdLine`, :class:`insights.parsers.ls_sys_firmware.LsSysFirmware`, and :class:`insights.combiners.redhat_release.RedHatRelease`. - - """ +import re -from insights.core.plugins import combiner +from insights import SkipComponent from insights.combiners.redhat_release import RedHatRelease -from insights.parsers.grub_conf import BootEntry, get_kernel_initrds -from insights.parsers.grub_conf import Grub1Config, Grub1EFIConfig -from insights.parsers.grub_conf import Grub2Config, Grub2EFIConfig -from insights.parsers.grub_conf import BootLoaderEntries as BLE -from insights.parsers.ls_sys_firmware import LsSysFirmware -from insights.parsers.installed_rpms import InstalledRpms +from insights.core.plugins import combiner from insights.parsers.cmdline import CmdLine -from insights import SkipComponent +from insights.parsers.grub_conf import (get_kernel_initrds, BootEntry, Grub1Config, Grub1EFIConfig, Grub2Config, + Grub2EFIConfig, BootLoaderEntries as BLE) +from insights.parsers.grubenv import GrubEnv +from insights.parsers.installed_rpms import InstalledRpms +from insights.parsers.ls_sys_firmware import LsSysFirmware -@combiner(BLE, optional=[LsSysFirmware]) +@combiner(BLE, optional=[GrubEnv, LsSysFirmware]) class BootLoaderEntries(object): """ Combine all :class:`insights.parsers.grub_conf.BootLoaderEntries` parsers into one Combiner - Raises: - SkipComponent: when no any BootLoaderEntries Parsers. - Attributes: version (int): The version of the GRUB configuration, 1 or 2 is_efi (bool): If the host is boot with EFI @@ -50,30 +46,68 @@ class BootLoaderEntries(object): kernel_initrds (dict): Dict of the `kernel` and `initrd` files referenced in GRUB configuration files is_kdump_iommu_enabled (bool): If any kernel entry contains "intel_iommu=on" + + Raises: + SkipComponent: when there are no BootLoaderEntries parsers. """ - def __init__(self, grub_bles, sys_firmware): + def __init__(self, grub_bles, grubenv, sys_firmware): self.version = self._version = 2 self.is_efi = self._efi = '/sys/firmware/efi' in sys_firmware if sys_firmware else False self.entries = [] self.boot_entries = [] self.is_kdump_iommu_enabled = False + for ble in grub_bles: - self.entries.append(ble.entry) - self.boot_entries.append(BootEntry({'name': ble.title, 'cmdline': ble.cmdline})) + # Make a copy of the ble entry so that no + # write-backs occur below when expanding variables. + self.entries.append(ble.entry.copy()) + self.boot_entries.append(BootEntry({'name': ble.title, 'cmdline': ble.cmdline, + 'version': ble.entry.get('version')})) self.is_kdump_iommu_enabled = self.is_kdump_iommu_enabled or ble.is_kdump_iommu_enabled + + # If grub_bles and grubenv exist, expand the $kernelopts, + # $tuned_params, and $tuned_initrd variables.
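+ # For example (values from the GRUBENV_WITH_TUNED_PARAMS test data below), a BLS
+ # entry line "options $kernelopts $tuned_params" expands to the real cmdline
+ # "root=/dev/mapper/root_vg-lv_root ro ... transparent_hugepages=never".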
+ if grub_bles and grubenv: + for entry in self.entries: + entry_options = entry.get('options', "") + if "$kernelopts" in entry_options or "$tuned_params" in entry_options: + entry['options'] = re.sub("\\$kernelopts", grubenv.kernelopts, + entry['options']).strip() + entry['options'] = re.sub("\\$tuned_params", grubenv.tuned_params, + entry['options']).strip() + + if "$tuned_initrd" in entry.get('initrd', "") and grubenv.get('tuned_initrd'): + entry['initrd'] = re.sub("\\$tuned_initrd", grubenv.get('tuned_initrd', ""), + entry['initrd']).strip() + + for entry in self.boot_entries: + entry_options = entry.get('cmdline', "") + if "$kernelopts" in entry_options or "$tuned_params" in entry_options: + entry['cmdline'] = re.sub("\\$kernelopts", grubenv.kernelopts, entry['cmdline']).strip() + entry['cmdline'] = re.sub("\\$tuned_params", grubenv.tuned_params, entry['cmdline']).strip() + self.kernel_initrds = get_kernel_initrds(self.entries) if not self.entries: raise SkipComponent() -@combiner([Grub1Config, Grub2Config, - Grub1EFIConfig, Grub2EFIConfig, - BootLoaderEntries], +@combiner([Grub1Config, Grub2Config, Grub1EFIConfig, Grub2EFIConfig, BootLoaderEntries], optional=[InstalledRpms, CmdLine, LsSysFirmware, RedHatRelease]) class GrubConf(object): """ - Process Grub configuration v1 or v2 based on which type is passed in. + Process Grub configuration v1, v2, and BLS based on which type is passed in. + + Attributes: + version (int): returns 1 or 2, version of the GRUB configuration + is_efi (bool): returns True if the host is boot with EFI + kernel_initrds (dict): returns a dict of the `kernel` and `initrd` + files referenced in GRUB configuration files + is_kdump_iommu_enabled (bool): returns True if any kernel entry + contains "intel_iommu=on" + + Raises: + Exception: when cannot find any valid grub configuration. Examples: >>> type(grub_conf) @@ -89,27 +123,13 @@ class GrubConf(object): False >>> grub_conf.get_grub_cmdlines('') [] - - Raises: - Exception: when cannot find any valid grub configuration. - - Attributes: - version (int): returns 1 or 2, version of the GRUB configuration - is_efi (bool): returns True if the host is boot with EFI - kernel_initrds (dict): returns a dict of the `kernel` and `initrd` - files referenced in GRUB configuration files - is_kdump_iommu_enabled (bool): returns True if any kernel entry - contains "intel_iommu=on" """ - def __init__(self, grub1, grub2, grub1_efi, grub2_efi, grub_bles, rpms, cmdline, sys_firmware, rh_rel): - self.version = self.is_kdump_iommu_enabled = None self.grub = self.kernel_initrds = None - _grubs = list(filter(None, [grub1, grub2, grub1_efi, grub2_efi, grub_bles])) - # Check if `/sys/firmware/efi` exist? 
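+ # /sys/firmware/efi is present only when the host was booted via EFI, so its
+ # presence in the LsSysFirmware listing marks an EFI boot.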
self.is_efi = '/sys/firmware/efi' in sys_firmware if sys_firmware else False + _grubs = list(filter(None, [grub1, grub2, grub1_efi, grub2_efi, grub_bles])) if len(_grubs) == 1: self.grub = _grubs[0] diff --git a/insights/combiners/tests/test_grub_conf.py b/insights/combiners/tests/test_grub_conf.py index 2b5e91c04..6dc783cfc 100644 --- a/insights/combiners/tests/test_grub_conf.py +++ b/insights/combiners/tests/test_grub_conf.py @@ -1,9 +1,9 @@ from insights.combiners.grub_conf import GrubConf, BootLoaderEntries from insights.combiners.redhat_release import RedHatRelease from insights.parsers.redhat_release import RedhatRelease -from insights.parsers.grub_conf import Grub1Config, Grub2Config -from insights.parsers.grub_conf import Grub2EFIConfig, Grub1EFIConfig -from insights.parsers.grub_conf import BootLoaderEntries as BLE +from insights.parsers.grub_conf import (Grub1Config, Grub2Config, Grub2EFIConfig, Grub1EFIConfig, + BootLoaderEntries as BLE) +from insights.parsers.grubenv import GrubEnv from insights.parsers.ls_sys_firmware import LsSysFirmware from insights.parsers.installed_rpms import InstalledRpms from insights.parsers.cmdline import CmdLine @@ -36,7 +36,7 @@ root (hd0,0) kernel /vmlinuz-2.6.32-642.el6.x86_64 {kernel_boot_options} ro root=/dev/mapper/VolGroup-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_NO_MD rd_LVM_LV=VolGroup/lv_swap SYSFONT=latarcyrheb-sun16 crashkernel=auto rd_LVM_LV=VolGroup/lv_root KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet initrd /initramfs-2.6.32-642.el6.x86_64.img -""" +""".strip() # noqa # rhel-7 GRUB2_TEMPLATE = """ @@ -91,7 +91,7 @@ linux16 /vmlinuz-0-rescue-9f20b35c9faa49aebe171f62a11b236f %s root=/dev/mapper/rhel-root ro crashkernel=auto rd.lvm.lv=rhel/root rd.lvm.lv=rhel/swap rhgb quiet initrd16 /initramfs-0-rescue-9f20b35c9faa49aebe171f62a11b236f.img } -""" +""".strip() # noqa GRUB2_EFI_CFG = """ ### BEGIN /etc/grub.d/10_linux ### @@ -176,7 +176,7 @@ root (hd0,1) kernel /vmlinuz-2.6.32-71.el6.x86_64 ro root=/dev/mapper/VolGroup-lv_root rd_LVM_LV=VolGroup/lv_root rd_LVM_LV=VolGroup/lv_swap rd_NO_LUKS rd_NO_MD rd_NO_DM LANG=en_US.UTF-8 SYSFONT=latarcyrheb-sun16 KEYBOARDTYPE=pc KEYTABLE=us crashkernel=auto rhgb quiet initrd /initramfs-2.6.32-71.el6.x86_64.img -""".strip() +""".strip() # noqa SYS_FIRMWARE_DIR_NOEFI = """ /sys/firmware: @@ -226,11 +226,11 @@ CMDLINE_V1 = """ ro root=/dev/mapper/vg_rhel6box-lv_root rd_NO_LUKS LANG=en_US.UTF-8 rd_LVM_LV=vg_rhel6box/lv_swap rd_LVM_LV=vg_rhel6box/lv_root rd_NO_MD SYSFONT=latarcyrheb-sun16 crashkernel=129M@0M KEYBOAR DTYPE=pc KEYTABLE=us rd_NO_DM rhgb quiet -""".strip() +""".strip() # noqa CMDLINE_V2 = """ BOOT_IMAGE=/vmlinuz-3.10.0-514.10.2.el7.x86_64 root=/dev/mapper/vg_system-lv_root ro crashkernel=auto rd.lvm.lv=vg_system/lv_root rd.lvm.lv=vg_system/lv_swap rhgb quiet LANG=en_US.UTF-8 -""".strip() +""".strip() # noqa BOOT_LOADER_ENTRIES_1 = """ title Red Hat Enterprise Linux (4.18.0-80.1.2.el8_0.x86_64) 8.0 (Ootpa) @@ -242,7 +242,7 @@ grub_users $grub_users grub_arg --unrestricted grub_class kernel -""".strip() +""".strip() # noqa BOOT_LOADER_ENTRIES_2 = """ title Red Hat Enterprise Linux (4.18.0-32.el8.x86_64) 8.0 (Ootpa) @@ -254,8 +254,31 @@ grub_users $grub_users grub_arg --unrestricted grub_class kernel +""".strip() # noqa + +BOOT_LOADER_ENTRIES_3 = """ +title Red Hat Enterprise Linux (4.18.0-305.el8.x86_64) 8.4 (Ootpa) +version 4.18.0-305.el8.x86_64 +linux /vmlinuz-4.18.0-305.el8.x86_64 +initrd /initramfs-4.18.0-305.el8.x86_64.img $tuned_initrd +options $kernelopts $tuned_params +id 
rhel-20210429130346-4.18.0-305.el8.x86_64 +grub_users $grub_users +grub_arg --unrestricted +grub_class kernel """.strip() +GRUBENV_WITH_TUNED_PARAMS = """ +# GRUB Environment Block +saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64 +kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200 noapic +boot_success=0 +boot_indeterminate=2 +tuned_params=transparent_hugepages=never +tuned_initrd= +############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +""".strip() # noqa + def test_grub1_only1(): grub1 = Grub1Config(context_wrap(GRUB1_TEMPLATE)) @@ -350,7 +373,7 @@ def test_grub2_cmdline(): assert result.kernel_initrds['grub_kernels'][0] == 'vmlinuz-3.10.0-327.el7.x86_64' assert result.kernel_initrds['grub_initrds'][0] == 'initramfs-3.10.0-327.el7.x86_64.img' assert result.is_kdump_iommu_enabled is False - assert result.get_grub_cmdlines('/vmlinuz-3.10.0')[0].name == "'Red Hat Enterprise Linux Server (3.10.0-327.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-327.el7.x86_64-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce'" + assert result.get_grub_cmdlines('/vmlinuz-3.10.0')[0].name == "'Red Hat Enterprise Linux Server (3.10.0-327.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-327.el7.x86_64-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce'" # noqa assert result.get_grub_cmdlines('test') == [] assert result.get_grub_cmdlines('') == [] assert len(result.get_grub_cmdlines()) == 2 @@ -384,7 +407,7 @@ def test_grub2_rpms(): assert result.kernel_initrds['grub_kernels'][0] == 'vmlinuz-3.10.0-327.el7.x86_64' assert result.kernel_initrds['grub_initrds'][0] == 'initramfs-3.10.0-327.el7.x86_64.img' assert result.is_kdump_iommu_enabled is False - assert result.get_grub_cmdlines('/vmlinuz-3.10.0')[0].name == "'Red Hat Enterprise Linux Server (3.10.0-327.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-327.el7.x86_64-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce'" + assert result.get_grub_cmdlines('/vmlinuz-3.10.0')[0].name == "'Red Hat Enterprise Linux Server (3.10.0-327.el7.x86_64) 7.2 (Maipo)' --class red --class gnu-linux --class gnu --class os --unrestricted $menuentry_id_option 'gnulinux-3.10.0-327.el7.x86_64-advanced-4f80b3d4-90ba-4545-869c-febdecc586ce'" # noqa assert result.get_grub_cmdlines('test') == [] assert result.get_grub_cmdlines('') == [] assert len(result.get_grub_cmdlines()) == 2 @@ -435,7 +458,7 @@ def test_grub2_grubenv(): grub2 = Grub2Config(context_wrap(GRUB2_TEMPLATE)) grub_ble1 = BLE(context_wrap(BOOT_LOADER_ENTRIES_1)) grub_ble2 = BLE(context_wrap(BOOT_LOADER_ENTRIES_2)) - grub_bles = BootLoaderEntries([grub_ble1, 
grub_ble2], None) + grub_bles = BootLoaderEntries([grub_ble1, grub_ble2], None, None) rhel8 = RedhatRelease(context_wrap(RHEL8)) rhel = RedHatRelease(None, rhel8) rpms = InstalledRpms(context_wrap(INSTALLED_RPMS_V2)) @@ -443,6 +466,23 @@ def test_grub2_grubenv(): result = GrubConf(None, grub2, None, None, grub_bles, rpms, None, sys_firmware, rhel) assert len(result.get_grub_cmdlines()) == 2 assert 'noapic' not in result.get_grub_cmdlines()[1]['cmdline'] + assert 'transparent_hugepages' not in result.get_grub_cmdlines()[0]['cmdline'] + assert result.version == 2 + assert not result.is_efi + + +def test_grub2_grubenv_with_kernelopts(): + grub2 = Grub2Config(context_wrap(GRUB2_TEMPLATE)) + grub_ble3 = BLE(context_wrap(BOOT_LOADER_ENTRIES_3)) + grub_bles = BootLoaderEntries([grub_ble3], None, None) + rhel8 = RedhatRelease(context_wrap(RHEL8)) + rhel = RedHatRelease(None, rhel8) + rpms = InstalledRpms(context_wrap(INSTALLED_RPMS_V2)) + sys_firmware = LsSysFirmware(context_wrap(SYS_FIRMWARE_DIR_NOEFI)) + result = GrubConf(None, grub2, None, None, grub_bles, rpms, None, sys_firmware, rhel) + assert len(result.get_grub_cmdlines()) == 1 + assert 'noapic' not in result.get_grub_cmdlines()[0]['cmdline'] + assert 'transparent_hugepages' not in result.get_grub_cmdlines()[0]['cmdline'] assert result.version == 2 assert not result.is_efi @@ -451,7 +491,24 @@ def test_grub2_boot_loader_entries(): grub2 = Grub2Config(context_wrap(GRUB2_TEMPLATE)) grub_ble1 = BLE(context_wrap(BOOT_LOADER_ENTRIES_1)) grub_ble2 = BLE(context_wrap(BOOT_LOADER_ENTRIES_2)) - grub_bles = BootLoaderEntries([grub_ble1, grub_ble2], None) + grub_bles = BootLoaderEntries([grub_ble1, grub_ble2], None, None) + rhel8 = RedhatRelease(context_wrap(RHEL8)) + rhel = RedHatRelease(None, rhel8) + rpms = InstalledRpms(context_wrap(INSTALLED_RPMS_V2)) + sys_firmware = LsSysFirmware(context_wrap(SYS_FIRMWARE_DIR_EFI)) + result = GrubConf(None, grub2, None, None, grub_bles, rpms, None, sys_firmware, rhel) + assert len(result.get_grub_cmdlines()) == 2 + assert 'noapic' in result.get_grub_cmdlines()[0]['cmdline'] + assert result.version == 2 + assert result.is_efi + + +def test_grub2_boot_loader_entries_with_grubenv(): + grubenv = GrubEnv(context_wrap(GRUBENV_WITH_TUNED_PARAMS)) + grub2 = Grub2Config(context_wrap(GRUB2_TEMPLATE)) + grub_ble1 = BLE(context_wrap(BOOT_LOADER_ENTRIES_1)) + grub_ble3 = BLE(context_wrap(BOOT_LOADER_ENTRIES_3)) + grub_bles = BootLoaderEntries([grub_ble1, grub_ble3], grubenv, None) rhel8 = RedhatRelease(context_wrap(RHEL8)) rhel = RedHatRelease(None, rhel8) rpms = InstalledRpms(context_wrap(INSTALLED_RPMS_V2)) @@ -459,5 +516,8 @@ def test_grub2_boot_loader_entries(): result = GrubConf(None, grub2, None, None, grub_bles, rpms, None, sys_firmware, rhel) assert len(result.get_grub_cmdlines()) == 2 assert 'noapic' in result.get_grub_cmdlines()[0]['cmdline'] + assert 'transparent_hugepages' in result.get_grub_cmdlines()[0]['cmdline'] + assert 'noapic' in result.get_grub_cmdlines()[1]['cmdline'] + assert 'transparent_hugepages' in result.get_grub_cmdlines()[1]['cmdline'] assert result.version == 2 assert result.is_efi diff --git a/insights/parsers/grubenv.py b/insights/parsers/grubenv.py new file mode 100644 index 000000000..72152634d --- /dev/null +++ b/insights/parsers/grubenv.py @@ -0,0 +1,92 @@ +""" +GrubEnv - file ``/boot/grub2/grubenv`` +====================================== + +This parser reads the GRUB environment block file. 
The file is laid out in a +key=value format similar to an ini file, except it doesn't have any headers. + +The parser stores the key/value pairs in itself, since it inherits dict. This +file is only used by Grub 2. In RHEL 8, where BLS is the default, several +variables were added that are referenced in the +``/boot/loader/entries/*.conf`` files. +""" +from insights import get_active_lines, parser, Parser +from insights.parsers import SkipException +from insights.specs import Specs + + +@parser(Specs.grubenv) +class GrubEnv(Parser, dict): + """ + Parses the /boot/grub2/grubenv file and returns a dict + of the grubenv variables. + + Sample output of the file:: + + saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64 + kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200 + boot_success=0 + boot_indeterminate=2 + tuned_params=transparent_hugepages=never + tuned_initrd= + + Attributes: + has_kernelopts (bool): Returns True/False depending on whether the kernelopts key is in the dict. + kernelopts (str): Returns the string of kernelopts from the dict. + has_tuned_params (bool): Returns True/False depending on whether the tuned_params key is in the dict. + tuned_params (str): Returns the string of tuned_params from the dict. + + Examples: + >>> type(grubenv) + <class 'insights.parsers.grubenv.GrubEnv'> + >>> grubenv.has_kernelopts + True + >>> grubenv.kernelopts + 'root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200' + >>> grubenv.has_tuned_params + True + >>> grubenv.tuned_params + 'transparent_hugepages=never' + >>> grubenv['saved_entry'] + '295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64' + """ + def __init__(self, context): + super(GrubEnv, self).__init__(context) + + def parse_content(self, content): + if not content: + raise SkipException("Empty output.") + + data = dict() + for line in get_active_lines(content): + if "=" not in line: + continue + + key, value = line.split("=", 1) + + # Some keys can have empty values, so just skip them.
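+ # (e.g. the "tuned_initrd=" line in the sample docstring above has an empty
+ # value and is skipped here rather than stored)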
+ if not value: + continue + + data[key] = value + + if not data: + raise SkipException("No parsed data.") + + self.update(data) + + @property + def has_kernelopts(self): + return "kernelopts" in self + + @property + def kernelopts(self): + return self.get("kernelopts", "") + + @property + def has_tuned_params(self): + return "tuned_params" in self + + @property + def tuned_params(self): + return self.get("tuned_params", "") diff --git a/insights/parsers/tests/test_grubenv.py b/insights/parsers/tests/test_grubenv.py new file mode 100644 index 000000000..99b053784 --- /dev/null +++ b/insights/parsers/tests/test_grubenv.py @@ -0,0 +1,79 @@ +import doctest + +from insights.parsers import grubenv +from insights.parsers.tests import skip_exception_check +from insights.tests import context_wrap + + +GRUBENV_WITH_TUNED_PARAMS = """ +# GRUB Environment Block +saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64 +kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200 +boot_success=0 +boot_indeterminate=2 +tuned_params=transparent_hugepages=never +tuned_initrd= +############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +""".strip() # noqa + +GRUBENV_WITHOUT_TUNED_PARAMS = """ +# GRUB Environment Block +saved_entry=295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64 +kernelopts=root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200 +boot_success=0 +boot_indeterminate=2 +############################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +""".strip() # noqa + +GRUBENV_RHEL7 = """ +# GRUB Environment Block +saved_entry=Red Hat Enterprise Linux Server (3.10.0-1127.el7.x86_64) 7.8 (Maipo) 
+###################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################### +""".strip() # noqa + + +def test_doc_examples(): + env = { + 'grubenv': grubenv.GrubEnv(context_wrap(GRUBENV_WITH_TUNED_PARAMS)) + } + failed, total = doctest.testmod(grubenv, globs=env) + assert failed == 0 + + +def test_with_tuned_params(): + results = grubenv.GrubEnv(context_wrap(GRUBENV_WITH_TUNED_PARAMS)) + assert results is not None + assert results.has_kernelopts + assert results.has_tuned_params + assert results.kernelopts == "root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200" # noqa + assert results.tuned_params == "transparent_hugepages=never" + assert results['saved_entry'] == "295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64" + assert results['boot_success'] == "0" + assert results['boot_indeterminate'] == "2" + + +def test_without_tuned_params(): + results = grubenv.GrubEnv(context_wrap(GRUBENV_WITHOUT_TUNED_PARAMS)) + assert results is not None + assert results.has_kernelopts + assert not results.has_tuned_params + assert results.kernelopts == "root=/dev/mapper/root_vg-lv_root ro crashkernel=auto resume=/dev/mapper/root_vg-lv_swap rd.lvm.lv=root_vg/lv_root rd.lvm.lv=root_vg/lv_swap console=tty0 console=ttyS0,115200" # noqa + assert results.tuned_params == "" + assert results['saved_entry'] == "295e1ba1696e4fad9e062f096f92d147-4.18.0-305.el8.x86_64" + assert results['boot_success'] == "0" + assert results['boot_indeterminate'] == "2" + + +def test_r7(): + results = grubenv.GrubEnv(context_wrap(GRUBENV_RHEL7)) + assert results is not None + assert not results.has_kernelopts + assert not results.has_tuned_params + assert results.kernelopts == "" + assert results.tuned_params == "" + assert results['saved_entry'] == "Red Hat Enterprise Linux Server (3.10.0-1127.el7.x86_64) 7.8 (Maipo)" + + +def test_skip(): + skip_exception_check(grubenv.GrubEnv, output_str="# test") + skip_exception_check(grubenv.GrubEnv) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 43e1d4647..0e18f0209 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -198,6 +198,7 @@ class Specs(SpecSet): gnocchi_conf = RegistryPoint(filterable=True) gnocchi_metricd_log = RegistryPoint(filterable=True) greenboot_status = RegistryPoint(filterable=True) + grubenv = RegistryPoint() grub_conf = RegistryPoint() grub_config_perms = RegistryPoint() grub_efi_conf = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 0f85acda1..21f222c5b 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ 
-251,6 +251,7 @@ def du_dirs_list(broker):
     gcp_instance_type = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/machine-type --connect-timeout 5", deps=[IsGCP])
     gcp_license_codes = simple_command("/usr/bin/curl -s -H 'Metadata-Flavor: Google' http://metadata.google.internal/computeMetadata/v1/instance/licenses/?recursive=True --connect-timeout 5", deps=[IsGCP])
     greenboot_status = simple_command("/usr/libexec/greenboot/greenboot-status")
+    grubenv = first_file(["/boot/grub2/grubenv", "/boot/efi/EFI/redhat/grubenv"])
     grub_conf = simple_file("/boot/grub/grub.conf")
     grub_config_perms = simple_command("/bin/ls -l /boot/grub2/grub.cfg")  # only RHEL7 and upwards
     grub_efi_conf = simple_file("/boot/efi/EFI/redhat/grub.conf")

From 51d797dd1c758ba0b97d8f946fee8e6ec75ebf4b Mon Sep 17 00:00:00 2001
From: Sachin
Date: Thu, 21 Oct 2021 11:06:51 +0530
Subject: [PATCH 576/892] [CloudCfg] Include full context in the output (#3249)

[CloudCfg] Include full context in the output

Resolves: #2935

Other changes:
- Remove sensitive keys: 'users' and 'system_info'.
- Add filter: the consuming rule is required to add a filter.
- Code cleanup.

Signed-off-by: Sachin Patil
---
 insights/parsers/cloud_cfg.py                 |  6 +-
 insights/parsers/tests/test_cloud_cfg.py      |  8 +-
 insights/specs/__init__.py                    |  2 +-
 insights/specs/datasources/cloud_init.py      | 61 +++++++++----
 insights/tests/datasources/test_cloud_init.py | 87 +++++++++++++++----
 5 files changed, 120 insertions(+), 44 deletions(-)

diff --git a/insights/parsers/cloud_cfg.py b/insights/parsers/cloud_cfg.py
index 635f0df1d..5c5dbfc21 100644
--- a/insights/parsers/cloud_cfg.py
+++ b/insights/parsers/cloud_cfg.py
@@ -12,16 +12,16 @@ class CloudCfg(JSONParser):

     Typical output from the datasource is::

-        {"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]}
+        {"ssh_deletekeys": 1, "network": {"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]}, "debug": {"output": "/var/log/cloud-init-debug.log", "verbose": true}}

     Attributes:
         data(dict): Cloud-init network configuration.
Examples: - >>> cloud_cfg.data['version'] == 1 + >>> cloud_cfg.data['network']['version'] == 1 True - >>> cloud_cfg.data['config'] == [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}] + >>> cloud_cfg.data['network']['config'] == [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}] True """ pass diff --git a/insights/parsers/tests/test_cloud_cfg.py b/insights/parsers/tests/test_cloud_cfg.py index 2a4c819d4..8e1cd8c1a 100644 --- a/insights/parsers/tests/test_cloud_cfg.py +++ b/insights/parsers/tests/test_cloud_cfg.py @@ -6,20 +6,20 @@ CONFIG_1 = """ -{"config": "disabled"} +{"ssh_deletekeys": 1, "network": {"config": "disabled"}} """ CONFIG_2 = """ -{"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]} +{"ssh_deletekeys": 1, "network": {"version": 1, "config": [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}, {"type": "dhcp6"}]}]}, "debug": {"output": "/var/log/cloud-init-debug.log", "verbose": true}} """ def test_cloud_cfg(): result = cloud_cfg.CloudCfg(context_wrap(CONFIG_1)) - assert result.data['config'] == 'disabled' + assert result.data['network'] == {'config': 'disabled'} result = cloud_cfg.CloudCfg(context_wrap(CONFIG_2)) - assert result.data['config'][0]['name'] == 'eth0' + assert result.data['network']['config'][0]['name'] == 'eth0' def test_cloud_cfg_empty(): diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 0e18f0209..d340fbe0c 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -66,7 +66,7 @@ class Specs(SpecSet): cinder_conf = RegistryPoint() cinder_volume_log = RegistryPoint(filterable=True) cloud_init_custom_network = RegistryPoint() - cloud_cfg = RegistryPoint() + cloud_cfg = RegistryPoint(filterable=True) cloud_init_log = RegistryPoint(filterable=True) cluster_conf = RegistryPoint(filterable=True) cmdline = RegistryPoint() diff --git a/insights/specs/datasources/cloud_init.py b/insights/specs/datasources/cloud_init.py index 7065e7abe..02c4cbc24 100644 --- a/insights/specs/datasources/cloud_init.py +++ b/insights/specs/datasources/cloud_init.py @@ -7,6 +7,7 @@ from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource +from insights.core.filters import get_filters from insights.core.spec_factory import DatasourceProvider, simple_file from insights.specs import Specs @@ -21,8 +22,7 @@ class LocalSpecs(Specs): @datasource(LocalSpecs.cloud_cfg_input, HostContext) def cloud_cfg(broker): """ - This datasource provides the network configuration information collected - from ``/etc/cloud/cloud.cfg``. + This datasource provides configuration collected from ``/etc/cloud/cloud.cfg``. 
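+    Only attributes matching the filters registered for ``Specs.cloud_cfg`` are
+    returned, and the sensitive ``users`` and ``system_info`` sections are
+    always removed (see the datasource body below).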
Typical content of ``/etc/cloud/cloud.cfg`` file is:: @@ -34,6 +34,8 @@ def cloud_cfg(broker): - key_two passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + ssh_deletekeys: 1 + network: version: 1 config: @@ -58,37 +60,60 @@ def cloud_cfg(broker): ``insights cat --no-header cloud_cfg`` - Sample data returned includes only the ``network`` portion of the input file in JSON format:: + Sample output in JSON format:: { + "ssh_deletekeys": 1, + "network": { "version": 1, "config": [ - { - "type": "physical", - "name": "eth0", - "subnets": [ - {"type": "dhcp"}, - {"type": "dhcp6"} - ] - } + { + "type": "physical", + "name": "eth0", + "subnets": [ + { + "type": "dhcp" + }, + { + "type": "dhcp6" + } + ] + } ] + }, + "debug": { + "output": "/var/log/cloud-init-debug.log", + "verbose": true + } } Returns: - str: JSON string when the ``network`` parameter includes content, else `None` is returned. + str: JSON string after removing the sensitive information. Raises: SkipComponent: When the path does not exist or any exception occurs. """ relative_path = '/etc/cloud/cloud.cfg' try: + filters = get_filters(Specs.cloud_cfg) content = broker[LocalSpecs.cloud_cfg_input].content - if content: + if content and filters: + result = dict() content = yaml.load('\n'.join(content), Loader=yaml.SafeLoader) - network_config = content.get('network', None) - if network_config: - return DatasourceProvider(content=json.dumps(network_config), relative_path=relative_path) + if isinstance(content, dict): + # remove sensitive data + content.pop('users', None) + content.pop('system_info', None) + # apply filters + for item in filters: + val = content.get(item, None) + if val: + result[item] = val + + if result: + result = dict(sorted(result.items(), key=lambda x: x[0])) + return DatasourceProvider(content=json.dumps(result), relative_path=relative_path) + raise SkipComponent("Invalid YAML format") except Exception as e: raise SkipComponent("Unexpected exception:{e}".format(e=str(e))) - - raise SkipComponent('No network section in yaml') + raise SkipComponent diff --git a/insights/tests/datasources/test_cloud_init.py b/insights/tests/datasources/test_cloud_init.py index b3032cee4..10c23b0b7 100644 --- a/insights/tests/datasources/test_cloud_init.py +++ b/insights/tests/datasources/test_cloud_init.py @@ -2,16 +2,22 @@ import pytest from mock.mock import Mock +from insights.core import filters from insights.core.dr import SkipComponent from insights.core.spec_factory import DatasourceProvider +from insights.specs import Specs from insights.specs.datasources.cloud_init import cloud_cfg, LocalSpecs + CLOUD_CFG = """ users: - name: demo ssh-authorized-keys: - key_one - key_two + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + +ssh_deletekeys: 1 network: version: 1 @@ -23,12 +29,25 @@ - type: dhcp6 """.strip() -CLOUD_CFG_NO_NETWORK = """ +CLOUD_CFG_BAD_INDENT = """ +#cloud-config users: - name: demo ssh-authorized-keys: - key_one - key_two + passwd: $6$j212wezy$7H/1LT4f9/N3wpgNunhsIqtMj62OKiS3nyNwuizouQc3u7MbYCarYeAHWYPYb2FT.lbioDm2RrkJPb9BZMN1O/ + +ssh_deletekeys: 1 + +network: + config: disabled + + system_info: + default_user: + name: user2 + plain_text_passwd: 'someP@assword' + home: /home/user2 """.strip() CLOUD_CFG_BAD = """ @@ -41,22 +60,48 @@ CLOUD_CFG_JSON = { - 'version': 1, - 'config': [ - { - 'type': 'physical', - 'name': 'eth0', - 'subnets': [ - {'type': 'dhcp'}, - {'type': 'dhcp6'} - ] - } - ] + 
"network": { + "version": 1, + "config": [ + { + "type": "physical", + "name": "eth0", + "subnets": [ + { + "type": "dhcp" + }, + { + "type": "dhcp6" + } + ] + } + ] + }, + "ssh_deletekeys": 1, } RELATIVE_PATH = '/etc/cloud/cloud.cfg' +def setup_function(func): + if Specs.cloud_cfg in filters._CACHE: + del filters._CACHE[Specs.cloud_cfg] + if Specs.cloud_cfg in filters.FILTERS: + del filters.FILTERS[Specs.cloud_cfg] + + if func is test_cloud_cfg: + filters.add_filter(Specs.cloud_cfg, ['ssh_deletekeys', 'network', 'debug']) + if func is test_cloud_cfg_no_filter: + filters.add_filter(Specs.cloud_cfg, []) + elif func is test_cloud_cfg_bad: + filters.add_filter(Specs.cloud_cfg, ['not_found']) + + +def teardown_function(func): + if func is test_cloud_cfg_bad or func is test_cloud_cfg: + del filters.FILTERS[Specs.cloud_cfg] + + def test_cloud_cfg(): cloud_init_file = Mock() cloud_init_file.content = CLOUD_CFG.splitlines() @@ -69,19 +114,25 @@ def test_cloud_cfg(): assert result.relative_path == expected.relative_path -def test_cloud_cfg_bad(): +def test_cloud_cfg_no_filter(): cloud_init_file = Mock() - cloud_init_file.content = CLOUD_CFG_BAD.splitlines() + cloud_init_file.content = CLOUD_CFG.splitlines() broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} with pytest.raises(SkipComponent) as e: cloud_cfg(broker) - assert 'Unexpected exception' in str(e) + assert 'SkipComponent' in str(e) -def test_cloud_cfg_no_network(): +def test_cloud_cfg_bad(): cloud_init_file = Mock() - cloud_init_file.content = CLOUD_CFG_NO_NETWORK.splitlines() + cloud_init_file.content = CLOUD_CFG_BAD.splitlines() broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} with pytest.raises(SkipComponent) as e: cloud_cfg(broker) - assert 'No network section in yaml' in str(e) + assert 'Invalid YAML format' in str(e) + + cloud_init_file.content = CLOUD_CFG_BAD_INDENT.splitlines() + broker = {LocalSpecs.cloud_cfg_input: cloud_init_file} + with pytest.raises(SkipComponent) as e: + cloud_cfg(broker) + assert 'Unexpected exception' in str(e) From 64a9f278acf796772221b1c9cf655078160b6160 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Thu, 21 Oct 2021 10:55:26 -0400 Subject: [PATCH 577/892] Update the default exclude in load_components (#3262) * The default exclude was updated to only exclude insights core tests directories, but this causes other tools tests directories that use core to not be excluded. So change the default to exclude any tests directories. 
Signed-off-by: Ryan Blakley --- insights/core/dr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/insights/core/dr.py b/insights/core/dr.py index 54eda7055..0cb093afc 100644 --- a/insights/core/dr.py +++ b/insights/core/dr.py @@ -393,7 +393,7 @@ def _import(path, continue_on_error): raise -def _load_components(path, include=".*", exclude="insights\\..+\\.tests", continue_on_error=True): +def _load_components(path, include=".*", exclude="\\.tests", continue_on_error=True): do_include = re.compile(include).search if include else lambda x: True do_exclude = re.compile(exclude).search if exclude else lambda x: False From d97f575ebe56d3dcdcbd37414a465b4fee3f0b95 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Mon, 25 Oct 2021 14:54:01 -0500 Subject: [PATCH 578/892] fix: Remove old grub specs from client tests (#3263) * These specs have been removed from the JSON and are no longer valid for use in the client tests Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/client/map_components.py | 2 -- .../tests/client/collection_rules/test_map_components.py | 8 ++------ 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/insights/client/map_components.py b/insights/client/map_components.py index 8f4879bb6..e36efb3ee 100644 --- a/insights/client/map_components.py +++ b/insights/client/map_components.py @@ -127,8 +127,6 @@ def _get_component_by_symbolic_name(sname): 'machine_id1': 'machine_id', 'machine_id2': 'machine_id', 'machine_id3': 'machine_id', - 'grub2_efi_grubenv': None, - 'grub2_grubenv': None, 'limits_d': 'limits_conf', 'modprobe_conf': 'modprobe', 'modprobe_d': 'modprobe', diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index e6e303324..fbe1625b6 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -115,9 +115,7 @@ def test_map_rm_conf_to_components_sym_names(): # files should be empty, components should have 1 item # except for these which cannot be mapped to specs. # in which case, components empty and these remain in files - if sym_name in ['grub2_efi_grubenv', - 'grub2_grubenv', - 'redhat_access_proactive_log']: + if sym_name == 'redhat_access_proactive_log': assert len(new_rm_conf['files']) == 1 assert new_rm_conf['files'][0] == sym_name assert len(new_rm_conf['components']) == 0 @@ -169,9 +167,7 @@ def test_map_rm_conf_to_components_raw_cmds_files(): # files should be empty, components should have 1 item # except for these which cannot be mapped to specs. 
# in which case, components empty and these remain in files - if fil['file'] in ['/boot/efi/EFI/redhat/grubenv', - '/boot/grub2/grubenv', - '/var/log/redhat_access_proactive/redhat_access_proactive.log']: + if fil['file'] == '/var/log/redhat_access_proactive/redhat_access_proactive.log': assert len(new_rm_conf['files']) == 1 assert new_rm_conf['files'][0] == fil['file'] assert len(new_rm_conf['components']) == 0 From 3109ae914eb3dfea9b02b004d1c43365f218aebe Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Wed, 27 Oct 2021 10:53:13 +0530 Subject: [PATCH 579/892] Enhanced the certificates_enddate spec to support tower cert (#3258) * Enhanced the certificates_enddate spec to support tower cert Signed-off-by: Rahul * updated the file for the ansible cert Signed-off-by: Rahul * updated the insights archive file for the ansible cert Signed-off-by: Rahul * moved the tower cert file to first item for the command Signed-off-by: Rahul * Updated the certificates_enddate spec by adding a new item for aap cert file in the list Signed-off-by: Rahul * Updated the tower.cert path the end for the default spec file Signed-off-by: Rahul --- insights/specs/default.py | 2 +- insights/specs/insights_archive.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/insights/specs/default.py b/insights/specs/default.py index 21f222c5b..f26a53296 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -127,7 +127,7 @@ class DefaultSpecs(Specs): ceph_osd_tree = simple_command("/usr/bin/ceph osd tree -f json") ceph_s = simple_command("/usr/bin/ceph -s -f json") ceph_v = simple_command("/usr/bin/ceph -v") - certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;", keep_rc=True) + certificates_enddate = simple_command("/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa /etc/tower/tower.cert -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \; -exec echo 'FileName= {}' \;", keep_rc=True) chkconfig = simple_command("/sbin/chkconfig --list") chrony_conf = simple_file("/etc/chrony.conf") chronyc_sources = simple_command("/usr/bin/chronyc sources") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 5e533c99e..45072a363 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -31,7 +31,11 @@ class InsightsArchiveSpecs(Specs): ceph_osd_tree = first_file(["insights_commands/ceph_osd_tree_-f_json-pretty", "insights_commands/ceph_osd_tree_-f_json"]) ceph_s = first_file(["insights_commands/ceph_-s_-f_json-pretty", "insights_commands/ceph_-s_-f_json"]) ceph_v = simple_file("insights_commands/ceph_-v") - certificates_enddate = first_file(["insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName", "insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName"]) + certificates_enddate = first_file([ + "insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_.etc.tower.tower.cert_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName", + 
"insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_.etc.ipa_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName", + "insights_commands/find_.etc.origin.node_.etc.origin.master_.etc.pki_-type_f_-exec_.usr.bin.openssl_x509_-noout_-enddate_-in_-exec_echo_FileName" + ]) chkconfig = simple_file("insights_commands/chkconfig_--list") chronyc_sources = simple_file("insights_commands/chronyc_sources") corosync_cmapctl = glob_file("insights_commands/corosync-cmapctl*") From ca4222e5fd05de01bd2ddc6f13a6f1c81a880319 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 27 Oct 2021 23:50:36 +0800 Subject: [PATCH 580/892] New nginx spec to get ssl certificate expire data (#3259) * New nginx spec to get ssl certificate expire data * Collect all SSL certificates * Update code to make it more efficiency * Return "ExpirationDate" since both string or datetime format will be used by rules Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 2 +- .../ssl_certificate.rst | 3 + insights/collect.py | 7 ++ insights/combiners/ssl_certificate.py | 60 ++++++++++++++ .../combiners/tests/test_ssl_certificate.py | 65 +++++++++++++++ insights/parsers/ssl_certificate.py | 33 +++++++- .../parsers/tests/test_ssl_certificate.py | 20 ++++- insights/specs/__init__.py | 1 + insights/specs/datasources/ssl_certificate.py | 22 +++++ insights/specs/default.py | 1 + .../tests/datasources/test_ssl_certificate.py | 82 ++++++++++++++++++- 11 files changed, 289 insertions(+), 7 deletions(-) create mode 100644 docs/shared_combiners_catalog/ssl_certificate.rst create mode 100644 insights/combiners/ssl_certificate.py create mode 100644 insights/combiners/tests/test_ssl_certificate.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 90331ce9f..5703a75ba 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -79,7 +79,7 @@ insights.specs.datasources.ssl_certificate ------------------------------------------ .. automodule:: insights.specs.datasources.ssl_certificate - :members: httpd_ssl_certificate_file + :members: httpd_ssl_certificate_file, nginx_ssl_certificate_files :show-inheritance: :undoc-members: diff --git a/docs/shared_combiners_catalog/ssl_certificate.rst b/docs/shared_combiners_catalog/ssl_certificate.rst new file mode 100644 index 000000000..1f1ddefff --- /dev/null +++ b/docs/shared_combiners_catalog/ssl_certificate.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.combiners.ssl_certificate + :members: + :show-inheritance: diff --git a/insights/collect.py b/insights/collect.py index 4ec59bb8f..c3fb71384 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -161,6 +161,13 @@ - name: insights.combiners.httpd_conf._HttpdConf enabled: true + # needed for nginx_ssl_cert_enddate + - name: insights.combiners.nginx_conf.NginxConfTree + enabled: true + + - name: insights.combiners.nginx_conf._NginxConf + enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner - name: insights.parsers.lssap enabled: true diff --git a/insights/combiners/ssl_certificate.py b/insights/combiners/ssl_certificate.py new file mode 100644 index 000000000..00d565b07 --- /dev/null +++ b/insights/combiners/ssl_certificate.py @@ -0,0 +1,60 @@ +""" +Combiners for getting the earliest expiry date from a lot of SSL certificates +============================================================================= + +This module contains the following combiners: + +EarliestNginxSSLCertExpireDate - The earliest expire date in a lot of nginx ssl certificates +-------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of nginx ssl certificates. +""" + +from insights.core.dr import SkipComponent +from insights.parsers.ssl_certificate import NginxSSLCertExpireDate +from insights.parsers.certificates_enddate import CertificatesEnddate +from insights.core.plugins import combiner + + +class EarliestSSLCertExpireDate(object): + """ + The base class to get the earliest expiry date from a lot of :class:`insights.parsers.ssl_certificate.CertificateInfo` instances. + + Attributes: + earliest_expire_date (str): The earliest expire date in string format. + ssl_cert_path (str): The SSL certificate path which is expired first. + + Examples: + >>> type(ssl_certs) + + >>> ssl_certs.earliest_expire_date.str + 'Dec 18 07:02:43 2021' + >>> ssl_certs.ssl_cert_path + '/test/b.pem' + """ + def __init__(self, certificate_info_list): + self.earliest_expire_date = None + self.ssl_cert_path = None + for ssl_cert_expiry_date in certificate_info_list: + if (self.earliest_expire_date is None or + (isinstance(ssl_cert_expiry_date.get('notAfter', ''), CertificatesEnddate.ExpirationDate) and + ssl_cert_expiry_date['notAfter'].datetime < self.earliest_expire_date.datetime)): + self.earliest_expire_date = ssl_cert_expiry_date['notAfter'] + self.ssl_cert_path = ssl_cert_expiry_date.cert_path + if self.earliest_expire_date is None: + raise SkipComponent + + +@combiner(NginxSSLCertExpireDate) +class EarliestNginxSSLCertExpireDate(EarliestSSLCertExpireDate): + """ + Combiner to get the earliest expire date in a lot of nginx ssl certificates. 
+ + Examples: + >>> type(nginx_certs) + + >>> nginx_certs.earliest_expire_date.str + 'Dec 18 07:02:43 2021' + >>> nginx_certs.ssl_cert_path + '/test/d.pem' + """ + pass diff --git a/insights/combiners/tests/test_ssl_certificate.py b/insights/combiners/tests/test_ssl_certificate.py new file mode 100644 index 000000000..fb4e9af71 --- /dev/null +++ b/insights/combiners/tests/test_ssl_certificate.py @@ -0,0 +1,65 @@ +import doctest +import pytest + +from insights.combiners import ssl_certificate +from insights.parsers.ssl_certificate import CertificateInfo +from insights.core.dr import SkipComponent +from insights.tests import context_wrap + + +COMMON_SSL_CERT_INFO1 = ''' +notAfter=Jan 18 07:02:43 2038 GMT +''' + +COMMON_SSL_CERT_INFO2 = ''' +notAfter=Dec 18 07:02:43 2021 GMT +''' + +NGINX_CERT_EXPIRE_INFO_1 = ''' +notAfter=Jan 18 07:02:43 2038 GMT +''' + +NGINX_CERT_EXPIRE_INFO_2 = ''' +notAfter=Dec 18 07:02:43 2021 GMT +''' + + +def test_earliest_ssl_expire_date(): + date_info1 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO1, args='/test/a.pem')) + date_info2 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO2, args='/test/b.pem')) + expiredate_obj = ssl_certificate.EarliestSSLCertExpireDate([date_info1, date_info2]) + assert expiredate_obj.earliest_expire_date.str == 'Dec 18 07:02:43 2021' + assert expiredate_obj.ssl_cert_path == '/test/b.pem' + + +def test_earliest_certs_combiner_exception(): + with pytest.raises(SkipComponent): + ssl_certificate.EarliestSSLCertExpireDate([]) + + +def test_doc(): + date_info1 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO1, args='/test/a.pem')) + date_info2 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO2, args='/test/b.pem')) + ssl_certs = ssl_certificate.EarliestSSLCertExpireDate([date_info1, date_info2]) + date_info1 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + date_info2 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_2, args='/test/d.pem')) + nginx_certs = ssl_certificate.EarliestNginxSSLCertExpireDate([date_info1, date_info2]) + globs = { + 'ssl_certs': ssl_certs, + 'nginx_certs': nginx_certs + } + failed, _ = doctest.testmod(ssl_certificate, globs=globs) + assert failed == 0 + + +def test_nginx_ssl_cert_combiner(): + date_info = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + expiredate_obj = ssl_certificate.EarliestNginxSSLCertExpireDate([date_info]) + assert expiredate_obj.earliest_expire_date.str == 'Jan 18 07:02:43 2038' + assert expiredate_obj.ssl_cert_path == '/test/c.pem' + + date_info1 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + date_info2 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_2, args='/test/d.pem')) + expiredate_obj = ssl_certificate.EarliestNginxSSLCertExpireDate([date_info1, date_info2]) + assert expiredate_obj.earliest_expire_date.str == 'Dec 18 07:02:43 2021' + assert expiredate_obj.ssl_cert_path == '/test/d.pem' diff --git a/insights/parsers/ssl_certificate.py b/insights/parsers/ssl_certificate.py index 70846c2fb..cad829759 100644 --- a/insights/parsers/ssl_certificate.py +++ b/insights/parsers/ssl_certificate.py @@ -10,6 +10,8 @@ ========================================================================================================== HttpdSSLCertExpireDate - command ``openssl x509 -in httpd_certificate_path -enddate -noout`` 
============================================================================================ +NginxSSLCertExpireDate - command ``openssl x509 -in nginx_certificate_path -enddate -noout`` +============================================================================================ """ from insights import parser, CommandParser @@ -90,6 +92,11 @@ def parse_content(self, content): if not self: raise SkipException("There is not any info in the cert.") + @property + def cert_path(self): + '''Return the certificate path.''' + return self.args + class CertificateChain(CommandParser, list): """ @@ -207,7 +214,7 @@ class HttpdSSLCertExpireDate(CertificateInfo): Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more details. - It parses the output of ``openssl x509 -in httpd_ssl_certificate_path -enddate -noout`` + It parses the output of ``openssl x509 -in httpd_ssl_certificate_path -enddate -noout``. Sample output of ``openssl x509 -in httpd_certificate_path -enddate -noout``:: @@ -220,3 +227,27 @@ class HttpdSSLCertExpireDate(CertificateInfo): datetime.datetime(2038, 1, 18, 7, 2, 43) """ pass + + +@parser(Specs.nginx_ssl_cert_enddate) +class NginxSSLCertExpireDate(CertificateInfo): + """ + .. note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more + details. + + It parses the output of ``openssl x509 -in nginx_certificate_path -enddate -noout``. + + Sample output of ``openssl x509 -in nginx_certificate_path -enddate -noout``:: + + notAfter=Dec 4 07:04:05 2035 GMT + + Examples: + >>> type(nginx_date_info) + + >>> nginx_date_info['notAfter'].datetime + datetime.datetime(2038, 1, 18, 7, 2, 43) + >>> nginx_date_info.cert_path + '/a/b/c.pem' + """ + pass diff --git a/insights/parsers/tests/test_ssl_certificate.py b/insights/parsers/tests/test_ssl_certificate.py index c588c0232..32812ac23 100644 --- a/insights/parsers/tests/test_ssl_certificate.py +++ b/insights/parsers/tests/test_ssl_certificate.py @@ -80,6 +80,10 @@ notAfter=Jan 18 07:02:43 2038 GMT ''' +NGINX_CERT_EXPIRE_INFO = ''' +notAfter=Jan 18 07:02:43 2038 GMT +''' + def test_certificate_info_exception(): with pytest.raises(ParseException): @@ -98,11 +102,12 @@ def test_certificate_chain_exception(): def test_certificate_info(): - cert = ssl_certificate.CertificateInfo(context_wrap(CERTIFICATE_OUTPUT1)) + cert = ssl_certificate.CertificateInfo(context_wrap(CERTIFICATE_OUTPUT1, args='/a/b/c.pem')) assert cert['issuer'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' assert cert['notBefore'].str == 'Dec 7 07:02:33 2020' assert cert['notAfter'].str == 'Jan 18 07:02:33 2038' assert cert['subject'] == '/C=US/ST=North Carolina/L=Raleigh/O=Katello/OU=SomeOrgUnit/CN=a.b.c.com' + assert cert.cert_path == '/a/b/c.pem' def test_certificates_chain(): @@ -147,14 +152,16 @@ def test_doc(): satellite_ca_certs = ssl_certificate.SatelliteCustomCaChain(context_wrap(SATELLITE_OUTPUT2)) rhsm_katello_default_ca = ssl_certificate.RhsmKatelloDefaultCACert(context_wrap(RHSM_KATELLO_CERT_OUTPUT1)) date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO)) + nginx_date_info = ssl_certificate.NginxSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO, args='/a/b/c.pem')) globs = { 'cert': cert, 'certs': ca_cert, 'satellite_ca_certs': satellite_ca_certs, 'rhsm_katello_default_ca': rhsm_katello_default_ca, - 'date_info': date_info + 'date_info': date_info, + 'nginx_date_info': nginx_date_info } - failed, tested = 
doctest.testmod(ssl_certificate, globs=globs) + failed, _ = doctest.testmod(ssl_certificate, globs=globs) assert failed == 0 @@ -162,3 +169,10 @@ def test_httpd_ssl_cert_parser(): date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO)) assert 'notAfter' in date_info assert date_info['notAfter'].str == 'Jan 18 07:02:43 2038' + + +def test_nginx_ssl_cert_parser(): + date_info = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO, args='/test/c.pem')) + assert 'notAfter' in date_info + assert date_info['notAfter'].str == 'Jan 18 07:02:43 2038' + assert date_info.cert_path == '/test/c.pem' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index d340fbe0c..ea52f00d4 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -410,6 +410,7 @@ class Specs(SpecSet): nfs_exports_d = RegistryPoint(multi_output=True) nfs_exports = RegistryPoint() nginx_conf = RegistryPoint(multi_output=True) + nginx_ssl_cert_enddate = RegistryPoint(multi_output=True) nmcli_conn_show = RegistryPoint() nmcli_dev_show = RegistryPoint() nmcli_dev_show_sos = RegistryPoint(multi_output=True) diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py index 3b9c01319..4f6dfa5ea 100644 --- a/insights/specs/datasources/ssl_certificate.py +++ b/insights/specs/datasources/ssl_certificate.py @@ -3,6 +3,7 @@ """ from insights.combiners.httpd_conf import HttpdConfTree +from insights.combiners.nginx_conf import NginxConfTree from insights.core.context import HostContext from insights.core.dr import SkipComponent from insights.core.plugins import datasource @@ -27,3 +28,24 @@ def httpd_ssl_certificate_file(broker): if ssl_cert and ssl_cert[0].value: return str(ssl_cert[0].value) raise SkipComponent + + +@datasource(NginxConfTree, HostContext) +def nginx_ssl_certificate_files(broker): + """ + Get the nginx SSL certificate file path configured by "ssl_certificate" + + Arguments: + broker: the broker object for the current session + + Returns: + str: Returns the SSL certificate file path configured by "ssl_certificate" + + Raises: + SkipComponent: Raised if "ssl_certificate" directive isn't found + """ + conf = broker[NginxConfTree] + ssl_certs = conf.find('ssl_certificate') + if ssl_certs: + return [str(ssl_cert.value) for ssl_cert in ssl_certs] + raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index f26a53296..089bd2c10 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -468,6 +468,7 @@ def md5chk_file_list(broker): "/opt/rh/nginx*/root/etc/nginx/*.conf", "/opt/rh/nginx*/root/etc/nginx/conf.d/*", "/opt/rh/nginx*/root/etc/nginx/default.d/*", "/etc/opt/rh/rh-nginx*/nginx/*.conf", "/etc/opt/rh/rh-nginx*/nginx/conf.d/*", "/etc/opt/rh/rh-nginx*/nginx/default.d/*" ]) + nginx_ssl_cert_enddate = foreach_execute(ssl_certificate.nginx_ssl_certificate_files, "/usr/bin/openssl x509 -in %s -enddate -noout") nmcli_conn_show = simple_command("/usr/bin/nmcli conn show") nmcli_dev_show = simple_command("/usr/bin/nmcli dev show") nova_api_log = first_file(["/var/log/containers/nova/nova-api.log", "/var/log/nova/nova-api.log"]) diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py index 9e90f92e5..a65ff42d3 100644 --- a/insights/tests/datasources/test_ssl_certificate.py +++ b/insights/tests/datasources/test_ssl_certificate.py @@ -3,7 +3,10 @@ from insights.core.dr import SkipComponent from 
insights.tests import context_wrap from insights.combiners.httpd_conf import _HttpdConf, HttpdConfTree -from insights.specs.datasources.ssl_certificate import httpd_ssl_certificate_file +from insights.combiners.nginx_conf import _NginxConf, NginxConfTree +from insights.specs.datasources.ssl_certificate import ( + httpd_ssl_certificate_file, nginx_ssl_certificate_files +) HTTPD_CONF = """ @@ -42,6 +45,48 @@ """.strip() +NGINX_CONF = """ +http { + listen 80; + listen 443; + include /etc/nginx/conf.d/*.conf; +} +""".strip() + +NGINX_SSL_CONF = """ +server { + ssl_certificate "/a/b/c.rsa.crt"; + ssl_certificate_key "/a/b/c.rsa.key"; + + ssl_certificate "/a/b/c.cecdsa.crt"; + ssl_certificate_key "/a/b/c.cecdsa.key"; +} +""".strip() + +NGINX_SSL_CONF_MULTIPLE_SERVERS = """ +server { + listen 443 ssl; + server_name www.example.com; + ssl_certificate "/a/b/www.example.com.crt"; + ssl_certificate_key "/a/b/www.example.com.key"; + ssl_certificate "/a/b/www.example.com.cecdsa.crt"; + ssl_certificate_key "/a/b/www.example.com.cecdsa.key"; +} + +server { + listen 443 ssl; + server_name www.example.org; + ssl_certificate "/a/b/www.example.org.crt"; + ssl_certificate_key "/a/b/www.example.org.key"; +} +""".strip() + +NGINX_CONF_WITHOUT_SSL = """ +server { + server_name 'a.b.c.com'; +} +""".strip() + def test_httpd_certificate(): conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) @@ -55,7 +100,29 @@ def test_httpd_certificate(): assert result == '/etc/pki/katello/certs/katello-apache.crt' -def test_exception(): +def test_nginx_certificate(): + conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf')) + conf2 = _NginxConf(context_wrap(NGINX_SSL_CONF, path='/etc/nginx/conf.d/ssl.conf')) + conf_tree = NginxConfTree([conf1, conf2]) + + broker = { + NginxConfTree: conf_tree + } + result = nginx_ssl_certificate_files(broker) + assert result == ['/a/b/c.rsa.crt', '/a/b/c.cecdsa.crt'] + + conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf')) + conf2 = _NginxConf(context_wrap(NGINX_SSL_CONF_MULTIPLE_SERVERS, path='/etc/nginx/conf.d/ssl.conf')) + conf_tree = NginxConfTree([conf1, conf2]) + + broker = { + NginxConfTree: conf_tree + } + result = nginx_ssl_certificate_files(broker) + assert result == ['/a/b/www.example.com.crt', '/a/b/www.example.com.cecdsa.crt', '/a/b/www.example.org.crt'] + + +def test_httpd_ssl_cert_exception(): conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) conf2 = _HttpdConf(context_wrap(HTTPD_CONF_WITHOUT_SSL, path='/etc/httpd/conf.d/no_ssl.conf')) conf_tree = HttpdConfTree([conf1, conf2]) @@ -71,3 +138,14 @@ def test_exception(): with pytest.raises(SkipComponent): httpd_ssl_certificate_file(broker1) httpd_ssl_certificate_file(broker2) + + +def test_nginx_ssl_cert_exception(): + conf1 = _NginxConf(context_wrap(NGINX_CONF, path='/etc/nginx/nginx.conf')) + conf2 = _NginxConf(context_wrap(NGINX_CONF_WITHOUT_SSL, path='/etc/nginx/conf.d/no_ssl.conf')) + conf_tree = NginxConfTree([conf1, conf2]) + broker1 = { + NginxConfTree: conf_tree + } + with pytest.raises(SkipComponent): + nginx_ssl_certificate_files(broker1) From 09012e3b2ccf861bf0b426b61296bd951cce255d Mon Sep 17 00:00:00 2001 From: Alec Cohan <44471274+aleccohan@users.noreply.github.com> Date: Wed, 27 Oct 2021 13:59:53 -0400 Subject: [PATCH 581/892] Fix: Update verification code with an additional fix (#3266) * Update verification code with an additional fix Signed-off-by: Alec Cohan * linting Signed-off-by: Alec Cohan * Fix PR based on 
feedback Signed-off-by: Alec Cohan * Remove testing playbook Signed-off-by: Alec Cohan --- .../apps/ansible/playbook_verifier/__init__.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py index cb174b1b9..5d387bb54 100644 --- a/insights/client/apps/ansible/playbook_verifier/__init__.py +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -45,6 +45,14 @@ def __str__(self): return self.message +def decodeSignature(encodedSignature): + try: + decodedSignature = base64.b64decode(encodedSignature) + return decodedSignature + except: + raise PlaybookVerificationError(message='VERIFICATION FAILED: Error Decoding Signature') + + def createSnippetHash(snippet): """ Function that creates and returns a hash of the snippet given to the function. @@ -101,8 +109,7 @@ def excludeDynamicElements(snippet): def executeVerification(snippet, encodedSignature): gpg = gnupg.GPG(gnupghome=constants.insights_core_lib_dir) snippetHash = createSnippetHash(snippet) - - decodedSignature = base64.b64decode(encodedSignature) + decodedSignature = decodeSignature(encodedSignature) # load public key getPublicKey(gpg) @@ -144,6 +151,8 @@ def verify(playbook, skipVerify=False): logger.info('Playbook Verification has started') if not skipVerify: + if not playbook: + raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Playbook is empty") for snippet in playbook: verified = verifyPlaybookSnippet(snippet) From 9331bb7e7f260ebc0039564d224b5ce357fc4579 Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 27 Oct 2021 14:21:05 -0500 Subject: [PATCH 582/892] Feat: Add spec filtering to context_wrap for unit tests (#3265) * Add capability to pass in a filter and automatically filter the data before building the Context object * This enables parser/combiner CI testing using filters Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/tests/__init__.py | 4 ++++ insights/tests/test_context_wrap.py | 32 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 insights/tests/test_context_wrap.py diff --git a/insights/tests/__init__.py b/insights/tests/__init__.py index 871b3a813..82b06bdcd 100644 --- a/insights/tests/__init__.py +++ b/insights/tests/__init__.py @@ -152,6 +152,7 @@ def context_wrap(lines, machine_id="machine_id", strip=True, split=True, + filtered_spec=None, **kwargs): if isinstance(lines, six.string_types): if strip: @@ -159,6 +160,9 @@ def context_wrap(lines, if split: lines = lines.splitlines() + if filtered_spec is not None and filtered_spec in filters.FILTERS: + lines = [l for l in lines if any([f in l for f in filters.FILTERS[filtered_spec]])] + return Context(content=lines, path=path, hostname=hostname, release=release, version=version.split("."), diff --git a/insights/tests/test_context_wrap.py b/insights/tests/test_context_wrap.py new file mode 100644 index 000000000..381391606 --- /dev/null +++ b/insights/tests/test_context_wrap.py @@ -0,0 +1,32 @@ +from insights.core.filters import add_filter +from insights.specs import Specs +from insights.tests import context_wrap, DEFAULT_RELEASE, DEFAULT_HOSTNAME + +DATA = """ +One +Two +Three +Four +""" +DATA_FILTERED = """ +Two +Four +""" + +add_filter(Specs.messages, ["Two", "Four", "Five"]) + + +def test_context_wrap_unfiltered(): + context = context_wrap(DATA) + assert context is not None + assert 
context.content == DATA.strip().splitlines() + assert context.release == DEFAULT_RELEASE + assert context.hostname == DEFAULT_HOSTNAME + assert context.version == ["-1", "-1"] + assert context.machine_id == "machine_id" + + +def test_context_wrap_filtered(): + context = context_wrap(DATA, filtered_spec=Specs.messages) + assert context is not None + assert context.content == DATA_FILTERED.strip().splitlines() From ed8dedc611c2fedf37ccb2a9ce57d81063c903ea Mon Sep 17 00:00:00 2001 From: Bob Fahr <20520336+bfahr@users.noreply.github.com> Date: Wed, 3 Nov 2021 11:08:54 -0500 Subject: [PATCH 583/892] Fix: Fix issue in client test due to spec change (#3275) * PR #3258 changed the certificates_enddate spec that caused this client test to fail * This updates the command wrapping so that the test will work Signed-off-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> --- insights/tests/client/collection_rules/test_map_components.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/tests/client/collection_rules/test_map_components.py b/insights/tests/client/collection_rules/test_map_components.py index fbe1625b6..f30a54da1 100644 --- a/insights/tests/client/collection_rules/test_map_components.py +++ b/insights/tests/client/collection_rules/test_map_components.py @@ -214,13 +214,13 @@ def test_log_long_key(logger_warning): Verify the conversion table is logged with proper spacing, wrapping, and unconverted specs are not logged ''' - rm_conf = {'commands': ["/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;", + rm_conf = {'commands': ["/usr/bin/find /etc/origin/node /etc/origin/master /etc/pki /etc/ipa /etc/tower/tower.cert -type f -exec /usr/bin/openssl x509 -noout -enddate -in '{}' \\; -exec echo 'FileName= {}' \\;", "/usr/bin/md5sum /etc/pki/product/69.pem"], 'files': ["/etc/sysconfig/virt-who", "/etc/yum.repos.d/fedora-cisco-openh264.repo", "krb5_conf_d"]} map_rm_conf_to_components(rm_conf, uploader_json) - logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n /etc/origin/master /etc/pki /etc/ipa -type f\n -exec /usr/bin/openssl x509 -noout -enddate -in\n '{}' \\; -exec echo 'FileName= {}' \\;") + logger_warning.assert_any_call("- /usr/bin/find /etc/origin/node => certificates_enddate\n /etc/origin/master /etc/pki /etc/ipa\n /etc/tower/tower.cert -type f -exec\n /usr/bin/openssl x509 -noout -enddate -in '{}'\n \\; -exec echo 'FileName= {}' \\;") logger_warning.assert_any_call("- /usr/bin/md5sum /etc/pki/product/69.pem => md5chk_files") logger_warning.assert_any_call("- /etc/sysconfig/virt-who => sysconfig_virt_who") logger_warning.assert_any_call("- krb5_conf_d => krb5") From 7cc7c5cd9cbe2cc1b3dffe5833a14bd5a818d81d Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Wed, 3 Nov 2021 17:15:37 -0400 Subject: [PATCH 584/892] Fix: Add raise SkipException to ConfigCombiner for missing main_file (#3277) * Moved find_main into the ConfigCombiner class since it's not used anywhere else, and find_matches is already in the class. * Added a raise SkipException to find_main, so if no files are returned in the loop it doesn't return None. 
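A minimal, self-contained sketch of the new behavior (``SkipException`` here
is a stand-in for the exception class insights raises):

    class SkipException(Exception):
        pass

    def find_main(confs, name):
        for c in confs:
            if c.file_name == name:
                return c
        # previously the loop could fall through and return None, deferring
        # the failure to a later attribute access; now it fails fast
        raise SkipException("The main conf {main_conf} doesn't exist.".format(main_conf=name))

    try:
        find_main([], "httpd.conf")
    except SkipException as e:
        print(e)  # The main conf httpd.conf doesn't exist.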
* Fixes #3276 Signed-off-by: Ryan Blakley --- insights/core/__init__.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/insights/core/__init__.py b/insights/core/__init__.py index 001eb5a18..3b47c5561 100644 --- a/insights/core/__init__.py +++ b/insights/core/__init__.py @@ -129,12 +129,6 @@ def default_parser_deserializer(_type, data): return obj -def find_main(confs, name): - for c in confs: - if c.file_name == name: - return c - - def flatten(docs, pred): """ Replace include nodes with their config trees. Allows the same files to be @@ -370,7 +364,7 @@ class ConfigCombiner(ConfigComponent): """ def __init__(self, confs, main_file, include_finder): self.confs = confs - self.main = find_main(confs, main_file) + self.main = self.find_main(main_file) server_root = self.conf_path # Set the children of all include directives to the contents of the @@ -391,6 +385,13 @@ def find_matches(self, confs, pattern): results = [c for c in confs if fnmatch(c.file_path, pattern)] return sorted(results, key=operator.attrgetter("file_name")) + def find_main(self, name): + for c in self.confs: + if c.file_name == name: + return c + + raise SkipException("The main conf {main_conf} doesn't exist.".format(main_conf=name)) + class LegacyItemAccess(object): """ From 5c7cb4130244bf6ad27115dd29043b12b2bffeef Mon Sep 17 00:00:00 2001 From: Jakub Svoboda Date: Wed, 3 Nov 2021 21:16:20 +0000 Subject: [PATCH 585/892] Feat: Spec & parser for nss-rhel7.config (#3269) Signed-off-by: Jakub Svoboda --- docs/shared_parsers_catalog/nss_rhel7.rst | 3 ++ insights/parsers/nss_rhel7.py | 41 +++++++++++++++++++++++ insights/parsers/tests/test_nss_rhel7.py | 40 ++++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 86 insertions(+) create mode 100644 docs/shared_parsers_catalog/nss_rhel7.rst create mode 100644 insights/parsers/nss_rhel7.py create mode 100644 insights/parsers/tests/test_nss_rhel7.py diff --git a/docs/shared_parsers_catalog/nss_rhel7.rst b/docs/shared_parsers_catalog/nss_rhel7.rst new file mode 100644 index 000000000..04d179942 --- /dev/null +++ b/docs/shared_parsers_catalog/nss_rhel7.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.nss_rhel7 + :members: + :show-inheritance: diff --git a/insights/parsers/nss_rhel7.py b/insights/parsers/nss_rhel7.py new file mode 100644 index 000000000..fc862fa19 --- /dev/null +++ b/insights/parsers/nss_rhel7.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +""" +NssRhel7 - file ``/etc/pki/nss-legacy/nss-rhel7.config`` +======================================================== +""" + +from insights import parser, SysconfigOptions +from insights.specs import Specs + + +@parser(Specs.nss_rhel7) +class NssRhel7(SysconfigOptions): + """ + This parser reads the ``/etc/pki/nss-legacy/nss-rhel7.config`` + file. It uses the ``SysconfigOptions`` parser class to convert the file into + a dictionary of options. It also provides the ``config`` property as a helper + to retrieve the ``config`` variable. + + Attributes: + config (union[str, None]): The value of the ``config`` variable if it exists, else None. 
+ + Sample Input:: + + # To re-enable legacy algorithms, edit this file + # Note that the last empty line in this file must be preserved + library= + name=Policy + NSS=flags=policyOnly,moduleDB + config="disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0" + + + Examples: + >>> 'config' in nss_rhel7 + True + >>> nss_rhel7.config + 'disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0' + """ + + @property + def config(self): + return self.data.get("config", None) diff --git a/insights/parsers/tests/test_nss_rhel7.py b/insights/parsers/tests/test_nss_rhel7.py new file mode 100644 index 000000000..a67937cde --- /dev/null +++ b/insights/parsers/tests/test_nss_rhel7.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- + +import doctest + +from insights.parsers import nss_rhel7 +from insights.parsers.nss_rhel7 import NssRhel7 +from insights.tests import context_wrap + +NSS_RHEL7 = """ +# To re-enable legacy algorithms, edit this file +# Note that the last empty line in this file must be preserved +library= +name=Policy +NSS=flags=policyOnly,moduleDB +config="disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0" +""" + + +def test_nss_rhel7(): + nss_rhel7 = NssRhel7(context_wrap(NSS_RHEL7)) + assert "config" in nss_rhel7 + assert "asdf" not in nss_rhel7 + assert ( + nss_rhel7.config == "disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0" + ) + assert ( + nss_rhel7["config"] == "disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0" + ) + assert ( + nss_rhel7.get("config") == "disallow=MD5:RC4 allow=DH-MIN=1023:DSA-MIN=1023:RSA-MIN=1023:TLS-VERSION-MIN=tls1.0" + ) + assert nss_rhel7.get("asdf") is None + + +def test_doc_examples(): + env = { + "nss_rhel7": NssRhel7(context_wrap(NSS_RHEL7)), + } + failed, total = doctest.testmod(nss_rhel7, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index ea52f00d4..de3dc872e 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -422,6 +422,7 @@ class Specs(SpecSet): nova_uid = RegistryPoint() nova_migration_uid = RegistryPoint() nscd_conf = RegistryPoint(filterable=True) + nss_rhel7 = RegistryPoint() nsswitch_conf = RegistryPoint(filterable=True) ntp_conf = RegistryPoint() ntpq_leap = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 089bd2c10..100e258d8 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -481,6 +481,7 @@ def md5chk_file_list(broker): nova_crontab = simple_command("/usr/bin/crontab -l -u nova") nova_uid = simple_command("/usr/bin/id -u nova") nscd_conf = simple_file("/etc/nscd.conf") + nss_rhel7 = simple_file("/etc/pki/nss-legacy/nss-rhel7.config") nsswitch_conf = simple_file("/etc/nsswitch.conf") ntp_conf = simple_file("/etc/ntp.conf") ntpq_leap = simple_command("/usr/sbin/ntpq -c 'rv 0 leap'") From 3af9334c6fe0a58843a44a4a5b04073e5a80ae03 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Thu, 4 Nov 2021 05:17:56 +0800 Subject: [PATCH 586/892] fix: check 'tab' in lines of ntp.conf (#3272) Signed-off-by: Xiangce Liu --- insights/parsers/system_time.py | 2 +- insights/parsers/tests/test_system_time.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/insights/parsers/system_time.py b/insights/parsers/system_time.py index 1f4c0bc0b..389098b3d 100644 --- a/insights/parsers/system_time.py +++ b/insights/parsers/system_time.py @@ 
-66,7 +66,7 @@ class NTPConfParser(Parser): def parse_content(self, content): data = {} for line in get_active_lines(content): - if ' ' in line: + if ' ' in line or '\t' in line: k, rest = line.split(None, 1) if k in data: data[k].append(rest) diff --git a/insights/parsers/tests/test_system_time.py b/insights/parsers/tests/test_system_time.py index 7d112cea2..4b53ab003 100644 --- a/insights/parsers/tests/test_system_time.py +++ b/insights/parsers/tests/test_system_time.py @@ -146,6 +146,17 @@ tinker step 0.4 """ +NTP_CONF_TAB = """ +server\tntp1.inta.ok\tprefer +server\tntp2.inta.ok +server\tntp3.inta.ok + +driftfile /var/lib/ntp/drift +logfile /var/lib/ntp/log +# Set tinker panic value (0 recommended by redhat) +tinker panic 0 +""" + def test_chrony_conf(): ntp_obj = system_time.ChronyConf(context_wrap(CHRONY_CONF)) @@ -354,3 +365,13 @@ def test_ntp_get_tinker(): assert ntp_obj.get_last('tinker', 'step') == '0.4' assert ntp_obj.get_last('tinker', param='step', default='1') == '0.4' # Value from config assert ntp_obj.get_last('tinker', 'step', '1') == '0.4' + + +def test_ntp_conf_tab(): + ntp_obj = system_time.NTPConf(context_wrap(NTP_CONF_TAB)) + assert hasattr(ntp_obj, 'data') + assert 'tinker' in ntp_obj.data + assert hasattr(ntp_obj, 'servers') + assert ntp_obj.servers[0] == 'ntp1.inta.ok\tprefer' + assert ntp_obj.servers[1] == 'ntp2.inta.ok' + assert ntp_obj.servers[2] == 'ntp3.inta.ok' From cbc1731f6444ec0ea9d7e584bf1eaafa6dbb5146 Mon Sep 17 00:00:00 2001 From: Jakub Svoboda Date: Wed, 3 Nov 2021 21:23:34 +0000 Subject: [PATCH 587/892] Feat: Spec & parser for 389-ds TLS-related settings. (#3264) Signed-off-by: Jakub Svoboda --- .../dse_ldif_simple.rst | 3 + insights/parsers/dse_ldif_simple.py | 85 ++++++++ .../parsers/tests/test_dse_ldif_simple.py | 197 ++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 287 insertions(+) create mode 100644 docs/shared_parsers_catalog/dse_ldif_simple.rst create mode 100644 insights/parsers/dse_ldif_simple.py create mode 100644 insights/parsers/tests/test_dse_ldif_simple.py diff --git a/docs/shared_parsers_catalog/dse_ldif_simple.rst b/docs/shared_parsers_catalog/dse_ldif_simple.rst new file mode 100644 index 000000000..5251dd929 --- /dev/null +++ b/docs/shared_parsers_catalog/dse_ldif_simple.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dse_ldif_simple + :members: + :show-inheritance: diff --git a/insights/parsers/dse_ldif_simple.py b/insights/parsers/dse_ldif_simple.py new file mode 100644 index 000000000..7633fcea4 --- /dev/null +++ b/insights/parsers/dse_ldif_simple.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +""" +DseLdifSimple - file ``/etc/dirsrv/*/dse.ldif`` +=============================================== +""" + + +from insights import Parser, parser +from insights.specs import Specs + + +@parser(Specs.dse_ldif) +class DseLdifSimple(Parser, dict): + """ + Parse the attributes out of the filtered lines of the dse.ldif file into + dict[attribute name] = attribute value. + + Please note the difference between the LDIF format and this parser. + + The file dse.ldif is in the LDIF format (see ``man 5 ldif``). LDIF contains + multi-row records where each record is identified by a ``dn:`` line ("dn" + as in "distinguished name") and the record's other lines are attributes. + Attributes may also have base64-encoded values, multiline values, and + file-stored values. + + The parser processes lines independently without tracking what the line is + and which record it belongs to. 
Only plaintext single-line values are
+    supported.
+
+    This allows for filterable, efficient, and privacy-preserving processing of
+    attributes whose names are valid only in a single distinguished name, and
+    whose values are single-line plaintext only.
+
+    Sample input data::
+
+        dn: cn=config
+        nsslapd-securePort: 636
+        nsslapd-security: on
+
+        dn: cn=encryption,cn=config
+        sslVersionMin: SSLv3
+        sslVersionMax: TLS1.1
+        nsSSL3: on
+
+    Examples:
+        >>> type(dse_ldif_simple)
+        <class 'insights.parsers.dse_ldif_simple.DseLdifSimple'>
+        >>> dse_ldif_simple["nsslapd-security"]
+        'on'
+        >>> "sslVersionMin" in dse_ldif_simple
+        True
+        >>> dict(dse_ldif_simple)["nsSSL3"]
+        'on'
+
+    """
+
+    def parse_content(self, content):
+        data = {}
+        for line in content:
+            if line.startswith("#"):
+                # lines beginning with # are ignored
+                continue
+            if ":" not in line:
+                # only attribute: value lines supported
+                continue
+            if line.startswith(" "):
+                # multi-line values not supported
+                continue
+            attr_name, attr_value = line.split(":", 1)
+            if attr_value.startswith(":"):
+                # base64-encoded values not supported
+                continue
+            if attr_value.startswith("<"):
+                # file-backed values not supported
+                continue
+
+            # Whitespace at either side of the value has no effect.
+            attr_value = attr_value.strip()
+
+            # If the same attribute is declared multiple times, the first
+            # instance takes effect; the rest are ignored by the 389
+            # Directory Server.
+            if attr_name not in data:
+                data[attr_name] = attr_value
+        self.update(data)
diff --git a/insights/parsers/tests/test_dse_ldif_simple.py b/insights/parsers/tests/test_dse_ldif_simple.py
new file mode 100644
index 000000000..bb8cff834
--- /dev/null
+++ b/insights/parsers/tests/test_dse_ldif_simple.py
@@ -0,0 +1,197 @@
+# -*- coding: utf-8 -*-
+
+import doctest
+
+from insights import add_filter
+from insights.parsers import dse_ldif_simple
+from insights.parsers.dse_ldif_simple import DseLdifSimple
+from insights.specs import Specs
+from insights.tests import context_wrap
+
+add_filter(
+    Specs.dse_ldif, [
+        "nsslapd-security",
+        "sslVersionMin",
+        "sslVersionMax",
+        "nsSSL3",
+        "cn: config",  # Note that this can serve as a canary for knowing whether the spec is collected.
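+        # The dse_ldif spec is filterable: only lines matching one of the
+        # filter strings above survive collection, so anything else never
+        # reaches the parser.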
+ ] +) + +DSE_LDIF_REAL_EXAMPLE = """ + +dn: cn=config +cn: config +objectClass: top +objectClass: extensibleObject +objectClass: nsslapdConfig +nsslapd-schemadir: /etc/dirsrv/slapd-dir/schema +nsslapd-lockdir: /var/lock/dirsrv/slapd-dir +nsslapd-tmpdir: /tmp +nsslapd-certdir: /etc/dirsrv/slapd-dir +nsslapd-ldifdir: /var/lib/dirsrv/slapd-dir/ldif +nsslapd-bakdir: /var/lib/dirsrv/slapd-dir/bak +nsslapd-rundir: /var/run/dirsrv +nsslapd-instancedir: /usr/lib64/dirsrv/slapd-dir +nsslapd-accesslog: /var/log/dirsrv/slapd-dir/access +nsslapd-localhost: testinstance.local +nsslapd-port: 389 +nsslapd-localuser: dirsrv +nsslapd-errorlog: /var/log/dirsrv/slapd-dir/errors +nsslapd-auditlog: /var/log/dirsrv/slapd-dir/audit +nsslapd-auditfaillog: /var/log/dirsrv/slapd-dir/audit +nsslapd-rootdn: cn=Directory Manager +nsslapd-ldapifilepath: /var/run/slapd-dir.socket +nsslapd-ldapilisten: off +nsslapd-ldapiautobind: off +nsslapd-ldapimaprootdn: cn=Directory Manager +nsslapd-ldapimaptoentries: off +nsslapd-ldapiuidnumbertype: uidNumber +nsslapd-ldapigidnumbertype: gidNumber +nsslapd-ldapientrysearchbase: dc=testinstance,dc=local +nsslapd-defaultnamingcontext: dc=testinstance,dc=local +aci: (targetattr="*")(version 3.0; acl "Configuration Administrators Group"; a + llow (all) groupdn="ldap:///cn=Configuration Administrators,ou=Groups,ou=Topo + logyManagement,o=NetscapeRoot";) +aci: (targetattr="*")(version 3.0; acl "Configuration Administrator"; allow (a + ll) userdn="ldap:///uid=admin,ou=Administrators,ou=TopologyManagement,o=Netsc + apeRoot";) +aci: (targetattr = "*")(version 3.0; acl "SIE Group"; allow (all) groupdn = "l + dap:///cn=slapd-dir,cn=Red Hat Directory Server,cn=Server Group,cn=testinstan + ce.local,ou=testinstance.local,o=NetscapeRoot";) +modifiersName: cn=directory manager +modifyTimestamp: 20211015231914Z +nsslapd-securePort: 636 +nsslapd-security: on +nsslapd-rootpw: {SSHA512}la6KbVGTR3XKHN6CoZ/PcYvOrS7qGAVSC9kjvGtyuPzSJGbHXReTs + FUBF6QnP0jHAONEx4784x6PNPcMzTOdpoJw0gOQkXKM +numSubordinates: 10 + + +dn: cn=monitor +objectClass: top +objectClass: extensibleObject +cn: monitor +aci: (target ="ldap:///cn=monitor*")(targetattr != "aci || connection")(versio + n 3.0; acl "monitor"; allow( read, search, compare ) userdn = "ldap:///anyone + ";) +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +createTimestamp: 20211015215626Z +modifyTimestamp: 20211015215626Z +numSubordinates: 3 + +dn: cn=encryption,cn=config +objectClass: top +objectClass: nsEncryptionConfig +cn: encryption +nsSSLSessionTimeout: 0 +nsSSLClientAuth: allowed +sslVersionMin: TLS1.0 +sslVersionMax: TLS1.1 +nsSSL3: on +nsTLS1: on +allowWeakCipher: on +nsSSL3Ciphers: +all +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +createTimestamp: 20211015215626Z +modifyTimestamp: 20211015231142Z +CACertExtractFile: /etc/dirsrv/slapd-dir/server-cert.pem +numSubordinates: 1 + +dn: cn=features,cn=config +objectClass: top +objectClass: nsContainer +cn: features +numSubordinates: 5 + +""" + +DSE_LDIF_SMOKE = """ +cn: config +nsslapd-security: on +sslVersionMin: TLS1.0 +sslVersionMax: TLS1.1 +nsSSL3: on +# a comment : with a colon +""" + +# This is purely for coverage testing. Real deployments don't use these features +# for these attributes. 
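+# A short LDIF syntax reminder for the unsupported forms exercised below:
+#   "attr:: <base64>" is a base64-encoded value, "attr:< <URL>" is a value
+#   loaded from a URL, and a leading space continues the previous line's
+#   value.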
+DSE_LDIF_COVERAGE = """ +# a comment +nsslapd-security: o + n +sslVersionMax:: VExTMS4yIA== +sslVersionMin:< file:///tmp/somefile +nsSSL3: on +""" + +DSE_LDIF_DOCTEST = """ +dn: cn=config +nsslapd-securePort: 636 +nsslapd-security: on + +dn: cn=encryption,cn=config +sslVersionMin: SSLv3 +sslVersionMax: TLS1.1 +nsSSL3: on +""" + + +def test_dse_ldif_smoke(): + dse_ldif_simple = DseLdifSimple(context_wrap(DSE_LDIF_SMOKE)) + assert None is dse_ldif_simple.get("asdf") + assert len(dse_ldif_simple) == 5 + expected = { + "cn": "config", # just a canary to detect spec collection status + "nsslapd-security": "on", + "sslVersionMin": "TLS1.0", + "sslVersionMax": "TLS1.1", + "nsSSL3": "on", + } + assert dict(dse_ldif_simple) == expected + assert dse_ldif_simple["nsslapd-security"] == "on" + assert "sslVersionMin" in dse_ldif_simple + for k in dse_ldif_simple: + assert expected[k] == dse_ldif_simple[k] + assert expected[k] == dse_ldif_simple.get(k) + + +def test_dse_ldif_coverage(): + dse_ldif_simple = DseLdifSimple(context_wrap(DSE_LDIF_COVERAGE)) + assert "sslVersionMin" not in dse_ldif_simple + assert "sslVersionMax" not in dse_ldif_simple + assert "nsSSL3" in dse_ldif_simple + assert len(dse_ldif_simple) == 2 + expected = { + "nsSSL3": "on", + # This doesn't happen in real deployments because 389-ds automatically + # reformats it back to a single line. + "nsslapd-security": "o", + } + assert dict(dse_ldif_simple) == expected + + +def test_dse_ldif_filtered(): + dse_ldif_simple = DseLdifSimple(context_wrap(DSE_LDIF_REAL_EXAMPLE, filtered_spec=Specs.dse_ldif)) + assert dse_ldif_simple["nsslapd-security"] == "on" + assert len(dse_ldif_simple) == 6 + expected = { + "cn": "config", # just a canary to detect spec collection status + "nsslapd-security": "on", + "sslVersionMin": "TLS1.0", + "sslVersionMax": "TLS1.1", + "nsSSL3": "on", + "nsSSL3Ciphers": "+all", + } + assert dict(dse_ldif_simple) == expected + + +def test_doc_examples(): + env = { + "dse_ldif_simple": DseLdifSimple(context_wrap(DSE_LDIF_DOCTEST)), + } + failed, total = doctest.testmod(dse_ldif_simple, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index de3dc872e..734b6e303 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -139,6 +139,7 @@ class Specs(SpecSet): dotnet_version = RegistryPoint() doveconf = RegistryPoint(filterable=True) dracut_kdump_capture_service = RegistryPoint() + dse_ldif = RegistryPoint(multi_output=True, filterable=True) du_dirs = RegistryPoint(multi_output=True) dumpe2fs_h = RegistryPoint(multi_output=True) engine_config_all = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 100e258d8..3be17f875 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -82,6 +82,7 @@ class DefaultSpecs(Specs): abrt_status_bare = simple_command("/usr/bin/abrt status --bare=True") alternatives_display_python = simple_command("/usr/sbin/alternatives --display python") amq_broker = glob_file("/var/opt/amq-broker/*/etc/broker.xml") + dse_ldif = glob_file("/etc/dirsrv/*/dse.ldif") auditctl_status = simple_command("/sbin/auditctl -s") auditd_conf = simple_file("/etc/audit/auditd.conf") audit_log = simple_file("/var/log/audit/audit.log") From 5e65b34381327465b8d3538e10f67ef246b3237e Mon Sep 17 00:00:00 2001 From: Marley Stipich <87209745+marleystipich2@users.noreply.github.com> Date: Wed, 3 Nov 2021 16:36:12 -0500 Subject: [PATCH 588/892] Fix: RHICOMPL-1980 Adding the 'relationships' API attribute to the 
client profiles API call. (#3241) Signed-off-by: Marley Stipich --- insights/client/apps/compliance/__init__.py | 2 +- insights/tests/client/apps/test_compliance.py | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 8a4e21545..a1b2fba99 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -93,7 +93,7 @@ def download_tailoring_file(self, profile): def get_profiles(self, search): response = self.conn.session.get("https://{0}/compliance/profiles".format(self.config.base_url), - params={'search': search}) + params={'search': search, 'relationships': 'false'}) logger.debug("Content of the response: {0} - {1}".format(response, response.json())) if response.status_code == 200: diff --git a/insights/tests/client/apps/test_compliance.py b/insights/tests/client/apps/test_compliance.py index 3820d1c96..061814c93 100644 --- a/insights/tests/client/apps/test_compliance.py +++ b/insights/tests/client/apps/test_compliance.py @@ -101,7 +101,7 @@ def test_get_profiles(config): compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_profiles('search string') == [{'attributes': 'data'}] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string', 'relationships': 'false'}) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) @@ -110,7 +110,7 @@ def test_get_profiles_no_profiles(config): compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': []}))) assert compliance_client.get_profiles('search string') == [] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string', 'relationships': 'false'}) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) @@ -119,7 +119,7 @@ def test_get_profiles_error(config): compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=500)) assert compliance_client.get_profiles('search string') == [] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'search string', 'relationships': 'false'}) @patch("insights.client.config.InsightsConfig", base_url='localhost/app', systemid='', proxy=None) @@ -128,7 +128,7 @@ def test_get_initial_profiles(config): compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_initial_profiles() == 
[{'attributes': 'data'}] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false external=false'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false external=false', 'relationships': 'false'}) @patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) @@ -138,7 +138,7 @@ def test_get_profiles_matching_os(config, os_release_info_mock): compliance_client.inventory_id = '068040f1-08c8-43e4-949f-7d6470e9111c' compliance_client.conn.session.get = Mock(return_value=Mock(status_code=200, json=Mock(return_value={'data': [{'attributes': 'data'}]}))) assert compliance_client.get_profiles_matching_os() == [{'attributes': 'data'}] - compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false os_minor_version=5'}) + compliance_client.conn.session.get.assert_called_with('https://localhost/app/compliance/profiles', params={'search': 'system_ids=068040f1-08c8-43e4-949f-7d6470e9111c canonical=false os_minor_version=5', 'relationships': 'false'}) @patch("insights.client.apps.compliance.os_release_info", return_value=(None, '6.5')) From 07a4f58859329d4adb4acb3d1eebae4a55a8285b Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 8 Nov 2021 15:32:11 +0800 Subject: [PATCH 589/892] Get all SSL certificates for httpd incase different expired date used (#3270) Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 2 +- insights/combiners/ssl_certificate.py | 22 ++++++++- .../combiners/tests/test_ssl_certificate.py | 27 ++++++++++- insights/specs/__init__.py | 2 +- insights/specs/datasources/ssl_certificate.py | 8 ++-- insights/specs/default.py | 2 +- .../tests/datasources/test_ssl_certificate.py | 47 +++++++++++++++++-- 7 files changed, 96 insertions(+), 14 deletions(-) diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 5703a75ba..6f2cf47aa 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -79,7 +79,7 @@ insights.specs.datasources.ssl_certificate ------------------------------------------ .. automodule:: insights.specs.datasources.ssl_certificate - :members: httpd_ssl_certificate_file, nginx_ssl_certificate_files + :members: httpd_ssl_certificate_files, nginx_ssl_certificate_files :show-inheritance: :undoc-members: diff --git a/insights/combiners/ssl_certificate.py b/insights/combiners/ssl_certificate.py index 00d565b07..da825328b 100644 --- a/insights/combiners/ssl_certificate.py +++ b/insights/combiners/ssl_certificate.py @@ -7,10 +7,14 @@ EarliestNginxSSLCertExpireDate - The earliest expire date in a lot of nginx ssl certificates -------------------------------------------------------------------------------------------- Combiner to get the earliest expire date in a lot of nginx ssl certificates. + +EarliestHttpdSSLCertExpireDate - The earliest expire date in a lot of httpd ssl certificates +-------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of httpd ssl certificates. 
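+
+A minimal usage sketch for rule code (hypothetical rule and response key
+names; it assumes only the attributes documented on the combiners below)::
+
+    from insights.core.plugins import make_info, rule
+    from insights.combiners.ssl_certificate import EarliestHttpdSSLCertExpireDate
+
+    @rule(EarliestHttpdSSLCertExpireDate)
+    def report(httpd_certs):
+        # earliest_expire_date is an ExpirationDate named tuple with
+        # 'str' and 'datetime' fields; ssl_cert_path is the certificate
+        # the date came from.
+        return make_info('HTTPD_CERT_EXPIRY',
+                         path=httpd_certs.ssl_cert_path,
+                         expires=httpd_certs.earliest_expire_date.str)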
""" from insights.core.dr import SkipComponent -from insights.parsers.ssl_certificate import NginxSSLCertExpireDate +from insights.parsers.ssl_certificate import NginxSSLCertExpireDate, HttpdSSLCertExpireDate from insights.parsers.certificates_enddate import CertificatesEnddate from insights.core.plugins import combiner @@ -58,3 +62,19 @@ class EarliestNginxSSLCertExpireDate(EarliestSSLCertExpireDate): '/test/d.pem' """ pass + + +@combiner(HttpdSSLCertExpireDate) +class EarliestHttpdSSLCertExpireDate(EarliestSSLCertExpireDate): + """ + Combiner to get the earliest expire date in a lot of httpd ssl certificates. + + Examples: + >>> type(httpd_certs) + + >>> httpd_certs.earliest_expire_date.str + 'Dec 18 07:02:43 2021' + >>> httpd_certs.ssl_cert_path + '/test/d.pem' + """ + pass diff --git a/insights/combiners/tests/test_ssl_certificate.py b/insights/combiners/tests/test_ssl_certificate.py index fb4e9af71..4fc7c3a26 100644 --- a/insights/combiners/tests/test_ssl_certificate.py +++ b/insights/combiners/tests/test_ssl_certificate.py @@ -23,6 +23,14 @@ notAfter=Dec 18 07:02:43 2021 GMT ''' +HTTPD_CERT_EXPIRE_INFO_1 = ''' +notAfter=Jan 18 07:02:43 2038 GMT +''' + +HTTPD_CERT_EXPIRE_INFO_2 = ''' +notAfter=Dec 18 07:02:43 2021 GMT +''' + def test_earliest_ssl_expire_date(): date_info1 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO1, args='/test/a.pem')) @@ -44,9 +52,13 @@ def test_doc(): date_info1 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_1, args='/test/c.pem')) date_info2 = ssl_certificate.NginxSSLCertExpireDate(context_wrap(NGINX_CERT_EXPIRE_INFO_2, args='/test/d.pem')) nginx_certs = ssl_certificate.EarliestNginxSSLCertExpireDate([date_info1, date_info2]) + date_info1 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + date_info2 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_2, args='/test/d.pem')) + httpd_certs = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info1, date_info2]) globs = { 'ssl_certs': ssl_certs, - 'nginx_certs': nginx_certs + 'nginx_certs': nginx_certs, + 'httpd_certs': httpd_certs } failed, _ = doctest.testmod(ssl_certificate, globs=globs) assert failed == 0 @@ -63,3 +75,16 @@ def test_nginx_ssl_cert_combiner(): expiredate_obj = ssl_certificate.EarliestNginxSSLCertExpireDate([date_info1, date_info2]) assert expiredate_obj.earliest_expire_date.str == 'Dec 18 07:02:43 2021' assert expiredate_obj.ssl_cert_path == '/test/d.pem' + + +def test_httpd_ssl_cert_combiner(): + date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + expiredate_obj = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info]) + assert expiredate_obj.earliest_expire_date.str == 'Jan 18 07:02:43 2038' + assert expiredate_obj.ssl_cert_path == '/test/c.pem' + + date_info1 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_1, args='/test/c.pem')) + date_info2 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_2, args='/test/d.pem')) + expiredate_obj = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info1, date_info2]) + assert expiredate_obj.earliest_expire_date.str == 'Dec 18 07:02:43 2021' + assert expiredate_obj.ssl_cert_path == '/test/d.pem' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 734b6e303..46d965a83 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -234,7 +234,7 @@ class Specs(SpecSet): httpd_M = 
RegistryPoint(multi_output=True) httpd_on_nfs = RegistryPoint() httpd_ssl_access_log = RegistryPoint(filterable=True) - httpd_ssl_cert_enddate = RegistryPoint() + httpd_ssl_cert_enddate = RegistryPoint(multi_output=True) httpd_ssl_error_log = RegistryPoint(filterable=True) httpd_V = RegistryPoint(multi_output=True) virt_uuid_facts = RegistryPoint() diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py index 4f6dfa5ea..00e8dff3a 100644 --- a/insights/specs/datasources/ssl_certificate.py +++ b/insights/specs/datasources/ssl_certificate.py @@ -10,7 +10,7 @@ @datasource(HttpdConfTree, HostContext) -def httpd_ssl_certificate_file(broker): +def httpd_ssl_certificate_files(broker): """ Get the httpd SSL certificate file path configured by "SSLCertificateFile" @@ -24,9 +24,9 @@ def httpd_ssl_certificate_file(broker): SkipComponent: Raised if "SSLCertificateFile" directive isn't found """ conf = broker[HttpdConfTree] - ssl_cert = conf.find('SSLCertificateFile') - if ssl_cert and ssl_cert[0].value: - return str(ssl_cert[0].value) + ssl_certs = conf.find('SSLCertificateFile') + if ssl_certs: + return [str(ssl_cert.value) for ssl_cert in ssl_certs] raise SkipComponent diff --git a/insights/specs/default.py b/insights/specs/default.py index 3be17f875..c37ded592 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -313,7 +313,7 @@ def httpd_cmd(broker): httpd_pid = simple_command("/usr/bin/pgrep -o httpd") httpd_limits = foreach_collect(httpd_pid, "/proc/%s/limits") httpd_M = foreach_execute(httpd_cmd, "%s -M") - httpd_ssl_cert_enddate = command_with_args('/usr/bin/openssl x509 -in %s -enddate -noout', ssl_certificate.httpd_ssl_certificate_file) + httpd_ssl_cert_enddate = foreach_execute(ssl_certificate.httpd_ssl_certificate_files, "/usr/bin/openssl x509 -in %s -enddate -noout") httpd_V = foreach_execute(httpd_cmd, "%s -V") ifcfg = glob_file("/etc/sysconfig/network-scripts/ifcfg-*") ifcfg_static_route = glob_file("/etc/sysconfig/network-scripts/route-*") diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py index a65ff42d3..1c4c102a3 100644 --- a/insights/tests/datasources/test_ssl_certificate.py +++ b/insights/tests/datasources/test_ssl_certificate.py @@ -5,7 +5,7 @@ from insights.combiners.httpd_conf import _HttpdConf, HttpdConfTree from insights.combiners.nginx_conf import _NginxConf, NginxConfTree from insights.specs.datasources.ssl_certificate import ( - httpd_ssl_certificate_file, nginx_ssl_certificate_files + httpd_ssl_certificate_files, nginx_ssl_certificate_files ) @@ -29,6 +29,33 @@ """.strip() +HTTPD_SSL_CONF_2 = """ + + ## SSL directives + ServerName a.b.c.com + SSLEngine on + SSLCertificateFile "/etc/pki/katello/certs/katello-apache.crt" + SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache.key" + SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca.crt" + SSLVerifyClient optional + SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca.crt" + SSLVerifyDepth 3 + SSLOptions +StdEnvVars +ExportCertData + + + ## SSL directives + ServerName d.c.e.com + SSLEngine on + SSLCertificateFile "/etc/pki/katello/certs/katello-apache_d.crt" + SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache_d.key" + SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca_d.crt" + SSLVerifyClient optional + SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca_d.crt" + SSLVerifyDepth 3 + SSLOptions +StdEnvVars 
+ExportCertData + +""".strip() + HTTPD_CONF_WITHOUT_SSL = """ ServerName a.b.c.com @@ -96,8 +123,18 @@ def test_httpd_certificate(): broker = { HttpdConfTree: conf_tree } - result = httpd_ssl_certificate_file(broker) - assert result == '/etc/pki/katello/certs/katello-apache.crt' + result = httpd_ssl_certificate_files(broker) + assert result == ['/etc/pki/katello/certs/katello-apache.crt'] + + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_SSL_CONF_2, path='/etc/httpd/conf.d/ssl.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + + broker = { + HttpdConfTree: conf_tree + } + result = httpd_ssl_certificate_files(broker) + assert result == ['/etc/pki/katello/certs/katello-apache.crt', '/etc/pki/katello/certs/katello-apache_d.crt'] def test_nginx_certificate(): @@ -136,8 +173,8 @@ def test_httpd_ssl_cert_exception(): HttpdConfTree: conf_tree } with pytest.raises(SkipComponent): - httpd_ssl_certificate_file(broker1) - httpd_ssl_certificate_file(broker2) + httpd_ssl_certificate_files(broker1) + httpd_ssl_certificate_files(broker2) def test_nginx_ssl_cert_exception(): From 28580a8d573ed1593ccae3fae71c277df67ed011 Mon Sep 17 00:00:00 2001 From: Keith Grant Date: Tue, 9 Nov 2021 14:47:26 -0500 Subject: [PATCH 590/892] Fix: Strip progress messages from testparm output (#3273) * Strip progress messages from testparm output On some older releases of Samba, testparm -v -s contains progress messages such as `Processing section "[homes]"`. Because these messages contain section names, they aren't removed by filtering. This change removes them in the testparm parser. Signed-off-by: Keith Grant * Avoid double-testing every line in testparm output Instead of iterating through every line in testparm twice, only pass content from the server role line onward. Also replaced a curiously indented else with an explicit test of whether the server role line had been found or not. Signed-off-by: Keith Grant --- insights/parsers/samba.py | 12 +++++++++--- insights/parsers/tests/test_samba.py | 4 ++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/insights/parsers/samba.py b/insights/parsers/samba.py index 6417fe46c..5d7cda458 100644 --- a/insights/parsers/samba.py +++ b/insights/parsers/samba.py @@ -136,15 +136,21 @@ class SambaConfigs(SambaConfig): """ def parse_content(self, content): # Parse server role - for line in content: + # The output of `testparm` sometimes includes progress lines such as + # `Processing section "[homes]"`. These lines are output before the server + # role definition, so only pass output from that line onward. + server_role_line = None + for i, line in enumerate(content): r = re.search(r"Server role:\s+(\S+)", line) if r: self.server_role = r.group(1) + server_role_line = i break - else: + + if server_role_line is None: raise ParseException("Server role not found.") - super(SambaConfigs, self).parse_content(content) + super(SambaConfigs, self).parse_content(content[server_role_line:]) @parser(Specs.testparm_v_s) diff --git a/insights/parsers/tests/test_samba.py b/insights/parsers/tests/test_samba.py index 210547557..7ffcd2ccc 100644 --- a/insights/parsers/tests/test_samba.py +++ b/insights/parsers/tests/test_samba.py @@ -197,8 +197,12 @@ def test_documentation(): # # Load smb config files from /etc/samba/smb.conf # Loaded services file OK. +# Because status messages include section names, they aren't +# removed by filtering. 
TESTPARM = """ +Processing section "[homes]" +Processing section "[printers]" Server role: ROLE_STANDALONE # Global parameters From 1049eb0e2a6a1d6dd3790d877ab301fe92fa75fb Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Wed, 10 Nov 2021 09:55:13 +0800 Subject: [PATCH 591/892] fix: Correct the order of satellite_custom_hiera in the list of specs (#3282) Signed-off-by: Huanhuan Li --- insights/specs/__init__.py | 2 +- insights/specs/default.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 46d965a83..5dd0405d3 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -584,12 +584,12 @@ class Specs(SpecSet): satellite_compute_resources = RegistryPoint() satellite_content_hosts_count = RegistryPoint() satellite_custom_ca_chain = RegistryPoint() + satellite_custom_hiera = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_non_yum_type_repos = RegistryPoint() satellite_sca_status = RegistryPoint() satellite_settings = RegistryPoint() satellite_version_rb = RegistryPoint() - satellite_custom_hiera = RegistryPoint() scheduler = RegistryPoint(multi_output=True) sched_rt_runtime_us = RegistryPoint() scsi = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index c37ded592..099787eb2 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -621,6 +621,7 @@ def pmlog_summary_file(broker): satellite_custom_ca_chain = simple_command( '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt', ) + satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") satellite_missed_pulp_agent_queues = satellite_missed_queues.satellite_missed_pulp_agent_queues satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_non_yum_type_repos = simple_command( @@ -636,7 +637,6 @@ def pmlog_summary_file(broker): deps=[SatelliteVersion] ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") - satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") scheduler = glob_file("/sys/block/*/queue/scheduler") scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') From 3b9b52950f205e493126bcff07cc339f54079513 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Wed, 10 Nov 2021 22:31:09 +0800 Subject: [PATCH 592/892] chore: RHEL 8.5 is GA (#3285) - https://access.redhat.com/articles/3078 Signed-off-by: Xiangce Liu --- insights/parsers/uname.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/parsers/uname.py b/insights/parsers/uname.py index 13ad52862..c6454c5cb 100644 --- a/insights/parsers/uname.py +++ b/insights/parsers/uname.py @@ -112,6 +112,7 @@ "4.18.0-193": "8.2", "4.18.0-240": "8.3", "4.18.0-305": "8.4", + "4.18.0-348": "8.5", } release_to_kernel_map = dict((v, k) for k, v in rhel_release_map.items()) From f863c030006641c1d584fe01ba62e128015cba55 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Thu, 11 Nov 2021 08:52:55 +0530 Subject: [PATCH 593/892] Added spec for the getcert_list parser (#3274) Signed-off-by: Rahul --- insights/specs/default.py | 1 + insights/specs/insights_archive.py | 1 + 
2 files changed, 2 insertions(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index 099787eb2..0fd6fa2ee 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -243,6 +243,7 @@ def du_dirs_list(broker): fw_devices = simple_command("/bin/fwupdagent get-devices", deps=[IsBareMetal]) fw_security = simple_command("/bin/fwupdagent security --force", deps=[IsBareMetal]) galera_cnf = first_file(["/var/lib/config-data/puppet-generated/mysql/etc/my.cnf.d/galera.cnf", "/etc/my.cnf.d/galera.cnf"]) + getcert_list = simple_command("/usr/bin/getcert list") getconf_page_size = simple_command("/usr/bin/getconf PAGE_SIZE") getenforce = simple_command("/usr/sbin/getenforce") getsebool = simple_command("/usr/sbin/getsebool -a") diff --git a/insights/specs/insights_archive.py b/insights/specs/insights_archive.py index 45072a363..5c9efcb8c 100644 --- a/insights/specs/insights_archive.py +++ b/insights/specs/insights_archive.py @@ -75,6 +75,7 @@ class InsightsArchiveSpecs(Specs): fw_devices = simple_file("insights_commands/fwupdagent_get-devices") fw_security = simple_file("insights_commands/fwupdagent_security_--force") gcp_license_codes = simple_file("insights_commands/python_-m_insights.tools.cat_--no-header_gcp_license_codes") + getcert_list = simple_file("insights_commands/getcert_list") getconf_page_size = simple_file("insights_commands/getconf_PAGE_SIZE") getenforce = simple_file("insights_commands/getenforce") getsebool = simple_file("insights_commands/getsebool_-a") From dfc844e8f586b053ed7684eb71889d57c77b6bb8 Mon Sep 17 00:00:00 2001 From: Rahul Srivastava <44598880+rasrivas-redhat@users.noreply.github.com> Date: Thu, 11 Nov 2021 10:30:52 +0530 Subject: [PATCH 594/892] New parser ldap config (#3257) * New parser ldap config Signed-off-by: Rahul * added multi_output to the spec Signed-off-by: Rahul * Updated the doc string and use the list class Signed-off-by: Rahul * Fixed the flake8 error Signed-off-by: Rahul * lc Signed-off-by: Rahul * Updated to code to handle the multiple line attribute Signed-off-by: Rahul * Optimized the code by following the reviewer suggtions Signed-off-by: Rahul * Added test case to the doc string Signed-off-by: Rahul * Fixing the pytest error Signed-off-by: Rahul * Added line stripping check Signed-off-by: Rahul * updated the comment message Signed-off-by: Rahul * Updated the doc string to explain the dn block and updated the code to show complete dn attribute name Signed-off-by: Rahul * updated the data structure for the return and added a search method Signed-off-by: Rahul * fixed the pipeline fail error Signed-off-by: Rahul * fixed the pipeline error Signed-off-by: Rahul * fixed the pipeline error Signed-off-by: Rahul * updated the doc string example for the search method Signed-off-by: Rahul * updated the doc string Signed-off-by: Rahul * Added couple of test cases and optimized the code Signed-off-by: Rahul * Added doc test example and optimized the code Signed-off-by: Rahul --- docs/shared_parsers_catalog/ldif_config.rst | 3 + insights/parsers/ldif_config.py | 128 +++++++++++ insights/parsers/tests/test_ldif_config.py | 225 ++++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 358 insertions(+) create mode 100644 docs/shared_parsers_catalog/ldif_config.rst create mode 100644 insights/parsers/ldif_config.py create mode 100644 insights/parsers/tests/test_ldif_config.py diff --git a/docs/shared_parsers_catalog/ldif_config.rst b/docs/shared_parsers_catalog/ldif_config.rst new file 
mode 100644
index 000000000..8fe84b92e
--- /dev/null
+++ b/docs/shared_parsers_catalog/ldif_config.rst
@@ -0,0 +1,3 @@
+.. automodule:: insights.parsers.ldif_config
+    :members:
+    :show-inheritance:
diff --git a/insights/parsers/ldif_config.py b/insights/parsers/ldif_config.py
new file mode 100644
index 000000000..7ee0a137d
--- /dev/null
+++ b/insights/parsers/ldif_config.py
@@ -0,0 +1,128 @@
+"""
+LDIF Configuration - file ``/etc/dirsrv/slapd-*/dse.ldif``
+==========================================================
+"""
+
+from insights import Parser, parser
+from insights.specs import Specs
+from insights.parsers import SkipException, keyword_search
+
+
+@parser(Specs.ldif_config)
+class LDIFParser(Parser, list):
+    """
+    Parse the content of the directory server configuration of the
+    ``/etc/dirsrv/slapd-*/dse.ldif`` file.
+
+    The file dse.ldif is in the LDIF format. LDIF contains multi-row records
+    where each record is identified by a ``dn:`` line ("dn" as in "distinguished
+    name") and the record's other lines are attributes. A value may be specified
+    as UTF-8 text, as base64-encoded data, or as a URI pointing to the location
+    of the attribute value.
+
+    .. note::
+        1. This parser unwraps the multiple 'aci:' lines into a single 'aci' value.
+        2. When the same key occurs multiple times, this parser only keeps
+           the last value and discards the earlier ones.
+
+    Sample input::
+
+        dn:
+        aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(
+         read,search,compare) userdn="ldap:///anyone";)
+        aci: (target = "ldap:///cn=automember rebuild membership,cn=tasks,cn=config")(
+         ,cn=permissions,cn=pbac,dc=idm";)
+        createTimestamp: 20201026161200Z
+        creatorsName: cn=server,cn=plugins,cn=config
+        modifiersName: cn=Directory Manager
+        modifyTimestamp: 20210608144722Z
+        nsslapd-return-default-opattr: namingContexts
+        nsslapd-return-default-opattr: supportedControl
+        nsslapd-return-default-opattr: supportedExtension
+        nsslapd-return-default-opattr: supportedLDAPVersion
+        nsslapd-return-default-opattr: supportedSASLMechanisms
+        nsslapd-return-default-opattr: vendorName
+        nsslapd-return-default-opattr: vendorVersion
+        objectClass: top
+
+        dn: cn=changelog5,cn=config
+        cn: changelog5
+        createTimestamp: 20201026161228Z
+        creatorsName: cn=Directory Manager
+        modifiersName: cn=Directory Manager
+        modifyTimestamp: 20201026161228Z
+        nsslapd-changelogdir: /var/lib/dirsrv/slapd-IDM-NYPD-FINEST/cldb
+        nsslapd-changelogmaxage: 7d
+        objectClass: top
+        objectClass: extensibleobject
+
+    Returns:
+
+        list: A list of dictionaries for each 'dn' attribute block of the ldif configuration.
+
+    Examples:
+
+        >>> ldif_config[0]['dn']
+        ''
+        >>> ldif_config[0]['aci'] # the 2 aci are connected into one
+        '(targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(read,search,compare) userdn="ldap:///anyone";)(target = "ldap:///cn=automember rebuild membership,cn=tasks,cn=config")(,cn=permissions,cn=pbac,dc=idm";)'
+        >>> ldif_config[0]['nsslapd-return-default-opattr'] # only keep the last
+        'vendorVersion'
+        >>> ldif_config[1]['dn']
+        'cn=changelog5,cn=config'
+        >>> ldif_config[1]['modifiersName']
+        'cn=Directory Manager'
+        >>> ldif_config[1]['modifyTimestamp']
+        '20201026161228Z'
+    """
+    def parse_content(self, content):
+        if not content:
+            raise SkipException('The file is empty')
+
+        attr_kval = {}
+        for line in content:
+            # lines beginning with # are ignored
+            line = line.strip()
+            if not line or line.startswith('#'):
+                continue
+            # line starts with 'dn' attribute
+            elif line.startswith('dn:'):
+                aci_flag = False
+                attr_kval = dict(dn=line.split(':', 1)[1].strip())
+                self.append(attr_kval)
+            # line starts with 'aci' attribute
+            elif line.startswith('aci:'):
+                if 'aci' in attr_kval:
+                    attr_kval['aci'] += line.split(':', 1)[1].strip()
+                else:
+                    attr_kval['aci'] = line.split(':', 1)[1].strip()
+                aci_flag = True
+            # line is a multi-line continuation of the 'aci' attribute
+            elif aci_flag and ': ' not in line:
+                attr_kval['aci'] += line
+            # line is a non-'aci' attribute or a file-backed value attribute
+            elif not line.startswith('aci:') and ': ' in line:
+                aci_flag = False
+                key, val = [i.strip() for i in line.split(':', 1)]
+                attr_kval[key] = val
+
+    def search(self, **kwargs):
+        """
+        Get the list for the 'dn' attribute block by searching the ldif configuration.
+        This uses the :py:func:`insights.parsers.keyword_search` function for searching,
+        see its documentation for usage details. If no search parameters are given, or
+        nothing matches the search, an empty list is returned.
+
+        Returns:
+            list: A list of dictionaries for each 'dn' attribute block of the ldif configuration that match the given
+            search criteria.
+ + Examples: + >>> ldif_config.search(dn__contains='cn=config')[0] == ldif_config[1] + True + >>> ldif_config.search(dn='cn=sasl,cn=config') == [] + True + >>> ldif_config.search(cn='changelog5')[0] == ldif_config[1] + True + """ + return keyword_search(self, **kwargs) diff --git a/insights/parsers/tests/test_ldif_config.py b/insights/parsers/tests/test_ldif_config.py new file mode 100644 index 000000000..03915dca9 --- /dev/null +++ b/insights/parsers/tests/test_ldif_config.py @@ -0,0 +1,225 @@ +import doctest +import pytest +from insights.tests import context_wrap +from insights.parsers import ldif_config, SkipException +from insights.parsers.ldif_config import LDIFParser + +LDIF_CONFIG = """ +dn: +aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow( + read,search,compare) userdn="ldap:///anyone";) +createTimestamp: 20201026161200Z +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=Directory Manager +modifyTimestamp: 20210608144722Z +nsslapd-return-default-opattr: namingContexts +nsslapd-return-default-opattr: supportedControl +nsslapd-return-default-opattr: supportedExtension +nsslapd-return-default-opattr: supportedLDAPVersion +nsslapd-return-default-opattr: supportedSASLMechanisms +nsslapd-return-default-opattr: vendorName +nsslapd-return-default-opattr: vendorVersion +objectClass: top + +dn: cn=config +aci: (targetattr != aci)(version 3.0; aci "cert manager read access"; allow (r + ead, search, compare) userdn = "ldap:///uid=pkidbuser,ou=people,o=ipaca";) +aci: (target = "ldap:///cn=automember rebuild membership,cn=tasks,cn=config")( + targetattr=*)(version 3.0;acl "permission:Add Automember Rebuil Membership T + ask";allow (add) groupdn = "ldap:///cn=Add Automember Rebuild Membership Task + ,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";) +aci: (targetattr = "cn || createtimestamp || entryusn || modifytimestamp || ob + jectclass || passsyncmanagersdns*")(target = "ldap:///cn=ipa_pwd_extop,cn=plu + gins,cn=config")(version 3.0;acl "permission:Read PassSync Managers Configura + tion";allow (compare,read,search) groupdn = "ldap:///cn=Read PassSync Manager + s Configuration,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";) +cn: config1111 +cn: config2222 +cn1:: IGJlZ2lucyB3aXRoIGEgc3BhY2U= +cn2:< file:///tmp/value +modifiersName: cn=directory manager +modifyTimestamp: 20210609192548Z +objectClass: nsslapdConfig +CACertExtractFile: /etc/dirsrv/slapd-IDM-NYPD-FINEST/IDM.NYPD.FINEST20IPA20CA. 
+ pem +numSubordinates: 14 +nsslapd-errorlog-level: 81920 +nsslapd-rootpw: {SSHA512}mdrYu17fr9ukhID6B6/aMHE1KeMhWLwVfP3y2LSNtTFaMkRPf340X + MGEN/ocUoAyykmDSMxVcF3ajVR3+f5mqmNqxUek9PYT + +dn: cn=monitor +aci: (target ="ldap:///cn=monitor*")(targetattr != "aci || connection")(versio + n 3.0; acl "monitor"; allow( read, search, compare ) userdn = "ldap:///anyone + ";) +cn: monitor +createTimestamp: 20201026161200Z +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +modifyTimestamp: 20201026161200Z +objectClass: top +objectClass: extensibleObject +numSubordinates: 3 + +dn: cn=changelog5,cn=config +cn: changelog5 +createTimestamp: 20201026161228Z +creatorsName: cn=Directory Manager +modifiersName: cn=Directory Manager +modifyTimestamp: 20201026161228Z +nsslapd-changelogdir: /var/lib/dirsrv/slapd-IDM-NYPD-FINEST/cldb +nsslapd-changelogmaxage: 7d +objectClass: top +objectClass: extensibleobject + +dn: cn=encryption,cn=config +aci: (target ="ldap:///cn=monitor*")(targetattr != "aci) +CACertExtractFile: /etc/dirsrv/slapd-IDM-NYPD-FINEST/IDM.NYPD.FINEST20IPA20CA. + pem +# ldif config +allowWeakCipher: off +cn: encryption +createTimestamp: 20201026161200Z +creatorsName: cn=server,cn=plugins,cn=config +modifiersName: cn=server,cn=plugins,cn=config +modifyTimestamp: 20201026161252Z +nsSSL3Ciphers: default +nsSSLClientAuth: allowed +nsSSLSessionTimeout: 0 +objectClass: top +objectClass: nsEncryptionConfig +sslVersionMin: TLS1.0 +numSubordinates: 1 + +dn: cn=features,cn=config +cn: features +objectClass: top +objectClass: nsContainer +numSubordinates: 5 + +dn: cn=mapping tree,cn=config +aci: (targetattr = "cn || createtimestamp || description || entryusn || modify + timestamp || nsds50ruv || nsds5beginreplicarefresh || nsds5debugreplicatimeou + t || nsds5flags || nsds5replicaabortcleanruv || nsds5replicaautoreferral || n + sds5replicabackoffmax || nsds5replicabackoffmin || nsds5replicabinddn || nsds + 5replicabindmethod || nsds5replicabusywaittime || nsds5replicachangecount || + nsds5replicachangessentsincestartup || nsds5replicacleanruv || nsds5replicacl + eanruvnotified || nsds5replicacredentials || nsds5replicaenabled || nsds5repl + icahost || nsds5replicaid || nsds5replicalastinitend || nsds5replicalastinits + tart || nsds5replicalastinitstatus || nsds5replicalastupdateend || nsds5repli + calastupdatestart || nsds5replicalastupdatestatus || nsds5replicalegacyconsum + er || nsds5replicaname || nsds5replicaport || nsds5replicaprotocoltimeout || + nsds5replicapurgedelay || nsds5replicareferral || nsds5replicaroot || nsds5re + plicasessionpausetime || nsds5replicastripattrs || nsds5replicatedattributeli + st || nsds5replicatedattributelisttotal || nsds5replicatimeout || nsds5replic + atombstonepurgeinterval || nsds5replicatransportinfo || nsds5replicatype || n + sds5replicaupdateinprogress || nsds5replicaupdateschedule || nsds5task || nsd + s7directoryreplicasubtree || nsds7dirsynccookie || nsds7newwingroupsyncenable + d || nsds7newwinusersyncenabled || nsds7windowsdomain || nsds7windowsreplicas + ubtree || nsruvreplicalastmodified || nsstate || objectclass || onewaysync || + winsyncdirectoryfilter || winsyncinterval || winsyncmoveaction || winsyncsub + treepair || winsyncwindowsfilter")(targetfilter = "(|(objectclass=nsds5Replic + a)(objectclass=nsds5replicationagreement)(objectclass=nsDSWindowsReplicationA + greement)(objectClass=nsMappingTree))")(version 3.0;acl "permission:Read Repl + ication Agreements";allow (compare,read,search) groupdn = "ldap:///cn=Read 
Re
+ plication Agreements,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";)
+aci: (targetattr=*)(version 3.0;acl "permission:Add Replication Agreements";al
+ low (add) groupdn = "ldap:///cn=Add Replication Agreements,cn=permissions,cn=
+ pbac,dc=idm,dc=nypd,dc=finest";)
+aci: (targetattr=*)(targetfilter="(|(objectclass=nsds5Replica)(objectclass=nsd
+ s5replicationagreement)(objectclass=nsDSWindowsReplicationAgreement)(objectCl
+ ass=nsMappingTree))")(version 3.0; acl "permission:Modify Replication Agreeme
+ nts"; allow (read, write, search) groupdn = "ldap:///cn=Modify Replication Ag
+ reements,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";)
+aci: (targetattr=*)(targetfilter="(|(objectclass=nsds5replicationagreement)(ob
+ jectclass=nsDSWindowsReplicationAgreement))")(version 3.0;acl "permission:Rem
+ ove Replication Agreements";allow (delete) groupdn = "ldap:///cn=Remove Repli
+ cation Agreements,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";)
+aci: (target = "ldap:///cn=meTo($dn),cn=*,cn=mapping tree,cn=config")(targetat
+ tr = "objectclass || cn")(version 3.0; acl "Allow hosts to read their replica
+ tion agreements"; allow(read, search, compare) userdn = "ldap:///fqdn=($dn),c
+ n=computers,cn=accounts,dc=idm,dc=nypd,dc=finest";)
+cn: mapping tree
+modifiersName: cn=Directory Manager
+modifyTimestamp: 20201026161600Z
+objectClass: top
+objectClass: extensibleObject
+numSubordinates: 3
+
+dn: cn=sasl,cn=config
+cn: sasl
+objectClass: top
+objectClass: nsContainer
+numSubordinates: 1
+"""
+
+LDIF_CONFIG_DOC = """
+dn:
+aci: (targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(
+ read,search,compare) userdn="ldap:///anyone";)
+aci: (target = "ldap:///cn=automember rebuild membership,cn=tasks,cn=config")(
+ ,cn=permissions,cn=pbac,dc=idm";)
+createTimestamp: 20201026161200Z
+creatorsName: cn=server,cn=plugins,cn=config
+modifiersName: cn=Directory Manager
+modifyTimestamp: 20210608144722Z
+nsslapd-return-default-opattr: namingContexts
+nsslapd-return-default-opattr: supportedControl
+nsslapd-return-default-opattr: supportedExtension
+nsslapd-return-default-opattr: supportedLDAPVersion
+nsslapd-return-default-opattr: supportedSASLMechanisms
+nsslapd-return-default-opattr: vendorName
+nsslapd-return-default-opattr: vendorVersion
+objectClass: top
+
+dn: cn=changelog5,cn=config
+cn: changelog5
+createTimestamp: 20201026161228Z
+creatorsName: cn=Directory Manager
+modifiersName: cn=Directory Manager
+modifyTimestamp: 20201026161228Z
+nsslapd-changelogdir: /var/lib/dirsrv/slapd-IDM-NYPD-FINEST/cldb
+nsslapd-changelogmaxage: 7d
+objectClass: top
+objectClass: extensibleobject
+"""
+
+LDIF_CONFIG_EMPTY = ""
+
+
+def test_ldif_parser():
+    ldif_config = LDIFParser(context_wrap(LDIF_CONFIG))
+    for item in ldif_config:
+        if item['dn'] == "":
+            assert item['aci'] == '(targetattr != "aci")(version 3.0; aci "rootdse anon read access"; allow(read,search,compare) userdn="ldap:///anyone";)'
+            assert item['modifiersName'] == 'cn=Directory Manager'
+        if item['dn'] == 'cn=changelog5,cn=config':
+            assert item['createTimestamp'] == '20201026161228Z'
+            assert item['creatorsName'] == 'cn=Directory Manager'
+            assert item['nsslapd-changelogdir'] == '/var/lib/dirsrv/slapd-IDM-NYPD-FINEST/cldb'
+            assert item['objectClass'] == 'extensibleobject'
+        if item['dn'] == 'cn=config':
+            assert item['aci'] == '(targetattr != aci)(version 3.0; aci "cert manager read access"; allow (read, search, compare) userdn = "ldap:///uid=pkidbuser,ou=people,o=ipaca";)(target = "ldap:///cn=automember rebuild
membership,cn=tasks,cn=config")(targetattr=*)(version 3.0;acl "permission:Add Automember Rebuil Membership Task";allow (add) groupdn = "ldap:///cn=Add Automember Rebuild Membership Task,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";)(targetattr = "cn || createtimestamp || entryusn || modifytimestamp || objectclass || passsyncmanagersdns*")(target = "ldap:///cn=ipa_pwd_extop,cn=plugins,cn=config")(version 3.0;acl "permission:Read PassSync Managers Configuration";allow (compare,read,search) groupdn = "ldap:///cn=Read PassSync Managers Configuration,cn=permissions,cn=pbac,dc=idm,dc=nypd,dc=finest";)' + assert item['modifiersName'] == 'cn=directory manager' + assert item['modifyTimestamp'] == '20210609192548Z' + assert item['objectClass'] == 'nsslapdConfig' + if item['dn'] == 'cn=monitor': + assert item['aci'] == '(target ="ldap:///cn=monitor*")(targetattr != "aci || connection")(version 3.0; acl "monitor"; allow( read, search, compare ) userdn = "ldap:///anyone";)' + + assert ldif_config.search(dn='cn=features,cn=config')[0] == ldif_config[5] + assert ldif_config.search(dn='cn=sasl,cn=config')[0] == ldif_config[7] + assert ldif_config.search(cn='features')[0] == ldif_config[5] + assert ldif_config.search(dn='cn=sasl,cn=config')[0] == ldif_config[-1] + + +def test_empty(): + with pytest.raises(SkipException) as e: + LDIFParser(context_wrap(LDIF_CONFIG_EMPTY)) + assert 'The file is empty' in str(e) + + +def test_ldif_config_doc_examples(): + env = { + 'ldif_config': LDIFParser(context_wrap(LDIF_CONFIG_DOC)), + } + failed, total = doctest.testmod(ldif_config, globs=env) + assert failed == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 5dd0405d3..f06996519 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -291,6 +291,7 @@ class Specs(SpecSet): kubepods_cpu_quota = RegistryPoint(multi_output=True) lastupload = RegistryPoint(multi_output=True) ld_library_path_of_user = RegistryPoint() + ldif_config = RegistryPoint(multi_output=True) libssh_client_config = RegistryPoint(filterable=True) libssh_server_config = RegistryPoint(filterable=True) libvirtd_log = RegistryPoint(filterable=True) diff --git a/insights/specs/default.py b/insights/specs/default.py index 0fd6fa2ee..fe600926a 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -351,6 +351,7 @@ def httpd_cmd(broker): last_upload_globs = ["/etc/redhat-access-insights/.lastupload", "/etc/insights-client/.lastupload"] lastupload = glob_file(last_upload_globs) ld_library_path_of_user = sap.ld_library_path_of_user + ldif_config = glob_file("/etc/dirsrv/slapd-*/dse.ldif") libssh_client_config = simple_file("/etc/libssh/libssh_client.config") libssh_server_config = simple_file("/etc/libssh/libssh_server.config") libvirtd_log = simple_file("/var/log/libvirt/libvirtd.log") From bdc6bd69000a1c0f57c5fe291a47f286c4c63245 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 11 Nov 2021 13:08:37 +0800 Subject: [PATCH 595/892] Add spec "/etc/foreman-installer/scenarios.d/satellite.yaml" (#3280) * Add spec "/etc/foreman-installer/scenarios.d/satellite.yaml" Signed-off-by: Huanhuan Li * Update docsting and remove unrelated change Signed-off-by: Huanhuan Li --- .../shared_parsers_catalog/satellite_yaml.rst | 3 + insights/parsers/satellite_yaml.py | 32 +++++++++ insights/parsers/tests/test_satellite_yaml.py | 68 +++++++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/default.py | 1 + 5 files changed, 105 insertions(+) create mode 
100644 docs/shared_parsers_catalog/satellite_yaml.rst create mode 100644 insights/parsers/satellite_yaml.py create mode 100644 insights/parsers/tests/test_satellite_yaml.py diff --git a/docs/shared_parsers_catalog/satellite_yaml.rst b/docs/shared_parsers_catalog/satellite_yaml.rst new file mode 100644 index 000000000..4f631765c --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_yaml.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_yaml + :members: + :show-inheritance: diff --git a/insights/parsers/satellite_yaml.py b/insights/parsers/satellite_yaml.py new file mode 100644 index 000000000..e889c631d --- /dev/null +++ b/insights/parsers/satellite_yaml.py @@ -0,0 +1,32 @@ +""" +SatelliteYaml - file ``/etc/foreman-installer/scenarios.d/satellite.yaml`` +========================================================================== + +Parse the file ``/etc/foreman-installer/scenarios.d/satellite.yaml``. + +Sample input:: + + --- + :answer_file: /etc/foreman-installer/scenarios.d/satellite-answers.yaml + :installer_dir: /usr/share/foreman-installer/katello + :custom: + :lock_package_versions: true + :facts: + tuning: default + mongo_cache_size: 6.25 + +Examples: + >>> ':facts' in SatelliteYaml + True + >>> SatelliteYaml[':facts']['tuning'] + 'default' +""" + +from insights.core.plugins import parser +from insights.core import YAMLParser +from insights.specs import Specs + + +@parser(Specs.satellite_yaml) +class SatelliteYaml(YAMLParser): + pass diff --git a/insights/parsers/tests/test_satellite_yaml.py b/insights/parsers/tests/test_satellite_yaml.py new file mode 100644 index 000000000..a0feeb42d --- /dev/null +++ b/insights/parsers/tests/test_satellite_yaml.py @@ -0,0 +1,68 @@ +import doctest + +from insights.tests import context_wrap +from insights.parsers import satellite_yaml + + +SATELLITE_YAML_FILE_CONTENT = """ +--- +:answer_file: "/etc/foreman-installer/scenarios.d/satellite-answers.yaml" +:color_of_background: :dark +:colors: true +:custom: + :lock_package_versions: true +:description: Install Satellite server +:dont_save_answers: false +:enabled: true +:facts: + tuning: default + mongo_cache_size: 3.89 +:hiera_config: "/usr/share/foreman-installer/config/foreman-hiera.yaml" +:hook_dirs: +- "/usr/share/foreman-installer/katello/hooks" +:ignore_undocumented: false +:installer_dir: "/usr/share/foreman-installer" +:log_dir: "/var/log/foreman-installer" +:log_level: DEBUG +:log_name: satellite.log +:low_priority_modules: [] +:mapping: {} +:module_dirs: "/usr/share/foreman-installer/modules" +:name: Satellite +:no_prefix: false +:order: +- certs +- foreman +- katello +- foreman_proxy +- foreman_proxy::plugin::pulp +- foreman_proxy_content +- puppet +:parser_cache_path: +- "/usr/share/foreman-installer/parser_cache/foreman.yaml" +- "/usr/share/foreman-installer/parser_cache/katello.yaml" +:skip_puppet_version_check: false +:store_dir: '' +:verbose: true +:verbose_log_level: notice +""".strip() + +sat_yaml_path = "/etc/foreman-installer/scenarios.d/satellite.yaml" + + +def test_satellite_yaml(): + result = satellite_yaml.SatelliteYaml( + context_wrap(SATELLITE_YAML_FILE_CONTENT, path=sat_yaml_path) + ) + assert ':facts' in result + assert result[':facts']['tuning'] == 'default' + + +def test_doc(): + failed_count, tests = doctest.testmod( + satellite_yaml, + globs={ + "SatelliteYaml": satellite_yaml.SatelliteYaml(context_wrap(SATELLITE_YAML_FILE_CONTENT)) + }, + ) + assert failed_count == 0 diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 
f06996519..655651ef7 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -591,6 +591,7 @@ class Specs(SpecSet): satellite_sca_status = RegistryPoint() satellite_settings = RegistryPoint() satellite_version_rb = RegistryPoint() + satellite_yaml = RegistryPoint() scheduler = RegistryPoint(multi_output=True) sched_rt_runtime_us = RegistryPoint() scsi = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index fe600926a..2ca8fdfe8 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -639,6 +639,7 @@ def pmlog_summary_file(broker): deps=[SatelliteVersion] ) satellite_version_rb = simple_file("/usr/share/foreman/lib/satellite/version.rb") + satellite_yaml = simple_file("/etc/foreman-installer/scenarios.d/satellite.yaml") scheduler = glob_file("/sys/block/*/queue/scheduler") scsi = simple_file("/proc/scsi/scsi") scsi_eh_deadline = glob_file('/sys/class/scsi_host/host[0-9]*/eh_deadline') From f6fb9bf561da93b8e1fe2b9de292d64f4c451583 Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Thu, 11 Nov 2021 14:42:06 +0800 Subject: [PATCH 596/892] Add parser mssql_tls_file (#3283) * Add parser mssql_tls_file Signed-off-by: jiazhang * Update format Signed-off-by: jiazhang * Update default spec Signed-off-by: jiazhang --- docs/custom_datasources_index.rst | 2 +- insights/collect.py | 4 ++ insights/parsers/ssl_certificate.py | 24 +++++++++++ .../parsers/tests/test_ssl_certificate.py | 14 ++++++- insights/specs/__init__.py | 1 + insights/specs/datasources/ssl_certificate.py | 18 ++++++++ insights/specs/default.py | 1 + .../tests/datasources/test_ssl_certificate.py | 41 ++++++++++++++++++- 8 files changed, 102 insertions(+), 3 deletions(-) diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 6f2cf47aa..4da8d1ecb 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -79,7 +79,7 @@ insights.specs.datasources.ssl_certificate ------------------------------------------ .. 
automodule:: insights.specs.datasources.ssl_certificate - :members: httpd_ssl_certificate_files, nginx_ssl_certificate_files + :members: httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file :show-inheritance: :undoc-members: diff --git a/insights/collect.py b/insights/collect.py index c3fb71384..fa516953b 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -168,6 +168,10 @@ - name: insights.combiners.nginx_conf._NginxConf enabled: true + # needed for mssql_tls_cert_enddate + - name: insights.parsers.mssql_conf.MsSQLConf + enabled: true + # needed to collect the sap_hdb_version spec that uses the Sap combiner - name: insights.parsers.lssap enabled: true diff --git a/insights/parsers/ssl_certificate.py b/insights/parsers/ssl_certificate.py index cad829759..c8ed7ddc1 100644 --- a/insights/parsers/ssl_certificate.py +++ b/insights/parsers/ssl_certificate.py @@ -12,6 +12,8 @@ ============================================================================================ NginxSSLCertExpireDate - command ``openssl x509 -in nginx_certificate_path -enddate -noout`` ============================================================================================ +MssqlTLSCertExpireDate - command ``openssl x509 -in mssql_tls_cert_file -enddate -noout`` +============================================================================================ """ from insights import parser, CommandParser @@ -251,3 +253,25 @@ class NginxSSLCertExpireDate(CertificateInfo): '/a/b/c.pem' """ pass + + +@parser(Specs.mssql_tls_cert_enddate) +class MssqlTLSCertExpireDate(CertificateInfo): + """ + .. note:: + Please refer to its super-class :class:`insights.parsers.ssl_certificate.CertificateInfo` for more + details. + + It parses the output of ``openssl x509 -in mssql_tls_cert_file -enddate -noout``. 
+
+    Sample output of ``openssl x509 -in mssql_tls_cert_file -enddate -noout``::
+
+        notAfter=Nov 5 01:43:59 2022 GMT
+
+    Examples:
+        >>> type(mssql_date_info)
+        <class 'insights.parsers.ssl_certificate.MssqlTLSCertExpireDate'>
+        >>> mssql_date_info['notAfter'].datetime
+        datetime.datetime(2022, 11, 5, 1, 43, 59)
+    """
+    pass
diff --git a/insights/parsers/tests/test_ssl_certificate.py b/insights/parsers/tests/test_ssl_certificate.py
index 32812ac23..84d39763e 100644
--- a/insights/parsers/tests/test_ssl_certificate.py
+++ b/insights/parsers/tests/test_ssl_certificate.py
@@ -84,6 +84,10 @@
 notAfter=Jan 18 07:02:43 2038 GMT
 '''
 
+MSSQL_CERT_EXPIRE_INFO = '''
+notAfter=Nov 5 01:43:59 2022 GMT
+'''
+
 
 def test_certificate_info_exception():
     with pytest.raises(ParseException):
@@ -153,13 +157,15 @@ def test_doc():
     rhsm_katello_default_ca = ssl_certificate.RhsmKatelloDefaultCACert(context_wrap(RHSM_KATELLO_CERT_OUTPUT1))
     date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO))
     nginx_date_info = ssl_certificate.NginxSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO, args='/a/b/c.pem'))
+    mssql_date_info = ssl_certificate.MssqlTLSCertExpireDate(context_wrap(MSSQL_CERT_EXPIRE_INFO))
     globs = {
         'cert': cert,
         'certs': ca_cert,
         'satellite_ca_certs': satellite_ca_certs,
         'rhsm_katello_default_ca': rhsm_katello_default_ca,
         'date_info': date_info,
-        'nginx_date_info': nginx_date_info
+        'nginx_date_info': nginx_date_info,
+        'mssql_date_info': mssql_date_info
     }
     failed, _ = doctest.testmod(ssl_certificate, globs=globs)
     assert failed == 0
@@ -176,3 +182,9 @@ def test_nginx_ssl_cert_parser():
     assert 'notAfter' in date_info
     assert date_info['notAfter'].str == 'Jan 18 07:02:43 2038'
     assert date_info.cert_path == '/test/c.pem'
+
+
+def test_mssql_tls_cert_parser():
+    date_info = ssl_certificate.MssqlTLSCertExpireDate(context_wrap(MSSQL_CERT_EXPIRE_INFO))
+    assert 'notAfter' in date_info
+    assert date_info['notAfter'].str == 'Nov 5 01:43:59 2022'
diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py
index 655651ef7..1d2c55cd9 100644
--- a/insights/specs/__init__.py
+++ b/insights/specs/__init__.py
@@ -378,6 +378,7 @@ class Specs(SpecSet):
     mounts = RegistryPoint()
     mssql_conf = RegistryPoint()
     mssql_api_assessment = RegistryPoint()
+    mssql_tls_cert_enddate = RegistryPoint()
     multicast_querier = RegistryPoint()
     multipath_conf = RegistryPoint()
     multipath_conf_initramfs = RegistryPoint()
diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py
index 00e8dff3a..34de80d2c 100644
--- a/insights/specs/datasources/ssl_certificate.py
+++ b/insights/specs/datasources/ssl_certificate.py
@@ -4,6 +4,7 @@
 
 from insights.combiners.httpd_conf import HttpdConfTree
 from insights.combiners.nginx_conf import NginxConfTree
+from insights.parsers.mssql_conf import MsSQLConf
 from insights.core.context import HostContext
 from insights.core.dr import SkipComponent
 from insights.core.plugins import datasource
@@ -49,3 +50,20 @@ def nginx_ssl_certificate_files(broker):
     if ssl_certs:
         return [str(ssl_cert.value) for ssl_cert in ssl_certs]
     raise SkipComponent
+
+
+@datasource(MsSQLConf, HostContext)
+def mssql_tls_cert_file(broker):
+    """
+    Get the MSSQL TLS certificate file path configured by the "tlscert" option
+    in the "[network]" section of the MSSQL configuration
+    Arguments:
+        broker: the broker object for the current session
+    Returns:
+        str: the TLS certificate file path set by the "tlscert" option
+    Raises:
+        SkipComponent: Raised if the "tlscert" option isn't found
+    """
+    mssql_conf_content = broker[MsSQLConf]
+    if mssql_conf_content.has_option("network", "tlscert"):
+        return mssql_conf_content.get("network", "tlscert")
+    raise SkipComponent
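For context, a minimal sketch of how these pieces fit together (illustrative only,
using the same /tmp/mssql.pem path as the tests below):

    # /var/opt/mssql/mssql.conf
    [network]
    tlscert = /tmp/mssql.pem

    # The datasource resolves the configured path:
    #     mssql_tls_cert_file(broker)  ->  '/tmp/mssql.pem'
    # command_with_args() in default.py (below) then expands the spec to:
    #     /usr/bin/openssl x509 -in /tmp/mssql.pem -enddate -noout
    # and MssqlTLSCertExpireDate parses the resulting notAfter=... line.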
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 2ca8fdfe8..5e93a505f 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -431,6 +431,7 @@ def md5chk_file_list(broker):
     mounts = simple_file("/proc/mounts")
     mssql_api_assessment = simple_file("/var/opt/mssql/log/assessments/assessment-latest")
     mssql_conf = simple_file("/var/opt/mssql/mssql.conf")
+    mssql_tls_cert_enddate = command_with_args("/usr/bin/openssl x509 -in %s -enddate -noout", ssl_certificate.mssql_tls_cert_file)
     multicast_querier = simple_command("/usr/bin/find /sys/devices/virtual/net/ -name multicast_querier -print -exec cat {} \;")
     multipath_conf = simple_file("/etc/multipath.conf")
     multipath_conf_initramfs = simple_command("/bin/lsinitrd -f /etc/multipath.conf")
diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py
index 1c4c102a3..8576be085 100644
--- a/insights/tests/datasources/test_ssl_certificate.py
+++ b/insights/tests/datasources/test_ssl_certificate.py
@@ -4,8 +4,9 @@
 from insights.tests import context_wrap
 from insights.combiners.httpd_conf import _HttpdConf, HttpdConfTree
 from insights.combiners.nginx_conf import _NginxConf, NginxConfTree
+from insights.parsers.mssql_conf import MsSQLConf
 from insights.specs.datasources.ssl_certificate import (
-    httpd_ssl_certificate_files, nginx_ssl_certificate_files
+    httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file
 )
 
 
@@ -114,6 +115,26 @@
 }
 """.strip()
 
+MSSQL_WITH_TLS = """
+[sqlagent]
+enabled = true
+[EULA]
+accepteula = Y
+[memory]
+memorylimitmb = 2048
+[network]
+tlscert = /tmp/mssql.pem
+""".strip()
+
+MSSQL_WITHOUT_TLS = """
+[sqlagent]
+enabled = true
+[EULA]
+accepteula = Y
+[memory]
+memorylimitmb = 2048
+""".strip()
+
 
 def test_httpd_certificate():
     conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf'))
@@ -186,3 +207,21 @@ def test_nginx_ssl_cert_exception():
     }
     with pytest.raises(SkipComponent):
         nginx_ssl_certificate_files(broker1)
+
+
+def test_mssql_tls_cert_file_found():
+    conf1 = MsSQLConf(context_wrap(MSSQL_WITH_TLS, path='/var/opt/mssql/mssql.conf'))
+    broker1 = {
+        MsSQLConf: conf1
+    }
+    result = mssql_tls_cert_file(broker1)
+    assert result == "/tmp/mssql.pem"
+
+
+def test_mssql_tls_no_cert_exception():
+    conf1 = MsSQLConf(context_wrap(MSSQL_WITHOUT_TLS, path='/var/opt/mssql/mssql.conf'))
+    broker1 = {
+        MsSQLConf: conf1
+    }
+    with pytest.raises(SkipComponent):
+        mssql_tls_cert_file(broker1)

From b7b6f66a6871a2450743c3d3694c5601e34190d2 Mon Sep 17 00:00:00 2001
From: Glutexo
Date: Thu, 11 Nov 2021 18:30:00 +0100
Subject: [PATCH 597/892] Remove unused collect variables (#3284)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The pconn argument of collect() is never used, and the output variable is
initialized later where it is needed, so both can be removed.
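For illustration, a minimal sketch of the resulting call-site change (the full
diff follows below):

    # before: an unused connection object had to be passed in
    tar_file = client.collect(self.config, self.connection)
    # after: collect() takes only the config
    tar_file = client.collect(self.config)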
Signed-off-by: Štěpán Tomsa --- insights/client/__init__.py | 2 +- insights/client/client.py | 3 +- insights/tests/client/test_collect.py | 54 +++++++++++++-------------- 3 files changed, 29 insertions(+), 30 deletions(-) diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 077e56750..8ce8c962e 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -389,7 +389,7 @@ def update_rules(self): @_net def collect(self): # return collection results - tar_file = client.collect(self.config, self.connection) + tar_file = client.collect(self.config) # it is important to note that --to-stdout is utilized via the wrapper RPM # this file is received and then we invoke shutil.copyfileobj diff --git a/insights/client/client.py b/insights/client/client.py index 83c089289..f653bb8fd 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -277,13 +277,12 @@ def get_branch_info(config): return config.branch_info -def collect(config, pconn): +def collect(config): """ All the heavy lifting done here """ branch_info = get_branch_info(config) pc = InsightsUploadConf(config) - output = None rm_conf = pc.get_rm_conf() blacklist_report = pc.create_report() diff --git a/insights/tests/client/test_collect.py b/insights/tests/client/test_collect.py index 02bbeb08f..d3b1ae021 100644 --- a/insights/tests/client/test_collect.py +++ b/insights/tests/client/test_collect.py @@ -31,7 +31,7 @@ def collect_args(*insights_config_args, **insights_config_custom_kwargs): "content_redaction_file": conf_file_content_redaction_file, "core_collect": False} all_insights_config_kwargs.update(insights_config_custom_kwargs) - return InsightsConfig(*insights_config_args, **all_insights_config_kwargs), Mock() + return InsightsConfig(*insights_config_args, **all_insights_config_kwargs) @contextmanager @@ -158,8 +158,8 @@ def test_get_conf_file(get_branch_info, get_conf_file, data_collector): """ If there is no config passed via stdin, it is loaded from a file instead. """ - config, pconn = collect_args() - collect(config, pconn) + config = collect_args() + collect(config) get_conf_file.assert_called_once_with() @@ -171,8 +171,8 @@ def test_get_conf_called_core_collection(get_branch_info, get_conf_file, core_co """ Verify that uploader.json IS loaded when using core collection (from get_rm_conf function) """ - config, pconn = collect_args(core_collect=True) - collect(config, pconn) + config = collect_args(core_collect=True) + collect(config) get_conf_file.assert_called_once() @@ -185,8 +185,8 @@ def test_get_rm_conf_file(get_branch_info, get_conf_file, get_rm_conf, data_coll """ Load configuration of files removed from collection when collection rules are loaded from a file. """ - config, pconn = collect_args() - collect(config, pconn) + config = collect_args() + collect(config) get_rm_conf.assert_called_once_with() @@ -200,8 +200,8 @@ def test_data_collector_file(get_branch_info, get_conf_file, get_rm_conf, data_c """ Configuration from a file is passed to the DataCollector along with removed files configuration. 
""" - config, pconn = collect_args() - collect(config, pconn) + config = collect_args() + collect(config) collection_rules = get_conf_file.return_value rm_conf = get_rm_conf.return_value @@ -220,8 +220,8 @@ def test_core_collector_file(get_branch_info, get_conf_file, get_rm_conf, core_c """ CoreCollector is loaded with rm_conf and a None value for collection_rules """ - config, pconn = collect_args(core_collect=True) - collect(config, pconn) + config = collect_args(core_collect=True) + collect(config) collection_rules = None rm_conf = get_rm_conf.return_value @@ -242,8 +242,8 @@ def test_correct_collector_loaded(get_branch_info, get_conf_file, get_rm_conf, c Verify that core collection is loaded for core_collect=True, and that classic collection is loaded for core_collect=False ''' - config, pconn = collect_args(core_collect=False) - collect(config, pconn) + config = collect_args(core_collect=False) + collect(config) data_collector.return_value.run_collection.assert_called() core_collector.return_value.run_collection.assert_not_called() @@ -253,7 +253,7 @@ def test_correct_collector_loaded(get_branch_info, get_conf_file, get_rm_conf, c core_collector.return_value.run_collection.reset_mock() config.core_collect = True - collect(config, pconn) + collect(config) data_collector.return_value.run_collection.assert_not_called() core_collector.return_value.run_collection.assert_called() @@ -268,9 +268,9 @@ def test_file_signature_ignored(get_branch_info, validate_gpg_sig, data_collecto Signature of configuration from a file is not validated if validation is disabled. """ - config, pconn = collect_args(gpg=False) + config = collect_args(gpg=False) with patch_temp_conf_file(): - collect(config, pconn) + collect(config) validate_gpg_sig.assert_not_called() @@ -285,9 +285,9 @@ def test_file_signature_valid(get_branch_info, validate_gpg_sig, data_collector) """ Correct signature of configuration from a file is recognized. """ - config, pconn = collect_args() + config = collect_args() with patch_temp_conf_file(): - collect(config, pconn) + collect(config) validate_gpg_sig.assert_called_once() @@ -301,10 +301,10 @@ def test_file_signature_invalid(get_branch_info, validate_gpg_sig, data_collecto """ Incorrect signature of configuration from a file skips that file. """ - config, pconn = collect_args() + config = collect_args() with patch_temp_conf_file(): with raises(RuntimeError): - collect(config, pconn) + collect(config) validate_gpg_sig.assert_called() @@ -331,8 +331,8 @@ def test_file_result(get_branch_info, try_disk, raw_config_parser, data_collecto mock.mock_open(read_data='').return_value, mock.mock_open(read_data='[remove]\nfiles=/etc/some_file,/tmp/another_file').return_value] raw_config_parser.side_effect = [Mock(sections=Mock(return_value=['remove']), items=Mock(return_value=[('files', '/etc/some_file,/tmp/another_file')]))] - config, pconn = collect_args() - collect(config, pconn) + config = collect_args() + collect(config) name, args, kwargs = try_disk.mock_calls[0] collection_rules = try_disk.return_value.copy() @@ -353,9 +353,9 @@ def test_file_no_version(get_branch_info, try_disk, data_collector): """ Configuration from file is loaded from the "uploader.json" key. 
""" - config, pconn = collect_args() + config = collect_args() with raises(ValueError): - collect(config, pconn) + collect(config) data_collector.return_value.run_collection.assert_not_called() data_collector.return_value.done.assert_not_called() @@ -369,16 +369,16 @@ def test_file_no_data(get_branch_info, try_disk, data_collector): """ Configuration from file is loaded from the "uploader.json" key. """ - config, pconn = collect_args() + config = collect_args() with raises(RuntimeError): - collect(config, pconn) + collect(config) data_collector.return_value.run_collection.assert_not_called() data_collector.return_value.done.assert_not_called() def test_cmd_blacklist(): - config, pconn = collect_args() + config = collect_args() dc = DataCollector(config) assert dc._blacklist_check('rm') assert dc._blacklist_check('reboot') From 84485692d6f1861eb1d962458dc89652ef15f96e Mon Sep 17 00:00:00 2001 From: Jeremy Crafts Date: Thu, 11 Nov 2021 20:50:52 -0500 Subject: [PATCH 598/892] Feat: Add Malware app as a manifest spec (#3236) * Initial add of malware spec * Add --manifest option, example malware manifest and spec Signed-off-by: Jeremy Crafts * Initial add of malware app code * Add user configurable malware options to the manifest file Signed-off-by: Mark Huth * Fixes for failing unittests and code reviews Signed-off-by: Mark Huth * Various improvements to malware app * Improve find_yara functionality * Change scan_recent option to scan_since * Support getting the rules from a local file * For running commands with call, pass a list rather than a string * Allow scan_only option to take a list of items * Add exclude items list and tidy up scan* functions * Improve the documentation of the malware_options in the manifest * Add exclude_network_filesystems: true|false option * Add scan_since: last option Signed-off-by: Mark Huth * Enable specs for canonical facts and small fix to run_malware spec Signed-off-by: Mark Huth * Add --collector=app option * Wraps the --manifest option to define the app manifest file * Writes app default manifest to manifest file when app used for first time * Split malware manifest into core spec file in /var/lib/insights/manifests and config file in /etc/insights-client Signed-off-by: Mark Huth * Initial commit of malware unittests Signed-off-by: Mark Huth * Fix or skip unittests failing in QE Jenkins Signed-off-by: Mark Huth * Update the test rule for scanning insights-client * Added more unittests Signed-off-by: Mark Huth * Handle some common errors more gracefully Signed-off-by: Mark Huth * Rename malware references to malware-detection * Added more unittests * Manifest written out each time the app is run Signed-off-by: Mark Huth * Moar unittests, mainly the scan include/exclude logic Signed-off-by: Mark Huth * Make some changes to the original malware scanner branch Signed-off-by: Mark Huth * Move tests and remove misc test support files Signed-off-by: Mark Huth * Remove manifest file from app manifest logic Signed-off-by: Mark Huth * Get unittests working ... 
again Signed-off-by: Mark Huth * Added unittests for scan_since option * rearranged test file locations * removed unnecessary code and tests * excluded /var/log/insights-client from being scanned Signed-off-by: Mark Huth * Add malware_detection_misc back into the repo Signed-off-by: Mark Huth * Set config values via env vars Signed-off-by: Mark Huth * Change rules_location url depending on authmethod Signed-off-by: Mark Huth * Fixes for ESSNTL-1575 * Only run malware-detection when specified * Set correct permissions on files created * Handle scan_filesystem: false correctly Signed-off-by: Mark Huth * Remove need for yara and misc files in unittests Signed-off-by: Mark Huth * Check username/password exists when using basic auth Signed-off-by: Mark Huth * Use temp files in some unittests instead of filesystem files * Add unittest for scan_since too Signed-off-by: Mark Huth * Increase unittest coverage * Add tests for network filesystem processing * Fix small bug in processing list env vars Signed-off-by: Mark Huth Co-authored-by: Jeremy Crafts Co-authored-by: Mark Huth Co-authored-by: Bob Fahr <20520336+bfahr@users.noreply.github.com> Co-authored-by: Link Dupont --- .../client/apps/malware_detection/__init__.py | 1265 ++++++++++++++ insights/client/apps/manifests.py | 78 + insights/client/config.py | 18 + insights/client/connection.py | 1 + insights/client/core_collector.py | 6 +- insights/client/phase/v1.py | 3 +- .../specs/datasources/malware_detection.py | 29 + .../client/apps/test_malware_detection.py | 1497 +++++++++++++++++ .../client/phase/test_collect_and_upload.py | 2 + 9 files changed, 2897 insertions(+), 2 deletions(-) create mode 100644 insights/client/apps/malware_detection/__init__.py create mode 100644 insights/client/apps/manifests.py create mode 100644 insights/specs/datasources/malware_detection.py create mode 100644 insights/tests/client/apps/test_malware_detection.py diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py new file mode 100644 index 000000000..e2b59e05a --- /dev/null +++ b/insights/client/apps/malware_detection/__init__.py @@ -0,0 +1,1265 @@ +import os +import re +import time +import json +import yaml +from sys import exit +import logging +from datetime import datetime +from tempfile import NamedTemporaryFile +try: + # python 2 + from urllib import quote as urlencode + from urlparse import urlparse, urlunparse +except ImportError: + # python 3 + from urllib.parse import urlparse, urlunparse, quote as urlencode + +from insights.client.connection import InsightsConnection +from insights.client.constants import InsightsConstants as constants +from insights.client.utilities import ( + generate_machine_id, write_data_to_file, get_time +) +from insights.util.subproc import call, CalledProcessError + +logger = logging.getLogger(__name__) +MIN_YARA_VERSION = "4.1.0" +MALWARE_APP_URL = 'https://console.redhat.com/insights/malware' +MALWARE_CONFIG_FILE = os.path.join(constants.default_conf_dir, "malware-detection-config.yml") +LAST_SCAN_FILE = os.path.join(constants.default_conf_dir, '.last_malware-detection_scan') +DEFAULT_MALWARE_CONFIG = """ +# Configuration file for the Red Hat Insights Malware Detection Client app +# File format is YAML +--- +# Scan the filesystem? +scan_filesystem: true + +# Scan the running processes? 
+# Disabled by default due to its potential performance impact on the host when scanning numerous or large processes
+scan_processes: false
+
+# Perform a simple test scan of the insights-client config directory and process to verify installation and scanning
+# are working correctly. The results from this scan do not show up in the webUI.
+# Once verified, disable this option to perform actual malware scans.
+test_scan: true
+
+# scan_only: a single or list of files/directories/process IDs to scan, for example:
+# scan_only:
+# - /var/www
+# - /home
+# ... means scan only files in /var/www and /home. May also be written as scan_only: [/var/www, /home]
+# scan_only: 12345
+# ... means scan only process ID 12345
+# No value means scan all files/directories/PIDs
+scan_only:
+
+# scan_exclude: a single or list of files/directories to be excluded from filesystem scanning
+# If an item appears in both scan_only and scan_exclude, scan_exclude takes precedence and the item will be excluded
+# The scan_exclude list is pre-populated with a number of top level directories that are recommended to be excluded,
+# as well as the insights-client log directory which could cause extra matches itself
+scan_exclude:
+- /proc
+- /sys
+- /cgroup
+- /selinux
+- /net
+- /mnt
+- /media
+- /dev
+- /var/log/insights-client
+
+# scan_since: scan files created or modified since X days ago or since the 'last' scan.
+# Valid values are integers >= 1 or the string 'last'. For example:
+# scan_since: 1
+# ... means scan files created/modified since 1 day ago
+# scan_since: last
+# ... means scan files created/modified since the last successful scan
+# No value means scan all files regardless of created/modified date
+scan_since:
+
+# Exclude mounted network/external filesystem mountpoints?
+# Scanning files within mounted network filesystems may be slow and cause extra network traffic.
+# They are excluded by default, meaning that files in network/externally mounted filesystems are not scanned.
+# Their mountpoints will be added to the scan_exclude list of directories to be excluded from scanning
+exclude_network_filesystem_mountpoints: true
+
+# List of network/external filesystem types to search for mountpoints on the system.
+# If any mountpoints are found for these filesystem types, the value of the exclude_network_filesystem_mountpoints
+# option will determine if files within the mountpoints are scanned or not.
+network_filesystem_types: [nfs, nfs4, cifs, smbfs, fuse.sshfs, ceph, glusterfs, gfs, gfs2]
+
+# Add extra metadata about each scan match (if possible), eg file type & md5sum, matching line numbers, process name
+# The extra metadata will display in the webUI along with the scan matches
+add_metadata: true
+
+# Specific location of the yara binary file. Autodetected if not specified. For example:
+# yara_binary: /usr/local/bin/yara
+yara_binary:
+
+# Abort a particular scan if it takes longer than scan_timeout seconds. Default is 3600 seconds (1 hour)
+scan_timeout: # 3600
+
+# Run the yara process with this nice priority value. Default is 19 (lowest priority)
+nice_value: # 19
+
+# The max number of CPU threads used by yara when scanning. 
Autodetected, but default is 2 +cpu_thread_limit: # 2 +""".lstrip() + +# All the config options have corresponding environment variables +# Env vars are initially strings and need to be parsed to their appropriate type to match the yaml types +ENV_VAR_TYPES = { + 'boolean': ['SCAN_FILESYSTEM', 'SCAN_PROCESSES', 'TEST_SCAN', 'ADD_METADATA', + 'EXCLUDE_NETWORK_FILESYSTEM_MOUNTPOINTS'], + 'list': ['SCAN_ONLY', 'SCAN_EXCLUDE', 'NETWORK_FILESYSTEM_TYPES'], + 'integer': ['SCAN_TIMEOUT', 'NICE_VALUE', 'CPU_THREAD_LIMIT', 'STRING_MATCH_LIMIT'], + 'int_or_str': ['SCAN_SINCE'], + 'string': ['YARA_BINARY'] +} + + +class MalwareDetectionClient: + def __init__(self, insights_config): + # insights_config is used to access insights-client auth and network config when downloading rules + self.insights_config = insights_config + + # Load the malware-detection config file + self.config = self._load_config() + + # Early check if the yara binary exists. No point continuing if it doesn't + self.yara_binary = self._find_yara() + + # Get/set the values of assorted integer config values - mainly options used with the yara command + for option, value in [('nice_value', 19), + ('scan_timeout', 3600), + ('cpu_thread_limit', 2), + ('string_match_limit', 10)]: + try: + setattr(self, option, int(self._get_config_option(option, value))) + except Exception as e: + logger.error("Problem setting configuration option %s: %s", option, str(e)) + exit(constants.sig_kill_bad) + + # If doing a test scan, then ignore the other scan_* options because test scan sets its own values for them + if not self._process_test_scan_option(): + self._process_scan_options() + + # Obtain the rules to be used by yara + self.rules_file = self._get_rules() + + # Build the yara command, with its various command line options, that will be run + self.yara_cmd = self._build_yara_command() + + # host_scan is a dictionary into which all the scan matches are stored. Its structure is like: + # host_scan = {rule_name: [{source: ..., stringData: ..., stringIdentifier: ..., stringOffset: ...}, + # {source: ...}], + # rule_name: [{...}, {...}, {...}], + # ... 
}
+        # host_scan_mutation is the host_scan dict converted to a GraphQL mutation query string
+        self.host_scan = {}
+        self.host_scan_mutation = ''
+
+        # Check if we are adding extra metadata to each scan match
+        self.add_metadata = self._get_config_option('add_metadata', False)
+
+        self.matches = 0
+
+    def run(self):
+        # Start the scans and record the time they were started
+        scan_start = get_time()
+        self.scan_filesystem()
+        self.scan_processes()
+
+        if self.do_filesystem_scan or self.do_process_scan:
+            # If any scans were performed then get the results as a GraphQL mutation query
+            # This mutation query is what is uploaded to the malware backend
+            host_scan_mutation = self._create_host_scan_mutation()
+
+            # Write a message to the user informing them whether there were matches and what to do next
+            if self.matches == 0:
+                logger.info("No rule matches found.\n")
+            else:
+                logger.info("Found %d rule match%s.", self.matches, 'es' if self.matches > 1 else '')
+                if not self.test_scan:
+                    logger.info("Please visit %s for more information\n", MALWARE_APP_URL)
+
+            # Write the scan start time to disk (it's used by the 'scan_since: last' option)
+            # Only write the scan time after scans have completed without error or interruption, and it's not a test scan
+            if not self.test_scan:
+                write_data_to_file(scan_start, LAST_SCAN_FILE)
+                os.chmod(LAST_SCAN_FILE, 0o644)
+            else:
+                logger.info("\nRed Hat Insights malware-detection app test scan complete.\n"
+                            "Test scan results are not recorded in the Insights UI (%s)\n"
+                            "To perform proper scans, please set test_scan: false in %s\n",
+                            MALWARE_APP_URL, MALWARE_CONFIG_FILE)
+
+            # This is what is uploaded to the malware backend
+            return host_scan_mutation
+        else:
+            logger.error("No scans performed, no results to upload.")
+            exit(constants.sig_kill_bad)
+
+    @staticmethod
+    def _load_config():
+        # Load the malware-detection config file. Write out a default one first if it doesn't already exist
+        if not os.path.isfile(MALWARE_CONFIG_FILE):
+            logger.info("Writing the malware-detection app default configuration to %s", MALWARE_CONFIG_FILE)
+            write_data_to_file(DEFAULT_MALWARE_CONFIG, MALWARE_CONFIG_FILE)
+            os.chmod(MALWARE_CONFIG_FILE, 0o644)
+
+        try:
+            with open(MALWARE_CONFIG_FILE) as m:
+                return yaml.safe_load(m)
+        except Exception as e:
+            logger.error("Error encountered loading the malware-detection app config file %s:\n%s",
+                         MALWARE_CONFIG_FILE, str(e))
+            exit(constants.sig_kill_bad)
+
+    def _find_yara(self):
+        """
+        Find the yara binary on the local system and check its version >= MIN_YARA_VERSION
+        """
+        def yara_version_ok(yara):
+            # Check the installed yara version >= MIN_YARA_VERSION
+            installed_yara_version = call([[yara, '--version']]).strip()
+            try:
+                if float(installed_yara_version[:3]) < float(MIN_YARA_VERSION[:3]):
+                    raise RuntimeError("Found yara version %s, but malware-detection requires version >= %s\n"
+                                       "Please install a later version of yara."
+                                       % (installed_yara_version, MIN_YARA_VERSION))
+            except RuntimeError as e:
+                logger.error(str(e))
+                exit(constants.sig_kill_bad)
+            except Exception as e:
+                logger.error("Error getting the version of the specified yara binary %s: %s" % (yara, str(e)))
+                exit(constants.sig_kill_bad)
+            # If we are here then the version of yara was ok
+            return True
+
+        yara = self._get_config_option('yara_binary')
+        if yara and not os.path.isfile(yara):
+            logger.error("Couldn't find the specified yara binary %s. Please check it exists", yara)
Please check it exists", yara) + exit(constants.sig_kill_bad) + elif yara and yara_version_ok(yara): + logger.debug("Using specified yara binary: %s", yara) + return yara + + try: + yara = str(call([['which', 'yara']])).strip() + except CalledProcessError: + logger.error("Couldn't find yara. Please ensure the yara package is installed") + exit(constants.sig_kill_bad) + yara_version_ok(yara) # Generates an error if not ok + logger.debug("Using yara binary: %s", yara) + return yara + + def _process_scan_options(self): + """ + Initialize the various scan flags and lists and run methods that may change/populate them + """ + self.do_filesystem_scan = self._get_config_option('scan_filesystem', True) + self.do_process_scan = self._get_config_option('scan_processes', False) + if not (self.do_filesystem_scan or self.do_process_scan): + logger.error("Both filesystem and process scans are disabled. Nothing to do.") + exit(constants.sig_kill_bad) + + self.scan_fsobjects = [] + self.scan_pids = [] + + self._process_scan_only_option() + self._process_scan_exclude_option() + self._process_scan_since_option() + self._process_exclude_network_filesystem_mountpoints_option() + + def _process_test_scan_option(self): + self.test_scan = self._get_config_option('test_scan', False) + if not self.test_scan: + return False + + self.scan_since_dict = {'timestamp': None} + self.scan_exclude_list = [] + self.network_filesystem_mountpoints = [] + + # For matching the test rule, scan the insights config file and the currently running process + # Make sure the config file exists first though! + if os.path.isfile(MALWARE_CONFIG_FILE): + self.do_filesystem_scan = True + self.scan_fsobjects = [MALWARE_CONFIG_FILE] + else: + self.do_filesystem_scan = False + self.scan_fsobjects = [] + + self.do_process_scan = True + self.scan_pids = [str(os.getpid())] + logger.info("\nPerforming a test scan of %sthe current process (PID %s) " + "to verify the malware-detection app is installed and scanning correctly ...\n", + "%s and " % self.scan_fsobjects[0] if self.do_filesystem_scan else "", self.scan_pids[0]) + return True + + def _process_scan_only_option(self): + """ + Parse the scan_only option, if specified, to get a list of things to scan + """ + scan_only = self._get_config_option('scan_only') + if scan_only: + # Process the scan_only option as a list of items to scan + # There may be both files/directories and PIDs all together + # Strings are assumed to be files/directories and ints are assumed to be process IDs + if not isinstance(scan_only, list): + scan_only = [scan_only] + for item in scan_only: + if isinstance(item, str) and self.do_filesystem_scan: + # Remove extras slashes (/) in the file name and leading double slashes too (normpath doesn't) + item = os.path.normpath(item).replace('//', '/') + # Assume the item represents a filesystem item + if os.path.exists(item): + self.scan_fsobjects.append(item) + else: + logger.info("Skipping missing scan_only filesystem item: '%s'", item) + elif isinstance(item, int) and self.do_process_scan: + # Assume the item represents a process ID + if os.path.exists('/proc/%s' % item): + self.scan_pids.append(str(item)) + else: + logger.info("Skipping missing scan_only PID: %s", item) + else: + logger.info("Skipping scan_only item: %s", item) + + if self.scan_fsobjects: + logger.info("Scan only the specified filesystem item%s: %s", "s" if len(self.scan_fsobjects) > 1 else "", + self.scan_fsobjects) + if self.scan_pids: + logger.info("Scan only the specified process ID%s: %s", "s" if 
len(self.scan_pids) > 1 else "", + self.scan_pids) + if not (self.scan_fsobjects or self.scan_pids): + logger.error("Unable to scan the items specified for the scan_only option") + exit(constants.sig_kill_bad) + + def _process_scan_since_option(self): + """ + scan_since is specified as an integer representing the number of days ago to scan for modified files + If the option was specified and valid, then get the corresponding unix timestamp for the specified + number of days ago from now, which is used for comparing file modification times + """ + self.scan_since_dict = {'timestamp': None, 'datetime': None} + scan_since = self._get_config_option('scan_since') + if scan_since is not None: + timestamp = get_scan_since_timestamp(scan_since) + if timestamp: + self.scan_since_dict['timestamp'] = timestamp + self.scan_since_dict['datetime'] = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S') + message = "Scan for files created/modified since %s%s" + if isinstance(scan_since, str): + submessage = 'last successful scan on ' + else: + submessage = '%s day%s ago on ' % (scan_since, "s" if scan_since > 1 else "") + logger.info(message, submessage, self.scan_since_dict['datetime']) + + def _process_scan_exclude_option(self): + """ + Simple parse of the scan_exclude option (if specified) to get a list of valid items to exclude + """ + if not self.do_filesystem_scan: + return + + self.scan_exclude_list = [] + scan_exclude = self._get_config_option('scan_exclude') + if scan_exclude: + if not isinstance(scan_exclude, list): + # Convert scan_exclude to a list if only a single non-list item was specified + scan_exclude = [str(scan_exclude)] + for item in scan_exclude: + item = os.path.normpath(item).replace('//', '/') + if os.path.exists(item): + self.scan_exclude_list.append(item) + else: + logger.debug("Skipping missing scan_exclude item: '%s'", item) + logger.info("Excluding specified filesystem item%s: %s", "s" if len(self.scan_exclude_list) > 1 else "", + self.scan_exclude_list) + + def _process_exclude_network_filesystem_mountpoints_option(self): + """ + If exclude_network_filesystem_mountpoints is true, get a list of mountpoints of mounted network filesystems. + The network_filesystem_types option has the list of network filesystems types to look for mountpoints for, + eg NFS, CIFS, SMBFS, SSHFS, Ceph, GlusterFS, GFS. 
+ The list of network filesystem mountpoints will be added to the list of directories to exclude from scanning + """ + if not self.do_filesystem_scan: + return + + self.network_filesystem_mountpoints = [] + if not self._get_config_option('exclude_network_filesystem_mountpoints'): + # We aren't excluding network filesystems, leave it as a blank list (ie nothing to exclude) + return + + network_filesystem_types = self._get_config_option('network_filesystem_types') + if not network_filesystem_types: + logger.error("No value specified for 'network_filesystem_types' option") + exit(constants.sig_kill_bad) + + if isinstance(network_filesystem_types, list): + network_filesystem_types = ','.join(network_filesystem_types) + cmd = ['findmnt', '-t', network_filesystem_types, '-n', '-o', 'TARGET'] + logger.debug("Command to find mounted network filesystems: %s", ' '.join(cmd)) + try: + output = call([cmd]) + except CalledProcessError as err: + logger.error("Unable to get network filesystem mountpoints: %s", err.output.strip()) + exit(constants.sig_kill_bad) + + self.network_filesystem_mountpoints = str(output).strip().split('\n') if output else [] + if self.network_filesystem_mountpoints: + logger.info("Excluding network filesystem mountpoints: %s", self.network_filesystem_mountpoints) + else: + logger.debug("No mounted network filesystems found") + + def _get_rules(self): + """ + Obtain the rules used by yara for scanning from the rules_location option. + They can either be downloaded from the malware backend or obtained from a local file. + """ + self.rules_location = self._get_config_option( + 'rules_location', "https://console.redhat.com/api/malware-detection/v1/signatures.yar" + ) + + # If rules_location starts with a /, assume its a file rather than a URL + if self.rules_location.startswith('/'): + # Remove any extra slashes from the file name and from the start too (normpath doesn't remove those) + rules_file = os.path.normpath(self.rules_location).replace('//', '/') + if not os.path.isfile(rules_file): + logger.error("Couldn't find specified rules file: %s", rules_file) + exit(constants.sig_kill_bad) + logger.debug("Using specified rules file: %s", rules_file) + return rules_file + + # If we are here, then we are downloading the rules from the malware backend + # Check if insights-config is defined first because we need to access its auth and network config + if not self.insights_config: + logger.error("Couldn't access the insights-client configuration") + exit(constants.sig_kill_bad) + + # Make sure the rules_location starts with https:// + if not re.match('^https?://', self.rules_location): + self.rules_location = 'https://' + self.rules_location + + # if insights-client is using cert auth or basic auth without a username and password, append 'cert.' to the url + authmethod = self.insights_config.authmethod if hasattr(self.insights_config, 'authmethod') else 'CERT' + username = self.insights_config.username if hasattr(self.insights_config, 'username') else '' + password = self.insights_config.password if hasattr(self.insights_config, 'password') else '' + if authmethod == 'CERT' or (authmethod == 'BASIC' and not (username or password)): + self.insights_config.authmethod = 'CERT' + parsed_url = urlparse(self.rules_location) + if not parsed_url.netloc.startswith('cert.'): + self.rules_location = urlunparse(parsed_url._replace(netloc='cert.' 
+ parsed_url.netloc)) + + # If doing a test scan, replace signatures.yar (or any other file suffix) with test-rule.yar + if self.test_scan: + self.rules_location = self._get_test_rule_location(self.rules_location) + + logger.debug("Downloading rules from: %s", self.rules_location) + try: + self.insights_config.cert_verify = True + conn = InsightsConnection(self.insights_config) + response = conn.get(self.rules_location) + if response.status_code != 200: + logger.error("%s %s: %s", response.status_code, response.reason, response.text) + exit(constants.sig_kill_bad) + except Exception as e: + logger.error("Unable to download rules from %s: %s", self.rules_location, str(e)) + exit(constants.sig_kill_bad) + + self.temp_rules_file = NamedTemporaryFile(prefix='tmp_malware-detection-client_rules.', mode='wb', delete=True) + self.temp_rules_file.write(response.content) + self.temp_rules_file.flush() + return self.temp_rules_file.name + + def _build_yara_command(self): + """ + Get all the switches for the yara command to be run, for example: + - whether the rules file is compiled or not (-C) + - the number of CPU threads to use (-p) + - the nice command and its value to use (nice -n 'value') + - scan timeouts (-a) + """ + # Detect if the rules file is a text or binary (compiled) file (or otherwise) + output = call([['file', '-b', self.rules_file]]) + rule_type = output.strip().lower() + if os.path.getsize(self.rules_file) == 0 or rule_type == 'empty': + logger.error("Rules file %s is empty", self.rules_file) + exit(constants.sig_kill_bad) + + compiled_rules_flag = '-C' if rule_type.startswith('yara') or rule_type == 'data' else '' + logger.debug("Rules file type: '%s', Compiled rules: %s", rule_type, compiled_rules_flag == '-C') + + # Quickly test the rules file to make sure it contains usable rules! 
+ # Note, if the compiled_rules_flag is '' it must be removed from the list or it causes problems + cmd = list(filter(None, [self.yara_binary, '--fail-on-warnings', '-p', '1', '-f', compiled_rules_flag, + self.rules_file, '/dev/null'])) + try: + call([cmd]) + except CalledProcessError as err: + logger.error("Unable to use rules file %s: %s", self.rules_file, err.output.strip()) + exit(constants.sig_kill_bad) + + # Limit the number of threads used by yara to limit the CPU load of the scans + # If system has 2 or fewer CPUs, then use just one thread + nproc = call('nproc').strip() + if not nproc or int(nproc) <= 2: + self.cpu_thread_limit = 1 + logger.debug("Using %s CPU thread(s) for scanning", self.cpu_thread_limit) + + # Construct the (partial) yara command that will be used later for scanning files and processes + # The argument for the files and processes that will be scanned will be added later + yara_cmd = list(filter(None, ['nice', '-n', str(self.nice_value), self.yara_binary, '-s', '-N', + '-a', str(self.scan_timeout), '-p', str(self.cpu_thread_limit), '-r', '-f', + compiled_rules_flag, self.rules_file])) + logger.debug("Yara command: %s", yara_cmd) + return yara_cmd + + def scan_filesystem(self): + if not self.do_filesystem_scan: + return False + + # Process the filesystem items to scan + # If self.scan_fsobjects is set, then just scan its items, less any items in the exclude list + # And exclude the rules file, unless that's the thing we specifically want to scan + # scan_dict will contain all the toplevel directories to scan, and any particular files/subdirectories to scan + if self.rules_file not in self.scan_fsobjects: + self.scan_exclude_list.append(self.rules_file) + scan_dict = process_include_exclude_items(include_items=self.scan_fsobjects, + exclude_items=self.scan_exclude_list, + exclude_mountpoints=self.network_filesystem_mountpoints) + logger.debug("Filesystem objects to be scanned in: %s", sorted(scan_dict.keys())) + + logger.info("Starting filesystem scan ...") + fs_scan_start = time.time() + + for toplevel_dir in sorted(scan_dict): + # Make a copy of the self.yara_cmd list and add to it the thing to scan + cmd = self.yara_cmd[:] + dir_scan_start = time.time() + + specified_log_txt = "specified " if 'include' in scan_dict[toplevel_dir] else "" + if self.scan_since_dict['timestamp']: + logger.info("Scanning %sfiles in %s modified since %s ...", specified_log_txt, toplevel_dir, + self.scan_since_dict['datetime']) + # Find the recently modified files in the given top level directory + scan_list_file = NamedTemporaryFile(prefix='%s_scan_list.' % os.path.basename(toplevel_dir), + mode='w', delete=True) + if 'include' in scan_dict[toplevel_dir]: + find_modified_include_items(scan_dict[toplevel_dir]['include'], self.scan_since_dict['timestamp'], scan_list_file) + else: + find_modified_in_directory(toplevel_dir, self.scan_since_dict['timestamp'], scan_list_file) + + scan_list_file.flush() + cmd.extend(['--scan-list', scan_list_file.name]) + else: + logger.info("Scanning %sfiles in %s ...", specified_log_txt, toplevel_dir) + if 'include' in scan_dict[toplevel_dir]: + scan_list_file = NamedTemporaryFile(prefix='%s_scan_list.' 
% os.path.basename(toplevel_dir), + mode='w', delete=True) + scan_list_file.write('\n'.join(scan_dict[toplevel_dir]['include'])) + scan_list_file.flush() + cmd.extend(['--scan-list', scan_list_file.name]) + else: + cmd.append(toplevel_dir) + + logger.debug("Yara command: %s", cmd) + try: + output = call([cmd]).strip() + except CalledProcessError as cpe: + logger.error("Unable to scan %s: %s", toplevel_dir, cpe.output.strip()) + continue + + self.parse_scan_output(output.strip()) + + dir_scan_end = time.time() + logger.info("Scan time for %s: %d seconds", toplevel_dir, (dir_scan_end - dir_scan_start)) + + fs_scan_end = time.time() + logger.info("Filesystem scan time: %s", time.strftime("%H:%M:%S", time.gmtime(fs_scan_end - fs_scan_start))) + return True + + def scan_processes(self): + if not self.do_process_scan: + return False + + logger.info("Starting processes scan ...") + pids_scan_start = time.time() + + if not self.scan_pids: + # Get list of process ids to scan + all_pids = [entry for entry in os.listdir('/proc') if entry.isdigit()] + exclude_pids = [str(os.getpid())] # exclude our script's pid at least + self.scan_pids = sorted(list(set(all_pids) - set(exclude_pids)), key=lambda pid: int(pid)) + + for scan_pid in self.scan_pids: + pid_scan_start = time.time() + logger.info("Scanning process %s ...", scan_pid) + cmd = self.yara_cmd + [str(scan_pid)] + logger.debug("Yara command: %s", cmd) + try: + output = call([cmd]).strip() + except CalledProcessError as cpe: + logger.error("Unable to scan process %s: %s", scan_pid, cpe.output.strip()) + continue + + self.parse_scan_output(output) + + pid_scan_end = time.time() + logger.info("Scan time for process %s: %d seconds", scan_pid, (pid_scan_end - pid_scan_start)) + + pids_scan_end = time.time() + logger.info("Processes scan time: %s", time.strftime("%H:%M:%S", time.gmtime(pids_scan_end - pids_scan_start))) + return True + + def parse_scan_output(self, output, exclude_items=[]): + if not output: + return + + # Each 'set' of output lines consists of 1 line containing the rule and file/pid (aka source) it matches + # Followed by one or more related lines of matching string data from that source, eg + # ... + # rule_name source + Set of 3 related lines + # 0x_offset:string_identifier:string_data | + # 0x_offset:string_identifier:string_data + + # rule_name source + Set of 2 related lines + # 0x_offset:string_identifier:string_data + + # ... + + def skip_string_data_lines(string_data_lines): + # Skip the 0x... 
lines containing string match data + while string_data_lines and string_data_lines[0].startswith('0x'): + logger.debug("Skipping string data line '%s'", string_data_lines[0]) + string_data_lines.pop(0) + + output_lines = output.split("\n") + while output_lines: + if 'error scanning ' in output_lines[0]: + if output_lines[0].endswith('error: 4'): + # Yara 'could not map file' error - only display this error if debugging (spammy) + logger.debug(output_lines[0]) + else: + logger.info(output_lines[0]) + output_lines.pop(0) # Skip the error scanning line + # Skip any string match lines after the error scanning line + skip_string_data_lines(output_lines) + continue + # Get the rule_name and source from the first line in the set + try: + rule_name, source = output_lines[0].rstrip().split(" ", 1) + except ValueError as err: + # Hopefully shouldn't happen but log it and continue processing + logger.debug("Error parsing rule match '%s': %s", output_lines[0], err) + output_lines.pop(0) # Skip the erroneous line + # Skip any string match lines afterwards until we get to the next rule match line + skip_string_data_lines(output_lines) + continue + + # All good so far, skip over the line containing the rule name and matching source file/pid + output_lines.pop(0) + + # If the rule or the source file/pid is to be excluded, then skip over its scan matches + # and move onto the next match + # if any([exclude_rule.lower() in rule_name.lower() for exclude_rule in self.exclude_rules]) \ + # or any([ei in source for ei in exclude_items]): + # skip_string_data_lines(output_lines) + # continue + + # Check if the rule name contains a ':' or doesn't start with a char/string + # It shouldn't and its likely to be due to a malformed string_offset line + # Skip any further scan matches until the next rule match + if ':' in rule_name or not re.match('^[a-zA-Z]+', rule_name): + skip_string_data_lines(output_lines) + continue + + rule_match = {'rule_name': rule_name, 'matches': []} + source_type = "process" if source.isdigit() else "file" + + # Parse the string match data for the remaining lines in the set + string_matches = 0 + while output_lines and output_lines[0].startswith('0x'): + if string_matches < self.string_match_limit: + try: + string_offset, string_identifier, string_data = output_lines[0].split(':', 2) + string_offset = int(string_offset, 0) + except ValueError as err: + logger.debug("Error parsing string match '%s': %s", output_lines[0], err) + output_lines.pop(0) + continue + rule_match_dict = {'source': source, + 'string_data': string_data.strip(), + 'string_identifier': string_identifier, + 'string_offset': string_offset, + 'metadata': {'source_type': source_type}} + rule_match['matches'].extend([rule_match_dict]) + output_lines.pop(0) + string_matches += 1 + + # If string_match_limit is 0 or there was no string data, there will be no rule_matches, + # but still record the file/pid source that was matched + if not rule_match['matches']: + rule_match_dict = {'source': source, + 'string_data': '', + 'string_identifier': '', + 'string_offset': -1, + 'metadata': {'source_type': source_type}} + rule_match['matches'] = [rule_match_dict] + + if self.add_metadata: + # Add extra data to each rule match, beyond what yara provides + # Eg, for files: line numbers & context, checksums; for processes: process name + # TODO: find more pythonic ways of doing this stuff instead of using system commands + metadata_func = self._add_file_metadata if source_type == 'file' else self._add_process_metadata + 
metadata_func(rule_match['matches']) + + self.matches += 1 + logger.info("Matched rule %s in %s %s", rule_name, source_type, source) + logger.debug(rule_match) + if self.host_scan.get(rule_match['rule_name']): + self.host_scan[rule_match['rule_name']].extend(rule_match['matches']) + else: + self.host_scan[rule_match['rule_name']] = rule_match['matches'] + + def _add_process_metadata(self, rule_matches): + """ + Add extra data to the process scan matches beyond what is provided by yara, eg process name + """ + # All passed in rule_matches will have the same source PID + # Check the process still exists before obtaining the metadata about it + source = rule_matches[0]['source'] + if not os.path.exists('/proc/%s' % source): + return + + # Get name of process from ps command + # -h: no output header, -q: only the specified process, -o args: just the process name and args + try: + process_name = call([['ps', '-hq', source, '-o', 'args']]) + except CalledProcessError: + process_name = 'unknown' + + for rule_match in rule_matches: + rule_match['metadata'].update({'process_name': process_name}) + + def _add_file_metadata(self, rule_matches): + """ + Add extra data to the file scan matches beyond what is provided by yara + - eg matching line numbers, line context, file checksum + - Use grep to get the line numbers & sed to get the line + """ + def get_line_from_file(file_name, line_number): + # Extract the line at line_number from file_name + line_length_limit = 120 + try: + line = call([['sed', '%dq;d' % line_number, file_name]]).strip() + except CalledProcessError: + line = "" + # Limit line length if necessary and urlencode it to minimize problems with GraphQL when uploading + return urlencode(line if len(line) < line_length_limit else line[:line_length_limit] + "...") + + # All passed in rule_matches will have the same source file + # Check the file still exists before obtaining the metadata about it + source = rule_matches[0]['source'] + if not os.path.exists(source): + return + + # Get the file type, mime type and md5sum hash of the source file + try: + file_type = call([['file', '-b', source]]).strip() + mime_type = call([['file', '-bi', source]]).strip() + md5sum = call([['md5sum', source]]).strip().split()[0] + except Exception: + file_type = mime_type = md5sum = "" + + grep_string_data_match_list = [] + if mime_type and 'charset=binary' not in mime_type: + # Get the line numbers for each of yara's string_data matches in the source file, but not for binary files + # Build a grep command that searches for each of the string_data patterns in the source file + # For each string_data pattern that grep finds, the grep output will have the form... + # line_number:offset_from_0:string_data_pattern + + # Get the set of patterns to grep for, eg ['pattern1', 'pattern2', etc], ie remove duplicate patterns + grep_string_data_pattern_set = set([match['string_data'] for match in rule_matches]) + if grep_string_data_pattern_set: + # Build an option list for grep, eg ['-e', 'pattern1', '-e', 'pattern2', ... etc] + # zip creates a list of tuples, eg [('-e', 'pattern'), ('-e', 'pattern2'), ...], then flatten the list + grep_string_data_patterns = [item for tup in list(zip(['-e'] * len(grep_string_data_pattern_set), + grep_string_data_pattern_set)) + for item in tup] + # Create the grep command to execute. 
-F means don't interpret regex special chars in the patterns + grep_command = ['/bin/grep', '-Fbon'] + grep_string_data_patterns + [source] + logger.debug("grep command: %s", grep_command) + try: + grep_output = call([grep_command]) + except CalledProcessError: + grep_output = "" + + # Now turn the grep output into a list of tuples for easier searching a little later, ie + # [(line_number, offset_from_0, string_data_pattern), (...), ] + if grep_output and not grep_output.lower().startswith('binary'): + grep_string_data_match_list = list(map(lambda grep_output_line: tuple(grep_output_line.split(':', 3)), + grep_output.splitlines())) + + for rule_match in rule_matches: + metadata = rule_match['metadata'] + metadata.update({'file_type': file_type, + 'mime_type': mime_type, + 'md5sum': md5sum}) + if grep_string_data_match_list: + # Now, for each offset_from_0 in the grep output, we want to match it with the corresponding + # string_offset value from the yara output so we can get the line number for that string_data match + # And while we are here, get the line from the source file at that line number + line_number = None + for grep_list_item in grep_string_data_match_list: + if int(grep_list_item[1]) == rule_match['string_offset']: + line_number = int(grep_list_item[0]) + break + if line_number: + metadata.update({'line_number': line_number, + 'line': get_line_from_file(source, line_number)}) + + def _create_host_scan_mutation(self): + # Build the mutation text + mutation_header = """ + mutation HostScan { + recordHostScan( + input: { + scannedhost: { + insightsId: "%s" + rulesScanned: [""" % generate_machine_id() + + mutation_footer = """ + ] + } + } + ) { + success + } + }""" + + mutation = mutation_header + for rule_name in self.host_scan.keys(): + rule_scan = """{ + ruleName: "%s" + stringsMatched: [""" % rule_name + for match in self.host_scan[rule_name]: + rule_scan += """{ + source: "%s" + stringData: %s + stringIdentifier: %s + stringOffset: "%s" + metadata: "%s" + }, """ % (match['source'], + json.dumps(match['string_data']), + json.dumps(match['string_identifier']), + match['string_offset'], + json.dumps(match['metadata']).replace('"', '\\"')) + rule_scan += "]}, " + mutation += rule_scan + + mutation += mutation_footer + return mutation + + def _get_config_option(self, option, default_value=None): + """ + Get the value of a config option or if it doesn't exist or is None, the default_value + """ + value = os.getenv(option.upper()) + if value is not None: + return self._parse_env_var(option.upper(), value) + value = self.config.get(option) + return value if value is not None else default_value + + @staticmethod + def _parse_env_var(env_var, value): + """ + Parse specific environment variables to make sure they have appropriate values + """ + logger.debug("Found environment variable: %s, value: %s", env_var, value) + # Parse these env vars as booleans + if env_var in ENV_VAR_TYPES['boolean']: + return value.lower() in ('true', 'yes', 't', 'y') + + # Parse these as lists and convert any numeric values into ints + if env_var in ENV_VAR_TYPES['list']: + if value: + value_list = value.split(',') if ',' in value else [value] + return list(map(lambda x: int(x) if str.isdigit(x) else x, value_list)) + else: + return [] + + # Parse scan_since, can be either an int or a string (ie 'last') + if env_var in ENV_VAR_TYPES['int_or_str']: + return int(value) if str.isdigit(value) else value + + # Parse these as ints + if env_var in ENV_VAR_TYPES['integer']: + try: + return int(value) + except 
ValueError as e:
+            logger.error("Problem parsing environment variable %s: %s", env_var, str(e))
+            exit(constants.sig_kill_bad)
+
+        # env_var value doesn't require parsing, just return it as is (ie. as a string)
+        return value
+
+    @staticmethod
+    def _get_test_rule_location(rules_location):
+        test_rule = 'test-rule.yar'
+        # Nothing to do if the location already ends with test_rule
+        if rules_location.endswith(test_rule):
+            return rules_location
+        # Replace the last entry from the path with the test-rule
+        # A bit crude but it seems to work ok with both URL and file paths
+        return os.path.join(os.path.dirname(rules_location), test_rule)
+
+
+#
+# Utility functions
+# Mainly for including / excluding certain directories for scanning
+# And also for finding files that have been modified recently
+#
+def get_toplevel_dirs():
+    """
+    Returns a list of the top level directories directly under root (/).
+    """
+    toplevel_dirs = sorted(filter(lambda x: not os.path.islink(x), map(lambda x: "/" + x, os.listdir('/'))))
+    return toplevel_dirs
+
+
+def get_parent_dirs(item, parent_dir_list, base_case='/'):
+    """
+    Get a list of parent directories of a particular filesystem item, stopping at base_case (root by default)
+    Eg for get_parent_dirs('/path/to/some/item', parent_dir_list) ->
+    parent_dir_list = ['/path', '/path/to', '/path/to/some', '/path/to/some/item']
+    """
+    if os.path.samefile(item, base_case) or os.path.samefile(item, '/'):
+        return
+    get_parent_dirs(os.path.dirname(item), parent_dir_list, base_case)
+    parent_dir_list.append(item)
+
+
+def process_include_items(include_items=[]):
+    """
+    Process the include items to get a list of directories to be scanned
+    If there are no entries then get the list of top level directories under root (/)
+    :return: a list of directories to be scanned. It never returns an empty list.
+    """
+    default_values = get_toplevel_dirs()
+
+    logger.debug("Parsing include items ...")
+    parsed_list = []
+    for item in include_items:
+        item = item.strip()
+        if not item or item.startswith('#'):
+            continue
+        include_item = os.path.normpath(item).replace('//', '/')
+        if os.path.exists(include_item):
+            # ignore the include_item if it's not a full directory path
+            if not include_item.startswith('/'):
+                logger.debug("Skipping partial directory path '%s' ...", include_item)
+                continue
+            elif os.path.islink(include_item):
+                logger.debug("Skipping link '%s' ...", include_item)
+                continue
+            elif os.path.samefile(include_item, '/'):
+                # Found / in include item list. No need to get the other items because / trumps all
+                logger.debug("Found root directory in list of items to scan. Ignoring the other items ...")
+                parsed_list = default_values
+                break
+            else:
+                parsed_list.append(include_item)
+        else:
+            logger.debug("Skipping missing item '%s' ...", include_item)
+
+    if not parsed_list:
+        logger.debug("No items specified to be scanned. Using default values %s ...", default_values)
+        parsed_list = default_values
+    else:
+        # Remove any duplicates and any children of parent directories before returning
+        parsed_list = remove_child_items(sorted(list(set(parsed_list))))
+
+    logger.debug("Include items: %s", parsed_list)
+    return parsed_list
+
+
+def process_exclude_items(exclude_items=[]):
+    """
+    Process the exclude items to get a list of directories to NOT be scanned
+    :return: a list of directories to not be scanned if any, otherwise an empty list
+    """
+    logger.debug("Parsing exclude items ...")
+    parsed_list = []
+    for item in exclude_items:
+        item = item.strip()
+        if not item or item.startswith('#'):
+            continue
+        exclude_item = os.path.normpath(item).replace('//', '/')
+        if os.path.exists(exclude_item):
+            # ignore the exclude_item if it's not a full directory path
+            if os.path.samefile(exclude_item, '/'):
+                # Found / in exclude list. No need to get the other items because / trumps all
+                logger.debug("Found root directory in the exclude list. Expanding it to all toplevel directories ...")
+                parsed_list = get_toplevel_dirs()
+                break
+            elif not exclude_item.startswith('/'):
+                logger.debug("Skipping partial directory path '%s' ...", exclude_item)
+                continue
+            else:
+                parsed_list.append(exclude_item)
+        else:
+            logger.debug("Skipping missing item '%s' ...", exclude_item)
+
+    if not parsed_list:
+        logger.debug("No items specified to be excluded")
+    else:
+        # Remove any duplicates and any children of parent directories before returning
+        parsed_list = remove_child_items(sorted(list(set(parsed_list))))
+
+    logger.debug("Exclude items: %s", parsed_list)
+    return parsed_list
+
+
+def remove_child_items(item_list):
+    """
+    For a list of filesystem items, remove those items that are duplicates or children of other items
+    Eg, for remove_child_items(['/path/to/some/item/child', '/path/to/another/item', '/path/to/some/item'])
+    returns ['/path/to/another/item', '/path/to/some/item']
+    If one of the items is root, then it wins
+    Also, all items should be the full path starting at root (/). Any that aren't are removed
+    """
+    if '/' in item_list:
+        return ['/']
+
+    # Remove duplicates and any non-full path items
+    item_list = sorted(list(set(filter(lambda x: x.startswith('/'), item_list))))
+    remove_items = set([])
+    for i, item1 in enumerate(item_list[:-1]):
+        for item2 in item_list[i + 1:]:
+            if item1 != item2 and item2.startswith(item1 + '/'):
+                remove_items.add(item2)
+    for remove_item in remove_items:
+        item_list.remove(remove_item)
+    return sorted(list(set(item_list)))
+
+
+def remove_included_excluded_items(included_items, excluded_items):
+    """
+    Go through the list of included items and remove any that are in the exclude list,
+    or are children of excluded items (no need to scan an included item if its parent is to be excluded)
+    """
+    # Clean up the lists, just in case this hasn't been done already
+    included_items = remove_child_items(included_items)
+    excluded_items = remove_child_items(excluded_items)
+
+    remove_items = set([])
+    for included_item in included_items:
+        for excluded_item in excluded_items:
+            if excluded_item == included_item or included_item.startswith(excluded_item + '/'):
+                remove_items.add(included_item)
+    for remove_item in remove_items:
+        included_items.remove(remove_item)
+    return included_items
+
+
+def process_include_exclude_items(include_items=[], exclude_items=[], exclude_mountpoints=[]):
+    """
+    Process the include and exclude items, where the exclude items are effectively subtracted from the include_items.
+    It builds a scan_dict dictionary of items to scan keyed by the filesystem top level directories.
+    Only the toplevel directories from items in the include_items list will be present in scan_dict.
+    scan_dict = {'/boot': {'include': ['/boot/include/me', ...], 'exclude': ['/boot/exclude/me', ...]},
+                 '/etc': {'include': ['/etc/include/me', ...], 'exclude': ['/etc/exclude/me', ...]},
+                 ...
+    :return: scan_dict
+    """
+    # Get a list of excluded items from the exclude file and network filesystem mountpoints
+    initial_exclude_list = process_exclude_items(exclude_items)
+    final_exclude_list = remove_child_items(list(set(exclude_mountpoints) | set(initial_exclude_list)))
+    logger.debug("Final exclude items: %s", final_exclude_list)
+
+    # Get a list of included items from the include file, minus the excluded items
+    initial_include_list = process_include_items(include_items)
+    if not initial_include_list:
+        logger.error("No items to scan because the include items doesn't contain any valid items")
+        exit(constants.sig_kill_bad)
+    final_include_list = remove_included_excluded_items(initial_include_list, final_exclude_list)
+    logger.debug("Final include items after removing exclude items: %s", final_include_list)
+    if not final_include_list:
+        logger.error("No items to scan because the specified exclude items cancel them out")
+        exit(constants.sig_kill_bad)
+
+    # This is the dictionary that will hold all the items to scan (after processing the include and exclude items)
+    # It will be keyed by each of the toplevel directories containing items to scan
+    # yara will scan each of the toplevel dir's 'include' keys (if present), or just the toplevel dir itself
+    scan_dict = {}
+
+    # Populate the scan_dict by creating keys for each toplevel directory of the items to include/scan
+    # Create an 'include' key for each toplevel directory containing items to include in that toplevel directory
+    logger.debug("Populating scan_dict's include items ...")
+    for include_item in final_include_list:
+        item_subpaths = []
+        get_parent_dirs(include_item, item_subpaths)
+        include_item_toplevel_dir = item_subpaths[0]
+        if include_item_toplevel_dir not in scan_dict:
+            # Create an 'include' key if the item to scan isn't just the toplevel directory itself
+            scan_dict[include_item_toplevel_dir] = {'include': set([include_item])}\
+                if include_item != include_item_toplevel_dir else {}
+        else:
+            scan_dict[include_item_toplevel_dir]['include'].add(include_item)
+
+    logger.debug("Scan dict after adding include items: %s", scan_dict)
+
+    # Populate an 'exclude' key for the toplevel dirs in the scan_dict that also have items to exclude
+    # Or remove the toplevel dirs from the scan dict where the toplevel dir itself is to be excluded
+    logger.debug("Populating scan_dict's exclude items ...")
+    for exclude_item in final_exclude_list:
+        item_subpaths = []
+        get_parent_dirs(exclude_item, item_subpaths)
+        exclude_item_toplevel_dir = item_subpaths[0]
+        if exclude_item_toplevel_dir not in scan_dict:
+            # This exclude_item's toplevel dir isn't in the scan dict, so skip it (since it's not being included)
+            continue
+        if 'exclude' not in scan_dict[exclude_item_toplevel_dir]:
+            # Create the 'exclude' key if it doesn't already exist
+            scan_dict[exclude_item_toplevel_dir]['exclude'] = {'items': [], 'subpaths': set([])}
+
+        scan_dict[exclude_item_toplevel_dir]['exclude']['items'].append(exclude_item)
+
+        # Add the list of subpaths leading to this exclude item.
+        # The subpaths are needed later for listing the contents of each subpath
+        scan_dict[exclude_item_toplevel_dir]['exclude']['subpaths'].update(item_subpaths)
+
+    logger.debug("Scan dict after adding exclude items: %s", scan_dict)
+
+    # For each toplevel dir with items to exclude, re-populate the include key with directory content listings
+    # of the subpaths, minus the items to exclude and only including items to include. Yep, it's complicated
+    # (see the illustrative sketch below).
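# A concrete illustration of the resulting scan_dict (an editorial sketch with
# hypothetical paths; the paths must actually exist for the os.path checks in
# the helpers above to pass):

from insights.client.apps.malware_detection import process_include_exclude_items

# scan_only covers /etc and /var/log; scan_exclude drops one subtree of /var/log
scan_dict = process_include_exclude_items(include_items=['/etc', '/var/log'],
                                          exclude_items=['/var/log/private'],
                                          exclude_mountpoints=[])

# Expected shape (assuming /var/log/private exists):
# {'/etc': {},                                      # no excludes: yara scans /etc itself
#  '/var': {'include': ['/var/log/<entry>', ...],   # /var/log contents minus the excluded subtree
#           'exclude': {'items': ['/var/log/private'],
#                       'subpaths': set(['/var', '/var/log', '/var/log/private'])}}}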
+ # These directory listings will be used with yara's --scan-list option + logger.debug("Re-populating scan_dict's include items with directory content listings to pass to yara ...") + for toplevel_dir in scan_dict: + if 'exclude' not in scan_dict[toplevel_dir]: + continue + + # Get directory listings of each of the subpaths + if 'include' in scan_dict[toplevel_dir]: + scan_items = set(scan_dict[toplevel_dir]['include']) + else: + scan_items = set([]) + toplevel_dir_exclude = scan_dict[toplevel_dir]['exclude'] + for exclude_item in toplevel_dir_exclude['items']: + subpaths = [] + get_parent_dirs(exclude_item, subpaths) + for i, subpath in enumerate(subpaths[:-1]): + dir_list = os.listdir(subpath) + dir_list = sorted(map(lambda x: subpath + '/' + x, dir_list)) + dir_list.remove(subpaths[i + 1]) + scan_items.update(dir_list) + + # Go through the list of scan items and remove any exclude items or exclude item subpaths + for scan_item in list(scan_items): + for exclude_item in toplevel_dir_exclude['items']: + if scan_item == exclude_item or scan_item.startswith(exclude_item + '/'): + scan_items.remove(scan_item) + break + else: + for exclude_subpath in toplevel_dir_exclude['subpaths']: + if scan_item == exclude_subpath: + scan_items.remove(scan_item) + + # If there is an include list, make sure the scan_items only include items in the include list + if 'include' in scan_dict[toplevel_dir]: + for maybe_include in list(scan_items): + if os.path.islink(maybe_include) or (not os.path.isfile(maybe_include) and not os.path.isdir(maybe_include)): + scan_items.remove(maybe_include) + continue + if any([maybe_include == definitely_include or maybe_include.startswith(definitely_include + '/') + for definitely_include in scan_dict[toplevel_dir]['include']]): + continue + else: + scan_items.remove(maybe_include) + + # Overwrite the existing include key list with the new list of scan_items + scan_dict[toplevel_dir]['include'] = sorted(list(scan_items)) + + logger.debug("Final scan_dict: %s", scan_dict) + return scan_dict + + +def get_scan_since_timestamp(since): + """ + Return a unix timestamp corresponding to how long ago to scan for files created/modified since this timestamp. + Valid values of 'since' are integers > 0 meaning the number of days back in time from now, + or 'last' meaning get the timestamp of the last scan + If 'since' is not one of these valid values, then terminate + """ + now = time.time() + + def get_lastscan_timestamp(lastscan): + try: + # Convert the datetime string into a unix timestamp + lastscan_seconds = float(datetime.strptime(lastscan, '%Y-%m-%dT%H:%M:%S.%f').strftime('%s')) + if lastscan_seconds > now: + raise RuntimeError("Last scan time is in the future.") + except Exception as err: + logger.error("Error getting time of last malware scan: %s. Ignoring 'scan_since: last' option ...", str(err)) + return None + return lastscan_seconds + + if isinstance(since, str) and since.lower().startswith('l'): + # Get the timestamp of the last scan + if os.path.isfile(LAST_SCAN_FILE): + with open(LAST_SCAN_FILE) as f: + lastscan = f.readline().strip() + return get_lastscan_timestamp(lastscan) + else: + logger.info("File %s doesn't exist for 'scan_since: last' option. Continuing ...", + LAST_SCAN_FILE) + return None + elif isinstance(since, str): + logger.error("Unknown value '%s' for scan_since option. 
Valid values are integers >= 1 and 'last'", since) + exit(constants.sig_kill_bad) + + try: + since_int = int(since) + if since_int >= 1: + return now - (since_int * 86400) # 86400 seconds in a day + else: + raise ValueError("Invalid scan_since value %s. Valid values are integers >= 1 and 'last'" % since) + except ValueError as e: + logger.error(str(e)) + exit(constants.sig_kill_bad) + + +def is_recent_mtime(item, timestamp): + """ + Return True if the given 'item' has a modification time that is newer than 'timestamp' + Return False otherwise, or if the the 'item' is a link or another non-file type (eg pipes) + """ + if os.path.exists(item) and not os.path.islink(item) and os.path.isfile(item): + return os.path.getmtime(item) > timestamp + return False + + +def find_modified_in_directory(directory, timestamp, output_file): + """ + Find files in 'directory' that have been created/modified since 'timestamp' + and write their names to 'output_file' + """ + for root, dirs, files in os.walk(directory): + for afile in files: + path = os.path.join(root, afile) + if is_recent_mtime(path, timestamp): + output_file.write(path + "\n") + + +def find_modified_include_items(item_list, timestamp, output_file): + """ + Find files in the given list of items (files/directories) that have been created/modified since 'timestamp' + and write their names to 'output_file' + """ + for item in item_list: + if os.path.isdir(item): + find_modified_in_directory(item, timestamp, output_file) + else: + if is_recent_mtime(item, timestamp): + output_file.write(item + '\n') diff --git a/insights/client/apps/manifests.py b/insights/client/apps/manifests.py new file mode 100644 index 000000000..a221e590c --- /dev/null +++ b/insights/client/apps/manifests.py @@ -0,0 +1,78 @@ +""" +App manifests for use with the --collector APP option +Define the app manifest and add it to the manifests dict at the bottom of the file +""" + +malware_detection_manifest = """ +# Manifest file for malware detection app data collection +--- +# version is for the format of this file, not its contents. +version: 0 + +client: + context: + class: insights.core.context.HostContext + args: + timeout: 10 # timeout in seconds for commands. Doesn't apply to files. + + blacklist: + files: [] + commands: [] + patterns: [] + keywords: [] + + persist: + # determines what will appear in the archive + - name: insights.specs.datasources.malware_detection.MalwareDetectionSpecs + enabled: true + + run_strategy: + name: serial + args: + max_workers: null + +plugins: + # disable everything by default + # defaults to false if not specified. + default_component_enabled: false + packages: + # determines which packages are loaded. 
These will be namespaced to the relevant collector + - insights.specs.datasources.malware_detection + - insights.specs.default + configs: + # determines which specs get loaded + - name: insights.specs.datasources.malware_detection.MalwareDetectionSpecs + enabled: true + # Enable specs for collecting the system's canonical facts + - name: insights.specs.default.DefaultSpecs.mac_addresses + enabled: true + - name: insights.specs.Specs.mac_addresses + enabled: true + - name: insights.specs.default.DefaultSpecs.etc_machine_id + enabled: true + - name: insights.specs.Specs.etc_machine_id + enabled: true + - name: insights.specs.default.DefaultSpecs.hostname + enabled: true + - name: insights.specs.Specs.hostname + enabled: true + - name: insights.specs.default.DefaultSpecs.bios_uuid + enabled: true + - name: insights.specs.Specs.bios_uuid + enabled: true + - name: insights.specs.default.DefaultSpecs.machine_id + enabled: true + - name: insights.specs.Specs.machine_id + enabled: true + - name: insights.specs.default.DefaultSpecs.ip_addresses + enabled: true + - name: insights.specs.Specs.ip_addresses + enabled: true + - name: insights.specs.default.DefaultSpecs.subscription_manager_id + enabled: true + - name: insights.specs.Specs.subscription_manager_id + enabled: true +""".lstrip() + +manifests = {'malware-detection': malware_detection_manifest} +content_types = {'malware-detection': 'application/vnd.redhat.malware-detection.results+tgz'} diff --git a/insights/client/config.py b/insights/client/config.py index 92ff97136..ca2811c70 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -8,6 +8,7 @@ from six.moves import configparser as ConfigParser from distutils.version import LooseVersion from .utilities import get_version_info +from insights.client.apps.manifests import manifests, content_types try: from .constants import InsightsConstants as constants @@ -118,6 +119,14 @@ def _core_collect_default(): # non-CLI 'default': None }, + 'app': { + 'default': None, + 'opt': ['--collector'], + 'help': 'Run the specified app and upload its results archive', + 'action': 'store', + 'group': 'actions', + 'dest': 'app' + }, 'compliance': { 'default': False, 'opt': ['--compliance'], @@ -730,6 +739,9 @@ def _validate_options(self): sys.stdout.write('WARNING: SOSCleaner reports will be created alongside the output archive.\n') if self.module and not self.module.startswith('insights.client.apps.'): raise ValueError('You can only run modules within the namespace insights.client.apps.*') + if self.app and not self.manifest: + raise ValueError("Unable to find app: %s\nList of available apps: %s" + % (self.app, ', '.join(sorted(manifests.keys())))) def _imply_options(self): ''' @@ -764,6 +776,12 @@ def _imply_options(self): if self._print_errors: sys.stdout.write('The compressor {0} is not supported. 
Using default: gz\n'.format(self.compressor)) self.compressor = 'gz' + if self.app: + # Get the manifest for the specified app + self.manifest = manifests.get(self.app) + self.content_type = content_types.get(self.app) + self.core_collect = True + self.legacy_upload = False if self.output_dir: # get full path self.output_dir = os.path.abspath(self.output_dir) diff --git a/insights/client/connection.py b/insights/client/connection.py index 5b8467ad9..e3012bc94 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -871,6 +871,7 @@ def upload_archive(self, data_collected, content_type, duration=None): 'file': (file_name, open(data_collected, 'rb'), content_type), 'metadata': c_facts } + logger.debug('content-type: %s', content_type) logger.debug("Uploading %s to %s", data_collected, upload_url) upload = self.post(upload_url, files=files, headers={}) diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py index 9d5b17dc3..dad04a284 100644 --- a/insights/client/core_collector.py +++ b/insights/client/core_collector.py @@ -49,7 +49,11 @@ def run_collection(self, conf, rm_conf, branch_info, blacklist_report): 'components': rm_conf.get('components', []) } - collected_data_path = collect.collect(tmp_path=self.archive.tmp_dir, rm_conf=core_blacklist, client_timeout=self.config.cmd_timeout) + manifest = collect.default_manifest + if hasattr(self.config, 'manifest') and self.config.manifest: + manifest = self.config.manifest + collected_data_path = collect.collect(manifest=manifest, tmp_path=self.archive.tmp_dir, rm_conf=core_blacklist, client_timeout=self.config.cmd_timeout) + # update the archive dir with the reported data location from Insights Core if not collected_data_path: raise RuntimeError('Error running collection: no output path defined.') diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index c22ceaf30..3227d06ea 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -315,7 +315,8 @@ def collect_and_output(client, config): except RuntimeError as e: logger.error(e) sys.exit(constants.sig_kill_bad) - config.content_type = 'application/vnd.redhat.advisor.collection+tgz' + if not config.content_type: + config.content_type = 'application/vnd.redhat.advisor.collection+tgz' if config.no_upload: # output options for which upload is not performed diff --git a/insights/specs/datasources/malware_detection.py b/insights/specs/datasources/malware_detection.py new file mode 100644 index 000000000..091d6dcac --- /dev/null +++ b/insights/specs/datasources/malware_detection.py @@ -0,0 +1,29 @@ +from insights.core.context import HostContext +from insights.core.dr import SkipComponent +from insights.specs import Specs +from insights.core.plugins import datasource +from insights.core.spec_factory import DatasourceProvider +from insights.client.apps.malware_detection import MalwareDetectionClient +from insights.client.config import InsightsConfig + + +class MalwareDetectionSpecs(Specs): + @datasource(HostContext) + def malware_detection_app(broker): + """ + Custom datasource to collects content for malware scanner if a scanner is present on the system + """ + + try: + # Only run malware-detection if it was passed as an option to insights-client + insights_config = InsightsConfig().load_all() + if not (insights_config and hasattr(insights_config, 'app') and insights_config.app == 'malware-detection'): + raise SkipComponent + mdc = MalwareDetectionClient(insights_config) + scan_results = mdc.run() + if 
scan_results: + return DatasourceProvider(content=scan_results, relative_path="malware-detection-results.json") + else: + raise SkipComponent + except Exception: + raise SkipComponent diff --git a/insights/tests/client/apps/test_malware_detection.py b/insights/tests/client/apps/test_malware_detection.py new file mode 100644 index 000000000..6ff968ce3 --- /dev/null +++ b/insights/tests/client/apps/test_malware_detection.py @@ -0,0 +1,1497 @@ +import os +import re +import sys +import pytest +import yaml +import time +import string +import random +import fileinput +from datetime import datetime +from mock.mock import patch, Mock, ANY +try: + from urllib import quote as urlencode # python 2 +except ImportError: + from urllib.parse import quote as urlencode # python 3 + +from insights.client.apps.manifests import manifests, content_types +from insights.client.constants import InsightsConstants as constants +from insights.util.subproc import call, CalledProcessError +from insights.client.config import InsightsConfig + +from insights.client.apps.malware_detection import ( + DEFAULT_MALWARE_CONFIG, MalwareDetectionClient, InsightsConnection, + get_toplevel_dirs, get_parent_dirs, remove_child_items, remove_included_excluded_items, + process_include_items, process_exclude_items, process_include_exclude_items +) + +# Temporary directory for testing stuff in +RANDOM_STRING = ''.join(random.choice(string.ascii_lowercase) for _ in range(5)) +TEMP_TEST_DIR = "/tmp/malware-detection_test_dir_%s" % RANDOM_STRING + +YARA = '/bin/yara' # Fake yara executable +RULES_FILE = os.path.join(TEMP_TEST_DIR, 'rules.yar') +TEST_RULE_FILE = os.path.join(TEMP_TEST_DIR, 'test-rule.yar') +CONFIG = yaml.safe_load(DEFAULT_MALWARE_CONFIG) # Config 'returned' from _load_config +TEMP_CONFIG_FILE = os.path.join(TEMP_TEST_DIR, 'malware-detection-config.yml') + +# Get the number of CPU threads to run yara +CPUS = 1 if int(call('nproc').strip()) <= 2 else 2 + +# Some of the toplevel directories that will be included/excluded by default when listing root (/) +TLDS = ['/boot', '/dev', '/etc', '/home', '/opt', '/proc', '/root', '/sys', '/tmp', '/usr', '/var'] +INCLUDED_TLDS = ['/boot', '/etc', '/home', '/opt', '/root', '/tmp', '/usr', '/var'] # after removing exclude items +DEFAULT_SCAN_EXCLUDE = ['/cgroup', '/dev', '/media', '/mnt', '/net', '/proc', '/selinux', '/sys'] + +# Tests using the caplog fixture fail with python 2.6 +IS_PY26 = sys.version_info < (2, 7) +PY26_SKIP_TEST_REASON = "pytest caplog fixture doesn't work in python 2.6" + +# Various patch targets +LOGGER_TARGET = "insights.client.apps.malware_detection.logger" +LOAD_CONFIG_TARGET = "insights.client.apps.malware_detection.MalwareDetectionClient._load_config" +FIND_YARA_TARGET = "insights.client.apps.malware_detection.MalwareDetectionClient._find_yara" +GET_RULES_TARGET = "insights.client.apps.malware_detection.MalwareDetectionClient._get_rules" +BUILD_YARA_COMMAND_TARGET = "insights.client.apps.malware_detection.MalwareDetectionClient._build_yara_command" + + +@pytest.fixture +def create_test_files(): + # Write the test files to the temp directory + if not os.path.exists(TEMP_TEST_DIR): + os.mkdir(TEMP_TEST_DIR) + with open(TEMP_CONFIG_FILE, 'w') as tcf: + tcf.write(DEFAULT_MALWARE_CONFIG) + test_files = [(MATCHING_ENTITY_FILE, MATCHING_ENTITY_FILE_CONTENTS), + (ANOTHER_MATCHING_ENTITY_FILE, ANOTHER_MATCHING_ENTITY_FILE_CONTENTS), + (TEST_RULE_FILE, TEST_RULE_FILE_CONTENTS)] + for test_file, contents in test_files: + if not os.path.exists(test_file): + with 
open(test_file, 'w') as f: + f.write(contents) + yield + os.system('rm -rf %s' % TEMP_TEST_DIR) + + +@pytest.fixture +def extract_tmp_files(): + if not os.path.exists(TEMP_TEST_DIR): + os.mkdir(TEMP_TEST_DIR) + os.system("echo '%s' | base64 -d - | tar -C %s -zxf -" % (SCAN_FILES_BASE64, TEMP_TEST_DIR)) + yield + os.system('rm -rf %s' % TEMP_TEST_DIR) + + +class TestDefaultValues: + def test_default_spec(self): + # Read in the default malware spec and check its values + manifest = yaml.safe_load(manifests['malware-detection']) + content_type = content_types['malware-detection'] + assert content_type == "application/vnd.redhat.malware-detection.results+tgz" + specs = manifest['plugins']['configs'] + for spec in ['mac_addresses', 'etc_machine_id', 'hostname', 'bios_uuid', 'machine_id', 'ip_addresses', + 'subscription_manager_id']: + assert {'enabled': True, 'name': 'insights.specs.default.DefaultSpecs.%s' % spec} in specs + assert {'enabled': True, 'name': 'insights.specs.Specs.%s' % spec} in specs + + def test_default_options(self): + # Read in the default malware_detection_config options and check their values + assert CONFIG['yara_binary'] is None + assert CONFIG['test_scan'] is True + assert CONFIG['scan_filesystem'] is True + assert CONFIG['scan_processes'] is False + assert CONFIG['scan_only'] is None + assert CONFIG['scan_since'] is None + assert all([x in CONFIG['scan_exclude'] + for x in ['/proc', '/sys', '/cgroup', '/selinux', '/net', '/mnt', '/media', '/dev']]) + assert CONFIG['exclude_network_filesystem_mountpoints'] is True + + @patch(BUILD_YARA_COMMAND_TARGET) + @patch(GET_RULES_TARGET, return_value=TEST_RULE_FILE) + @patch(FIND_YARA_TARGET, return_value=YARA) + @patch(LOGGER_TARGET) + def test_running_default_options(self, log_mock, yara, rules, cmd, create_test_files): + # Try running malware-detection with the default options + # With the default options, test_scan is true, so some of the option values will be changed for that and + # will be different from those in the default config file. 
+ # For example, do_filesystem_scan AND do_process_scan are both True when doing a test scan + test_pid = str(os.getpid()) + # Use a real config file so scan_fsobjects will be populated properly + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.yara_binary == YARA + assert mdc.rules_file == TEST_RULE_FILE + assert mdc.do_filesystem_scan is True + assert mdc.do_process_scan is True + assert mdc.scan_fsobjects == [TEMP_CONFIG_FILE] + assert mdc.scan_pids == [test_pid] + assert mdc.scan_since_dict == {'timestamp': None} + assert mdc.scan_exclude_list == [] + assert mdc.network_filesystem_mountpoints == [] + assert mdc.scan_timeout == 3600 + assert mdc.nice_value == 19 + + with patch("insights.client.apps.malware_detection.call") as call_mock: + # Mock all the calls to 'call' to get the yara matches and the metadata about the matches for the test scan + # 1st call is yara output from scanning TEST_RULE_FILE, calls 2-6 are to get its metadata + # 7th call is yara output from scanning the current process and 8th is to get its metadata + call_mock.side_effect = ["TEST_RedHatInsightsMalwareDetection %s\n0x4a:$re1: Malware Detection Client" % TEST_RULE_FILE, + "ASCII text", "text/plain; charset=us-ascii", "d5b0aeb3e18df68f47287e14ef144489", + "2:74:Malware Detection Client", + "// Verifies the Red Hat Insights Malware Detection Client app is present on the system", + "TEST_RedHatInsightsMalwareDetection %s\n0x4a:$re1: Malware Detection Client" % test_pid, + "python insights_client/run.py --collector malware-detection"] + mutation = mdc.run() + log_mock.info.assert_any_call("Found %d rule match%s.", 2, "es") + assert 'ruleName: "TEST_RedHatInsightsMalwareDetection"' in mutation + assert 'source: "%s"' % TEST_RULE_FILE in mutation + assert 'source: "%s"' % test_pid in mutation + assert re.search('metadata:.*line_number', mutation) + assert re.search('metadata:.*process_name', mutation) + + +@patch(BUILD_YARA_COMMAND_TARGET) +@patch(GET_RULES_TARGET, return_value=RULES_FILE) +@patch(LOAD_CONFIG_TARGET, return_value=CONFIG) +class TestFindYara: + + @patch.dict(os.environ) + def test_find_yara_binary(self, conf, rules, cmd): + # Testing finding yara + os.environ['YARA_BINARY'] = '/bin/yara' + with patch('os.path.isfile', return_value=True): + with patch("insights.client.apps.malware_detection.call", return_value='4.1'): + mdc = MalwareDetectionClient(None) + assert mdc.yara_binary == '/bin/yara' + cmd.assert_called() + + @patch.dict(os.environ) + def test_missing_yara_binary(self, conf, rules, cmd): + # Test yara_binary option with non-existent file + os.environ['YARA_BINARY'] = '/bin/notyara' + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + cmd.assert_not_called() + + # Test yara_binary option with non-yara file + os.environ['YARA_BINARY'] = '/bin/ls' + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + cmd.assert_not_called() + + @patch("insights.client.apps.malware_detection.call") # mock call to 'yara --version' + def test_invalid_yara_versions(self, version_mock, conf, rules, cmd): + # Test checking the version of yara + # Invalid versions of yara + for version in ['4.0.99', '4']: + version_mock.return_value = version + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + cmd.assert_not_called() # We won't get to the build_yara_cmd method because we exit before its called + + # Valid versions of yara + for version in ['4.1', '10.0.0']: + version_mock.return_value 
= version + mdc = MalwareDetectionClient(None) + assert mdc.yara_binary + cmd.assert_called() + + +# Use patch.object, just because I wanted to try using patch.object instead of using patch all the time :shrug: +@patch.object(InsightsConnection, 'get', return_value=Mock(status_code=200, content=b"Rule Content")) +@patch.object(InsightsConnection, 'get_proxies') +@patch.object(InsightsConnection, '_init_session', return_value=Mock()) +@patch.object(MalwareDetectionClient, '_build_yara_command') +@patch.object(MalwareDetectionClient, '_find_yara', return_value=YARA) +@patch.object(MalwareDetectionClient, '_load_config', return_value=CONFIG) +class TestGetRules: + """ Testing the _get_rules method """ + + @patch.dict(os.environ, {'TEST_SCAN': 'true'}) + def test_download_rules_cert_auth(self, conf, yara, cmd, session, proxies, get): + # Test the standard rules_location urls, but will result in cert auth being used to download the rules + # Test with insights-config None, expect an error when trying to use the insights-config object + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + session.assert_not_called() + + # With default insights config and test scan true ... + # Expect to use cert auth because no username or password specified and expect to download test-rule.yar + mdc = MalwareDetectionClient(InsightsConfig()) + assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar" + assert mdc.rules_file.startswith('/tmp') # rules will be saved into a temp file + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar") + + # With authmethod=CERT, expect 'cert.' to be prefixed to the url + mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) + assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar" + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar") + + # With authmethod=BASIC and test scan false ... 
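# A sketch of the URL selection these assertions imply (inferred from the
# tests in this file, not copied from the app code; the default
# rules_location value is an assumption):

def expected_rules_url(authmethod='BASIC', username=None, password=None, test_scan=False,
                       rules_location='https://console.redhat.com/api/malware-detection/v1/signatures.yar'):
    # Cert auth applies when authmethod is CERT, or when basic auth has no credentials at all
    if authmethod == 'CERT' or not (username or password):
        rules_location = rules_location.replace('https://', 'https://cert.')
    if test_scan:
        # The last path component is swapped for the test rule (cf _get_test_rule_location)
        rules_location = rules_location.rsplit('/', 1)[0] + '/test-rule.yar'
    return rules_location

assert expected_rules_url(test_scan=True) == \
    'https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar'
assert expected_rules_url(username='user', password='pass', test_scan=True) == \
    'https://console.redhat.com/api/malware-detection/v1/test-rule.yar'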
+ # Expect to still use cert auth because no username or password specified + os.environ['TEST_SCAN'] = 'false' + mdc = MalwareDetectionClient(InsightsConfig(authmethod='BASIC')) + assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar" + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar") + + mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) + assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar" + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar") + + @patch.dict(os.environ, {'TEST_SCAN': 'true'}) + @patch(LOGGER_TARGET) + def test_download_rules_basic_auth(self, log_mock, conf, yara, cmd, session, proxies, get): + # Test the standard rules_location urls, with basic auth attempting to be used to download the rules + # Basic auth is used by default, but needs to have a valid username and password for it to work + # Without a username and password, then cert auth will be used + + # Test with just a username specified - expect basic auth to be used but fails + get.return_value = Mock(status_code=401, reason="Unauthorized", text="No can do") + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig(username='user')) + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) + + # Test with just a password specified - expect basic auth to be used but fails + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig(password='pass')) + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) + + # Test with 'incorrect' username and/or password - expect basic auth failure + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig(username='user', password='badpass')) + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) + + # Test with 'correct' username and password - expect basic auth success + get.return_value = Mock(status_code=200, content=b"Rule Content") + mdc = MalwareDetectionClient(InsightsConfig(username='user', password='goodpass')) + assert mdc.rules_location == "https://console.redhat.com/api/malware-detection/v1/test-rule.yar" + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + + @patch.dict(os.environ, {'TEST_SCAN': 'true', 'RULES_LOCATION': 'console.redhat.com/rules.yar'}) + def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get): + # Non-standard rules URLS - without https:// at the start and not signatures.yar + # test-scan true and BASIC auth by default expect test-rule.yar and no 'cert.' in URL + mdc = MalwareDetectionClient(InsightsConfig(username='user', password='pass')) + assert mdc.rules_location == "https://console.redhat.com/test-rule.yar" + get.assert_called_with("https://console.redhat.com/test-rule.yar") + + # test-scan false and CERT auth - expect 'cert.' 
prefixed to the URL and not test-rule.yar + os.environ['TEST_SCAN'] = 'false' + mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) + assert mdc.rules_location == "https://cert.console.redhat.com/rules.yar" + get.assert_called_with("https://cert.console.redhat.com/rules.yar") + + @patch.dict(os.environ, {'TEST_SCAN': 'false', 'RULES_LOCATION': 'http://localhost/rules.yar'}) + @patch(LOGGER_TARGET) + def test_download_failures(self, log_mock, conf, yara, cmd, session, proxies, get): + from requests.exceptions import ConnectionError, Timeout + # Test various problems downloading rules + # 404 error - unlikely to occur unless an incorrect rules_location was manually specified + get.return_value = Mock(status_code=404, reason="Not found", text="Nup") + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig()) + log_mock.error.assert_called_with("%s %s: %s", 404, "Not found", "Nup") + assert get.call_count == 1 + + # Test other errors downloading rules from the backend - these are more likely to occur + get.side_effect = [ConnectionError("Couldn't connect"), Timeout("Timeout")] + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig(username='user', password='pass')) + log_mock.error.assert_called_with("Unable to download rules from %s: %s", + os.environ['RULES_LOCATION'], "Couldn't connect") + assert get.call_count == 2 + + with pytest.raises(SystemExit): + MalwareDetectionClient(InsightsConfig()) + log_mock.error.assert_called_with("Unable to download rules from %s: %s", + 'http://cert.localhost/rules.yar', "Timeout") + assert get.call_count == 3 + + @patch.dict(os.environ, {'TEST_SCAN': 'true', 'RULES_LOCATION': '//console.redhat.com/rules.yar'}) + @patch("os.path.isfile", return_value=True) + def test_get_rules_location_files(self, isfile, conf, yara, cmd, session, proxies, get): + # Test using files for rules_location, esp irregular file names + # rules_location that starts with a '/' is assumed to be a file, even if its a double '//' + # Re-writing the rule to be test-rule.yar doesn't apply to local files + mdc = MalwareDetectionClient(None) + assert mdc.rules_location == "//console.redhat.com/rules.yar" + assert mdc.rules_file == "/console.redhat.com/rules.yar" + get.assert_not_called() + + # Just to confirm the filename stays the same for regardless of test_rule value + os.environ['TEST_SCAN'] = 'false' + mdc = MalwareDetectionClient(None) + assert mdc.rules_location == "//console.redhat.com/rules.yar" + assert mdc.rules_file == "/console.redhat.com/rules.yar" + get.assert_not_called() + + +@patch(GET_RULES_TARGET, return_value=RULES_FILE) +@patch(FIND_YARA_TARGET, return_value=YARA) +@patch(LOAD_CONFIG_TARGET, return_value=CONFIG) +class TestBuildYaraCmd: + + @patch('os.path.getsize') + def test_build_yara_command_success(self, size, conf, yara, rules): + expected_yara_cmd = "nice -n 19 {0} -s -N -a 3600 -p 1 -r -f%s {1}".format(YARA, RULES_FILE) + size.return_value = 1 + # Use side_effect with 3 'call' values because build_yara_command calls 'call' 3 times ... 
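# (For readers unfamiliar with yara's command line, a decode of the expected
# command string above; the flag meanings are taken from yara's own help text,
# not from this patch:
#
#   nice -n 19 /bin/yara -s -N -a 3600 -p 1 -r -f [-C] <rules_file> <target>
#     nice -n 19   run yara at the lowest scheduling priority
#     -s           print the strings that matched each rule
#     -N           do not follow symlinks
#     -a 3600      abort the scan after 3600 seconds (scan_timeout)
#     -p 1         use one scanning thread (cpu_thread_limit)
#     -r           recurse into directories
#     -f           fast matching mode
#     -C           load the rules file as compiled rules, added only when the
#                  rules file's type looks compiled)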
+ # 1 to get the type of the rules file; 2 to see if the rules files contains valid rules; 3 to call nproc + # Test with text rules file - file type is 'ascii' + with patch("insights.client.apps.malware_detection.call", side_effect=['ascii', 'ok', '2']) as call_mock: + mdc = MalwareDetectionClient(None) + assert call_mock.call_count == 3 + assert ' '.join(mdc.yara_cmd) == expected_yara_cmd % '' + + # Test with 'compiled' rules file - file type is 'Yara 3.x' + with patch("insights.client.apps.malware_detection.call", side_effect=['Yara 3.X', 'ok', '2']) as call_mock: + mdc = MalwareDetectionClient(None) + assert call_mock.call_count == 3 + assert ' '.join(mdc.yara_cmd) == expected_yara_cmd % ' -C' + + # Another test with compiled rules file - file type is 'data' + with patch("insights.client.apps.malware_detection.call", side_effect=['data', 'ok', '2']) as call_mock: + mdc = MalwareDetectionClient(None) + assert call_mock.call_count == 3 + assert ' '.join(mdc.yara_cmd) == expected_yara_cmd % ' -C' + + @patch(LOGGER_TARGET) + @patch('os.path.getsize') + def test_build_yara_command_fail(self, size_mock, log_mock, conf, yara, rules): + # Test with empty rules file, ie file size is 0 + size_mock.return_value = 0 + with patch("insights.client.apps.malware_detection.call", side_effect=['wtf?', 'yikes', '2']) as call_mock: + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + call_mock.assert_called_once() # Only 1 call to 'call' before we exit + log_mock.error.assert_called_with("Rules file %s is empty", RULES_FILE) + + # Test with empty rules files, ie the file type is 'empty' + size_mock.return_value = 1 + with patch("insights.client.apps.malware_detection.call", side_effect=['empty', 'yikes', '2']) as call_mock: + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + call_mock.assert_called_once() # Only 1 call to 'call' before we exit + log_mock.error.assert_called_with("Rules file %s is empty", RULES_FILE) + + # Test with 'invalid' rules file - raise CalledProcessError when running command + with patch("insights.client.apps.malware_detection.call") as call_mock: + call_mock.side_effect = ['yara', CalledProcessError(1, 'cmd', 'invalid'), '2'] + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + assert call_mock.call_count == 2 # 2 calls to 'call' before we exit + log_mock.error.assert_called_with("Unable to use rules file %s: %s", RULES_FILE, "invalid") + + +@patch(BUILD_YARA_COMMAND_TARGET) +@patch(GET_RULES_TARGET, return_value=RULES_FILE) +@patch(FIND_YARA_TARGET, return_value=YARA) +@patch.dict(os.environ, {'TEST_SCAN': 'false'}) +class TestMalwareDetectionOptions: + + @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) + def test_running_modified_options(self, conf, yara, rules, cmd): + # Disable test_scan and the mdc attribute values should mostly match what's in the config file + mdc = MalwareDetectionClient(None) + assert mdc.rules_file == RULES_FILE + assert mdc.yara_binary == YARA + assert mdc.do_filesystem_scan is True + assert mdc.do_process_scan is False + assert mdc.scan_fsobjects == [] + assert mdc.scan_pids == [] + assert mdc.scan_since_dict == {'timestamp': None, 'datetime': None} + assert all([d in mdc.scan_exclude_list for d in ['/proc', '/sys', '/mnt', '/media', '/dev']]) + + @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) + def test_scan_only_option(self, conf, yara, rules, cmd): + # Test various combinations of scan_only and the scan_filesystem and scan_processes options + # Firstly, test the default option values + mdc = 
MalwareDetectionClient(None) + assert mdc.do_filesystem_scan is True + assert mdc.do_process_scan is False + assert mdc.scan_fsobjects == [] + assert mdc.scan_pids == [] + + # Add scan_only for a process - expect to exit because we can't scan processes because do_process_scan is false + os.environ['SCAN_ONLY'] = '1' + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + + # Enable process scanning and now the scan_only value should be used + os.environ['SCAN_PROCESSES'] = 'true' + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == [] + assert mdc.scan_pids == ['1'] + + # Add directories and processes and expect all to be scanned + os.environ['SCAN_ONLY'] = '1,/tmp' + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == ['/tmp'] + assert mdc.scan_pids == ['1'] + + # Disable filesystem scanning and only expect the process to be scanned + os.environ['SCAN_FILESYSTEM'] = 'FALSE' + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == [] + assert mdc.scan_pids == ['1'] + + # Disable both filesystem and process scanning and expect an error as there is nothing to scan + os.environ['SCAN_PROCESSES'] = 'FALSE' + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + + @patch(LOGGER_TARGET) + def test_invalid_config_values(self, log_mock, yara, rules, cmd, create_test_files): + # Check the malware client app behaves in a predictable way if the user specifies invalid option values + # in the config file. Some of these will fail yaml parsing, others will fail type checking + + # Invalid value for nice - fails casting to an integer + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "nice_value: nineteen" if line.startswith("nice_value") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Problem setting configuration option %s: %s", "nice_value", ANY) + yara.assert_called_once() # It failed after the _find_yara method + + # Missing colon for nice_value option - fails yaml parsing + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = "nice_value 19\n" if line.startswith("nice_value") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Error encountered loading the malware-detection app config file %s:\n%s", TEMP_CONFIG_FILE, ANY) + yara.assert_called_once() # It failed before the _find_yara method because it was invalid yaml + + # Bad list items for scan_only, mixing single item and list items - fails yaml parsing + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "nice_value: 19\n" if line.startswith("nice_value") else line + line = "scan_only: /bad\n- /bad" if line.startswith("scan_only:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Error encountered loading the malware-detection app config file %s:\n%s", TEMP_CONFIG_FILE, ANY) + yara.assert_called_once() # It failed before the _find_yara method because it was invalid yaml + + # Bad list items for scan_only, not a list item - fails yaml parsing + for line 
in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "scan_only:" if line.startswith("scan_only:") else line + line = "/bad" if line.startswith("- /bad") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Error encountered loading the malware-detection app config file %s:\n%s", TEMP_CONFIG_FILE, ANY) + yara.assert_called_once() # It failed before the _find_yara method because it was invalid yaml + + # Bad list items for scan_only, not enough spaces - fails yaml parsing + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "-/bad" if line.startswith("/bad") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Error encountered loading the malware-detection app config file %s:\n%s", TEMP_CONFIG_FILE, ANY) + yara.assert_called_once() # It failed before the _find_yara method because it was invalid yaml + + # Bad list items for scan_only, using tabs instead of spaces - fails yaml parsing + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "\t- /bad" if line.startswith("-/bad") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Error encountered loading the malware-detection app config file %s:\n%s", TEMP_CONFIG_FILE, ANY) + yara.assert_called_once() # It failed before the _find_yara method because it was invalid yaml + + # Patch the os.environ dict so all the changes are only temporary + @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) + @patch.dict(os.environ) + def test_using_env_vars(self, conf, yara, rules, cmd): + # Set certain option values via environment variables + env_var_list = [('RULES_LOCATION', RULES_FILE), ('TEST_SCAN', 'false'), + ('SCAN_FILESYSTEM', 'YES'), ('SCAN_PROCESSES', 'hello'), # will be interpreted as false + ('SCAN_ONLY', '/tmp'), ('SCAN_EXCLUDE', '/tmp'), + ('SCAN_SINCE', '2'), ('SCAN_TIMEOUT', '1800'), ('CPU_THREAD_LIMIT', '1')] + for key, value in env_var_list: + os.environ[key] = value + + mdc = MalwareDetectionClient(None) + assert mdc.yara_binary == YARA + assert mdc.rules_file == RULES_FILE + assert mdc.test_scan is False + assert mdc.do_filesystem_scan is True + assert mdc.do_process_scan is False + assert mdc.scan_fsobjects == ['/tmp'] + assert mdc.scan_exclude_list == ['/tmp'] + assert mdc.scan_since_dict['timestamp'] < time.time() - (2 * 86400) + assert mdc.scan_timeout == 1800 + # Not env vars, but just checking they have the expected values + assert mdc.nice_value == 19 + assert mdc.cpu_thread_limit == 1 + + # Start a filesystem scan and expect scan_only and scan_exclude to cancel each other out + with pytest.raises(SystemExit) as exc_info: + mdc.scan_filesystem() + assert exc_info.value.code == constants.sig_kill_bad + + # Test when SCAN_ONLY and SCAN_EXCLUDE values are comma separated + for key, value in [('SCAN_ONLY', '/tmp,/,/var/tmp'), + ('SCAN_EXCLUDE', '/home,/,/fred,barney')]: + os.environ[key] = value + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == ['/tmp', '/', '/var/tmp'] + assert mdc.scan_exclude_list == ['/home', '/'] + assert mdc.test_scan is False + + # Test when SCAN_ONLY is 
empty + os.environ['SCAN_ONLY'] = '' + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == [] + assert mdc.scan_exclude_list == ['/home', '/'] + + # Test when SCAN_EXCLUDE is empty + os.environ['SCAN_EXCLUDE'] = '' + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == [] + assert mdc.scan_exclude_list == [] + + # Further testing of list type env vars + os.environ['NETWORK_FILESYSTEM_TYPES'] = '' + assert mdc._get_config_option('network_filesystem_types') == [] + os.environ['NETWORK_FILESYSTEM_TYPES'] = 'nfs' + assert mdc._get_config_option('network_filesystem_types') == ['nfs'] + os.environ['NETWORK_FILESYSTEM_TYPES'] = 'nfs,nfs4' + assert mdc._get_config_option('network_filesystem_types') == ['nfs', 'nfs4'] + + @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) + @patch(LOGGER_TARGET) + @patch.dict(os.environ, {'TEST_SCAN': 'false', 'NICE_VALUE': 'nineteen', 'SCAN_SINCE': 'blast'}) + def test_invalid_env_vars(self, log_mock, conf, yara, rules, cmd): + # NICE_VALUE and SCAN_SINCE have invalid values + # First time through the NICE_VALUE should generate an error + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Problem parsing environment variable %s: %s", "NICE_VALUE", ANY) + + # Set NICE_VALUE to proper value to avoid it giving an error this time. + # Only SCAN_SINCE should generate an error + os.environ['NICE_VALUE'] = '19' + with pytest.raises(SystemExit): + MalwareDetectionClient(None) + log_mock.error.assert_called_with("Unknown value '%s' for scan_since option. " + "Valid values are integers >= 1 and 'last'", "blast") + + def test_scan_only_root(self, yara, rules, cmd, create_test_files): + # Nothing special about root when parsing the scan_only option + # There is no parsing of root to individual toplevel directories until running scan_filesystem + scan_only = '/' + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = "scan_only: %s" % scan_only if line.startswith("scan_only:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == [scan_only] + # This is called by scan_filesystem to convert '/' into its top level subdirectories + scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects, + exclude_items=mdc.scan_exclude_list) + assert all([x in list(scan_dict.keys()) for x in INCLUDED_TLDS]) + assert '/' not in list(scan_dict.keys()) + + # Multiple directories aren't consolidated until later + scan_only = ['/', '/tmp', '/home'] + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "scan_only: %s" % scan_only if line.startswith("scan_only:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == scan_only + scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects, + exclude_items=mdc.scan_exclude_list) + assert all([x in list(scan_dict.keys()) for x in INCLUDED_TLDS]) + assert '/' not in list(scan_dict.keys()) + + def test_scan_exclude_root(self, yara, rules, cmd, create_test_files): + # Nothing special about root when parsing the scan_exclude option + # There is no parsing of root to individual toplevel directories until running scan_filesystem + # Add '/' to the list of scan_exclude items. 
Add it directly after the scan_exclude: line + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "\n- /" if line.startswith("scan_exclude:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert '/' in mdc.scan_exclude_list + # When scan_filesystem is run, '/' will be expanded into toplevel directories that cancel out everything + with pytest.raises(SystemExit) as exc_info: + process_include_exclude_items(include_items=mdc.scan_fsobjects, + exclude_items=mdc.scan_exclude_list) + assert exc_info.value.code == constants.sig_kill_bad + + def test_scan_only_scan_exclude_nullify(self, yara, rules, cmd, create_test_files): + # Testing scan_only and scan_exclude items such that the exclude items nullify all the scan_only items + # In which case there will be nothing to scan + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "\n- /var/log\n- /usr/lib/systemd\n- /tmp" if line.startswith("scan_only:") else line + line = line + "\n- /tmp/\n- /usr/lib/\n- /var/log" if line.startswith("scan_exclude:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == ['/var/log', '/usr/lib/systemd', '/tmp'] + assert all([x in mdc.scan_exclude_list for x in ['/tmp', '/usr/lib', '/var/log']]) + # The exclude list covers all the items to be scanned, thus there is nothing to scan + with pytest.raises(SystemExit) as exc_info: + mdc.scan_filesystem() + assert exc_info.value.code == constants.sig_kill_bad + + # Both scan_only and scan_exclude contain root + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = line + "\n- /" if line.startswith("scan_only:") else line + line = line + "\n- /" if line.startswith("scan_exclude:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == ['/', '/var/log', '/usr/lib/systemd', '/tmp'] + assert all([x in mdc.scan_exclude_list for x in ['/', '/tmp', '/usr/lib', '/var/log']]) + # Because both lists contain /, they will cancel each other out and there is nothing to scan + with pytest.raises(SystemExit) as exc_info: + mdc.scan_filesystem() + assert exc_info.value.code == constants.sig_kill_bad + + @patch("insights.client.apps.malware_detection.call") + @patch(LOGGER_TARGET) + def test_network_filesystem_mountpoints(self, log_mock, call_mock, yara, rules, cmd, extract_tmp_files, create_test_files): + # Test the exclude_network_filesystem_mountpoints option by 'creating' various mountpoints to exclude + scan_me_scan_me = os.path.join(TEMP_TEST_DIR, 'scan_me/scan_me') + scan_me_too = os.path.join(TEMP_TEST_DIR, 'scan_me_too') + scan_me_not_mnt = os.path.join(TEMP_TEST_DIR, 'scan_me_not') + dont_scan_me_mnt = os.path.join(TEMP_TEST_DIR, 'scan_me/dont_scan_me') + + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "rules_location: %s\n" % TEST_RULE_FILE if line.startswith('---') else line + line = "scan_only: %s" % TEMP_TEST_DIR if line.startswith("scan_only:") else line + print(line) + + # This is the mocked output 
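# that the app treats as network filesystem mountpoints to exclude. The
# discovery command itself is not shown in this patch; a plausible findmnt
# invocation for it (an assumption, not the app's verbatim command) would be:
#
#   findmnt -t nfs,nfs4 -n -o TARGET    # print just the mountpoint paths, one per line
#
# In the test, call_mock.return_value simply stands in for whatever would be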
returned from the findmnt command + call_mock.return_value = '%s\n%s\n' % (scan_me_not_mnt, dont_scan_me_mnt) + + # Setting exclude_network_filesystem_mountpoints to false means we don't care about excluding mountpoints + os.environ['EXCLUDE_NETWORK_FILESYSTEM_MOUNTPOINTS'] = 'false' + os.environ['NETWORK_FILESYSTEM_TYPES'] = '' + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.network_filesystem_mountpoints == [] + + # Removing the env var (it'll be true from the config file) but still not having any + # network_filesystem_types value will generate an error + del os.environ['EXCLUDE_NETWORK_FILESYSTEM_MOUNTPOINTS'] + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + with pytest.raises(SystemExit): + mdc = MalwareDetectionClient(None) + log_mock.error.assert_called_with("No value specified for 'network_filesystem_types' option") + + # Ok, now with exclude mountpoints true and a value for types we will produce a list of mountpoints + os.environ['NETWORK_FILESYSTEM_TYPES'] = 'nfs' + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.network_filesystem_mountpoints == [scan_me_not_mnt, dont_scan_me_mnt] + + # Now we can process all the include and exclude items to build the scan_dict of things to scan + scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects, + exclude_items=mdc.scan_exclude_list, + exclude_mountpoints=mdc.network_filesystem_mountpoints) + + # The exclude_mountpoints will be added to the list of items to exclude + assert list(scan_dict.keys()) == ['/tmp'] + assert sorted(list(scan_dict['/tmp']['exclude']['items'])) == sorted([scan_me_not_mnt, dont_scan_me_mnt]) + assert all([x in scan_dict['/tmp']['include'] for x in [scan_me_scan_me, scan_me_too]]) + # scan_me dir won't be in the list of include items because it has a sub-item to be excluded + assert os.path.join(TEMP_TEST_DIR, 'scan_me') not in scan_dict['/tmp']['include'] + + # Now make TEMP_TEST_DIR a mountpoint and it will cancel out TEMP_TEST_DIR for scan_only + call_mock.return_value = '%s\n' % TEMP_TEST_DIR + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.network_filesystem_mountpoints == [TEMP_TEST_DIR] + with pytest.raises(SystemExit): + process_include_exclude_items(include_items=mdc.scan_fsobjects, + exclude_items=mdc.scan_exclude_list, + exclude_mountpoints=mdc.network_filesystem_mountpoints) + log_mock.error.assert_called_with("No items to scan because the specified exclude items cancel them out") + + +@patch(BUILD_YARA_COMMAND_TARGET) +@patch(FIND_YARA_TARGET, return_value=YARA) +@patch(LOGGER_TARGET) +class TestScanning: + + def test_scan_rules_file_with_extra_slashes(self, log_mock, yara, cmd, create_test_files): + # Test scanning RULES_FILE with an extra slash only in the rules_location one + # Even with the extra slashes in the rules_location there will be rules matched + # because */rules_compiled.yar and *//rules_compiled.yar are the same file + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "rules_location: %s\n" % TEST_RULE_FILE.replace('/', '//') if line.startswith('---') else line + line = "scan_only: %s" % TEST_RULE_FILE if line.startswith("scan_only:") else line + 
line = "add_metadata: false" if line.startswith("add_metadata:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.rules_location == TEST_RULE_FILE.replace('/', '//') + assert mdc.rules_file == TEST_RULE_FILE + assert mdc.scan_fsobjects == [TEST_RULE_FILE] + with patch("insights.client.apps.malware_detection.call") as call_mock: + # Mock the scan match data from yara + call_mock.return_value = "TEST_RedHatInsightsMalwareDetection %s\n0x4a:$re1: Malware Detection Client" % TEST_RULE_FILE + mdc.scan_filesystem() + rule_match = mdc.host_scan['TEST_RedHatInsightsMalwareDetection'] + assert rule_match[0]['source'] == TEST_RULE_FILE + assert rule_match[0]['string_data'] == "Malware Detection Client" + assert rule_match[0]['string_identifier'] == '$re1' + assert rule_match[0]['string_offset'] == 74 + log_mock.info.assert_any_call("Matched rule %s in %s %s", "TEST_RedHatInsightsMalwareDetection", "file", TEST_RULE_FILE) + + def test_scan_root_with_extra_slashes(self, log_mock, yara, cmd, create_test_files): + # Testing we handle the situation where items in scan_only & scan_exclude contain multiple slashes + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "rules_location: %s\n" % TEST_RULE_FILE if line.startswith('---') else line + line = line + "- //\n" if line.startswith("scan_only:") else line + line = line + "- //\n" if line.startswith("scan_exclude:") else line + print(line) + with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE): + mdc = MalwareDetectionClient(None) + assert mdc.scan_fsobjects == ['/'] + assert '/' in mdc.scan_exclude_list + # Chaos monkey - modify scan_fsobjects and scan_exclude_list AFTER they have been verified + # Assert that they still work and cancel each other out + mdc.scan_fsobjects = ['//'] + mdc.scan_exclude_list = ['//'] + with pytest.raises(SystemExit) as exc_info: + mdc.scan_filesystem() + log_mock.error.assert_called_with("No items to scan because the specified exclude items cancel them out") + assert exc_info.value.code == constants.sig_kill_bad + + @patch('insights.client.apps.malware_detection.NamedTemporaryFile') + @patch("insights.client.apps.malware_detection.call", return_value="") + def test_scan_since_tmp_files(self, call_mock, tmp_file_mock, log_mock, yara, cmd, extract_tmp_files, create_test_files): + # Set scan_only, scan_exclude options to some of the tmp files and then 'scan' them + # Then touch files to test the scan_since option and make sure that only the touched files will be scanned + yara_file_list = os.path.join(TEMP_TEST_DIR, 'yara_file_list') + scan_me_file = os.path.join(TEMP_TEST_DIR, 'scan_me/scan_me_file') + scan_me_too_file = os.path.join(TEMP_TEST_DIR, 'scan_me_too/scan_me_too_file') + scan_only = tuple(map(lambda x: os.path.join(TEMP_TEST_DIR, x), ['scan_me', 'scan_me_too'])) + scan_exclude = tuple(map(lambda x: os.path.join(TEMP_TEST_DIR, x), + ['scan_me_not', 'scan_me/dont_scan_me', 'scan_me_too/dont_scan_me_too'])) + + for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): + line = "test_scan: false" if line.startswith("test_scan:") else line + line = line + "rules_location: %s\n" % TEST_RULE_FILE if line.startswith('---') else line + line = line + '- %s\n- %s' % scan_only if line.startswith("scan_only:") else line + line = line + "- %s\n- %s\n- %s" % scan_exclude if 
line.startswith("scan_exclude:") else line
+                print(line)
+        with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE):
+            mdc = MalwareDetectionClient(None)
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        # Ensure the correct scan include and exclude values are set
+        assert list(scan_dict.keys()) == ['/tmp']
+        assert sorted(list(scan_dict['/tmp']['exclude']['items'])) == sorted(scan_exclude)
+
+        # Run scan_filesystem, but mock out NamedTemporaryFile so we can use our own file and inspect its contents after
+        # Also mock out the call to yara but we don't return anything since we aren't testing it
+        with open(yara_file_list, 'w') as f:
+            tmp_file_mock.return_value = f
+            mdc.scan_filesystem()
+        with open(yara_file_list, 'r') as f:
+            contents = f.read().splitlines()
+        # Ensure that a number of files are in the list of files passed to yara to scan
+        assert len(contents) > 2
+        assert all([x in contents for x in [scan_me_file, scan_me_too_file]])
+
+        # With the same scan_only and scan_exclude values, add scan_since: last into the mix and set the last scan
+        # time to now. There should be no matches because no files have been modified since now
+        last_scan = time.time()
+        last_scan_fmt = datetime.fromtimestamp(last_scan).strftime('%Y-%m-%d %H:%M:%S')
+        for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1):
+            line = "scan_since: last" if line.startswith("scan_since:") else line
+            print(line)
+        with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE):
+            with patch("insights.client.apps.malware_detection.get_scan_since_timestamp", return_value=last_scan):
+                mdc = MalwareDetectionClient(None)
+        assert mdc.scan_since_dict['timestamp'] == last_scan
+        assert mdc.scan_since_dict['datetime'] == last_scan_fmt
+        log_mock.info.assert_called_with("Scan for files created/modified since %s%s", ANY, ANY)
+
+        # Run scan_filesystem, but mock out NamedTemporaryFile so we can use our own file and inspect its contents after
+        # Also mock out the call to yara but we don't return anything since we aren't testing it
+        with open(yara_file_list, 'w') as f:
+            tmp_file_mock.return_value = f
+            mdc.scan_filesystem()
+        with open(yara_file_list, 'r') as f:
+            contents = f.read().splitlines()
+        # Ensure no files were passed to yara to scan because none were modified since 'last_scan'
+        assert not contents
+
+        # Try again, keeping the same last_scan time, but this time touch 2 files
+        # Confirm that only these 2 files appear in the list of files to be passed to yara
+        os.system('touch %s %s' % (scan_me_file, scan_me_too_file))
+        with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE):
+            with patch("insights.client.apps.malware_detection.get_scan_since_timestamp", return_value=last_scan):
+                mdc = MalwareDetectionClient(None)
+
+        with open(yara_file_list, 'w') as f:
+            tmp_file_mock.return_value = f
+            mdc.scan_filesystem()
+        with open(yara_file_list, 'r') as f:
+            contents = f.read().splitlines()
+        # Ensure only the two touched files were passed to yara to scan because they were 'modified' since 'last_scan'
+        assert len(contents) == 2
+        assert contents == [scan_me_file, scan_me_too_file]
+
+        # Touch some files that are excluded from scanning so even though they have been modified, they won't be
+        # in the list of files to scan that is passed to yara
+        os.system('touch %s %s %s' % (os.path.join(TEMP_TEST_DIR, 'scan_me/dont_scan_me/matching_entity'),
+                                      os.path.join(TEMP_TEST_DIR, 'scan_me_not/matching_entity'),
+                                      os.path.join(TEMP_TEST_DIR, "scan_me_too/dont_scan_me_too/'another matching_entity'")))
+        with patch("insights.client.apps.malware_detection.MALWARE_CONFIG_FILE", TEMP_CONFIG_FILE):
+            with patch("insights.client.apps.malware_detection.get_scan_since_timestamp", return_value=last_scan):
+                mdc = MalwareDetectionClient(None)
+
+        with open(yara_file_list, 'w') as f:
+            tmp_file_mock.return_value = f
+            mdc.scan_filesystem()
+        with open(yara_file_list, 'r') as f:
+            contents = f.read().splitlines()
+        # Ensure both scan_me_file and scan_me_too_file were passed to yara because both were modified since last_scan
+        assert len(contents) == 2
+        assert contents == [scan_me_file, scan_me_too_file]
+
+
+class TestIncludeExcludeMethods:
+
+    def test_toplevel_dirs(self):
+        tlds = get_toplevel_dirs()
+        assert all([x in tlds for x in TLDS])
+        assert any([x in tlds for x in DEFAULT_SCAN_EXCLUDE])
+
+    def test_get_parent_dirs(self):
+        parent_dir_list = []
+        get_parent_dirs('/usr/lib/systemd/user/basic.target', parent_dir_list)
+        assert sorted(parent_dir_list) == ['/usr', '/usr/lib', '/usr/lib/systemd', '/usr/lib/systemd/user',
+                                           '/usr/lib/systemd/user/basic.target']
+
+        parent_dir_list = []
+        get_parent_dirs('/usr/lib/systemd/user/basic.target', parent_dir_list, '/usr/lib')
+        assert sorted(parent_dir_list) == ['/usr/lib/systemd', '/usr/lib/systemd/user',
+                                           '/usr/lib/systemd/user/basic.target']
+
+        parent_dir_list = []
+        get_parent_dirs('/usr/lib/systemd/user/basic.target', parent_dir_list, '/var')
+        assert sorted(parent_dir_list) == ['/usr', '/usr/lib', '/usr/lib/systemd', '/usr/lib/systemd/user',
+                                           '/usr/lib/systemd/user/basic.target']
+
+    def test_remove_child_items(self):
+        # Simple example from the function docstring
+        items = ['/path/to/some/item/child', '/path/to/another/item', '/path/to/some/item']
+        assert remove_child_items(items) == ['/path/to/another/item', '/path/to/some/item']
+
+        # More complex test with duplicate items and items whose names start with another's name
+        # (rather than start with the same path) and names with spaces in them
+        items = ['/var/lib64', '/home/bob', '/var/lib/docker', '/home/bob', '/var/lib',
+                 '/home/bobby-droptables', r'/home/bob/this\ is\ bobs/child', '/var/lib63/im ok', '/var/lib64/im not']
+        assert remove_child_items(items) == ['/home/bob', '/home/bobby-droptables', '/var/lib',
+                                             '/var/lib63/im ok', '/var/lib64']
+
+        # /path is common to both so that will be all that is returned
+        items = ['/path/to/another/item', '/path/to/some/item', '/path']
+        assert remove_child_items(items) == ['/path']
+
+        # The root directory will always win
+        items = ['/path/to/another/item', '/path/to/some/item', '/']
+        assert remove_child_items(items) == ['/']
+
+        # Any non-full-path items (ones that don't start with /) are removed from the list
+        items = ['/path/to/another/item', '', '/path/to/some/item']
+        assert remove_child_items(items) == ['/path/to/another/item', '/path/to/some/item']
+
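The behaviour ``test_remove_child_items`` pins down is: de-duplicate, keep only absolute paths, and drop any path that already has a kept ancestor, with ``/`` swallowing everything else. A minimal sketch that satisfies these assertions (not necessarily the actual ``remove_child_items`` implementation in the malware detection client)::

    def remove_child_items(items):
        # Absolute paths only, de-duplicated; sorting guarantees an ancestor
        # is always seen before any of its children
        items = sorted(set(i for i in items if i.startswith('/')))
        kept = []
        for item in items:
            # '/a/b' is a child of a kept '/a'; '/' becomes '' + '/' and matches all
            if not any(item.startswith(k.rstrip('/') + '/') for k in kept):
                kept.append(item)
        return kept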
+    def test_remove_included_excluded_items(self):
+        include_items = ['/home/bob', '/tmp', '/var/www/html']
+        exclude_items = ['/home/bo', '/home/bob/exclude', '/home/bobby', '/temp', '/var/www']
+        include_items = remove_included_excluded_items(include_items, exclude_items)
+        assert include_items == ['/home/bob', '/tmp']
+
+        # Some fairly random lists similar to before
+        include_items = ['/var/lib64/docker', r'/home/bob/this\ is\ bobs/child', '/var/lib/docker', '/home/bob',
+                         '/var/lib', '/usr']
+        exclude_items = ['/var/lib/docker2', '/home/bob', '/home/bobby-droptables',
+                         r'/home/bob/this\ is\ bobs/child', '/var/lib63/im ok', '/var/lib64/im not', '/boot']
+        include_items = remove_included_excluded_items(include_items, exclude_items)
+        assert include_items == ['/usr', '/var/lib', '/var/lib64/docker']
+
+    def test_default_include_items(self):
+        # Call process_include_items with an empty list.
+        # If nothing is passed to the function, a list of the top level directories is produced
+        default_list = process_include_items()
+        assert default_list == get_toplevel_dirs()
+
+    def test_default_exclude_items(self):
+        # Call process_exclude_items with an empty list.
+        # If nothing is passed to the function, then there is nothing to exclude
+        default_list = process_exclude_items()
+        assert default_list == []
+
+    def test_process_include_items(self):
+        # Call process_include_items with variously populated lists
+        # Add some valid entries to include_items list, esp subdirectories
+        include_items = ['/etc/pam.d', '/tmp', '/var/log/']
+        processed_items = process_include_items(include_items)
+        assert processed_items == ['/etc/pam.d', '/tmp', '/var/log']
+
+        # Add some more subdirectories
+        include_items.extend(['/etc/cron.d', '/tmp', '/var/lib/'])
+        processed_items = process_include_items(include_items)
+        assert processed_items == ['/etc/cron.d', '/etc/pam.d', '/tmp', '/var/lib', '/var/log']
+
+        # Add some top level directories to override the subdirectories
+        include_items.extend(['/etc', '/var'])
+        processed_items = process_include_items(include_items)
+        assert processed_items == ['/etc', '/tmp', '/var']
+
+        # Add some invalid entries that will get ignored
+        include_items.extend(['..', '/var/run', '/missing'])
+        processed_items = process_include_items(include_items)
+        assert processed_items == ['/etc', '/tmp', '/var']
+
+        # Add the root directory (/) which will override all the other entries
+        include_items.append('/')
+        processed_items = process_include_items(include_items)
+        assert all([x in processed_items for x in TLDS])
+        assert any([x in processed_items for x in DEFAULT_SCAN_EXCLUDE])
+
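Read together, these include-item tests encode a small normalisation pipeline: strip trailing slashes, drop relative, missing or symlinked entries (which is why ``/var/run``, a symlink on recent RHEL, counts as invalid), collapse children into kept parents, and expand a bare ``/`` or an empty list into the top-level directories. Roughly, and only as a sketch of what the tests demand, assuming the module's own ``get_toplevel_dirs`` and ``remove_child_items`` helpers::

    import os

    def process_include_items(include_items=None):
        items = set()
        for item in include_items or []:
            item = item.rstrip('/') or '/'
            # Relative paths, missing paths and symlinks are silently ignored
            if item.startswith('/') and os.path.exists(item) and not os.path.islink(item):
                items.add(item)
        if not items or '/' in items:
            # Nothing valid, or '/' itself: fall back to every top-level directory
            return get_toplevel_dirs()
        return remove_child_items(sorted(items))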
+    def test_process_exclude_items(self):
+        # Call process_exclude_items with variously populated lists
+        # No entries to exclude
+        processed_items = process_exclude_items()
+        assert processed_items == []
+
+        # Add some valid entries to exclude items (links are ok in the exclude list ... why?)
+        exclude_items = ['/etc/ssh', '/tmp', '/var/run/']
+        processed_items = process_exclude_items(exclude_items)
+        assert processed_items == ['/etc/ssh', '/tmp', '/var/run']
+
+        # Add some more subdirectories
+        exclude_items.extend(['/etc/cron.d', '/tmp', '/var/lock/'])
+        processed_items = process_exclude_items(exclude_items)
+        assert processed_items == ['/etc/cron.d', '/etc/ssh', '/tmp', '/var/lock', '/var/run']
+
+        # Add some top level directories to override the subdirectories
+        exclude_items.extend(['/etc', '/var'])
+        processed_items = process_exclude_items(exclude_items)
+        assert processed_items == ['/etc', '/tmp', '/var']
+
+        # Add some invalid entries to exclude items, which will be ignored
+        exclude_items.extend(['..', '/missing'])
+        processed_items = process_exclude_items(exclude_items)
+        assert processed_items == ['/etc', '/tmp', '/var']
+
+        # Add the root directory, which will expand to all top level directories
+        exclude_items.append('/')
+        processed_items = process_exclude_items(exclude_items)
+        assert processed_items == get_toplevel_dirs()
+
+
+@patch(BUILD_YARA_COMMAND_TARGET)
+@patch(GET_RULES_TARGET, return_value=RULES_FILE)
+@patch(FIND_YARA_TARGET, return_value=YARA)
+@patch(LOAD_CONFIG_TARGET, return_value=CONFIG)
+@patch.dict(os.environ, {'TEST_SCAN': 'false'})
+class TestIncludeExcludeProcessing:
+
+    def test_process_include_exclude_items_simple(self, conf, yara, rules, cmd):
+        # Test the process_include_exclude_items function with simple modified include and exclude items
+        # Simple in that the include and exclude items are modified in such a way that
+        # directory listings aren't required to get the list of included files
+        # Add a single toplevel directory to the include list - expect only a single directory to scan
+        mdc = MalwareDetectionClient(None)
+        mdc.scan_fsobjects = ['/etc']
+        mdc.scan_exclude_list = []
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert list(scan_dict.keys()) == ['/etc']
+        assert 'include' not in scan_dict['/etc']
+        assert 'exclude' not in scan_dict['/etc']
+
+        # Add some extra subdirectories to scan
+        mdc.scan_fsobjects.extend(['/var/lib', '/var/log'])
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert sorted(scan_dict.keys()) == ['/etc', '/var']
+        assert sorted(list(scan_dict['/var']['include'])) == ['/var/lib', '/var/log']
+        assert 'exclude' not in scan_dict['/var']
+
+        # Add some extra directories to exclude that won't impact the already included directories
+        mdc.scan_exclude_list.extend(['/tmp', '/var/run'])
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert sorted(scan_dict.keys()) == ['/etc', '/var']
+        assert sorted(scan_dict['/var']['include']) == ['/var/lib', '/var/log']
+        assert scan_dict['/var']['exclude']['items'] == ['/var/run']
+
+        # Exclude /var which will remove it from the list of directories to scan
+        mdc.scan_exclude_list.append('/var')
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert list(scan_dict.keys()) == ['/etc']
+
+        # Exclude /etc which means there will be no directories to scan
+        mdc.scan_exclude_list.append('/etc')
+        with pytest.raises(SystemExit) as exc_info:
+            process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                          exclude_items=mdc.scan_exclude_list)
+        assert exc_info.value.code == constants.sig_kill_bad
+
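The simple-case assertions fix the shape of ``scan_dict``: each key is a top-level directory that will actually be walked, and a key only gains ``include``/``exclude`` entries when something beneath it has been carved out. Using the values from the third step above, the resulting structure looks like::

    scan_dict = {
        '/etc': {},                               # whole tree scanned, nothing carved out
        '/var': {
            'include': ['/var/lib', '/var/log'],  # only these subtrees are scanned
            'exclude': {'items': ['/var/run']},   # and this one is skipped within /var
        },
    }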
+    def test_process_include_exclude_items_complex(self, conf, yara, rules, cmd):
+        # Test the process function with modified include and exclude items that will require more complex
+        # processing to generate the list of items to be scanned
+        # Because we are including items in /var/lib, we only need to list the contents of the /var/lib directory
+        # We don't need to list the contents of the /var directory
+        mdc = MalwareDetectionClient(None)
+        mdc.scan_fsobjects = ['/var/lib', '/var/log']
+        mdc.scan_exclude_list = ['/var/lib/systemd', '/var/lib/misc/', '/var/log/wtmp']
+
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert list(scan_dict.keys()) == ['/var']
+        assert sorted(scan_dict['/var']['exclude']['items']) == ['/var/lib/misc', '/var/lib/systemd', '/var/log/wtmp']
+        # The exclude items shouldn't be in the include items
+        # Nor should other items that aren't in the explicitly included items
+        assert all([x not in scan_dict['/var']['include']
+                    for x in ['/var/lib/misc', '/var/lib/systemd', '/var/log/wtmp',
+                              '/var/cache', '/var/lib', '/var/log', '/var/tmp', '/tmp']])
+        # In 'include' will be items that are in the same directory as the excluded items, eg /var/log/lastlog
+        # but not the excluded items, eg /var/log/wtmp
+        assert all([x in scan_dict['/var']['include'] for x in ['/var/lib/dbus', '/var/log/lastlog']])
+
+        # Change the include directory to /var
+        # Now immediate child directories of /var will be in the include list, eg /var/cache and /var/tmp
+        # Because now we have to list the contents of the /var and /var/lib directories
+        mdc.scan_fsobjects.append('/var')
+        scan_dict = process_include_exclude_items(include_items=mdc.scan_fsobjects,
+                                                  exclude_items=mdc.scan_exclude_list)
+        assert list(scan_dict.keys()) == ['/var']
+        assert sorted(scan_dict['/var']['exclude']['items']) == ['/var/lib/misc', '/var/lib/systemd', '/var/log/wtmp']
+        assert all([x not in scan_dict['/var']['include']
+                    for x in ['/var/lib/misc', '/var/lib/systemd', '/var/log/wtmp', '/var/lib', '/var/log', '/tmp']])
+        assert all([x in scan_dict['/var']['include']
+                    for x in ['/var/cache', '/var/tmp', '/var/lib/dbus', '/var/log/lastlog']])
+
+    def test_process_include_exclude_tmp_files(self, conf, yara, rules, cmd, extract_tmp_files):
+        # Test including/excluding some of the files in the tmp archive
+        # Specifically tests excluding link files (good or broken) and pipe files (as well as explicit exclude items)
+
+        include_items = list(map(lambda x: os.path.join(TEMP_TEST_DIR, x), ['scan_me', 'scan_me_too']))
+        exclude_items = list(map(lambda x: os.path.join(TEMP_TEST_DIR, x),
+                                 ['scan_me_not', 'scan_me/dont_scan_me', 'scan_me_too/dont_scan_me_too']))
+        scan_dict = process_include_exclude_items(include_items=include_items, exclude_items=exclude_items)
+        assert list(scan_dict.keys()) == ['/tmp']
+        assert sorted(list(scan_dict['/tmp']['exclude']['items'])) == sorted(exclude_items)
+
+        include_files = sorted(list(map(lambda x: os.path.join(TEMP_TEST_DIR, x),
+                                        ['scan_me/new_file', 'scan_me/old_file', 'scan_me/scan_me',
+                                         'scan_me/scan_me_file', 'scan_me_too/new_file', 'scan_me_too/old_file',
+                                         'scan_me_too/scan_me_too', 'scan_me_too/scan_me_too_file'])))
+        dont_include_files = sorted(list(map(lambda x: os.path.join(TEMP_TEST_DIR, x),
+                                             ['scan_me/link_file', 'scan_me/pipe_file', 'scan_me/broken_link',
+                                              'scan_me/dont_scan_me', 'scan_me_too/link_file', 'scan_me_too/pipe_file',
+                                              'scan_me_too/broken_link', 'scan_me_too/dont_scan_me_too'])))
+        assert
sorted(scan_dict['/tmp']['include']) == include_files + assert all([x not in scan_dict['/tmp']['include'] for x in dont_include_files]) + + # Another test to assert a bug I found is fixed ... due to only having scan_items = set([]) + # Basically include_files should = ['scan_me/scan_me'] but the bug made include_files = [] + include_items = list(map(lambda x: os.path.join(TEMP_TEST_DIR, x), ['scan_me/scan_me'])) + exclude_items = list(map(lambda x: os.path.join(TEMP_TEST_DIR, x), ['scan_me_not'])) + scan_dict = process_include_exclude_items(include_items=include_items, exclude_items=exclude_items) + + include_files = sorted(list(map(lambda x: os.path.join(TEMP_TEST_DIR, x), ['scan_me/scan_me']))) + assert sorted(scan_dict['/tmp']['include']) == include_files + dont_include_files = sorted(list(map(lambda x: os.path.join(TEMP_TEST_DIR, x), + ['scan_me/scan_me_file', 'scan_me/dont_scan_me', 'scan_me/scan_me/here_i_am']))) + assert all([x not in scan_dict['/tmp']['include'] for x in dont_include_files]) + + +@patch(BUILD_YARA_COMMAND_TARGET) +@patch(GET_RULES_TARGET, return_value=RULES_FILE) +@patch(FIND_YARA_TARGET, return_value=YARA) +@patch(LOAD_CONFIG_TARGET, return_value=CONFIG) +@patch.dict(os.environ, {'TEST_SCAN': 'false'}) +class TestParseScanOutput: + + def test_contrived_scan_output(self, conf, yara, rules, cmd): + # Parse the CONTRIVED_SCAN_OUTPUT to find actual rule matches amongst malformed output lines + mdc = MalwareDetectionClient(None) + mdc.add_metadata = False + mdc.parse_scan_output(CONTRIVED_SCAN_OUTPUT) + + # 1 match for rule 'this', 3 matches for rule 'rule', 2 matches for rule 'another_matching_rule' + assert mdc.matches == 8 + + # 1 matching string for 'this' + rule_match = mdc.host_scan['this'] + assert len(rule_match) == 1 + assert 'e-r-r-o-r s-c-a-n-n-i-n-g' in rule_match[0]['source'] + assert rule_match[0]['string_data'] == "matches 'this' rule" + assert rule_match[0]['string_identifier'] == '$match' + assert rule_match[0]['string_offset'] == 291 + + # 14 matching strings for 'Rule' + rule_match = mdc.host_scan['Rule'] + assert len(rule_match) == 14 + assert rule_match[0]['source'] == MATCHING_ENTITY_FILE + assert rule_match[0]['string_data'] == 'string match in the file "matching_entity"' + assert rule_match[0]['string_identifier'] == '$match0' + assert rule_match[0]['string_offset'] == 21 + assert rule_match[1]['source'] == MATCHING_ENTITY_FILE + assert rule_match[1]['string_data'] == "another string match in matching_entity" + assert rule_match[1]['string_identifier'] == '$match1' + assert rule_match[1]['string_offset'] == 83 + assert rule_match[2]['source'] == MATCHING_ENTITY_FILE + assert rule_match[2]['string_data'] == 'string with different types of quotes \'here\' and "here"' + assert rule_match[2]['string_identifier'] == '$match2' + assert rule_match[2]['string_offset'] == 230 + + # Rule matches for ANOTHER_MATCHING_ENTITY_FILE (which has a space in the filename) + assert rule_match[3]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[3]['string_data'] == "string match containing error scanning but it's ok because its not in a rule line" + assert rule_match[3]['string_identifier'] == '$match3' + assert rule_match[3]['string_offset'] == 2 + assert rule_match[4]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[4]['string_data'] == "contains =" + assert rule_match[4]['string_identifier'] == '$grep1' + assert rule_match[4]['string_offset'] == 97 + assert rule_match[6]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert 
rule_match[6]['string_data'] == "contains .+"
+        assert rule_match[6]['string_identifier'] == '$grep2'
+        assert rule_match[6]['string_offset'] == 153
+        assert rule_match[8]['source'] == ANOTHER_MATCHING_ENTITY_FILE
+        assert rule_match[8]['string_data'] == 'contains "'
+        assert rule_match[8]['string_identifier'] == '$grep3'
+        assert rule_match[8]['string_offset'] == 213
+        assert rule_match[9]['source'] == ANOTHER_MATCHING_ENTITY_FILE
+        assert rule_match[9]['string_data'] == "contains '"
+        assert rule_match[9]['string_identifier'] == '$grep4'
+        assert rule_match[9]['string_offset'] == 241
+        assert rule_match[10]['source'] == ANOTHER_MATCHING_ENTITY_FILE
+        assert rule_match[10]['string_data'] == 'contains ()[]'
+        assert rule_match[10]['string_identifier'] == '$grep5'
+        assert rule_match[10]['string_offset'] == 269
+        assert rule_match[11]['source'] == ANOTHER_MATCHING_ENTITY_FILE
+        assert rule_match[11]['string_data'] == 'contains {'
+        assert rule_match[11]['string_identifier'] == '$grep6'
+        assert rule_match[11]['string_offset'] == 299
+        assert rule_match[12]['source'] == ANOTHER_MATCHING_ENTITY_FILE
+        assert rule_match[12]['string_data'] == 'contains ^$'
+        assert rule_match[12]['string_identifier'] == '$grep7'
+        assert rule_match[12]['string_offset'] == 327
+
+        assert rule_match[13]['source'].startswith('matching_entity_3')
+        assert rule_match[13]['string_data'] == ''
+        assert rule_match[13]['string_identifier'] == ''
+        assert rule_match[13]['string_offset'] == -1
+
+        # 4 matching strings for 'another_matching_rule'
+        rule_match = mdc.host_scan['another_matching_rule']
+        assert len(rule_match) == 4
+        assert rule_match[2]['source'].endswith('snap/signal-desktop/350/opt/Signal/resources/app.asar')
+        assert rule_match[2]['string_data'] == '#!/bin/sh'
+        assert rule_match[2]['string_identifier'] == '$s0'
+        assert rule_match[2]['string_offset'] == 60783814
+        assert rule_match[3]['source'] == '1234567'
+        assert rule_match[3]['string_data'] == '#!/bin/sh'
+        assert rule_match[3]['string_identifier'] == '$s0'
+        assert rule_match[3]['string_offset'] == 0
+
+        rule_match = mdc.host_scan['Iyamtho']
+        assert len(rule_match) == 1
+        assert rule_match[0]['source'] == " yep"
+        assert rule_match[0]['string_data'] == ''
+        assert rule_match[0]['string_identifier'] == ''
+        assert rule_match[0]['string_offset'] == -1
+
+        rule_match = mdc.host_scan['n_m3_t00']
+        assert len(rule_match) == 1
+        assert rule_match[0]['source'] == "damn straight"
+        assert rule_match[0]['string_data'] == ''
+        assert rule_match[0]['string_identifier'] == ''
+        assert rule_match[0]['string_offset'] == -1
+
+    def test_contrived_scan_output_metadata(self, conf, yara, rules, cmd, create_test_files):
+        # Again, parse the CONTRIVED_SCAN_OUTPUT to find actual rule matches amongst malformed output lines,
+        # but this time check the expected metadata values too
+
+        # Again, need to populate rules_file_location with any rule, but it's not relevant for the tests
+        mdc = MalwareDetectionClient(None)
+        mdc.parse_scan_output(CONTRIVED_SCAN_OUTPUT)
+
+        # Matches and metadata for MATCHING_ENTITY_FILE
+        rule_match = mdc.host_scan['Rule']
+        assert rule_match[0]['source'] == MATCHING_ENTITY_FILE
+        assert rule_match[0]['string_offset'] == 21
+        metadata = rule_match[0]['metadata']
+        assert metadata['source_type'] == 'file'
+        assert metadata['file_type'] == 'ASCII text'
+        assert metadata['mime_type'] == 'text/plain; charset=us-ascii'
+        assert metadata['md5sum'] == '9dd5c5e00d28520dc9da3c509c0db2a0'
+        assert metadata['line_number'] == 1
+        assert metadata['line'] ==
urlencode('This line contains a string match in the file "matching_entity"') + + # Testing displaying long lines + assert rule_match[1]['source'] == MATCHING_ENTITY_FILE + assert rule_match[1]['string_offset'] == 83 + metadata = rule_match[1]['metadata'] + assert metadata['source_type'] == 'file' + assert metadata['file_type'] == 'ASCII text' + assert metadata['mime_type'] == 'text/plain; charset=us-ascii' + assert metadata['md5sum'] == '9dd5c5e00d28520dc9da3c509c0db2a0' + assert metadata['line_number'] == 2 + assert metadata['line'] == urlencode('This line contains another string match in matching_entity and it is very long for testing the ellipses that are added o...') + + # Testing matching/displaying a mixture of quote types in the string_data + assert rule_match[2]['source'] == MATCHING_ENTITY_FILE + assert rule_match[2]['string_offset'] == 230 + metadata = rule_match[2]['metadata'] + assert metadata['source_type'] == 'file' + assert metadata['file_type'] == 'ASCII text' + assert metadata['mime_type'] == 'text/plain; charset=us-ascii' + assert metadata['md5sum'] == '9dd5c5e00d28520dc9da3c509c0db2a0' + assert metadata['line_number'] == 4 + assert metadata['line'] == urlencode("""And this line contains a string with different types of quotes 'here' and "here" and its long too but not long enough""") + + # Rule match metadata for ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[3]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[3]['string_offset'] == 2 + metadata = rule_match[3]['metadata'] + assert metadata['source_type'] == 'file' + assert metadata['file_type'] == 'ASCII text' + assert metadata['mime_type'] == 'text/plain; charset=us-ascii' + assert metadata['md5sum'] == '64764d295e92ffeec36d3fcd646a3af4' + assert metadata['line_number'] == 3 + assert metadata['line'] == urlencode("string match containing error scanning but it's ok because its not in a rule line") + + assert rule_match[4]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[4]['string_offset'] == 97 + metadata = rule_match[4]['metadata'] + assert metadata['md5sum'] == '64764d295e92ffeec36d3fcd646a3af4' + assert metadata['line_number'] == 7 + assert metadata['line'] == urlencode("This line contains = char") + + assert rule_match[5]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[5]['string_offset'] == 123 + metadata = rule_match[5]['metadata'] + assert metadata['line_number'] == 8 + assert metadata['line'] == urlencode("This line contains = char too") + + assert rule_match[6]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[6]['string_offset'] == 153 + metadata = rule_match[6]['metadata'] + assert metadata['line_number'] == 9 + assert metadata['line'] == urlencode("This line contains .+ chars") + + assert rule_match[8]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[8]['string_offset'] == 213 + metadata = rule_match[8]['metadata'] + assert metadata['line_number'] == 11 + assert metadata['line'] == urlencode('This line contains "" chars') + + assert rule_match[9]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[9]['string_offset'] == 241 + metadata = rule_match[9]['metadata'] + assert metadata['line_number'] == 12 + assert metadata['line'] == urlencode("This line contains '' chars") + + assert rule_match[10]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[10]['string_offset'] == 269 + metadata = rule_match[10]['metadata'] + assert metadata['line_number'] == 13 + assert metadata['line'] == urlencode("This line contains ()[] chars") 
+ + assert rule_match[11]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[11]['string_offset'] == 299 + metadata = rule_match[11]['metadata'] + assert metadata['line_number'] == 14 + assert metadata['line'] == urlencode("This line contains {} chars") + + assert rule_match[12]['source'] == ANOTHER_MATCHING_ENTITY_FILE + assert rule_match[12]['string_offset'] == 327 + metadata = rule_match[12]['metadata'] + assert metadata['line_number'] == 15 + assert metadata['line'] == urlencode("This line contains ^$ chars") + + # Testing a missing file - expect minimal metadata because we can't know the other values + assert rule_match[13]['source'] == "matching_entity_3, but without any string matches - yes that's ok" + metadata = rule_match[13]['metadata'] + assert metadata['source_type'] == 'file' + assert all([key not in ['file_type', 'md5sum', 'line_number'] for key in metadata.keys()]) + + # Testing a missing file for another rule - again, expect minimal metadata because we can't find out more info + rule_match = mdc.host_scan['another_matching_rule'] + assert rule_match[2]['source'].endswith('snap/signal-desktop/350/opt/Signal/resources/app.asar') + metadata = rule_match[2]['metadata'] + assert metadata['source_type'] == 'file' + assert all([key not in ['file_type', 'md5sum', 'line_number'] for key in metadata.keys()]) + + # Testing a missing process - again, expect minimal metadata because we can't find out more info + assert rule_match[3]['source'] == '1234567' + metadata = rule_match[3]['metadata'] + assert metadata['source_type'] == 'process' + assert all([key not in ['process_name', 'file_type', 'md5sum', 'line_number'] for key in metadata.keys()]) + + def test_random_output(self, conf, yara, rules, cmd): + mdc = MalwareDetectionClient(None) + mdc.parse_scan_output(RANDOM_OUTPUT) + assert mdc.matches == 2 + rule_match = mdc.host_scan['Lorem'] + assert rule_match[0]['source'].startswith('ipsum dolor') + assert rule_match[0]['string_data'] == '' + assert rule_match[0]['string_identifier'] == '' + assert rule_match[0]['string_offset'] == -1 + rule_match = mdc.host_scan['Dictum'] + assert rule_match[0]['source'].startswith('at tempor') + assert rule_match[0]['string_data'] == '' + assert rule_match[0]['string_identifier'] == '' + assert rule_match[0]['string_offset'] == -1 + + +MATCHING_ENTITY_FILE = os.path.join(TEMP_TEST_DIR, 'matching_entity') +MATCHING_ENTITY_FILE_CONTENTS = """ +This line contains a string match in the file "matching_entity" +This line contains another string match in matching_entity and it is very long for testing the ellipses that are added onto very long lines + +And this line contains a string with different types of quotes 'here' and "here" and its long too but not long enough +""".lstrip() + +ANOTHER_MATCHING_ENTITY_FILE = os.path.join(TEMP_TEST_DIR, 'another matching_entity') +ANOTHER_MATCHING_ENTITY_FILE_CONTENTS = """ + + +string match containing error scanning but it's ok because its not in a rule line + + + +This line contains = char +This line contains = char too +This line contains .+ chars +This line contains .+ chars too +This line contains "" chars +This line contains '' chars +This line contains ()[] chars +This line contains {} chars +This line contains ^$ chars +This line contains ^$ chars too +The previous line and this one too are ignored as they are beyond the default 10 string match limit +"""[1:] # Ignore first newline char otherwise it changes the md5sum + +TEST_RULE_FILE_CONTENTS = """ +rule TEST_RedHatInsightsMalwareDetection +// 
Verifies the Red Hat Insights Malware Detection Client app is present on the system +{ + strings: + $re1 = /Malware ?Detection ?Client/ + condition: + $re1 +} +""".lstrip() + +CONTRIVED_SCAN_OUTPUT = """ +error scanning this line, it will be skipped +this line also contains error scanning so it will be skipped too +0x1:$string1: skip me coz the rule line had an error +0x11:$string2: skip me too +0x111:$string3: skip me three +this line doesn't contain e-r-r-o-r s-c-a-n-n-i-n-g so will be considered a scan match, even though it shouldn't +0x123:$match: matches 'this' rule +BadFormat +0x1:$skipme: the previous line will fail because it doesn't follow the 'rule matching_entity' format +Rule:matching_entity +0x1:$alsoskipme: the previous line also fails because it has a ":" instead of a " " between rule & matching_entity +Rule %s +0x15:$match0: string match in the file "matching_entity" +0x53:$match1: another string match in matching_entity +0xe6:$match2: string with different types of quotes 'here' and "here" +Rule %s +0x2:$match3: string match containing error scanning but it's ok because its not in a rule line +0x61:$grep1: contains = +0x7b:$grep1: contains = +0x99:$grep2: contains .+ +0xb5:$grep2: contains .+ +0xd5:$grep3: contains " +0xf1:$grep4: contains ' +0x10d:$grep5: contains ()[] +0x12b:$grep6: contains { +0x147:$grep7: contains ^$ +0x163:$grep7: contains ^$ +0x179:$ignored: The previous line and this one too are ignored as they are beyond the default 10 string match limit +Rule matching_entity_3, but without any string matches - yes that's ok +more error scanning this line +another_matching_rule /var/lib/snapd/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/signal-desktop/350/opt/Signal/resources/app.asar +0x212f197:$s0: #!/bin/sh +0x2130313:$s0: #!/bin/sh +0x39f7cc6:$s0: #!/bin/sh +another_matching_rule /var/lib/snapd/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859error scanning /dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/signal-desktop/350/opt/Signal/resources/app2.asar +0x212f197:$s0: #!/bin/sh +0x2130313:$s0: #!/bin/sh +0x39f7cc6:$s0: #!/bin/sh +another_matching_rule 1234567 +0x0:$s0: #!/bin/sh +0x1badoffset:$s1: skip this line +0x2error scanning skip/this/line/too: need more colons +badoffset_but_notarule:$s2: a bad offset line that looks a bit like a rule line but really isn't +007 isn't considered a valid rule name +666neither is this +_me neither + same here +nor: I +Iyamtho yep +n_m3_t00 damn straight +error scanning /var/lib/snapd/snap/core/10859/dev/core: error: 4 +error scanning /var/lib/snapd/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/fd/3/snap/core/10859/dev/core: error: 4 +error scanning /var/lib/snapd//cookie/snap.gnome-3-28-1804: could not open file +error scanning /var/lib/snapd//device/private-keys-v1/_53ir43FCxbgdSyj8NriGt9gfonABhzHHhsGnGhvjqpK_hwdIcP0ScJpKppzEhps: could not open file +""" % (MATCHING_ENTITY_FILE, ANOTHER_MATCHING_ENTITY_FILE) + +RANDOM_OUTPUT = """ +Lorem ipsum dolor sit amet, consectetur adipiscing elit, 
sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. +Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Non odio euismod lacinia at quis risus sed. +""" + +# Base64 representation of a tgz file containing simple files to be extracted into /tmp to be used for scanning +# It is decoded and unzipped in the extract_tmp_files fixture +SCAN_FILES_BASE64 = """ +H4sIAD+feGEAA+1a247TMBDtc79iVCEVhARxrk888A+8IajSxt1Ym8YlcXZVIf6dcbZG2WzTkm5t +ijpnH3KxHbt7PPaZGdertFxs+MeJRXiIJIn0lSWR170aTFjIgpixJAyCicdY5IcTiGwOyqCpVVoB +TDZ5o/Ij9U6V/6eo9/yX/HGxFgW30YcmOI7DEfwHvh9NwLMxmD6I/5Z/WWRXxT/ziX8XMPxvxZbb +mgBn8B8kyQRiC2N5AeK/5T+TpVrUdsTA+P0/ZFFE+78LHOR/k6pVLsq7BS+VULvX9nHK/iMv6PGf ++Fid1n8H+JKLGgpRcljhDEhFWUMKtaqQfWinAYgSVM5B7w0w682M2fRQ+1Jig+rFV3ptsV4GQgG2 +f+DVDgqJldeyAsVrpRvqXnlRiG3Na3xIFaQVhzTLeAbYl+w00wOop9PP+EV15Ac9CpVDJtZrXuEY +QO22+GW5hh+NxE5hjqPm83ZcM3072w+xfupESQnLRgH+vKcXvJTNXT791xS+Csbkl5W85+UC/3H3 +F+/jaf1PRqz/MYtw//dtDsrgxu3f8G9p628xfv8P8I/2fxfo86/XvYVYpJsL9jFe/4dJRP6fE/T4 +t+ICtvyHw/wnQdTn34ti4t8FptNnMm2vmfQLXlUoxvS8aB+18hFqjnrpHpZ8lTY1b6WRVkMo7lKo +GhSIWnhNEQdk4SdY5Wk1XKL11aHSD+/b4vpY2VDb2Wy47Xw+XPb23ddvw6U/fw2XfX9zumw/Wg7b +ij8I2ezrpUa9SnzQYlPLXXFXygoFb6olMN+175Z8J9u6HDK+TptCAfOey+1CbIT6G2Vq7F9rLJvx +n1H6z2e4AqD+s+mUGty4/Zt1H83YWg5ovP7zQ4/yP07Q5d9WDugM/cc8yv84QZd/WzmgM/j3EtL/ +TtDl31YO6Az+/YjyP07Q5f/PPX/g1QXVwPj9P/KZR/u/Cwzzb8L4r9fdp+z/hf/v4zWh9d8FyP8n +/9/YvyUP+4z8bxSxgOzfBSj/S/lfY/+2YoCj438sCWKd/7W1IHVx4/Zv+MfJfV3xPzr/5QRd/rvh +9ktOiDP8v4QlxL8LHOX/Qi7geP/PDyn/6wbk/5H/Z8z9qvI/ZP9O0OX/qvI/5P87QZf/a8r/xB7l +f1ygy7+t49Zn+P9ee/6Hzn9bR5d/W7GA8f5fGOv1n/w/+xjg/6LbQLv+Hzn/eyD+j7e0/7sAxf8p +/m/s/2ri/34Y+EHv/O8lA1Jd3Lj9EwgEAoFAIBBuC78BaSEregBQAAA=""".replace('\n', '') diff --git a/insights/tests/client/phase/test_collect_and_upload.py b/insights/tests/client/phase/test_collect_and_upload.py index 6cce91705..db9f3837a 100644 --- a/insights/tests/client/phase/test_collect_and_upload.py +++ b/insights/tests/client/phase/test_collect_and_upload.py @@ -21,6 +21,8 @@ def patch_insights_config(old_function): "return_value.load_all.return_value.register": False, "return_value.load_all.return_value.diagnosis": None, "return_value.load_all.return_value.payload": None, + "return_value.load_all.return_value.manifest": None, + "return_value.load_all.return_value.content_type": None, "return_value.load_all.return_value.compliance": False, "return_value.load_all.return_value.module": False, "return_value.load_all.return_value.output_dir": None, From 9e5d1729a04fad9d469715cd9b8663240efb59c9 Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 19 Nov 2021 08:21:05 +0800 Subject: [PATCH 599/892] fix: update the pmlog_summary to support new metrics (#3290) * Enhance the pmlog_summary to support new metrics Signed-off-by: Xiangce Liu * Fix doc building error Signed-off-by: Xiangce Liu --- insights/parsers/pmlog_summary.py | 53 +++++++++++++++----- insights/parsers/tests/test_pmlog_summary.py | 26 +++++++--- 2 files changed, 59 insertions(+), 20 deletions(-) diff --git a/insights/parsers/pmlog_summary.py b/insights/parsers/pmlog_summary.py index 660e175e9..e0a6efec1 100644 --- a/insights/parsers/pmlog_summary.py +++ b/insights/parsers/pmlog_summary.py @@ -10,14 +10,14 @@ def parse(data): """ - Parse a set of key/value pairs into a heirarchical dictionary of + Parse a set of key/value pairs into a hierarchical dictionary of typed values Arguments: data (dict): Input dictionary of key/value pairs Returns: - dict: Heirarchical dictionary with keys separated at "." 
and type + dict: Hierarchical dictionary with keys separated at "." and type conversion of the numerical values """ result = {} @@ -31,6 +31,11 @@ def typed(x): def insert(k, v): cur = result key_parts = k.split(".") + # process the '["xxx"]' part as a sub-key + if v.startswith('['): + mk, _, v = v.partition(']') + key_parts.append(mk.strip('"[]')) + v = v.strip() # walk down the structure to the correct leaf for part in key_parts: @@ -70,24 +75,44 @@ class PmLogSummary(CommandParser, dict): kernel.all.cpu.nice 0.000 none kernel.all.cpu.steal 0.000 none kernel.all.cpu.idle 3.986 none + kernel.all.pressure.io.full.avg ["10 second"] 0.001 none + kernel.all.pressure.cpu.some.avg ["1 minute"] 14.942 none + kernel.all.pressure.memory.full.avg ["5 minute"] 0.002 none disk.all.total 0.252 count / sec + disk.dev.total ["vda"] 0.016 count / sec + disk.dev.total ["vdb"] 0.445 count / sec + disk.dev.total ["vdc"] 2.339 count / sec Output is parsed and stored as a dictionary. Each value is stored as a dict in the form ``{'val': number or string, 'units': string}``. Keys are a hierarchy of the input key value split on the "." character. - For instance input line "mem.util.used 3133919.812 Kbyte" is parsed - as:: - - { - 'mem': { - 'util': { - 'used': { - 'val': 3133919.812, - 'units': 'Kbyte' + + For instance:: + + 1. Input line "mem.util.used 3133919.812 Kbyte" is parsed as: + { + 'mem': { + 'util': { + 'used': { + 'val': 3133919.812, + 'units': 'Kbyte' + } + } + } + } + 2. Input line "disk.dev.total ["vdc"] 2.339 count / sec" is parsed as: + { + 'disk': { + 'dev': { + 'total': { + 'vdc': { + 'val': 2.339 + 'units': 'count / sec' + } + } } } } - } Example: >>> type(pmlog_summary) @@ -96,6 +121,10 @@ class PmLogSummary(CommandParser, dict): True >>> pmlog_summary['disk']['all']['total'] == {'val': 0.252, 'units': 'count / sec'} True + >>> pmlog_summary['disk']['dev']['total']['vdc'] == {'val': 2.339, 'units': 'count / sec'} + True + >>> pmlog_summary['kernel']['all']['pressure']['memory']['full']['avg']['5 minute'] == {'val': 0.002, 'units': 'none'} + True """ def parse_content(self, content): diff --git a/insights/parsers/tests/test_pmlog_summary.py b/insights/parsers/tests/test_pmlog_summary.py index eb7826ec4..1df33bbb9 100644 --- a/insights/parsers/tests/test_pmlog_summary.py +++ b/insights/parsers/tests/test_pmlog_summary.py @@ -14,7 +14,13 @@ kernel.all.cpu.nice 0.000 none kernel.all.cpu.steal 0.000 none kernel.all.cpu.idle 3.986 none +kernel.all.pressure.io.full.avg ["10 second"] 0.001 none +kernel.all.pressure.cpu.some.avg ["1 minute"] 14.942 none +kernel.all.pressure.memory.full.avg ["5 minute"] 0.002 none disk.all.total 0.252 count / sec +disk.dev.total ["vda"] 0.016 count / sec +disk.dev.total ["vdb"] 0.445 count / sec +disk.dev.total ["vdc"] 2.339 count / sec """ PMLOG_EMPTY = """ @@ -28,15 +34,19 @@ def test_pmlog_summary(): assert pmlog_summary['mem']['util']['used'] == {'val': 3133919.812, 'units': 'Kbyte'} assert pmlog_summary['mem']['physmem'] == {'val': 3997600.0, 'units': 'Kbyte'} assert pmlog_summary['disk']['all']['total'] == {'val': 0.252, 'units': 'count / sec'} + assert pmlog_summary['kernel']['all']['pressure']['cpu']['some']['avg']['1 minute'] == {'val': 14.942, 'units': 'none'} assert 'not.present' not in pmlog_summary - assert pmlog_summary['kernel'] == { - 'all': {'cpu': { - 'user': {'val': 0.003, 'units': 'none'}, - 'sys': {'val': 0.004, 'units': 'none'}, - 'nice': {'val': 0.0, 'units': 'none'}, - 'steal': {'val': 0.0, 'units': 'none'}, - 'idle': {'val': 3.986, 'units': 
'none'}, - }} + assert pmlog_summary['kernel']['all']['cpu'] == { + 'user': {'val': 0.003, 'units': 'none'}, + 'sys': {'val': 0.004, 'units': 'none'}, + 'nice': {'val': 0.0, 'units': 'none'}, + 'steal': {'val': 0.0, 'units': 'none'}, + 'idle': {'val': 3.986, 'units': 'none'}, + } + assert pmlog_summary['disk']['dev']['total'] == { + 'vda': {'val': 0.016, 'units': 'count / sec'}, + 'vdb': {'val': 0.445, 'units': 'count / sec'}, + 'vdc': {'val': 2.339, 'units': 'count / sec'}, } From d307c077d0976eea444c7860cb9d529f26ee0eaf Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Sun, 21 Nov 2021 20:02:55 -0500 Subject: [PATCH 600/892] Add in IsRhel9 component (#3288) * Updated the rhel_version component to have a base class that the other classes inherit to cut down on the duplicate code. * Added the IsRhel9 component check. * Added rhel9 checks to the redhat_release combiner. * Added the IsRhel9 check for the BootLoaderEntries parser. * Added the IsRhel9 check for the corosync_cmapctl_cmd_list datasource. * Added IsRhel9 to the collect manifest for the corosync_cmapctl_cmd_list spec. Signed-off-by: Ryan Blakley --- insights/collect.py | 2 + insights/combiners/redhat_release.py | 4 ++ .../combiners/tests/test_redhat_release.py | 2 + insights/components/rhel_version.py | 62 ++++++++++++++----- .../components/tests/test_rhel_version.py | 22 ++++++- insights/parsers/grub_conf.py | 4 +- insights/specs/default.py | 6 +- 7 files changed, 79 insertions(+), 23 deletions(-) diff --git a/insights/collect.py b/insights/collect.py index fa516953b..d416c19ba 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -218,6 +218,8 @@ enabled: true - name: insights.components.rhel_version.IsRhel8 enabled: true + - name: insights.components.rhel_version.IsRhel9 + enabled: true # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory diff --git a/insights/combiners/redhat_release.py b/insights/combiners/redhat_release.py index 5185abd0f..b6fdba344 100644 --- a/insights/combiners/redhat_release.py +++ b/insights/combiners/redhat_release.py @@ -90,6 +90,7 @@ class RedHatRelease(object): rhel6 (str): The RHEL version when it's RHEL6, otherwise None rhel7 (str): The RHEL version when it's RHEL7, otherwise None rhel8 (str): The RHEL version when it's RHEL8, otherwise None + rhel9 (str): The RHEL version when it's RHEL9, otherwise None Raises: SkipComponent: If the version can't be determined even though a Uname @@ -126,6 +127,7 @@ def __init__(self, uname, rh_rel): self.rhel6 = self.rhel if self.major == 6 else None self.rhel7 = self.rhel if self.major == 7 else None self.rhel8 = self.rhel if self.major == 8 else None + self.rhel9 = self.rhel if self.major == 9 else None @serializer(RedHatRelease) @@ -137,6 +139,7 @@ def serialize_RedHatRelease(obj, root=None): "rhel6": obj.rhel6, "rhel7": obj.rhel7, "rhel8": obj.rhel8, + "rhel9": obj.rhel9, } @@ -149,4 +152,5 @@ def deserialize_RedHatRelease(_type, obj, root=None): foo.rhel6 = obj.get("rhel6") foo.rhel7 = obj.get("rhel7") foo.rhel8 = obj.get("rhel8") + foo.rhel9 = obj.get("rhel9") return foo diff --git a/insights/combiners/tests/test_redhat_release.py b/insights/combiners/tests/test_redhat_release.py index 3b47e5252..dc0a98391 100644 --- a/insights/combiners/tests/test_redhat_release.py +++ b/insights/combiners/tests/test_redhat_release.py @@ -62,6 +62,7 @@ def test_RedHatRelease_redhat_release(): assert result.minor == expected[1] assert result.rhel == result.rhel7 == '7.2' assert result.rhel8 is None + 
assert result.rhel9 is None
 
 
 def test_RedHatRelease_both():
@@ -74,6 +75,7 @@ def test_RedHatRelease_both():
     assert result.rhel == result.rhel7 == '7.2'
     assert result.rhel6 is None
     assert result.rhel8 is None
+    assert result.rhel9 is None
 
 
 def test_raise():
diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py
index 5b2d2c9e6..90be805e6 100644
--- a/insights/components/rhel_version.py
+++ b/insights/components/rhel_version.py
@@ -1,6 +1,6 @@
 """
-IsRhel6, IsRhel7 and IsRhel8
-===============================
+IsRhel6, IsRhel7, IsRhel8, and IsRhel9
+======================================
 
 An ``IsRhel*`` component is valid if the
 :py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates
@@ -11,14 +11,31 @@
 In particular, an ``IsRhel*`` component can be added as a dependency
 of a parser to limit it to a given version.
 """
-
-from insights.core.plugins import component
 from insights.combiners.redhat_release import RedHatRelease
 from insights.core.dr import SkipComponent
+from insights.core.plugins import component
+
+
+class IsRhel(object):
+    """
+    This component uses ``RedHatRelease`` combiner to determine the RHEL
+    major version. It then checks if the major version matches the version
+    argument; if it doesn't, it raises ``SkipComponent``.
+
+    Attributes:
+        minor (int): The minor version of RHEL.
+
+    Raises:
+        SkipComponent: When RHEL major version does not match version.
+    """
+    def __init__(self, rhel, version=None):
+        if rhel.major != version:
+            raise SkipComponent("Not RHEL{vers}".format(vers=version))
+        self.minor = rhel.minor
 
 
 @component(RedHatRelease)
-class IsRhel6(object):
+class IsRhel6(IsRhel):
     """
     This component uses ``RedHatRelease`` combiner to
     determine RHEL version. It checks if RHEL6, if not
@@ -31,16 +48,14 @@ class IsRhel6(object):
         SkipComponent: When RHEL version is not RHEL6.
     """
     def __init__(self, rhel):
-        if rhel.major != 6:
-            raise SkipComponent('Not RHEL6')
-        self.minor = rhel.minor
+        super(IsRhel6, self).__init__(rhel, 6)
 
 
 @component(RedHatRelease)
-class IsRhel7(object):
+class IsRhel7(IsRhel):
     """
     This component uses ``RedHatRelease`` combiner
-    to determine RHEL version. It checks if RHEL7, if not \
+    to determine RHEL version. It checks if RHEL7, if not
     RHEL7 it raises ``SkipComponent``.
 
     Attributes:
@@ -50,13 +65,11 @@ class IsRhel7(object):
         SkipComponent: When RHEL version is not RHEL7.
     """
     def __init__(self, rhel):
-        if rhel.major != 7:
-            raise SkipComponent('Not RHEL7')
-        self.minor = rhel.minor
+        super(IsRhel7, self).__init__(rhel, 7)
 
 
 @component(RedHatRelease)
-class IsRhel8(object):
+class IsRhel8(IsRhel):
     """
     This component uses ``RedhatRelease`` combiner
     to determine RHEL version. It checks if RHEL8, if not
@@ -69,6 +82,21 @@ class IsRhel8(object):
         SkipComponent: When RHEL version is not RHEL8.
     """
     def __init__(self, rhel):
-        if rhel.major != 8:
-            raise SkipComponent('Not RHEL8')
-        self.minor = rhel.minor
+        super(IsRhel8, self).__init__(rhel, 8)
+
+
+@component(RedHatRelease)
+class IsRhel9(IsRhel):
+    """
+    This component uses ``RedHatRelease`` combiner
+    to determine RHEL version. It checks if RHEL9, if not
+    RHEL9 it raises ``SkipComponent``.
+
+    Attributes:
+        minor (int): The minor version of RHEL 9.
+
+    Raises:
+        SkipComponent: When RHEL version is not RHEL9.
+ """ + def __init__(self, rhel): + super(IsRhel9, self).__init__(rhel, 9) diff --git a/insights/components/tests/test_rhel_version.py b/insights/components/tests/test_rhel_version.py index f22ca4f99..2ffcc3ad1 100644 --- a/insights/components/tests/test_rhel_version.py +++ b/insights/components/tests/test_rhel_version.py @@ -1,4 +1,4 @@ -from insights.components.rhel_version import IsRhel6, IsRhel7, IsRhel8 +from insights.components.rhel_version import IsRhel6, IsRhel7, IsRhel8, IsRhel9 from insights.combiners.redhat_release import RedHatRelease as RR from insights.parsers.uname import Uname from insights.parsers.redhat_release import RedhatRelease @@ -25,6 +25,10 @@ Red Hat Enterprise Linux release 8.0 (Ootpa) """.strip() +REDHAT_RELEASE5 = """ +Red Hat Enterprise Linux release 9.0 (Plow) +""".strip() + # RHEL6 Tests def test_is_rhel6(): @@ -96,3 +100,19 @@ def test_not_rhel8(): with pytest.raises(SkipComponent) as e: IsRhel8(rel) assert "Not RHEL8" in str(e) + + +# RHEL9 Tests +def test_is_rhel9(): + rr = RedhatRelease(context_wrap(REDHAT_RELEASE5)) + rel = RR(None, rr) + result = IsRhel9(rel) + assert isinstance(result, IsRhel9) + + +def test_not_rhel9(): + rr = RedhatRelease(context_wrap(REDHAT_RELEASE2)) + rel = RR(None, rr) + with pytest.raises(SkipComponent) as e: + IsRhel9(rel) + assert "Not RHEL9" in str(e) diff --git a/insights/parsers/grub_conf.py b/insights/parsers/grub_conf.py index f0e95ec12..41ab0e2da 100644 --- a/insights/parsers/grub_conf.py +++ b/insights/parsers/grub_conf.py @@ -53,7 +53,7 @@ from insights import Parser, parser, get_active_lines from insights.parsers import ParseException, SkipException -from insights.components.rhel_version import IsRhel6, IsRhel7, IsRhel8 +from insights.components.rhel_version import IsRhel6, IsRhel7, IsRhel8, IsRhel9 from insights.specs import Specs @@ -356,7 +356,7 @@ def __init__(self, *args, **kwargs): self._efi = True -@parser(Specs.boot_loader_entries, IsRhel8) +@parser(Specs.boot_loader_entries, [IsRhel8, IsRhel9]) class BootLoaderEntries(Parser, dict): """ Parses the ``/boot/loader/entries/*.conf`` files. diff --git a/insights/specs/default.py b/insights/specs/default.py index 5e93a505f..734f21b69 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -28,7 +28,7 @@ from insights.core.spec_factory import first_file, listdir from insights.combiners.services import Services from insights.combiners.ps import Ps -from insights.components.rhel_version import IsRhel8, IsRhel7 +from insights.components.rhel_version import IsRhel7, IsRhel8, IsRhel9 from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP from insights.components.ceph import IsCephMonitor from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion @@ -143,7 +143,7 @@ class DefaultSpecs(Specs): cmdline = simple_file("/proc/cmdline") corosync = simple_file("/etc/sysconfig/corosync") - @datasource(HostContext, [IsRhel7, IsRhel8]) + @datasource(HostContext, [IsRhel7, IsRhel8, IsRhel9]) def corosync_cmapctl_cmd_list(broker): """ corosync-cmapctl add different arguments on RHEL7 and RHEL8. 
@@ -158,7 +158,7 @@ def corosync_cmapctl_cmd_list(broker): corosync_cmd, ' '.join([corosync_cmd, '-d runtime.schedmiss.timestamp']), ' '.join([corosync_cmd, '-d runtime.schedmiss.delay'])] - if broker.get(IsRhel8): + if broker.get(IsRhel8) or broker.get(IsRhel9): return [ corosync_cmd, ' '.join([corosync_cmd, '-m stats']), From ea58defcb037f696a8c4a32b9c55c42234c74d5a Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Mon, 22 Nov 2021 09:21:25 +0800 Subject: [PATCH 601/892] Refactor: read metrics from config.ros for pmlog_summary (#3278) * Feat: read metrics from config.ros for pmlog_summary - Update pmlog_summary to read all "mandatory on" metrics in config.ros - Move the depended specs to the new datasource directory - Add test for these datasources Signed-off-by: Xiangce Liu * Update the Returns docstring of the pmlog_summary_metrics Signed-off-by: Xiangce Liu * Get the args for pmlog_summary as a whole string Signed-off-by: Xiangce Liu * Fix the doc entry Signed-off-by: Xiangce Liu * update the docstring of returns Signed-off-by: Xiangce Liu * use try/except for datasource Signed-off-by: Xiangce Liu * Fix typos in doc as per review feedback Signed-off-by: Xiangce Liu --- docs/custom_datasources_index.rst | 8 ++ insights/collect.py | 4 + insights/specs/datasources/pcp.py | 76 +++++++++++++ insights/specs/default.py | 42 +------ insights/tests/datasources/test_pcp.py | 149 +++++++++++++++++++++++++ 5 files changed, 239 insertions(+), 40 deletions(-) create mode 100644 insights/specs/datasources/pcp.py create mode 100644 insights/tests/datasources/test_pcp.py diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 4da8d1ecb..424a2ed25 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -51,6 +51,14 @@ insights.specs.datasources.package_provides :show-inheritance: :undoc-members: +insights.specs.datasources.pcp +------------------------------ + +.. 
automodule:: insights.specs.datasources.pcp
+   :members: pcp_enabled, pmlog_summary_args
+   :show-inheritance:
+   :undoc-members:
+
 insights.specs.datasources.ps
 -----------------------------
diff --git a/insights/collect.py b/insights/collect.py
index d416c19ba..70e2f0f33 100755
--- a/insights/collect.py
+++ b/insights/collect.py
@@ -221,6 +221,10 @@
     - name: insights.components.rhel_version.IsRhel9
       enabled: true
 
+    # needed for the 'pmlog_summary' spec
+    - name: insights.parsers.ros_config.RosConfig
+      enabled: true
+
     # needed because some specs aren't given names before they're used in DefaultSpecs
     - name: insights.core.spec_factory
       enabled: true
diff --git a/insights/specs/datasources/pcp.py b/insights/specs/datasources/pcp.py
new file mode 100644
index 000000000..a67a0b607
--- /dev/null
+++ b/insights/specs/datasources/pcp.py
@@ -0,0 +1,76 @@
+"""
+Custom datasources related to PCP (Performance Co-Pilot)
+"""
+import logging
+import datetime
+import os
+
+from insights.core.dr import SkipComponent
+from insights.core.context import HostContext
+from insights.core.plugins import datasource
+from insights.parsers.ros_config import RosConfig
+from insights.combiners.ps import Ps
+from insights.combiners.services import Services
+
+logger = logging.getLogger(__name__)
+
+
+@datasource(Services, HostContext)
+def pcp_enabled(broker):
+    """
+    Returns:
+        bool: True if pmproxy service is on in services
+
+    Raises:
+        SkipComponent: When pmproxy service is not enabled
+    """
+    if not broker[Services].is_on("pmproxy"):
+        raise SkipComponent("pmproxy not enabled")
+    return True
+
+
+@datasource(Ps, RosConfig, HostContext)
+def pmlog_summary_args(broker):
+    """
+    Determines the pmlogger file and the metrics to collect via the
+    `pmlog_summary` spec.
+
+    Returns:
+        str: Full arguments string that will be passed to `pmlogsummary`,
+            which contains the `pmlogger` archive file and the required `metrics`.
+
+    Raises:
+        SkipComponent: Raised in any of the following scenarios:
+            - No pmlogger process is running
+            - No pmlogger file
+            - No "mandatory on" metrics in `config.ros`
+    """
+    pm_file = None
+    try:
+        ps = broker[Ps]
+        if not ps.search(COMMAND_NAME__contains='pmlogger'):
+            raise SkipComponent("No 'pmlogger' is running")
+
+        pcp_log_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y%m%d")
+        pm_file = "/var/log/pcp/pmlogger/ros/{0}.index".format(pcp_log_date)
+
+        if not (os.path.exists(pm_file) and os.path.isfile(pm_file)):
+            raise SkipComponent("No pmlogger archive file: {0}".format(pm_file))
+
+    except Exception as e:
+        raise SkipComponent("Failed to check pmlogger file existence: {0}".format(str(e)))
+
+    metrics = set()
+    try:
+        ros = broker[RosConfig]
+        for spec in ros.specs:
+            if spec.get('state') == 'mandatory on':
+                metrics.update(spec.get('metrics').keys())
+
+        if not metrics:
+            raise SkipComponent("No 'mandatory on' metrics in config.ros")
+
+    except Exception as e:
+        raise SkipComponent("Failed to get pmlogger metrics: {0}".format(str(e)))
+
+    return "{0} {1}".format(pm_file, ' '.join(sorted(metrics)))
diff --git a/insights/specs/default.py b/insights/specs/default.py
index 734f21b69..0c223073f 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -8,7 +8,6 @@
 data sources that standard Insights `Parsers` resolve against.
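The string returned by ``pmlog_summary_args`` fills the ``%s`` placeholder of
the ``pmlog_summary`` spec; condensed, the wiring shown further below is::

    pmlog_summary = command_with_args("/usr/bin/pmlogsummary %s", pmlog_summary_args)
    # e.g. expands to:
    #   /usr/bin/pmlogsummary /var/log/pcp/pmlogger/ros/20211121.index \
    #       disk.all.total kernel.all.cpu.user mem.util.free mem.util.used
    # (the archive date and metric set here are illustrative values only)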
""" -import datetime import logging import os import signal @@ -26,7 +25,6 @@ from insights.core.spec_factory import first_of, command_with_args from insights.core.spec_factory import foreach_collect, foreach_execute from insights.core.spec_factory import first_file, listdir -from insights.combiners.services import Services from insights.combiners.ps import Ps from insights.components.rhel_version import IsRhel7, IsRhel8, IsRhel9 from insights.components.cloud_provider import IsAWS, IsAzure, IsGCP @@ -37,6 +35,7 @@ awx_manage, cloud_init, candlepin_broker, ethernet, get_running_commands, ipcs, lpstat, package_provides, ps as ps_datasource, sap, satellite_missed_queues, ssl_certificate, yum_updates) from insights.specs.datasources.sap import sap_hana_sid, sap_hana_sid_SID_nr +from insights.specs.datasources.pcp import pcp_enabled, pmlog_summary_args logger = logging.getLogger(__name__) @@ -514,13 +513,6 @@ def md5chk_file_list(broker): pacemaker_log = first_file(["/var/log/pacemaker.log", "/var/log/pacemaker/pacemaker.log"]) partitions = simple_file("/proc/partitions") pci_rport_target_disk_paths = simple_command("/usr/bin/find /sys/devices/ -maxdepth 10 -mindepth 9 -name stat -type f") - - @datasource(Services, HostContext) - def pcp_enabled(broker): - """ bool: Returns True if pmproxy service is on in services """ - if not broker[Services].is_on("pmproxy"): - raise SkipComponent("pmproxy not enabled") - pcp_metrics = simple_command("/usr/bin/curl -s http://127.0.0.1:44322/metrics --connect-timeout 5", deps=[pcp_enabled]) passenger_status = simple_command("/usr/bin/passenger-status") password_auth = simple_file("/etc/pam.d/password-auth") @@ -528,37 +520,7 @@ def pcp_enabled(broker): pcs_status = simple_command("/usr/sbin/pcs status") php_ini = first_file(["/etc/opt/rh/php73/php.ini", "/etc/opt/rh/php72/php.ini", "/etc/php.ini"]) pluginconf_d = glob_file("/etc/yum/pluginconf.d/*.conf") - - @datasource(Ps, HostContext) - def pmlog_summary_file(broker): - """ - Determines the name for the pmlogger file and checks for its existance - - Returns the name of the latest pmlogger summary file if a running ``pmlogger`` - process is detected on the system. 
- - Returns: - str: Full path to the latest pmlogger file - - Raises: - SkipComponent: raises this exception when the command is not present or - the file is not present - """ - ps = broker[Ps] - if ps.search(COMMAND__contains='pmlogger'): - pcp_log_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y%m%d") - file = "/var/log/pcp/pmlogger/ros/%s.index" % (pcp_log_date) - try: - if os.path.exists(file) and os.path.isfile(file): - return file - except Exception as e: - SkipComponent("Failed to check for pmlogger file existance: {0}".format(str(e))) - - raise SkipComponent - - pmlog_summary = command_with_args( - "/usr/bin/pmlogsummary %s mem.util.used mem.physmem kernel.all.cpu.user kernel.all.cpu.sys kernel.all.cpu.nice kernel.all.cpu.steal kernel.all.cpu.idle disk.all.total mem.util.cached mem.util.bufmem mem.util.free kernel.all.cpu.wait.total", - pmlog_summary_file) + pmlog_summary = command_with_args("/usr/bin/pmlogsummary %s", pmlog_summary_args) pmrep_metrics = simple_command("/usr/bin/pmrep -t 1s -T 1s network.interface.out.packets network.interface.collisions swap.pagesout mssql.memory_manager.stolen_server_memory mssql.memory_manager.total_server_memory -o csv") postconf_builtin = simple_command("/usr/sbin/postconf -C builtin") postconf = simple_command("/usr/sbin/postconf") diff --git a/insights/tests/datasources/test_pcp.py b/insights/tests/datasources/test_pcp.py new file mode 100644 index 000000000..d13e6ef5a --- /dev/null +++ b/insights/tests/datasources/test_pcp.py @@ -0,0 +1,149 @@ +import pytest +import datetime +from mock.mock import patch + +from insights import dr +from insights.core.dr import SkipComponent +from insights.parsers.ps import PsAuxcww +from insights.parsers.systemd.unitfiles import UnitFiles +from insights.parsers.ros_config import RosConfig +from insights.combiners.ps import Ps +from insights.combiners.services import Services +from insights.specs.datasources.pcp import pcp_enabled, pmlog_summary_args +from insights.tests import context_wrap + +PS_AUXCWW = """ +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 init +root 1821 0.0 0.0 0 0 ? S May31 0:29 kondemand/0 +root 20357 0.0 0.0 9120 832 ? Ss 10:09 0:00 dhclient +pcp 71277 0.0 0.1 127060 8384 ? S Oct09 0:06 pmlogger +""".strip() + +PS_AUXCWW_NG = """ +USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND +root 1 0.0 0.0 19356 1544 ? Ss May31 0:01 init +root 1821 0.0 0.0 0 0 ? S May31 0:29 kondemand/0 +root 20357 0.0 0.0 9120 832 ? Ss 10:09 0:00 dhclient +""".strip() + +LIST_UNIT_FILES = """ +UNIT FILE STATE +pmlogger.service enabled +pmlogger_check.service disabled +pmlogger_daily-poll.service static +pmlogger_daily.service static +pmproxy.service enabled + +5 unit files listed. +""".strip() + +LIST_UNIT_FILES_no_pmproxy = """ +UNIT FILE STATE +pmlogger.service enabled +pmlogger_check.service disabled +pmlogger_daily-poll.service static +pmlogger_daily.service static + +5 unit files listed. 
+""".strip() + +ROS_CONFIG = """ +log mandatory on default { + mem.util.used + kernel.all.cpu.user + disk.all.total + mem.util.free +} +[access] +disallow .* : all; +disallow :* : all; +allow local:* : enquire; +""".strip() + +ROS_CONFIG_NG = """ +log mandatory off { + mem.util.used + kernel.all.cpu.user + disk.all.total + mem.util.free +} +[access] +disallow .* : all; +disallow :* : all; +allow local:* : enquire; +""" + + +def test_pcp_enabled(): + unitfiles = UnitFiles(context_wrap(LIST_UNIT_FILES)) + services = Services(None, unitfiles) + broker = dr.Broker() + broker[Services] = services + + result = pcp_enabled(broker) + assert result is True + + unitfiles = UnitFiles(context_wrap(LIST_UNIT_FILES_no_pmproxy)) + services = Services(None, unitfiles) + broker = dr.Broker() + broker[Services] = services + + with pytest.raises(SkipComponent): + pcp_enabled(broker) + + +@patch("insights.specs.datasources.pcp.os.path.exists", return_value=True) +@patch("insights.specs.datasources.pcp.os.path.isfile", return_value=True) +def test_pmlog_summary_args(isfile, exists): + # Case 1: OK + ros = RosConfig(context_wrap(ROS_CONFIG)) + ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW)) + ps = Ps(None, None, None, None, ps_auxcww, None, None) + + broker = dr.Broker() + broker[Ps] = ps + broker[RosConfig] = ros + + pcp_log_date = (datetime.date.today() - datetime.timedelta(days=1)).strftime("%Y%m%d") + mock_file = "/var/log/pcp/pmlogger/ros/%s.index" % (pcp_log_date) + + result = pmlog_summary_args(broker) + + metrics = ' '.join(sorted([i.strip() for i in ROS_CONFIG.split('\n')[1:5]])) + expected = '{0} {1}'.format(mock_file, metrics) + assert result == expected + + # Case 2 NG metrics + ros = RosConfig(context_wrap(ROS_CONFIG_NG)) + broker = dr.Broker() + broker[Ps] = ps + broker[RosConfig] = ros + + with pytest.raises(SkipComponent): + pmlog_summary_args(broker) + + # Case 3 No pmloger proc in ps + ros = RosConfig(context_wrap(ROS_CONFIG)) + ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW_NG)) + ps = Ps(None, None, None, None, ps_auxcww, None, None) + broker = dr.Broker() + broker[Ps] = ps + broker[RosConfig] = ros + + with pytest.raises(SkipComponent): + pmlog_summary_args(broker) + + +@patch("insights.specs.datasources.pcp.os.path.exists", return_value=False) +def test_pmlog_summary_args_no_pmloger_file(isfile): + ros = RosConfig(context_wrap(ROS_CONFIG)) + ps_auxcww = PsAuxcww(context_wrap(PS_AUXCWW)) + ps = Ps(None, None, None, None, ps_auxcww, None, None) + + broker = dr.Broker() + broker[Ps] = ps + broker[RosConfig] = ros + + with pytest.raises(SkipComponent): + pmlog_summary_args(broker) From 8b6135e9d4f90d390dd735496569a2a0be764435 Mon Sep 17 00:00:00 2001 From: Ryan Blakley Date: Tue, 23 Nov 2021 22:46:21 -0500 Subject: [PATCH 602/892] Update ethtool's parsing logic (#3291) * Updated the logic to loop through the content and start parsing after finding the starting line. This way operation not permitted messages at the beginning of content are ignored. * Updated the tests to test with operation not permitted messages at the top of content. 
* Fixes #3268

Signed-off-by: Ryan Blakley
---
 insights/parsers/ethtool.py            | 47 +++++++++++++++-----------
 insights/parsers/tests/test_ethtool.py |  4 ++-
 2 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/insights/parsers/ethtool.py b/insights/parsers/ethtool.py
index c354bbb1a..832742d06 100644
--- a/insights/parsers/ethtool.py
+++ b/insights/parsers/ethtool.py
@@ -876,27 +876,36 @@ def parse_content(self, content):
         if "ethtool: bad command line argument(s)" in content[0]:
             raise ParseException('ethtool: bad command line argument for ethtool', content)
 
-        if "Settings for" not in content[0]:
-            raise ParseException("ethtool: unrecognised first line '{l}'".format(l=content[0]))
-
-        self.data['ETHNIC'] = content[0].split()[-1].strip(':')
-
-        if "No data available" in content[1]:
-            raise ParseException('Fake ethnic as ethtool command argument', content)
-
-        key = value = None
-        for line in content[1:]:
+        key = started = None
+        for line in content:
             line = line.strip()
             if line:
-                try:
-                    if ':' in line:
-                        key, value = line.split(':', 1)
-                        key = key.strip()
-                        self.data[key] = [value.strip()]
-                    else:
-                        self.data[key].append(line)
-                except:
-                    raise ParseException('Ethtool unable to parse content', line)
+                # Started is set when a line containing Settings for <device>:
+                # is found, ignore all other lines prior to this one.
+                if not started:
+                    if "Settings for" in line:
+                        started = 1
+                        self.data['ETHNIC'] = line.split()[-1].strip(':')
+
+                    continue
+                else:
+                    if "No data available" in line:
+                        raise ParseException('Fake ethnic as ethtool command argument', content)
+
+                    try:
+                        if ':' in line:
+                            key, value = line.split(':', 1)
+                            key = key.strip()
+                            self.data[key] = [value.strip()]
+                        else:
+                            self.data[key].append(line)
+                    except KeyError:
+                        raise ParseException('Ethtool unable to parse content', line)
+
+        # If started was never set then there weren't any
+        # valid lines found, so raise a parse exception.
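+        # For example, output consisting solely of permission errors such as
+        # "Cannot get wake-on-lan settings: Operation not permitted" never
+        # contains a "Settings for <device>:" line, so it is rejected here.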
+        if not started:
+            raise ParseException('Ethtool does not contain Settings for <device>:')
 
         self.supported_link_modes = []
         if 'Supported link modes' in self.data:
diff --git a/insights/parsers/tests/test_ethtool.py b/insights/parsers/tests/test_ethtool.py
index 8421d3962..bdf2f873d 100644
--- a/insights/parsers/tests/test_ethtool.py
+++ b/insights/parsers/tests/test_ethtool.py
@@ -732,6 +732,8 @@ def test_extract_from_path_1():
 '''
 
 ETHTOOL_INFO = """
+Cannot get wake-on-lan settings: Operation not permitted
+Cannot get link status: Operation not permitted
 Settings for eth1:
     Supported ports: [ TP MII ]
     Supported link modes:   10baseT/Half 10baseT/Full
@@ -815,7 +817,7 @@ def test_ethtool_fail():
 
     with pytest.raises(ParseException) as e:
         ethtool.Ethtool(context_wrap(ETHTOOL_INFO_BAD_2, path="ethtool_eth1"))
-    assert "ethtool: unrecognised first line " in str(e.value)
+    assert "Ethtool does not contain Settings for <device>:" in str(e.value)
 
     with pytest.raises(ParseException) as e:
         ethtool.Ethtool(context_wrap(ETHTOOL_INFO_BAD_3, path="ethtool_eth1"))
From 94837146a8f92d22f06e0c069a2d9aa09b4cd651 Mon Sep 17 00:00:00 2001
From: wushiqinlou
Date: Wed, 24 Nov 2021 16:48:29 +0800
Subject: [PATCH 603/892] Fix: Enhance some spec paths (#3293)

* Enhance spec paths

Signed-off-by: jiazhang

* Update sequence

Signed-off-by: jiazhang
---
 insights/specs/default.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/insights/specs/default.py b/insights/specs/default.py
index 0c223073f..883f7f6e7 100644
--- a/insights/specs/default.py
+++ b/insights/specs/default.py
@@ -394,7 +394,10 @@ def httpd_cmd(broker):
     lsblk_pairs = simple_command("/bin/lsblk -P -o NAME,KNAME,MAJ:MIN,FSTYPE,MOUNTPOINT,LABEL,UUID,RA,RO,RM,MODEL,SIZE,STATE,OWNER,GROUP,MODE,ALIGNMENT,MIN-IO,OPT-IO,PHY-SEC,LOG-SEC,ROTA,SCHED,RQ-SIZE,TYPE,DISC-ALN,DISC-GRAN,DISC-MAX,DISC-ZERO")
     lscpu = simple_command("/usr/bin/lscpu")
     lsmod = simple_command("/sbin/lsmod")
-    lsof = simple_command("/usr/sbin/lsof")
+    lsof = first_of([
+        simple_command("/usr/bin/lsof"),
+        simple_command("/usr/sbin/lsof")
+    ])
     lspci = simple_command("/sbin/lspci -k")
     lspci_vmmkn = simple_command("/sbin/lspci -vmmkn")
     lsscsi = simple_command("/usr/bin/lsscsi")
@@ -666,7 +669,10 @@ def md5chk_file_list(broker):
         simple_file("/etc/sysconfig/rhn/systemid"),
         simple_file("/conf/rhn/sysconfig/rhn/systemid")
     ])
-    systool_b_scsi_v = simple_command("/bin/systool -b scsi -v")
+    systool_b_scsi_v = first_of([
+        simple_command("/usr/bin/systool -b scsi -v"),
+        simple_command("/bin/systool -b scsi -v")
+    ])
     sys_vmbus_device_id = glob_file('/sys/bus/vmbus/devices/*/device_id')
     sys_vmbus_class_id = glob_file('/sys/bus/vmbus/devices/*/class_id')
     testparm_s = simple_command("/usr/bin/testparm -s")
From 50e8e19a118600a60600aa7b9a80e4cb366c83e6 Mon Sep 17 00:00:00 2001
From: Ryan Blakley
Date: Tue, 30 Nov 2021 14:14:18 -0500
Subject: [PATCH 604/892] Update setup.py (#3289)

* Removed the client console script as it doesn't work anymore since the
launching of the client is actually handled by a file in the
insights-client repo.

* Added pyyaml to the runtime set the way pytest was added to the testing
set, instead of the sys.version_info check it was using.
Signed-off-by: Ryan Blakley
---
 setup.py | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/setup.py b/setup.py
index cc5beaf9e..c390b622e 100644
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,4 @@
 import os
-import sys
 from setuptools import setup, find_packages
 
 __here__ = os.path.dirname(os.path.abspath(__file__))
@@ -20,7 +19,6 @@
         'insights-inspect = insights.tools.insights_inspect:main',
         'insights-info = insights.tools.query:main',
         'insights-ocpshell= insights.ocpshell:main',
-        'client = insights.client:run',
         'mangle = insights.util.mangle:main'
     ]
 }
@@ -35,13 +33,10 @@
     'defusedxml',
     'lockfile',
     'jinja2<=2.11.3',
+    'pyyaml>=3.10,<=3.13; python_version < "2.7"',
+    'pyyaml; python_version >= "2.7"',
 ])
 
-if (sys.version_info < (2, 7)):
-    runtime.add('pyyaml>=3.10,<=3.13')
-else:
-    runtime.add('pyyaml')
-
 
 def maybe_require(pkg):
     try:
From b92e28bfa75ff493160deecc268a7e8655baffaf Mon Sep 17 00:00:00 2001
From: Alba Hita <93577878+ahitacat@users.noreply.github.com>
Date: Tue, 30 Nov 2021 20:24:23 +0100
Subject: [PATCH 605/892] DOC: Added new section for client development (#3287)

* Added new section for client development

Create a new section for project development setup. Moved the insights
core setup to that new section and added a new subsection to deploy the
insights client development.

Signed-off-by: ahitacat

* Small fixes

Signed-off-by: ahitacat

Co-authored-by: Glutexo
---
 docs/quickstart_insights_core.rst | 59 ++++++++++++++++++++-----------
 1 file changed, 39 insertions(+), 20 deletions(-)

diff --git a/docs/quickstart_insights_core.rst b/docs/quickstart_insights_core.rst
index 83b576df9..8a11d4c49 100644
--- a/docs/quickstart_insights_core.rst
+++ b/docs/quickstart_insights_core.rst
@@ -39,27 +39,12 @@ file associated with the insights-core project.
    ``unzip`` to be able to run `pytest` on the ``insights-core`` repo, and
    ``pandoc`` to build Insights Core documentation.
 
-**********************
-Rule Development Setup
-**********************
-
-In order to develop rules to run in Red Hat Insights you'll need Insights
-Core (http://github.com/RedHatInsights/insights-core) as well as your own rules code.
-The commands below assume the following sample project directory structure
-containing the insights-core project repo and your directory and files
-for rule development::
-
-    project_dir
-    ├── insights-core
-    └── myrules
-        ├── hostname_rel.py
-        └── bash_version.py
-
-
-.. _insights_dev_setup:
+***************************
+Insights Development Setup
+***************************
 
-Insights Core Setup
-===================
+Insights Core Development
+=========================
 
 Clone the project::
 
@@ -113,6 +98,40 @@ command.
 If you use this method make sure you periodically update insights core in
 your virtualenv with the command `pip install --upgrade insights-core`.
 
+Insights Client Development
+===========================
+
+Clone the project::
+
+    [userone@hostone project_dir]$ git clone git@github.com:RedHatInsights/insights-core.git
+
+Initialize a virtualenv with the ``--system-site-packages`` flag::
+
+    [userone@hostone project_dir/insights-core]$ python3.6 -m venv --system-site-packages .
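+
+Activate the virtualenv before the next step (the ``(insights-core)`` prompt
+prefix below indicates an activated virtualenv)::
+
+    [userone@hostone project_dir/insights-core]$ source bin/activate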
+
+Next install the insights-core project and its dependencies into your virtualenv::
+
+    (insights-core)[userone@hostone project_dir/insights-core]$ bin/pip install -e .[client-develop]
+
+**********************
+Rule Development Setup
+**********************
+
+In order to develop rules to run in Red Hat Insights you'll need Insights
+Core (http://github.com/RedHatInsights/insights-core) as well as your own rules code.
+The commands below assume the following sample project directory structure
+containing the insights-core project repo and your directory and files
+for rule development::
+
+    project_dir
+    ├── insights-core
+    └── myrules
+        ├── hostname_rel.py
+        └── bash_version.py
+
+
+.. _insights_dev_setup:
+
 Rule Development
 ================
From b9108cb20e1b606158df1e9f67ba60cf9169b98f Mon Sep 17 00:00:00 2001
From: Mark Huth
Date: Tue, 7 Dec 2021 07:50:17 +1000
Subject: [PATCH 606/892] Remove yara_binary as a config option (#3296)

* Remove yara_binary as a config option

* https://bugzilla.redhat.com/show_bug.cgi?id=2025009

Signed-off-by: Mark Huth

* Look for yara binary in specific locations

* Don't use 'which yara' which relies on the system path

Signed-off-by: Mark Huth

* Remove newline from process name metadata (YARA-241)

* And better handle problems retrieving file metadata

Signed-off-by: Mark Huth
---
 .../client/apps/malware_detection/__init__.py | 54 +++++++++---------
 .../client/apps/test_malware_detection.py     | 55 +++++++++++++------
 2 files changed, 63 insertions(+), 46 deletions(-)

diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py
index e2b59e05a..e119cade9 100644
--- a/insights/client/apps/malware_detection/__init__.py
+++ b/insights/client/apps/malware_detection/__init__.py
@@ -92,10 +92,6 @@
 # The extra metadata will display in the webUI along with the scan matches
 add_metadata: true
 
-# Specific location of the yara binary file. Autodetected if not specified. For example:
-# yara_binary: /usr/local/bin/yara
-yara_binary:
-
 # Abort a particular scan if it takes longer than scan_timeout seconds. Default is 3600 seconds (1 hour)
 scan_timeout: # 3600
@@ -113,8 +109,7 @@
                  'EXCLUDE_NETWORK_FILESYSTEM_MOUNTPOINTS'],
     'list': ['SCAN_ONLY', 'SCAN_EXCLUDE', 'NETWORK_FILESYSTEM_TYPES'],
     'integer': ['SCAN_TIMEOUT', 'NICE_VALUE', 'CPU_THREAD_LIMIT', 'STRING_MATCH_LIMIT'],
-    'int_or_str': ['SCAN_SINCE'],
-    'string': ['YARA_BINARY']
+    'int_or_str': ['SCAN_SINCE']
 }
@@ -218,41 +213,38 @@ def _load_config():
 
     def _find_yara(self):
         """
-        Find the yara binary on the local system and check it's version >= MIN_YARA_VERSION
+        Find the yara binary in particular locations on the local system. Don't use 'which yara'
+        and rely on the system path in case it finds a malicious yara.
+        Also, don't let the user specify where yara is, again in case it is a malicious version of yara.
+        If found, check its version >= MIN_YARA_VERSION
         """
         def yara_version_ok(yara):
             # Check the installed yara version >= MIN_YARA_VERSION
             installed_yara_version = call([[yara, '--version']]).strip()
             try:
                 if float(installed_yara_version[:3]) < float(MIN_YARA_VERSION[:3]):
-                    raise RuntimeError("Found yara version %s, but malware-detection requires version >= %s\n"
+                    raise RuntimeError("Found %s with version %s, but malware-detection requires version >= %s\n"
                                        "Please install a later version of yara."
- % (installed_yara_version, MIN_YARA_VERSION)) + % (yara, installed_yara_version, MIN_YARA_VERSION)) except RuntimeError as e: logger.error(str(e)) exit(constants.sig_kill_bad) except Exception as e: - logger.error("Error getting the version of the specified yara binary %s: %s" % (yara, str(e))) + logger.error("Error getting the version of the specified yara binary %s: %s", yara, str(e)) exit(constants.sig_kill_bad) # If we are here then the version of yara was ok return True - yara = self._get_config_option('yara_binary') - if yara and not os.path.isfile(yara): - logger.error("Couldn't find the specified yara binary %s. Please check it exists", yara) - exit(constants.sig_kill_bad) - elif yara and yara_version_ok(yara): - logger.debug("Using specified yara binary: %s", yara) - return yara + # Try to find yara in only these usual locations. + # /bin/yara and /usr/bin/yara will exist if yara is installed via rpm + # /usr/local/bin/yara will (likely) exist if the user has compiled and installed yara manually + for yara in ['/bin/yara', '/usr/bin/yara', '/usr/local/bin/yara']: + if os.path.exists(yara) and yara_version_ok(yara): + logger.debug("Using yara binary: %s", yara) + return yara - try: - yara = str(call([['which', 'yara']])).strip() - except CalledProcessError: - logger.error("Couldn't find yara. Please ensure the yara package is installed") - exit(constants.sig_kill_bad) - yara_version_ok(yara) # Generates an error if not ok - logger.debug("Using yara binary: %s", yara) - return yara + logger.error("Couldn't find yara. Please ensure the yara package is installed") + exit(constants.sig_kill_bad) def _process_scan_options(self): """ @@ -735,7 +727,7 @@ def _add_process_metadata(self, rule_matches): # Get name of process from ps command # -h: no output header, -q: only the specified process, -o args: just the process name and args try: - process_name = call([['ps', '-hq', source, '-o', 'args']]) + process_name = call([['ps', '-hq', source, '-o', 'args']]).strip() except CalledProcessError: process_name = 'unknown' @@ -767,10 +759,16 @@ def get_line_from_file(file_name, line_number): # Get the file type, mime type and md5sum hash of the source file try: file_type = call([['file', '-b', source]]).strip() + except CalledProcessError: + file_type = "" + try: mime_type = call([['file', '-bi', source]]).strip() + except CalledProcessError: + mime_type = "" + try: md5sum = call([['md5sum', source]]).strip().split()[0] - except Exception: - file_type = mime_type = md5sum = "" + except CalledProcessError: + md5sum = "" grep_string_data_match_list = [] if mime_type and 'charset=binary' not in mime_type: diff --git a/insights/tests/client/apps/test_malware_detection.py b/insights/tests/client/apps/test_malware_detection.py index 6ff968ce3..575af75de 100644 --- a/insights/tests/client/apps/test_malware_detection.py +++ b/insights/tests/client/apps/test_malware_detection.py @@ -96,7 +96,6 @@ def test_default_spec(self): def test_default_options(self): # Read in the default malware_detection_config options and check their values - assert CONFIG['yara_binary'] is None assert CONFIG['test_scan'] is True assert CONFIG['scan_filesystem'] is True assert CONFIG['scan_processes'] is False @@ -153,34 +152,54 @@ def test_running_default_options(self, log_mock, yara, rules, cmd, create_test_f @patch(BUILD_YARA_COMMAND_TARGET) @patch(GET_RULES_TARGET, return_value=RULES_FILE) @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) +@patch(LOGGER_TARGET) class TestFindYara: - @patch.dict(os.environ) - def 
test_find_yara_binary(self, conf, rules, cmd):
-        # Testing finding yara
-        os.environ['YARA_BINARY'] = '/bin/yara'
-        with patch('os.path.isfile', return_value=True):
+    def test_find_yara_binary(self, log_mock, conf, rules, cmd):
+        # Testing finding yara with correct version
+        with patch('os.path.exists', return_value=True):
             with patch("insights.client.apps.malware_detection.call", return_value='4.1'):
                 mdc = MalwareDetectionClient(None)
                 assert mdc.yara_binary == '/bin/yara'
                 cmd.assert_called()
 
-    @patch.dict(os.environ)
-    def test_missing_yara_binary(self, conf, rules, cmd):
-        # Test yara_binary option with non-existent file
-        os.environ['YARA_BINARY'] = '/bin/notyara'
-        with pytest.raises(SystemExit):
-            MalwareDetectionClient(None)
+        # 'Find' yara in /usr/local/bin/yara (fails to 'find' /bin/yara and /usr/bin/yara)
+        with patch('os.path.exists', side_effect=[False, False, True]):
+            with patch("insights.client.apps.malware_detection.call", return_value='4.1'):
+                mdc = MalwareDetectionClient(None)
+                assert mdc.yara_binary == '/usr/local/bin/yara'
+                cmd.assert_called()
+
+    def test_find_unsupported_yara(self, log_mock, conf, rules, cmd):
+        # Test finding unsupported yara version
+        with patch('os.path.exists', return_value=True):
+            with patch("insights.client.apps.malware_detection.call", return_value='3.10'):
+                with pytest.raises(SystemExit):
+                    MalwareDetectionClient(None)
+        log_mock.error.assert_called_with("Found /bin/yara with version 3.10, but malware-detection requires version >= 4.1.0\n"
+                                          "Please install a later version of yara.")
         cmd.assert_not_called()
 
-        # Test yara_binary option with non-yara file
-        os.environ['YARA_BINARY'] = '/bin/ls'
-        with pytest.raises(SystemExit):
-            MalwareDetectionClient(None)
+    def test_find_invalid_yara(self, log_mock, conf, rules, cmd):
+        # Test finding a binary called yara, but it's not yara
+        with patch('os.path.exists', return_value=True):
+            with patch("insights.client.apps.malware_detection.call", return_value='not yara 1.2.3'):
+                with pytest.raises(SystemExit):
+                    MalwareDetectionClient(None)
+        log_mock.error.assert_called_with("Error getting the version of the specified yara binary %s: %s", "/bin/yara", ANY)
         cmd.assert_not_called()
+
+    def test_cant_find_yara(self, log_mock, conf, rules, cmd):
+        # Test can't find yara on the system
+        with patch('os.path.exists', return_value=False):
+            with pytest.raises(SystemExit):
+                MalwareDetectionClient(None)
+        log_mock.error.assert_called_with("Couldn't find yara. 
Please ensure the yara package is installed") cmd.assert_not_called() + @patch("os.path.exists", return_value=True) @patch("insights.client.apps.malware_detection.call") # mock call to 'yara --version' - def test_invalid_yara_versions(self, version_mock, conf, rules, cmd): + def test_invalid_yara_versions(self, version_mock, exists_mock, log_mock, conf, rules, cmd): # Test checking the version of yara # Invalid versions of yara for version in ['4.0.99', '4']: @@ -193,7 +212,7 @@ def test_invalid_yara_versions(self, version_mock, conf, rules, cmd): for version in ['4.1', '10.0.0']: version_mock.return_value = version mdc = MalwareDetectionClient(None) - assert mdc.yara_binary + assert mdc.yara_binary == '/bin/yara' cmd.assert_called() From 7b2c3f64ed5409090b1d846d29f36504e44d0d27 Mon Sep 17 00:00:00 2001 From: Mark Huth Date: Tue, 7 Dec 2021 07:51:58 +1000 Subject: [PATCH 607/892] Add log_response_text flag to log downloads or not in verbose mode (#3298) * https://issues.redhat.com/browse/YARA-232 * Don't log the malware signatures download in verbose mode * It is a big binary blob and may leave artifacts on the command line afterwards Signed-off-by: Mark Huth --- .../client/apps/malware_detection/__init__.py | 4 +++- insights/client/connection.py | 5 +++-- .../client/apps/test_malware_detection.py | 20 +++++++++---------- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py index e119cade9..eadff587a 100644 --- a/insights/client/apps/malware_detection/__init__.py +++ b/insights/client/apps/malware_detection/__init__.py @@ -446,14 +446,16 @@ def _get_rules(self): self.rules_location = urlunparse(parsed_url._replace(netloc='cert.' + parsed_url.netloc)) # If doing a test scan, replace signatures.yar (or any other file suffix) with test-rule.yar + log_rule_contents = False if self.test_scan: self.rules_location = self._get_test_rule_location(self.rules_location) + log_rule_contents = True logger.debug("Downloading rules from: %s", self.rules_location) try: self.insights_config.cert_verify = True conn = InsightsConnection(self.insights_config) - response = conn.get(self.rules_location) + response = conn.get(self.rules_location, log_response_text=log_rule_contents) if response.status_code != 200: logger.error("%s %s: %s", response.status_code, response.reason, response.text) exit(constants.sig_kill_bad) diff --git a/insights/client/connection.py b/insights/client/connection.py index e3012bc94..228d32aca 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -175,7 +175,7 @@ def _init_session(self): connection.proxy_headers = auth_map return session - def _http_request(self, url, method, **kwargs): + def _http_request(self, url, method, log_response_text=True, **kwargs): ''' Perform an HTTP request, net logging, and error handling Parameters @@ -188,7 +188,8 @@ def _http_request(self, url, method, **kwargs): logger.log(NETWORK, "%s %s", method, url) res = self.session.request(url=url, method=method, timeout=self.config.http_timeout, **kwargs) logger.log(NETWORK, "HTTP Status: %d %s", res.status_code, res.reason) - logger.log(NETWORK, "HTTP Response Text: %s", res.text) + if log_response_text or res.status_code != 200: + logger.log(NETWORK, "HTTP Response Text: %s", res.text) return res def get(self, url, **kwargs): diff --git a/insights/tests/client/apps/test_malware_detection.py b/insights/tests/client/apps/test_malware_detection.py index 
575af75de..ce843ee99 100644 --- a/insights/tests/client/apps/test_malware_detection.py +++ b/insights/tests/client/apps/test_malware_detection.py @@ -239,23 +239,23 @@ def test_download_rules_cert_auth(self, conf, yara, cmd, session, proxies, get): mdc = MalwareDetectionClient(InsightsConfig()) assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar" assert mdc.rules_file.startswith('/tmp') # rules will be saved into a temp file - get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) # With authmethod=CERT, expect 'cert.' to be prefixed to the url mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar" - get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) # With authmethod=BASIC and test scan false ... # Expect to still use cert auth because no username or password specified os.environ['TEST_SCAN'] = 'false' mdc = MalwareDetectionClient(InsightsConfig(authmethod='BASIC')) assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar" - get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar") + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar", log_response_text=False) mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) assert mdc.rules_location == "https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar" - get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar") + get.assert_called_with("https://cert.console.redhat.com/api/malware-detection/v1/signatures.yar", log_response_text=False) @patch.dict(os.environ, {'TEST_SCAN': 'true'}) @patch(LOGGER_TARGET) @@ -268,26 +268,26 @@ def test_download_rules_basic_auth(self, log_mock, conf, yara, cmd, session, pro get.return_value = Mock(status_code=401, reason="Unauthorized", text="No can do") with pytest.raises(SystemExit): MalwareDetectionClient(InsightsConfig(username='user')) - get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) # Test with just a password specified - expect basic auth to be used but fails with pytest.raises(SystemExit): MalwareDetectionClient(InsightsConfig(password='pass')) - get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) # Test with 'incorrect' username and/or password - expect basic auth failure with pytest.raises(SystemExit): MalwareDetectionClient(InsightsConfig(username='user', password='badpass')) - get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar", 
log_response_text=True) log_mock.error.assert_called_with("%s %s: %s", 401, "Unauthorized", ANY) # Test with 'correct' username and password - expect basic auth success get.return_value = Mock(status_code=200, content=b"Rule Content") mdc = MalwareDetectionClient(InsightsConfig(username='user', password='goodpass')) assert mdc.rules_location == "https://console.redhat.com/api/malware-detection/v1/test-rule.yar" - get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar") + get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) @patch.dict(os.environ, {'TEST_SCAN': 'true', 'RULES_LOCATION': 'console.redhat.com/rules.yar'}) def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get): @@ -295,13 +295,13 @@ def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get # test-scan true and BASIC auth by default expect test-rule.yar and no 'cert.' in URL mdc = MalwareDetectionClient(InsightsConfig(username='user', password='pass')) assert mdc.rules_location == "https://console.redhat.com/test-rule.yar" - get.assert_called_with("https://console.redhat.com/test-rule.yar") + get.assert_called_with("https://console.redhat.com/test-rule.yar", log_response_text=True) # test-scan false and CERT auth - expect 'cert.' prefixed to the URL and not test-rule.yar os.environ['TEST_SCAN'] = 'false' mdc = MalwareDetectionClient(InsightsConfig(authmethod='CERT')) assert mdc.rules_location == "https://cert.console.redhat.com/rules.yar" - get.assert_called_with("https://cert.console.redhat.com/rules.yar") + get.assert_called_with("https://cert.console.redhat.com/rules.yar", log_response_text=False) @patch.dict(os.environ, {'TEST_SCAN': 'false', 'RULES_LOCATION': 'http://localhost/rules.yar'}) @patch(LOGGER_TARGET) From 4145a8e1d1ebb1d51d5047df2a23e6189b96942b Mon Sep 17 00:00:00 2001 From: wushiqinlou Date: Tue, 7 Dec 2021 12:58:20 +0800 Subject: [PATCH 608/892] Enhance parser LpstatProtocol (#3301) * Enhance parser LpstatProtocol Signed-off-by: jiazhang * Update example Signed-off-by: jiazhang --- insights/parsers/lpstat.py | 9 +++++++-- insights/parsers/tests/test_lpstat.py | 3 +++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/insights/parsers/lpstat.py b/insights/parsers/lpstat.py index 65229133c..aa5ae6b05 100644 --- a/insights/parsers/lpstat.py +++ b/insights/parsers/lpstat.py @@ -101,12 +101,16 @@ class LpstatProtocol(CommandParser, dict): device for test_printer1: ipp device for test_printer2: ipp + device for savtermhpc: implicitclass:savtermhpc + device for A1: marshaA1:/tmp/A1 Examples: >>> type(lpstat_protocol) >>> lpstat_protocol['test_printer1'] 'ipp' + >>> lpstat_protocol['savtermhpc'] + 'implicitclass' """ def parse_content(self, content): if not content: @@ -114,8 +118,9 @@ def parse_content(self, content): data = {} for line in content: if line.startswith("device for "): - protocol = line.split(":")[-1].strip() - printer = line.split(":")[0].split()[-1].strip() + line_split = line.split(":") + protocol = line_split[1].strip() + printer = line_split[0].split()[-1].strip() data[printer] = protocol if not data: raise SkipException("No Valid Output") diff --git a/insights/parsers/tests/test_lpstat.py b/insights/parsers/tests/test_lpstat.py index b93add6b2..006f31b0b 100644 --- a/insights/parsers/tests/test_lpstat.py +++ b/insights/parsers/tests/test_lpstat.py @@ -21,6 +21,8 @@ LPSTAT_V_OUTPUT = """ device for test_printer1: ipp device for 
test_printer2: ipp +device for savtermhpc: implicitclass:savtermhpc +device for A1: marshaA1:/tmp/A1 """.strip() LPSTAT_V_OUTPUT_INVALID_1 = """ @@ -82,6 +84,7 @@ def test_lpstat_printer_names_by_status(status, expected_name): def test_lpstat_protocol(): lpstat_protocol = LpstatProtocol(context_wrap(LPSTAT_V_OUTPUT)) assert lpstat_protocol["test_printer1"] == "ipp" + assert lpstat_protocol["savtermhpc"] == "implicitclass" def test_lpstat_protocol_invalid_state(): From ecf72d9db324ed7451c3e9d4852ad6392992a5b1 Mon Sep 17 00:00:00 2001 From: Alba Hita <93577878+ahitacat@users.noreply.github.com> Date: Wed, 8 Dec 2021 20:41:49 +0100 Subject: [PATCH 609/892] Fix test system (#3294) * added core-collect config false * fixing some test to not use system information Signed-off-by: ahitacat Co-authored-by: Link Dupont --- .../client/data_collector/test_redact.py | 14 +++++----- .../tests/client/test_skip_commands_files.py | 26 +++++++++---------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/insights/tests/client/data_collector/test_redact.py b/insights/tests/client/data_collector/test_redact.py index f5879580b..9427f9569 100644 --- a/insights/tests/client/data_collector/test_redact.py +++ b/insights/tests/client/data_collector/test_redact.py @@ -59,7 +59,7 @@ def test_redact_call_walk(walk): Verify that redact() calls os.walk and when an an archive structure is present in /var/tmp/**/insights-* ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -99,7 +99,7 @@ def test_redact_call_process_redaction(_process_content_redaction): "regex" parameter is False in the _process_content_redaction call when rm_conf is empty ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -130,7 +130,7 @@ def test_redact_exclude_regex(_process_content_redaction): exclude == list of strings and regex == True when a list of regex strings is defined in rm_conf ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -159,7 +159,7 @@ def test_redact_exclude_no_regex(_process_content_redaction): exclude == list of strings and regex == False when a list of pattern strings is defined in rm_conf ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -188,7 +188,7 @@ def test_redact_exclude_empty(_process_content_redaction): exclude == [] and regex == False when the patterns key is defined but value is an empty list ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -217,7 +217,7 @@ def test_redact_exclude_none(_process_content_redaction): exclude == None and regex == False when the patterns key is defined but value is an empty dict ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) arch.create_archive_dir() @@ -247,7 +247,7 @@ def test_redact_bad_location(_process_content_redaction, walk): if the directory present in InsightsArchive is in a location other than /var/tmp/**/insights-* ''' - conf = InsightsConfig() + conf = InsightsConfig(core_collect=False) arch = InsightsArchive(conf) for bad_path in ['/', '/home', '/etc', '/var/log/', '/home/test', '/var/tmp/f22D1d/ins2ghts']: diff --git a/insights/tests/client/test_skip_commands_files.py 
b/insights/tests/client/test_skip_commands_files.py index 81ba11ce2..8cf7139ab 100644 --- a/insights/tests/client/test_skip_commands_files.py +++ b/insights/tests/client/test_skip_commands_files.py @@ -11,7 +11,7 @@ def test_omit_before_expanded_paths(InsightsFile, parse_file_spec): """ Files are omitted based on representation of exact string matching in uploader.json """ - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'files': [{"file": "/etc/pam.d/vsftpd", "pattern": [], "symbolic_name": "vsftpd"}], 'commands': {}} @@ -27,7 +27,7 @@ def test_omit_after_expanded_paths(InsightsFile, parse_file_spec): """ Files are omitted based on the expanded paths of the uploader.json path """ - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'files': [{"file": "/etc/yum.repos.d/()*.*\\.repo", "pattern": [], "symbolic_name": "yum_repos_d"}], 'commands': {}} @@ -44,7 +44,7 @@ def test_omit_symbolic_name(InsightsCommand, InsightsFile, parse_file_spec): """ Files/commands are omitted based on their symbolic name in uploader.json """ - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'files': [{"file": "/etc/pam.d/vsftpd", "pattern": [], "symbolic_name": "vsftpd"}], @@ -86,7 +86,7 @@ def test_dont_archive_when_command_not_found(write_data_to_file): """ If the command is not found do not archive it """ - arch = InsightsArchive(InsightsConfig()) + arch = InsightsArchive(InsightsConfig(core_collect=False)) arch.archive_dir = arch.create_archive_dir() arch.cmd_dir = arch.create_command_dir() @@ -108,7 +108,7 @@ def test_dont_archive_when_missing_dep(write_data_to_file): """ If missing dependencies do not archive it """ - arch = InsightsArchive(InsightsConfig()) + arch = InsightsArchive(InsightsConfig(core_collect=False)) arch.archive_dir = arch.create_archive_dir() arch.cmd_dir = arch.create_command_dir() @@ -126,7 +126,7 @@ def test_omit_after_parse_command(InsightsCommand, run_pre_command): """ Files are omitted based on the expanded paths of the uploader.json path """ - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [{"command": "/sbin/ethtool -i", "pattern": [], "pre_command": "iface", "symbolic_name": "ethtool"}], 'files': [], "pre_commands": {"iface": "/sbin/ip -o link | awk -F ': ' '/.*link\\/ether/ {print $2}'"}} @@ -138,7 +138,7 @@ def test_omit_after_parse_command(InsightsCommand, run_pre_command): @patch("insights.client.data_collector.DataCollector._parse_glob_spec", return_value=[{'glob': '/etc/yum.repos.d/*.repo', 'symbolic_name': 'yum_repos_d', 'pattern': [], 'file': '/etc/yum.repos.d/test.repo'}]) @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_globs(warn, parse_glob_spec): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [], 'files': [], 'globs': [{'glob': '/etc/yum.repos.d/*.repo', 'symbolic_name': 'yum_repos_d', 'pattern': []}]} @@ -149,7 +149,7 @@ def test_run_collection_logs_skipped_globs(warn, parse_glob_spec): @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_files_by_file(warn): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [], 'files': [{'file': '/etc/machine-id', 
'pattern': [], 'symbolic_name': 'etc_machine_id'}], 'globs': []} @@ -160,7 +160,7 @@ def test_run_collection_logs_skipped_files_by_file(warn): @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_files_by_symbolic_name(warn): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [], 'files': [{'file': '/etc/machine-id', 'pattern': [], 'symbolic_name': 'etc_machine_id'}], 'globs': []} @@ -172,7 +172,7 @@ def test_run_collection_logs_skipped_files_by_symbolic_name(warn): @patch("insights.client.data_collector.DataCollector._parse_file_spec", return_value=[{'file': '/etc/sysconfig/network-scripts/ifcfg-enp0s3', 'pattern': [], 'symbolic_name': 'ifcfg'}]) @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_files_by_wildcard(warn, parse_file_spec): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [], 'files': [{'file': '/etc/sysconfig/network-scripts/()*ifcfg-.*', 'pattern': [], 'symbolic_name': 'ifcfg'}], 'globs': []} @@ -183,7 +183,7 @@ def test_run_collection_logs_skipped_files_by_wildcard(warn, parse_file_spec): @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_commands_by_command(warn): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [{'command': '/bin/date', 'pattern': [], 'symbolic_name': 'date'}], 'files': [], 'globs': []} @@ -194,7 +194,7 @@ def test_run_collection_logs_skipped_commands_by_command(warn): @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_commands_by_symbolic_name(warn): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [{'command': '/bin/date', 'pattern': [], 'symbolic_name': 'date'}], 'files': [], 'globs': []} @@ -206,7 +206,7 @@ def test_run_collection_logs_skipped_commands_by_symbolic_name(warn): @patch("insights.client.data_collector.DataCollector._parse_command_spec", return_value=[{'command': '/sbin/ethtool enp0s3', 'pattern': [], 'pre_command': 'iface', 'symbolic_name': 'ethtool'}]) @patch("insights.client.data_collector.logger.warn") def test_run_collection_logs_skipped_commands_by_pre_command(warn, parse_command_spec): - c = InsightsConfig() + c = InsightsConfig(core_collect=False) data_collector = DataCollector(c) collection_rules = {'commands': [{'command': '/sbin/ethtool', 'pattern': [], 'pre_command': 'iface', 'symbolic_name': 'ethtool'}], 'files': [], 'globs': [], 'pre_commands': {'iface': '/sbin/ip -o link | awk -F \': \' \'/.*link\\/ether/ {print $2}\''}} From 89602019523485687879c74db7f159b260e9dc7b Mon Sep 17 00:00:00 2001 From: Mark Huth Date: Thu, 9 Dec 2021 12:18:33 +1000 Subject: [PATCH 610/892] Remove old rules files before starting a new scan (#3302) * https://issues.redhat.com/browse/YARA-249 * Old rules files may cause false positives Signed-off-by: Mark Huth --- .../client/apps/malware_detection/__init__.py | 11 +++++++++++ .../client/apps/test_malware_detection.py | 18 ++++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py index eadff587a..6ff036723 100644 --- a/insights/client/apps/malware_detection/__init__.py +++ 
b/insights/client/apps/malware_detection/__init__.py @@ -411,6 +411,17 @@ def _get_rules(self): Obtain the rules used by yara for scanning from the rules_location option. They can either be downloaded from the malware backend or obtained from a local file. """ + from glob import glob + + # The rules file that is downloaded from the backend should be automatically removed when the + # malware-detection client exits. + # However it can happen that the rules file isn't removed for some reason, so remove any existing + # rules files before beginning a new scan, otherwise they may show up as matches in the scan results. + old_rules_files = glob('/tmp/tmp_malware-detection-client_rules.*') + for old_rules_file in old_rules_files: + logger.debug("Removing old rules file %s", old_rules_file) + os.remove(old_rules_file) + self.rules_location = self._get_config_option( 'rules_location', "https://console.redhat.com/api/malware-detection/v1/signatures.yar" ) diff --git a/insights/tests/client/apps/test_malware_detection.py b/insights/tests/client/apps/test_malware_detection.py index ce843ee99..51d5d9fcc 100644 --- a/insights/tests/client/apps/test_malware_detection.py +++ b/insights/tests/client/apps/test_malware_detection.py @@ -217,6 +217,7 @@ def test_invalid_yara_versions(self, version_mock, exists_mock, log_mock, conf, # Use patch.object, just because I wanted to try using patch.object instead of using patch all the time :shrug: +@patch('os.remove') # Mock os.remove so it doesn't actually try to remove any existing files @patch.object(InsightsConnection, 'get', return_value=Mock(status_code=200, content=b"Rule Content")) @patch.object(InsightsConnection, 'get_proxies') @patch.object(InsightsConnection, '_init_session', return_value=Mock()) @@ -227,7 +228,7 @@ class TestGetRules: """ Testing the _get_rules method """ @patch.dict(os.environ, {'TEST_SCAN': 'true'}) - def test_download_rules_cert_auth(self, conf, yara, cmd, session, proxies, get): + def test_download_rules_cert_auth(self, conf, yara, cmd, session, proxies, get, remove): # Test the standard rules_location urls, but will result in cert auth being used to download the rules # Test with insights-config None, expect an error when trying to use the insights-config object with pytest.raises(SystemExit): @@ -259,7 +260,7 @@ def test_download_rules_cert_auth(self, conf, yara, cmd, session, proxies, get): @patch.dict(os.environ, {'TEST_SCAN': 'true'}) @patch(LOGGER_TARGET) - def test_download_rules_basic_auth(self, log_mock, conf, yara, cmd, session, proxies, get): + def test_download_rules_basic_auth(self, log_mock, conf, yara, cmd, session, proxies, get, remove): # Test the standard rules_location urls, with basic auth attempting to be used to download the rules # Basic auth is used by default, but needs to have a valid username and password for it to work # Without a username and password, then cert auth will be used @@ -290,7 +291,7 @@ def test_download_rules_basic_auth(self, log_mock, conf, yara, cmd, session, pro get.assert_called_with("https://console.redhat.com/api/malware-detection/v1/test-rule.yar", log_response_text=True) @patch.dict(os.environ, {'TEST_SCAN': 'true', 'RULES_LOCATION': 'console.redhat.com/rules.yar'}) - def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get): + def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get, remove): # Non-standard rules URLS - without https:// at the start and not signatures.yar # test-scan true and BASIC auth by default expect 
test-rule.yar and no 'cert.' in URL mdc = MalwareDetectionClient(InsightsConfig(username='user', password='pass')) @@ -305,7 +306,7 @@ def test_get_rules_missing_protocol(self, conf, yara, cmd, session, proxies, get @patch.dict(os.environ, {'TEST_SCAN': 'false', 'RULES_LOCATION': 'http://localhost/rules.yar'}) @patch(LOGGER_TARGET) - def test_download_failures(self, log_mock, conf, yara, cmd, session, proxies, get): + def test_download_failures(self, log_mock, conf, yara, cmd, session, proxies, get, remove): from requests.exceptions import ConnectionError, Timeout # Test various problems downloading rules # 404 error - unlikely to occur unless an incorrect rules_location was manually specified @@ -331,7 +332,7 @@ def test_download_failures(self, log_mock, conf, yara, cmd, session, proxies, ge @patch.dict(os.environ, {'TEST_SCAN': 'true', 'RULES_LOCATION': '//console.redhat.com/rules.yar'}) @patch("os.path.isfile", return_value=True) - def test_get_rules_location_files(self, isfile, conf, yara, cmd, session, proxies, get): + def test_get_rules_location_files(self, isfile, conf, yara, cmd, session, proxies, get, remove): # Test using files for rules_location, esp irregular file names # rules_location that starts with a '/' is assumed to be a file, even if its a double '//' # Re-writing the rule to be test-rule.yar doesn't apply to local files @@ -749,12 +750,13 @@ def test_network_filesystem_mountpoints(self, log_mock, call_mock, yara, rules, log_mock.error.assert_called_with("No items to scan because the specified exclude items cancel them out") +@patch('os.remove') # Mock os.remove so it doesn't actually try to remove any existing files @patch(BUILD_YARA_COMMAND_TARGET) @patch(FIND_YARA_TARGET, return_value=YARA) @patch(LOGGER_TARGET) class TestScanning: - def test_scan_rules_file_with_extra_slashes(self, log_mock, yara, cmd, create_test_files): + def test_scan_rules_file_with_extra_slashes(self, log_mock, yara, cmd, remove, create_test_files): # Test scanning RULES_FILE with an extra slash only in the rules_location one # Even with the extra slashes in the rules_location there will be rules matched # because */rules_compiled.yar and *//rules_compiled.yar are the same file @@ -780,7 +782,7 @@ def test_scan_rules_file_with_extra_slashes(self, log_mock, yara, cmd, create_te assert rule_match[0]['string_offset'] == 74 log_mock.info.assert_any_call("Matched rule %s in %s %s", "TEST_RedHatInsightsMalwareDetection", "file", TEST_RULE_FILE) - def test_scan_root_with_extra_slashes(self, log_mock, yara, cmd, create_test_files): + def test_scan_root_with_extra_slashes(self, log_mock, yara, cmd, remove, create_test_files): # Testing we handle the situation where items in scan_only & scan_exclude contain multiple slashes for line in fileinput.FileInput(TEMP_CONFIG_FILE, inplace=1): line = "test_scan: false" if line.startswith("test_scan:") else line @@ -803,7 +805,7 @@ def test_scan_root_with_extra_slashes(self, log_mock, yara, cmd, create_test_fil @patch('insights.client.apps.malware_detection.NamedTemporaryFile') @patch("insights.client.apps.malware_detection.call", return_value="") - def test_scan_since_tmp_files(self, call_mock, tmp_file_mock, log_mock, yara, cmd, extract_tmp_files, create_test_files): + def test_scan_since_tmp_files(self, call_mock, tmp_file_mock, log_mock, yara, cmd, remove, extract_tmp_files, create_test_files): # Set scan_only, scan_exclude options to some of the tmp files and then 'scan' them # Then touch files to test the scan_since option and make sure that only the touched 
files will be scanned yara_file_list = os.path.join(TEMP_TEST_DIR, 'yara_file_list') From 5f342f59649711586e6d4af3278009f038f30f6c Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 9 Dec 2021 12:23:37 +0800 Subject: [PATCH 611/892] feat: New spec to get the count of satellite tasks with reserved resource (#3300) Signed-off-by: Huanhuan Li --- .../parsers/satellite_postgresql_query.py | 21 +++++++++++++++++++ .../tests/test_satellite_postgresql_query.py | 14 ++++++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 4 ++++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py index 96a75aaa8..2c1f55ee6 100644 --- a/insights/parsers/satellite_postgresql_query.py +++ b/insights/parsers/satellite_postgresql_query.py @@ -10,6 +10,8 @@ ----------------------------------------------------------------------------------------------------------- SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv`` ------------------------------------------------------------------------------------------------------------------- +SatelliteCoreTaskReservedResourceCount - command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv`` +------------------------------------------------------------------------------------------------------------------------------- """ import os @@ -207,6 +209,25 @@ class SatelliteComputeResources(SatellitePostgreSQLQuery): pass +@parser(Specs.satellite_core_taskreservedresource_count) +class SatelliteCoreTaskReservedResourceCount(SatellitePostgreSQLQuery): + """ + Parse the output of the command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv``. 
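+ The ``count`` value is parsed as a string, so cast it first (e.g. ``int(tasks[0]['count'])``) when comparing it against a number.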
+ + Sample output:: + + count + 0 + + Examples: + >>> type(tasks) + <class 'insights.parsers.satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount'> + >>> tasks[0]['count'] + '0' + """ + pass + + @parser(Specs.satellite_sca_status) class SatelliteSCAStatus(SatellitePostgreSQLQuery): """ diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py index 319085c8e..d847eb8da 100644 --- a/insights/parsers/tests/test_satellite_postgresql_query.py +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -147,6 +147,11 @@ Orgq,entitlement ''' +SATELLITE_TASK_RESERVERDRESOURCE_CONTENT = """ +count +0 +""" + def test_satellite_postgesql_query_exception(): with pytest.raises(ContentException): @@ -185,11 +190,13 @@ def test_HTL_doc_examples(): settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_1)) + tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT)) globs = { 'query': query, 'table': settings, 'resources_table': resources_table, - 'sat_sca_info': sat_sca_info + 'sat_sca_info': sat_sca_info, + 'tasks': tasks } failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) assert failed == 0 @@ -240,3 +247,8 @@ def test_satellite_compute_resources(): def test_satellite_sca(): sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_2)) assert not sat_sca_info.sca_enabled + + +def test_satellite_taskreservedresource(): + tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT)) + assert tasks[0]['count'] == '0' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 1d2c55cd9..63e80f2fc 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -585,6 +585,7 @@ class Specs(SpecSet): sat5_insights_properties = RegistryPoint() satellite_compute_resources = RegistryPoint() satellite_content_hosts_count = RegistryPoint() + satellite_core_taskreservedresource_count = RegistryPoint() satellite_custom_ca_chain = RegistryPoint() satellite_custom_hiera = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 883f7f6e7..0b54d7a45 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -586,6 +586,10 @@ def md5chk_file_list(broker): "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c 'select count(*) from hosts'", deps=[SatelliteVersion] ) + satellite_core_taskreservedresource_count = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv", + deps=[SatelliteVersion] + ) satellite_custom_ca_chain = simple_command( '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt', ) From da93b3c997bbb21431a5bf77196f471f449f3238 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 9 Dec 2021 13:10:46 +0800 Subject: [PATCH 612/892] feat: New spec to get satellite empty url repositories (#3299) * New spec to get satellite empty url repositories Signed-off-by: Huanhuan Li * Fix
conflicts Co-authored-by: Xiangce Liu --- .../parsers/satellite_postgresql_query.py | 26 +++++++++++++++++++ .../tests/test_satellite_postgresql_query.py | 14 +++++++++- insights/specs/__init__.py | 1 + insights/specs/default.py | 4 +++ 4 files changed, 44 insertions(+), 1 deletion(-) diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py index 2c1f55ee6..e99ac79d8 100644 --- a/insights/parsers/satellite_postgresql_query.py +++ b/insights/parsers/satellite_postgresql_query.py @@ -10,6 +10,10 @@ ----------------------------------------------------------------------------------------------------------- SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv`` ------------------------------------------------------------------------------------------------------------------- + +SatelliteKatelloEmptyURLRepositories - command ``psql -d foreman -c 'select id, name from katello_root_repositories where url is NULL;' --csv`` +----------------------------------------------------------------------------------------------------------------------------------------------- + SatelliteCoreTaskReservedResourceCount - command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv`` ------------------------------------------------------------------------------------------------------------------------------- """ @@ -209,6 +213,28 @@ class SatelliteComputeResources(SatellitePostgreSQLQuery): pass +@parser(Specs.satellite_katello_empty_url_repositories) +class SatelliteKatelloEmptyURLRepositories(SatellitePostgreSQLQuery): + """ + Parse the output of the command ``psql -d foreman -c 'select id, name from katello_root_repositories where url is NULL;' --csv``. 
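+ Each row is a dict keyed by the ``id`` and ``name`` columns; only repositories whose ``url`` is NULL in the ``katello_root_repositories`` table are returned.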
+ + Sample output:: + + id,name + 54,testa + 55,testb + + Examples: + >>> type(katello_root_repositories) + <class 'insights.parsers.satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories'> + >>> len(katello_root_repositories) + 2 + >>> katello_root_repositories[0]['name'] + 'testa' + """ + pass + + @parser(Specs.satellite_core_taskreservedresource_count) class SatelliteCoreTaskReservedResourceCount(SatellitePostgreSQLQuery): """ diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py index d847eb8da..d6284b5d2 100644 --- a/insights/parsers/tests/test_satellite_postgresql_query.py +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -134,7 +134,6 @@ test_compute_resource2,Foreman::Model::RHV ''' - SATELLITE_SCA_INFO_1 = ''' displayname,content_access_mode Default Organization,entitlement @@ -147,6 +146,12 @@ Orgq,entitlement ''' +SATELLITE_KATELLO_ROOT_REPOSITORIES = """ +id,name +54,testa +55,testb +""" + SATELLITE_TASK_RESERVERDRESOURCE_CONTENT = """ count 0 @@ -190,12 +195,14 @@ def test_HTL_doc_examples(): settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_1)) + repositories = satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_KATELLO_ROOT_REPOSITORIES)) tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT)) globs = { 'query': query, 'table': settings, 'resources_table': resources_table, 'sat_sca_info': sat_sca_info, + 'katello_root_repositories': repositories, 'tasks': tasks } failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) @@ -249,6 +256,11 @@ def test_satellite_sca(): assert not sat_sca_info.sca_enabled + +def test_satellite_katello_empty_url_repositories(): + repositories = satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_KATELLO_ROOT_REPOSITORIES)) + assert repositories[1]['name'] == 'testb' + + def test_satellite_taskreservedresource(): tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT)) + assert tasks[0]['count'] == '0' diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index 63e80f2fc..dcea444b0 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -588,6 +588,7 @@ class Specs(SpecSet): satellite_core_taskreservedresource_count = RegistryPoint() satellite_custom_ca_chain = RegistryPoint() satellite_custom_hiera = RegistryPoint() + satellite_katello_empty_url_repositories = RegistryPoint() satellite_mongodb_storage_engine = RegistryPoint() satellite_non_yum_type_repos = RegistryPoint() satellite_sca_status = RegistryPoint() diff --git a/insights/specs/default.py b/insights/specs/default.py index 0b54d7a45..6919bc563 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -594,6 +594,10 @@ def md5chk_file_list(broker): '/usr/bin/awk \'BEGIN { pipe="openssl x509 -noout -subject -enddate"} /^-+BEGIN CERT/,/^-+END CERT/ { print | pipe } /^-+END CERT/ { close(pipe); printf("\\n")}\' /etc/pki/katello/certs/katello-server-ca.crt', ) satellite_custom_hiera = simple_file("/etc/foreman-installer/custom-hiera.yaml") + satellite_katello_empty_url_repositories = simple_command( + "/usr/bin/sudo -iu postgres /usr/bin/psql -d foreman -c
'select id, name from katello_root_repositories where url is NULL;' --csv", + deps=[SatelliteVersion] + ) satellite_missed_pulp_agent_queues = satellite_missed_queues.satellite_missed_pulp_agent_queues satellite_mongodb_storage_engine = simple_command("/usr/bin/mongo pulp_database --eval 'db.serverStatus().storageEngine'") satellite_non_yum_type_repos = simple_command( From 40fea6135ae9db4708ef29a717eb5149b7b4be24 Mon Sep 17 00:00:00 2001 From: Ping Qin <30404410+qinpingli@users.noreply.github.com> Date: Thu, 9 Dec 2021 13:25:09 +0800 Subject: [PATCH 613/892] =?UTF-8?q?feat:=20Add=20spec=20and=20parser=20for?= =?UTF-8?q?=20sos=5Fcommands/logs/journalctl=5F--no-pager=E2=80=A6=20(#329?= =?UTF-8?q?7)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add spec and parser for sos_commands/logs/journalctl_--no-pager file Signed-off-by: Qin Ping * Update the doc format Signed-off-by: Qin Ping --- docs/shared_parsers_catalog/journal_all.rst | 3 ++ insights/parsers/journal_all.py | 44 +++++++++++++++++++++ insights/parsers/tests/test_journal_all.py | 30 ++++++++++++++ insights/specs/__init__.py | 1 + insights/specs/sos_archive.py | 1 + 5 files changed, 79 insertions(+) create mode 100644 docs/shared_parsers_catalog/journal_all.rst create mode 100644 insights/parsers/journal_all.py create mode 100644 insights/parsers/tests/test_journal_all.py diff --git a/docs/shared_parsers_catalog/journal_all.rst b/docs/shared_parsers_catalog/journal_all.rst new file mode 100644 index 000000000..b1feae6b8 --- /dev/null +++ b/docs/shared_parsers_catalog/journal_all.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.journal_all + :members: + :show-inheritance: diff --git a/insights/parsers/journal_all.py b/insights/parsers/journal_all.py new file mode 100644 index 000000000..036f132aa --- /dev/null +++ b/insights/parsers/journal_all.py @@ -0,0 +1,44 @@ +""" +JournalAll file ``/sos_commands/logs/journalctl_--no-pager`` +============================================================ +""" + +from .. import Syslog, parser +from insights.specs import Specs + + +@parser(Specs.journal_all) +class JournalAll(Syslog): + """ + Read the ``/sos_commands/logs/journalctl_--no-pager`` file. Uses the + ``Syslog`` class parser functionality - see the base class for more details. + + Sample log lines:: + + -- Logs begin at Wed 2017-02-08 15:18:00 CET, end at Tue 2017-09-19 09:25:27 CEST. -- + May 18 15:13:34 lxc-rhel68-sat56 jabberd/sm[11057]: session started: jid=rhn-dispatcher-sat@lxc-rhel6-sat56.redhat.com/superclient + May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon + May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM... + May 18 15:24:28 lxc-rhel68-sat56 yum[11597]: Installed: lynx-2.8.6-27.el6.x86_64 + May 18 15:36:19 lxc-rhel68-sat56 yum[11954]: Updated: sos-3.2-40.el6.noarch + + .. note:: + Because journal timestamps by default have no year, + the year of the logs will be inferred from the year in your timestamp. + This will also work around December/January crossovers. 
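+ Since the backing spec is filterable, only lines matching the added filters are guaranteed to be collected; add filters before relying on specific messages.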
+ + Examples: + >>> JournalAll.filters.append('wrapper') + >>> JournalAll.token_scan('daemon_start', 'Wrapper Started as Daemon') + >>> msgs = shared[JournalAll] + >>> len(msgs.lines) + >>> wrapper_msgs = msgs.get('wrapper') # Can only rely on lines filtered being present + >>> wrapper_msgs[0] + {'timestamp': 'May 18 15:13:36', 'hostname': 'lxc-rhel68-sat56', + 'procname': 'wrapper[11375]', 'message': '--> Wrapper Started as Daemon', + 'raw_message': 'May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon' + } + >>> msgs.daemon_start # Token set if matching lines present in logs + True + """ + pass diff --git a/insights/parsers/tests/test_journal_all.py b/insights/parsers/tests/test_journal_all.py new file mode 100644 index 000000000..2dc38eb09 --- /dev/null +++ b/insights/parsers/tests/test_journal_all.py @@ -0,0 +1,30 @@ +from insights.parsers.journal_all import JournalAll +from insights.tests import context_wrap + +MSGINFO = """ +-- Logs begin at Wed 2017-02-08 15:18:00 CET, end at Tue 2017-09-19 09:12:59 CEST. -- +May 18 15:13:34 lxc-rhel68-sat56 jabberd/sm[11057]: session started: jid=rhn-dispatcher-sat@lxc-rhel6-sat56.redhat.com/superclient +May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: --> Wrapper Started as Daemon +May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM... +May 18 15:24:28 lxc-rhel68-sat56 yum[11597]: Installed: lynx-2.8.6-27.el6.x86_64 +May 18 15:36:19 lxc-rhel68-sat56 yum[11954]: Updated: sos-3.2-40.el6.noarch +Apr 22 10:35:01 boy-bona CROND[27921]: (root) CMD (/usr/lib64/sa/sa1 -S DISK 1 1) +Apr 22 10:37:32 boy-bona crontab[28951]: (root) LIST (root) +Apr 22 10:40:01 boy-bona CROND[30677]: (root) CMD (/usr/lib64/sa/sa1 -S DISK 1 1) +Apr 22 10:41:13 boy-bona crontab[32515]: (root) LIST (root) +""".strip() + + +def test_messages(): + msg_info = JournalAll(context_wrap(MSGINFO)) + bona_list = msg_info.get('(root) LIST (root)') + assert 2 == len(bona_list) + assert bona_list[0].get('timestamp') == "Apr 22 10:37:32" + assert bona_list[1].get('timestamp') == "Apr 22 10:41:13" + crond = msg_info.get('CROND') + assert 2 == len(crond) + assert crond[0].get('procname') == "CROND[27921]" + assert msg_info.get('jabberd/sm[11057]')[0].get('hostname') == "lxc-rhel68-sat56" + assert msg_info.get('Wrapper')[0].get('message') == "--> Wrapper Started as Daemon" + assert msg_info.get('Launching')[0].get('raw_message') == "May 18 15:13:36 lxc-rhel68-sat56 wrapper[11375]: Launching a JVM..."
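+ # MSGINFO contains exactly two yum lines (Installed and Updated)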
+ assert 2 == len(msg_info.get('yum')) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index dcea444b0..c0d3a7871 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -274,6 +274,7 @@ class Specs(SpecSet): jboss_standalone_server_log = RegistryPoint(multi_output=True, filterable=True) jboss_standalone_main_config = RegistryPoint(multi_output=True) jboss_version = RegistryPoint(multi_output=True) + journal_all = RegistryPoint(filterable=True) journal_since_boot = RegistryPoint(filterable=True) katello_service_status = RegistryPoint(filterable=True) kdump_conf = RegistryPoint() diff --git a/insights/specs/sos_archive.py b/insights/specs/sos_archive.py index 427f595ca..8675e000a 100644 --- a/insights/specs/sos_archive.py +++ b/insights/specs/sos_archive.py @@ -115,6 +115,7 @@ class SosSpecs(Specs): ip_s_link = first_of([simple_file("sos_commands/networking/ip_-s_-d_link"), simple_file("sos_commands/networking/ip_-s_link"), simple_file("sos_commands/networking/ip_link")]) ip6tables_permanent = simple_file("etc/sysconfig/ip6tables") iptables = first_file(["/etc/sysconfig/iptables", "/etc/sysconfig/iptables.save"]) + journal_all = simple_file("sos_commands/logs/journalctl_--no-pager") journal_since_boot = first_file(["sos_commands/logs/journalctl_--no-pager_--boot", "sos_commands/logs/journalctl_--no-pager_--catalog_--boot", "sos_commands/logs/journalctl_--all_--this-boot_--no-pager"]) ironic_conf = first_file(["/var/lib/config-data/puppet-generated/ironic/etc/ironic/ironic.conf", "/etc/ironic/ironic.conf"]) kerberos_kdc_log = simple_file("var/log/krb5kdc.log") From 582cdb69130826922e4e48185b4b08ddd782bef0 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 9 Dec 2021 13:37:35 +0800 Subject: [PATCH 614/892] Fix: Only get "SSLCertificateFile" when "SSLEngine on" is configured (#3305) * Fix: Only get "SSLCertificateFile" when "SSLEngine on" is configured Signed-off-by: Huanhuan Li * Update as per the review comment Co-authored-by: Xiangce Liu --- insights/specs/datasources/ssl_certificate.py | 11 +++++++++-- insights/tests/datasources/test_ssl_certificate.py | 13 +++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py index 34de80d2c..4af0bbada 100644 --- a/insights/specs/datasources/ssl_certificate.py +++ b/insights/specs/datasources/ssl_certificate.py @@ -25,9 +25,16 @@ def httpd_ssl_certificate_files(broker): SkipComponent: Raised if "SSLCertificateFile" directive isn't found """ conf = broker[HttpdConfTree] - ssl_certs = conf.find('SSLCertificateFile') + virtual_hosts = conf.find('VirtualHost') + ssl_certs = [] + for host in virtual_hosts: + ssl_cert = ssl_engine = None + ssl_engine = host.select('SSLEngine') + ssl_cert = host.select('SSLCertificateFile') + if ssl_engine and ssl_engine.value and ssl_cert: + ssl_certs.append(str(ssl_cert.value)) if ssl_certs: - return [str(ssl_cert.value) for ssl_cert in ssl_certs] + return ssl_certs raise SkipComponent diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py index 8576be085..3e833bcef 100644 --- a/insights/tests/datasources/test_ssl_certificate.py +++ b/insights/tests/datasources/test_ssl_certificate.py @@ -55,6 +55,18 @@ SSLVerifyDepth 3 SSLOptions +StdEnvVars +ExportCertData + + ## SSL directives + ServerName f.g.e.com + SSLEngine off + SSLCertificateFile 
"/etc/pki/katello/certs/katello-apache_e.crt" + SSLCertificateKeyFile "/etc/pki/katello/private/katello-apache_e.key" + SSLCertificateChainFile "/etc/pki/katello/certs/katello-server-ca_e.crt" + SSLVerifyClient optional + SSLCACertificateFile "/etc/pki/katello/certs/katello-default-ca_e.crt" + SSLVerifyDepth 3 + SSLOptions +StdEnvVars +ExportCertData + """.strip() HTTPD_CONF_WITHOUT_SSL = """ @@ -155,6 +167,7 @@ def test_httpd_certificate(): HttpdConfTree: conf_tree } result = httpd_ssl_certificate_files(broker) + # "/etc/pki/katello/certs/katello-apache_e.crt" not in the result assert result == ['/etc/pki/katello/certs/katello-apache.crt', '/etc/pki/katello/certs/katello-apache_d.crt'] From b08b6a842ea88fe09422bc59cac7eedbffd47fa5 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Tue, 14 Dec 2021 11:36:25 +0800 Subject: [PATCH 615/892] =?UTF-8?q?Feat:=20New=20spec=20to=20get=20the=20h?= =?UTF-8?q?ttpd=20certificate=20expire=20info=20stored=20in=20NSS=E2=80=A6?= =?UTF-8?q?=20(#3303)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Feat: New spec to get the httpd certificate expire info stored in NSS database * Add the parser and combiner Signed-off-by: Huanhuan Li * Add filter for spec "httpd_cert_info_in_nss" Signed-off-by: Huanhuan Li * Move the filter to rules Signed-off-by: Huanhuan Li --- docs/custom_datasources_index.rst | 2 +- insights/combiners/ssl_certificate.py | 22 ++++++- .../combiners/tests/test_ssl_certificate.py | 20 ++++++- insights/parsers/ssl_certificate.py | 45 ++++++++++++++ .../parsers/tests/test_ssl_certificate.py | 29 ++++++++- insights/specs/__init__.py | 1 + insights/specs/datasources/ssl_certificate.py | 29 +++++++++ insights/specs/default.py | 1 + .../tests/datasources/test_ssl_certificate.py | 60 ++++++++++++++++++- 9 files changed, 204 insertions(+), 5 deletions(-) diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst index 424a2ed25..c83c94c2d 100644 --- a/docs/custom_datasources_index.rst +++ b/docs/custom_datasources_index.rst @@ -87,7 +87,7 @@ insights.specs.datasources.ssl_certificate ------------------------------------------ .. automodule:: insights.specs.datasources.ssl_certificate - :members: httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file + :members: httpd_certificate_info_in_nss, httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file :show-inheritance: :undoc-members: diff --git a/insights/combiners/ssl_certificate.py b/insights/combiners/ssl_certificate.py index da825328b..4a80730ea 100644 --- a/insights/combiners/ssl_certificate.py +++ b/insights/combiners/ssl_certificate.py @@ -11,10 +11,14 @@ EarliestHttpdSSLCertExpireDate - The earliest expire date in a lot of httpd ssl certificates -------------------------------------------------------------------------------------------- Combiner to get the earliest expire date in a lot of httpd ssl certificates. + +EarliestHttpdCertInNSSExpireDate - The earliest expire date in a lot of httpd certificates stored in nss database +----------------------------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of httpd certificates stored in nss database. 
""" from insights.core.dr import SkipComponent -from insights.parsers.ssl_certificate import NginxSSLCertExpireDate, HttpdSSLCertExpireDate +from insights.parsers.ssl_certificate import HttpdCertInfoInNSS, NginxSSLCertExpireDate, HttpdSSLCertExpireDate from insights.parsers.certificates_enddate import CertificatesEnddate from insights.core.plugins import combiner @@ -78,3 +82,19 @@ class EarliestHttpdSSLCertExpireDate(EarliestSSLCertExpireDate): '/test/d.pem' """ pass + + +@combiner(HttpdCertInfoInNSS) +class EarliestHttpdCertInNSSExpireDate(EarliestSSLCertExpireDate): + """ + Combiner to get the earliest expire date in a lot of httpd certificates stored in NSS database. + + Examples: + >>> type(httpd_certs_in_nss) + + >>> httpd_certs_in_nss.earliest_expire_date.str + 'Sun Jan 07 05:26:10 2022' + >>> httpd_certs_in_nss.ssl_cert_path + ('/etc/httpd/nss', 'testcerta') + """ + pass diff --git a/insights/combiners/tests/test_ssl_certificate.py b/insights/combiners/tests/test_ssl_certificate.py index 4fc7c3a26..dc8988e1d 100644 --- a/insights/combiners/tests/test_ssl_certificate.py +++ b/insights/combiners/tests/test_ssl_certificate.py @@ -31,6 +31,12 @@ notAfter=Dec 18 07:02:43 2021 GMT ''' +HTTPD_CERT_EXPIRED_INFO_IN_NSS_1 = """ + Not After : Sun Dec 07 05:26:10 2025""" + +HTTPD_CERT_EXPIRED_INFO_IN_NSS_2 = """ + Not After : Sun Jan 07 05:26:10 2022""" + def test_earliest_ssl_expire_date(): date_info1 = CertificateInfo(context_wrap(COMMON_SSL_CERT_INFO1, args='/test/a.pem')) @@ -55,10 +61,14 @@ def test_doc(): date_info1 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_1, args='/test/c.pem')) date_info2 = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO_2, args='/test/d.pem')) httpd_certs = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info1, date_info2]) + date_info1 = ssl_certificate.HttpdCertInfoInNSS(context_wrap(HTTPD_CERT_EXPIRED_INFO_IN_NSS_1, args=('/etc/httpd/nss', 'testcertb'))) + date_info2 = ssl_certificate.HttpdCertInfoInNSS(context_wrap(HTTPD_CERT_EXPIRED_INFO_IN_NSS_2, args=('/etc/httpd/nss', 'testcerta'))) + httpd_certs_in_nss = ssl_certificate.EarliestHttpdCertInNSSExpireDate([date_info1, date_info2]) globs = { 'ssl_certs': ssl_certs, 'nginx_certs': nginx_certs, - 'httpd_certs': httpd_certs + 'httpd_certs': httpd_certs, + 'httpd_certs_in_nss': httpd_certs_in_nss } failed, _ = doctest.testmod(ssl_certificate, globs=globs) assert failed == 0 @@ -88,3 +98,11 @@ def test_httpd_ssl_cert_combiner(): expiredate_obj = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info1, date_info2]) assert expiredate_obj.earliest_expire_date.str == 'Dec 18 07:02:43 2021' assert expiredate_obj.ssl_cert_path == '/test/d.pem' + + +def test_httpd_cert_in_nss_combiner(): + date_info1 = ssl_certificate.HttpdCertInfoInNSS(context_wrap(HTTPD_CERT_EXPIRED_INFO_IN_NSS_1, args=('/etc/httpd/nss', 'testcertb'))) + date_info2 = ssl_certificate.HttpdCertInfoInNSS(context_wrap(HTTPD_CERT_EXPIRED_INFO_IN_NSS_2, args=('/etc/httpd/nss', 'testcerta'))) + expiredate_obj = ssl_certificate.EarliestHttpdSSLCertExpireDate([date_info1, date_info2]) + assert expiredate_obj.earliest_expire_date.str == 'Sun Jan 07 05:26:10 2022' + assert expiredate_obj.ssl_cert_path == ('/etc/httpd/nss', 'testcerta') diff --git a/insights/parsers/ssl_certificate.py b/insights/parsers/ssl_certificate.py index c8ed7ddc1..29d70bdf1 100644 --- a/insights/parsers/ssl_certificate.py +++ b/insights/parsers/ssl_certificate.py @@ -14,10 +14,14 @@ 
============================================================================================ MssqlTLSCertExpireDate - command ``openssl x509 -in mssql_tls_cert_file -enddate -noout`` ============================================================================================ +HttpdCertInfoInNSS - command ``certutil -L -d xxx -n xxx`` +========================================================== """ + from insights import parser, CommandParser from datetime import datetime +from insights.core.dr import SkipComponent from insights.parsers import ParseException, SkipException from insights.specs import Specs from insights.parsers.certificates_enddate import CertificatesEnddate @@ -275,3 +279,44 @@ class MssqlTLSCertExpireDate(CertificateInfo): datetime.datetime(2022, 11, 5, 1, 43, 59) """ pass + + +@parser(Specs.httpd_cert_info_in_nss) +class HttpdCertInfoInNSS(CommandParser, dict): + """ + It parses the output of "certutil -d <nss_db_path> -L -n <cert_nickname>". + Currently it only parses the "Not After" info and saves it into a dict. + The key is renamed to "notAfter" to stay consistent with the other certificate info. + The value of "notAfter" is transformed to an instance of ExpirationDate, + which contains the date in string and datetime format. + + Raises: + ParseException: when the "Not After" isn't in the expected format. + SkipComponent: when there is no "Not After" info in the content. + + Examples: + >>> type(nss_cert_info) + <class 'insights.parsers.ssl_certificate.HttpdCertInfoInNSS'> + >>> nss_cert_info['notAfter'].str + 'Sun Dec 07 05:26:10 2025' + """ + date_format = '%a %b %d %H:%M:%S %Y' + + def parse_content(self, content): + # currently only expire date is needed + for line in content: + if 'Not After :' in line: + key, value = [item.strip() for item in line.split(':', 1)] + try: + date_time = datetime.strptime(value, self.date_format) + except Exception: + raise ParseException('The %s is not in %s format.'
% (key, self.date_format)) + value = CertificatesEnddate.ExpirationDate(value, date_time) + self.update({'notAfter': value}) + if not self: + raise SkipComponent + + @property + def cert_path(self): + '''Return the certificate path info.''' + return self.args diff --git a/insights/parsers/tests/test_ssl_certificate.py b/insights/parsers/tests/test_ssl_certificate.py index 84d39763e..5e155312d 100644 --- a/insights/parsers/tests/test_ssl_certificate.py +++ b/insights/parsers/tests/test_ssl_certificate.py @@ -1,4 +1,5 @@ import doctest +from insights.core.dr import SkipComponent import pytest from insights.parsers import ssl_certificate, ParseException, SkipException @@ -88,6 +89,16 @@ notAfter=Nov 5 01:43:59 2022 GMT ''' +NSS_CERT_OUTPUT = """ + Not After : Sun Dec 07 05:26:10 2025""" + +NSS_CERT_BAD_OUTPUT_1 = """ + Not After : Dec 07 05:26:10 2025""" + +NSS_CERT_BAD_OUTPUT_2 = """ + Issuer: "CN=Certificate Shack,O=huali.node2.redhat.com,C=CN +""".strip() + def test_certificate_info_exception(): with pytest.raises(ParseException): @@ -158,6 +169,7 @@ def test_doc(): date_info = ssl_certificate.HttpdSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO)) nginx_date_info = ssl_certificate.NginxSSLCertExpireDate(context_wrap(HTTPD_CERT_EXPIRE_INFO, args='/a/b/c.pem')) mssql_date_info = ssl_certificate.MssqlTLSCertExpireDate(context_wrap(MSSQL_CERT_EXPIRE_INFO)) + cert_info = ssl_certificate.HttpdCertInfoInNSS(context_wrap(NSS_CERT_OUTPUT)) globs = { 'cert': cert, 'certs': ca_cert, @@ -165,7 +177,8 @@ def test_doc(): 'rhsm_katello_default_ca': rhsm_katello_default_ca, 'date_info': date_info, 'nginx_date_info': nginx_date_info, - 'mssql_date_info': mssql_date_info + 'mssql_date_info': mssql_date_info, + 'nss_cert_info': cert_info } failed, _ = doctest.testmod(ssl_certificate, globs=globs) assert failed == 0 @@ -188,3 +201,17 @@ def test_mssql_tls_cert_parser(): date_info = ssl_certificate.MssqlTLSCertExpireDate(context_wrap(MSSQL_CERT_EXPIRE_INFO)) assert 'notAfter' in date_info assert date_info['notAfter'].str == 'Nov 5 01:43:59 2022' + + +def test_httpd_cert_info_in_nss(): + cert_info = ssl_certificate.HttpdCertInfoInNSS(context_wrap(NSS_CERT_OUTPUT)) + assert 'notAfter' in cert_info + assert cert_info['notAfter'].str == 'Sun Dec 07 05:26:10 2025' + + +def test_httpd_cert_info_in_nss_exception(): + with pytest.raises(ParseException): + ssl_certificate.HttpdCertInfoInNSS(context_wrap(NSS_CERT_BAD_OUTPUT_1)) + + with pytest.raises(SkipComponent): + ssl_certificate.HttpdCertInfoInNSS(context_wrap(NSS_CERT_BAD_OUTPUT_2)) diff --git a/insights/specs/__init__.py b/insights/specs/__init__.py index c0d3a7871..9e93721cb 100644 --- a/insights/specs/__init__.py +++ b/insights/specs/__init__.py @@ -224,6 +224,7 @@ class Specs(SpecSet): hosts = RegistryPoint() hponcfg_g = RegistryPoint() httpd_access_log = RegistryPoint(filterable=True) + httpd_cert_info_in_nss = RegistryPoint(multi_output=True, filterable=True) httpd_conf = RegistryPoint(multi_output=True) httpd_conf_scl_httpd24 = RegistryPoint(multi_output=True) httpd_conf_scl_jbcs_httpd24 = RegistryPoint(multi_output=True) diff --git a/insights/specs/datasources/ssl_certificate.py b/insights/specs/datasources/ssl_certificate.py index 4af0bbada..2e4682010 100644 --- a/insights/specs/datasources/ssl_certificate.py +++ b/insights/specs/datasources/ssl_certificate.py @@ -10,6 +10,35 @@ from insights.core.plugins import datasource +@datasource(HttpdConfTree, HostContext) +def httpd_certificate_info_in_nss(broker): + """ + Get the certificate info configured 
in nss database + + Arguments: + broker: the broker object for the current session + + Returns: + list: Returns a list of tuple with the Nss database path and the certificate nickname + + Raises: + SkipComponent: Raised when NSSEngine isn't enabled or "NSSCertificateDatabase" and "NSSNickname" directives aren't found + """ + conf = broker[HttpdConfTree] + path_pairs = [] + virtual_hosts = conf.find('VirtualHost') + for host in virtual_hosts: + nss_engine = nss_database = cert_name = None + nss_engine = host.select('NSSEngine') + nss_database = host.select('NSSCertificateDatabase') + cert_name = host.select('NSSNickname') + if nss_engine and nss_engine.value and nss_database and cert_name: + path_pairs.append((nss_database[0].value, cert_name[0].value)) + if path_pairs: + return path_pairs + raise SkipComponent + + @datasource(HttpdConfTree, HostContext) def httpd_ssl_certificate_files(broker): """ diff --git a/insights/specs/default.py b/insights/specs/default.py index 6919bc563..c27905440 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -294,6 +294,7 @@ def du_dirs_list(broker): "/opt/rh/jbcs-httpd24/root/etc/httpd/conf.modules.d/*.conf" ] ) + httpd_cert_info_in_nss = foreach_execute(ssl_certificate.httpd_certificate_info_in_nss, '/usr/bin/certutil -d %s -L -n %s') httpd_error_log = simple_file("var/log/httpd/error_log") httpd24_httpd_error_log = simple_file("/opt/rh/httpd24/root/etc/httpd/logs/error_log") jbcs_httpd24_httpd_error_log = simple_file("/opt/rh/jbcs-httpd24/root/etc/httpd/logs/error_log") diff --git a/insights/tests/datasources/test_ssl_certificate.py b/insights/tests/datasources/test_ssl_certificate.py index 3e833bcef..bf7f07dad 100644 --- a/insights/tests/datasources/test_ssl_certificate.py +++ b/insights/tests/datasources/test_ssl_certificate.py @@ -6,7 +6,8 @@ from insights.combiners.nginx_conf import _NginxConf, NginxConfTree from insights.parsers.mssql_conf import MsSQLConf from insights.specs.datasources.ssl_certificate import ( - httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file + httpd_ssl_certificate_files, nginx_ssl_certificate_files, + mssql_tls_cert_file, httpd_certificate_info_in_nss ) @@ -147,6 +148,41 @@ memorylimitmb = 2048 """.strip() +HTTPD_NSS_CERT_ENDATE = """ +Not After : Mon Jan 18 07:02:43 2038 +""".strip() + +HTTPD_WITH_NSS = """ +Listen 8443 + +ServerName www.examplea.com:8443 +NSSEngine on +NSSCertificateDatabase /etc/httpd/aliasa +NSSNickname testcerta + + +ServerName www.exampleb.com:8443 +NSSEngine on +NSSCertificateDatabase /etc/httpd/aliasb +NSSNickname testcertb + + +ServerName www.examplec.com:8443 +NSSEngine off +NSSCertificateDatabase /etc/httpd/aliasc +NSSNickname testcertc + +""".strip() + +HTTPD_WITH_NSS_OFF = """ +Listen 8443 + +NSSEngine off +NSSCertificateDatabase /etc/httpd/alias +NSSNickname testcert + +""".strip() + def test_httpd_certificate(): conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) @@ -238,3 +274,25 @@ def test_mssql_tls_no_cert_exception(): } with pytest.raises(SkipComponent): mssql_tls_cert_file(broker1) + + +def test_httpd_certificate_info_in_nss(): + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_WITH_NSS, path='/etc/httpd/conf.d/nss.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + broker = { + HttpdConfTree: conf_tree + } + result = httpd_certificate_info_in_nss(broker) + assert result == [('/etc/httpd/aliasa', 'testcerta'), ('/etc/httpd/aliasb', 
'testcertb')] + + +def test_httpd_certificate_info_in_nss_exception(): + conf1 = _HttpdConf(context_wrap(HTTPD_CONF, path='/etc/httpd/conf/httpd.conf')) + conf2 = _HttpdConf(context_wrap(HTTPD_WITH_NSS_OFF, path='/etc/httpd/conf.d/nss.conf')) + conf_tree = HttpdConfTree([conf1, conf2]) + broker = { + HttpdConfTree: conf_tree + } + with pytest.raises(SkipComponent): + httpd_certificate_info_in_nss(broker) From aac8c490f16bd6818721a7d6487b9322f69e8e92 Mon Sep 17 00:00:00 2001 From: Mark Huth Date: Thu, 16 Dec 2021 08:25:12 +1000 Subject: [PATCH 616/892] Enh: Improved excluding of the insights-client log files (#3306) * https://issues.redhat.com/browse/YARA-249 * Scanning the log files may cause false positives Signed-off-by: Mark Huth --- .../client/apps/malware_detection/__init__.py | 20 ++++++----- .../client/apps/test_malware_detection.py | 33 +++++++++++++++++++ 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py index 6ff036723..e8f10aff8 100644 --- a/insights/client/apps/malware_detection/__init__.py +++ b/insights/client/apps/malware_detection/__init__.py @@ -5,6 +5,7 @@ import yaml from sys import exit import logging +from glob import glob from datetime import datetime from tempfile import NamedTemporaryFile try: @@ -55,8 +56,7 @@ # scan_exclude: a single or list of files/directories to be excluded from filesystem scanning # If an item appears in both scan_only and scan_exclude, scan_exclude takes precedence and the item will be excluded -# The scan_exclude list is pre-populated with a number of top level directories that are recommended to be excluded, -# as well as the insights-client log directory which could cause extra matches itself +# scan_exclude is pre-populated with a list of top level directories that are recommended to be excluded scan_exclude: - /proc - /sys @@ -66,7 +66,6 @@ - /mnt - /media - /dev -- /var/log/insights-client # scan_since: scan files created or modified since X days ago or since the 'last' scan. # Valid values are integers >= 1 or the string 'last'. For example: @@ -411,8 +410,6 @@ def _get_rules(self): Obtain the rules used by yara for scanning from the rules_location option. They can either be downloaded from the malware backend or obtained from a local file. """ - from glob import glob - # The rules file that is downloaded from the backend should be automatically removed when the # malware-detection client exits. 
# However it can happen that the rules file isn't removed for some reason, so remove any existing @@ -523,15 +520,20 @@ def _build_yara_command(self): return yara_cmd def scan_filesystem(self): + """ + Process the filesystem items to scan + If self.scan_fsobjects is set, then just scan its items, less any items in the exclude list + scan_dict will contain all the toplevel directories to scan, and any particular files/subdirectories to scan + """ if not self.do_filesystem_scan: return False - # Process the filesystem items to scan - # If self.scan_fsobjects is set, then just scan its items, less any items in the exclude list - # And exclude the rules file, unless that's the thing we specifically want to scan - # scan_dict will contain all the toplevel directories to scan, and any particular files/subdirectories to scan + # Exclude the rules file and insights-client log files, unless they are things we specifically want to scan if self.rules_file not in self.scan_fsobjects: self.scan_exclude_list.append(self.rules_file) + insights_log_files = glob(constants.default_log_file + '*') + self.scan_exclude_list.extend(list(set(insights_log_files) - set(self.scan_fsobjects))) + scan_dict = process_include_exclude_items(include_items=self.scan_fsobjects, exclude_items=self.scan_exclude_list, exclude_mountpoints=self.network_filesystem_mountpoints) diff --git a/insights/tests/client/apps/test_malware_detection.py b/insights/tests/client/apps/test_malware_detection.py index 51d5d9fcc..ac6057833 100644 --- a/insights/tests/client/apps/test_malware_detection.py +++ b/insights/tests/client/apps/test_malware_detection.py @@ -898,6 +898,39 @@ def test_scan_since_tmp_files(self, call_mock, tmp_file_mock, log_mock, yara, cm assert len(contents) == 2 assert contents == [scan_me_file, scan_me_too_file] + @patch(LOAD_CONFIG_TARGET, return_value=CONFIG) + @patch.dict(os.environ) + def test_rule_n_glob_files_excluded(self, conf, log_mock, yara, cmd, remove, extract_tmp_files, create_test_files): + # Fake a scan but make sure we are excluding the rules file and globbed files (they are supposed to be + # insights log files, but that's hard to mock). 
+ # Also test we are not excluding ones we actually want to scan + glob_files = [os.path.join(TEMP_TEST_DIR, 'scan_me', f) for f in ['new_file', 'old_file']] + os.environ['TEST_SCAN'] = 'false' + os.environ['RULES_LOCATION'] = TEST_RULE_FILE + os.environ['SCAN_ONLY'] = "%s,%s" % (TEMP_TEST_DIR, glob_files[1]) # we actually want to scan glob_files[1] + mdc = MalwareDetectionClient(None) + assert mdc.rules_file == TEST_RULE_FILE + assert mdc.scan_fsobjects == [TEMP_TEST_DIR, glob_files[1]] + + # Patch the call to glob so it returns a specific list of files + # Patch the calls for running yara and have it return no matches + with patch("insights.client.apps.malware_detection.glob", return_value=glob_files): + with patch("insights.client.apps.malware_detection.call", return_value=""): + mdc.scan_filesystem() + assert mdc.rules_file in mdc.scan_exclude_list + assert glob_files[0] in mdc.scan_exclude_list + # Make sure glob_files[1] isn't excluded because we actually want to scan that file + assert glob_files[1] not in mdc.scan_exclude_list + + # This time patch glob so it returns an empty list, ie simulating no extra files to exclude + mdc = MalwareDetectionClient(None) + with patch("insights.client.apps.malware_detection.glob", return_value=[]): + with patch("insights.client.apps.malware_detection.call", return_value=""): + mdc.scan_filesystem() + assert mdc.rules_file in mdc.scan_exclude_list + # None of the glob files should be excluded this time + assert all([f not in mdc.scan_exclude_list for f in glob_files]) + class TestIncludeExcludeMethods: From 59894bcaa547149be92894f3ec1234dc9ff1b454 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Thu, 16 Dec 2021 10:09:54 +0800 Subject: [PATCH 617/892] Add spec "foreman_production_log" back. (#3308) * It is deleted by bob before since it isn't used in any rule. But now I'll use it in one rule, so add it back now. Signed-off-by: Huanhuan Li --- insights/specs/default.py | 1 + 1 file changed, 1 insertion(+) diff --git a/insights/specs/default.py b/insights/specs/default.py index c27905440..ca2e5f2ab 100644 --- a/insights/specs/default.py +++ b/insights/specs/default.py @@ -237,6 +237,7 @@ def du_dirs_list(broker): findmnt_lo_propagation = simple_command("/bin/findmnt -lo+PROPAGATION") firewall_cmd_list_all_zones = simple_command("/usr/bin/firewall-cmd --list-all-zones") firewalld_conf = simple_file("/etc/firewalld/firewalld.conf") + foreman_production_log = simple_file("/var/log/foreman/production.log") foreman_ssl_error_ssl_log = simple_file("/var/log/httpd/foreman-ssl_error_ssl.log") fstab = simple_file("/etc/fstab") fw_devices = simple_command("/bin/fwupdagent get-devices", deps=[IsBareMetal]) From d8b38e1d939c8b1ab61f40409d2c4079b1ef0116 Mon Sep 17 00:00:00 2001 From: Glutexo Date: Thu, 23 Dec 2021 15:15:52 +0100 Subject: [PATCH 618/892] Test IP obfuscation (#3315) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The soscleaner module has currently no tests. Added some for its _sub_ip method that obfuscates IP addresses. The xfail marked test demonstrates how the IP obfuscation incorrectly matches an RPM package version consisting of four numbers separated by a dot. 
Signed-off-by: Štěpán Tomsa --- insights/tests/test_soscleaner.py | 59 +++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 insights/tests/test_soscleaner.py diff --git a/insights/tests/test_soscleaner.py b/insights/tests/test_soscleaner.py new file mode 100644 index 000000000..52c45afd1 --- /dev/null +++ b/insights/tests/test_soscleaner.py @@ -0,0 +1,59 @@ +from insights.contrib.soscleaner import SOSCleaner + +from mock.mock import Mock +from pytest import mark + + +def _soscleaner(): + soscleaner = SOSCleaner() + soscleaner.logger = Mock() + return soscleaner + + +@mark.parametrize(("line", "expected"), [ + ("radius_ip_1=10.0.0.1", "radius_ip_1=10.230.230.1"), + ( + ( + " inet 10.0.2.15" + " netmask 255.255.255.0" + " broadcast 10.0.2.255" + ), + ( + " inet 10.230.230.1" + " netmask 10.230.230.2" + " broadcast 10.230.230.3" + ), + ), + ( + "radius_ip_1=10.0.0.100-10.0.0.200", + "radius_ip_1=10.230.230.1-10.230.230.2", + ), +]) +def test_sub_ip_match(line, expected): + soscleaner = _soscleaner() + actual = soscleaner._sub_ip(line) + assert actual == expected + + +@mark.xfail +@mark.parametrize(("line",), [ + ( + "{\"name\":\"shadow-utils\"," + "\"epoch\":\"2\"," + "\"version\":\"4.1.5.1\"," + "\"release\":\"5.el6\"," + "\"arch\":\"x86_64\"," + "\"installtime\":\"Wed 13 Jan 2021 10:04:18 AM CET\"," + "\"buildtime\":\"1455012203\"," + "\"vendor\":\"Red Hat, Inc.\"," + "\"buildhost\":\"x86-027.build.eng.bos.redhat.com\"," + "\"sigpgp\":" + "\"RSA/8, " + "Tue 08 Mar 2016 11:15:08 AM CET, " + "Key ID 199e2f91fd431d51\"}", ) ]) +def test_sub_ip_no_match(line): + soscleaner = _soscleaner() + actual = soscleaner._sub_ip(line) + assert actual == line From 955cf0f91af8f3ea27b0eca89138ee78200b593a Mon Sep 17 00:00:00 2001 From: Xiangce Liu Date: Fri, 24 Dec 2021 09:08:03 +0800 Subject: [PATCH 619/892] feat: enhance calc_offset to support check all target in line (#3316) * feat: enhance calc_offset to support check all target in line Signed-off-by: Xiangce Liu * update examples Signed-off-by: Xiangce Liu --- insights/parsers/__init__.py | 23 +++++++++++++++---- insights/parsers/tests/test_parsers_module.py | 15 ++++++++++++ 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/insights/parsers/__init__.py b/insights/parsers/__init__.py index d490451ae..301dd6ae8 100644 --- a/insights/parsers/__init__.py +++ b/insights/parsers/__init__.py @@ -220,7 +220,7 @@ def unsplit_lines(lines, cont_char='\\', keep_cont_char=False): yield ''.join(unsplit_lines) -def calc_offset(lines, target, invert_search=False): +def calc_offset(lines, target, invert_search=False, require_all=False): """ Function to search for a line in a list starting with a target string. If `target` is `None` or an empty string then `0` is returned. This @@ -238,6 +238,10 @@ An empty line is implicitly included in target. Default is `False`. This would typically be used if trimming trailing lines off of a file by passing `reversed(lines)` as the `lines` argument. + require_all (boolean): If `True`, this flag causes the search to *also* + require that all the items of `target` appear in the line. + This flag only works when `invert_search == False`; when + `invert_search` is `True`, it is ignored. Returns: int: index into the `lines` indicating the location of `target`. If @@ -256,19 +260,30 @@ def calc_offset(lines, target, invert_search=False): ... >>> lines = ['#', ... 'Warning line', ... 'Error line', ...
' data 2 line'] - >>> target = ['data'] + >>> target = ['data', '2', 'line'] >>> calc_offset(lines, target) 3 >>> target = ['#', 'Warning', 'Error'] >>> calc_offset(lines, target, invert_search=True) 3 + >>> target = ['data', '2', 'line'] + >>> calc_offset(lines, target, require_all=True) + 4 + >>> target = ['#', 'Warning', 'Error'] + >>> calc_offset(lines, target, invert_search=True, require_all=True) # `require_all` doesn't work when `invert_search=True` + 3 """ if target and target[0] is not None: + target = [t.strip() for t in target] for offset, line in enumerate(l.strip() for l in lines): # strip `target` string along with `line` value - found_any = any([line.startswith(t.strip()) for t in target]) + found_any = any([line.startswith(t) for t in target]) if not invert_search and found_any: - return offset + if require_all: + if all(t in line for t in target): + return offset + else: + return offset elif invert_search and not(line == '' or found_any): return offset diff --git a/insights/parsers/tests/test_parsers_module.py b/insights/parsers/tests/test_parsers_module.py index fc1731202..dcdd79ccd 100644 --- a/insights/parsers/tests/test_parsers_module.py +++ b/insights/parsers/tests/test_parsers_module.py @@ -166,6 +166,21 @@ def test_calc_offset(): target=['Trailing', 'Blank', 'Another ', 'Yet'], invert_search=True) == 6 + assert calc_offset(OFFSET_CONTENT_2.splitlines(), + target=['data', '2']) == 3 + assert calc_offset(OFFSET_CONTENT_2.splitlines(), + target=['data', '2'], + require_all=True) == 4 + assert calc_offset( + reversed(OFFSET_CONTENT_2.splitlines()), + target=['Trailing', 'Blank', 'Another ', 'Yet'], + invert_search=True) == 6 + assert calc_offset( + reversed(OFFSET_CONTENT_2.splitlines()), + target=['Trailing', 'Blank', 'Another ', 'Yet'], + invert_search=True, + require_all=True) == 6 + FIXED_CONTENT_1 = """ Column1 Column2 Column3 From de1edd49ae38457c13ad44ee87e18692bff744d4 Mon Sep 17 00:00:00 2001 From: huali027 <44796653+huali027@users.noreply.github.com> Date: Mon, 27 Dec 2021 09:38:26 +0800 Subject: [PATCH 620/892] Fix: Enhance parser "SatellitePostgreSQLQuery" (#3314) * Fix: Enhance parser "SatellitePostgreSQLQuery" to ignore useless lines Signed-off-by: Huanhuan Li * Move the tests to child class Signed-off-by: Huanhuan Li * Enhance the parser "SatellitePostgreSQLQuery" to check all columns in the header Signed-off-by: Huanhuan Li * Add more tests Signed-off-by: Huanhuan Li --- .../parsers/satellite_postgresql_query.py | 75 ++++--------- .../tests/test_satellite_postgresql_query.py | 103 ++++++++---------- 2 files changed, 68 insertions(+), 110 deletions(-) diff --git a/insights/parsers/satellite_postgresql_query.py b/insights/parsers/satellite_postgresql_query.py index e99ac79d8..fd72ce756 100644 --- a/insights/parsers/satellite_postgresql_query.py +++ b/insights/parsers/satellite_postgresql_query.py @@ -10,10 +10,8 @@ ----------------------------------------------------------------------------------------------------------- SatelliteSCAStatus - command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv`` ------------------------------------------------------------------------------------------------------------------- - SatelliteKatelloEmptyURLRepositories - command ``psql -d foreman -c 'select id, name from katello_root_repositories where url is NULL;' --csv`` ----------------------------------------------------------------------------------------------------------------------------------------------- - 
SatelliteCoreTaskReservedResourceCount - command ``psql -d pulpcore -c 'select count(*) from core_taskreservedresource' --csv`` ------------------------------------------------------------------------------------------------------------------------------- """ @@ -25,12 +23,13 @@ from insights import parser, CommandParser from insights.specs import Specs from insights.parsers import SkipException, ParseException -from insights.parsers import keyword_search +from insights.parsers import keyword_search, calc_offset class SatellitePostgreSQLQuery(CommandParser, list): """ - Parent class of satellite postgresql table queries. + Parent class of satellite postgresql table queries. It cannot be used + directly; a child class that overrides the columns attribute is required. It saves the rows data into a list. Each row is saved into a dict. The key is the column name, the value is the value of the column. @@ -55,39 +54,25 @@ class SatellitePostgreSQLQuery(CommandParser, list): abc,,test def,http://xx.com, - - Examples: - - >>> rows = query.search(name='abc') - >>> len(rows) - 1 - >>> rows[0]['value'] - 'test' - >>> columns=query.get_columns() - >>> 'url' in columns - True - >>> 'name' in columns - True - Raises: SkipException: when there isn't data in the table - ParseException: when the output isn't in good csv format + ParseException: when the output isn't in good csv format. + NotImplementedError: when the subclass doesn't override the columns attribute. """ + # child class should override the columns attribute with its own column names + columns = [] + def parse_content(self, content): - if not content or len(content) == 1: - raise SkipException("There is no data in the table") - try: - # keep the line break for yaml parse in some table - reader = DictReader(os.linesep.join(content).splitlines(True)) - except Exception: - raise ParseException("The content isn't in csv format") + if not self.columns: + raise NotImplementedError("Please override the columns attribute.") + start_index = calc_offset(content, self.columns, require_all=True) + valid_lines = content[start_index:] + reader = DictReader(os.linesep.join(valid_lines).splitlines(True)) for row in reader: self.append(row) - - def get_columns(self): - return list(self[0].keys()) + if not self: + raise SkipException("There is no data in the table.") def search(self, **kwargs): """ @@ -101,17 +86,6 @@ def search(self, **kwargs): Returns: list: A list of dictionaries of rows that match the given search criteria. - - Examples: - >>> query.search(name__startswith='abc') == [ - ... {'name': 'abc', 'url': '', 'value': 'test'}, - ... {'name': 'abcdef', 'url': '', 'value': 'test2'} - ... ] - True - >>> query.search(name__startswith='abc', value='test') == [ - ... {'name': 'abc', 'url': '', 'value': 'test'} - ... ] - True """ return keyword_search(self, **kwargs) @@ -139,6 +113,7 @@ class SatelliteAdminSettings(SatellitePostgreSQLQuery): >>> table.get_setting('destroy_vm_on_host_delete') True """ + columns = ['name', 'value', 'default'] def _parse_yaml(self, value): if value: @@ -161,8 +136,6 @@ def parse_content(self, content): ParseException: when the value or default is in bad yaml format.
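SkipException: when there is no data in the table.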
""" super(SatelliteAdminSettings, self).parse_content(content) - if not all(item in self.get_columns() for item in ['default', 'value']): - raise SkipException('No default, value columns in the table.') for row in self: row['default'] = self._parse_yaml(row['default']) row['value'] = self._parse_yaml(row['value']) @@ -191,10 +164,6 @@ class SatelliteComputeResources(SatellitePostgreSQLQuery): """ Parse the output of the command ``psql -d foreman -c 'select name, type from compute_resources' --csv``. - .. note:: - Please refer to its super-class :class:`insights.parsers.satellite_postgresql_query.SatellitePostgreSQLQuery` for more - details. - Sample output:: name,type @@ -210,7 +179,7 @@ class SatelliteComputeResources(SatellitePostgreSQLQuery): >>> rows[0]['name'] 'test_compute_resource1' """ - pass + columns = ['name', 'type'] @parser(Specs.satellite_katello_empty_url_repositories) @@ -232,7 +201,7 @@ class SatelliteKatelloEmptyURLRepositories(SatellitePostgreSQLQuery): >>> katello_root_repositories[0]['name'] 'testa' """ - pass + columns = ['id', 'name'] @parser(Specs.satellite_core_taskreservedresource_count) @@ -251,7 +220,7 @@ class SatelliteCoreTaskReservedResourceCount(SatellitePostgreSQLQuery): >>> tasks[0]['count'] '0' """ - pass + columns = ['count'] @parser(Specs.satellite_sca_status) @@ -259,10 +228,6 @@ class SatelliteSCAStatus(SatellitePostgreSQLQuery): """ Parse the output of the command ``psql -d candlepin -c "select displayname, content_access_mode from cp_owner" --csv``. - .. note:: - Please refer to its super-class :class:`insights.parsers.satellite_postgresql_query.SatellitePostgreSQLQuery` for more - details. - Sample output:: displayname,content_access_mode @@ -276,6 +241,8 @@ class SatelliteSCAStatus(SatellitePostgreSQLQuery): True """ + columns = ['displayname', 'content_access_mode'] + @property def sca_enabled(self): """ diff --git a/insights/parsers/tests/test_satellite_postgresql_query.py b/insights/parsers/tests/test_satellite_postgresql_query.py index d6284b5d2..838704bfa 100644 --- a/insights/parsers/tests/test_satellite_postgresql_query.py +++ b/insights/parsers/tests/test_satellite_postgresql_query.py @@ -24,29 +24,7 @@ SATELLITE_POSTGRESQL_WRONG_5 = ''' name,default,value -''' - -test_data_1 = ''' -name -fix_db_cache -foreman_tasks_sync_task_timeout -dynflow_enable_console -dynflow_console_require_auth -foreman_tasks_proxy_action_retry_count -''' - -test_data_2 = ''' -id,name,created_at,updated_at -1,project-receptor.satellite_receptor_installer,2021-01-30 01:14:22.848735,2021-01-30 01:14:22.848735 -2,theforeman.foreman_scap_client,2021-01-30 01:14:22.916142,2021-01-30 01:14:22.91614 -''' - -test_data_3 = ''' -name,url,value -abc,,test -abcdef,,test2 -def,http://xx.com, -''' +'''.strip() SATELLITE_SETTINGS_1 = ''' name,value,default @@ -157,58 +135,60 @@ 0 """ +SATELLITE_QUERY_DATA1 = """ +logname: no login name +/etc/profile.d/hkuser.sh: line 40: HISTFILE: readonly variable +id,name +1,Puppet_Base +""".strip() -def test_satellite_postgesql_query_exception(): - with pytest.raises(ContentException): - satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_1)) - with pytest.raises(SkipException): - satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_2)) - with pytest.raises(SkipException): - satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_3)) - with pytest.raises(SkipException): - 
satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_4)) - with pytest.raises(SkipException): - satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_5)) - +SATELLITE_QUERY_DATA2 = """ +logname: no login name +/etc/profile.d/hkuser.sh: line 40: HISTFILE: readonly variable +id,name +""".strip() -def test_satellite_postgesql_query(): - table = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_1)) - assert len(table) == 5 - assert table.get_columns() == ['name'] - rows = table.search(name='fix_db_cache') - assert len(rows) == 1 - assert rows[0]['name'] == 'fix_db_cache' - rows = table.search(name__startswith='dynflow') - assert len(rows) == 2 - assert rows[0]['name'] == 'dynflow_enable_console' - assert rows[1]['name'] == 'dynflow_console_require_auth' - - table = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_2)) - assert len(table) == 2 - rows = table.search(id='1') - assert len(rows) == 1 - assert rows[0]['name'] == 'project-receptor.satellite_receptor_installer' +SATELLITE_QUERY_DATA3 = """ +logname: no login name +/etc/profile.d/hkuser.sh: line 40: HISTFILE: readonly variable +""".strip() def test_HTL_doc_examples(): - query = satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(test_data_3)) settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_1)) resources_table = satellite_postgresql_query.SatelliteComputeResources(context_wrap(SATELLITE_COMPUTE_RESOURCE_1)) sat_sca_info = satellite_postgresql_query.SatelliteSCAStatus(context_wrap(SATELLITE_SCA_INFO_1)) repositories = satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_KATELLO_ROOT_REPOSITORIES)) tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT)) globs = { - 'query': query, 'table': settings, 'resources_table': resources_table, 'sat_sca_info': sat_sca_info, 'katello_root_repositories': repositories, 'tasks': tasks } - failed, tested = doctest.testmod(satellite_postgresql_query, globs=globs) + failed, _ = doctest.testmod(satellite_postgresql_query, globs=globs) assert failed == 0 +def test_no_headers(): + with pytest.raises(NotImplementedError): + satellite_postgresql_query.SatellitePostgreSQLQuery(context_wrap(SATELLITE_POSTGRESQL_WRONG_4)) + + +def test_basic_output_with_satellite_admin_setting(): + with pytest.raises(ContentException): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_POSTGRESQL_WRONG_1)) + with pytest.raises(ValueError): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_POSTGRESQL_WRONG_2)) + with pytest.raises(ValueError): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_POSTGRESQL_WRONG_3)) + with pytest.raises(ValueError): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_POSTGRESQL_WRONG_4)) + with pytest.raises(SkipException): + satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_POSTGRESQL_WRONG_5)) + + def test_satellite_admin_settings(): settings = satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_2)) assert(len(settings)) == 2 @@ -237,7 +217,7 @@ def test_satellite_admin_settings(): def test_satellite_admin_settings_exception(): - with pytest.raises(SkipException): + with pytest.raises(ValueError): 
        satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_1))
     with pytest.raises(ParseException):
         satellite_postgresql_query.SatelliteAdminSettings(context_wrap(SATELLITE_SETTINGS_BAD_2))
 
@@ -260,6 +240,17 @@ def test_satellite_katello_empty_url_repositories():
     repositories = satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_KATELLO_ROOT_REPOSITORIES))
     assert repositories[1]['name'] == 'testb'
 
+    table = satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_QUERY_DATA1))
+    assert len(table) == 1
+    assert table[0]['id'] == '1'
+    assert table[0]['name'] == 'Puppet_Base'
+
+    with pytest.raises(SkipException):
+        satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_QUERY_DATA2))
+
+    with pytest.raises(ValueError):
+        satellite_postgresql_query.SatelliteKatelloEmptyURLRepositories(context_wrap(SATELLITE_QUERY_DATA3))
+
 
 def test_satellite_taskreservedresource():
     tasks = satellite_postgresql_query.SatelliteCoreTaskReservedResourceCount(context_wrap(SATELLITE_TASK_RESERVERDRESOURCE_CONTENT))

From 3dd60c47bba79a8bba025a233f8981a232a5d972 Mon Sep 17 00:00:00 2001
From: Rex White
Date: Wed, 12 Jan 2022 06:59:04 -0500
Subject: [PATCH 621/892] Playbook revocation list (#3311)

* Added revocation list for signed playbooks

Signed-off-by: Rex White

* Updated initial revoked_playbooks.yaml

Signed-off-by: Rex White

* Fixed minor flake8 issue in testcase

Signed-off-by: Rex White

* Fixed additional flake8 issues

Signed-off-by: Rex White

* Fixed test to accommodate Python 2.6 unicode weirdness

Signed-off-by: Rex White
---
 MANIFEST.in.client | 1 +
 .../ansible/playbook_verifier/__init__.py | 37 +++++++++-
 insights/revoked_playbooks.yaml | 33 +++++++++
 .../client/apps/test_playbook_verifier.py | 69 ++++++++++++++++++-
 4 files changed, 135 insertions(+), 5 deletions(-)
 create mode 100644 insights/revoked_playbooks.yaml

diff --git a/MANIFEST.in.client b/MANIFEST.in.client
index 98720a49b..4907ea683 100644
--- a/MANIFEST.in.client
+++ b/MANIFEST.in.client
@@ -1,4 +1,5 @@
 include insights/defaults.yaml
+include insights/revoked_playbooks.yaml
 include insights/NAME
 include insights/VERSION
 include insights/COMMIT
diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py
index 5d387bb54..f3657fd59 100644
--- a/insights/client/apps/ansible/playbook_verifier/__init__.py
+++ b/insights/client/apps/ansible/playbook_verifier/__init__.py
@@ -121,7 +121,7 @@ def executeVerification(snippet, encodedSignature):
     result = gpg.verify_data(fn, snippetHash)
     os.unlink(fn)
 
-    return result
+    return result, snippetHash
 
 
 def verifyPlaybookSnippet(snippet):
@@ -140,6 +140,31 @@ def verifyPlaybookSnippet(snippet):
     return executeVerification(snippetCopy, encodedSignature)
 
 
+def getRevocationList():
+    """
+    Load the list of revoked playbook snippet hashes from the egg.
+
+    Returns:
+        list of revocation entries, each a dict with 'name' and 'hash' keys
+    """
+    try:
+        # Import revoked list yaml. The yaml is structured as a list of snippets so that the playbook signing and
+        # verification code can be reused. There will only ever be one snippet, so we just grab the first element...
+        revoked_playbooks = yaml.load(pkgutil.get_data('insights', 'revoked_playbooks.yaml'))[0]
+
+    except Exception:
+        raise PlaybookVerificationError(message='VERIFICATION FAILED: Error loading revocation list')
+
+    # verify the list signature!
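+    # the revocation list is signed the same way as any playbook snippet,
+    # which is why verifyPlaybookSnippet can be reused on it unchanged; a
+    # missing or tampered signature aborts verification before any hashes
+    # are compared against playbook content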
+ verified, snippetHash = verifyPlaybookSnippet(revoked_playbooks) + + if not verified: + raise PlaybookVerificationError(message='VERIFICATION FAILED: Revocation list signature invalid') + + revocationList = revoked_playbooks.get('revoked_playbooks', []) + return revocationList + + def verify(playbook, skipVerify=False): """ Verify the signed playbook. @@ -153,13 +178,21 @@ def verify(playbook, skipVerify=False): if not skipVerify: if not playbook: raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Playbook is empty") + + revocationList = getRevocationList() + for snippet in playbook: - verified = verifyPlaybookSnippet(snippet) + verified, snippetHash = verifyPlaybookSnippet(snippet) if not verified: name = snippet.get('name', 'NAME UNAVAILABLE') raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(name)) + # check if snippetHash is on the revoked list + for revokedItem in revocationList: + if snippetHash == bytearray.fromhex(revokedItem['hash']): + raise PlaybookVerificationError(message="REVOKED PLAYBOOK: Template is on the revoked list [name: {0}]".format(revokedItem['name'])) + logger.info('All templates successfully validated') return playbook diff --git a/insights/revoked_playbooks.yaml b/insights/revoked_playbooks.yaml new file mode 100644 index 000000000..6feaa8ade --- /dev/null +++ b/insights/revoked_playbooks.yaml @@ -0,0 +1,33 @@ +# This file contains a list of revoked playbook signatures +- name: revocation list + timestamp: 1632510092 + vars: + insights_signature_exclude: /vars/insights_signature + insights_signature: !!binary | + TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS1ZtVnljMmx2YmpvZ1IyNTFV + RWNnZGpFS0NtbFJTVlpCZDFWQldXSnhNWG80ZG5jMU9FUXJhalZ3VGtGUmFreExRa0ZCWjFsSlF5 + dHVkbVZxZFZobWNIUlBXbW8wYTJwQ1FuaHRhM0VyYzBrdmFTOEtaRmhyU0dkSFUwOHZaVWhxUm1O + ek1Tc3pUMXBPUVV4M2JXUkpkVVJ2V0N0WmQzcFJTRWxyV25GNmMwTnFOMjlXY1docFVXUkhUR1JP + YmpSd1lXbHJjQW8yYmxkdlEzTkZVMUJrWjFkaWJEQXliR3dyVVZoMEx6TTFTWEJMVXpRd0t5OURP + SFZGZGpoSldqTnFRVzV1Y0hwTVRsQm1hbGgwY1RSSU4waHpWR1Z4Q214bFZDdDZVemhEVG13d05t + cG1VVGs0ZEZJd2RHMU1SRlE0ZWtVMWREVjZRVlY2Y1hWbmFtWndhbGt3ZUdwQ09HaDFZVXhKY25w + WFdqRTRXSEUzUnpNS05HZFpWamx2VlRjcldFTnVZMjVEWVdSdU0xSXllRUZTVVdoc2RucFJVV3ho + WWtSYVJtaDRkbHBIV21oRWNWWjNjU3Q0YlZnM1praHBTVWRrWmt4QmVRcFZUVFptT0haNWVtOXBU + bWhVYlM5MlNVdGxlVmcyUnlzMVZtcENSVlJsT1RoSU1qZFllRk5qVmtFMWVrSlpSbGR5YTFsa1VX + OTZSbEI1U1daTk4ySlpDbEYzUkVWV1EyWnRaM1U1YVRGd2JGZFFOV2t2VTA5NVNYUkNhMHBvZUM5 + TGQyMXdlbWwyUjNSSGVHeFpVRmh4VEZwWFdUWllRMng0YkVsRWNsRkpWazRLTWxOWE0zUjNUbWRQ + TlhaVVVWQlNUbWcxVFZOMlVUZExTemhUZFdFeWRtRlVUVzkzY214aU1YQnllbU5YUlhkTE5VTnRX + blpzYTNkMlIyNURURWg1YUFwSmJVbHVXRlZ3VWxGSFFXdDJjMmd6TmpFck5IUlBiM2xvVlhFMWVp + dFdMMW8wVmpoeFlrcGxURTFIUms5bk5XOVZZMWM0WWxGbVZIcGFTekZRWVVockNrVXlXVFZ2V1RK + V2FHeHFaRGx3UkZkRU5uSXZialZWTHpOSWJFeHRNbUY2VlhBNWIxcFlSWFJIYldOVVluVm5XazQ1 + Tm10dk5rZ3hiRUZvTDJVMFptUUtkRGxWVDFNNFQydHdhMHRYTkhkaEwyVnZlakZRVmpoRVJIZEJT + R3MzVDJsck9WaE5URmx2UTJ3d2JrWTFiakZqZVU1UEwwTnlSVGRFTXpaTmNHdHVNZ292YkRoYVlY + Qm5hMXB1YXowS1BXSk9aWGNLTFMwdExTMUZUa1FnVUVkUUlGTkpSMDVCVkZWU1JTMHRMUzB0Q2c9 + PQ== + + revoked_playbooks: + - name: template_playbook_dispatcher_ostree_upgrade_payload.yml bb3cb30e + hash: 8ddc7c9fb264aa24d7b3536ecf00272ca143c2ddb14a499cdefab045f3403e9b + + - name: template_playbook_dispatcher_ostree_upgrade_payload.yml d6af8d54 + hash: 40a6e9af448208759bc4ef59b6c678227aae9b3f6291c74a4a8767eefc0a401f diff --git a/insights/tests/client/apps/test_playbook_verifier.py 
b/insights/tests/client/apps/test_playbook_verifier.py index bb922c097..e47b9a374 100644 --- a/insights/tests/client/apps/test_playbook_verifier.py +++ b/insights/tests/client/apps/test_playbook_verifier.py @@ -6,7 +6,7 @@ # don't even bother on 2.6 if sys.version_info >= (2, 7): - from insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError # noqa + from insights.client.apps.ansible.playbook_verifier import verify, PlaybookVerificationError, getRevocationList # noqa @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') @@ -74,8 +74,9 @@ def test_key_import_error(): @pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') -@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=[]) -def test_playbook_verification_error(call): +@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=([], [])) +@patch('insights.client.apps.ansible.playbook_verifier.getRevocationList', return_value=[]) +def test_playbook_verification_error(call_1, call_2): key_error = 'SIGNATURE NOT VALID: Template [name: test playbook] has invalid signature' fake_playbook = [{ 'name': "test playbook", @@ -104,3 +105,65 @@ def test_playbook_verification_success(mock_method): result = verify(fake_playbook, skipVerify=False) assert result == fake_playbook + + +# getRevocationList can't load list +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load', side_effect=Exception()) +def test_revocation_list_not_found(mock_method): + load_error = 'VERIFICATION FAILED: Error loading revocation list' + + with raises(PlaybookVerificationError) as error: + getRevocationList() + + assert load_error in str(error.value) + + +# revocation list signature invalid +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(None, 0xdeadbeef)) +def test_revocation_list_signature_invalid(mock_method): + load_error = 'VERIFICATION FAILED: Revocation list signature invalid' + + with raises(PlaybookVerificationError) as error: + getRevocationList() + + assert load_error in str(error.value) + + +# revocation list empty +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load', return_value=[{}]) +@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(True, 0xdeadbeef)) +def test_revocation_list_empty(call_1, call_2): + fake_playbook = [{ + 'name': "test playbook", + 'vars': { + 'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==', + 'insights_signature_exclude': '/vars/insights_signature' + } + }] + + result = verify(fake_playbook, skipVerify=False) + assert result == fake_playbook + + +# playbook on revoked list +@pytest.mark.skipif(sys.version_info < (2, 7), reason='Playbook verifier must be run on python 2.7 or above') +@patch('insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.YAML.load', + return_value=[{'revoked_playbooks': [{'name': 'banned book', 'hash': 'deadbeef'}]}]) 
+@patch('insights.client.apps.ansible.playbook_verifier.verifyPlaybookSnippet', return_value=(True, bytearray.fromhex(u'deadbeef')))
+def test_revoked_playbook(call_1, call_2):
+    revoked_error = 'REVOKED PLAYBOOK: Template is on the revoked list [name: banned book]'
+    fake_playbook = [{
+        'name': "test playbook",
+        'vars': {
+            'insights_signature': 'TFMwdExTMUNSVWRKVGlCUVIxQWdVMGxIVGtGVVZWSkZMUzB0TFMwS0N==',
+            'insights_signature_exclude': '/vars/insights_signature'
+        }
+    }]
+
+    with raises(PlaybookVerificationError) as error:
+        verify(fake_playbook, skipVerify=False)
+
+    assert revoked_error in str(error.value)

From 804e06148296a8bea3f1dfadd11fd02b20fe2ad4 Mon Sep 17 00:00:00 2001
From: Ryan Blakley
Date: Wed, 12 Jan 2022 19:47:05 -0500
Subject: [PATCH 622/892] feat: Switch IniConfigFile from RawConfigParser to
 parsr's iniparser (#3310)

* Switched IniConfigFile from using contrib.RawConfigParser to
  parsr.iniparser, so the backported RawConfigParser can be removed.
* Modified iniparser so that it is backwards compatible with how
  RawConfigParser output data, so that tests and rules in other projects
  won't break.
* Went through and updated all of the parsers using IniConfigFile so that
  their doc strings, examples, and formatting were up to date.
* Went through and added doc string tests to all of the parsers that didn't
  have them already.
* Also made one change from how RawConfigParser parsed files: inline
  comments on options are now removed. Fixes #2777.

Signed-off-by: Ryan Blakley
---
 insights/contrib/ConfigParser.py | 762 +-----------------
 insights/core/__init__.py | 204 +++--
 insights/parsers/autofs_conf.py | 48 +-
 insights/parsers/ceilometer_conf.py | 103 +--
 insights/parsers/ceph_conf.py | 117 +--
 insights/parsers/checkin_conf.py | 20 +-
 insights/parsers/cinder_conf.py | 110 ++-
 insights/parsers/cobbler_modules_conf.py | 52 +-
 insights/parsers/designate_conf.py | 58 +-
 insights/parsers/dnf_modules.py | 7 +-
 insights/parsers/dracut_modules.py | 5 +-
 insights/parsers/etcd_conf.py | 106 +--
 insights/parsers/galera_cnf.py | 110 +--
 insights/parsers/gnocchi.py | 9 +-
 insights/parsers/heat_conf.py | 70 +-
 insights/parsers/insights_client_conf.py | 24 +-
 insights/parsers/ironic_conf.py | 15 +-
 insights/parsers/keystone.py | 79 +-
 insights/parsers/manila_conf.py | 93 +--
 insights/parsers/mssql_conf.py | 46 +-
 insights/parsers/networkmanager_config.py | 37 +-
 insights/parsers/neutron_conf.py | 71 +-
 insights/parsers/neutron_dhcp_agent_conf.py | 12 +-
 insights/parsers/neutron_l3_agent_conf.py | 85 +-
 insights/parsers/neutron_metadata_agent_conf.py | 97 +--
 insights/parsers/neutron_ml2_conf.py | 16 +-
 insights/parsers/neutron_plugin.py | 48 +-
 insights/parsers/neutron_sriov_agent.py | 64 +-
 insights/parsers/nova_conf.py | 88 +-
 insights/parsers/octavia.py | 103 +--
 insights/parsers/odbc.py | 40 +-
 insights/parsers/pluginconf_d.py | 79 +-
 insights/parsers/rhsm_conf.py | 111 ++-
 insights/parsers/samba.py | 172 ++--
 insights/parsers/sssd_conf.py | 96 ++-
 insights/parsers/swift_conf.py | 8 +-
 insights/parsers/tests/test_autofs_conf.py | 31 +-
 insights/parsers/tests/test_ceilometer_conf.py | 42 +-
 insights/parsers/tests/test_cinder_conf.py | 14 +-
 insights/parsers/tests/test_cobbler_modules_conf.py | 14 +-
 insights/parsers/tests/test_designate_conf.py | 14 +-
 insights/parsers/tests/test_galera_cnf.py | 16 +-
 insights/parsers/tests/test_heat_conf.py | 14 +-
 insights/parsers/tests/test_insights_client_conf.py | 25 +-
 insights/parsers/tests/test_keystone.py | 14 +-
 insights/parsers/tests/test_manila_conf.py | 37 +-
insights/parsers/tests/test_neutron_conf.py | 18 +- insights/parsers/tests/test_neutron_plugin.py | 14 +- .../parsers/tests/test_neutron_sriov_agent.py | 14 +- insights/parsers/tests/test_nova_conf.py | 11 +- insights/parsers/tests/test_odbc.py | 17 +- insights/parsers/tests/test_pluginconf_d.py | 20 +- insights/parsers/tests/test_rhsm_conf.py | 22 +- insights/parsers/tests/test_sssd_conf.py | 18 +- insights/parsers/tests/test_tuned_conf.py | 8 +- insights/parsers/tests/test_virt_who_conf.py | 16 +- insights/parsers/tests/test_yum_conf.py | 43 +- insights/parsers/tuned_conf.py | 5 +- insights/parsers/vdsm_conf.py | 7 +- insights/parsers/virt_who_conf.py | 38 +- insights/parsers/vmware_tools_conf.py | 64 +- insights/parsers/yum_conf.py | 108 ++- insights/parsr/__init__.py | 3 +- insights/parsr/iniparser.py | 48 +- insights/tests/test_config_parser.py | 9 +- 65 files changed, 1762 insertions(+), 2107 deletions(-) diff --git a/insights/contrib/ConfigParser.py b/insights/contrib/ConfigParser.py index d09b2015c..485d8282c 100644 --- a/insights/contrib/ConfigParser.py +++ b/insights/contrib/ConfigParser.py @@ -1,758 +1,14 @@ -"""Configuration file parser. +from insights.parsr.iniparser import NoOptionError as NOE, NoSectionError as NSE -A setup file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. -The option values can contain format strings which refer to other values in -the same section, or values in a special [DEFAULT] section. - -For example: - - something: %(dir)s/whatever - -would resolve the "%(dir)s" to the value of dir. All reference -expansions are done late, on demand. - -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. - - methods: - - __init__(defaults=None) - create the parser and specify a dictionary of intrinsic defaults. The - keys must be strings, the values must be appropriate for %()s string - interpolation. Note that `__name__' is always an intrinsic default; - its value is the section's name. - - sections() - return all the configuration section names, sans DEFAULT - - has_section(section) - return whether the given section exists - - has_option(section, option) - return whether the given option exists in the given section - - options(section) - return list of configuration options for the named section - - read(filenames) - read and parse the list of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - readfp(fp, filename=None) - read and parse one configuration file, given as a file object. - The filename defaults to fp.name; it is only used in error - messages (if fp has no `name' attribute, the string `' is used). - - get(section, option, raw=False, vars=None) - return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. 
- - getint(section, options) - like get(), but convert value to an integer - - getfloat(section, options) - like get(), but convert value to a float - - getboolean(section, options) - like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. - - items(section, raw=False, vars=None) - return a list of tuples with (name, value) for each option - in the section. - - remove_section(section) - remove the given file section and all its options - - remove_option(section, option) - remove the given option from the given section - - set(section, option, value) - set the given option - - write(fp) - write the configuration state in .ini format -""" - -try: - from collections import OrderedDict as _default_dict -except ImportError: - # fallback for setup.py which hasn't yet built _collections - _default_dict = dict - -import re - -__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError", - "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def _get_message(self): - """Getter for 'message'; needed only to override deprecation in - BaseException.""" - return self.__message - - def _set_message(self, value): - """Setter for 'message'; needed only to override deprecation in - BaseException.""" - self.__message = value - - # BaseException.message has been deprecated since Python 2.6. To prevent - # DeprecationWarning from popping up over this pre-existing attribute, use - # a new property that takes lookup precedence. 
- message = property(_get_message, _set_message) - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - self.args = (section, ) - -class DuplicateSectionError(Error): - """Raised when a section is multiply-created.""" - - def __init__(self, section): - Error.__init__(self, "Section %r already exists" % section) - self.section = section - self.args = (section, ) - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - self.args = (option, section) - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - self.args = (option, section, msg) - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\tkey : %s\n" - "\trawval : %s\n" - % (section, option, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - self.args = (option, section, rawval, reference) - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text into which substitutions are made - does not conform to the required syntax.""" - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Value interpolation too deeply recursive:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\trawval : %s\n" - % (section, option, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.args = (option, section, rawval) - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, filename): - Error.__init__(self, 'File contains parsing errors: %s' % filename) - self.filename = filename - self.errors = [] - self.args = (filename, ) - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %s, line: %d\n%r' % - (filename, lineno, line)) - self.filename = filename - self.lineno = lineno - self.line = line - self.args = (filename, lineno, line) - - -class RawConfigParser: - def __init__(self, defaults=None, dict_type=_default_dict, - allow_no_value=False): - self._dict = dict_type - self._sections = self._dict() - self._defaults = self._dict() - if allow_no_value: - self._optcre = self.OPTCRE_NV - else: - self._optcre = self.OPTCRE - if defaults: - for key, value in defaults.items(): - self._defaults[self.optionxform(key)] = value - - def defaults(self): - return self._defaults - - def sections(self): - """Return a list of 
section names, excluding [DEFAULT]""" - # self._sections will never have [DEFAULT] in it - return self._sections.keys() - - def add_section(self, section): - """Create a new section in the configuration. - - Raise DuplicateSectionError if a section by the specified name - already exists. Raise ValueError if name is DEFAULT or any of it's - case-insensitive variants. - """ - if section.lower() == "default": - raise ValueError('Invalid section name: %s' % section) - - if section in self._sections: - raise DuplicateSectionError(section) - self._sections[section] = self._dict() - - def has_section(self, section): - """Indicate whether the named section is present in the configuration. - - The DEFAULT section is not acknowledged. - """ - return section in self._sections - - def options(self, section): - """Return a list of option names for the given section name.""" - try: - opts = self._sections[section].copy() - except KeyError: - raise NoSectionError(section) - opts.update(self._defaults) - if '__name__' in opts: - del opts['__name__'] - return opts.keys() - - def read(self, filenames): - """Read and parse a filename or a list of filenames. - - Files that cannot be opened are silently ignored; this is - designed so that you can specify a list of potential - configuration file locations (e.g. current directory, user's - home directory, systemwide directory), and all existing - configuration files in the list will be read. A single - filename may also be given. - - Return list of successfully read files. - """ - if isinstance(filenames, basestring): - filenames = [filenames] - read_ok = [] - for filename in filenames: - try: - fp = open(filename) - except IOError: - continue - self._read(fp, filename) - fp.close() - read_ok.append(filename) - return read_ok - - def readfp(self, fp, filename=None): - """Like read() but the argument must be a file-like object. - - The `fp' argument must have a `readline' method. Optional - second argument is the `filename', which if not given, is - taken from fp.name. If fp has no `name' attribute, `' is - used. 
- - """ - if filename is None: - try: - filename = fp.name - except AttributeError: - filename = '' - self._read(fp, filename) - - def get(self, section, option): - opt = self.optionxform(option) - if section not in self._sections: - if section != DEFAULTSECT: - raise NoSectionError(section) - if opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - elif opt in self._sections[section]: - return self._sections[section][opt] - elif opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - - def items(self, section): - try: - d2 = self._sections[section] - except KeyError: - if section != DEFAULTSECT: - raise NoSectionError(section) - d2 = self._dict() - d = self._defaults.copy() - d.update(d2) - if "__name__" in d: - del d["__name__"] - return d.items() - - def _get(self, section, conv, option): - return conv(self.get(section, option)) - - def getint(self, section, option): - return self._get(section, int, option) - - def getfloat(self, section, option): - return self._get(section, float, option) - - _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - def getboolean(self, section, option): - v = self.get(section, option) - if v.lower() not in self._boolean_states: - raise ValueError('Not a boolean: %s' % v) - return self._boolean_states[v.lower()] - - def optionxform(self, optionstr): - return optionstr.lower() - - def has_option(self, section, option): - """Check for the existence of a given option in a given section.""" - if not section or section == DEFAULTSECT: - option = self.optionxform(option) - return option in self._defaults - elif section not in self._sections: - return False - else: - option = self.optionxform(option) - return (option in self._sections[section] - or option in self._defaults) - - def set(self, section, option, value=None): - """Set an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - sectdict[self.optionxform(option)] = value - - def write(self, fp): - """Write an .ini-format representation of the configuration state.""" - if self._defaults: - fp.write("[%s]\n" % DEFAULTSECT) - for (key, value) in self._defaults.items(): - fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - for section in self._sections: - fp.write("[%s]\n" % section) - for (key, value) in self._sections[section].items(): - if key == "__name__": - continue - if (value is not None) or (self._optcre == self.OPTCRE): - key = " = ".join((key, str(value).replace('\n', '\n\t'))) - fp.write("%s\n" % (key)) - fp.write("\n") - - def remove_option(self, section, option): - """Remove an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - option = self.optionxform(option) - existed = option in sectdict - if existed: - del sectdict[option] - return existed - - def remove_section(self, section): - """Remove a file section.""" - existed = section in self._sections - if existed: - del self._sections[section] - return existed - - # - # Regular expressions for parsing section headers and options. - # - SECTCRE = re.compile( - r'\[' # [ - r'(?P
[^]]+)' # very permissive! - r'\]' # ] - ) - OPTCRE = re.compile( - r'(?P