diff --git a/docs/config_reference.rst b/docs/config_reference.rst index c8715dbd36..57fbcd4daa 100644 --- a/docs/config_reference.rst +++ b/docs/config_reference.rst @@ -60,6 +60,13 @@ It consists of the following properties, which we also call conventionally *conf A list of `general configuration objects <#general-configuration>`__. +.. py:data:: storage + + :required: No + + A list of :ref:`storage configuration objects ` + + .. versionadded:: 4.7 .. py:data:: autodetect_methods @@ -432,6 +439,16 @@ System Partition Configuration No other test would be able to proceed. +.. py:attribute:: systems.partitions.sched_options.unqualified_hostnames + + :required: No + :default: ``false`` + + Use unqualified hostnames in the ``local`` scheduler backend. + + .. versionadded:: 4.7 + + .. py:attribute:: systems.partitions.sched_options.use_nodes_option :required: No @@ -1602,6 +1619,31 @@ The options of an execution mode will be passed to ReFrame as if they were speci For a detailed description of this property, have a look at the :attr:`~environments.target_systems` definition for environments. +.. _storage-configuration: + +Result storage configuration +============================ + +.. versionadded:: 4.7 + +.. py:attribute:: storage.backend + + :required: No + :default: ``"sqlite"`` + + The backend to use for storing the test results. + + Currently, only Sqlite can be used as a storage backend. + + +.. py:attribute:: storage.sqlite_db_file + + :required: No + :default: ``"${HOME}/.reframe/reports/results.db"`` + + The Sqlite database file to use. + + General Configuration ===================== @@ -1859,6 +1901,20 @@ General Configuration For a detailed description of this property, have a look at the :attr:`~environments.target_systems` definition for environments. +.. py:attribute:: general.table_format + + :required: No + :default: ``"pretty"`` + + Set the formatting of tabular output. + + The acceptable values are the following: + + - ``csv``: Generate CSV output + - ``plain``: Generate a plain table without any lines + - ``pretty``: (default) Generate a pretty table + + .. py:attribute:: general.timestamp_dirs :required: No diff --git a/docs/manpage.rst b/docs/manpage.rst index 60b0f9d2d8..5630174514 100644 --- a/docs/manpage.rst +++ b/docs/manpage.rst @@ -17,8 +17,16 @@ The ``reframe`` command is part of ReFrame's frontend. This frontend is responsible for loading and running regression tests written in ReFrame. ReFrame executes tests by sending them down to a well defined pipeline. The implementation of the different stages of this pipeline is part of ReFrame's core architecture, but the frontend is responsible for driving this pipeline and executing tests through it. -There are three basic phases that the frontend goes through, which are described briefly in the following. +Usually, ReFrame processes tests in three phases: +1. It :ref:`discovers and loads tests ` from the filesystem. +2. It :ref:`filters ` the loaded tests based on the current system and any other criteria specified by the user. +3. It :ref:`acts ` upon the selected tests. + +There are also ReFrame commands that do not operate on a set of tests. + + +.. _test-discovery: ------------------------------- Test discovery and test loading @@ -234,18 +242,24 @@ This happens recursively so that if test ``T1`` depends on ``T2`` and ``T2`` dep The ``NAME`` pattern is matched anywhere in the test name and not at its beginning. If you want to match at the beginning of a test name, you should prepend ``^``. 
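+
+   For example, to list only the tests whose name starts with ``stream``:
+
+   .. code:: bash
+
+      reframe -n '^stream' -l
+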
------------- -Test actions ------------- -ReFrame will finally act upon the selected tests. -There are currently two actions that can be performed on tests: (a) list the tests and (b) execute the tests. -An action must always be specified. +.. _commands: + +-------- +Commands +-------- + +ReFrame commands are mutually exclusive and one of them must always be specified. +There are commands that act upon the selected tests and others that have a helper function, such as querying the configuration, querying the results database etc. + +.. versionchanged:: 4.7 + + ReFrame commands are now mutually exclusive and only one can be specified every time. .. option:: --ci-generate=FILE - Do not run the tests, but generate a Gitlab `child pipeline `__ specification in ``FILE``. + Generate a Gitlab `child pipeline `__ specification in ``FILE`` that will run the selected tests. You can set up your Gitlab CI to use the generated file to run every test as a separate Gitlab job respecting test dependencies. For more information, have a look in :ref:`generate-ci-pipeline`. @@ -256,6 +270,12 @@ An action must always be specified. .. versionadded:: 3.4.1 +.. option:: --delete-stored-session=UUID + + Delete the stored session with the specified UUID from the results database. + + .. versionadded:: 4.7 + .. option:: --describe Print a detailed description of the `selected tests <#test-filtering>`__ in JSON format and exit. @@ -267,6 +287,33 @@ An action must always be specified. .. versionadded:: 3.10.0 +.. option:: --describe-stored-session=UUID + + Get detailed information of the session with the specified UUID. + The output is in JSON format. + + .. versionadded:: 4.7 + +.. option:: --describe-stored-testcases=SESSION_UUID|TIME_PERIOD + + Get detailed test case information of the session with the specified UUID or from the specified time period. + + If a session UUID is provided only information about the test cases of this session will be provided. + This option can be combined with :option:`--name` to restrict the listing to specific tests. + For the exact syntax of ``TIME_PERIOD`` check the :ref:`time-period-syntax` section. + + .. versionadded:: 4.7 + +.. _--detect-host-topology: + +.. option:: --detect-host-topology[=FILE] + + Detect the local host processor topology, store it to ``FILE`` and exit. + + If no ``FILE`` is specified, the standard output will be used. + + .. versionadded:: 3.7.0 + .. option:: --dry-run Dry run the selected tests. @@ -280,7 +327,6 @@ An action must always be specified. .. versionadded:: 4.1 - .. option:: -L, --list-detailed[=T|C] List selected tests providing more details for each test. @@ -297,6 +343,27 @@ An action must always be specified. The variable names to which fixtures are bound are also listed. See :ref:`test_naming_scheme` for more information. +.. _--list-stored-sessions: + +.. option:: --list-stored-sessions[=TIME_PERIOD] + + List sessions stored in the results database. + + If ``TIME_PERIOD`` is not specified or if ``all`` is passed, all stored sessions will be listed. + For the exact syntax of ``TIME_PERIOD`` check the :ref:`time-period-syntax`. + + .. versionadded:: 4.7 + +.. option:: --list-stored-testcases=SESSION_UUID|TIME_PERIOD + + List all test cases from the session with the specified UUID or from the specified time period. + + If a session UUID is provided only the test cases of this session will be listed. + This option can be combined with :option:`--name` to restrict the listing to specific tests. 
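+
+   For example, the following lists only the ``stream_test`` cases that have run during the last day:
+
+   .. code:: bash
+
+      reframe --list-stored-testcases=now-1d:now -n stream_test
+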
+ For the exact syntax of ``TIME_PERIOD`` check the :ref:`time-period-syntax` section. + + .. versionadded:: 4.7 + .. option:: -l, --list[=T|C] List selected tests and their dependencies. @@ -323,15 +390,51 @@ An action must always be specified. .. versionadded:: 3.6.0 +.. option:: --performance-compare=CMPSPEC + + Compare the performance of test cases that have run in the past. + + This option can be combined with :option:`--name` to restrict the comparison to specific tests. + Check the :ref:`performance-comparisons` section for the exact syntax of ``CMPSPEC``. + + .. versionadded:: 4.7 + .. option:: -r, --run - Execute the selected tests. + Run the selected tests. + +.. option:: --show-config [PARAM] -If more than one action options are specified, the precedence order is the following: + Show the value of configuration parameter ``PARAM`` as this is defined for the currently selected system and exit. - .. code-block:: console + The parameter value is printed in JSON format. + If ``PARAM`` is not specified or if it set to ``all``, the whole configuration for the currently selected system will be shown. + Configuration parameters are formatted as a path navigating from the top-level configuration object to the actual parameter. + The ``/`` character acts as a selector of configuration object properties or an index in array objects. + The ``@`` character acts as a selector by name for configuration objects that have a ``name`` property. + Here are some example queries: - --describe > --list-detailed > --list > --list-tags > --ci-generate + - Retrieve all the partitions of the current system: + + .. code:: bash + + reframe --show-config=systems/0/partitions + + - Retrieve the job scheduler of the partition named ``default``: + + .. code:: bash + + reframe --show-config=systems/0/partitions/@default/scheduler + + - Retrieve the check search path for system ``foo``: + + .. code:: bash + + reframe --system=foo --show-config=general/0/check_search_path + +.. option:: -V, --version + + Print version and exit. ---------------------------------- @@ -971,16 +1074,6 @@ Miscellaneous options .. versionchanged:: 4.0.0 -.. _--detect-host-topology: - -.. option:: --detect-host-topology[=FILE] - - Detect the local host processor topology, store it to ``FILE`` and exit. - - If no ``FILE`` is specified, the standard output will be used. - - .. versionadded:: 3.7.0 - .. option:: --failure-stats Print failure statistics at the end of the run. @@ -995,11 +1088,21 @@ Miscellaneous options This option can also be set using the :envvar:`RFM_COLORIZE` environment variable or the :attr:`~config.general.colorize` general configuration parameter. -.. option:: --performance-report +.. _--performance-report: - Print a performance report for all the performance tests that have been run. +.. option:: --performance-report[=CMPSPEC] - The report shows the performance values retrieved for the different performance variables defined in the tests. + Print a report summarizing the performance of all performance tests that have run in the current session. + + For each test all of their performance variables are reported and optionally compared to past results based on the ``CMPSPEC`` specified. + + If not specified, the default ``CMPSPEC`` is ``19700101T0000+0000:now/last:+job_nodelist/+result``, meaning that the current performance will be compared to the last run of the same test grouped additionally by the ``job_nodelist`` and showing also the obtained result (``pass`` or ``fail``). 
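+
+   For example, to compare the current session against the mean performance of the last week and also report each test's result, you could run:
+
+   .. code:: bash
+
+      reframe -r -n stream_test --performance-report='now-1w:now/mean:/+result'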
+ + For the exact syntax of ``CMPSPEC``, refer to :ref:`performance-comparisons`. + + .. versionchanged:: 4.7 + + The format of the performance report has changed and the optional ``CMPSPEC`` argument is now added. .. option:: -q, --quiet @@ -1014,35 +1117,6 @@ Miscellaneous options .. versionadded:: 3.9.3 -.. option:: --show-config [PARAM] - - Show the value of configuration parameter ``PARAM`` as this is defined for the currently selected system and exit. - - The parameter value is printed in JSON format. - If ``PARAM`` is not specified or if it set to ``all``, the whole configuration for the currently selected system will be shown. - Configuration parameters are formatted as a path navigating from the top-level configuration object to the actual parameter. - The ``/`` character acts as a selector of configuration object properties or an index in array objects. - The ``@`` character acts as a selector by name for configuration objects that have a ``name`` property. - Here are some example queries: - - - Retrieve all the partitions of the current system: - - .. code:: bash - - reframe --show-config=systems/0/partitions - - - Retrieve the job scheduler of the partition named ``default``: - - .. code:: bash - - reframe --show-config=systems/0/partitions/@default/scheduler - - - Retrieve the check search path for system ``foo``: - - .. code:: bash - - reframe --system=foo --show-config=general/0/check_search_path - .. option:: --system=NAME Load the configuration for system ``NAME``. @@ -1056,16 +1130,24 @@ Miscellaneous options This option can also be set using the :envvar:`RFM_SYSTEM` environment variable. +.. option:: --table-format=csv|plain|pretty + + Set the formatting of tabular output printed by options :option:`--performance-compare`, :option:`--performance-report` and the options controlling the stored sessions. + + The acceptable values are the following: + + - ``csv``: Generate CSV output + - ``plain``: Generate a plain table without any lines + - ``pretty``: (default) Generate a pretty table + + .. versionadded:: 4.7 + .. option:: --upgrade-config-file=OLD[:NEW] Convert the old-style configuration file ``OLD``, place it into the new file ``NEW`` and exit. If a new file is not given, a file in the system temporary directory will be created. -.. option:: -V, --version - - Print version and exit. - .. option:: -v, --verbose Increase verbosity level of output. @@ -1210,6 +1292,139 @@ Very large test names meant also very large path names which could also lead to Fixtures followed a similar naming pattern making them hard to debug. +Result storage +-------------- + +.. versionadded:: 4.7 + +ReFrame stores the results of every session that has executed at least one test into a database. +There is only one storage backend supported at the moment and this is SQLite. +The full session information as recorded in a run report file (see :option:`--report-file`) is stored in the database. +The test cases of the session are indexed by their run job completion time for quick retrieval of all the test cases that have run in a certain period of time. + +The database file is controlled by the :attr:`~config.storage.sqlite_db_file` configuration parameter and multiple ReFrame processes can access it safely simultaneously. + +There are several command-line options that allow users to query the results database, such as the :option:`--list-stored-sessions`, :option:`--list-stored-testcases`, :option:`--describe-stored-session` etc. 
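+
+For example, the following lists all stored sessions and then exports the test cases of the last week in CSV format:
+
+.. code-block:: bash
+
+   reframe --list-stored-sessions
+   reframe --list-stored-testcases=now-1w:now --table-format=csv
+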
+Other options that access the results database are the :option:`--performance-compare` and :option:`--performance-report` which compare the performance results of the same test cases in different periods of time or from different sessions. +Check the :ref:`commands` section for the complete list and details of each option related to the results database. + +Since the report file information is now kept in the results database, there is no need to keep the report files separately, although this remains the default behavior for backward compatibility. +You can disable the report generation by turning off the :attr:`~config.general.generate_file_reports` configuration parameter. +The file report of any session can be retrieved from the database with the :option:`--describe-stored-session` option. + + +.. _performance-comparisons: + +Performance comparisons +----------------------- + +.. versionadded:: 4.7 + +The :option:`--performance-compare` and :option:`--performance-report` options accept a ``CMPSPEC`` argument that specifies how to select and compare test cases. +The full syntax of ``CMPSPEC`` is the following: + +.. code-block:: console + + /// + +The ```` and ```` subspecs specify how the base and target test cases will be retrieved. +The base test cases will be compared against those from the target period. + +.. note:: + + The ```` subspec is ommitted from the ``CMPSPEC`` of the :option:`--performance-report` option as the base test cases are always the test cases from the current session. + +The test cases for comparison can either be retrieved from an existing past session or a past time period. +A past session is denoted with the ```` syntax and only the test cases of that particular session will be selected. +To view the UUIDs of all stored sessions, use the :option:`--list-stored-sessions` option. + +To retrieve results from a time period, check the :ref:`time period syntax ` below. + +The ```` subspec specifies how the performance of both the base and target cases should be grouped and aggregated. +The syntax is the following: + +.. code-block:: console + + :[+]* + +The ```` is a symbolic name for a function to aggregate the grouped test cases. +It can take one of the following values: + +- ``first``: retrieve the performance data of the first test case only +- ``last``: retrieve the performance data of the last test case only +- ``max``: retrieve the maximum of all test cases +- ``mean``: calculate the mean over all test cases +- ``median``: retrieve the median of all test cases +- ``min``: retrieve the minimum of all test cases + +The test cases are always grouped by the following attributes: + +- The test :attr:`~reframe.core.pipeline.RegressionTest.name` +- The system name +- The partition name +- The environment name +- The performance variable name (see :func:`@performance_function ` and :attr:`~reframe.core.pipeline.RegressionTest.perf_variables`) +- The performance variable unit + +The ``+`` subspec specifies additional attributes to group the test cases by. +Any loggable test attribute can be selected. + +.. note:: + + The loggable attributes of a test are the same as the ones list in the logging :attr:`~config.logging.handlers_perflog.format` option but without the ``check_`` prefix. + +Finally, the ```` subspec specifies additional test attributes to list as columns in the resulting comparison table. +The syntax is the following: + +.. code-block:: console + + [+]* + +``col`` refers to any loggable attribute of the test. 
+If these attributes have different values across the aggregated test cases, +the unique values will be joined using the ``|`` separator. + +Here are some examples of performance comparison specs: + +- Compare the test cases of the session ``7a70b2da-1544-4ac4-baf4-0fcddd30b672`` with the mean performance of the last 10 days: + + .. code-block:: console + + 7a70b2da-1544-4ac4-baf4-0fcddd30b672/now-10d:now/mean:/ + +- Compare the best performance of the test cases run on two specific days, group by the node list and report also the test result: + + .. code-block:: console + + 20240701:20240701+1d/20240705:20240705+1d/mean:+job_nodelist/+result + +.. _time-period-syntax: + +Time periods +------------ + +A time period needs to be specified as part of the ``CMPSPEC`` of the :option:`--performance-compare` and :option:`--performance-report` options or as an argument to options that request past results from results database. + +The general syntax of time period subspec is the following: + +.. code-block:: console + + : + +```` and ```` are timestamp denoting the start and end of the requested period. +More specifically, the syntax of each timestamp is the following: + +.. code-block:: console + + [+|-w|d|h|m] + +The ```` is an absolute timestamp in one of the following ``strptime``-compatible formats or the special value ``now``: ``%Y%m%d``, ``%Y%m%dT%H%M``, ``%Y%m%dT%H%M%S``, ``%Y%m%dT%H%M%S%z``. + +Optionally, a shift argument can be appended with ``+`` or ``-`` signs, followed by an amount of weeks (``w``), days (``d``), hours (``h``) or minutes (``m``). + +For example, the period of the last 10 days can be specified as ``now-10d:now``. +Similarly, the period of the week starting on August 5, 2024 will be specified as ``20240805:20240805+1w``. + Environment ----------- @@ -1226,21 +1441,6 @@ Here is an alphabetical list of the environment variables recognized by ReFrame. Whenever an environment variable is associated with a configuration option, its default value is omitted as it is the same. -.. envvar:: RFM_SCHED_ACCESS_IN_SUBMIT - - Pass access options in the submission command (relevant for LSF, OAR, PBS and Slurm). - - .. table:: - :align: left - - ================================== ================== - Associated command line option N/A - Associated configuration parameter :attr::attr:`~config.systems.partitions.sched_options.sched_access_in_submit` - ================================== ================== - -.. versionadded:: 4.7 - - .. envvar:: RFM_AUTODETECT_FQDN Use the fully qualified domain name as the hostname. @@ -1452,6 +1652,20 @@ Whenever an environment variable is associated with a configuration option, its .. versionadded:: 4.7 +.. envvar:: RFM_GENERATE_FILE_REPORTS + + Store session reports also in files. + + .. table:: + :align: left + + ================================== ================== + Associated command line option n/a + Associated configuration parameter :attr:`~config.general.generate_file_reports` + ================================== ================== + + .. versionadded:: 4.7 + .. envvar:: RFM_GIT_TIMEOUT Timeout value in seconds used when checking if a git repository exists. @@ -1600,6 +1814,21 @@ Whenever an environment variable is associated with a configuration option, its ================================== ================== +.. envvar:: RFM_PERF_REPORT_SPEC + + The default ``CMPSPEC`` of the :option:`--performance-report` option. + + .. 
table:: + :align: left + + ================================== ================== + Associated command line option :option:`--performance-report` + Associated configuration parameter :attr:`~config.general.perf_report_spec` + ================================== ================== + + .. versionadded:: 4.7 + + .. envvar:: RFM_PERFLOG_DIR Directory prefix for logging performance data. @@ -1744,6 +1973,21 @@ Whenever an environment variable is associated with a configuration option, its ================================== ================== +.. envvar:: RFM_SCHED_ACCESS_IN_SUBMIT + + Pass access options in the submission command (relevant for LSF, OAR, PBS and Slurm). + + .. table:: + :align: left + + ================================== ================== + Associated command line option N/A + Associated configuration parameter :attr::attr:`~config.systems.partitions.sched_options.sched_access_in_submit` + ================================== ================== + +.. versionadded:: 4.7 + + .. envvar:: RFM_STAGE_DIR Directory prefix for staging test resources. @@ -1757,6 +2001,21 @@ Whenever an environment variable is associated with a configuration option, its ================================== ================== +.. envvar:: RFM_SQLITE_DB_FILE + + The SQlite database file for storing test results. + + .. table:: + :align: left + + ================================== ================== + Associated command line option N/A + Associated configuration parameter :attr:`~config.storage.sqlite_db_file` + ================================== ================== + + .. versionadded:: 4.7 + + .. envvar:: RFM_SYSLOG_ADDRESS The address of the Syslog server to send performance logs. @@ -1787,6 +2046,21 @@ Whenever an environment variable is associated with a configuration option, its ================================== ================== +.. envvar:: RFM_TABLE_FORMAT + + Set the format of the tables printed by various options accessing the results storage. + + .. table:: + :align: left + + ================================== ================== + Associated command line option :option:`--table-format` + Associated configuration parameter :attr:`~config.general.table_format` + ================================== ================== + + .. versionadded:: 4.7 + + .. envvar:: RFM_TIMESTAMP_DIRS Append a timestamp to the output and stage directory prefixes. diff --git a/docs/tutorial.rst b/docs/tutorial.rst index 78b1d3a672..e2d8bd2a99 100644 --- a/docs/tutorial.rst +++ b/docs/tutorial.rst @@ -168,24 +168,28 @@ This can be suppressed by increasing the level at which this information is logg Run reports and performance logging ----------------------------------- -Once a test session finishes, ReFrame generates a detailed JSON report under ``$HOME/.reframe/reports``. -Every time ReFrame is run a new report will be generated automatically. -The latest one is always symlinked by the ``latest.json`` name, unless the :option:`--report-file` option is given. +Once a test session finishes, ReFrame stores the detailed session information in a database file located under ``$HOME/.reframe/reports``. +Past performance data can be retrieved from this database and compared with the current or another run. +We explain in detail the handling of the results database in section :ref:`inspecting-past-results`. + +By default, the session information is also saved in a JSON report file under ``$HOME/.reframe/reports``. +The latest report is always symlinked by the ``latest.json`` name, unless the :option:`--report-file` option is given. 
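+
+To take a quick look at the latest report, you can pretty-print the JSON file directly, for example:
+
+.. code-block:: bash
+   :caption: Run in the single-node container.
+
+   python3 -m json.tool ~/.reframe/reports/latest.json | head -n 30
+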
For performance tests, in particular, an additional CSV file is generated with all the relevant information. These files are located by default under ``perflogs///.log``. In our example, this translates to ``perflogs/generic/default/stream_test.log``. The information that is being logged is fully configurable and we will cover this in the :ref:`logging` section. -Finally, you can use also the :option:`--performance-report` option, which will print a summary of the results of the performance tests that have run in the current session. +Finally, you can use also the :option:`--performance-report` option, which will print a summary of the results of the performance tests that have run in the current session and compare them (by default) to their last obtained performance. .. code-block:: console - [stream_test /2e15a047 @generic:default:builtin] - num_tasks: 1 - performance: - - copy_bw: 22704.4 MB/s (r: 0 MB/s l: -inf% u: +inf%) - - triad_bw: 16040.9 MB/s (r: 0 MB/s l: -inf% u: +inf%) + ┍━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━┯━━━━━━━━━┯━━━━━━━━━┯━━━━━━━━━┯━━━━━━━━━━━━━━━━┯━━━━━━━━━━┑ + │ name │ sysenv │ pvar │ pval │ punit │ pdiff │ job_nodelist │ result │ + ┝━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━┿━━━━━━━━━┿━━━━━━━━━┿━━━━━━━━━┿━━━━━━━━━━━━━━━━┿━━━━━━━━━━┥ + │ stream_test │ generic:default+builtin │ copy_bw │ 40292.1 │ MB/s │ -0.04% │ myhost │ pass │ + │ stream_test │ generic:default+builtin │ triad_bw │ 30564.7 │ MB/s │ +0.12% │ myhost │ pass │ + ┕━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━┷━━━━━━━━━┷━━━━━━━━━┷━━━━━━━━━┷━━━━━━━━━━━━━━━━┷━━━━━━━━━━┙ Inspecting the test artifacts @@ -1969,3 +1973,138 @@ The format function takes the raw log record, the extras and the keys to ignore Since we can't know the exact log record attributes, we iterate over its :attr:`__dict__` items and format the record keys as we go. Also note that we ignore all private field of the record starting with ``_``. Rerunning the previous example with ``CUSTOM_JSON=1`` will generated the modified JSON record. + + +.. _inspecting-past-results: + +Inspecting past results +======================= + +.. versionadded:: 4.7 + +For every session that has run at least one test case, ReFrame stores all its details, including the test cases, in a database. +Essentially, the stored information is the same as the one found in the :ref:`report file `. + +To list the stored sessions use the :option:`--list-stored-sessions` option: + +.. code-block:: bash + :caption: Run in the single-node container. + + reframe --list-stored-sessions + +This produces a table where the most important information about a session is listed: +its unique identifier, its start and end time and how many test cases have run: + +.. 
code-block:: console + + ┍━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━┯━━━━━━━━━━━━━┑ + │ UUID │ Start time │ End time │ Num runs │ Num cases │ + ┝━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━┿━━━━━━━━━━━━━┥ + │ fedb2cf8-6efa-43d8-a6dc-e72c868deba6 │ 20240823T104554+0000 │ 20240823T104557+0000 │ 1 │ 1 │ + │ 4253d6b3-3926-4c4c-a7e8-3f7dffe9bf23 │ 20240823T104608+0000 │ 20240823T104612+0000 │ 1 │ 1 │ + │ 453e64a2-f941-49e2-b628-bf50883a6387 │ 20240823T104721+0000 │ 20240823T104725+0000 │ 1 │ 1 │ + │ d923cca2-a72b-43ca-aca1-de741b65088b │ 20240823T104753+0000 │ 20240823T104757+0000 │ 1 │ 1 │ + │ 300b973b-84a6-4932-89eb-577a832fe357 │ 20240823T104814+0000 │ 20240823T104815+0000 │ 1 │ 2 │ + │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e │ 20240823T104834+0000 │ 20240823T104835+0000 │ 1 │ 4 │ + │ 2a00c55d-4492-498c-89f0-7cf821f308c1 │ 20240823T104843+0000 │ 20240823T104845+0000 │ 1 │ 4 │ + │ 98fe5a68-2582-49ca-9c3c-6bfd9b877143 │ 20240823T104902+0000 │ 20240823T104903+0000 │ 1 │ 4 │ + │ 4bbc27bc-be50-4cca-9d1b-c5fb4988a5c0 │ 20240823T104922+0000 │ 20240823T104933+0000 │ 1 │ 26 │ + │ 200ea28f-6c3a-4973-a2b7-aa08408dbeec │ 20240823T104939+0000 │ 20240823T104943+0000 │ 1 │ 10 │ + │ b756755b-3181-4bb4-9eaa-cc8c3a9d7a43 │ 20240823T104955+0000 │ 20240823T104956+0000 │ 1 │ 10 │ + │ a8a99808-c22d-4b9c-83bc-164289fe6aa7 │ 20240823T105007+0000 │ 20240823T105007+0000 │ 1 │ 4 │ + │ f9b63cdc-7dda-44c5-ab85-1e9752047834 │ 20240823T105019+0000 │ 20240823T105020+0000 │ 1 │ 10 │ + │ 271fc2e7-b550-4325-b8bb-57bdf95f1d0d │ 20240823T105020+0000 │ 20240823T105020+0000 │ 1 │ 1 │ + │ 50cdb774-f231-4f61-8472-7daaa5199d57 │ 20240823T105031+0000 │ 20240823T105032+0000 │ 1 │ 5 │ + ┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━┷━━━━━━━━━━━━━┙ + +You can use the :option:`--list-stored-testcases` to list the test cases of a specific session or those that have run within a certain period of time: + +.. code-block:: bash + :caption: Run in the single-node container. + + reframe --list-stored-testcases=1fb8488e-c361-4355-b7df-c0dcf3cdcc1e + +.. code-block:: console + + ┍━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┑ + │ Name │ SysEnv │ Nodelist │ Completion Time │ Result │ UUID │ + ┝━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┥ + │ build_stream ~tutorialsys:default+gnu │ tutorialsys:default+gnu │ │ n/a │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:0 │ + │ build_stream ~tutorialsys:default+clang │ tutorialsys:default+clang │ │ n/a │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:1 │ + │ stream_test │ tutorialsys:default+gnu │ myhost │ 20240823T104835+0000 │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:2 │ + │ stream_test │ tutorialsys:default+clang │ myhost │ 20240823T104835+0000 │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:3 │ + ┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┙ + + +The test case UUID comprises the UUID of the session where this test case belongs to, its run index (which run inside the session) and its test case index inside the run. 
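+
+For example, you can split off the session part of a test case UUID in the shell and pass it to any option that expects a session UUID:
+
+.. code-block:: bash
+   :caption: Run in the single-node container.
+
+   tc_uuid='1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:2'
+   session_uuid=${tc_uuid%:*:*}    # -> 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e
+   reframe --describe-stored-session="$session_uuid"
+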
+A session may have multiple runs if it has retried some failed test cases (see :option:`--max-retries`) or if it has run its tests repeatedly (see :option:`--reruns` and :option:`--duration`). + +You can also list the test cases that have run in a certain period of time use the :ref:`time period ` of :option:`--list-stored-testcases`: + +.. code-block:: bash + :caption: Run in the single-node container. + + reframe --list-stored-testcases=20240823T104835+0000:now + +.. code-block:: console + + ┍━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┑ + │ Name │ SysEnv │ Nodelist │ Completion Time │ Result │ UUID │ + ┝━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┥ + │ stream_test │ tutorialsys:default+gnu │ myhost │ 20240823T104835+0000 │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:2 │ + │ stream_test │ tutorialsys:default+clang │ myhost │ 20240823T104835+0000 │ pass │ 1fb8488e-c361-4355-b7df-c0dcf3cdcc1e:0:3 │ + │ stream_test │ tutorialsys:default+gnu │ myhost │ 20240823T104844+0000 │ pass │ 2a00c55d-4492-498c-89f0-7cf821f308c1:0:2 │ + │ stream_test │ tutorialsys:default+clang │ myhost │ 20240823T104845+0000 │ pass │ 2a00c55d-4492-498c-89f0-7cf821f308c1:0:3 │ + │ stream_test │ tutorialsys:default+gnu │ myhost │ 20240823T104903+0000 │ pass │ 98fe5a68-2582-49ca-9c3c-6bfd9b877143:0:2 │ + │ stream_test │ tutorialsys:default+clang │ myhost │ 20240823T104903+0000 │ pass │ 98fe5a68-2582-49ca-9c3c-6bfd9b877143:0:3 │ + ... + │ T6 │ generic:default+builtin │ myhost │ 20240823T105020+0000 │ pass │ 271fc2e7-b550-4325-b8bb-57bdf95f1d0d:0:0 │ + │ T0 │ generic:default+builtin │ myhost │ 20240823T105031+0000 │ pass │ 50cdb774-f231-4f61-8472-7daaa5199d57:0:0 │ + │ T4 │ generic:default+builtin │ myhost │ 20240823T105031+0000 │ pass │ 50cdb774-f231-4f61-8472-7daaa5199d57:0:1 │ + │ T5 │ generic:default+builtin │ myhost │ 20240823T105031+0000 │ pass │ 50cdb774-f231-4f61-8472-7daaa5199d57:0:2 │ + │ T1 │ generic:default+builtin │ myhost │ 20240823T105031+0000 │ pass │ 50cdb774-f231-4f61-8472-7daaa5199d57:0:3 │ + │ T6 │ generic:default+builtin │ myhost │ 20240823T105032+0000 │ pass │ 50cdb774-f231-4f61-8472-7daaa5199d57:0:4 │ + ┕━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┙ + +To get all the details of a session or a set of test cases you can use the :option:`--describe-stored-session` and :option:`--describe-stored-testcases` options which will return a JSON record with all the details. + +You can also combine the :option:`-n` option with the :option:`--list-stored-testcases` and :option:`--describe-stored-testcases` options in order to restrict the listing to specific tests only: + +.. code-block:: bash + :caption: Run in the single-node container. + + reframe --list-stored-testcases=20240823T104835+0000:now -n stream_test + + +Comparing performance of test cases +----------------------------------- + +ReFrame can be used to compare the performance of the same test cases run in different time periods using the :option:`--performance-compare` option. +The following will compare the performance of the test cases of the session ``1fb8488e-c361-4355-b7df-c0dcf3cdcc1e`` with any other same test case that has run the last 24h: + +.. 
code-block:: bash + :caption: Run in the single-node container. + + reframe --performance-compare=1fb8488e-c361-4355-b7df-c0dcf3cdcc1e/now-1d:now/mean:/ + +.. code-block:: console + + ┍━━━━━━━━━━━━━┯━━━━━━━━━━━━━━━━━━━━━━━━━━━┯━━━━━━━━━━┯━━━━━━━━━┯━━━━━━━━━┯━━━━━━━━━┑ + │ name │ sysenv │ pvar │ pval │ punit │ pdiff │ + ┝━━━━━━━━━━━━━┿━━━━━━━━━━━━━━━━━━━━━━━━━━━┿━━━━━━━━━━┿━━━━━━━━━┿━━━━━━━━━┿━━━━━━━━━┥ + │ stream_test │ tutorialsys:default+gnu │ copy_bw │ 44139 │ MB/s │ +11.14% │ + │ stream_test │ tutorialsys:default+gnu │ triad_bw │ 39344.7 │ MB/s │ +20.77% │ + │ stream_test │ tutorialsys:default+clang │ copy_bw │ 44979.1 │ MB/s │ +10.81% │ + │ stream_test │ tutorialsys:default+clang │ triad_bw │ 39330.8 │ MB/s │ +8.28% │ + ┕━━━━━━━━━━━━━┷━━━━━━━━━━━━━━━━━━━━━━━━━━━┷━━━━━━━━━━┷━━━━━━━━━┷━━━━━━━━━┷━━━━━━━━━┙ + +The :option:`-n` option can also be combined with :option:`--performance-compare` to restrict the test cases listed. +Similarly to the :option:`--performance-compare` option, the :option:`--performance-report` option can compare the performance of the current run with any arbitrary past session or past time period. + +Finally, you can delete completely a stored session using the :option:`--delete-stored-session` option: + +.. code-block:: bash + + reframe --delete-stored-session=1fb8488e-c361-4355-b7df-c0dcf3cdcc1e + +Deleting a session will also delete all its test cases from the database. diff --git a/examples/tutorial/dockerfiles/singlenode.Dockerfile b/examples/tutorial/dockerfiles/singlenode.Dockerfile index 647f26d231..2c8fed9c78 100644 --- a/examples/tutorial/dockerfiles/singlenode.Dockerfile +++ b/examples/tutorial/dockerfiles/singlenode.Dockerfile @@ -10,8 +10,16 @@ RUN apt-get -y update && \ ARG REFRAME_TAG=develop ARG REFRAME_REPO=reframe-hpc WORKDIR /usr/local/share -RUN git clone --depth 1 --branch $REFRAME_TAG https://github.com/$REFRAME_REPO/reframe.git && \ - cd reframe/ && ./bootstrap.sh + +# Clone reframe +# RUN git clone --depth 1 --branch $REFRAME_TAG https://github.com/$REFRAME_REPO/reframe.git && \ +# cd reframe/ && ./bootstrap.sh + +# Comment the above line and uncomment the following two for development + +COPY . 
/usr/local/share/reframe +RUN cd reframe && ./bootstrap.sh + ENV PATH=/usr/local/share/reframe/bin:$PATH # Install stream diff --git a/reframe/core/exceptions.py b/reframe/core/exceptions.py index af9acc5751..d63403b587 100644 --- a/reframe/core/exceptions.py +++ b/reframe/core/exceptions.py @@ -388,3 +388,17 @@ def what(exc_type, exc_value, tb): reason += f': {exc_value}' return reason + + +class reraise_as: + def __init__(self, new_exc, exceptions=(Exception,), message=''): + self.__new_exc = new_exc + self.__exceptions = exceptions + self.__message = message + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if isinstance(exc_val, self.__exceptions): + raise self.__new_exc(self.__message) from exc_val diff --git a/reframe/core/logging.py b/reframe/core/logging.py index d34006fed1..d91b067fbe 100644 --- a/reframe/core/logging.py +++ b/reframe/core/logging.py @@ -433,7 +433,7 @@ def _create_file_handler(site_config, config_prefix): def _create_filelog_handler(site_config, config_prefix): basedir = os.path.abspath(os.path.join( - site_config.get(f'systems/0/prefix'), + site_config.get('systems/0/prefix'), osext.expandvars(site_config.get(f'{config_prefix}/basedir')) )) prefix = osext.expandvars(site_config.get(f'{config_prefix}/prefix')) @@ -581,7 +581,7 @@ def _create_httpjson_handler(site_config, config_prefix): def _record_to_json(record, extras, ignore_keys): def _can_send(key): - return not key.startswith('_') and not key in ignore_keys + return not key.startswith('_') and key not in ignore_keys def _sanitize(s): return re.sub(r'\W', '_', s) @@ -860,9 +860,10 @@ def log_performance(self, level, task, msg=None, multiline=False): if self.check is None or not self.check.is_performance_check(): return - self.extra['check_partition'] = task.testcase.partition.name - self.extra['check_environ'] = task.testcase.environ.name - self.extra['check_result'] = 'pass' if task.succeeded else 'fail' + _, part, env = task.testcase + self.extra['check_partition'] = part.name + self.extra['check_environ'] = env.name + self.extra['check_result'] = task.result fail_reason = what(*task.exc_info) if not task.succeeded else None self.extra['check_fail_reason'] = fail_reason if msg is None: diff --git a/reframe/core/pipeline.py b/reframe/core/pipeline.py index 462b103b4b..f928a406f5 100644 --- a/reframe/core/pipeline.py +++ b/reframe/core/pipeline.py @@ -46,7 +46,8 @@ class _NoRuntime(ContainerPlatform): - '''Proxy container runtime for storing container platform info early enough. + '''Proxy container runtime for storing container platform info early + enough. This will be replaced by the framework with a concrete implementation based on the current partition info. @@ -847,8 +848,8 @@ def pipeline_hooks(cls): #: .. deprecated:: 4.0.0 #: Please use :attr:`env_vars` instead. variables = deprecate(variable(alias=env_vars), - f"the use of 'variables' is deprecated; " - f"please use 'env_vars' instead") + "the use of 'variables' is deprecated; " + "please use 'env_vars' instead") #: Time limit for this test. #: @@ -1517,8 +1518,7 @@ def _job_exitcode(self): @loggable_as('job_nodelist') @property def _job_nodelist(self): - if self.job: - return self.job.nodelist + return self.job.nodelist if self.job else [] def info(self): '''Provide live information for this test. 
@@ -1705,7 +1705,7 @@ def _setup_build_job(self, **job_opts): ) def _setup_run_job(self, **job_opts): - self._job = self._create_job(f'run', self.local, **job_opts) + self._job = self._create_job('run', self.local, **job_opts) def _setup_container_platform(self): try: @@ -2217,7 +2217,7 @@ def check_performance(self): if perf_patterns is not None and self.perf_variables: raise ReframeSyntaxError( - f"you cannot mix 'perf_patterns' and 'perf_variables' syntax" + "you cannot mix 'perf_patterns' and 'perf_variables' syntax" ) # Convert `perf_patterns` to `perf_variables` @@ -2365,7 +2365,7 @@ def cleanup(self, remove_files=False): aliased = os.path.samefile(self._stagedir, self._outputdir) if aliased: self.logger.debug( - f'outputdir and stagedir are the same; copying skipped' + 'outputdir and stagedir are the same; copying skipped' ) else: self._copy_to_outputdir() diff --git a/reframe/core/runtime.py b/reframe/core/runtime.py index 784ba7c750..24d754b991 100644 --- a/reframe/core/runtime.py +++ b/reframe/core/runtime.py @@ -27,11 +27,12 @@ class RuntimeContext: .. versionadded:: 2.13 ''' - def __init__(self, site_config): + def __init__(self, site_config, *, use_timestamps=False): self._site_config = site_config self._system = System.create(site_config) self._current_run = 0 self._timestamp = time.localtime() + self._use_timestamps = use_timestamps def _makedir(self, *dirs, wipeout=False): ret = os.path.join(*dirs) @@ -110,7 +111,11 @@ def perflogdir(self): @property def timestamp(self): - timefmt = self.site_config.get('general/0/timestamp_dirs') + if self._use_timestamps: + timefmt = self.site_config.get('general/0/timestamp_dirs') + else: + timefmt = '' + return time.strftime(timefmt, self._timestamp) @property @@ -192,11 +197,11 @@ def get_default(self, option): _runtime_context = None -def init_runtime(site_config): +def init_runtime(site_config, **kwargs): global _runtime_context if _runtime_context is None: - _runtime_context = RuntimeContext(site_config) + _runtime_context = RuntimeContext(site_config, **kwargs) def runtime(): diff --git a/reframe/core/schedulers/__init__.py b/reframe/core/schedulers/__init__.py index 0266e43235..a8565a99bc 100644 --- a/reframe/core/schedulers/__init__.py +++ b/reframe/core/schedulers/__init__.py @@ -180,8 +180,6 @@ def filter_nodes_by_state(nodelist, state): } return nodelist - nodes[part.fullname] = [n.name for n in nodelist] - class Job(jsonext.JSONSerializable, metaclass=JobMeta): @@ -377,7 +375,7 @@ def __init__(self, self._jobid = None self._exitcode = None self._state = None - self._nodelist = None + self._nodelist = [] self._submit_time = None self._completion_time = None @@ -515,7 +513,7 @@ def nodelist(self): This attribute is supported by the ``local``, ``pbs``, ``slurm``, ``squeue``, ``ssh``, and ``torque`` scheduler backends. - This attribute is :class:`None` if no nodes are assigned to the job + This attribute is an empty list if no nodes are assigned to the job yet. The ``squeue`` scheduler backend, i.e., Slurm *without* accounting, @@ -531,7 +529,10 @@ def nodelist(self): .. versionadded:: 2.17 - :type: :class:`List[str]` or :class:`None` + .. versionchanged:: 4.7 + Default value is the empty list. 
+ + :type: :class:`List[str]` ''' return self._nodelist @@ -554,7 +555,7 @@ def prepare(self, commands, environs=None, prepare_cmds=None, strict_flex=False, **gen_opts): environs = environs or [] if self.num_tasks is not None and self.num_tasks <= 0: - getlogger().debug(f'[F] Flexible node allocation requested') + getlogger().debug('[F] Flexible node allocation requested') num_tasks_per_node = self.num_tasks_per_node or 1 min_num_tasks = (-self.num_tasks if self.num_tasks else num_tasks_per_node) @@ -635,7 +636,7 @@ def finished(self): return done def __eq__(self, other): - return type(self) == type(other) and self.jobid == other.jobid + return type(self) is type(other) and self.jobid == other.jobid def __hash__(self): return hash(self.jobid) diff --git a/reframe/core/schedulers/local.py b/reframe/core/schedulers/local.py index 5e41491f0f..87eead7530 100644 --- a/reframe/core/schedulers/local.py +++ b/reframe/core/schedulers/local.py @@ -70,7 +70,12 @@ def submit(self, job): # Update job info job._jobid = proc.pid - job._nodelist = [socket.gethostname()] + hostname = socket.gethostname() + if self.get_option('unqualified_hostnames'): + job._nodelist = [hostname.split('.')[0]] + else: + job._nodelist = [hostname] + job._proc = proc job._f_stdout = f_stdout job._f_stderr = f_stderr diff --git a/reframe/core/schedulers/pbs.py b/reframe/core/schedulers/pbs.py index a15bea5df9..86dbb6063d 100644 --- a/reframe/core/schedulers/pbs.py +++ b/reframe/core/schedulers/pbs.py @@ -185,7 +185,7 @@ def finished(self, job): return job.completed def _update_nodelist(self, job, nodespec): - if job.nodelist is not None: + if job.nodelist: return job._nodelist = [x.split('/')[0] for x in nodespec.split('+')] diff --git a/reframe/frontend/argparse.py b/reframe/frontend/argparse.py index e6c37b7484..ceec757b1a 100644 --- a/reframe/frontend/argparse.py +++ b/reframe/frontend/argparse.py @@ -40,6 +40,24 @@ # that essentially associate environment variables with configuration # arguments, without having to define a corresponding command line option. +class _Undefined: + pass + + +# We use a special value for denoting const values that are to be set from the +# configuration default. This placeholder must be used as the `const` argument +# for options with `nargs='?'`. The underlying `ArgumentParser` will use the +# `const` value as if it were supplied from the command-line thus fooling our +# machinery of environment variables and configuration options overriding any +# defaults. For this reason, we use a unique placeholder so that we can +# distinguish whether this value is a default or actually supplied from the +# command-line. 
+CONST_DEFAULT = _Undefined() + + +def _undefined(val): + return val is None or val is CONST_DEFAULT + class _Namespace: def __init__(self, namespace, option_map): @@ -76,7 +94,10 @@ def __getattr__(self, name): return ret envvar, _, action, arg_type, default = self.__option_map[name] - if ret is None and envvar is not None: + if ret is CONST_DEFAULT: + default = CONST_DEFAULT + + if _undefined(ret) and envvar is not None: # Try the environment variable envvar, *delim = envvar.split(maxsplit=2) delim = delim[0] if delim else ',' @@ -120,14 +141,14 @@ def update_config(self, site_config): errors.append(e) continue - if value is not None: + if not _undefined(value): site_config.add_sticky_option(confvar, value) return errors def __repr__(self): return (f'{type(self).__name__}({self.__namespace!r}, ' - '{self.__option_map})') + f'{self.__option_map})') class _ArgumentHolder: @@ -149,6 +170,12 @@ def __getattr__(self, name): return getattr(self._holder, name) + def __setattr__(self, name, value): + if name.startswith('_'): + super().__setattr__(name, value) + else: + setattr(self._holder, name, value) + def add_argument(self, *flags, **kwargs): try: opt_name = kwargs['dest'] @@ -217,7 +244,7 @@ class ArgumentParser(_ArgumentHolder): '''Reframe's extended argument parser. This argument parser behaves almost identical to the original - `argparse.ArgumenParser`. In fact, it uses such a parser internally, + `argparse.ArgumentParser`. In fact, it uses such a parser internally, delegating all the calls to it. The key difference is how newly parsed options are combined with existing namespaces in `parse_args()`.''' @@ -233,6 +260,14 @@ def add_argument_group(self, *args, **kwargs): self._groups.append(group) return group + def add_mutually_exclusive_group(self, *args, **kwargs): + group = _ArgumentGroup( + self._holder.add_mutually_exclusive_group(*args, **kwargs), + self._option_map + ) + self._groups.append(group) + return group + def _resolve_attr(self, attr, namespaces): for ns in namespaces: if ns is None: @@ -248,7 +283,7 @@ def _update_defaults(self): for g in self._groups: self._defaults.__dict__.update(g._defaults.__dict__) - def parse_args(self, args=None, namespace=None): + def parse_args(self, args=None, namespace=None, suppress_required=False): '''Convert argument strings to objects and return them as attributes of a namespace. @@ -260,7 +295,30 @@ def parse_args(self, args=None, namespace=None): for it will be looked up first in `namespace` and if not found there, it will be assigned the default value as specified in its corresponding `add_argument()` call. If no default value was specified either, the - attribute will be set to `None`.''' + attribute will be set to `None`. + + If `suppress_required` is true, required mutually-exclusive groups will + be treated as optional for this parsing operation. + ''' + + class suppress_required_groups: + '''Temporarily suppress required groups if `suppress_required` + is true.''' + def __init__(this): + this._changed_grp = [] + + def __enter__(this): + if suppress_required: + for grp in self._groups: + if hasattr(grp, 'required') and grp.required: + this._changed_grp.append(grp) + grp.required = False + + return this + + def __exit__(this, *args, **kwargs): + for grp in this._changed_grp: + grp.required = True # Enable auto-completion argcomplete.autocomplete(self._holder) @@ -270,7 +328,8 @@ def parse_args(self, args=None, namespace=None): # newly parsed options to completely override any options defined in # namespace. 
The implementation of `argparse.ArgumentParser` does not # do this in options with an 'append' action. - options = self._holder.parse_args(args, None) + with suppress_required_groups(): + options = self._holder.parse_args(args, None) # Check if namespace refers to our namespace and take the cmd options # namespace suitable for ArgumentParser diff --git a/reframe/frontend/cli.py b/reframe/frontend/cli.py index 2fe96a3eb6..e4e3bd1f27 100644 --- a/reframe/frontend/cli.py +++ b/reframe/frontend/cli.py @@ -9,7 +9,6 @@ import os import random import shlex -import socket import sys import time import traceback @@ -23,12 +22,11 @@ import reframe.frontend.ci as ci import reframe.frontend.dependencies as dependencies import reframe.frontend.filters as filters -import reframe.frontend.runreport as runreport +import reframe.frontend.reporting as reporting import reframe.utility as util import reframe.utility.jsonext as jsonext import reframe.utility.osext as osext import reframe.utility.typecheck as typ - from reframe.frontend.testgenerators import (distribute_tests, getallnodes, repeat_tests, parameterize_tests) @@ -89,7 +87,8 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None, else: fmt_fixt_vars = '' - name_info = f'{u.check.display_name}{fmt_fixt_vars} /{u.check.hashcode}' + name_info = (f'{u.check.display_name}{fmt_fixt_vars} ' + f'/{u.check.hashcode}') tc_info = '' details = '' if concretized: @@ -97,7 +96,8 @@ def dep_lines(u, *, prefix, depth=0, lines=None, printed=None, location = inspect.getfile(type(u.check)) if detailed: - details = f' [variant: {u.check.variant_num}, file: {location!r}]' + details = (f' [variant: {u.check.variant_num}, ' + f'file: {location!r}]') lines.append(f'{prefix}^{name_info}{tc_info}{details}') @@ -207,10 +207,35 @@ def calc_verbosity(site_config, quiesce): return curr_verbosity - quiesce +class exit_gracefully_on_error: + def __init__(self, message, logger=None, exceptions=None, exitcode=1): + self.__message = message + self.__logger = logger or PrettyPrinter() + self.__exceptions = exceptions or (Exception,) + self.__exitcode = exitcode + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + logging.getprofiler().print_report(self.__logger.debug) + if exc_type is SystemExit: + # Allow users to exit inside the context manager + return + + if isinstance(exc_val, self.__exceptions): + self.__logger.error(f'{self.__message}: {exc_val}') + self.__logger.verbose( + ''.join(traceback.format_exception(exc_type, exc_val, exc_tb)) + ) + sys.exit(self.__exitcode) + + @logging.time_function_noexit def main(): # Setup command line options argparser = argparse.ArgumentParser() + action_options = argparser.add_mutually_exclusive_group(required=True) output_options = argparser.add_argument_group( 'Options controlling ReFrame output' ) @@ -220,9 +245,6 @@ def main(): select_options = argparser.add_argument_group( 'Options for selecting checks' ) - action_options = argparser.add_argument_group( - 'Options controlling actions' - ) run_options = argparser.add_argument_group( 'Options controlling the execution of checks' ) @@ -232,6 +254,9 @@ def main(): testgen_options = argparser.add_argument_group( 'Options for generating tests dynamically' ) + reporting_options = argparser.add_argument_group( + 'Options related to results reporting' + ) misc_options = argparser.add_argument_group('Miscellaneous options') # Output directory options @@ -290,10 +315,10 @@ def main(): envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files' ) 
output_options.add_argument( - '--timestamp', action='store', nargs='?', const='%y%m%dT%H%M%S%z', - metavar='TIMEFMT', + '--timestamp', action='store', nargs='?', metavar='TIMEFMT', + const=argparse.CONST_DEFAULT, help=('Append a timestamp to the output and stage directory prefixes ' - '(default: "%%FT%%T")'), + '(default: "%%Y%%m%%dT%%H%%M%%S%%z")'), envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) @@ -365,22 +390,52 @@ def main(): help='Select checks that satisfy the expression EXPR' ) - # Action options action_options.add_argument( '--ci-generate', action='store', metavar='FILE', help=('Generate into FILE a Gitlab CI pipeline ' 'for the selected tests and exit'), ) - + action_options.add_argument( + '--delete-stored-session', action='store', metavar='UUID', + help='Delete stored session' + ) action_options.add_argument( '--describe', action='store_true', help='Give full details on the selected tests' ) + action_options.add_argument( + '--describe-stored-session', action='store', metavar='UUID', + help='Get detailed session information in JSON' + ) + action_options.add_argument( + '--describe-stored-testcases', action='store', + metavar='SESSION_UUID|PERIOD', + help='Get detailed test case information in JSON' + ) + action_options.add_argument( + '--detect-host-topology', metavar='FILE', action='store', + nargs='?', const='-', + help=('Detect the local host topology and exit, ' + 'optionally saving it in FILE') + ) + action_options.add_argument( + '--dry-run', action='store_true', + help='Dry run the tests without submitting them for execution' + ) action_options.add_argument( '-L', '--list-detailed', nargs='?', const='T', choices=['C', 'T'], help=('List the selected tests (T) or the concretized test cases (C) ' 'providing more details') ) + action_options.add_argument( + '--list-stored-sessions', nargs='?', action='store', const='all', + metavar='PERIOD', help='List stored sessions' + ) + action_options.add_argument( + '--list-stored-testcases', action='store', + metavar='SESSION_UUID|PERIOD', + help='List stored testcases by session or time period' + ) action_options.add_argument( '-l', '--list', nargs='?', const='T', choices=['C', 'T'], help='List the selected tests (T) or the concretized test cases (C)' @@ -389,13 +444,21 @@ def main(): '--list-tags', action='store_true', help='List the unique tags found in the selected tests and exit' ) + action_options.add_argument( + '--performance-compare', metavar='CMPSPEC', action='store', + help='Compare past performance results' + ) action_options.add_argument( '-r', '--run', action='store_true', help='Run the selected checks' ) action_options.add_argument( - '--dry-run', action='store_true', - help='Dry run the tests without submitting them for execution' + '--show-config', action='store', nargs='?', const='all', + metavar='PARAM', + help='Print the value of configuration parameter PARAM and exit' + ) + action_options.add_argument( + '-V', '--version', action='version', version=osext.reframe_version() ) # Run options @@ -534,6 +597,16 @@ def main(): help='Repeat selected tests N times' ) + # Reporting options + reporting_options.add_argument( + '--performance-report', action='store', nargs='?', + const=argparse.CONST_DEFAULT, + configvar='general/perf_report_spec', + envvar='RFM_PERF_REPORT_SPEC', + help=('Print a report for performance tests ' + '(default: "19700101T0000+0000:now/last:+job_nodelist/+result")') + ) + # Miscellaneous options misc_options.add_argument( '-C', '--config-file', action='append', metavar='FILE', @@ 
-541,12 +614,6 @@ def main(): help='Set configuration file', envvar='RFM_CONFIG_FILES :' ) - misc_options.add_argument( - '--detect-host-topology', metavar='FILE', action='store', - nargs='?', const='-', - help=('Detect the local host topology and exit, ' - 'optionally saving it in FILE') - ) misc_options.add_argument( '--failure-stats', action='store_true', help='Print failure statistics' ) @@ -555,21 +622,14 @@ def main(): help='Disable coloring of output', envvar='RFM_COLORIZE', configvar='general/colorize' ) - misc_options.add_argument( - '--performance-report', action='store_true', - help='Print a report for performance tests' - ) - misc_options.add_argument( - '--show-config', action='store', nargs='?', const='all', - metavar='PARAM', - help='Print the value of configuration parameter PARAM and exit' - ) misc_options.add_argument( '--system', action='store', help='Load configuration for SYSTEM', envvar='RFM_SYSTEM' ) misc_options.add_argument( - '-V', '--version', action='version', version=osext.reframe_version() + '--table-format', choices=['csv', 'plain', 'pretty'], + help='Table formatting', + envvar='RFM_TABLE_FORMAT', configvar='general/table_format' ) misc_options.add_argument( '-v', '--verbose', action='count', @@ -623,6 +683,13 @@ def main(): action='append', help='Directories where ReFrame will look for base configuration' ) + argparser.add_argument( + dest='generate_file_reports', + envvar='RFM_GENERATE_FILE_REPORTS', + configvar='general/generate_file_reports', + action='store_true', + help='Save session report in files' + ) argparser.add_argument( dest='git_timeout', envvar='RFM_GIT_TIMEOUT', @@ -690,6 +757,12 @@ def main(): action='store_true', help='Resolve module conflicts automatically' ) + argparser.add_argument( + dest='sqlite_db_file', + envvar='RFM_SQLITE_DB_FILE', + configvar='storage/sqlite_db_file', + help='DB file where the results database resides' + ) argparser.add_argument( dest='syslog_address', envvar='RFM_SYSLOG_ADDRESS', @@ -724,7 +797,10 @@ def restrict_logging(): ''' if (options.show_config or - options.detect_host_topology or options.describe): + options.detect_host_topology or + options.describe or + options.describe_stored_session or + options.describe_stored_testcases): logging.getlogger().setLevel(logging.ERROR) return True else: @@ -821,7 +897,8 @@ def restrict_logging(): itertools.chain.from_iterable(shlex.split(m) for m in mode_args)) # Parse the mode's options and reparse the command-line - options = argparser.parse_args(mode_args) + options = argparser.parse_args(mode_args, + suppress_required=True) options = argparser.parse_args(namespace=options.cmd_options) options.update_config(site_config) @@ -841,7 +918,8 @@ def restrict_logging(): try: printer.debug('Initializing runtime') - runtime.init_runtime(site_config) + runtime.init_runtime(site_config, + use_timestamps=options.timestamp is not None) except errors.ConfigError as e: printer.error(f'failed to initialize runtime: {e}') printer.info(logfiles_message()) @@ -870,6 +948,66 @@ def restrict_logging(): printer.info(logfiles_message()) sys.exit(1) + if options.list_stored_sessions: + with exit_gracefully_on_error('failed to retrieve session data', + printer): + time_period = options.list_stored_sessions + if time_period == 'all': + time_period = None + + printer.table(reporting.session_data(time_period)) + sys.exit(0) + + if options.list_stored_testcases: + namepatt = '|'.join(options.names) + with exit_gracefully_on_error('failed to retrieve test case data', + printer): + 
printer.table(reporting.testcase_data( + options.list_stored_testcases, namepatt + )) + sys.exit(0) + + if options.describe_stored_session: + # Restore logging level + printer.setLevel(logging.INFO) + with exit_gracefully_on_error('failed to retrieve session data', + printer): + printer.info(jsonext.dumps(reporting.session_info( + options.describe_stored_session + ), indent=2)) + sys.exit(0) + + if options.describe_stored_testcases: + # Restore logging level + printer.setLevel(logging.INFO) + namepatt = '|'.join(options.names) + with exit_gracefully_on_error('failed to retrieve test case data', + printer): + printer.info(jsonext.dumps(reporting.testcase_info( + options.describe_stored_testcases, namepatt + ), indent=2)) + sys.exit(0) + + if options.delete_stored_session: + session_uuid = options.delete_stored_session + with exit_gracefully_on_error('failed to delete session', printer): + reporting.delete_session(session_uuid) + printer.info(f'Session {session_uuid} deleted successfully.') + sys.exit(0) + + if options.performance_compare: + namepatt = '|'.join(options.names) + try: + printer.table( + reporting.performance_compare(options.performance_compare, + namepatt=namepatt) + ) + except errors.ReframeError as err: + printer.error(f'failed to generate performance report: {err}') + sys.exit(1) + else: + sys.exit(0) + # Show configuration after everything is set up if options.show_config: # Restore logging level @@ -906,8 +1044,8 @@ def restrict_logging(): with open(topofile, 'w') as fp: json.dump(s_cpuinfo, fp, indent=2) fp.write('\n') - except OSError as e: - getlogger().error( + except OSError: + logging.getlogger().error( f'could not write topology file: {topofile!r}' ) sys.exit(1) @@ -923,13 +1061,18 @@ def restrict_logging(): if options.restore_session: filenames = options.restore_session.split(',') else: - filenames = [runreport.next_report_filename( - osext.expandvars(site_config.get('general/0/report_file')), - new=False - )] + filenames = [ + osext.expandvars(site_config.get('general/0/report_file')) + ] - report = runreport.load_report(*filenames) - check_search_path = list(report.slice('filename', unique=True)) + try: + restored_session = reporting.restore_session(*filenames) + except errors.ReframeError as err: + printer.error(f'failed to load restore session: {err}') + sys.exit(1) + + check_search_path = list(restored_session.slice('filename', + unique=True)) check_search_recursive = False # If `-c` or `-R` are passed explicitly outside the configuration @@ -940,9 +1083,7 @@ def restrict_logging(): 'search path set explicitly in the command-line or ' 'the environment' ) - check_search_path = site_config.get( - 'general/0/check_search_path' - ) + check_search_path = site_config.get('general/0/check_search_path') if site_config.is_sticky_option('general/check_search_recursive'): printer.warning( @@ -984,26 +1125,26 @@ def print_infoline(param, value): param = param + ':' printer.info(f" {param.ljust(18)} {value}") - session_info = { + report = reporting.RunReport() + report.update_session_info({ 'cmdline': ' '.join(shlex.quote(arg) for arg in sys.argv), 'config_files': rt.site_config.sources, - 'data_version': runreport.DATA_VERSION, - 'hostname': socket.gethostname(), 'log_files': logging.log_files(), 'prefix_output': rt.output_prefix, 'prefix_stage': rt.stage_prefix, 'user': osext.osuser(), 'version': osext.reframe_version(), 'workdir': os.getcwd(), - } + }) # Print command line - printer.info(f"[ReFrame Setup]") + session_info = report['session_info'] + 
printer.info('[ReFrame Setup]') print_infoline('version', session_info['version']) print_infoline('command', repr(session_info['cmdline'])) print_infoline( - f"launched by", - f"{session_info['user'] or ''}@{session_info['hostname']}" + 'launched by', + f'{session_info["user"] or ""}@{session_info["hostname"]}' ) print_infoline('working directory', repr(session_info['workdir'])) print_infoline( @@ -1017,6 +1158,10 @@ def print_infoline(param, value): print_infoline('output directory', repr(session_info['prefix_output'])) print_infoline('log files', ', '.join(repr(s) for s in session_info['log_files'])) + print_infoline( + 'results database', + repr(osext.expandvars(rt.get_option('storage/0/sqlite_db_file'))) + ) printer.info('') try: logging.getprofiler().enter_region('test processing') @@ -1120,16 +1265,15 @@ def print_infoline(param, value): sys.exit(1) def _case_failed(t): - rec = report.case(*t) + rec = restored_session.case(t) if not rec: return False - return (rec['result'] == 'failure' or - rec['result'] == 'aborted') + return rec['result'] == 'fail' or rec['result'] == 'abort' testcases = list(filter(_case_failed, testcases)) printer.verbose( - f'Filtering successful test case(s): ' + f'Filtering out successful test case(s): ' f'{len(testcases)} remaining' ) @@ -1220,7 +1364,9 @@ def _sort_testcases(testcases): printer.debug('Pruned test DAG') printer.debug(dependencies.format_deps(testgraph)) if options.restore_session is not None: - testgraph, restored_cases = report.restore_dangling(testgraph) + testgraph, restored_cases = restored_session.restore_dangling( + testgraph + ) testcases = dependencies.toposort( testgraph, @@ -1277,16 +1423,6 @@ def _sort_testcases(testcases): ) sys.exit(0) - if not options.run and not options.dry_run: - printer.error("No action option specified. 
Available options:\n" - " - `-l'/`-L' for listing\n" - " - `-r' for running\n" - " - `--dry-run' for dry running\n" - " - `--list-tags' for listing unique test tags\n" - " - `--ci-generate' for generating a CI pipeline\n" - f"Try `{argparser.prog} -h' for more options.") - sys.exit(1) - # Manipulate ReFrame's environment if site_config.get('general/0/purge_environment'): rt.modules_system.unload_all() @@ -1296,7 +1432,7 @@ def _sort_testcases(testcases): # Load the environment for the current system try: - printer.debug(f'Loading environment for current system') + printer.debug('Loading environment for current system') runtime.loadenv(rt.system.preload_environ) except errors.EnvironError as e: printer.error("failed to load current system's environment; " @@ -1308,14 +1444,14 @@ def module_use(*paths): try: rt.modules_system.searchpath_add(*paths) except errors.EnvironError as e: - printer.warning(f'could not add module paths correctly') + printer.warning('could not add module paths correctly') printer.debug(str(e)) def module_unuse(*paths): try: rt.modules_system.searchpath_remove(*paths) except errors.EnvironError as e: - printer.warning(f'could not remove module paths correctly') + printer.warning('could not remove module paths correctly') printer.debug(str(e)) printer.debug('(Un)using module paths from command line') @@ -1393,54 +1529,64 @@ def module_unuse(*paths): exec_policy.sched_options = parsed_job_options if options.maxfail < 0: raise errors.CommandLineError( - f'--maxfail should be a non-negative integer: ' + '--maxfail should be a non-negative integer: ' f'{options.maxfail}' ) if options.reruns and options.duration: raise errors.CommandLineError( - f"'--reruns' option cannot be combined with '--duration'" + "'--reruns' option cannot be combined with '--duration'" ) if options.reruns < 0: raise errors.CommandLineError( - f"'--reruns' should be a non-negative integer: {options.reruns}" + "'--reruns' should be a non-negative integer: " + f"{options.reruns}" ) runner = Runner(exec_policy, printer, options.max_retries, options.maxfail, options.reruns, options.duration) try: time_start = time.time() - session_info['time_start'] = time.strftime( - '%FT%T%z', time.localtime(time_start), - ) runner.runall(testcases, restored_cases) finally: + # Build final JSON report time_end = time.time() - session_info['time_end'] = time.strftime( - '%FT%T%z', time.localtime(time_end) - ) - session_info['time_elapsed'] = time_end - time_start + report.update_timestamps(time_start, time_end) + report.update_run_stats(runner.stats) + if options.restore_session is not None: + report.update_restored_cases(restored_cases, restored_session) # Print a retry report if we did any retries if options.max_retries and runner.stats.failed(run=0): - printer.info(runner.stats.retry_report()) + printer.retry_report(report) # Print a failure report if we had failures in the last run success = True if runner.stats.failed(): success = False - runner.stats.print_failure_report( - printer, not options.distribute, - options.duration or options.reruns + printer.failure_report( + report, + rerun_info=not options.distribute, + global_stats=options.duration or options.reruns ) if options.failure_stats: - runner.stats.print_failure_stats( - printer, options.duration or options.reruns + printer.failure_stats( + report, global_stats=options.duration or options.reruns ) - if options.performance_report and not options.dry_run: - printer.info(runner.stats.performance_report()) + if (options.performance_report and + not 
options.dry_run and not report.is_empty()): + try: + data = reporting.performance_compare( + rt.get_option('general/0/perf_report_spec'), report + ) + except errors.ReframeError as err: + printer.warning( + f'failed to generate performance report: {err}' + ) + else: + printer.performance_report(data) # Generate the report for this session report_file = os.path.normpath( @@ -1450,44 +1596,47 @@ def module_unuse(*paths): if basedir: os.makedirs(basedir, exist_ok=True) - # Build final JSON report - run_stats = runner.stats.json() - session_info.update({ - 'num_cases': run_stats[0]['num_cases'], - 'num_failures': run_stats[-1]['num_failures'] - }) - json_report = { - 'session_info': session_info, - 'runs': run_stats, - 'restored_cases': [] - } - if options.restore_session is not None: - for c in restored_cases: - json_report['restored_cases'].append(report.case(*c)) + if (rt.get_option('general/0/generate_file_reports') and + not report.is_empty()): + # Save the report file + try: + default_loc = os.path.dirname( + osext.expandvars(rt.get_default('general/report_file')) + ) + report.save( + report_file, + compress=rt.get_option('general/0/compress_report'), + link_to_last=(default_loc == + os.path.dirname(report_file)) + ) + except OSError as e: + printer.warning( + f'failed to generate report in {report_file!r}: {e}' + ) + except errors.ReframeError as e: + printer.warning( + f'failed to create symlink to latest report: {e}' + ) - report_file = runreport.next_report_filename(report_file) - default_loc = os.path.dirname( - osext.expandvars(rt.get_default('general/report_file')) - ) - try: - runreport.write_report(json_report, report_file, - rt.get_option( - 'general/0/compress_report'), - os.path.dirname(report_file) == default_loc) - except OSError as e: - printer.warning( - f'failed to generate report in {report_file!r}: {e}' - ) + # Store the generated report for analytics + if not report.is_empty() and not options.dry_run: + try: + sess_uuid = report.store() + except Exception as e: + printer.warning( + f'failed to store results in the database: {e}' + ) + else: + printer.info('Current session stored with UUID: ' + f'{sess_uuid}') # Generate the junit xml report for this session junit_report_file = rt.get_option('general/0/report_junit') - if junit_report_file: + if junit_report_file and not report.is_empty(): # Expand variables in filename junit_report_file = osext.expandvars(junit_report_file) - junit_xml = runreport.junit_xml_report(json_report) try: - with open(junit_report_file, 'w') as fp: - runreport.junit_dump(junit_xml, fp) + report.save_junit(junit_report_file) except OSError as e: printer.warning( f'failed to generate report in {junit_report_file!r}: ' @@ -1519,9 +1668,8 @@ def module_unuse(*paths): finally: try: logging.getprofiler().exit_region() # region: 'test processing' - log_files = logging.log_files() if site_config.get('general/0/save_log_files'): - log_files = logging.save_log_files(rt.output_prefix) + logging.save_log_files(rt.output_prefix) except OSError as e: printer.error(f'could not save log file: {e}') sys.exit(1) diff --git a/reframe/frontend/executors/__init__.py b/reframe/frontend/executors/__init__.py index dd95746987..d96cbaeb72 100644 --- a/reframe/frontend/executors/__init__.py +++ b/reframe/frontend/executors/__init__.py @@ -6,6 +6,7 @@ import abc import contextlib import copy +import itertools import os import signal import sys @@ -24,15 +25,62 @@ ForceExitError, RunSessionTimeout, SkipTestError, + StatisticsError, TaskExit) from 
reframe.core.schedulers.local import LocalJobScheduler from reframe.frontend.printer import PrettyPrinter -from reframe.frontend.statistics import TestStats ABORT_REASONS = (AssertionError, FailureLimitError, KeyboardInterrupt, ForceExitError, RunSessionTimeout) +class TestStats: + '''Stores test case statistics.''' + + def __init__(self): + # Tasks per run stored as follows: [[run0_tasks], [run1_tasks], ...] + self._alltasks = [[]] + + def add_task(self, task): + current_run = runtime.runtime().current_run + if current_run == len(self._alltasks): + self._alltasks.append([]) + + self._alltasks[current_run].append(task) + + def runs(self): + for runid, tasks in enumerate(self._alltasks): + yield runid, tasks + + def tasks(self, run=-1): + if run is None: + yield from itertools.chain(*self._alltasks) + else: + try: + yield from self._alltasks[run] + except IndexError: + raise StatisticsError(f'no such run: {run}') from None + + def failed(self, run=-1): + return [t for t in self.tasks(run) if t.failed] + + def skipped(self, run=-1): + return [t for t in self.tasks(run) if t.skipped] + + def aborted(self, run=-1): + return [t for t in self.tasks(run) if t.aborted] + + def completed(self, run=-1): + return [t for t in self.tasks(run) if t.completed] + + def num_cases(self, run=-1): + return sum(1 for _ in self.tasks(run)) + + @property + def num_runs(self): + return len(self._alltasks) + + class TestCase: '''A combination of a regression check, a system partition and a programming environment. @@ -115,12 +163,10 @@ def generate_testcases(checks, prepare=False): '''Generate concrete test cases from checks. If `prepare` is true then each of the cases will also be prepared for - being sent to the test pipeline. Note that setting this to true may slow down - the test case generation. - + being sent to the test pipeline. Note that setting this to true may slow + down the test case generation. 
''' - rt = runtime.runtime() cases = [] for c in checks: valid_comb = runtime.valid_sysenv_comb(c.valid_systems, @@ -268,7 +314,8 @@ def failed_stage(self): @property def succeeded(self): - return self._current_stage in {'finalize', 'cleanup'} + return (self._current_stage in {'finalize', 'cleanup'} and + not self._failed_stage == 'cleanup') @property def completed(self): @@ -282,6 +329,19 @@ def aborted(self): def skipped(self): return self._skipped + @property + def result(self): + if self.succeeded: + return 'pass' + elif self.failed: + return 'fail' + elif self.aborted: + return 'abort' + elif self.skipped: + return 'skip' + else: + return '' + def _notify_listeners(self, callback_name): for l in self._listeners: callback = getattr(l, callback_name) @@ -349,7 +409,6 @@ def temp_dry_run(check): with temp_dry_run(self.check): return fn(*args, **kwargs) - @logging.time_function def setup(self, *args, **kwargs): self.testcase.prepare() @@ -586,9 +645,6 @@ def runall(self, testcases, restored_cases=None): runid = None if self._global_stats else -1 num_aborted = len(self._stats.aborted(runid)) num_failures = len(self._stats.failed(runid)) - num_completed = len(self._stats.completed(runid)) - num_skipped = len(self._stats.skipped(runid)) - num_tasks = self._stats.num_cases(runid) if num_failures > 0: status = 'FAILED' elif num_aborted > 0: diff --git a/reframe/frontend/printer.py b/reframe/frontend/printer.py index 789889e3d3..a8a88b91cd 100644 --- a/reframe/frontend/printer.py +++ b/reframe/frontend/printer.py @@ -3,9 +3,18 @@ # # SPDX-License-Identifier: BSD-3-Clause +import os +import shutil import time +import traceback +from tabulate import tabulate + import reframe.core.logging as logging +import reframe.core.runtime as rt import reframe.utility.color as color +from reframe.core.exceptions import SanityError +from reframe.frontend.reporting import format_testcase_from_json +from reframe.utility import nodelist_abbrev class PrettyPrinter: @@ -14,6 +23,8 @@ class PrettyPrinter: It takes care of formatting the progress output and adds some more cosmetics to specific levels of messages, such as warnings and errors. + It also takes care of formatting and printing the various reports. + The actual printing is delegated to an internal logger, which is responsible for printing. 
''' @@ -84,3 +95,185 @@ def __setattr__(self, attr, value): self.__dict__['colorize'] = value else: super().__setattr__(attr, value) + + def failure_report(self, report, rerun_info=True, global_stats=False): + '''Print a failure report''' + + def _head_n(filename, prefix, num_lines=10): + # filename and prefix are `None` before setup + if filename is None or prefix is None: + return [] + + try: + with open(os.path.join(prefix, filename)) as fp: + lines = [ + f'--- {filename} (first {num_lines} lines) ---' + ] + for i, line in enumerate(fp): + if i < num_lines: + # Remove trailing '\n' + lines.append(line.rstrip()) + + lines += [f'--- {filename} ---'] + except OSError as e: + lines = [f'--- {filename} ({e}) ---'] + + return lines + + def _print_failure_info(rec, runid, total_runs): + self.info(line_width * '-') + self.info(f"FAILURE INFO for {rec['name']} " + f"(run: {runid}/{total_runs})") + self.info(f" * Description: {rec['descr']}") + self.info(f" * System partition: {rec['system']}") + self.info(f" * Environment: {rec['environ']}") + self.info(f" * Stage directory: {rec['stagedir']}") + self.info(f" * Node list: " + f"{nodelist_abbrev(rec['job_nodelist'])}") + job_type = 'local' if rec['scheduler'] == 'local' else 'batch job' + self.info(f" * Job type: {job_type} (id={rec['jobid']})") + self.info(f" * Dependencies (conceptual): " + f"{rec['dependencies_conceptual']}") + self.info(f" * Dependencies (actual): " + f"{rec['dependencies_actual']}") + self.info(f" * Maintainers: {rec['maintainers']}") + self.info(f" * Failing phase: {rec['fail_phase']}") + if rerun_info and not rec['fixture']: + self.info(f" * Rerun with '-n /{rec['hashcode']}" + f" -p {rec['environ']} --system " + f"{rec['system']} -r'") + + msg = rec['fail_reason'] + if isinstance(rec['fail_info']['exc_value'], SanityError): + lines = [msg] + lines += _head_n(rec['job_stdout'], prefix=rec['stagedir']) + lines += _head_n(rec['job_stderr'], prefix=rec['stagedir']) + msg = '\n'.join(lines) + + self.info(f" * Reason: {msg}") + + tb = ''.join(traceback.format_exception( + *rec['fail_info'].values())) + if rec['fail_severe']: + self.info(tb) + else: + self.verbose(tb) + + line_width = min(80, shutil.get_terminal_size()[0]) + self.info(' SUMMARY OF FAILURES '.center(line_width, '=')) + + for run_no, run_info in enumerate(report['runs'], start=1): + if not global_stats and run_no != len(report['runs']): + continue + + for r in run_info['testcases']: + if r['result'] in {'pass', 'abort', 'skip'}: + continue + + _print_failure_info(r, run_no, len(report['runs'])) + + self.info(line_width * '-') + + def failure_stats(self, report, global_stats=False): + current_run = rt.runtime().current_run + failures = {} + for runid, run_data in enumerate(report['runs']): + if not global_stats and runid != current_run: + continue + + for tc in run_data['testcases']: + info = f'{tc["display_name"]}' + info += f' @{tc["system"]}:{tc["partition"]}+{tc["environ"]}' + + failed_stage = tc['fail_phase'] + failures.setdefault(failed_stage, []) + failures[failed_stage].append(info) + + line_width = shutil.get_terminal_size()[0] + stats_start = line_width * '=' + stats_title = 'FAILURE STATISTICS' + stats_end = line_width * '-' + stats_body = [] + row_format = "{:<13} {:<5} {}" + stats_hline = row_format.format(13*'-', 5*'-', 60*'-') + stats_header = row_format.format('Phase', '#', 'Failing test cases') + if global_stats: + num_tests = report['session_info']['num_cases'] + else: + num_tests = report['runs'][current_run]['num_cases'] + + num_failures = 0 + 
for fl in failures.values(): + num_failures += len(fl) + + stats_body = [''] + stats_body.append(f'Total number of test cases: {num_tests}') + stats_body.append(f'Total number of failures: {num_failures}') + stats_body.append('') + stats_body.append(stats_header) + stats_body.append(stats_hline) + for p, l in failures.items(): + stats_body.append(row_format.format(p, len(l), l[0])) + for f in l[1:]: + stats_body.append(row_format.format('', '', str(f))) + + if stats_body: + for line in (stats_start, stats_title, *stats_body, stats_end): + self.info(line) + + def retry_report(self, report): + '''Print a report for test retries''' + + if not rt.runtime().current_run: + # Do nothing if no retries + return + + line_width = min(80, shutil.get_terminal_size()[0]) + lines = ['', line_width * '='] + lines.append('SUMMARY OF RETRIES') + lines.append(line_width * '-') + retried_tc = set() + for run in reversed(report['runs'][1:]): + runidx = run['run_index'] + for tc in run['testcases']: + # Overwrite entry from previous run if available + tc_info = format_testcase_from_json(tc) + if tc_info not in retried_tc: + lines.append( + f" * Test {tc_info} was retried {runidx} time(s) and " + f" {'failed' if tc['result'] == 'fail' else 'passed'}." + ) + retried_tc.add(tc_info) + + self.info('\n'.join(lines)) + + def performance_report(self, data, **kwargs): + width = min(80, shutil.get_terminal_size()[0]) + self.info('') + self.info(' PERFORMANCE REPORT '.center(width, '=')) + self.info('') + self.table(data, **kwargs) + self.info('') + + def _table_as_csv(self, data): + for line in data: + self.info(','.join(str(x) for x in line)) + + def table(self, data, **kwargs): + '''Print tabular data''' + + table_format = rt.runtime().get_option('general/0/table_format') + if table_format == 'csv': + return self._table_as_csv(data) + + # Map our options to tabulate + if table_format == 'plain': + tablefmt = 'plain' + elif table_format == 'pretty': + tablefmt = 'mixed_outline' + else: + raise ValueError(f'invalid table format: {table_format}') + + kwargs.setdefault('headers', 'firstrow') + kwargs.setdefault('tablefmt', tablefmt) + self.info(tabulate(data, **kwargs)) diff --git a/reframe/frontend/reporting/__init__.py b/reframe/frontend/reporting/__init__.py new file mode 100644 index 0000000000..3c952f8770 --- /dev/null +++ b/reframe/frontend/reporting/__init__.py @@ -0,0 +1,728 @@ +# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import decimal +import functools +import inspect +import json +import jsonschema +import lxml.etree as etree +import math +import os +import re +import socket +import time +import uuid +from collections.abc import Hashable +from filelock import FileLock + +import reframe as rfm +import reframe.utility.jsonext as jsonext +import reframe.utility.osext as osext +from reframe.core.exceptions import ReframeError, what, is_severe, reraise_as +from reframe.core.logging import getlogger, _format_time_rfc3339, time_function +from reframe.core.runtime import runtime +from reframe.core.warnings import suppress_deprecations +from reframe.utility import nodelist_abbrev +from .storage import StorageBackend +from .utility import Aggregator, parse_cmp_spec, parse_time_period, is_uuid + +# The schema data version +# Major version bumps are expected to break the validation of previous schemas + +DATA_VERSION = '4.0' +_SCHEMA = os.path.join(rfm.INSTALL_PREFIX, 'reframe/schemas/runreport.json') +_DATETIME_FMT = r'%Y%m%dT%H%M%S%z' + + +def _format_sysenv(system, partition, environ): + return f'{system}:{partition}+{environ}' + + +def format_testcase_from_json(tc): + '''Format test case from its json representation''' + name = tc['name'] + system = tc['system'] + partition = tc['partition'] + environ = tc['environ'] + return f'{name} @{_format_sysenv(system, partition, environ)}' + + +def format_testcase(tc): + return format_testcase_from_json({'name': tc.check.name, + 'system': tc.check.current_system.name, + 'partition': tc.partition.name, + 'environ': tc.environ.name}) + + +class _RestoredSessionInfo: + '''A restored session with some additional functionality.''' + + def __init__(self, report): + self._report = report + self._fallbacks = [] # fallback reports + + # Index all runs by test case; if a test case has run multiple times, + # only the last time will be indexed + self._cases_index = {} + for run in self._report['runs']: + for tc in run['testcases']: + self._cases_index[format_testcase_from_json(tc)] = tc + + # Index also the restored cases + for tc in self._report['restored_cases']: + self._cases_index[format_testcase_from_json(tc)] = tc + + def __getitem__(self, key): + return self._report[key] + + def __getattr__(self, name): + with suppress_deprecations(): + return getattr(self._report, name) + + def add_fallback(self, report): + self._fallbacks.append(report) + + def slice(self, prop, when=None, unique=False): + '''Slice the report on property ``prop``.''' + + if unique: + returned = set() + + for tc in self._report['runs'][-1]['testcases']: + val = tc[prop] + if unique and val in returned: + continue + + if when is None: + if unique: + returned.add(val) + + yield val + elif tc[when[0]] == when[1]: + if unique: + returned.add(val) + + yield val + + def case(self, tc): + key = format_testcase(tc) + ret = self._cases_index.get(key) + if ret is None: + # Look up the case in the fallback reports + for rpt in self._fallbacks: + ret = rpt._cases_index.get(key) + if ret is not None: + break + + return ret + + def restore_dangling(self, graph): + '''Restore dangling dependencies in graph from the report data. + + Returns the updated graph. 
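+
+        The test cases that were restored are returned as well, since the
+        frontend passes them separately to the runner.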
+ ''' + + restored = [] + for tc, deps in graph.items(): + for d in deps: + if d not in graph: + restored.append(d) + self._do_restore(d) + + return graph, restored + + def _do_restore(self, testcase): + tc = self.case(testcase) + if tc is None: + raise ReframeError( + f'could not restore testcase {testcase!r}: ' + f'not found in the report files' + ) + + dump_file = os.path.join(tc['stagedir'], '.rfm_testcase.json') + try: + with open(dump_file) as fp: + testcase._check = jsonext.load(fp) + except (OSError, json.JSONDecodeError) as e: + raise ReframeError( + f'could not restore testcase {testcase!r}') from e + + +def _expand_report_filename(filepatt, *, newfile): + if '{sessionid}' not in os.fspath(filepatt): + return filepatt + + search_patt = os.path.basename(filepatt).replace('{sessionid}', r'(\d+)') + new_id = -1 + basedir = os.path.dirname(filepatt) or '.' + for filename in os.listdir(basedir): + match = re.match(search_patt, filename) + if match: + found_id = int(match.group(1)) + new_id = max(found_id, new_id) + + if newfile: + new_id += 1 + + return filepatt.format(sessionid=new_id) + + +def _restore_session(filename): + filename = _expand_report_filename(filename, newfile=False) + try: + with open(filename) as fp: + report = json.load(fp) + except OSError as e: + raise ReframeError( + f'failed to load report file {filename!r}') from e + except json.JSONDecodeError as e: + raise ReframeError( + f'report file {filename!r} is not a valid JSON file') from e + + # Validate the report + with open(_SCHEMA) as fp: + schema = json.load(fp) + + try: + jsonschema.validate(report, schema) + except jsonschema.ValidationError as e: + try: + found_ver = report['session_info']['data_version'] + except KeyError: + found_ver = 'n/a' + + getlogger().verbose(f'JSON validation error: {e}') + raise ReframeError( + f'failed to validate report {filename!r}: {e.args[0]} ' + f'(check report data version: required {DATA_VERSION}, ' + f'found: {found_ver})' + ) from None + + return _RestoredSessionInfo(report) + + +def restore_session(*filenames): + primary = filenames[0] + restored = _restore_session(primary) + + # Add fallback reports + for f in filenames[1:]: + restored.add_fallback(_restore_session(f)) + + return restored + + +class RunReport: + '''Internal representation of a run report + + This class provides direct access to the underlying report and provides + convenience functions for constructing a new report. 
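+
+    A rough sketch of how the frontend drives it (argument values are
+    illustrative)::
+
+        report = RunReport()
+        report.update_session_info({'cmdline': '...', 'user': '...'})
+        # ... run the selected test cases ...
+        report.update_timestamps(t_start, t_end)
+        report.update_run_stats(runner.stats)
+        report.save('report.json')
+        session_uuid = report.store()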
+ ''' + def __init__(self): + # Initialize the report with the required fields + self.__filename = None + self.__report = { + 'session_info': { + 'data_version': DATA_VERSION, + 'hostname': socket.gethostname(), + 'uuid': str(uuid.uuid4()) + }, + 'runs': [], + 'restored_cases': [] + } + now = time.time() + self.update_timestamps(now, now) + + @property + def filename(self): + return self.__filename + + def __getattr__(self, name): + return getattr(self.__report, name) + + def __getitem__(self, key): + return self.__report[key] + + def __rfm_json_encode__(self): + return self.__report + + def update_session_info(self, session_info): + # Remove timestamps + for key, val in session_info.items(): + if not key.startswith('time_'): + self.__report['session_info'][key] = val + + def update_restored_cases(self, restored_cases, restored_session): + self.__report['restored_cases'] = [restored_session.case(c) + for c in restored_cases] + + def update_timestamps(self, ts_start, ts_end): + self.__report['session_info'].update({ + 'time_start': time.strftime(_DATETIME_FMT, + time.localtime(ts_start)), + 'time_start_unix': ts_start, + 'time_end': time.strftime(_DATETIME_FMT, time.localtime(ts_end)), + 'time_end_unix': ts_end, + 'time_elapsed': ts_end - ts_start + }) + + def update_run_stats(self, stats): + session_uuid = self.__report['session_info']['uuid'] + for runidx, tasks in stats.runs(): + testcases = [] + num_failures = 0 + num_aborted = 0 + num_skipped = 0 + for tidx, t in enumerate(tasks): + # We take partition and environment from the test case and not + # from the check, since if the test fails before `setup()`, + # these are not set inside the check. + check, partition, environ = t.testcase + entry = { + 'build_jobid': None, + 'build_stderr': None, + 'build_stdout': None, + 'dependencies_actual': [ + (d.check.unique_name, + d.partition.fullname, d.environ.name) + for d in t.testcase.deps + ], + 'dependencies_conceptual': [ + d[0] for d in t.check.user_deps() + ], + 'environ': environ.name, + 'fail_phase': None, + 'fail_reason': None, + 'filename': inspect.getfile(type(check)), + 'fixture': check.is_fixture(), + 'job_completion_time': None, + 'job_completion_time_unix': None, + 'job_stderr': None, + 'job_stdout': None, + 'partition': partition.name, + 'result': t.result, + 'run_index': runidx, + 'scheduler': partition.scheduler.registered_name, + 'session_uuid': session_uuid, + 'time_compile': t.duration('compile_complete'), + 'time_performance': t.duration('performance'), + 'time_run': t.duration('run_complete'), + 'time_sanity': t.duration('sanity'), + 'time_setup': t.duration('setup'), + 'time_total': t.duration('total'), + 'uuid': f'{session_uuid}:{runidx}:{tidx}' + } + if check.job: + entry['job_stderr'] = check.stderr.evaluate() + entry['job_stdout'] = check.stdout.evaluate() + + if check.build_job: + entry['build_stderr'] = check.build_stderr.evaluate() + entry['build_stdout'] = check.build_stdout.evaluate() + + if t.failed: + num_failures += 1 + elif t.aborted: + num_aborted += 1 + elif t.skipped: + num_skipped += 1 + + if t.failed or t.aborted: + entry['fail_phase'] = t.failed_stage + if t.exc_info is not None: + entry['fail_reason'] = what(*t.exc_info) + entry['fail_info'] = { + 'exc_type': t.exc_info[0], + 'exc_value': t.exc_info[1], + 'traceback': t.exc_info[2] + } + entry['fail_severe'] = is_severe(*t.exc_info) + elif t.succeeded: + entry['outputdir'] = check.outputdir + + # Add any loggable variables and parameters + test_cls = type(check) + for name, alt_name in 
test_cls.loggable_attrs(): + if alt_name == 'partition' or alt_name == 'environ': + # We set those from the testcase + continue + + key = alt_name if alt_name else name + try: + with suppress_deprecations(): + entry[key] = getattr(check, name) + except AttributeError: + entry[key] = '' + + if entry['job_completion_time_unix']: + entry['job_completion_time'] = _format_time_rfc3339( + time.localtime(entry['job_completion_time_unix']), + '%FT%T%:z' + ) + + testcases.append(entry) + + self.__report['runs'].append({ + 'num_cases': len(tasks), + 'num_failures': num_failures, + 'num_aborted': num_aborted, + 'num_skipped': num_skipped, + 'run_index': runidx, + 'testcases': testcases + }) + + # Update session info from stats + self.__report['session_info'].update({ + 'num_cases': self.__report['runs'][0]['num_cases'], + 'num_failures': self.__report['runs'][-1]['num_failures'], + 'num_aborted': self.__report['runs'][-1]['num_aborted'], + 'num_skipped': self.__report['runs'][-1]['num_skipped'] + }) + + def _save(self, filename, compress, link_to_last): + filename = _expand_report_filename(filename, newfile=True) + with open(filename, 'w') as fp: + if compress: + jsonext.dump(self.__report, fp) + else: + jsonext.dump(self.__report, fp, indent=2) + fp.write('\n') + + self.__filename = filename + if not link_to_last: + return + + link_name = 'latest.json' + prefix, target_name = os.path.split(filename) + with osext.change_dir(prefix): + create_symlink = functools.partial(os.symlink, + target_name, link_name) + if not os.path.exists(link_name): + create_symlink() + else: + if os.path.islink(link_name): + os.remove(link_name) + create_symlink() + else: + raise ReframeError('path exists and is not a symlink') + + def is_empty(self): + '''Return :obj:`True` is no test cases where run''' + return self.__report['session_info']['num_cases'] == 0 + + def save(self, filename, compress=False, link_to_last=True): + prefix = os.path.dirname(filename) or '.' + with FileLock(os.path.join(prefix, '.report.lock')): + self._save(filename, compress, link_to_last) + + def store(self): + '''Store the report in the results storage.''' + + return StorageBackend.default().store(self, self.filename) + + def generate_xml_report(self): + '''Generate a JUnit report from a standard ReFrame JSON report.''' + + report = self.__report + xml_testsuites = etree.Element('testsuites') + # Create a XSD-friendly timestamp + session_ts = time.strftime( + r'%FT%T', time.localtime(report['session_info']['time_start_unix']) + ) + for run_id, rfm_run in enumerate(report['runs']): + xml_testsuite = etree.SubElement( + xml_testsuites, 'testsuite', + attrib={ + 'errors': '0', + 'failures': str(rfm_run['num_failures']), + 'hostname': report['session_info']['hostname'], + 'id': str(run_id), + 'name': f'ReFrame run {run_id}', + 'package': 'reframe', + 'tests': str(rfm_run['num_cases']), + 'time': str(report['session_info']['time_elapsed']), + 'timestamp': session_ts + } + ) + etree.SubElement(xml_testsuite, 'properties') + for tc in rfm_run['testcases']: + casename = f'{format_testcase_from_json(tc)}' + testcase = etree.SubElement( + xml_testsuite, 'testcase', + attrib={ + 'classname': tc['filename'], + 'name': casename, + + # XSD schema does not like the exponential format and + # since we do not want to impose a fixed width, we pass + # it to `Decimal` to format it automatically. 
+ 'time': str(decimal.Decimal(tc['time_total'] or 0)), + } + ) + if tc['result'] == 'fail': + fail_phase = tc['fail_phase'] + fail_reason = tc['fail_reason'] + testcase_msg = etree.SubElement( + testcase, 'failure', attrib={'type': 'failure', + 'message': fail_phase} + ) + testcase_msg.text = f"{tc['fail_phase']}: {fail_reason}" + + testsuite_stdout = etree.SubElement(xml_testsuite, 'system-out') + testsuite_stdout.text = '' + testsuite_stderr = etree.SubElement(xml_testsuite, 'system-err') + testsuite_stderr.text = '' + + return xml_testsuites + + def save_junit(self, filename): + with open(filename, 'w') as fp: + xml = self.generate_xml_report() + fp.write( + etree.tostring(xml, encoding='utf8', pretty_print=True, + method='xml', xml_declaration=True).decode() + ) + + +def _group_key(groups, testcase): + key = [] + for grp in groups: + with reraise_as(ReframeError, (KeyError,), 'no such group'): + val = testcase[grp] + + if grp == 'job_nodelist': + # Fold nodelist before adding as a key element + key.append(nodelist_abbrev(val)) + elif not isinstance(val, Hashable): + key.append(str(val)) + else: + key.append(val) + + return tuple(key) + + +@time_function +def _group_testcases(testcases, group_by, extra_cols): + grouped = {} + for tc in testcases: + for pvar, reftuple in tc['perfvalues'].items(): + pvar = pvar.split(':')[-1] + pval, pref, plower, pupper, punit = reftuple + plower = pref * (1 + plower) if plower is not None else -math.inf + pupper = pref * (1 + pupper) if pupper is not None else math.inf + record = { + 'pvar': pvar, + 'pval': pval, + 'pref': pref, + 'plower': plower, + 'pupper': pupper, + 'punit': punit, + **{k: tc[k] for k in group_by + extra_cols if k in tc} + } + key = _group_key(group_by, record) + grouped.setdefault(key, []) + grouped[key].append(record) + + return grouped + + +@time_function +def _aggregate_perf(grouped_testcases, aggr_fn, cols): + if runtime().get_option('general/0/table_format') == 'csv': + # Use a csv friendly delimiter + delim = '|' + else: + delim = '\n' + + other_aggr = Aggregator.create('join_uniq', delim) + aggr_data = {} + for key, seq in grouped_testcases.items(): + aggr_data.setdefault(key, {}) + aggr_data[key]['pval'] = aggr_fn(tc['pval'] for tc in seq) + with reraise_as(ReframeError, (KeyError,), 'no such column'): + for c in cols: + aggr_data[key][c] = other_aggr( + nodelist_abbrev(tc[c]) if c == 'job_nodelist' else tc[c] + for tc in seq + ) + + return aggr_data + + +@time_function +def compare_testcase_data(base_testcases, target_testcases, base_fn, target_fn, + extra_group_by=None, extra_cols=None): + extra_group_by = extra_group_by or [] + extra_cols = extra_cols or [] + group_by = (['name', 'system', 'partition', 'environ', 'pvar', 'punit'] + + extra_group_by) + + grouped_base = _group_testcases(base_testcases, group_by, extra_cols) + grouped_target = _group_testcases(target_testcases, group_by, extra_cols) + pbase = _aggregate_perf(grouped_base, base_fn, extra_cols) + ptarget = _aggregate_perf(grouped_target, target_fn, []) + + # Build the final table data + data = [['name', 'sysenv', 'pvar', 'pval', + 'punit', 'pdiff'] + extra_group_by + extra_cols] + for key, aggr_data in pbase.items(): + pval = aggr_data['pval'] + try: + target_pval = ptarget[key]['pval'] + except KeyError: + pdiff = 'n/a' + else: + if pval is None or target_pval is None: + pdiff = 'n/a' + else: + pdiff = (pval - target_pval) / target_pval + pdiff = '{:+7.2%}'.format(pdiff) + + name, system, partition, environ, pvar, punit, *extras = key + line = [name, 
_format_sysenv(system, partition, environ), + pvar, pval, punit, pdiff, *extras] + # Add the extra columns + line += [aggr_data[c] for c in extra_cols] + data.append(line) + + return data + + +@time_function +def performance_compare(cmp, report=None, namepatt=None): + with reraise_as(ReframeError, (ValueError,), + 'could not parse comparison spec'): + match = parse_cmp_spec(cmp) + + if match.period_base is None and match.session_base is None: + if report is None: + raise ValueError('report cannot be `None` ' + 'for current run comparisons') + try: + # Get the last retry from every test case + num_runs = len(report['runs']) + tcs_base = [] + for run in report['runs']: + run_idx = run['run_index'] + for tc in run['testcases']: + if tc['result'] != 'fail' or run_idx == num_runs - 1: + tcs_base.append(tc) + except IndexError: + tcs_base = [] + elif match.period_base is not None: + tcs_base = StorageBackend.default().fetch_testcases_time_period( + *match.period_base, namepatt + ) + else: + tcs_base = StorageBackend.default().fetch_testcases_from_session( + match.session_base, namepatt + ) + + if match.period_target: + tcs_target = StorageBackend.default().fetch_testcases_time_period( + *match.period_target, namepatt + ) + else: + tcs_target = StorageBackend.default().fetch_testcases_from_session( + match.session_target, namepatt + ) + + return compare_testcase_data(tcs_base, tcs_target, match.aggregator, + match.aggregator, match.extra_groups, + match.extra_cols) + + +@time_function +def session_data(time_period): + '''Retrieve all sessions''' + + data = [['UUID', 'Start time', 'End time', 'Num runs', 'Num cases']] + for sess_data in StorageBackend.default().fetch_sessions_time_period( + *parse_time_period(time_period) if time_period else (None, None) + ): + session_info = sess_data['session_info'] + data.append( + [session_info['uuid'], + session_info['time_start'], + session_info['time_end'], + len(sess_data['runs']), + len(sess_data['runs'][0]['testcases'])] + ) + + return data + + +@time_function +def testcase_data(spec, namepatt=None): + storage = StorageBackend.default() + if is_uuid(spec): + testcases = storage.fetch_testcases_from_session(spec, namepatt) + else: + testcases = storage.fetch_testcases_time_period( + *parse_time_period(spec), namepatt + ) + + data = [['Name', 'SysEnv', + 'Nodelist', 'Completion Time', 'Result', 'UUID']] + for tc in testcases: + ts_completed = tc['job_completion_time_unix'] + if not ts_completed: + completion_time = 'n/a' + else: + # Always format the completion time as users can set their own + # formatting in the log record + completion_time = time.strftime(_DATETIME_FMT, + time.localtime(ts_completed)) + + data.append([ + tc['name'], + _format_sysenv(tc['system'], tc['partition'], tc['environ']), + nodelist_abbrev(tc['job_nodelist']), + completion_time, + tc['result'], + tc['uuid'] + ]) + + return data + + +@time_function +def session_info(uuid): + '''Retrieve session details as JSON''' + + session = StorageBackend.default().fetch_session_json(uuid) + if not session: + raise ReframeError(f'no such session: {uuid}') + + return session + + +@time_function +def testcase_info(spec, namepatt=None): + '''Retrieve test case details as JSON''' + testcases = [] + if is_uuid(spec): + session_uuid, *tc_index = spec.split(':') + session = session_info(session_uuid) + if not tc_index: + for run in session['runs']: + testcases += run['testcases'] + else: + run_index, test_index = tc_index + testcases.append( + session['runs'][run_index]['testcases'][test_index] + ) + 
else: + testcases = StorageBackend.default().fetch_testcases_time_period( + *parse_time_period(spec), namepatt + ) + + return testcases + + +@time_function +def delete_session(session_uuid): + StorageBackend.default().remove_session(session_uuid) diff --git a/reframe/frontend/reporting/storage.py b/reframe/frontend/reporting/storage.py new file mode 100644 index 0000000000..87fed08720 --- /dev/null +++ b/reframe/frontend/reporting/storage.py @@ -0,0 +1,310 @@ +# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import abc +import os +import re +import sqlite3 +from filelock import FileLock + +import reframe.utility.jsonext as jsonext +import reframe.utility.osext as osext +from reframe.core.exceptions import ReframeError +from reframe.core.logging import getlogger, time_function, getprofiler +from reframe.core.runtime import runtime + + +class StorageBackend: + '''Abstract class that represents the results backend storage''' + + @classmethod + def create(cls, backend, *args, **kwargs): + '''Factory method for creating storage backends''' + if backend == 'sqlite': + return _SqliteStorage(*args, **kwargs) + else: + raise ReframeError(f'no such storage backend: {backend}') + + @classmethod + def default(cls): + '''Return default storage backend''' + return cls.create(runtime().get_option('storage/0/backend')) + + @abc.abstractmethod + def store(self, report, report_file): + '''Store the given report''' + + @abc.abstractmethod + def fetch_session_time_period(self, session_uuid): + '''Fetch the time period from specific session''' + + @abc.abstractmethod + def fetch_testcases_time_period(self, ts_start, ts_end): + '''Fetch all test cases from specified period''' + + +class _SqliteStorage(StorageBackend): + SCHEMA_VERSION = '1.0' + + def __init__(self): + self.__db_file = os.path.join( + osext.expandvars(runtime().get_option('storage/0/sqlite_db_file')) + ) + + def _db_file(self): + prefix = os.path.dirname(self.__db_file) + if not os.path.exists(self.__db_file): + # Create subdirs if needed + if prefix: + os.makedirs(prefix, exist_ok=True) + + self._db_create() + + self._db_schema_check() + return self.__db_file + + def _db_matches(self, patt, item): + if patt is None: + return True + + regex = re.compile(patt) + return regex.match(item) is not None + + def _db_create(self): + clsname = type(self).__name__ + getlogger().debug( + f'{clsname}: creating results database in {self.__db_file}...' 
+ ) + with sqlite3.connect(self.__db_file) as conn: + conn.execute('CREATE TABLE IF NOT EXISTS sessions(' + 'uuid TEXT PRIMARY KEY, ' + 'session_start_unix REAL, ' + 'session_end_unix REAL, ' + 'json_blob TEXT, ' + 'report_file TEXT)') + conn.execute('CREATE TABLE IF NOT EXISTS testcases(' + 'name TEXT,' + 'system TEXT, ' + 'partition TEXT, ' + 'environ TEXT, ' + 'job_completion_time_unix REAL, ' + 'session_uuid TEXT, ' + 'uuid TEXT, ' + 'FOREIGN KEY(session_uuid) ' + 'REFERENCES sessions(uuid) ON DELETE CASCADE)') + conn.execute('CREATE INDEX IF NOT EXISTS index_testcases_time ' + 'on testcases(job_completion_time_unix)') + conn.execute('CREATE TABLE IF NOT EXISTS metadata(' + 'schema_version TEXT)') + + def _db_schema_check(self): + with sqlite3.connect(self.__db_file) as conn: + results = conn.execute( + 'SELECT schema_version FROM metadata').fetchall() + + if not results: + # DB is new, insert the schema version + with sqlite3.connect(self.__db_file) as conn: + conn.execute('INSERT INTO metadata VALUES(:schema_version)', + {'schema_version': self.SCHEMA_VERSION}) + else: + found_ver = results[0][0] + if found_ver != self.SCHEMA_VERSION: + raise ReframeError( + f'results DB in {self.__db_file!r} is ' + 'of incompatible version: ' + f'found {found_ver}, required: {self.SCHEMA_VERSION}' + ) + + def _db_store_report(self, conn, report, report_file_path): + session_start_unix = report['session_info']['time_start_unix'] + session_end_unix = report['session_info']['time_end_unix'] + session_uuid = report['session_info']['uuid'] + conn.execute( + 'INSERT INTO sessions VALUES(' + ':uuid, :session_start_unix, :session_end_unix, ' + ':json_blob, :report_file)', + { + 'uuid': session_uuid, + 'session_start_unix': session_start_unix, + 'session_end_unix': session_end_unix, + 'json_blob': jsonext.dumps(report), + 'report_file': report_file_path + } + ) + for run in report['runs']: + for testcase in run['testcases']: + sys, part = testcase['system'], testcase['partition'] + conn.execute( + 'INSERT INTO testcases VALUES(' + ':name, :system, :partition, :environ, ' + ':job_completion_time_unix, ' + ':session_uuid, :uuid)', + { + 'name': testcase['name'], + 'system': sys, + 'partition': part, + 'environ': testcase['environ'], + 'job_completion_time_unix': testcase[ + 'job_completion_time_unix' + ], + 'session_uuid': session_uuid, + 'uuid': testcase['uuid'] + } + ) + + return session_uuid + + def store(self, report, report_file=None): + prefix = os.path.dirname(self.__db_file) + with sqlite3.connect(self._db_file()) as conn: + with FileLock(os.path.join(prefix, '.db.lock')): + return self._db_store_report(conn, report, report_file) + + @time_function + def _fetch_testcases_raw(self, condition): + getprofiler().enter_region('sqlite query') + with sqlite3.connect(self._db_file()) as conn: + query = ('SELECT session_uuid, testcases.uuid as uuid, json_blob ' + 'FROM testcases ' + 'JOIN sessions ON session_uuid == sessions.uuid ' + f'WHERE {condition}') + getlogger().debug(query) + + # Create SQLite function for filtering using name patterns + conn.create_function('REGEXP', 2, self._db_matches) + results = conn.execute(query).fetchall() + + getprofiler().exit_region() + + # Retrieve session info + sessions = {} + for session_uuid, uuid, json_blob in results: + sessions.setdefault(session_uuid, json_blob) + + # Join all sessions and decode them at once + reports_blob = '[' + ','.join(sessions.values()) + ']' + getprofiler().enter_region('json decode') + reports = jsonext.loads(reports_blob) + 
getprofiler().exit_region() + + # Reindex sessions with their decoded data + for rpt in reports: + sessions[rpt['session_info']['uuid']] = rpt + + # Extract the test case data + testcases = [] + for session_uuid, uuid, json_blob in results: + run_index, test_index = [int(x) for x in uuid.split(':')[1:]] + report = sessions[session_uuid] + testcases.append( + report['runs'][run_index]['testcases'][test_index], + ) + + return testcases + + @time_function + def fetch_session_time_period(self, session_uuid): + with sqlite3.connect(self._db_file()) as conn: + query = ('SELECT session_start_unix, session_end_unix ' + f'FROM sessions WHERE uuid == "{session_uuid}" ' + 'LIMIT 1') + getlogger().debug(query) + results = conn.execute(query).fetchall() + if results: + return results[0] + + return None, None + + @time_function + def fetch_testcases_time_period(self, ts_start, ts_end, name_pattern=None): + expr = (f'job_completion_time_unix >= {ts_start} AND ' + f'job_completion_time_unix <= {ts_end}') + if name_pattern: + expr += f' AND name REGEXP "{name_pattern}"' + + return self._fetch_testcases_raw( + f'({expr}) ORDER BY job_completion_time_unix' + ) + + @time_function + def fetch_testcases_from_session(self, session_uuid, name_pattern=None): + with sqlite3.connect(self._db_file()) as conn: + query = ('SELECT json_blob from sessions ' + f'WHERE uuid == "{session_uuid}"') + getlogger().debug(query) + results = conn.execute(query).fetchall() + + if not results: + return [] + + session_info = jsonext.loads(results[0][0]) + return [tc for run in session_info['runs'] for tc in run['testcases'] + if self._db_matches(name_pattern, tc['name'])] + + @time_function + def fetch_sessions_time_period(self, ts_start=None, ts_end=None): + with sqlite3.connect(self._db_file()) as conn: + query = 'SELECT json_blob from sessions' + if ts_start or ts_end: + query += ' WHERE (' + if ts_start: + query += f'session_start_unix >= {ts_start}' + + if ts_end: + query += f' AND session_start_unix <= {ts_end}' + + query += ')' + + query += ' ORDER BY session_start_unix' + getlogger().debug(query) + results = conn.execute(query).fetchall() + + if not results: + return [] + + return [jsonext.loads(json_blob) for json_blob, *_ in results] + + @time_function + def fetch_session_json(self, uuid): + with sqlite3.connect(self._db_file()) as conn: + query = f'SELECT json_blob FROM sessions WHERE uuid == "{uuid}"' + getlogger().debug(query) + results = conn.execute(query).fetchall() + + return jsonext.loads(results[0][0]) if results else {} + + def _do_remove(self, uuid): + prefix = os.path.dirname(self.__db_file) + with FileLock(os.path.join(prefix, '.db.lock')): + with sqlite3.connect(self._db_file()) as conn: + # Check first if the uuid exists + query = f'SELECT * FROM sessions WHERE uuid == "{uuid}"' + getlogger().debug(query) + if not conn.execute(query).fetchall(): + raise ReframeError(f'no such session: {uuid}') + + query = f'DELETE FROM sessions WHERE uuid == "{uuid}"' + getlogger().debug(query) + conn.execute(query) + + def _do_remove2(self, uuid): + '''Remove a session using the RETURNING keyword''' + prefix = os.path.dirname(self.__db_file) + with FileLock(os.path.join(prefix, '.db.lock')): + with sqlite3.connect(self._db_file()) as conn: + query = (f'DELETE FROM sessions WHERE uuid == "{uuid}" ' + 'RETURNING *') + getlogger().debug(query) + deleted = conn.execute(query).fetchall() + if not deleted: + raise ReframeError(f'no such session: {uuid}') + + @time_function + def remove_session(self, uuid): + if 
sqlite3.sqlite_version_info >= (3, 35, 0): + self._do_remove2(uuid) + else: + self._do_remove(uuid) diff --git a/reframe/frontend/reporting/utility.py b/reframe/frontend/reporting/utility.py new file mode 100644 index 0000000000..fa98f39c20 --- /dev/null +++ b/reframe/frontend/reporting/utility.py @@ -0,0 +1,210 @@ +# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. +# +# SPDX-License-Identifier: BSD-3-Clause + +import abc +import re +import statistics +import types +from collections import namedtuple +from datetime import datetime, timedelta +from numbers import Number +from .storage import StorageBackend + + +class Aggregator: + @classmethod + def create(cls, name, *args, **kwargs): + if name == 'first': + return AggrFirst(*args, **kwargs) + elif name == 'last': + return AggrLast(*args, **kwargs) + elif name == 'mean': + return AggrMean(*args, **kwargs) + elif name == 'median': + return AggrMedian(*args, **kwargs) + elif name == 'min': + return AggrMin(*args, **kwargs) + elif name == 'max': + return AggrMax(*args, **kwargs) + elif name == 'join_uniq': + return AggrJoinUniqueValues(*args, **kwargs) + else: + raise ValueError(f'unknown aggregation function: {name!r}') + + @abc.abstractmethod + def __call__(self, iterable): + pass + + +class AggrFirst(Aggregator): + def __call__(self, iterable): + for i, elem in enumerate(iterable): + if i == 0: + return elem + + +class AggrLast(Aggregator): + def __call__(self, iterable): + if not isinstance(iterable, types.GeneratorType): + return iterable[-1] + + for elem in iterable: + pass + + return elem + + +class AggrMean(Aggregator): + def __call__(self, iterable): + return statistics.mean(iterable) + + +class AggrMedian(Aggregator): + def __call__(self, iterable): + return statistics.median(iterable) + + +class AggrMin(Aggregator): + def __call__(self, iterable): + return min(iterable) + + +class AggrMax(Aggregator): + def __call__(self, iterable): + return max(iterable) + + +class AggrJoinUniqueValues(Aggregator): + def __init__(self, delim): + self.__delim = delim + + def __call__(self, iterable): + unique_vals = {str(elem) for elem in iterable} + return self.__delim.join(unique_vals) + + +def _parse_timestamp(s): + if isinstance(s, Number): + return s + + now = datetime.now() + + def _do_parse(s): + if s == 'now': + return now + + formats = [r'%Y%m%d', r'%Y%m%dT%H%M', + r'%Y%m%dT%H%M%S', r'%Y%m%dT%H%M%S%z'] + for fmt in formats: + try: + return datetime.strptime(s, fmt) + except ValueError: + continue + + raise ValueError(f'invalid timestamp: {s}') + + try: + ts = _do_parse(s) + except ValueError as err: + # Try the relative timestamps + match = re.match( + r'(?P.*)(?P[\+|-]\d+)(?P[mhdw])', s + ) + if not match: + raise err + + ts = _do_parse(match.group('ts')) + amount = int(match.group('amount')) + unit = match.group('unit') + if unit == 'w': + ts += timedelta(weeks=amount) + elif unit == 'd': + ts += timedelta(days=amount) + elif unit == 'h': + ts += timedelta(hours=amount) + elif unit == 'm': + ts += timedelta(minutes=amount) + + return ts.timestamp() + + +_UUID_PATTERN = re.compile(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}(:\d+)?(:\d+)?$') + + +def is_uuid(s): + '''Return true if `s` is a valid session, run or test case UUID''' + return _UUID_PATTERN.match(s) is not None + + +def parse_time_period(s): + if is_uuid(s): + # Retrieve the period of a full session + try: + session_uuid = s + except IndexError: + raise ValueError(f'invalid session uuid: 
{s}') from None + else: + backend = StorageBackend.default() + ts_start, ts_end = backend.fetch_session_time_period( + session_uuid + ) + if not ts_start or not ts_end: + raise ValueError(f'no such session: {session_uuid}') + else: + try: + ts_start, ts_end = s.split(':') + except ValueError: + raise ValueError(f'invalid time period spec: {s}') from None + + return _parse_timestamp(ts_start), _parse_timestamp(ts_end) + + +def _parse_extra_cols(s): + if s and not s.startswith('+'): + raise ValueError(f'invalid column spec: {s}') + + # Remove any empty columns + return [x for x in s.split('+')[1:] if x] + + +def _parse_aggregation(s): + try: + op, extra_groups = s.split(':') + except ValueError: + raise ValueError(f'invalid aggregate function spec: {s}') from None + + return Aggregator.create(op), _parse_extra_cols(extra_groups) + + +_Match = namedtuple('_Match', + ['period_base', 'period_target', + 'session_base', 'session_target', + 'aggregator', 'extra_groups', 'extra_cols']) + + +def parse_cmp_spec(spec): + def _parse_period_spec(s): + if s is None: + return None, None + + if is_uuid(s): + return s, None + + return None, parse_time_period(s) + + parts = spec.split('/') + if len(parts) == 3: + period_base, period_target, aggr, cols = None, *parts + elif len(parts) == 4: + period_base, period_target, aggr, cols = parts + else: + raise ValueError(f'invalid cmp spec: {spec}') + + session_base, period_base = _parse_period_spec(period_base) + session_target, period_target = _parse_period_spec(period_target) + aggr_fn, extra_groups = _parse_aggregation(aggr) + extra_cols = _parse_extra_cols(cols) + return _Match(period_base, period_target, session_base, session_target, + aggr_fn, extra_groups, extra_cols) diff --git a/reframe/frontend/runreport.py b/reframe/frontend/runreport.py deleted file mode 100644 index 47b59df0c7..0000000000 --- a/reframe/frontend/runreport.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) -# ReFrame Project Developers. See the top-level LICENSE file for details. 
-# -# SPDX-License-Identifier: BSD-3-Clause - -import decimal -import functools -import json -import jsonschema -import lxml.etree as etree -import os -import re - -import reframe as rfm -import reframe.core.exceptions as errors -import reframe.utility.jsonext as jsonext -import reframe.utility.osext as osext -from reframe.core.logging import getlogger -from reframe.core.warnings import suppress_deprecations - -# The schema data version -# Major version bumps are expected to break the validation of previous schemas - -DATA_VERSION = '3.1' -_SCHEMA = os.path.join(rfm.INSTALL_PREFIX, 'reframe/schemas/runreport.json') - - -class _RunReport: - '''A wrapper to the run report providing some additional functionality''' - - def __init__(self, report): - self._report = report - self._fallbacks = [] # fallback reports - - # Index all runs by test case; if a test case has run multiple times, - # only the last time will be indexed - self._cases_index = {} - for run in self._report['runs']: - for tc in run['testcases']: - c, p, e = tc['unique_name'], tc['system'], tc['environment'] - self._cases_index[c, p, e] = tc - - # Index also the restored cases - for tc in self._report['restored_cases']: - c, p, e = tc['unique_name'], tc['system'], tc['environment'] - self._cases_index[c, p, e] = tc - - def __getitem__(self, key): - return self._report[key] - - def __getattr__(self, name): - with suppress_deprecations(): - return getattr(self._report, name) - - def add_fallback(self, report): - self._fallbacks.append(report) - - def slice(self, prop, when=None, unique=False): - '''Slice the report on property ``prop``.''' - - if unique: - returned = set() - - for tc in self._report['runs'][-1]['testcases']: - val = tc[prop] - if unique and val in returned: - continue - - if when is None: - if unique: - returned.add(val) - - yield val - elif tc[when[0]] == when[1]: - if unique: - returned.add(val) - - yield val - - def case(self, check, part, env): - c, p, e = check.unique_name, part.fullname, env.name - ret = self._cases_index.get((c, p, e)) - if ret is None: - # Look up the case in the fallback reports - for rpt in self._fallbacks: - ret = rpt._cases_index.get((c, p, e)) - if ret is not None: - break - - return ret - - def restore_dangling(self, graph): - '''Restore dangling dependencies in graph from the report data. - - Returns the updated graph. - ''' - - restored = [] - for tc, deps in graph.items(): - for d in deps: - if d not in graph: - restored.append(d) - self._do_restore(d) - - return graph, restored - - def _do_restore(self, testcase): - tc = self.case(*testcase) - if tc is None: - raise errors.ReframeError( - f'could not restore testcase {testcase!r}: ' - f'not found in the report files' - ) - - dump_file = os.path.join(tc['stagedir'], '.rfm_testcase.json') - try: - with open(dump_file) as fp: - testcase._check = jsonext.load(fp) - except (OSError, json.JSONDecodeError) as e: - raise errors.ReframeError( - f'could not restore testcase {testcase!r}') from e - - -def next_report_filename(filepatt, new=True): - if '{sessionid}' not in filepatt: - return filepatt - - search_patt = os.path.basename(filepatt).replace('{sessionid}', r'(\d+)') - new_id = -1 - basedir = os.path.dirname(filepatt) or '.' 
- for filename in os.listdir(basedir): - match = re.match(search_patt, filename) - if match: - found_id = int(match.group(1)) - new_id = max(found_id, new_id) - - if new: - new_id += 1 - - return filepatt.format(sessionid=new_id) - - -def _load_report(filename): - try: - with open(filename) as fp: - report = json.load(fp) - except OSError as e: - raise errors.ReframeError( - f'failed to load report file {filename!r}') from e - except json.JSONDecodeError as e: - raise errors.ReframeError( - f'report file {filename!r} is not a valid JSON file') from e - - # Validate the report - with open(_SCHEMA) as fp: - schema = json.load(fp) - - try: - jsonschema.validate(report, schema) - except jsonschema.ValidationError as e: - try: - found_ver = report['session_info']['data_version'] - except KeyError: - found_ver = 'n/a' - - raise errors.ReframeError( - f'invalid report {filename!r} ' - f'(required data version: {DATA_VERSION}), found: {found_ver})' - ) from e - - return _RunReport(report) - - -def load_report(*filenames): - primary = filenames[0] - rpt = _load_report(primary) - - # Add fallback reports - for f in filenames[1:]: - rpt.add_fallback(_load_report(f)) - - return rpt - - -def write_report(report, filename, compress=False, link_to_last=False): - with open(filename, 'w') as fp: - if compress: - jsonext.dump(report, fp) - else: - jsonext.dump(report, fp, indent=2) - fp.write('\n') - - if not link_to_last: - return - - # Add a symlink to the latest report - basedir = os.path.dirname(filename) - with osext.change_dir(basedir): - link_name = 'latest.json' - create_symlink = functools.partial( - os.symlink, os.path.basename(filename), link_name - ) - if not os.path.exists(link_name): - create_symlink() - else: - if os.path.islink(link_name): - os.remove(link_name) - create_symlink() - else: - getlogger().warning('could not create a symlink ' - 'to the latest report file: ' - 'path exists and is not a symlink') - - -def junit_xml_report(json_report): - '''Generate a JUnit report from a standard ReFrame JSON report.''' - - xml_testsuites = etree.Element('testsuites') - for run_id, rfm_run in enumerate(json_report['runs']): - xml_testsuite = etree.SubElement( - xml_testsuites, 'testsuite', - attrib={ - 'errors': '0', - 'failures': str(rfm_run['num_failures']), - 'hostname': json_report['session_info']['hostname'], - 'id': str(run_id), - 'name': f'ReFrame run {run_id}', - 'package': 'reframe', - 'tests': str(rfm_run['num_cases']), - 'time': str(json_report['session_info']['time_elapsed']), - - # XSD schema does not like the timezone format, so we remove it - 'timestamp': json_report['session_info']['time_start'][:-5], - } - ) - testsuite_properties = etree.SubElement(xml_testsuite, 'properties') - for tc in rfm_run['testcases']: - casename = ( - f"{tc['unique_name']}[{tc['system']}, {tc['environment']}]" - ) - testcase = etree.SubElement( - xml_testsuite, 'testcase', - attrib={ - 'classname': tc['filename'], - 'name': casename, - - # XSD schema does not like the exponential format and since - # we do not want to impose a fixed width, we pass it to - # `Decimal` to format it automatically. 
- 'time': str(decimal.Decimal(tc['time_total'] or 0)), - } - ) - if tc['result'] == 'failure': - testcase_msg = etree.SubElement( - testcase, 'failure', attrib={'type': 'failure', - 'message': tc['fail_phase']} - ) - testcase_msg.text = f"{tc['fail_phase']}: {tc['fail_reason']}" - - testsuite_stdout = etree.SubElement(xml_testsuite, 'system-out') - testsuite_stdout.text = '' - testsuite_stderr = etree.SubElement(xml_testsuite, 'system-err') - testsuite_stderr.text = '' - - return xml_testsuites - - -def junit_dump(xml, fp): - fp.write( - etree.tostring(xml, encoding='utf8', pretty_print=True, - method='xml', xml_declaration=True).decode() - ) diff --git a/reframe/frontend/statistics.py b/reframe/frontend/statistics.py deleted file mode 100644 index 84b3d129cd..0000000000 --- a/reframe/frontend/statistics.py +++ /dev/null @@ -1,429 +0,0 @@ -# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) -# ReFrame Project Developers. See the top-level LICENSE file for details. -# -# SPDX-License-Identifier: BSD-3-Clause - -import inspect -import itertools -import os -import shutil -import traceback - -import reframe.core.runtime as rt -import reframe.core.exceptions as errors -import reframe.utility as util -from reframe.core.warnings import suppress_deprecations - - -def _getattr(obj, attr): - with suppress_deprecations(): - return getattr(obj, attr) - - -class TestStats: - '''Stores test case statistics.''' - - def __init__(self): - # Tasks per run stored as follows: [[run0_tasks], [run1_tasks], ...] - self._alltasks = [[]] - - # Data collected for all the runs of this session in JSON format - self._run_data = [] - - def add_task(self, task): - current_run = rt.runtime().current_run - if current_run == len(self._alltasks): - self._alltasks.append([]) - - self._alltasks[current_run].append(task) - - def tasks(self, run=-1): - if run is None: - yield from itertools.chain(*self._alltasks) - else: - try: - yield from self._alltasks[run] - except IndexError: - raise errors.StatisticsError(f'no such run: {run}') from None - - def failed(self, run=-1): - return [t for t in self.tasks(run) if t.failed] - - def skipped(self, run=-1): - return [t for t in self.tasks(run) if t.skipped] - - def aborted(self, run=-1): - return [t for t in self.tasks(run) if t.aborted] - - def completed(self, run=-1): - return [t for t in self.tasks(run) if t.completed] - - def num_cases(self, run=-1): - return sum(1 for _ in self.tasks(run)) - - @property - def num_runs(self): - return len(self._alltasks) - - def retry_report(self): - # Return an empty report if no retries were done. - if not rt.runtime().current_run: - return '' - - line_width = shutil.get_terminal_size()[0] - report = [line_width * '='] - report.append('SUMMARY OF RETRIES') - report.append(line_width * '-') - messages = {} - for run in range(1, len(self._alltasks)): - for t in self.tasks(run): - partition_name = '' - environ_name = '' - if t.check.current_partition: - partition_name = t.check.current_partition.fullname - - if t.check.current_environ: - environ_name = t.check.current_environ.name - - # Overwrite entry from previous run if available - key = f"{t.check.unique_name}:{partition_name}:{environ_name}" - messages[key] = ( - f" * Test {t.check.info()} was retried {run} time(s) and " - f"{'failed' if t.failed else 'passed'}." 
- ) - - for key in sorted(messages.keys()): - report.append(messages[key]) - - return '\n'.join(report) - - def json(self, force=False): - if not force and self._run_data: - return self._run_data - - for runid, run in enumerate(self._alltasks): - testcases = [] - num_failures = 0 - num_aborted = 0 - num_skipped = 0 - for t in run: - check = t.check - partition = check.current_partition - entry = { - 'build_stderr': None, - 'build_stdout': None, - 'dependencies_actual': [ - (d.check.unique_name, - d.partition.fullname, d.environ.name) - for d in t.testcase.deps - ], - 'dependencies_conceptual': [ - d[0] for d in t.check.user_deps() - ], - 'description': check.descr, - 'display_name': check.display_name, - 'environment': None, - 'fail_phase': None, - 'fail_reason': None, - 'filename': inspect.getfile(type(check)), - 'fixture': check.is_fixture(), - 'hash': check.hashcode, - 'jobid': None, - 'job_stderr': None, - 'job_stdout': None, - 'maintainers': check.maintainers, - 'name': check.name, - 'nodelist': [], - 'outputdir': None, - 'perfvars': None, - 'prefix': check.prefix, - 'result': None, - 'stagedir': check.stagedir, - 'scheduler': None, - 'system': check.current_system.name, - 'tags': list(check.tags), - 'time_compile': t.duration('compile_complete'), - 'time_performance': t.duration('performance'), - 'time_run': t.duration('run_complete'), - 'time_sanity': t.duration('sanity'), - 'time_setup': t.duration('setup'), - 'time_total': t.duration('total'), - 'unique_name': check.unique_name - } - - # We take partition and environment from the test case and not - # from the check, since if the test fails before `setup()`, - # these are not set inside the check. - partition = t.testcase.partition - environ = t.testcase.environ - entry['system'] = partition.fullname - entry['scheduler'] = partition.scheduler.registered_name - entry['environment'] = environ.name - if check.job: - entry['jobid'] = str(check.job.jobid) - entry['job_stderr'] = check.stderr.evaluate() - entry['job_stdout'] = check.stdout.evaluate() - entry['nodelist'] = check.job.nodelist or [] - - if check.build_job: - entry['build_stderr'] = check.build_stderr.evaluate() - entry['build_stdout'] = check.build_stdout.evaluate() - - if t.failed: - num_failures += 1 - entry['result'] = 'failure' - elif t.aborted: - entry['result'] = 'aborted' - num_aborted += 1 - - if t.failed or t.aborted: - entry['fail_phase'] = t.failed_stage - if t.exc_info is not None: - entry['fail_reason'] = errors.what(*t.exc_info) - entry['fail_info'] = { - 'exc_type': t.exc_info[0], - 'exc_value': t.exc_info[1], - 'traceback': t.exc_info[2] - } - entry['fail_severe'] = errors.is_severe(*t.exc_info) - elif t.skipped: - entry['result'] = 'skipped' - num_skipped += 1 - else: - entry['result'] = 'success' - entry['outputdir'] = check.outputdir - - if check.perfvalues: - # Record performance variables - entry['perfvars'] = [] - for key, ref in check.perfvalues.items(): - var = key.split(':')[-1] - val, ref, lower, upper, unit = ref - entry['perfvars'].append({ - 'name': var, - 'reference': ref, - 'thres_lower': lower, - 'thres_upper': upper, - 'unit': unit, - 'value': val - }) - - # Add any loggable variables and parameters - entry['check_vars'] = {} - test_cls = type(check) - for name, var in test_cls.var_space.items(): - if var.is_loggable(): - try: - entry['check_vars'][name] = _getattr(check, name) - except AttributeError: - entry['check_vars'][name] = '' - - entry['check_params'] = {} - test_cls = type(check) - for name, param in test_cls.param_space.items(): 
- if param.is_loggable(): - entry['check_params'][name] = _getattr(check, name) - - testcases.append(entry) - - self._run_data.append({ - 'num_cases': len(run), - 'num_failures': num_failures, - 'num_aborted': num_aborted, - 'num_skipped': num_skipped, - 'runid': runid, - 'testcases': testcases - }) - - return self._run_data - - def print_failure_report(self, printer, rerun_info=True, - global_stats=False): - def _head_n(filename, prefix, num_lines=10): - # filename and prefix are `None` before setup - if filename is None or prefix is None: - return [] - - try: - with open(os.path.join(prefix, filename)) as fp: - lines = [ - f'--- {filename} (first {num_lines} lines) ---' - ] - for i, line in enumerate(fp): - if i < num_lines: - # Remove trailing '\n' - lines.append(line.rstrip()) - - lines += [f'--- {filename} ---'] - except OSError as e: - lines = [f'--- {filename} ({e}) ---'] - - return lines - - def _print_failure_info(rec, runid, total_runs): - printer.info(line_width * '-') - printer.info(f"FAILURE INFO for {rec['display_name']} " - f"(run: {runid}/{total_runs})") - printer.info(f" * Description: {rec['description']}") - printer.info(f" * System partition: {rec['system']}") - printer.info(f" * Environment: {rec['environment']}") - printer.info(f" * Stage directory: {rec['stagedir']}") - printer.info( - f" * Node list: {util.nodelist_abbrev(rec['nodelist'])}" - ) - job_type = 'local' if rec['scheduler'] == 'local' else 'batch job' - printer.info(f" * Job type: {job_type} (id={rec['jobid']})") - printer.info(f" * Dependencies (conceptual): " - f"{rec['dependencies_conceptual']}") - printer.info(f" * Dependencies (actual): " - f"{rec['dependencies_actual']}") - printer.info(f" * Maintainers: {rec['maintainers']}") - printer.info(f" * Failing phase: {rec['fail_phase']}") - if rerun_info and not rec['fixture']: - printer.info(f" * Rerun with '-n /{rec['hash']}" - f" -p {rec['environment']} --system " - f"{rec['system']} -r'") - - msg = rec['fail_reason'] - if isinstance(rec['fail_info']['exc_value'], errors.SanityError): - lines = [msg] - lines += _head_n(rec['job_stdout'], prefix=rec['stagedir']) - lines += _head_n(rec['job_stderr'], prefix=rec['stagedir']) - msg = '\n'.join(lines) - - printer.info(f" * Reason: {msg}") - - tb = ''.join(traceback.format_exception( - *rec['fail_info'].values())) - if rec['fail_severe']: - printer.info(tb) - else: - printer.verbose(tb) - - line_width = shutil.get_terminal_size()[0] - printer.info(line_width * '=') - printer.info('SUMMARY OF FAILURES') - - run_report = self.json() - for run_no, run_info in enumerate(run_report, start=1): - if not global_stats and run_no != len(run_report): - continue - - for r in run_info['testcases']: - if r['result'] in {'success', 'aborted', 'skipped'}: - continue - - _print_failure_info(r, run_no, len(run_report)) - - printer.info(line_width * '-') - - def print_failure_stats(self, printer, global_stats=False): - if global_stats: - runid = None - else: - runid = rt.runtime().current_run - - failures = {} - for tf in (t for t in self.tasks(runid) if t.failed): - check, partition, environ = tf.testcase - info = f'[{check.display_name}]' - if partition: - info += f' @{partition.fullname}' - - if environ: - info += f'+{environ.name}' - - if tf.failed_stage not in failures: - failures[tf.failed_stage] = [] - - failures[tf.failed_stage].append(info) - - line_width = shutil.get_terminal_size()[0] - stats_start = line_width * '=' - stats_title = 'FAILURE STATISTICS' - stats_end = line_width * '-' - stats_body = [] - row_format = 
"{:<13} {:<5} {}" - stats_hline = row_format.format(13*'-', 5*'-', 60*'-') - stats_header = row_format.format('Phase', '#', 'Failing test cases') - num_tests = self.num_cases(runid) - num_failures = 0 - for fl in failures.values(): - num_failures += len(fl) - - stats_body = [''] - stats_body.append(f'Total number of test cases: {num_tests}') - stats_body.append(f'Total number of failures: {num_failures}') - stats_body.append('') - stats_body.append(stats_header) - stats_body.append(stats_hline) - for p, l in failures.items(): - stats_body.append(row_format.format(p, len(l), l[0])) - for f in l[1:]: - stats_body.append(row_format.format('', '', str(f))) - - if stats_body: - for line in (stats_start, stats_title, *stats_body, stats_end): - printer.info(line) - - def performance_report(self): - width = shutil.get_terminal_size()[0] - lines = ['', width*'=', 'PERFORMANCE REPORT', width*'-'] - - # Collect all the records from performance tests - perf_records = {} - for run in self.json(): - for tc in run['testcases']: - if tc['perfvars']: - key = tc['unique_name'] - perf_records.setdefault(key, []) - perf_records[key].append(tc) - - if not perf_records: - return '' - - interesting_vars = { - 'num_cpus_per_task', - 'num_gpus_per_node', - 'num_tasks', - 'num_tasks_per_core', - 'num_tasks_per_node', - 'num_tasks_per_socket', - 'use_multithreading' - } - - for testcases in perf_records.values(): - for tc in testcases: - name = tc['display_name'] - hash = tc['hash'] - env = tc['environment'] - part = tc['system'] - lines.append(f'[{name} /{hash} @{part}:{env}]') - for v in interesting_vars: - val = tc['check_vars'][v] - if val is not None: - lines.append(f' {v}: {val}') - - lines.append(' performance:') - for v in tc['perfvars']: - name = v['name'] - val = v['value'] - ref = v['reference'] - unit = v['unit'] - lthr = v['thres_lower'] - uthr = v['thres_upper'] - if lthr is not None: - lthr *= 100 - else: - lthr = '-inf' - - if uthr is not None: - uthr *= 100 - else: - uthr = 'inf' - - lines.append(f' - {name}: {val} {unit} ' - f'(r: {ref} {unit} l: {lthr}% u: +{uthr}%)') - - lines.append(width*'-') - return '\n'.join(lines) diff --git a/reframe/schemas/config.json b/reframe/schemas/config.json index 61d9c58e18..7183f787d0 100644 --- a/reframe/schemas/config.json +++ b/reframe/schemas/config.json @@ -120,6 +120,7 @@ "type": "array", "items": {"type": "string"} }, + "unqualified_hostnames": {"type": "boolean"}, "use_nodes_option": {"type": "boolean"} } }, @@ -504,6 +505,7 @@ "clean_stagedir": {"type": "boolean"}, "colorize": {"type": "boolean"}, "compress_report": {"type": "boolean"}, + "generate_file_reports": {"type": "boolean"}, "git_timeout": {"type": "number"}, "keep_stage_files": {"type": "boolean"}, "module_map_file": {"type": "string"}, @@ -514,6 +516,7 @@ "non_default_craype": {"type": "boolean"}, "dump_pipeline_progress": {"type": "boolean"}, "perf_info_level": {"$ref": "#/defs/loglevel"}, + "perf_report_spec": {"type": "string"}, "pipeline_timeout": {"type": ["number", "null"]}, "purge_environment": {"type": "boolean"}, "remote_detect": {"type": "boolean"}, @@ -523,6 +526,7 @@ "resolve_module_conflicts": {"type": "boolean"}, "save_log_files": {"type": "boolean"}, "target_systems": {"$ref": "#/defs/system_ref"}, + "table_format": {"enum": ["csv", "plain", "pretty"]}, "timestamp_dirs": {"type": "string"}, "trap_job_errors": {"type": "boolean"}, "unload_modules": {"$ref": "#/defs/modules_list"}, @@ -532,6 +536,16 @@ }, "additionalProperties": false } + }, + "storage": { + "type": "array", + 
"items": { + "type": "object", + "properties": { + "backend": {"type": "string"}, + "sqlite_db_file": {"type": "string"} + } + } } }, "required": ["systems", "environments", "logging"], @@ -560,12 +574,14 @@ "general/clean_stagedir": true, "general/colorize": true, "general/compress_report": false, + "general/generate_file_reports": true, "general/git_timeout": 5, "general/keep_stage_files": false, "general/module_map_file": "", "general/module_mappings": [], "general/non_default_craype": false, "general/perf_info_level": "info", + "general/perf_report_spec": "19700101T0000+0000:now/last:+job_nodelist/+result", "general/purge_environment": false, "general/remote_detect": false, "general/remote_workdir": ".", @@ -573,8 +589,9 @@ "general/report_junit": null, "general/resolve_module_conflicts": true, "general/save_log_files": false, + "general/table_format": "pretty", "general/target_systems": ["*"], - "general/timestamp_dirs": "", + "general/timestamp_dirs": "%Y%m%dT%H%M%S%z", "general/trap_job_errors": false, "general/unload_modules": [], "general/use_login_shell": false, @@ -606,6 +623,8 @@ "logging/handlers_perflog/httpjson_debug": false, "modes/options": [], "modes/target_systems": ["*"], + "storage/backend": "sqlite", + "storage/sqlite_db_file": "${HOME}/.reframe/reports/results.db", "systems/descr": "", "systems/max_local_jobs": 8, "systems/modules_system": "nomod", @@ -641,6 +660,7 @@ "systems*/sched_options/ignore_reqnodenotavail": false, "systems*/sched_options/job_submit_timeout": 60, "systems*/sched_options/resubmit_on_errors": [], + "systems*/sched_options/unqualified_hostnames": false, "systems*/sched_options/use_nodes_option": false } } diff --git a/reframe/schemas/runreport.json b/reframe/schemas/runreport.json index 7c1efca680..a216a5cd1b 100644 --- a/reframe/schemas/runreport.json +++ b/reframe/schemas/runreport.json @@ -6,10 +6,9 @@ "testcase_type": { "type": "object", "properties": { + "build_jobid": {"type": ["string", "null"]}, "build_stderr": {"type": ["string", "null"]}, "build_stdout": {"type": ["string", "null"]}, - "check_params": {"type": "object"}, - "check_vars": {"type": "object"}, "dependencies_actual": { "type": "array", "items": { @@ -23,9 +22,7 @@ "type": "array", "items": {"type": "string"} }, - "description": {"type": "string"}, - "display_name": {"type": "string"}, - "environment": {"type": ["string", "null"]}, + "environ": {"type": ["string", "null"]}, "fail_info": { "type": ["object", "null"], "properties": { @@ -43,65 +40,30 @@ "fail_severe": {"type": "boolean"}, "filename": {"type": "string"}, "fixture": {"type": "boolean"}, - "jobid": {"type": ["string", "null"]}, + "job_completion_time": {"type": ["string", "null"]}, + "job_completion_time_unix": {"type": ["number", "null"]}, "job_stderr": {"type": ["string", "null"]}, "job_stdout": {"type": ["string", "null"]}, - "maintainers": { - "type": "array", - "items": {"type": "string"} - }, "name": {"type": "string"}, - "nodelist": { - "type": "array", - "items": {"type": "string"} - }, "outputdir": {"type": ["string", "null"]}, - "perfvars": { - "type": ["array", "null"], - "items": { - "type": "object", - "properties": { - "name": {"type": "string"}, - "reference": { - "type": ["number", "null"] - }, - "thres_lower": { - "type": ["number", "null"] - }, - "thres_upper": { - "type": ["number", "null"] - }, - "unit": {"type": ["string", "null"]}, - "value": {"type": "number"} - }, - "required": [ - "name", "reference", - "thres_lower", "thres_upper", - "unit", "value" - ] - } - }, - "prefix": {"type": 
"string"}, - "result": { - "type": "string", - "enum": ["success", "failure", "aborted", "skipped"] - }, - "scheduler": {"type": ["string", "null"]}, - "stagedir": {"type": ["string", "null"]}, + "perfvalues": {"type": "object"}, + "partition": {"type": ["string", "null"]}, + "result": {"type": "string"}, + "scheduler": {"type": "string"}, "system": {"type": "string"}, - "tags": { - "type": "array", - "items": {"type": "string"} - }, "time_compile": {"type": ["number", "null"]}, "time_performance": {"type": ["number", "null"]}, "time_run": {"type": ["number", "null"]}, "time_sanity": {"type": ["number", "null"]}, "time_setup": {"type": ["number", "null"]}, "time_total": {"type": ["number", "null"]}, - "unique_name": {"type": "string"} + "unique_name": {"type": "string"}, + "uuid": {"type": "string"} }, - "required": ["environment", "stagedir", "system", "unique_name"] + "required": ["environ", "fail_phase", "fail_reason", "filename", + "job_completion_time_unix", "name", "perfvalues", + "partition", "result", "system", "time_total", + "unique_name"] } }, "type": "object", @@ -123,16 +85,23 @@ "num_cases": {"type": "number"}, "num_failures": {"type": "number"}, "num_aborted": {"type": "number"}, + "num_skipped": {"type": "number"}, "prefix_output": {"type": "string"}, "prefix_stage": {"type": "string"}, + "session_uuid": {"type": "string"}, "time_elapsed": {"type": "number"}, "time_end": {"type": "string"}, + "time_end_unix": {"type": "number"}, "time_start": {"type": "string"}, + "time_start_unix": {"type": "number"}, "user": {"type": "string"}, + "uuid": {"type": "string"}, "version": {"type": "string"}, "workdir": {"type": "string"} }, - "required": ["data_version"] + "required": ["data_version", "hostname", + "time_elapsed", "time_end_unix", "time_start_unix", + "uuid"] }, "restored_cases": { "type": "array", @@ -143,16 +112,17 @@ "items": { "type": "object", "properties": { + "num_aborted": {"type": "number"}, "num_cases": {"type": "number"}, "num_failures": {"type": "number"}, - "num_aborted": {"type": "number"}, - "runid": {"type": "number"}, + "num_skipped": {"type": "number"}, + "run_index": {"type": "number"}, "testcases": { "type": "array", "items": {"$ref": "#/defs/testcase_type"} } }, - "required": ["testcases"] + "required": ["num_cases", "num_failures", "testcases"] } } }, diff --git a/requirements.txt b/requirements.txt index 0e9fc5409b..790dd8cc60 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,9 @@ archspec==0.2.4 argcomplete==3.1.2; python_version < '3.8' argcomplete==3.4.0; python_version >= '3.8' +filelock==3.4.1; python_version == '3.6' +filelock==3.12.2; python_version == '3.7' +filelock==3.15.4; python_version >= '3.8' importlib_metadata==4.0.1; python_version < '3.8' jsonschema==3.2.0 lxml==5.2.0; python_version < '3.8' and platform_machine == 'aarch64' @@ -22,5 +25,7 @@ semver==3.0.2; python_version >= '3.7' setuptools==59.6.0; python_version == '3.6' setuptools==68.0.0; python_version == '3.7' setuptools==72.1.0; python_version >= '3.8' +tabulate==0.8.10; python_version == '3.6' +tabulate==0.9.0; python_version >= '3.7' wcwidth==0.2.13 #+pygelf%pygelf==0.4.0 diff --git a/setup.cfg b/setup.cfg index cbbf1b17e4..3dc33868e7 100644 --- a/setup.cfg +++ b/setup.cfg @@ -29,6 +29,9 @@ install_requires = archspec >= 0.2.4 argcomplete argcomplete <= 3.1.2; python_version < '3.8' + filelock + filelock<=3.12.2; python_version == '3.7' + filelock<=3.4.1; python_version == '3.6' jsonschema lxml==5.2.0; python_version < '3.8' and platform_machine == 'aarch64' 
lxml==5.2.2; python_version >= '3.8' or platform_machine != 'aarch64' @@ -37,6 +40,8 @@ install_requires = requests <= 2.27.1; python_version == '3.6' semver semver <= 2.13.0; python_version == '3.6' + tabulate + tabulate <= 0.8.10; python_version == '3.6' [options.packages.find] include = reframe,reframe.*,hpctestlib.* @@ -45,4 +50,5 @@ include = reframe,reframe.*,hpctestlib.* reframe = schemas/* [flake8] -ignore = E129,E221,E226,E241,E402,E272,E741,E742,E743,W504 +extend-ignore = E129,E221,E226,E241,E402,E272,E741,E742,E743,F821,W504 +exclude = .git,__pycache__,docs/conf.py,external diff --git a/unittests/conftest.py b/unittests/conftest.py index 1a2034f560..711716d174 100644 --- a/unittests/conftest.py +++ b/unittests/conftest.py @@ -14,7 +14,11 @@ import reframe.core.settings as settings import reframe.core.runtime as rt +import reframe.frontend.dependencies as dependencies +import reframe.frontend.executors as executors +import reframe.frontend.executors.policies as policies import reframe.utility as util +from reframe.frontend.loader import RegressionCheckLoader from .utility import TEST_CONFIG_FILE @@ -80,6 +84,78 @@ def _make_exec_ctx(*args, **kwargs): yield _make_exec_ctx +@pytest.fixture +def common_exec_ctx(make_exec_ctx_g): + '''Execution context for the default generic system.''' + yield from make_exec_ctx_g(system='generic') + + +@pytest.fixture +def testsys_exec_ctx(make_exec_ctx_g): + '''Execution context for the `testsys:gpu` system.''' + yield from make_exec_ctx_g(system='testsys:gpu') + + +@pytest.fixture +def make_loader(): + '''Test loader''' + def _make_loader(check_search_path, *args, **kwargs): + return RegressionCheckLoader(check_search_path, *args, **kwargs) + + return _make_loader + + +@pytest.fixture(params=[policies.SerialExecutionPolicy, + policies.AsynchronousExecutionPolicy]) +def make_runner(request): + '''Test runner with all the execution policies''' + + def _make_runner(*args, **kwargs): + # Use a much higher poll rate for the unit tests + policy = request.param() + policy._pollctl.SLEEP_MIN = 0.001 + return executors.Runner(policy, *args, **kwargs) + + return _make_runner + + +@pytest.fixture +def make_async_runner(): + def _make_runner(*args, **kwargs): + policy = policies.AsynchronousExecutionPolicy() + policy._pollctl.SLEEP_MIN = 0.001 + return executors.Runner(policy, *args, **kwargs) + + return _make_runner + + +@pytest.fixture +def make_cases(make_loader): + def _make_cases(checks=None, sort=False, *args, **kwargs): + if checks is None: + checks = make_loader( + ['unittests/resources/checks'], *args, **kwargs + ).load_all(force=True) + + cases = executors.generate_testcases(checks) + if sort: + depgraph, _ = dependencies.build_deps(cases) + dependencies.validate_deps(depgraph) + cases = dependencies.toposort(depgraph) + + return cases + + return _make_cases + + +@pytest.fixture +def cases_with_deps(make_loader, make_cases): + checks = make_loader( + ['unittests/resources/checks_unlisted/deps_complex.py'] + ).load_all() + return make_cases(checks, sort=True) + + @pytest.fixture def make_config_file(tmp_path): '''Create a temporary configuration file from the given configuration. 
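Purely as an illustration, here is a minimal, hypothetical sketch of how the shared fixtures added to unittests/conftest.py above (common_exec_ctx, make_runner, make_cases) could be combined in a test module. The sketch is not part of the patch and the test name is invented; since make_runner is parametrized over the serial and asynchronous execution policies, the body runs once per policy.

    # Hypothetical usage sketch of the conftest.py fixtures above;
    # not part of the patch itself.
    def test_run_default_checks(common_exec_ctx, make_runner, make_cases):
        # Load all checks from unittests/resources/checks (the default
        # search path of make_cases) and run them with the current policy.
        cases = make_cases()
        assert cases
        runner = make_runner()
        runner.runall(cases)
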
diff --git a/unittests/test_argparser.py b/unittests/test_argparser.py index 6647ed1e19..6be36f3836 100644 --- a/unittests/test_argparser.py +++ b/unittests/test_argparser.py @@ -7,7 +7,7 @@ import reframe.core.runtime as rt import unittests.utility as test_util -from reframe.frontend.argparse import ArgumentParser +from reframe.frontend.argparse import ArgumentParser, CONST_DEFAULT @pytest.fixture @@ -118,7 +118,7 @@ def extended_parser(): default='bar' ) foo_options.add_argument( - '--timestamp', action='store', + '--timestamp', action='store', nargs='?', const=CONST_DEFAULT, envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs' ) foo_options.add_argument( @@ -143,17 +143,14 @@ def extended_parser(): def test_option_precedence(default_exec_ctx, extended_parser): with rt.temp_environment(env_vars={ - 'RFM_TIMESTAMP': '%F', 'RFM_NON_DEFAULT_CRAYPE': 'yes', 'RFM_MODULES_PRELOAD': 'a,b,c', 'RFM_CHECK_SEARCH_PATH': 'x:y:z' }): - options = extended_parser.parse_args( - ['--timestamp=%FT%T', '--nocolor'] - ) + options = extended_parser.parse_args(['--nocolor', '--timestamp']) assert options.recursive is None - assert options.timestamp == '%FT%T' + assert options.timestamp is CONST_DEFAULT assert options.non_default_craype is True assert options.config_file is None assert options.prefix is None @@ -165,19 +162,17 @@ def test_option_precedence(default_exec_ctx, extended_parser): def test_option_with_config(default_exec_ctx, extended_parser, tmp_path): with rt.temp_environment(env_vars={ - 'RFM_TIMESTAMP': '%F', + 'RFM_TIMESTAMP_DIRS': r'%F', 'RFM_NON_DEFAULT_CRAYPE': 'yes', 'RFM_MODULES_PRELOAD': 'a,b,c', 'RFM_KEEP_STAGE_FILES': 'no', 'RFM_GIT_TIMEOUT': '0.3' }): site_config = rt.runtime().site_config - options = extended_parser.parse_args( - ['--timestamp=%FT%T', '--nocolor'] - ) + options = extended_parser.parse_args(['--nocolor', '--timestamp']) options.update_config(site_config) assert site_config.get('general/0/check_search_recursive') is False - assert site_config.get('general/0/timestamp_dirs') == '%FT%T' + assert site_config.get('general/0/timestamp_dirs') == r'%F' assert site_config.get('general/0/non_default_craype') is True assert site_config.get('systems/0/prefix') == str(tmp_path) assert site_config.get('general/0/colorize') is False @@ -208,3 +203,10 @@ def test_envvar_option(default_exec_ctx, extended_parser): def test_envvar_option_default_val(default_exec_ctx, extended_parser): options = extended_parser.parse_args([]) assert options.env_option == 'bar' + + +def test_suppress_required(argparser): + group = argparser.add_mutually_exclusive_group(required=True) + group.add_argument('--foo', action='store_true') + group.add_argument('--bar', action='store_true') + argparser.parse_args([], suppress_required=True) diff --git a/unittests/test_cli.py b/unittests/test_cli.py index 7fc8edd9fd..c5f517819e 100644 --- a/unittests/test_cli.py +++ b/unittests/test_cli.py @@ -17,7 +17,7 @@ import reframe.core.environments as env import reframe.core.logging as logging import reframe.core.runtime as rt -import reframe.frontend.runreport as runreport +import reframe.frontend.reporting as reporting import reframe.utility.osext as osext import unittests.utility as test_util from reframe import INSTALL_PREFIX @@ -107,6 +107,8 @@ def _run_reframe(system='generic:default', argv += ['-h'] elif action == 'describe': argv += ['--describe'] + else: + argv += [action] if perflogdir: argv += ['--perflogdir', perflogdir] @@ -164,11 +166,10 @@ def test_check_restore_session_failed(run_reframe, 
tmp_path): run_reframe( checkpath=['unittests/resources/checks_unlisted/deps_complex.py'] ) - returncode, stdout, _ = run_reframe( - checkpath=[], - more_options=['--restore-session', '--failed'] + run_reframe(checkpath=[], more_options=['--restore-session', '--failed']) + report = reporting.restore_session( + f'{tmp_path}/.reframe/reports/latest.json' ) - report = runreport.load_report(f'{tmp_path}/.reframe/reports/latest.json') assert set(report.slice('name', when=('fail_phase', 'sanity'))) == {'T2'} assert set(report.slice('name', when=('fail_phase', 'startup'))) == {'T7', 'T9'} @@ -184,11 +185,10 @@ def test_check_restore_session_succeeded_test(run_reframe, tmp_path): checkpath=['unittests/resources/checks_unlisted/deps_complex.py'], more_options=['--keep-stage-files'] ) - returncode, stdout, _ = run_reframe( - checkpath=[], - more_options=['--restore-session', '-n', 'T1'] + run_reframe(checkpath=[], more_options=['--restore-session', '-n', 'T1']) + report = reporting.restore_session( + f'{tmp_path}/.reframe/reports/latest.json' ) - report = runreport.load_report(f'{tmp_path}/.reframe/reports/latest.json') assert report['runs'][-1]['num_cases'] == 1 assert report['runs'][-1]['testcases'][0]['name'] == 'T1' @@ -201,7 +201,7 @@ def test_check_restore_session_check_search_path(run_reframe, tmp_path): checkpath=['unittests/resources/checks_unlisted/deps_complex.py'] ) returncode, stdout, _ = run_reframe( - checkpath=[f'foo/'], + checkpath=['foo/'], more_options=['--restore-session', '-n', 'T1', '-R'], action='list' ) @@ -421,17 +421,20 @@ def test_perflogdir_from_env(run_reframe, tmp_path, monkeypatch): def test_performance_report(run_reframe, run_action): - returncode, stdout, _ = run_reframe( + returncode, stdout, stderr = run_reframe( checkpath=['unittests/resources/checks/frontend_checks.py'], more_options=['-n', '^PerformanceFailureCheck', '--performance-report'], action=run_action ) + if run_action == 'run': - assert r'PERFORMANCE REPORT' in stdout - assert r'perf: 10 Gflop/s' in stdout + assert returncode == 1 else: - assert r'PERFORMANCE REPORT' not in stdout + assert returncode == 0 + + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr def test_skip_system_check_option(run_reframe, run_action): @@ -521,6 +524,7 @@ def test_execution_modes(run_reframe, run_action): returncode, stdout, stderr = run_reframe( mode='unittest', action=run_action ) + assert returncode == 0 assert 'Traceback' not in stdout assert 'Traceback' not in stderr assert 'FAILED' not in stdout @@ -563,7 +567,7 @@ def test_timestamp_option_default(run_reframe): assert returncode == 0 matches = re.findall( - r'(stage|output) directory: .*\/(\d{6}T\d{6}\+\d{4})', stdout + r'(stage|output) directory: .*\/(\d{8}T\d{6}\+\d{4})', stdout ) assert len(matches) == 2 @@ -739,7 +743,7 @@ def test_filtering_by_expr(run_reframe): def test_show_config_all(run_reframe): # Just make sure that this option does not make the frontend crash returncode, stdout, stderr = run_reframe( - more_options=['--show-config'], + action='--show-config', system='testsys' ) assert 'Traceback' not in stdout @@ -750,7 +754,7 @@ def test_show_config_all(run_reframe): def test_show_config_param(run_reframe): # Just make sure that this option does not make the frontend crash returncode, stdout, stderr = run_reframe( - more_options=['--show-config=systems'], + action='--show-config=systems', system='testsys' ) assert 'Traceback' not in stdout @@ -761,7 +765,7 @@ def test_show_config_param(run_reframe): def 
test_show_config_unknown_param(run_reframe): # Just make sure that this option does not make the frontend crash returncode, stdout, stderr = run_reframe( - more_options=['--show-config=foo'], + action='--show-config=foo', system='testsys' ) assert 'no such configuration parameter found' in stdout @@ -772,7 +776,7 @@ def test_show_config_unknown_param(run_reframe): def test_show_config_null_param(run_reframe): returncode, stdout, stderr = run_reframe( - more_options=['--show-config=general/report_junit'], + action='--show-config=general/report_junit', system='testsys' ) assert 'null' in stdout @@ -923,7 +927,7 @@ def test_failure_stats(run_reframe, run_action): else: assert returncode != 0 assert r'FAILURE STATISTICS' in stdout - assert r'sanity 1 [SanityFailureCheck' in stdout + assert r'sanity 1 SanityFailureCheck' in stdout def test_maxfail_option(run_reframe): @@ -1103,9 +1107,7 @@ def test_exec_order(run_reframe, exec_order): def test_detect_host_topology(run_reframe): from reframe.utility.cpuinfo import cpuinfo - returncode, stdout, stderr = run_reframe( - more_options=['--detect-host-topology'] - ) + returncode, stdout, stderr = run_reframe(action='--detect-host-topology') assert 'Traceback' not in stdout assert 'Traceback' not in stderr assert returncode == 0 @@ -1117,7 +1119,7 @@ def test_detect_host_topology_file(run_reframe, tmp_path): topo_file = tmp_path / 'topo.json' returncode, stdout, stderr = run_reframe( - more_options=[f'--detect-host-topology={topo_file}'] + action=f'--detect-host-topology={topo_file}', ) assert 'Traceback' not in stdout assert 'Traceback' not in stderr @@ -1184,7 +1186,7 @@ def test_fixture_registry_env_sys(run_reframe): assert returncode == 0 assert 'e1' in stdout assert 'sys1:p0' in stdout - returncode, stdout, stderr = run_reframe( + returncode, stdout, _ = run_reframe( system='sys1:p1', environs=['e1'], checkpath=['unittests/resources/checks_unlisted/fixtures_simple.py'], @@ -1194,7 +1196,7 @@ def test_fixture_registry_env_sys(run_reframe): assert returncode == 0 assert 'e1' in stdout assert 'sys1:p1' in stdout - returncode, stdout, stderr = run_reframe( + returncode, stdout, _ = run_reframe( system='sys1:p1', environs=['e2'], checkpath=['unittests/resources/checks_unlisted/fixtures_simple.py'], @@ -1216,7 +1218,7 @@ def test_fixture_resolution(run_reframe, run_action): assert returncode == 0 -def test_dynamic_tests(run_reframe, tmp_path, run_action): +def test_dynamic_tests(run_reframe, run_action): returncode, stdout, _ = run_reframe( system='sys0', environs=[], @@ -1229,7 +1231,7 @@ def test_dynamic_tests(run_reframe, tmp_path, run_action): assert 'FAILED' not in stdout -def test_dynamic_tests_filtering(run_reframe, tmp_path, run_action): +def test_dynamic_tests_filtering(run_reframe, run_action): returncode, stdout, _ = run_reframe( system='sys1', environs=[], @@ -1253,3 +1255,94 @@ def test_testlib_inherit_fixture_in_different_files(run_reframe): assert returncode == 0 assert 'Ran 3/3 test case(s)' in stdout assert 'FAILED' not in stdout + + +@pytest.fixture(params=['csv', 'plain', 'pretty']) +def table_format(request): + return request.param + + +def test_storage_options(run_reframe, tmp_path, table_format): + def assert_no_crash(returncode, stdout, stderr, exitcode=0): + assert returncode == exitcode + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + return returncode, stdout, stderr + + run_reframe2 = functools.partial( + run_reframe, + checkpath=['unittests/resources/checks/frontend_checks.py'], + 
more_options=[f'--table-format={table_format}'] + ) + + # Run first a normal run with a performance test to initialize the DB + run_reframe2(action='run') + assert os.path.exists(tmp_path / '.reframe' / 'reports' / 'results.db') + + stdout = assert_no_crash( + *run_reframe(action='--list-stored-sessions') + )[1] + + # Get the session uuid for later queries + uuid = re.search(r'(\w{8}-\w{4}-\w{4}-\w{4}-\w{12})', stdout).group(1) + + # Get details from the last session + stdout = assert_no_crash( + *run_reframe2(action=f'--describe-stored-session={uuid}') + )[1] + session_json = json.loads(stdout) + + # List test cases by session + assert_no_crash(*run_reframe2(action=f'--list-stored-testcases={uuid}')) + assert_no_crash( + *run_reframe2(action=f'--describe-stored-testcases={uuid}') + ) + + # List test cases by time period + ts_start = session_json['session_info']['time_start'] + assert_no_crash( + *run_reframe2(action=f'--list-stored-testcases={ts_start}:now') + ) + assert_no_crash( + *run_reframe2(action=f'--describe-stored-testcases={ts_start}:now') + ) + + # Check that invalid argument do not crash CLI + assert_no_crash(*run_reframe2(action='--describe-stored-session=0'), + exitcode=1) + assert_no_crash(*run_reframe2(action='--describe-stored-testcases=0'), + exitcode=1) + assert_no_crash(*run_reframe2(action='--list-stored-testcases=0'), + exitcode=1) + + # Remove session + assert_no_crash(*run_reframe2(action=f'--delete-stored-session={uuid}')) + + +def test_performance_compare(run_reframe, table_format): + def assert_no_crash(returncode, stdout, stderr, exitcode=0): + assert returncode == exitcode + assert 'Traceback' not in stdout + assert 'Traceback' not in stderr + return returncode, stdout, stderr + + run_reframe2 = functools.partial( + run_reframe, + checkpath=['unittests/resources/checks/frontend_checks.py'], + more_options=[f'--table-format={table_format}'] + ) + run_reframe2(action='run') + + # Rerun with various arguments + assert_no_crash( + *run_reframe2( + action='--performance-compare=now-1m:now/now-1d:now/mean:/+result' + ) + ) + + # Check that invalid arguments do not crash the CLI + assert_no_crash( + *run_reframe2( + action='--performance-compare=now-1m:now/now-1d:now/mean:+foo/+bar' + ), exitcode=1 + ) diff --git a/unittests/test_perflogging.py b/unittests/test_perflogging.py new file mode 100644 index 0000000000..afc5e26f5e --- /dev/null +++ b/unittests/test_perflogging.py @@ -0,0 +1,458 @@ +# Copyright 2016-2024 Swiss National Supercomputing Centre (CSCS/ETH Zurich) +# ReFrame Project Developers. See the top-level LICENSE file for details. 
+# +# SPDX-License-Identifier: BSD-3-Clause + +import contextlib +import io +import os +import pytest + +import reframe as rfm +import reframe.core.logging as logging +import reframe.core.runtime as rt +import reframe.frontend.executors as executors +import reframe.utility.osext as osext +import reframe.utility.sanity as sn + + +class _MyPerfTest(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo perf0=100 && echo perf1=50' + + @sanity_function + def validate(self): + return sn.assert_found(r'perf0', self.stdout) + + @performance_function('unit0') + def perf0(self): + return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float) + + @performance_function('unit1') + def perf1(self): + return sn.extractsingle(r'perf1=(\S+)', self.stdout, 1, float) + + +class _MyPerfParamTest(_MyPerfTest): + p = parameter([1, 2]) + + +class _MyFailingTest(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo perf0=100' + + @sanity_function + def validate(self): + return False + + @performance_function('unit0') + def perf0(self): + return sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float) + + +class _LazyPerfTest(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo perf0=100' + + @sanity_function + def validate(self): + return True + + @run_before('performance') + def set_perf_vars(self): + self.perf_variables = { + 'perf0': sn.make_performance_function( + sn.extractsingle(r'perf0=(\S+)', self.stdout, 1, float), + 'unit0' + ) + } + + +@pytest.fixture +def perf_test(): + return _MyPerfTest() + + +@pytest.fixture +def perf_param_tests(): + return [_MyPerfParamTest(variant_num=v) + for v in range(_MyPerfParamTest.num_variants)] + + +@pytest.fixture +def failing_perf_test(): + return _MyFailingTest() + + +@pytest.fixture +def lazy_perf_test(): + return _LazyPerfTest() + + +@pytest.fixture +def simple_test(): + class _MySimpleTest(rfm.RunOnlyRegressionTest): + valid_systems = ['*'] + valid_prog_environs = ['*'] + executable = 'echo hello' + + @sanity_function + def validate(self): + return sn.assert_found(r'hello', self.stdout) + + return _MySimpleTest() + + +@pytest.fixture +def config_perflog(make_config_file): + def _config_perflog(fmt, perffmt=None, logging_opts=None): + logging_config = { + 'level': 'debug2', + 'handlers': [{ + 'type': 'stream', + 'name': 'stdout', + 'level': 'info', + 'format': '%(message)s' + }], + 'handlers_perflog': [{ + 'type': 'filelog', + 'prefix': '%(check_system)s/%(check_partition)s', + 'level': 'info', + 'format': fmt + }] + } + if logging_opts: + logging_config.update(logging_opts) + + if perffmt is not None: + logging_config['handlers_perflog'][0]['format_perfvars'] = perffmt + + return make_config_file({'logging': [logging_config]}) + + return _config_perflog + + +def _count_lines(filepath): + count = 0 + with open(filepath) as fp: + for line in fp: + count += 1 + + return count + + +def _assert_header(filepath, header): + with open(filepath) as fp: + assert fp.readline().strip() == header + + +def _assert_no_logging_error(fn, *args, **kwargs): + captured_stderr = io.StringIO() + with contextlib.redirect_stderr(captured_stderr): + fn(*args, **kwargs) + + assert 'Logging error' not in captured_stderr.getvalue() + + +def test_perf_logging(make_runner, make_exec_ctx, perf_test, + config_perflog, tmp_path): + make_exec_ctx( + config_perflog( + fmt=( + '%(check_job_completion_time)s,%(version)s,' + 
'%(check_display_name)s,%(check_system)s,' + '%(check_partition)s,%(check_environ)s,' + '%(check_jobid)s,%(check_result)s,%(check_perfvalues)s' + ), + perffmt=( + '%(check_perf_value)s,%(check_perf_unit)s,' + '%(check_perf_ref)s,%(check_perf_lower_thres)s,' + '%(check_perf_upper_thres)s,' + ) + ) + ) + logging.configure_logging(rt.runtime().site_config) + runner = make_runner() + testcases = executors.generate_testcases([perf_test]) + runner.runall(testcases) + + logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest.log' + assert os.path.exists(logfile) + assert _count_lines(logfile) == 2 + + # Rerun with the same configuration and check that new entry is appended + testcases = executors.generate_testcases([perf_test]) + runner = make_runner() + _assert_no_logging_error(runner.runall, testcases) + assert _count_lines(logfile) == 3 + + # Change the configuration and rerun + make_exec_ctx( + config_perflog( + fmt=( + '%(check_job_completion_time)s,%(version)s,' + '%(check_display_name)s,%(check_system)s,' + '%(check_partition)s,%(check_environ)s,' + '%(check_jobid)s,%(check_result)s,%(check_perfvalues)s' + ), + perffmt='%(check_perf_value)s,%(check_perf_unit)s,' + ) + ) + logging.configure_logging(rt.runtime().site_config) + testcases = executors.generate_testcases([perf_test]) + runner = make_runner() + _assert_no_logging_error(runner.runall, testcases) + assert _count_lines(logfile) == 2 + _assert_header(logfile, + 'job_completion_time,version,display_name,system,partition,' + 'environ,jobid,result,perf0_value,perf0_unit,' + 'perf1_value,perf1_unit') + + logfile_prev = [(str(logfile) + '.h0', 3)] + for f, num_lines in logfile_prev: + assert os.path.exists(f) + _count_lines(f) == num_lines + + # Change the test and rerun + perf_test.perf_variables['perfN'] = perf_test.perf_variables['perf1'] + + # We reconfigure the logging in order for the filelog handler to start + # from a clean state + logging.configure_logging(rt.runtime().site_config) + testcases = executors.generate_testcases([perf_test]) + runner = make_runner() + _assert_no_logging_error(runner.runall, testcases) + assert _count_lines(logfile) == 2 + _assert_header(logfile, + 'job_completion_time,version,display_name,system,partition,' + 'environ,jobid,result,perf0_value,perf0_unit,' + 'perf1_value,perf1_unit,perfN_value,perfN_unit') + + logfile_prev = [(str(logfile) + '.h0', 3), (str(logfile) + '.h1', 2)] + for f, num_lines in logfile_prev: + assert os.path.exists(f) + _count_lines(f) == num_lines + + +def test_perf_logging_no_end_delim(make_runner, make_exec_ctx, perf_test, + config_perflog, tmp_path): + make_exec_ctx( + config_perflog( + fmt=( + '%(check_job_completion_time)s,%(version)s,' + '%(check_display_name)s,%(check_system)s,' + '%(check_partition)s,%(check_environ)s,' + '%(check_jobid)s,%(check_result)s,%(check_perfvalues)s' + ), + perffmt='%(check_perf_value)s,%(check_perf_unit)s' + ) + ) + logging.configure_logging(rt.runtime().site_config) + runner = make_runner() + testcases = executors.generate_testcases([perf_test]) + _assert_no_logging_error(runner.runall, testcases) + + logfile = tmp_path / 'perflogs' / 'generic' / 'default' / '_MyPerfTest.log' + assert os.path.exists(logfile) + assert _count_lines(logfile) == 2 + + with open(logfile) as fp: + lines = fp.readlines() + + assert len(lines) == 2 + assert lines[0] == ( + 'job_completion_time,version,display_name,system,partition,' + 'environ,jobid,result,perf0_value,perf0_unitperf1_value,perf1_unit\n' + ) + assert '