diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a99dd359b..820c76004 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,11 +13,10 @@ repos:
             tests/.*.in$
         )$

-- repo: https://github.com/PyCQA/pylint
-  rev: pylint-2.5.2
+- repo: https://github.com/PyCQA/pydocstyle
+  rev: 5.0.2
   hooks:
-  - id: pylint
-    language: system
+  - id: pydocstyle
     exclude: &exclude_files >
         (?x)^(
             aiida_quantumespresso/calculations/pwimmigrant.*|
@@ -25,12 +24,6 @@
             docs/.*|
             tests/.*(??$
-
-# Number of spaces of indent required inside a hanging or continued line.
-indent-after-paren=4
-
-# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
-# tab).
-indent-string='    '
-
-# Maximum number of characters on a single line.
-max-line-length=120
-
-# Maximum number of lines in a module
-max-module-lines=1000
-
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
-               dict-separator
-
-# Allow the body of a class to be on the same line as the declaration if body
-# contains single statement.
-single-line-class-stmt=no
-
-# Allow the body of an if to be on the same line as the test if there is no
-# else.
-single-line-if-stmt=no
-
-
-[TYPECHECK]
-
-# List of decorators that produce context managers, such as
-# contextlib.contextmanager. Add to this list to register other decorators that
-# produce valid context managers.
-contextmanager-decorators=contextlib.contextmanager
-
-# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E1101 when accessed. Python regular
-# expressions are accepted.
-generated-members=self.exit_codes.*
-
-# Tells whether missing members accessed in mixin class should be ignored. A
-# mixin class is detected if its name ends with "mixin" (case insensitive).
-ignore-mixin-members=yes
-
-# This flag controls whether pylint should warn about no-member and similar
-# checks whenever an opaque object is returned when inferring. The inference
-# can return multiple potential results while evaluating a Python object, but
-# some branches might not be evaluated, which results in partial inference. In
-# that case, it might be useful to still emit no-member and other checks for
-# the rest of the inferred objects.
-ignore-on-opaque-inference=yes
-
-# List of class names for which member attributes should not be checked (useful
-# for classes with dynamically set attributes). This supports the use of
-# qualified names.
-ignored-classes=optparse.Values,thread._local,_thread._local
-
-# List of module names for which member attributes should not be checked
-# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis. It
-# supports qualified module names, as well as Unix pattern matching.
-ignored-modules=
-
-# Show a hint with possible names when a member name was not found. The aspect
-# of finding the hint is based on edit distance.
-missing-member-hint=yes
-
-# The minimum edit distance a name should have in order to be considered a
-# similar match for a missing member name.
-missing-member-hint-distance=1
-
-# The total number of similar names that should be taken in consideration when
-# showing a hint for a missing member.
-missing-member-max-choices=1
-
-
-[VARIABLES]
-
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
-
-# Tells whether unused global variables should be treated as a violation.
-allow-global-unused-variables=yes
-
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,
-          _cb
-
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
-
-# Argument names that match this expression will be ignored. Default to name
-# with leading underscore
-ignored-argument-names=_.*|^ignored_|^unused_|aiida_profile
-
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
-
-# List of qualified module names which can have objects that can redefine
-# builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins
-
-
-[LOGGING]
-
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
-
-
-[SIMILARITIES]
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=no
-
-# Minimum lines number of a similarity.
-min-similarity-lines=4
-
-
-[BASIC]
-
-# Naming style matching correct argument names
-argument-naming-style=snake_case
-
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style
-#argument-rgx=
-
-# Naming style matching correct attribute names
-attr-naming-style=snake_case
-
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style
-#attr-rgx=
-
-# Bad variable names which should always be refused, separated by a comma
-bad-names=foo,
-          bar,
-          baz,
-          toto,
-          tutu,
-          tata
-
-# Naming style matching correct class attribute names
-class-attribute-naming-style=any
-
-# Regular expression matching correct class attribute names. Overrides class-
-# attribute-naming-style
-#class-attribute-rgx=
-
-# Naming style matching correct class names
-class-naming-style=PascalCase
-
-# Regular expression matching correct class names. Overrides class-naming-style
-#class-rgx=
-
-# Naming style matching correct constant names
-const-naming-style=UPPER_CASE
-
-# Regular expression matching correct constant names. Overrides const-naming-
-# style
-#const-rgx=
-
-# Minimum line length for functions/classes that require docstrings, shorter
-# ones are exempt.
-docstring-min-length=-1
-
-# Naming style matching correct function names
-function-naming-style=snake_case
-
-# Regular expression matching correct function names. Overrides function-
-# naming-style
-#function-rgx=
-
-# Good variable names which should always be accepted, separated by a comma
-good-names=i,
-           j,
-           k,
-           Run,
-           UpfData,
-           Dict,
-           KpointsData,
-           StructureData,
-           Code,
-           BandsData,
-           PwCalculation,
-           HpCalculation,
-           PhCalculation,
-           Q2rCalculation,
-           MatdynCalculation,
-           PpCalculation,
-           DosCalculation,
-           CpCalculation,
-           NebCalculation,
-           ProjwfcCalculation,
-           Pw2wannier90Calculation,
-           PwBaseWorkChain,
-           PwRelaxWorkChain,
-           PwBandsWorkChain,
-           PwBandStructureWorkChain,
-           _
-
-# Include a hint for the correct naming format with invalid-name
-include-naming-hint=no
-
-# Naming style matching correct inline iteration names
-inlinevar-naming-style=any
-
-# Regular expression matching correct inline iteration names. Overrides
-# inlinevar-naming-style
-#inlinevar-rgx=
-
-# Naming style matching correct method names
-method-naming-style=snake_case
-
-# Regular expression matching correct method names. Overrides method-naming-
-# style
-#method-rgx=
-
-# Naming style matching correct module names
-module-naming-style=snake_case
-
-# Regular expression matching correct module names. Overrides module-naming-
-# style
-#module-rgx=
-
-# Colon-delimited sets of names that determine each other's naming style when
-# the name regexes allow several styles.
-name-group=
-
-# Regular expression which should only match function or class names that do
-# not require a docstring.
-no-docstring-rgx=^_
-
-# List of decorators that produce properties, such as abc.abstractproperty. Add
-# to this list to register other decorators that produce valid properties.
-property-classes=abc.abstractproperty
-
-# Naming style matching correct variable names
-variable-naming-style=snake_case
-
-# Regular expression matching correct variable names. Overrides variable-
-# naming-style
-#variable-rgx=
-
-
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,
-      XXX,
-      TODO
-
-
-[IMPORTS]
-
-# Allow wildcard imports from modules that define __all__.
-allow-wildcard-with-all=no
-
-# Analyse import fallback blocks. This can be used to support both Python 2 and
-# 3 compatible code, which means that the block might have code that exists
-# only in one or another interpreter, leading to false positives when analysed.
-analyse-fallback-blocks=no
-
-# Deprecated modules which should not be used, separated by a comma
-deprecated-modules=regsub,
-                   TERMIOS,
-                   Bastion,
-                   rexec
-
-# Create a graph of external dependencies in the given file (report RP0402 must
-# not be disabled)
-ext-import-graph=
-
-# Create a graph of every (i.e. internal and external) dependencies in the
-# given file (report RP0402 must not be disabled)
-import-graph=
-
-# Create a graph of internal dependencies in the given file (report RP0402 must
-# not be disabled)
-int-import-graph=
-
-# Force import order to recognize a module as part of the standard
-# compatibility libraries.
-known-standard-library=
-
-# Force import order to recognize a module as part of a third party library.
-known-third-party=aiida,enchant
-
-
-[DESIGN]
-
-# Maximum number of arguments for function / method
-max-args=100
-
-# Maximum number of attributes for a class (see R0902).
-max-attributes=7
-
-# Maximum number of boolean expressions in a if statement
-max-bool-expr=5
-
-# Maximum number of branch for function / method body
-max-branches=15
-
-# Maximum number of locals for function / method body
-max-locals=100
-
-# Maximum number of parents for a class (see R0901).
-max-parents=7
-
-# Maximum number of public methods for a class (see R0904).
-max-public-methods=20
-
-# Maximum number of return / yield for function / method body
-max-returns=6
-
-# Maximum number of statements in function / method body
-max-statements=50
-
-# Minimum number of public methods for a class (see R0903).
-min-public-methods=2
-
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,
-                      __new__,
-                      setUp
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,
-                  _fields,
-                  _replace,
-                  _source,
-                  _make
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-
-[EXCEPTIONS]
-
-# Exceptions that will emit a warning when being caught. Defaults to
-# "Exception"
-overgeneral-exceptions=Exception
diff --git a/aiida_quantumespresso/calculations/__init__.py b/aiida_quantumespresso/calculations/__init__.py
index 28ff18e2a..43a6d827e 100644
--- a/aiida_quantumespresso/calculations/__init__.py
+++ b/aiida_quantumespresso/calculations/__init__.py
@@ -464,7 +464,7 @@ def _generate_PWCPinputdata(cls, parameters, settings, pseudos, structure, kpoin
                 has_mesh = False
                 weights = [1.] * num_kpoints

-            except AttributeError:
+            except AttributeError as exception:

                 try:
                     kpoints_list = kpoints.get_kpoints()
@@ -472,11 +472,10 @@ def _generate_PWCPinputdata(cls, parameters, settings, pseudos, structure, kpoin
                     has_mesh = False
                     if num_kpoints == 0:
                         raise exceptions.InputValidationError(
-                            'At least one k point must be '
-                            'provided for non-gamma calculations')
+                            'At least one k point must be provided for non-gamma calculations'
+                        ) from exception

                 except AttributeError:
-                    raise exceptions.InputValidationError(
-                        'No valid kpoints have been found')
+                    raise exceptions.InputValidationError('No valid kpoints have been found') from exception

                 try:
                     _, weights = kpoints.get_kpoints(also_weights=True)
@@ -541,20 +540,20 @@ def _generate_PWCPinputdata(cls, parameters, settings, pseudos, structure, kpoin
         try:
             control_nl = input_params['CONTROL']
             calculation_type = control_nl['calculation']
-        except KeyError:
+        except KeyError as exception:
             raise exceptions.InputValidationError(
                 "No 'calculation' in CONTROL namelist."
                 'It is required for automatic detection of the valid list '
                 'of namelists. Otherwise, specify the list of namelists '
                 "using the NAMELISTS key inside the 'settings' input node.") from exception

         try:
             namelists_toprint = cls._automatic_namelists[calculation_type]
-        except KeyError:
+        except KeyError as exception:
             raise exceptions.InputValidationError("Unknown 'calculation' value in "
                                                   'CONTROL namelist {}. 
Otherwise, specify the list of ' "namelists using the NAMELISTS inside the 'settings' input " - 'node'.format(calculation_type)) + 'node'.format(calculation_type)) from exception inputfile = '' for namelist_name in namelists_toprint: diff --git a/aiida_quantumespresso/calculations/epw.py b/aiida_quantumespresso/calculations/epw.py index 5a1cb1121..7329acfce 100644 --- a/aiida_quantumespresso/calculations/epw.py +++ b/aiida_quantumespresso/calculations/epw.py @@ -125,8 +125,8 @@ def test_offset(offset): parameters['INPUTEPW']['nq2'] = mesh[1] parameters['INPUTEPW']['nq3'] = mesh[2] postpend_text = None - except: - raise exceptions.InputValidationError('Cannot get the coarse q-point grid') + except NotImplementedError as exception: + raise exceptions.InputValidationError('Cannot get the coarse q-point grid') from exception try: mesh, offset = self.inputs.kpoints.get_kpoints_mesh() @@ -135,8 +135,8 @@ def test_offset(offset): parameters['INPUTEPW']['nk2'] = mesh[1] parameters['INPUTEPW']['nk3'] = mesh[2] postpend_text = None - except: - raise exceptions.InputValidationError('Cannot get the coarse k-point grid') + except NotImplementedError as exception: + raise exceptions.InputValidationError('Cannot get the coarse k-point grid') from exception try: mesh, offset = self.inputs.qfpoints.get_kpoints_mesh() @@ -145,8 +145,8 @@ def test_offset(offset): parameters['INPUTEPW']['nqf2'] = mesh[1] parameters['INPUTEPW']['nqf3'] = mesh[2] postpend_text = None - except: - raise exceptions.InputValidationError('Cannot get the fine q-point grid') + except NotImplementedError as exception: + raise exceptions.InputValidationError('Cannot get the fine q-point grid') from exception try: mesh, offset = self.inputs.kfpoints.get_kpoints_mesh() @@ -155,8 +155,8 @@ def test_offset(offset): parameters['INPUTEPW']['nkf2'] = mesh[1] parameters['INPUTEPW']['nkf3'] = mesh[2] postpend_text = None - except: - raise exceptions.InputValidationError('Cannot get the fine k-point grid') + except NotImplementedError as exception: + raise exceptions.InputValidationError('Cannot get the fine k-point grid') from exception # customized namelists, otherwise not present in the distributed epw code diff --git a/aiida_quantumespresso/calculations/helpers/__init__.py b/aiida_quantumespresso/calculations/helpers/__init__.py index 93ff7fdfb..4eeb2b005 100644 --- a/aiida_quantumespresso/calculations/helpers/__init__.py +++ b/aiida_quantumespresso/calculations/helpers/__init__.py @@ -186,7 +186,7 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod try: with open(xml_path, 'r') as handle: dom = xml.dom.minidom.parse(handle) - except IOError: + except IOError as exception: prefix = 'INPUT_PW-' suffix = '.xml' versions = [ @@ -205,7 +205,7 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod raise QEInputValidationError( 'Unknown Quantum Espresso version: {}. 
' 'Available versions: {};{}'.format(version, ', '.join(versions), add_str) - ) + ) from exception # ========== List of known PW variables (from XML file) =============== known_kws = dom.getElementsByTagName('var') @@ -340,21 +340,21 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod try: calculation_type = input_params_internal['calculation'] - except KeyError: + except KeyError as exception: raise QEInputValidationError( 'Error, you need to specify at least the ' 'calculation type (among {})'.format(', '.join(list(valid_calculations_and_opt_namelists.keys()))) - ) + ) from exception try: opt_namelists = valid_calculations_and_opt_namelists[calculation_type] - except KeyError: + except KeyError as exception: raise QEInputValidationError( 'Error, {} is not a valid value for ' 'the calculation type (valid values: {})'.format( calculation_type, ', '.join(list(valid_calculations_and_opt_namelists.keys())) ) - ) + ) from exception internal_dict = {i: {} for i in compulsory_namelists + opt_namelists} all_namelists = set(compulsory_namelists) @@ -396,19 +396,19 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod errors_list.append(err_str) try: internal_dict[namelist_name][keyword] = _check_and_convert(keyword, value, found_var['expected_type']) - except KeyError: + except KeyError as exception: if namelist_name in all_namelists: err_str = 'Error, namelist {} not valid for calculation type {}'.format( namelist_name, calculation_type ) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) else: err_str = 'Error, unknown namelist {}'.format(namelist_name) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) except TypeError as exception: @@ -462,20 +462,20 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod try: internal_dict[namelist_name][keyword] = outdict - except KeyError: + except KeyError as exception: err_str = 'Error, unknown namelist {}'.format(namelist_name) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue else: try: end_value = int(found_var['end_val']) - except ValueError: + except ValueError as exception: err_str = "Error, invalid end value '{}' for keyword '{}'.".format(found_var['end_val'], keyword) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue @@ -504,10 +504,10 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod try: internal_dict[namelist_name][keyword] = outlist - except KeyError: + except KeyError as exception: err_str = 'Error, unknown namelist {}'.format(namelist_name) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue @@ -522,10 +522,10 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod # Create empty list for this keyword in the correct namelist try: internal_dict[namelist_name][keyword] = [] - except KeyError: + except KeyError as exception: err_str = 'Error, unknown namelist {}'.format(namelist_name) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise 
QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue @@ -554,12 +554,12 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod try: int(variable['start'][i]) - except ValueError: + except ValueError as exception: err_str = "Error, invalid start value '{}' for keyword '{}'.".format( variable['start'][i], keyword ) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue @@ -583,12 +583,12 @@ def pw_input_helper(input_params, structure, stop_at_first_error=False, flat_mod # Other types are assumed to be an integer try: index_value = int(index_value) - except ValueError: + except ValueError as exception: err_str = 'Error, only integer types are supported for index {}, got {}'.format( index, index_value ) if stop_at_first_error: - raise QEInputValidationError(err_str) + raise QEInputValidationError(err_str) from exception else: errors_list.append(err_str) continue diff --git a/aiida_quantumespresso/calculations/ph.py b/aiida_quantumespresso/calculations/ph.py index 668d8271f..9f28eb290 100644 --- a/aiida_quantumespresso/calculations/ph.py +++ b/aiida_quantumespresso/calculations/ph.py @@ -126,8 +126,9 @@ def prepare_for_submission(self, folder): except AttributeError: try: default_parent_output_folder = parent_calc._get_output_folder() # pylint: disable=protected-access - except AttributeError: - raise exceptions.InputValidationError('parent calculation does not have a default output subfolder') + except AttributeError as exception: + msg = 'parent calculation does not have a default output subfolder' + raise exceptions.InputValidationError(msg) from exception parent_calc_out_subfolder = settings.pop('PARENT_CALC_OUT_SUBFOLDER', default_parent_output_folder) # I put the first-level keys as uppercase (i.e., namelist and card names) and the second-level keys as lowercase @@ -186,9 +187,10 @@ def prepare_for_submission(self, folder): # this is the case where no mesh was set. 
Maybe it's a list try: list_of_points = self.inputs.qpoints.get_kpoints(cartesian=True) - except AttributeError: + except AttributeError as exception: # In this case, there are no info on the qpoints at all - raise exceptions.InputValidationError('Input `qpoints` contains neither a mesh nor a list of points') + msg = 'Input `qpoints` contains neither a mesh nor a list of points' + raise exceptions.InputValidationError(msg) from exception # change to 2pi/a coordinates lattice_parameter = numpy.linalg.norm(self.inputs.qpoints.cell[0]) diff --git a/aiida_quantumespresso/calculations/pw.py b/aiida_quantumespresso/calculations/pw.py index 646d60cb1..267893373 100644 --- a/aiida_quantumespresso/calculations/pw.py +++ b/aiida_quantumespresso/calculations/pw.py @@ -157,8 +157,10 @@ def input_file_name_hubbard_file(cls): # pylint: disable=no-self-argument,no-self-use try: HpCalculation = factories.CalculationFactory('quantumespresso.hp') - except Exception: - raise RuntimeError('this is determined by the aiida-quantumespresso-hp plugin but it is not installed') + except Exception as exc: + raise RuntimeError( + 'this is determined by the aiida-quantumespresso-hp plugin but it is not installed' + ) from exc return HpCalculation.input_file_name_hubbard_file diff --git a/aiida_quantumespresso/data/force_constants.py b/aiida_quantumespresso/data/force_constants.py index 16f91a0bf..c305af4ef 100644 --- a/aiida_quantumespresso/data/force_constants.py +++ b/aiida_quantumespresso/data/force_constants.py @@ -227,7 +227,7 @@ def parse_q2r_force_constants_file(lines, also_force_constants=False): force_constants[mi1, mi2, mi3, ji1, ji2, na1, na2] = float(line[3]) current_line += 1 - except (IndexError, ValueError) as exception: - raise ValueError(str(exception) + '\nForce constants file could not be parsed (incorrect file format)') + except (IndexError, ValueError) as exc: + raise ValueError(str(exc) + '\nForce constants file could not be parsed (incorrect file format)') from exc return parsed_data, force_constants, warnings diff --git a/aiida_quantumespresso/tools/base.py b/aiida_quantumespresso/tools/base.py index bdd36c20d..a250e8ee7 100644 --- a/aiida_quantumespresso/tools/base.py +++ b/aiida_quantumespresso/tools/base.py @@ -51,8 +51,10 @@ def get_structuredata(self): for mass, name, pseudo in zip(species['masses'], species['names'], species['pseudo_file_names']): try: symbols = valid_elements_regex.search(pseudo).group('ele').capitalize() - except Exception: - raise InputValidationError('could not determine element name from pseudo name: {}'.format(pseudo)) + except Exception as exception: + raise InputValidationError( + 'could not determine element name from pseudo name: {}'.format(pseudo) + ) from exception structure.append_kind(Kind(name=name, symbols=symbols, mass=mass)) for symbol, position in zip(data['atom_names'], data['positions']): diff --git a/aiida_quantumespresso/tools/calculations/pw.py b/aiida_quantumespresso/tools/calculations/pw.py index 371e5bcaf..83d503eab 100644 --- a/aiida_quantumespresso/tools/calculations/pw.py +++ b/aiida_quantumespresso/tools/calculations/pw.py @@ -24,18 +24,18 @@ def get_scf_accuracy(self, index=0): """ try: trajectory = self._node.outputs.output_trajectory - except exceptions.NotExistent: - raise ValueError('{} does not have the `output_trajectory` output node'.format(self._node)) + except exceptions.NotExistent as exc: + raise ValueError('{} does not have the `output_trajectory` output node'.format(self._node)) from exc try: scf_accuracy = 
trajectory.get_array('scf_accuracy') - except KeyError: - raise ValueError('{} does not contain the required `scf_accuracy` array'.format(trajectory)) + except KeyError as exc: + raise ValueError('{} does not contain the required `scf_accuracy` array'.format(trajectory)) from exc try: scf_iterations = trajectory.get_array('scf_iterations') - except KeyError: - raise ValueError('{} does not contain the required `scf_iterations` array'.format(trajectory)) + except KeyError as exc: + raise ValueError('{} does not contain the required `scf_iterations` array'.format(trajectory)) from exc number_of_frames = len(scf_iterations) diff --git a/aiida_quantumespresso/tools/dbexporters/tcod_plugins/cp.py b/aiida_quantumespresso/tools/dbexporters/tcod_plugins/cp.py index 22d7596d2..51b56c7e4 100644 --- a/aiida_quantumespresso/tools/dbexporters/tcod_plugins/cp.py +++ b/aiida_quantumespresso/tools/dbexporters/tcod_plugins/cp.py @@ -2,8 +2,8 @@ """TCOD export plugin for `CpCalculations`.""" try: from aiida_tcod.tools.dbexporters.tcod import BaseTcodtranslator # pylint: disable=import-error -except ImportError: - raise ImportError('dependency `aiida-tcod` not installed; run `pip install aiida-tcod` to do so.') +except ImportError as exception: + raise ImportError('dependency `aiida-tcod` not installed; run `pip install aiida-tcod` to do so.') from exception class CpTcodtranslator(BaseTcodtranslator): diff --git a/aiida_quantumespresso/tools/dbexporters/tcod_plugins/pw.py b/aiida_quantumespresso/tools/dbexporters/tcod_plugins/pw.py index 55da93159..bc0b57334 100644 --- a/aiida_quantumespresso/tools/dbexporters/tcod_plugins/pw.py +++ b/aiida_quantumespresso/tools/dbexporters/tcod_plugins/pw.py @@ -6,8 +6,8 @@ try: from aiida_tcod.tools.dbexporters.tcod import BaseTcodtranslator # pylint: disable=import-error -except ImportError: - raise ImportError('dependency `aiida-tcod` not installed; run `pip install aiida-tcod` to do so.') +except ImportError as exception: + raise ImportError('dependency `aiida-tcod` not installed; run `pip install aiida-tcod` to do so.') from exception class PwTcodtranslator(BaseTcodtranslator): diff --git a/aiida_quantumespresso/tools/pwinputparser.py b/aiida_quantumespresso/tools/pwinputparser.py index 26d4a0b3f..854839a37 100644 --- a/aiida_quantumespresso/tools/pwinputparser.py +++ b/aiida_quantumespresso/tools/pwinputparser.py @@ -7,8 +7,8 @@ from aiida.orm import Code, Dict, UpfData from aiida.common.folders import Folder from aiida.plugins import CalculationFactory - from qe_tools.parsers import PwInputFile as BasePwInputFile + from .base import StructureParseMixin diff --git a/aiida_quantumespresso/utils/bands.py b/aiida_quantumespresso/utils/bands.py index f253d4c14..f2cbf2ef7 100644 --- a/aiida_quantumespresso/utils/bands.py +++ b/aiida_quantumespresso/utils/bands.py @@ -37,8 +37,8 @@ def get_highest_occupied_band(bands, threshold=0.005): try: occupations = bands.get_array('occupations') - except KeyError: - raise ValueError('BandsData does not contain a `occupations` array') + except KeyError as exception: + raise ValueError('BandsData does not contain a `occupations` array') from exception lumo_indices = [] diff --git a/aiida_quantumespresso/utils/convert.py b/aiida_quantumespresso/utils/convert.py index 315a26b71..5d0bc994c 100644 --- a/aiida_quantumespresso/utils/convert.py +++ b/aiida_quantumespresso/utils/convert.py @@ -169,8 +169,8 @@ def convert_input_to_namelist_entry(key, val, mapping=None): for elemk, itemval in val.items(): try: idx = mapping[elemk] - except 
KeyError: - raise ValueError("Unable to find the key '{}' in the mapping dictionary".format(elemk)) + except KeyError as exception: + raise ValueError("Unable to find the key '{}' in the mapping dictionary".format(elemk)) from exception list_of_strings.append((idx, ' {0}({2}) = {1}\n'.format(key, conv_to_fortran(itemval), idx))) diff --git a/aiida_quantumespresso/utils/protocols/pw.py b/aiida_quantumespresso/utils/protocols/pw.py index c60459ce8..f5faa82e1 100644 --- a/aiida_quantumespresso/utils/protocols/pw.py +++ b/aiida_quantumespresso/utils/protocols/pw.py @@ -90,8 +90,8 @@ def __init__(self, name): self.name = name try: self.modifiers = _get_all_protocol_modifiers()[name] - except KeyError: - raise ValueError("Unknown protocol '{}'".format(name)) + except KeyError as exception: + raise ValueError("Unknown protocol '{}'".format(name)) from exception def get_protocol_data(self, modifiers=None): """Return the full info on the specific protocol, using the (optional) modifiers. @@ -126,11 +126,11 @@ def get_protocol_data(self, modifiers=None): if pseudo_modifier_name == 'custom': try: pseudo_data = modifiers_copy.pop('pseudo_data') - except KeyError: + except KeyError as exception: raise ValueError( "You specified 'custom' as a modifier name for 'pseudo', but you did not provide " "a 'pseudo_data' key." - ) + ) from exception else: pseudo_data = self.get_pseudo_data(pseudo_modifier_name) diff --git a/aiida_quantumespresso/utils/pseudopotential.py b/aiida_quantumespresso/utils/pseudopotential.py index ce6996d6c..838dc488f 100644 --- a/aiida_quantumespresso/utils/pseudopotential.py +++ b/aiida_quantumespresso/utils/pseudopotential.py @@ -94,15 +94,16 @@ def get_pseudos_from_dict(structure, pseudos_uuids): symbol = kind.symbol try: uuid = pseudos_uuids[symbol] - except KeyError: - raise NotExistent('No UPF for element {} found in the provided pseudos_uuids dictionary'.format(symbol)) + except KeyError as exception: + msg = 'No UPF for element {} found in the provided pseudos_uuids dictionary'.format(symbol) + raise NotExistent(msg) from exception try: upf = load_node(uuid) - except NotExistent: + except NotExistent as exception: raise NotExistent( 'No node found associated to the UUID {} given for element {} ' 'in the provided pseudos_uuids dictionary'.format(uuid, symbol) - ) + ) from exception if not isinstance(upf, UpfData): raise ValueError('Node with UUID {} is not a UpfData'.format(uuid)) if upf.element != symbol: diff --git a/aiida_quantumespresso/utils/resources.py b/aiida_quantumespresso/utils/resources.py index 032f82d74..d5d6e7365 100644 --- a/aiida_quantumespresso/utils/resources.py +++ b/aiida_quantumespresso/utils/resources.py @@ -31,7 +31,7 @@ def create_scheduler_resources(scheduler, base, goal): try: job_resource = scheduler.create_job_resource(**resources) except TypeError as exception: - raise ValueError('failed to create job resources for {} scheduler: {}'.format(scheduler.__class__, exception)) + raise ValueError('failed to create job resources for {} scheduler'.format(scheduler.__class__)) from exception return {key: value for key, value in job_resource.items() if value is not None} diff --git a/aiida_quantumespresso/utils/validation/trajectory.py b/aiida_quantumespresso/utils/validation/trajectory.py index 95b6d6212..f55078467 100644 --- a/aiida_quantumespresso/utils/validation/trajectory.py +++ b/aiida_quantumespresso/utils/validation/trajectory.py @@ -55,8 +55,8 @@ def verify_convergence_forces(trajectory, index=-1, threshold=None): try: forces = 
trajectory.get_array('forces')[index] - except (KeyError, IndexError): - raise ValueError('the `forces` array does not exist or the given index exceeds the length.') + except (KeyError, IndexError) as exception: + raise ValueError('the `forces` array does not exist or the given index exceeds the length.') from exception return numpy.max(abs(forces)) < threshold @@ -82,8 +82,8 @@ def verify_convergence_stress(trajectory, index=-1, threshold=None, reference_pr try: stress = trajectory.get_array('stress')[index] - except (KeyError, IndexError): - raise ValueError('the `stress` array does not exist or the given index exceeds the length.') + except (KeyError, IndexError) as exception: + raise ValueError('the `stress` array does not exist or the given index exceeds the length.') from exception pressure = (numpy.trace(stress) / 3.) diff --git a/pyproject.toml b/pyproject.toml index ca925b82e..1240c2b19 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,46 @@ [build-system] requires = ['setuptools>=40.8.0', 'wheel', 'reentry~=1.3', 'fastentrypoints~=0.12'] build-backend = 'setuptools.build_meta:__legacy__' + +[tool.pylint.format] +max-line-length = 120 + +[tool.pylint.tool-check] +generated-members = 'self.exit_codes.*' + +[tool.pylint.messages_control] +disable = [ + 'bad-continuation', + 'duplicate-code', + 'locally-disabled', + 'logging-format-interpolation', + 'inconsistent-return-statements', + 'import-outside-toplevel', + 'no-else-raise', + 'too-many-arguments', + 'too-many-ancestors', + 'too-many-branches', + 'too-many-locals', +] + +[tool.pylint.basic] +good-names = [ + 'i', + 'j', + 'k', + 'UpfData', + 'HpCalculation', + 'PwCalculation', +] + +[tool.pytest.ini_options] +minversion = '6.0' +testpaths = [ + 'tests', +] +filterwarnings = [ + 'ignore::DeprecationWarning:frozendict:', + 'ignore::DeprecationWarning:pkg_resources:', + 'ignore::DeprecationWarning:reentry:', + 'ignore::DeprecationWarning:sqlalchemy_utils:', +] diff --git a/setup.json b/setup.json index 00922d88c..adf94285c 100644 --- a/setup.json +++ b/setup.json @@ -72,11 +72,11 @@ "extras_require": { "pre-commit": [ "pre-commit~=2.2", - "pylint~=2.5.0" + "pylint~=2.6.0" ], "tests": [ "pgtest~=1.3", - "pytest~=5.4", + "pytest~=6.0", "pytest-regressions~=1.0" ], "tcod": [ diff --git a/tests/calculations/test_epw.py b/tests/calculations/test_epw.py index 4e867a645..b47e46c95 100644 --- a/tests/calculations/test_epw.py +++ b/tests/calculations/test_epw.py @@ -9,8 +9,8 @@ def test_epw_default( - aiida_profile, fixture_localhost, fixture_sandbox, generate_calc_job, generate_remote_data, fixture_code, - generate_kpoints_mesh, file_regression, tmpdir + fixture_localhost, fixture_sandbox, generate_calc_job, generate_remote_data, fixture_code, generate_kpoints_mesh, + file_regression, tmpdir ): """Test a default `EpwCalculation`.""" entry_point_name = 'quantumespresso.epw' diff --git a/tests/calculations/test_helpers.py b/tests/calculations/test_helpers.py index df298df2f..9ecb6bf7d 100644 --- a/tests/calculations/test_helpers.py +++ b/tests/calculations/test_helpers.py @@ -5,7 +5,7 @@ from aiida_quantumespresso.calculations.helpers import pw_input_helper, QEInputValidationError -def test_pw_helper_multidimensional(aiida_profile, generate_structure): +def test_pw_helper_multidimensional(generate_structure): """Test the helper for parameters containing a multidimensional parameter.""" structure = generate_structure() parameters = { diff --git a/tests/calculations/test_matdyn.py b/tests/calculations/test_matdyn.py index 
3556ea64a..64b6f1a0d 100644 --- a/tests/calculations/test_matdyn.py +++ b/tests/calculations/test_matdyn.py @@ -6,7 +6,7 @@ MatdynCalculation = CalculationFactory('quantumespresso.matdyn') -def test_matdyn_default(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs_matdyn, file_regression): +def test_matdyn_default(fixture_sandbox, generate_calc_job, generate_inputs_matdyn, file_regression): """Test a default `MatdynCalculation`.""" entry_point_name = 'quantumespresso.matdyn' diff --git a/tests/calculations/test_ph.py b/tests/calculations/test_ph.py index 582e3b012..34cbfe09d 100644 --- a/tests/calculations/test_ph.py +++ b/tests/calculations/test_ph.py @@ -11,8 +11,8 @@ def test_ph_default( - aiida_profile, fixture_localhost, fixture_sandbox, generate_calc_job, fixture_code, generate_kpoints_mesh, - generate_remote_data, file_regression + fixture_localhost, fixture_sandbox, generate_calc_job, fixture_code, generate_kpoints_mesh, generate_remote_data, + file_regression ): """Test a default `PhCalculation`.""" entry_point_name = 'quantumespresso.ph' @@ -51,8 +51,8 @@ def test_ph_default( def test_ph_qpoint_list( - aiida_profile, fixture_localhost, fixture_sandbox, generate_calc_job, fixture_code, generate_structure, - generate_kpoints_mesh, generate_remote_data, file_regression + fixture_localhost, fixture_sandbox, generate_calc_job, fixture_code, generate_structure, generate_kpoints_mesh, + generate_remote_data, file_regression ): """Test a `PhCalculation` with a qpoint list instead of a mesh.""" entry_point_name = 'quantumespresso.ph' diff --git a/tests/calculations/test_pp.py b/tests/calculations/test_pp.py index 1e4bdfad8..4cad94c5a 100644 --- a/tests/calculations/test_pp.py +++ b/tests/calculations/test_pp.py @@ -34,7 +34,7 @@ def _generate_inputs(parameters=None, settings=None): return _generate_inputs -def test_pp_default(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs, file_regression): +def test_pp_default(fixture_sandbox, generate_calc_job, generate_inputs, file_regression): """Test a default `PpCalculation`.""" entry_point_name = 'quantumespresso.pp' inputs = generate_inputs() @@ -61,7 +61,7 @@ def test_pp_default(aiida_profile, fixture_sandbox, generate_calc_job, generate_ file_regression.check(input_written, encoding='utf-8', extension='.in') -def test_pp_keep_plot_file(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs): +def test_pp_keep_plot_file(fixture_sandbox, generate_calc_job, generate_inputs): """Test a `PpCalculation` where we want to retrieve the plot file.""" entry_point_name = 'quantumespresso.pp' inputs = generate_inputs() @@ -81,7 +81,7 @@ def test_pp_keep_plot_file(aiida_profile, fixture_sandbox, generate_calc_job, ge assert element in calc_info.retrieve_list -def test_pp_cmdline_setting(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs): +def test_pp_cmdline_setting(fixture_sandbox, generate_calc_job, generate_inputs): """Test a `PpCalculation` with user-defined cmdline settings.""" entry_point_name = 'quantumespresso.pp' inputs = generate_inputs(settings={'cmdline': ['-npools', '2']}) @@ -93,16 +93,49 @@ def test_pp_cmdline_setting(aiida_profile, fixture_sandbox, generate_calc_job, g ('parameters', 'message'), ( ({}, 'parameter `INPUTPP.plot_num` must be explicitly set'), - ({'INPUTPP': {}}, 'parameter `INPUTPP.plot_num` must be explicitly set'), - ({'INPUTPP': {'plot_num': 'str'}}, '`INTPUTPP.plot_num` must be an integer in the range'), - ({'INPUTPP': {'plot_num': 14}}, '`INTPUTPP.plot_num` 
must be an integer in the range'), - ({'INPUTPP': {'plot_num': 1}}, 'parameter `PLOT.iflag` must be explicitly set'), - ({'INPUTPP': {'plot_num': 1}, 'PLOT': {}}, 'parameter `PLOT.iflag` must be explicitly set'), - ({'INPUTPP': {'plot_num': 1}, 'PLOT': {'iflag': 'str'}}, '`PLOT.iflag` must be an integer in the range 0-4'), - ({'INPUTPP': {'plot_num': 1}, 'PLOT': {'iflag': 5}}, '`PLOT.iflag` must be an integer in the range 0-4'), + ({ + 'INPUTPP': {} + }, 'parameter `INPUTPP.plot_num` must be explicitly set'), + ({ + 'INPUTPP': { + 'plot_num': 'str' + } + }, '`INTPUTPP.plot_num` must be an integer in the range'), + ({ + 'INPUTPP': { + 'plot_num': 14 + } + }, '`INTPUTPP.plot_num` must be an integer in the range'), + ({ + 'INPUTPP': { + 'plot_num': 1 + } + }, 'parameter `PLOT.iflag` must be explicitly set'), + ({ + 'INPUTPP': { + 'plot_num': 1 + }, + 'PLOT': {} + }, 'parameter `PLOT.iflag` must be explicitly set'), + ({ + 'INPUTPP': { + 'plot_num': 1 + }, + 'PLOT': { + 'iflag': 'str' + } + }, '`PLOT.iflag` must be an integer in the range 0-4'), + ({ + 'INPUTPP': { + 'plot_num': 1 + }, + 'PLOT': { + 'iflag': 5 + } + }, '`PLOT.iflag` must be an integer in the range 0-4'), ), -) # yapf: disable -def test_pp_invalid_parameters(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs, parameters, message): +) +def test_pp_invalid_parameters(fixture_sandbox, generate_calc_job, generate_inputs, parameters, message): """Test that launching `PpCalculation` fails for invalid parameters.""" entry_point_name = 'quantumespresso.pp' diff --git a/tests/calculations/test_pw.py b/tests/calculations/test_pw.py index b5bb66a24..6028cf20a 100644 --- a/tests/calculations/test_pw.py +++ b/tests/calculations/test_pw.py @@ -3,7 +3,7 @@ from aiida.common import datastructures -def test_pw_default(aiida_profile, fixture_sandbox, generate_calc_job, generate_inputs_pw, file_regression): +def test_pw_default(fixture_sandbox, generate_calc_job, generate_inputs_pw, file_regression): """Test a default `PwCalculation`.""" entry_point_name = 'quantumespresso.pw' diff --git a/tests/calculations/test_pw2gw.py b/tests/calculations/test_pw2gw.py index f10e5681f..84941a520 100644 --- a/tests/calculations/test_pw2gw.py +++ b/tests/calculations/test_pw2gw.py @@ -72,9 +72,7 @@ def inputs(fixture_code, remote, parameters, settings): @pytest.mark.parametrize( 'settings,with_symlink', [(False, False), (True, True)], ids=['base', 'with_symlink'], indirect=['settings'] ) -def test_pw2gw_default( - aiida_profile, fixture_sandbox, generate_calc_job, file_regression, remote, inputs, with_symlink -): +def test_pw2gw_default(fixture_sandbox, generate_calc_job, file_regression, remote, inputs, with_symlink): """Test a default `Pw2gwCalculation`.""" entry_point_name = 'quantumespresso.pw2gw' diff --git a/tests/parsers/__init__.py b/tests/parsers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/parsers/test_cp.py b/tests/parsers/test_cp.py index 04615d062..cd9b05fee 100644 --- a/tests/parsers/test_cp.py +++ b/tests/parsers/test_cp.py @@ -16,9 +16,7 @@ def generate_inputs(generate_structure): }) -def test_cp_default( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression -): +def test_cp_default(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression): """Test a default `cp.x` calculation.""" entry_point_calc_job = 'quantumespresso.cp' entry_point_parser = 'quantumespresso.cp' diff --git a/tests/parsers/test_dos.py 
b/tests/parsers/test_dos.py index 940cc9fc7..132d5b25f 100644 --- a/tests/parsers/test_dos.py +++ b/tests/parsers/test_dos.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- """Tests for the `DosParser`.""" + from aiida import orm from aiida.common import AttributeDict @@ -9,9 +10,7 @@ def generate_inputs(): return AttributeDict() -def test_dos_default( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression -): +def test_dos_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression): """Test `DosParser` on the results of a simple `dos.x` calculation.""" entry_point_calc_job = 'quantumespresso.dos' entry_point_parser = 'quantumespresso.dos' @@ -43,7 +42,7 @@ def test_dos_default( default_tolerance=dict(atol=0, rtol=1e-18)) -def test_dos_failed_interrupted(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser): +def test_dos_failed_interrupted(fixture_localhost, generate_calc_job_node, generate_parser): """Test `DosParser` on the results of a `dos.x` calculation that was interrupted abruptly.""" entry_point_calc_job = 'quantumespresso.dos' entry_point_parser = 'quantumespresso.dos' diff --git a/tests/parsers/test_matdyn.py b/tests/parsers/test_matdyn.py index aa229be2b..551faca83 100644 --- a/tests/parsers/test_matdyn.py +++ b/tests/parsers/test_matdyn.py @@ -14,7 +14,7 @@ def generate_inputs(): }) -def test_matdyn_default(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression): +def test_matdyn_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression): """Test a default `matdyn.x` calculation.""" entry_point_calc_job = 'quantumespresso.matdyn' entry_point_parser = 'quantumespresso.matdyn' diff --git a/tests/parsers/test_neb.py b/tests/parsers/test_neb.py index ddf60e4fc..c6d9604fe 100644 --- a/tests/parsers/test_neb.py +++ b/tests/parsers/test_neb.py @@ -45,9 +45,7 @@ def build_num_regression_dictionary(arrays, array_names): return result -def test_neb_default( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression -): +def test_neb_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression): """Test a NEB calculation with symmetric images and automatic climbing image.""" name = 'default' entry_point_calc_job = 'quantumespresso.neb' @@ -78,7 +76,7 @@ def test_neb_default( def test_neb_all_iterations( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression + fixture_localhost, generate_calc_job_node, generate_parser, data_regression, num_regression ): """Test a NEB calculation with the parser option `all_iterations=True`.""" name = 'default' diff --git a/tests/parsers/test_ph.py b/tests/parsers/test_ph.py index 3c672f60e..d7e9f04e9 100644 --- a/tests/parsers/test_ph.py +++ b/tests/parsers/test_ph.py @@ -8,7 +8,7 @@ def generate_inputs(): return {} -def test_ph_default(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression): +def test_ph_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression): """Test a default `ph.x` calculation.""" name = 'default' entry_point_calc_job = 'quantumespresso.ph' @@ -25,7 +25,7 @@ def test_ph_default(aiida_profile, fixture_localhost, generate_calc_job_node, ge data_regression.check(results['output_parameters'].get_dict()) -def test_ph_not_converged(aiida_profile, fixture_localhost, 
generate_calc_job_node, generate_parser, data_regression): +def test_ph_not_converged(fixture_localhost, generate_calc_job_node, generate_parser, data_regression): """Test a `ph.x` calculation where convergence is not reached.""" name = 'failed_convergence_not_reached' entry_point_calc_job = 'quantumespresso.ph' @@ -42,7 +42,7 @@ def test_ph_not_converged(aiida_profile, fixture_localhost, generate_calc_job_no data_regression.check(results['output_parameters'].get_dict()) -def test_ph_out_of_walltime(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression): +def test_ph_out_of_walltime(fixture_localhost, generate_calc_job_node, generate_parser, data_regression): """Test a `ph.x` calculation that runs out of walltime.""" name = 'failed_out_of_walltime' entry_point_calc_job = 'quantumespresso.ph' diff --git a/tests/parsers/test_pp.py b/tests/parsers/test_pp.py index 502b76e87..a2fb50e0c 100644 --- a/tests/parsers/test_pp.py +++ b/tests/parsers/test_pp.py @@ -128,8 +128,7 @@ def generate_inputs_3d(): def test_pp_default_1d( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_1d, data_regression, - num_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_1d, data_regression, num_regression ): """Test a default `pp.x` calculation producing a 1D data set.""" entry_point_calc_job = 'quantumespresso.pp' @@ -156,8 +155,8 @@ def test_pp_default_1d( def test_pp_default_1d_spherical( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_1d_spherical, - data_regression, num_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_1d_spherical, data_regression, + num_regression ): """Test a default `pp.x` calculation producing a 1D data set with spherical averaging.""" entry_point_calc_job = 'quantumespresso.pp' @@ -195,8 +194,7 @@ def test_pp_default_1d_spherical( def test_pp_default_2d( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_2d, data_regression, - num_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_2d, data_regression, num_regression ): """Test a default `pp.x` calculation producing a 2D data set.""" entry_point_calc_job = 'quantumespresso.pp' @@ -228,8 +226,7 @@ def test_pp_default_2d( def test_pp_default_polar( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_polar, data_regression, - num_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_polar, data_regression, num_regression ): """Test a default `pp.x` calculation producing a polar coordinates data set.""" entry_point_calc_job = 'quantumespresso.pp' @@ -256,8 +253,7 @@ def test_pp_default_polar( def test_pp_default_3d( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d, data_regression, - num_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d, data_regression, num_regression ): """Test a default `pp.x` calculation producing a 3D data set.""" entry_point_calc_job = 'quantumespresso.pp' @@ -288,9 +284,7 @@ def test_pp_default_3d( }) -def test_pp_default_3d_keep_plot_file( - aiida_profile, generate_calc_job_node, generate_parser, generate_inputs_3d, tmpdir -): +def test_pp_default_3d_keep_plot_file(generate_calc_job_node, generate_parser, generate_inputs_3d, tmpdir): """Test a `pp.x` calculation where 
`keep_plot_file=False` meaning files will be parsed from temporary directory.""" entry_point_calc_job = 'quantumespresso.pp' entry_point_parser = 'quantumespresso.pp' @@ -316,7 +310,7 @@ def test_pp_default_3d_keep_plot_file( assert len(results['output_data'].get_arraynames()) == 4 -def test_pp_default_3d_multiple(aiida_profile, generate_calc_job_node, generate_parser, generate_inputs_3d): +def test_pp_default_3d_multiple(generate_calc_job_node, generate_parser, generate_inputs_3d): """Test a default `pp.x` calculation producing multiple files in 3D format.""" entry_point_calc_job = 'quantumespresso.pp' entry_point_parser = 'quantumespresso.pp' @@ -337,9 +331,7 @@ def test_pp_default_3d_multiple(aiida_profile, generate_calc_job_node, generate_ assert len(node.get_arraynames()) == 4 -def test_pp_default_3d_failed_missing( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d -): +def test_pp_default_3d_failed_missing(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d): """Test a default `pp.x` calculation where no files are retrieved, or StdOut is missing.""" entry_point_calc_job = 'quantumespresso.pp' entry_point_parser = 'quantumespresso.pp' @@ -357,7 +349,7 @@ def test_pp_default_3d_failed_missing( def test_pp_default_3d_failed_missing_data( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d ): """Test a default `pp.x` calculation where the aiida.fileout file is missing.""" entry_point_calc_job = 'quantumespresso.pp' @@ -375,7 +367,7 @@ def test_pp_default_3d_failed_missing_data( def test_pp_default_3d_failed_interrupted( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d ): """Test a default `pp.x` calculation where the StdOut file is present but incomplete.""" entry_point_calc_job = 'quantumespresso.pp' @@ -392,9 +384,7 @@ def test_pp_default_3d_failed_interrupted( assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_OUTPUT_STDOUT_INCOMPLETE.status -def test_pp_default_3d_failed_format( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d -): +def test_pp_default_3d_failed_format(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_3d): """Test a default `pp.x` calculation where an unsupported output file format is used.""" entry_point_calc_job = 'quantumespresso.pp' entry_point_parser = 'quantumespresso.pp' diff --git a/tests/parsers/test_projwfc.py b/tests/parsers/test_projwfc.py index ec8b077d8..fd316bda4 100644 --- a/tests/parsers/test_projwfc.py +++ b/tests/parsers/test_projwfc.py @@ -24,9 +24,7 @@ def generate_inputs(generate_calc_job_node, fixture_localhost, generate_structur return AttributeDict(inputs) -def test_projwfc_default( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression -): +def test_projwfc_default(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression): """Test ``ProjwfcParser`` on the results of a simple ``projwfc.x`` calculation.""" entry_point_calc_job = 'quantumespresso.projwfc' entry_point_parser = 'quantumespresso.projwfc' @@ -67,7 +65,7 @@ def generate_inputs_spin(generate_calc_job_node, fixture_localhost, generate_str def test_projwfc_spin( - aiida_profile, 
fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_spin, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_spin, data_regression ): """Test ``ProjwfcParser`` on the results of a LSDA ``projwfc.x`` calculation.""" entry_point_calc_job = 'quantumespresso.projwfc' @@ -112,8 +110,7 @@ def generate_inputs_noncollinear(generate_calc_job_node, fixture_localhost, gene def test_projwfc_noncollinear( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_noncollinear, - data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_noncollinear, data_regression ): """Test ``ProjwfcParser`` on the results of a noncollinear ``projwfc.x`` calculation.""" entry_point_calc_job = 'quantumespresso.projwfc' @@ -161,8 +158,7 @@ def generate_inputs_spinorbit(generate_calc_job_node, fixture_localhost, generat def test_projwfc_spinorbit( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_spinorbit, - data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs_spinorbit, data_regression ): """Test ``ProjwfcParser`` on the results of a spinorbit ``projwfc.x`` calculation.""" entry_point_calc_job = 'quantumespresso.projwfc' diff --git a/tests/parsers/test_pw.py b/tests/parsers/test_pw.py index dde35463f..70d00015d 100644 --- a/tests/parsers/test_pw.py +++ b/tests/parsers/test_pw.py @@ -29,9 +29,7 @@ def _generate_inputs(calculation_type='scf', settings=None, metadata=None): return _generate_inputs -def test_pw_default( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression -): +def test_pw_default(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression): """Test a `pw.x` calculation in `scf` mode. The output is created by running a dead simple SCF calculation for a silicon structure. This test should test the @@ -60,7 +58,7 @@ def test_pw_default( def test_pw_default_no_xml( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test the parsing of an output directory without an XML file.""" name = 'default_no_xml' @@ -96,7 +94,7 @@ def test_pw_default_no_xml( def test_pw_default_xml_200420( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test a `pw.x` calculation in `scf` mode that produced the XML output with schema of 200420. @@ -126,7 +124,7 @@ def test_pw_default_xml_200420( def test_pw_default_xml_190304( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test a `pw.x` calculation in `scf` mode that produced the XML output with schema of 190304. @@ -156,7 +154,7 @@ def test_pw_default_xml_190304( def test_pw_default_xml_191206( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test a `pw.x` calculation in `scf` mode that produced the XML output with schema of 191206. 
@@ -186,7 +184,7 @@ def test_pw_default_xml_191206( def test_pw_initialization_xml_new( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test a `pw.x` calculation with new XML that only runs the preamble, i.e. an initialization-only calculation.""" name = 'initialization_xml_new' @@ -211,9 +209,7 @@ def test_pw_initialization_xml_new( }) -def test_pw_failed_computing_cholesky( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs -): +def test_pw_failed_computing_cholesky(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs): """Test the parsing of a calculation that failed during cholesky factorization. In this test the stdout is incomplete, and the XML is missing completely. The stdout contains @@ -232,9 +228,7 @@ def test_pw_failed_computing_cholesky( assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_COMPUTING_CHOLESKY.status -def test_pw_failed_dexx_negative( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs -): +def test_pw_failed_dexx_negative(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs): """Test the parsing of a calculation that failed due to negative dexx. In this test the stdout is incomplete, and the XML is missing completely. The stdout contains @@ -253,7 +247,7 @@ def test_pw_failed_dexx_negative( assert calcfunction.exit_status == node.process_class.exit_codes.ERROR_DEXX_IS_NEGATIVE.status -def test_pw_failed_missing(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs): +def test_pw_failed_missing(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs): """Test the parsing of a calculation that was interrupted before output files could even be written. In this particular interrupted test both the XML and the stdout are completely missing. @@ -276,7 +270,7 @@ def test_pw_failed_missing(aiida_profile, fixture_localhost, generate_calc_job_n def test_pw_failed_interrupted( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test the parsing of a calculation that was interrupted *after* convergence was achieved. @@ -305,7 +299,7 @@ def test_pw_failed_interrupted( def test_pw_failed_interrupted_stdout( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test the parsing of a calculation that was interrupted *after* convergence was achieved. @@ -337,7 +331,7 @@ def test_pw_failed_interrupted_stdout( def test_pw_failed_interrupted_xml( - aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression + fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression ): """Test the parsing of a calculation that was interrupted *after* convergence was achieved. 
@@ -366,7 +360,7 @@ def test_pw_failed_interrupted_xml(
     data_regression.check(results['output_parameters'].get_dict())


-def test_pw_npools_too_high(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
+def test_pw_npools_too_high(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
     """Test the parsing of a calculation that failed because some nodes have no k-points.

     In this test the stdout is incomplete, and the XML is missing completely. The stdout contains
@@ -386,7 +380,7 @@ def test_pw_npools_too_high(aiida_profile, fixture_localhost, generate_calc_job_


 def test_pw_failed_out_of_walltime(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
 ):
     """Test the parsing of an scf calculation that ran nominally but was cut short because it ran out of walltime."""
     name = 'failed_out_of_walltime'
@@ -410,7 +404,7 @@ def test_pw_failed_out_of_walltime(


 def test_pw_failed_out_of_walltime_interrupted(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
 ):
     """Test the parsing of an scf calculation that ran nominally but was cut short because it ran out of walltime.
@@ -439,7 +433,7 @@ def test_pw_failed_out_of_walltime_interrupted(


 def test_pw_failed_scf_not_converged(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
 ):
     """Test the parsing of an scf calculation that ran nominally but did not reach convergence."""
     name = 'failed_scf_not_converged'
@@ -462,9 +456,7 @@ def test_pw_failed_scf_not_converged(
     })


-def test_pw_relax_success(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
-):
+def test_pw_relax_success(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression):
     """Test a `relax` that successfully converges."""
     name = 'relax_success'
     entry_point_calc_job = 'quantumespresso.pw'
@@ -490,9 +482,7 @@ def test_pw_relax_success(
     })


-def test_pw_relax_failed_electronic(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
-):
+def test_pw_relax_failed_electronic(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
     """Test a `relax` that failed to converge during electronic cycle before ionic convergence is reached."""
     name = 'relax_failed_electronic'
     entry_point_calc_job = 'quantumespresso.pw'
@@ -514,7 +504,7 @@ def test_pw_relax_failed_electronic(


 def test_pw_relax_failed_not_converged_nstep(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `relax` that failed to converge within the maximum number of steps."""
     name = 'relax_failed_not_converged_nstep'
@@ -537,7 +527,7 @@ def test_pw_relax_failed_not_converged_nstep(


 def test_pw_vcrelax_success(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
 ):
     """Test a `vc-relax` that successfully converges and the final scf also converges."""
     name = 'vcrelax_success'
@@ -565,7 +555,7 @@ def test_pw_vcrelax_success(


 def test_pw_vcrelax_success_fractional(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs, data_regression
 ):
     """Test a `vc-relax`, that successfully converges and the final scf also converges.
@@ -596,7 +586,7 @@ def test_pw_vcrelax_success_fractional(


 def test_pw_vcrelax_success_external_pressure(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` with external pressure that successfully converges and the final scf also converges."""
     name = 'vcrelax_success_external_pressure'
@@ -617,9 +607,7 @@ def test_pw_vcrelax_success_external_pressure(
     assert 'output_trajectory' in results


-def test_pw_vcrelax_failed_charge_wrong(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
-):
+def test_pw_vcrelax_failed_charge_wrong(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
     """Test a `vc-relax` that failed because the integrated charge is different from the expected one."""
     name = 'vcrelax_failed_charge_wrong'
     entry_point_calc_job = 'quantumespresso.pw'
@@ -639,7 +627,7 @@ def test_pw_vcrelax_failed_charge_wrong(


 def test_pw_vcrelax_failed_symmetry_not_orthogonal(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that failed because original symmetries no longer map onto new structure."""
     name = 'vcrelax_failed_symmetry_not_orthogonal'
@@ -659,9 +647,7 @@ def test_pw_vcrelax_failed_symmetry_not_orthogonal(
     assert 'output_parameters' in results


-def test_pw_vcrelax_failed_bfgs_history(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
-):
+def test_pw_vcrelax_failed_bfgs_history(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
     """Test a `vc-relax` that failed to converge due to two consecutive failures of BFGS."""
     name = 'vcrelax_failed_bfgs_history'
     entry_point_calc_job = 'quantumespresso.pw'
@@ -683,7 +669,7 @@ def test_pw_vcrelax_failed_bfgs_history(


 def test_pw_vcrelax_failed_bfgs_history_already_converged(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that stops due to two consecutive failures of BFGS but is actually converged.
@@ -710,7 +696,7 @@ def test_pw_vcrelax_failed_bfgs_history_already_converged(


 def test_pw_vcrelax_failed_bfgs_history_final_scf(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that failed to converge due to two consecutive failures of BFGS and final SCF fails."""
     name = 'vcrelax_failed_bfgs_history_final_scf'
@@ -732,9 +718,7 @@ def test_pw_vcrelax_failed_bfgs_history_final_scf(
     assert 'output_trajectory' in results


-def test_pw_vcrelax_failed_electronic(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
-):
+def test_pw_vcrelax_failed_electronic(fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs):
     """Test a `vc-relax` that failed to converge during electronic cycle before ionic convergence is reached."""
     name = 'vcrelax_failed_electronic'
     entry_point_calc_job = 'quantumespresso.pw'
@@ -756,7 +740,7 @@ def test_pw_vcrelax_failed_electronic(


 def test_pw_vcrelax_failed_electronic_final_scf(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that failed to converge in electronic cycle in the final SCF after ionic convergence."""
     name = 'vcrelax_failed_electronic_final_scf'
@@ -779,7 +763,7 @@ def test_pw_vcrelax_failed_electronic_final_scf(


 def test_pw_vcrelax_failed_not_converged_final_scf(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that successfully converges in ionic cycle, but thresholds are exceeded in the SCF."""
     name = 'vcrelax_failed_not_converged_final_scf'
@@ -802,7 +786,7 @@ def test_pw_vcrelax_failed_not_converged_final_scf(


 def test_pw_vcrelax_failed_not_converged_nstep(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
+    fixture_localhost, generate_calc_job_node, generate_parser, generate_inputs
 ):
     """Test a `vc-relax` that failed to converge within the maximum number of steps."""
     name = 'vcrelax_failed_not_converged_nstep'
diff --git a/tests/parsers/test_pw2gw.py b/tests/parsers/test_pw2gw.py
index 602fb310e..c05d6770f 100644
--- a/tests/parsers/test_pw2gw.py
+++ b/tests/parsers/test_pw2gw.py
@@ -4,9 +4,7 @@
 from aiida import orm


-def test_pw2gw_default(
-    aiida_profile, fixture_localhost, generate_parser, generate_calc_job_node, data_regression, num_regression
-):
+def test_pw2gw_default(fixture_localhost, generate_parser, generate_calc_job_node, data_regression, num_regression):
     """Test a normal pw2gw.x output."""
     name = 'default'
     entry_point_calc_job = 'quantumespresso.pw2gw'
@@ -28,12 +26,7 @@ def test_pw2gw_default(
     num_regression.check(dict(results['eps'].get_iterarrays()), basename='test_pw2gw_default_eps')


-def test_pw2gw_failed_missing_output(
-    aiida_profile,
-    fixture_localhost,
-    generate_parser,
-    generate_calc_job_node,
-):
+def test_pw2gw_failed_missing_output(fixture_localhost, generate_parser, generate_calc_job_node):
     """Test a pw2gw.x output where file are missing."""
     name = 'failed_missing_output'
     entry_point_calc_job = 'quantumespresso.pw2gw'
@@ -51,12 +44,7 @@ def test_pw2gw_failed_missing_output(
     assert orm.Log.objects.get_logs_for(node)


-def test_pw2gw_failed_missing_stdout(
-    aiida_profile,
-    fixture_localhost,
-    generate_parser,
-    generate_calc_job_node,
-):
+def test_pw2gw_failed_missing_stdout(fixture_localhost, generate_parser, generate_calc_job_node):
     """Test a pw2gw.x output where file are missing."""
     name = 'failed_missing_stdout'
     entry_point_calc_job = 'quantumespresso.pw2gw'
@@ -74,12 +62,7 @@ def test_pw2gw_failed_missing_stdout(
     assert orm.Log.objects.get_logs_for(node)


-def test_pw2gw_failed_corrupted_file(
-    aiida_profile,
-    fixture_localhost,
-    generate_parser,
-    generate_calc_job_node,
-):
+def test_pw2gw_failed_corrupted_file(fixture_localhost, generate_parser, generate_calc_job_node):
     """Test a pw2gw.x output where file are corrupted."""
     name = 'failed_corrupted_file'
     entry_point_calc_job = 'quantumespresso.pw2gw'
diff --git a/tests/parsers/test_pw2wannier90.py b/tests/parsers/test_pw2wannier90.py
index f50d5ec04..a5aabdb3c 100644
--- a/tests/parsers/test_pw2wannier90.py
+++ b/tests/parsers/test_pw2wannier90.py
@@ -35,9 +35,7 @@ def generate_inputs():
     return AttributeDict(inputs)


-def test_pw2wannier90_default(
-    aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression
-):
+def test_pw2wannier90_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression):
     """Test a minimal `pw2wannier.x` calculation.

     The parser only checks for errors in aiida.out, so the reference contents of output_parameters will also be very
diff --git a/tests/parsers/test_q2r.py b/tests/parsers/test_q2r.py
index 635db1455..eedbd934c 100644
--- a/tests/parsers/test_q2r.py
+++ b/tests/parsers/test_q2r.py
@@ -8,7 +8,7 @@ def generate_inputs():
     return {}


-def test_q2r_default(aiida_profile, fixture_localhost, generate_calc_job_node, generate_parser, data_regression):
+def test_q2r_default(fixture_localhost, generate_calc_job_node, generate_parser, data_regression):
     """Test a default `q2r.x` calculation."""
     entry_point_calc_job = 'quantumespresso.q2r'
     entry_point_parser = 'quantumespresso.q2r'
diff --git a/tests/pytest.ini b/tests/pytest.ini
deleted file mode 100644
index 949d40180..000000000
--- a/tests/pytest.ini
+++ /dev/null
@@ -1,6 +0,0 @@
-[pytest]
-filterwarnings =
-    ignore::DeprecationWarning:frozendict:
-    ignore::DeprecationWarning:pkg_resources:
-    ignore::DeprecationWarning:reentry:
-    ignore::DeprecationWarning:sqlalchemy_utils:
diff --git a/tests/tools/calculations/test_pw.py b/tests/tools/calculations/test_pw.py
index d51de501b..cbb50e1d6 100644
--- a/tests/tools/calculations/test_pw.py
+++ b/tests/tools/calculations/test_pw.py
@@ -7,7 +7,7 @@
 from aiida.common.links import LinkType


-def test_pw_get_scf_accuracy(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_pw_get_scf_accuracy(fixture_localhost, generate_calc_job_node):
     """Test the `PwCalculationTools.get_scf_accuracy` method."""
     entry_point_name = 'quantumespresso.pw'
diff --git a/tests/tools/test_immigrate.py b/tests/tools/test_immigrate.py
index a647f2b20..41f308033 100644
--- a/tests/tools/test_immigrate.py
+++ b/tests/tools/test_immigrate.py
@@ -5,9 +5,7 @@
 from aiida_quantumespresso.tools.pwinputparser import create_builder_from_file


-def test_create_builder(
-    aiida_profile, fixture_sandbox, fixture_code, generate_upf_data, generate_calc_job, filepath_tests
-):
+def test_create_builder(fixture_sandbox, fixture_code, generate_upf_data, generate_calc_job, filepath_tests):
     """Test the `create_builder_from_file` method that parses an existing `pw.x` folder into a process builder.

     The input file used is the one generated for `tests.calculations.test_pw.test_pw_default`.
diff --git a/tests/utils/test_bands.py b/tests/utils/test_bands.py
index 428863716..24d25d80a 100644
--- a/tests/utils/test_bands.py
+++ b/tests/utils/test_bands.py
@@ -10,7 +10,7 @@ class TestGetHighestOccupiedBand:
     """Tests for :py:func:`~aiida_quantumespresso.utils.bands.get_highest_occupied_band`."""

     @staticmethod
-    def test_valid_node(aiida_profile):
+    def test_valid_node():
         """Test that the correct exceptions are thrown for incompatible nodes."""

         from aiida.orm import ArrayData, BandsData
@@ -34,7 +34,7 @@ def test_valid_node(aiida_profile):
         get_highest_occupied_band(node)

     @staticmethod
-    def test_threshold(aiida_profile):
+    def test_threshold():
         """Test the `threshold` parameter."""

         from aiida.orm import BandsData
@@ -57,7 +57,7 @@ def test_threshold(aiida_profile):
         get_highest_occupied_band(bands, threshold=threshold)

     @staticmethod
-    def test_spin_unpolarized(aiida_profile):
+    def test_spin_unpolarized():
         """Test the function for a non spin-polarized calculation meaning there will be a single spin channel."""

         from aiida.orm import BandsData
@@ -75,7 +75,7 @@ def test_spin_unpolarized(aiida_profile):
         assert homo == 4

     @staticmethod
-    def test_spin_polarized(aiida_profile):
+    def test_spin_polarized():
         """Test the function for a spin-polarized calculation meaning there will be two spin channels."""

         from aiida.orm import BandsData
diff --git a/tests/utils/test_restart.py b/tests/utils/test_restart.py
index 2b9030e8a..80c15f8c2 100644
--- a/tests/utils/test_restart.py
+++ b/tests/utils/test_restart.py
@@ -12,7 +12,7 @@ def generate_inputs():
     return {'parameters': orm.Dict(dict={}), 'settings': orm.Dict(dict={})}


-def test_restart(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_restart(fixture_localhost, generate_calc_job_node):
     """Test the generics of the `get_builder_restart`."""
     entry_point_calc_job = 'quantumespresso.dos'
     node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, 'default', generate_inputs())
@@ -22,7 +22,7 @@ def test_restart(aiida_profile, fixture_localhost, generate_calc_job_node):
         restart.get_builder_restart(node)


-def test_restart_cp(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_restart_cp(fixture_localhost, generate_calc_job_node):
     """Test the `get_builder_restart` for a completed `CpCalculation`."""
     entry_point_calc_job = 'quantumespresso.cp'
     node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, 'default', generate_inputs())
@@ -41,7 +41,7 @@ def test_restart_cp(aiida_profile, fixture_localhost, generate_calc_job_node):
     assert parameters['CONTROL']['restart_mode'] == 'from_scratch'


-def test_restart_neb(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_restart_neb(fixture_localhost, generate_calc_job_node):
     """Test the `get_builder_restart` for a completed `NebCalculation`."""
     entry_point_calc_job = 'quantumespresso.neb'
     node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, 'default', generate_inputs())
@@ -60,7 +60,7 @@ def test_restart_neb(aiida_profile, fixture_localhost, generate_calc_job_node):
     assert parameters['PATH']['restart_mode'] == 'from_scratch'


-def test_restart_ph(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_restart_ph(fixture_localhost, generate_calc_job_node):
     """Test the `get_builder_restart` for a completed `PhCalculation`."""
     entry_point_calc_job = 'quantumespresso.ph'
     node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, 'default', generate_inputs())
@@ -72,7 +72,7 @@ def test_restart_ph(aiida_profile, fixture_localhost, generate_calc_job_node):
     assert parameters['INPUTPH']['recover'] is True


-def test_restart_pw(aiida_profile, fixture_localhost, generate_calc_job_node):
+def test_restart_pw(fixture_localhost, generate_calc_job_node):
     """Test the `get_builder_restart` for a completed `PwCalculation`."""
     entry_point_calc_job = 'quantumespresso.pw'
     node = generate_calc_job_node(entry_point_calc_job, fixture_localhost, 'default', generate_inputs())
diff --git a/tests/workflows/matdyn/test_base.py b/tests/workflows/matdyn/test_base.py
index 67faf1e0b..0c53992a6 100644
--- a/tests/workflows/matdyn/test_base.py
+++ b/tests/workflows/matdyn/test_base.py
@@ -33,7 +33,7 @@ def _generate_workchain_matdyn(exit_code=None):
     return _generate_workchain_matdyn


-def test_setup(aiida_profile, generate_workchain_matdyn):
+def test_setup(generate_workchain_matdyn):
     """Test `MatdynBaseWorkChain.setup`."""
     process = generate_workchain_matdyn()
     process.setup()
@@ -42,7 +42,7 @@ def test_setup(aiida_profile, generate_workchain_matdyn):
     assert isinstance(process.ctx.inputs, AttributeDict)


-def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_matdyn):
+def test_handle_unrecoverable_failure(generate_workchain_matdyn):
     """Test `MatdynBaseWorkChain.handle_unrecoverable_failure`."""
     process = generate_workchain_matdyn(exit_code=MatdynCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
     process.setup()
diff --git a/tests/workflows/ph/test_base.py b/tests/workflows/ph/test_base.py
index ec84288b7..2372d78e3 100644
--- a/tests/workflows/ph/test_base.py
+++ b/tests/workflows/ph/test_base.py
@@ -33,7 +33,7 @@ def _generate_workchain_ph(exit_code=None):
     return _generate_workchain_ph


-def test_setup(aiida_profile, generate_workchain_ph):
+def test_setup(generate_workchain_ph):
     """Test `PhBaseWorkChain.setup`."""
     process = generate_workchain_ph()
     process.setup()
@@ -42,7 +42,7 @@ def test_setup(aiida_profile, generate_workchain_ph):
     assert isinstance(process.ctx.inputs, AttributeDict)


-def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_ph):
+def test_handle_unrecoverable_failure(generate_workchain_ph):
     """Test `PhBaseWorkChain.handle_unrecoverable_failure`."""
     process = generate_workchain_ph(exit_code=PhCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
     process.setup()
@@ -56,7 +56,7 @@ def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_ph):
     assert result == PhBaseWorkChain.exit_codes.ERROR_UNRECOVERABLE_FAILURE


-def test_handle_out_of_walltime(aiida_profile, generate_workchain_ph):
+def test_handle_out_of_walltime(generate_workchain_ph):
     """Test `PhBaseWorkChain.handle_out_of_walltime`."""
     process = generate_workchain_ph(exit_code=PhCalculation.exit_codes.ERROR_OUT_OF_WALLTIME)
     process.setup()
@@ -69,7 +69,7 @@ def test_handle_out_of_walltime(aiida_profile, generate_workchain_ph):
     assert result.status == 0


-def test_handle_convergence_not_achieved(aiida_profile, generate_workchain_ph):
+def test_handle_convergence_not_achieved(generate_workchain_ph):
     """Test `PhBaseWorkChain.handle_convergence_not_achieved`."""
     process = generate_workchain_ph(exit_code=PhCalculation.exit_codes.ERROR_CONVERGENCE_NOT_REACHED)
     process.setup()
diff --git a/tests/workflows/pw/test_base.py b/tests/workflows/pw/test_base.py
index e7dc83b8c..863e2ab36 100644
--- a/tests/workflows/pw/test_base.py
+++ b/tests/workflows/pw/test_base.py
@@ -36,7 +36,7 @@ def _generate_workchain_pw(exit_code=None):
     return _generate_workchain_pw


-def test_setup(aiida_profile, generate_workchain_pw):
+def test_setup(generate_workchain_pw):
     """Test `PwBaseWorkChain.setup`."""
     process = generate_workchain_pw()
     process.setup()
@@ -45,7 +45,7 @@ def test_setup(aiida_profile, generate_workchain_pw):
     assert isinstance(process.ctx.inputs, AttributeDict)


-def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_pw):
+def test_handle_unrecoverable_failure(generate_workchain_pw):
     """Test `PwBaseWorkChain.handle_unrecoverable_failure`."""
     process = generate_workchain_pw(exit_code=PwCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
     process.setup()
@@ -59,7 +59,7 @@ def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_pw):
     assert result == PwBaseWorkChain.exit_codes.ERROR_UNRECOVERABLE_FAILURE


-def test_handle_out_of_walltime(aiida_profile, generate_workchain_pw):
+def test_handle_out_of_walltime(generate_workchain_pw):
     """Test `PwBaseWorkChain.handle_out_of_walltime`."""
     process = generate_workchain_pw(exit_code=PwCalculation.exit_codes.ERROR_OUT_OF_WALLTIME)
     process.setup()
@@ -72,7 +72,7 @@ def test_handle_out_of_walltime(aiida_profile, generate_workchain_pw):
     assert result.status == 0


-def test_handle_known_unrecoverable_failure(aiida_profile, generate_workchain_pw):
+def test_handle_known_unrecoverable_failure(generate_workchain_pw):
     """Test `PwBaseWorkChain.handle_known_unrecoverable_failure`."""
     process = generate_workchain_pw(exit_code=PwCalculation.exit_codes.ERROR_COMPUTING_CHOLESKY)
     process.setup()
@@ -86,7 +86,7 @@ def test_handle_known_unrecoverable_failure(aiida_profile, generate_workchain_pw
     assert result == PwBaseWorkChain.exit_codes.ERROR_KNOWN_UNRECOVERABLE_FAILURE


-def test_handle_vcrelax_converged_except_final_scf(aiida_profile, generate_workchain_pw):
+def test_handle_vcrelax_converged_except_final_scf(generate_workchain_pw):
     """Test `PwBaseWorkChain.handle_vcrelax_converged_except_final_scf`."""
     process = generate_workchain_pw(
         exit_code=PwCalculation.exit_codes.ERROR_IONIC_CONVERGENCE_REACHED_EXCEPT_IN_FINAL_SCF
@@ -113,9 +113,7 @@ def test_handle_vcrelax_converged_except_final_scf(aiida_profile, generate_workc
         PwCalculation.exit_codes.ERROR_IONIC_CYCLE_BFGS_HISTORY_AND_FINAL_SCF_FAILURE,
     )
 )
-def test_handle_relax_recoverable_ionic_convergence_error(
-    aiida_profile, generate_workchain_pw, generate_structure, exit_code
-):
+def test_handle_relax_recoverable_ionic_convergence_error(generate_workchain_pw, generate_structure, exit_code):
     """Test `PwBaseWorkChain.handle_relax_recoverable_ionic_convergence_error`."""
     from aiida.common.links import LinkType
@@ -136,7 +134,7 @@ def test_handle_relax_recoverable_ionic_convergence_error(
     assert result.status == 0


-def test_sanity_check_no_bands(aiida_profile, generate_workchain_pw):
+def test_sanity_check_no_bands(generate_workchain_pw):
     """Test that `sanity_check_insufficient_bands` does not except if there is no `output_band`, which is optional."""
     process = generate_workchain_pw(exit_code=ExitCode(0))
     process.setup()
diff --git a/tests/workflows/q2r/test_base.py b/tests/workflows/q2r/test_base.py
index 8dcad42ce..0e95a1560 100644
--- a/tests/workflows/q2r/test_base.py
+++ b/tests/workflows/q2r/test_base.py
@@ -33,7 +33,7 @@ def _generate_workchain_q2r(exit_code=None):
     return _generate_workchain_q2r


-def test_setup(aiida_profile, generate_workchain_q2r):
+def test_setup(generate_workchain_q2r):
     """Test `Q2rBaseWorkChain.setup`."""
     process = generate_workchain_q2r()
     process.setup()
@@ -42,7 +42,7 @@ def test_setup(aiida_profile, generate_workchain_q2r):
     assert isinstance(process.ctx.inputs, AttributeDict)


-def test_handle_unrecoverable_failure(aiida_profile, generate_workchain_q2r):
+def test_handle_unrecoverable_failure(generate_workchain_q2r):
     """Test `Q2rBaseWorkChain.handle_unrecoverable_failure`."""
     process = generate_workchain_q2r(exit_code=Q2rCalculation.exit_codes.ERROR_NO_RETRIEVED_FOLDER)
     process.setup()
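
Throughout this patch the explicit `aiida_profile` argument is dropped from the test signatures. That is only safe if the AiiDA test profile is still loaded for every test, which is typically achieved with an autouse fixture in the shared `tests/conftest.py`. A minimal sketch of that pattern follows; it assumes the `aiida_profile` fixture provided by aiida-core's pytest plugin is registered, and the fixture name `autouse_aiida_profile` is purely illustrative (the actual conftest change is not shown in this excerpt).

# tests/conftest.py -- illustrative sketch only, not part of the diff above
import pytest


@pytest.fixture(autouse=True)
def autouse_aiida_profile(aiida_profile):
    """Automatically load the AiiDA test profile for every test, so tests need not request it explicitly."""
    yield aiida_profile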