Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

code formatting #120

Merged
merged 6 commits into from
Mar 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions .github/workflows/linting.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
name: Static Analysis
on: [push, pull_request, workflow_dispatch]
permissions: read-all

jobs:
python-linting:
runs-on: ubuntu-22.04
steps:
- name: Check out repository
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
persist-credentials: false

- name: Set up Python
uses: actions/setup-python@61a6322f88396a6271a6ee3565807d608ecaddd1 # v4.7.0
with:
python-version: '3.10'

- name: Install Python packages
run: |
pip install --upgrade pip
pip install --upgrade flake8

- name: Run flake8 to verify PEP8-compliance of Python code
run: flake8
4 changes: 2 additions & 2 deletions config/aws_mc.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,8 @@
'name': 'x86_64-skylake-16c-30gb',
'access': ['--partition=x86-64-intel-skylake-node', '--export=NONE'],
'descr': 'Skylake, 16 cores, 30 GB',
},
{
},
{
'name': 'x86_64-zen2-16c-30gb',
'access': ['--partition=x86-64-amd-zen2-node', '--export=NONE'],
'descr': 'Zen2, 16 cores, 30 GB',
Expand Down
14 changes: 7 additions & 7 deletions config/github_actions.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
# ReFrame configuration file that can be used in GitHub Actions with EESSI

from eessi.testsuite.common_config import common_logging_config
from eessi.testsuite.constants import * # noqa: F403
from eessi.testsuite.constants import *


site_configuration = {
Expand All @@ -26,18 +26,18 @@
}
],
'max_jobs': 1
}
]
}
],
}
]
}
],
'environments': [
{
'name': 'default',
'cc': 'cc',
'cxx': '',
'ftn': ''
}
],
}
],
'general': [
{
'purge_environment': True,
Expand Down
74 changes: 37 additions & 37 deletions config/it4i_karolina.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,50 +47,50 @@
],
'launcher': 'mpirun',
# Use --export=None to avoid that login environment is passed down to submitted jobs
'access': ['-p qcpu', '-A DD-23-96', '--export=None'],
'access': ['-p qcpu', '-A DD-23-96', '--export=None'],
'environs': ['default'],
'max_jobs': 120,
'features': [
FEATURES[CPU],
] + list(SCALES.keys()),
'descr': 'CPU Universal Compute Nodes, see https://docs.it4i.cz/karolina/hardware-overview/'
},
# We don't have GPU budget on Karolina at this time
# {
# 'name': 'qgpu',
# 'scheduler': 'slurm',
# 'prepare_cmds': [
# 'source %s' % common_eessi_init(),
# # Pass job environment variables like $PATH, etc., into job steps
# 'export SLURM_EXPORT_ENV=ALL',
# # Needed when using srun launcher
# # 'export SLURM_MPI_TYPE=pmix', # WARNING: this broke the GROMACS on Vega
# # Avoid https://github.com/EESSI/software-layer/issues/136
# # Can be taken out once we don't care about old OpenMPI versions anymore (pre-4.1.1)
# 'export OMPI_MCA_pml=ucx',
# ],
# 'launcher': 'mpirun',
# # Use --export=None to avoid that login environment is passed down to submitted jobs
# 'access': ['-p gpu', '-A DD-23-96', '--export=None'],
# 'environs': ['default'],
# 'max_jobs': 60,
# 'devices': [
# {
# 'type': DEVICE_TYPES[GPU],
# 'num_devices': 8,
# }
# ],
# 'resources': [
# {
# 'name': '_rfm_gpu',
# 'options': ['--gpus-per-node={num_gpus_per_node}'],
# }
# ],
# 'features': [
# FEATURES[GPU],
# ] + list(SCALES.keys()),
# 'descr': 'GPU partition with accelerated nodes, see https://docs.it4i.cz/karolina/hardware-overview/'
# },
# We don't have GPU budget on Karolina at this time
# {
# 'name': 'qgpu',
# 'scheduler': 'slurm',
# 'prepare_cmds': [
# 'source %s' % common_eessi_init(),
# # Pass job environment variables like $PATH, etc., into job steps
# 'export SLURM_EXPORT_ENV=ALL',
# # Needed when using srun launcher
# # 'export SLURM_MPI_TYPE=pmix', # WARNING: this broke the GROMACS on Vega
# # Avoid https://github.com/EESSI/software-layer/issues/136
# # Can be taken out once we don't care about old OpenMPI versions anymore (pre-4.1.1)
# 'export OMPI_MCA_pml=ucx',
# ],
# 'launcher': 'mpirun',
# # Use --export=None to avoid that login environment is passed down to submitted jobs
# 'access': ['-p gpu', '-A DD-23-96', '--export=None'],
# 'environs': ['default'],
# 'max_jobs': 60,
# 'devices': [
# {
# 'type': DEVICE_TYPES[GPU],
# 'num_devices': 8,
# }
# ],
# 'resources': [
# {
# 'name': '_rfm_gpu',
# 'options': ['--gpus-per-node={num_gpus_per_node}'],
# }
# ],
# 'features': [
# FEATURES[GPU],
# ] + list(SCALES.keys()),
# 'descr': 'GPU partition with accelerated nodes, https://docs.it4i.cz/karolina/hardware-overview/'
# },
]
},
],
Expand Down
4 changes: 2 additions & 2 deletions config/izum_vega.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@
],
'launcher': 'mpirun',
# Use --export=None to avoid that login environment is passed down to submitted jobs
'access': ['-p cpu', '--export=None'],
'access': ['-p cpu', '--export=None'],
'environs': ['default'],
'max_jobs': 120,
'resources': [
Expand Down Expand Up @@ -76,7 +76,7 @@
],
'launcher': 'mpirun',
# Use --export=None to avoid that login environment is passed down to submitted jobs
'access': ['-p gpu', '--export=None'],
'access': ['-p gpu', '--export=None'],
'environs': ['default'],
'max_jobs': 60,
'devices': [
Expand Down
18 changes: 9 additions & 9 deletions config/settings_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@
import os

from eessi.testsuite.common_config import common_logging_config, format_perfvars, perflog_format
from eessi.testsuite.constants import * # noqa: F403
from eessi.testsuite.constants import *


site_configuration = {
Expand All @@ -35,10 +35,10 @@
'partitions': [
{
'name': 'cpu_partition',
'descr': 'CPU partition'
'descr': 'CPU partition',
'scheduler': 'slurm',
'launcher': 'mpirun',
'access': ['-p cpu', '--export=None'],
'access': ['-p cpu', '--export=None'],
'prepare_cmds': ['source /cvmfs/pilot.eessi-hpc.org/latest/init/bash'],
'environs': ['default'],
'max_jobs': 4,
Expand All @@ -62,20 +62,20 @@
},
{
'name': 'gpu_partition',
'descr': 'GPU partition'
'descr': 'GPU partition',
'scheduler': 'slurm',
'launcher': 'mpirun',
'access': ['-p gpu', '--export=None'],
'access': ['-p gpu', '--export=None'],
'prepare_cmds': ['source /cvmfs/pilot.eessi-hpc.org/latest/init/bash'],
'environs': ['default'],
'max_jobs': 4,
# We recommend to rely on ReFrame's CPU autodetection,
# and only define the 'processor' field if autodetection fails
# 'processor': {
# 'num_cpus': 72,
# 'num_sockets': 2,
# 'num_cpus_per_socket': 36,
# 'num_cpus_per_core': 1,
# 'num_cpus': 72,
# 'num_sockets': 2,
# 'num_cpus_per_socket': 36,
# 'num_cpus_per_core': 1,
# },
'resources': [
{
Expand Down
6 changes: 3 additions & 3 deletions config/surf_snellius.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@
'scheduler': 'slurm',
'prepare_cmds': ['source %s' % common_eessi_init()],
'launcher': 'mpirun',
'access': ['-p rome', '--export=None'],
'access': ['-p rome', '--export=None'],
'environs': ['default'],
'max_jobs': 120,
'resources': [
Expand All @@ -60,7 +60,7 @@
'scheduler': 'slurm',
'prepare_cmds': ['source %s' % common_eessi_init()],
'launcher': 'mpirun',
'access': ['-p genoa', '--export=None'],
'access': ['-p genoa', '--export=None'],
'environs': ['default'],
'max_jobs': 120,
'resources': [
Expand All @@ -80,7 +80,7 @@
'scheduler': 'slurm',
'prepare_cmds': ['source %s' % common_eessi_init()],
'launcher': 'mpirun',
'access': ['-p gpu', '--export=None'],
'access': ['-p gpu', '--export=None'],
'environs': ['default'],
'max_jobs': 60,
'devices': [
Expand Down
2 changes: 1 addition & 1 deletion config/vsc_hortense.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ def command(self, job):
}
],
'devices': [
{
{
'type': DEVICE_TYPES[GPU],
'num_devices': 4,
}
Expand Down
22 changes: 12 additions & 10 deletions eessi/testsuite/common_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,33 +69,35 @@ def common_logging_config(prefix=None):
],
}]


def common_eessi_init(eessi_version=None):
"""
Returns the full path that should be sourced to initialize the EESSI environment for a given version of EESSI.
If no eessi_version is passed, the EESSI_VERSION environment variable is read. If that is also not defined, default behaviour is to use `latest`.
If no eessi_version is passed, the EESSI_VERSION environment variable is read.
If that is also not defined, default behaviour is to use `latest`.
:param eessi_version: version of EESSI that should be sourced (e.g. '2023.06' or 'latest') [optional]
"""
# Check which EESSI_CVMFS_REPO we are running under
eessi_cvmfs_repo = os.getenv('EESSI_CVMFS_REPO', None)
if eessi_cvmfs_repo is None:
err_msg = "Environment variable 'EESSI_CVMFS_REPO' was not found. "
err_msg += "Did you initialize the EESSI environment before running the test suite?"
err_msg = "Environment variable 'EESSI_CVMFS_REPO' was not found."
err_msg += " Did you initialize the EESSI environment before running the test suite?"
raise ValueError(err_msg)
if eessi_cvmfs_repo == '/cvmfs/pilot.eessi-hpc.org':
if eessi_version == None:
if eessi_version is None:
# Try also EESSI_VERSION for backwards compatibility with previous common_eessi_init implementation
eessi_version = os.getenv('EESSI_PILOT_VERSION', os.getenv('EESSI_VERSION', 'latest'))
else:
# software.eessi.io, or another where we assume the same variable names to be used
if eessi_version == None:
# software.eessi.io, or another where we assume the same variable names to be used
if eessi_version is None:
eessi_version = os.getenv('EESSI_VERSION', None)
# Without EESSI_VERSION, we don't know what to do. There is no default/latest version
# So, report error
if eessi_version == None:
err_msg = "Environment variable 'EESSI_VERSION' was not found. "
err_msg += "Did you initialize the EESSI environment before running the test suite?"
if eessi_version is None:
err_msg = "Environment variable 'EESSI_VERSION' was not found."
err_msg += " Did you initialize the EESSI environment before running the test suite?"
raise ValueError(err_msg)

if eessi_cvmfs_repo == '/cvmfs/pilot.eessi-hpc.org' and eessi_version == 'latest':
return '/cvmfs/pilot.eessi-hpc.org/latest/init/bash'
else:
Expand Down
4 changes: 2 additions & 2 deletions eessi/testsuite/hooks.py
Original file line number Diff line number Diff line change
Expand Up @@ -90,8 +90,8 @@ def assign_tasks_per_compute_unit(test: rfm.RegressionTest, compute_unit: str, n

# Check if either node_part, or default_num_cpus_per_node and default_num_gpus_per_node are set correctly
if not (
type(test.node_part) == int
or (type(test.default_num_cpus_per_node) == int and type(test.default_num_gpus_per_node) == int)
isinstance(test.node_part, int)
or (isinstance(test.default_num_cpus_per_node, int) and isinstance(test.default_num_gpus_per_node, int))
):
raise ValueError(
f'Either node_part ({test.node_part}), or default_num_cpus_per_node ({test.default_num_cpus_per_node}) and'
Expand Down
1 change: 1 addition & 0 deletions eessi/testsuite/tests/apps/gromacs.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
"""

import reframe as rfm
from reframe.core.builtins import parameter, run_after # added only to make the linter happy

from hpctestlib.sciapps.gromacs.benchmarks import gromacs_check

Expand Down
Loading
Loading