Pylint alerts corrections as part of intervention experiment #420

Open

wants to merge 9 commits into base: master
3 changes: 2 additions & 1 deletion docs/conf.py
@@ -54,7 +54,8 @@
# General information about the project.
now = datetime.datetime.now()
project = "alchemlyb"
author = 'The alchemlyb development team (see <a href="https://raw.githubusercontent.com/alchemistry/alchemlyb/master/AUTHORS">AUTHORS</a>)'
author = 'The alchemlyb development team ' \
+ '(see <a href="https://raw.githubusercontent.com/alchemistry/alchemlyb/master/AUTHORS">AUTHORS</a>)'
copyright = f"2017-{now.year}, " + author


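A quick sanity check, not part of the diff, that the two-literal form evaluates to the same author string as the original one-liner (the trailing space in the first literal matters):

single = 'The alchemlyb development team (see <a href="https://raw.githubusercontent.com/alchemistry/alchemlyb/master/AUTHORS">AUTHORS</a>)'
split = 'The alchemlyb development team ' \
    + '(see <a href="https://raw.githubusercontent.com/alchemistry/alchemlyb/master/AUTHORS">AUTHORS</a>)'
assert split == single  # the concatenation reproduces the original value exactly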
6 changes: 4 additions & 2 deletions src/alchemlyb/parsing/amber.py
@@ -334,15 +334,17 @@ def extract(outfile, T):
if nstep != old_nstep and dvdl is not None and nstep is not None:
if finished:
raise ValueError(
"TI Energy detected after the TIMINGS section. Did you concatenate the output file?"
"TI Energy detected after the TIMINGS section."
+ " Did you concatenate the output file?"
)
file_datum.gradients.append(dvdl)
nensec += 1
old_nstep = nstep
elif line.startswith("MBAR Energy analysis") and file_datum.have_mbar:
if finished:
raise ValueError(
"MBAR Energy detected after the TIMINGS section. Did you concatenate the output file?"
"MBAR Energy detected after the TIMINGS section."
+ " Did you concatenate the output file?"
)
mbar = secp.extract_section(
"^MBAR", "^ ---", file_datum.mbar_lambdas, extra=line
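As a side note, a minimal sketch (not part of the diff) of an alternative way to split these long messages: adjacent string literals inside the parentheses are concatenated at compile time, so the explicit `+` is optional.

# Equivalent message built without '+': Python joins adjacent literals.
msg = (
    "TI Energy detected after the TIMINGS section."
    " Did you concatenate the output file?"
)
assert msg == ("TI Energy detected after the TIMINGS section."
               + " Did you concatenate the output file?")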
80 changes: 51 additions & 29 deletions src/alchemlyb/parsing/namd.py
@@ -83,14 +83,7 @@ def _get_lambdas(fep_files):
f"Lambda values change direction in {fep_file}, relative to the other files: {lambda1} -> {lambda2} (IDWS: {lambda_idws})"
)

# Make sure the lambda2 values are consistent
if lambda1 in lambda_fwd_map and lambda_fwd_map[lambda1] != lambda2:
logger.error(
f"fwd: lambda1 {lambda1} has lambda2 {lambda_fwd_map[lambda1]} in {fep_file} but it has already been {lambda2}"
)
raise ValueError(
"More than one lambda2 value for a particular lambda1"
)
check_lambda2_consistency(fep_file, lambda1, lambda2, lambda_fwd_map)

lambda_fwd_map[lambda1] = lambda2

@@ -118,6 +111,17 @@ def _get_lambdas(fep_files):
return list(sorted(all_lambdas, reverse=not is_ascending))


def check_lambda2_consistency(fep_file, lambda1, lambda2, lambda_fwd_map):
    # Make sure the lambda2 values are consistent
    if lambda1 in lambda_fwd_map and lambda_fwd_map[lambda1] != lambda2:
        logger.error(
            f"fwd: lambda1 {lambda1} has lambda2 {lambda_fwd_map[lambda1]} in {fep_file} but it has already been {lambda2}"
        )
        raise ValueError(
            "More than one lambda2 value for a particular lambda1"
        )


@_init_attrs
def extract_u_nk(fep_files, T):
"""Return reduced potentials `u_nk` from NAMD fepout file(s).
@@ -193,11 +197,7 @@ def extract_u_nk(fep_files, T):
has_idws = False
for line in f:
l = line.strip().split()
# We don't know if IDWS was enabled just from the #Free line, and we might not have
# a #NEW line in this file, so we have to check for the existence of FepE_back lines
# We rely on short-circuit evaluation to avoid the string comparison most of the time
if has_idws is False and l[0] == "FepE_back:":
has_idws = True
has_idws = check_idws(has_idws, l)

# New window, get IDWS lambda if any
# We keep track of lambdas from the #NEW line and if they disagree with the #Free line
@@ -318,24 +318,11 @@ def extract_u_nk(fep_files, T):
None,
)

# append work value from 'dE' column of fepout file
if parsing:
if l[0] == "FepEnergy:":
win_de.append(float(l[6]))
win_ts.append(float(l[1]))
elif l[0] == "FepE_back:":
win_de_back.append(float(l[6]))
win_ts_back.append(float(l[1]))
append_work(l, parsing, win_de, win_de_back, win_ts, win_ts_back)

# Turn parsing on after line 'STARTING COLLECTION OF ENSEMBLE AVERAGE'
if "#STARTING" in l:
parsing = True
parsing = handle_starting(l, parsing)

if len(win_de) != 0 or len(win_de_back) != 0: # pragma: no cover
logger.warning(
'Trailing data without footer line ("#Free energy..."). Interrupted run?'
)
raise ValueError("Last window is truncated")
validate_no_trailing_data(win_de, win_de_back)

if lambda2 in (0.0, 1.0):
# this excludes the IDWS case where a dataframe already exists for both endpoints
@@ -348,6 +335,41 @@
return u_nk


def validate_no_trailing_data(win_de, win_de_back):
    if len(win_de) != 0 or len(win_de_back) != 0:  # pragma: no cover
        logger.warning(
            'Trailing data without footer line ("#Free energy..."). Interrupted run?'
        )
        raise ValueError("Last window is truncated")


def handle_starting(l, parsing):
    # Turn parsing on after line 'STARTING COLLECTION OF ENSEMBLE AVERAGE'
    if "#STARTING" in l:
        parsing = True
    return parsing


def append_work(l, parsing, win_de, win_de_back, win_ts, win_ts_back):
    # append work value from 'dE' column of fepout file
    if parsing:
        if l[0] == "FepEnergy:":
            win_de.append(float(l[6]))
            win_ts.append(float(l[1]))
        elif l[0] == "FepE_back:":
            win_de_back.append(float(l[6]))
            win_ts_back.append(float(l[1]))


def check_idws(has_idws, l):
    # We don't know if IDWS was enabled just from the #Free line, and we might not have
    # a #NEW line in this file, so we have to check for the existence of FepE_back lines
    # We rely on short-circuit evaluation to avoid the string comparison most of the time
    if has_idws is False and l[0] == "FepE_back:":
        has_idws = True
    return has_idws


def extract(fep_files, T):
"""Return reduced potentials `u_nk` from NAMD fepout file(s).

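A rough usage sketch, not part of the diff, of the helpers factored out above. It assumes this branch is installed so the new functions are importable from alchemlyb.parsing.namd; the fepout lines below are fabricated stand-ins.

from alchemlyb.parsing.namd import (
    append_work, check_idws, check_lambda2_consistency, handle_starting,
)

win_de, win_ts, win_de_back, win_ts_back = [], [], [], []
parsing, has_idws = False, False

fepout_lines = [
    "#STARTING COLLECTION OF ENSEMBLE AVERAGE",
    "FepEnergy:  100  0.0 0.0 0.0 0.0  1.23  0.0 0.0",
    "FepE_back:  100  0.0 0.0 0.0 0.0 -0.87  0.0 0.0",
]
for line in fepout_lines:
    l = line.strip().split()
    has_idws = check_idws(has_idws, l)       # flips to True on the FepE_back: line
    append_work(l, parsing, win_de, win_de_back, win_ts, win_ts_back)
    parsing = handle_starting(l, parsing)    # turned on by the #STARTING line

# win_de == [1.23], win_de_back == [-0.87], win_ts == win_ts_back == [100.0]

# The lambda2 bookkeeping keeps its old behaviour: a conflicting lambda2 for
# the same lambda1 still raises ValueError.
fwd_map = {0.1: 0.2}
check_lambda2_consistency("run1.fepout", 0.1, 0.2, fwd_map)  # consistent, silent
# check_lambda2_consistency("run2.fepout", 0.1, 0.3, fwd_map)  # would raise ValueError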
3 changes: 2 additions & 1 deletion src/alchemlyb/parsing/util.py
@@ -78,7 +78,8 @@ def anyopen(datafile: Union[PathLike, IO], mode="r", compression=None):
return compressor(datafile, mode=mode)
else:
raise ValueError(
"`datafile` is a stream, but specified `compression` '{compression}' is not supported"
"`datafile` is a stream"
+ ", but specified `compression` '{compression}' is not supported"
)

# otherwise, treat as a file
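A small sketch, not part of the diff, of the code path that produces this message. It assumes alchemlyb is importable and that "zip" is not among the compression schemes anyopen supports.

import io
from alchemlyb.parsing.util import anyopen

stream = io.BytesIO(b"not actually compressed")
try:
    # A stream plus a compression scheme anyopen does not know about should
    # fall through to the ValueError branch edited above.
    anyopen(stream, mode="r", compression="zip")
except ValueError as err:
    print(err)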