Fix remaining Python formatting errors in the code base #1

Merged 2 commits on Jul 8, 2021
2 changes: 1 addition & 1 deletion doc/userdoc/contribute/templates/pynest_api_template.py
@@ -37,7 +37,7 @@
 
 
 def GetConnections(source=None, target=None, synape_model=None, synapse_label=None):
-    """Return a `SynapseCollection` representing the connection identifiers.
+    r"""Return a `SynapseCollection` representing the connection identifiers.
     [[ In a single 'summary line', state what the function does ]]
     [[ All functions should have a docstring with at least a summary line ]]
 
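The `r` prefix added here matters as soon as a docstring contains backslashes (regex fragments, LaTeX markup). A minimal sketch of the failure mode, not taken from the code base:

```python
def fixed_docstring():
    r"""Summary line whose backslashes survive verbatim, e.g. \sum_i w_i.

    With a plain string literal, ``\s`` would be an invalid escape sequence
    (a DeprecationWarning at import time, W605 under pycodestyle), and ``\t``
    would silently become a tab character in the rendered help text.
    """
    return fixed_docstring.__doc__


print(fixed_docstring())  # the backslash in \sum_i is preserved
```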
3 changes: 2 additions & 1 deletion doc/userdoc/contribute/templates/pynest_example_template.py
@@ -18,7 +18,8 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with NEST. If not, see <http://www.gnu.org/licenses/>.
-"""Template demonstrating how to create examples for PyNEST
+
+r"""Template demonstrating how to create examples for PyNEST
 ----------------------------------------------------------------
 [[ Titles should be one line and state what the example does.
 It should begin with a verb in the present tense and include type of model
8 changes: 4 additions & 4 deletions examples/nest/Potjans_2014/spike_analysis.py
@@ -122,13 +122,13 @@
               len(files)) + ' spike files from L' + layer + 'P' + population)
     if files:
         merged_file = open(output, 'w')
-        for f in files:
-            data = open(f, 'r')
+        for file in files:
+            data = open(file, 'r')
             nest_version = next(data)
             backend_version = next(data)
             column_header = next(data)
-            for l in data:
-                a = l.split()
+            for line in data:
+                a = line.split()
                 a[0] = int(a[0])
                 a[1] = float(a[1])
                 raw_first_node_id = Raw_first_node_ids[int(layer)][int(population)]
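The `l` renames in this file (and in `ConnPlotter.py` below) presumably address pycodestyle E741, which flags names easily confused with digits. A standalone sketch:

```python
# E741 flags single-character names that many fonts render almost
# identically to digits: l (el), I (eye) and O (oh).
values = ['1 2.5', '3 4.5']

for l in values:       # E741: ambiguous variable name 'l'
    print(l.split())

for line in values:    # a descriptive name silences the warning
    print(line.split())
```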
32 changes: 16 additions & 16 deletions extras/ConnPlotter/ConnPlotter.py
@@ -227,40 +227,40 @@ def left(self):
         return self._left
 
     @left.setter
-    def left(self, l):
-        self._left = float(l)
+    def left(self, left):
+        self._left = float(left)
 
     @property
     def right(self):
         return self._right
 
     @right.setter
-    def right(self, r):
-        self._right = float(r)
+    def right(self, right):
+        self._right = float(right)
 
     @property
    def top(self):
         return self._top
 
     @top.setter
-    def top(self, t):
-        self._top = float(t)
+    def top(self, top):
+        self._top = float(top)
 
     @property
     def bottom(self):
         return self._bottom
 
     @bottom.setter
-    def bottom(self, b):
-        self._bottom = float(b)
+    def bottom(self, bottom):
+        self._bottom = float(bottom)
 
     @property
     def colbar(self):
         return self._colbar
 
     @colbar.setter
-    def colbar(self, b):
-        self._colbar = float(b)
+    def colbar(self, colbar):
+        self._colbar = float(colbar)
 
     def __init__(self):
         """Set default values"""
@@ -678,7 +678,7 @@ def __init__(self, conninfo, layers, synapses, tgt_model, intensity, tcd, Vmem):

         # get source and target layer
         self.slayer, self.tlayer = conninfo[:2]
-        lnames = [l.name for l in layers]
+        lnames = [layer.name for layer in layers]
 
         if self.slayer not in lnames:
             raise Exception('Unknown source layer "%s".' % self.slayer)
@@ -688,8 +688,8 @@
         # if target layer is singular (extent==(0,0)),
         # we do not create a full object
         self.singular = False
-        for l in layers:
-            if l.name == self.tlayer and l.singular:
+        for layer in layers:
+            if layer.name == self.tlayer and layer.singular:
                 self.singular = True
                 return
 
@@ -1073,7 +1073,7 @@ def _prepareAxes(self, mode, showLegend):
         synsep = 0.5 / 20. * patchmax  # distance between synapse types
 
         # find maximal extents of individual patches, horizontal and vertical
-        maxext = max(_flattened([l.ext for l in self._layers]))
+        maxext = max(_flattened([layer.ext for layer in self._layers]))
 
         patchscale = patchmax / float(maxext)  # determines patch size
 
@@ -1368,10 +1368,10 @@ def __init__(self, lList, cList, synTypes=None, intensity='wp',
         will be sorted in diagram in order of increasing numbers.
         """
         # extract layers to dict mapping name to extent
-        self._layers = [self._LayerProps(l[0], l[3]) for l in lList]
+        self._layers = [self._LayerProps(layer[0], layer[3]) for layer in lList]
 
         # ensure layer names are unique
-        lnames = [l.name for l in self._layers]
+        lnames = [layer.name for layer in self._layers]
         if len(lnames) != len(set(lnames)):
             raise ValueError('Layer names must be unique.')
 
2 changes: 1 addition & 1 deletion extras/check_copyright_headers.py
@@ -71,7 +71,7 @@ def eprint(*args, **kwargs):
 
 # match all file names against these regular expressions. if a match
 # is found the file is excluded from the check
-exclude_file_patterns = ['\.#.*', '#.*', '.*~', '.*.bak']
+exclude_file_patterns = [r'\.#.*', '#.*', '.*~', '.*.bak']
 exclude_file_regex = [re.compile(pattern) for pattern in exclude_file_patterns]
 
 exclude_files = [
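This is the pattern repeated across the remaining files: the regex itself is unchanged, only the string literal becomes raw, so the compiled pattern is identical. A quick demonstration:

```python
import re

# '\.' is not a recognised string escape, so Python currently passes the
# backslash through -- while emitting an "invalid escape sequence" warning
# (W605); future versions may reject it outright.
plain = re.compile('\\.#.*')   # what the deprecated literal '\.#.*' boils down to
raw = re.compile(r'\.#.*')     # raw literal: the backslash is explicit

assert plain.pattern == raw.pattern  # same regex either way
```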
4 changes: 2 additions & 2 deletions extras/check_unused_names.py
@@ -90,8 +90,8 @@ def get_names(fname, pattern):
 for names_file in names_files:
     fname = os.path.join(source_dir, names_file)
 
-    names_header = get_names(fname + ".h", "extern\s+const\s+Name\s+(\w+)\s*;")
-    names_source = get_names(fname + ".cpp", "const\s+Name\s+(\w+)\(.*")
+    names_header = get_names(fname + ".h", r"extern\s+const\s+Name\s+(\w+)\s*;")
+    names_source = get_names(fname + ".cpp", r"const\s+Name\s+(\w+)\(.*")
 
     for h, s in zip(names_header, names_source):
         if h != s:
2 changes: 1 addition & 1 deletion extras/find_imports.py
@@ -118,7 +118,7 @@
 # - We need \s* at the start of the regex because some imports are nested in try-blocks.
 #   This can lead to false positives if a comment line or multiline string line begins with
 #   "import" or "from".
-import_re = re.compile('\s*(import|from)\s+(\w+)')
+import_re = re.compile(r'\s*(import|from)\s+(\w+)')
 
 imports = defaultdict(set)
 for dirpath, _, fnames in os.walk(source_dir):
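The comment above carries the actual reasoning; a quick sketch of how the pattern behaves on top-level and try-block-indented imports:

```python
import re

import_re = re.compile(r'\s*(import|from)\s+(\w+)')

lines = [
    "import numpy",                   # top-level import
    "    import nest",                # import nested inside a try-block
    "from collections import deque",  # 'from' form: module lands in group 2
]
for line in lines:
    match = import_re.match(line)
    print(match.group(1), match.group(2))
# -> import numpy / import nest / from collections
```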
4 changes: 2 additions & 2 deletions extras/help_generator/generate_help.py
@@ -129,8 +129,8 @@
             line = textwrap.dedent(line).strip()
             # Tricks for the blanks
             line = html.escape(line)
-            line = re.sub('^(\s)*- ', ' &bull; ', line)
-            line = re.sub('^(\s)*@note', ' &bull; ', line)
+            line = re.sub(r'^(\s)*- ', ' &bull; ', line)
+            line = re.sub(r'^(\s)*@note', ' &bull; ', line)
             alllines.append(line)
         item = '\n'.join(alllines)
         num += 1
2 changes: 1 addition & 1 deletion extras/help_generator/helpers.py
@@ -46,7 +46,7 @@ def check_ifdef(item, filetext, docstring):
     If there is an 'ifdef' requirement write it to the data.
     """
     ifdefstring = r'(\#ifdef((.*?)\n(.*?)\n*))\#endif'
-    require_reg = re.compile('HAVE\_((.*?)*)\n')
+    require_reg = re.compile(r'HAVE\_((.*?)*)\n')
     # every doc in an #ifdef
     ifdefs = re.findall(ifdefstring, filetext, re.DOTALL)
     for ifitem in ifdefs:
16 changes: 8 additions & 8 deletions extras/help_generator/writers.py
@@ -66,13 +66,13 @@ def write_help_html(doc_dic, helpdir, fname, sli_command_list, keywords):
 
     for key, value in doc_dic.items():
         if key == "FullName":
-            fullname = value.strip("\s\n")
-            fullname = re.sub("(\n)", " <br/> ", fullname)
+            fullname = value.strip(r"\s\n")
+            fullname = re.sub(r"(\n)", " <br/> ", fullname)
 
             htmllist.append('''<div class="doc_header">Name:</div>
             <div class="doc_paragraph">%s - %s</div>''' %
                             (name, fullname))
-            hlpfullname = re.sub(' <br\/> ', '\n', fullname).strip()
+            hlpfullname = re.sub(r' <br\/> ', '\n', fullname).strip()
             hlplist.append('Name: %s - %s\n' % (name, hlpfullname))
 
     # sorting linked keywords
@@ -83,18 +83,18 @@
         if (key != "Name" and key != "FullName" and
                 key != "SeeAlso" and key != "File"):
             # strip whitespace and paragraph breaks at start of entry
-            value = re.sub("^(\s*(\n))*\s*", "", value)
+            value = re.sub(r"^(\s*(\n))*\s*", "", value)
             # strip whitespace and paragraph breaks at end of entry
-            value = re.sub("((\n)\s*)*$", "", value)
-            value = re.sub("(\n)", " <br/> ", value)
-            value = re.sub("(^|\n) ", "&nbsp;", value)
+            value = re.sub(r"((\n)\s*)*$", "", value)
+            value = re.sub(r"(\n)", " <br/> ", value)
+            value = re.sub(r"(^|\n) ", "&nbsp;", value)
             htmllist.append('<div class="doc_header">%s: </div>' % key)
             htmllist.append('<div class="doc_paragraph">%s</div>'
                             % value)
             hlpvalue = re.sub(' <br/> ', '\n', value).rstrip()
             hlpvalue = re.sub('\n ', '\n', hlpvalue).rstrip()
             hlpvalue = hlpvalue.lstrip('\n')
-            hlpvalue = re.sub('\n[\s?]*\n', '\n', hlpvalue).rstrip()
+            hlpvalue = re.sub(r'\n[\s?]*\n', '\n', hlpvalue).rstrip()
             # Better looking .hlp files
             dedented_text = textwrap.dedent(hlpvalue).strip()
             hlpcontent = ('%s:\n\n%s\n\n' % (key, dedented_text))
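Worth keeping apart when reviewing these substitutions: in `re.sub` the pattern argument is a regex, while the replacement is literal text except for backreferences, so only the former needs the raw prefix. A round-trip sketch with made-up input:

```python
import re

text = "Name: foo\nmore"
html_text = re.sub(r"(\n)", " <br/> ", text)    # regex \n still matches a newline
restored = re.sub(r" <br/> ", "\n", html_text)  # replacement "\n" inserts a newline

assert restored == text
```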
2 changes: 1 addition & 1 deletion extras/include_checker.py
@@ -221,7 +221,7 @@ def process_all_sources(path, all_headers, print_suggestion):
     count = 0
     for root, dirs, files in os.walk(path):
         for f in files:
-            if re.search("\.h$|\.hpp$|\.c$|\.cc|\.cpp$", f):
+            if re.search(r"\.h$|\.hpp$|\.c$|\.cc|\.cpp$", f):
                 # valid source file
                 count += process_source(root, f, all_headers, print_suggestion)
         for d in dirs:
4 changes: 2 additions & 2 deletions extras/static_code_analysis.sh
@@ -52,7 +52,7 @@ PYCODESTYLE_IGNORES=${17}    # The list of pycodestyle error and warning codes
 
 # PYCODESTYLE rules to ignore.
 PYCODESTYLE_IGNORES_EXAMPLES="${PYCODESTYLE_IGNORES},E402"
-PYCODESTYLE_IGNORES_TOPO_MANUAL="${PYCODESTYLE_IGNORES_EXAMPLES},E265"
+PYCODESTYLE_IGNORES_USER_MANUAL="${PYCODESTYLE_IGNORES_EXAMPLES},E265"
 
 # PYCODESTYLE rules.
 PYCODESTYLE_MAX_LINE_LENGTH=120

@@ -236,7 +236,7 @@ for f in $FILE_NAMES; do
         print_msg "MSGBLD0190: " "Running PEP8 .......: $f"
         case $f in
           *user_manual_scripts*)
-            IGNORES=$PYCODESTYLE_IGNORES_TOPO_MANUAL
+            IGNORES=$PYCODESTYLE_IGNORES_USER_MANUAL
             ;;
           *examples*)
             IGNORES=$PYCODESTYLE_IGNORES_EXAMPLES
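For reference, the two codes ignored here are standard pycodestyle rules: E402 (module level import not at top of file) and E265 (block comment should start with '# '). A deliberately non-compliant sketch:

```python
import sys

sys.path.insert(0, ".")  # any statement before an import ...
import os                # ... makes this import trigger E402

#E265 would flag this comment: no space after the hash
# E265-clean block comment
print(os.curdir, sys.path[0])
```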
6 changes: 3 additions & 3 deletions pynest/examples/Potjans_2014/helpers.py
@@ -63,7 +63,7 @@ def num_synapses_from_conn_probs(conn_probs, popsize1, popsize2):
 
 
 def postsynaptic_potential_to_current(C_m, tau_m, tau_syn):
-    """ Computes a factor to convert postsynaptic potentials to currents.
+    r""" Computes a factor to convert postsynaptic potentials to currents.
 
     The time course of the postsynaptic potential ``v`` is computed as
     :math: `v(t)=(i*h)(t)`

@@ -391,8 +391,8 @@ def __gather_metadata(path, name):
     # load node IDs
     node_idfile = open(path + 'population_nodeids.dat', 'r')
     node_ids = []
-    for l in node_idfile:
-        node_ids.append(l.split())
+    for node_id in node_idfile:
+        node_ids.append(node_id.split())
     node_ids = np.array(node_ids, dtype='i4')
     return sd_files, sd_names, node_ids
 
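The raw docstring added here matters because of the LaTeX markup further down in it; a hedged sketch (the function name is illustrative, not from the code base):

```python
def latex_docstring_example():
    r"""Illustrate LaTeX markup in a NumPy-style docstring.

    Without the raw prefix, ``\tau_m`` and ``\frac{1}{C_m}`` would be read as
    string escapes: ``\t`` becomes a tab and ``\f`` a form feed, silently
    corrupting the rendered math :math:`\frac{\tau_m}{C_m}`.
    """
    return None
```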