
Run pyupgrade on blib2to3 and src (#3771)
hauntsaninja authored Jul 9, 2023
1 parent 114e835 · commit 0b4d7d5
Showing 12 changed files with 102 additions and 112 deletions.
6 changes: 3 additions & 3 deletions src/black/files.py
@@ -42,7 +42,7 @@
import colorama # noqa: F401


-@lru_cache()
+@lru_cache
def find_project_root(
srcs: Sequence[str], stdin_filename: Optional[str] = None
) -> Tuple[Path, str]:
@@ -212,7 +212,7 @@ def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
return SpecifierSet(",".join(str(s) for s in specifiers))


-@lru_cache()
+@lru_cache
def find_user_pyproject_toml() -> Path:
r"""Return the path to the top-level user configuration for black.
@@ -232,7 +232,7 @@ def find_user_pyproject_toml():
return user_config_path.resolve()


-@lru_cache()
+@lru_cache
def get_gitignore(root: Path) -> PathSpec:
"""Return a PathSpec matching gitignore content if present."""
gitignore = root / ".gitignore"
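All three hunks in this file are the same pyupgrade rewrite: since Python 3.8, functools.lru_cache can be applied directly as a decorator, so the empty call parentheses are redundant. A minimal sketch of the equivalence (fib is an illustrative stand-in, not code from this commit):

```python
from functools import lru_cache

@lru_cache  # equivalent to @lru_cache() on Python 3.8+; both default to maxsize=128
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))  # 832040; repeated calls with the same argument are served from the cache
```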
2 changes: 1 addition & 1 deletion src/black/handle_ipynb_magics.py
@@ -55,7 +55,7 @@ class Replacement:
src: str


-@lru_cache()
+@lru_cache
def jupyter_dependencies_are_installed(*, verbose: bool, quiet: bool) -> bool:
try:
# isort: off
6 changes: 3 additions & 3 deletions src/blib2to3/pgen2/conv.py
@@ -63,7 +63,7 @@ def parse_graminit_h(self, filename):
try:
f = open(filename)
except OSError as err:
print("Can't open %s: %s" % (filename, err))
print(f"Can't open {filename}: {err}")
return False
self.symbol2number = {}
self.number2symbol = {}
@@ -72,7 +72,7 @@ def parse_graminit_h(self, filename):
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
print(f"{filename}({lineno}): can't parse {line.strip()}")
else:
symbol, number = mo.groups()
number = int(number)
@@ -113,7 +113,7 @@ def parse_graminit_c(self, filename):
try:
f = open(filename)
except OSError as err:
print("Can't open %s: %s" % (filename, err))
print(f"Can't open {filename}: {err}")
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
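The conv.py hunks replace %-interpolation with f-strings, a rewrite pyupgrade only performs when the result is equivalent. A quick check with made-up values (not from this commit):

```python
filename, err = "graminit.h", OSError("No such file or directory")

old = "Can't open %s: %s" % (filename, err)
new = f"Can't open {filename}: {err}"
assert old == new  # %s and f-string interpolation both call str() on the value
```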
29 changes: 13 additions & 16 deletions src/blib2to3/pgen2/driver.py
@@ -28,11 +28,8 @@
Iterable,
List,
Optional,
-Text,
Iterator,
Tuple,
-TypeVar,
-Generic,
Union,
)
from contextlib import contextmanager
@@ -116,7 +113,7 @@ def can_advance(self, to: int) -> bool:
return True


-class Driver(object):
+class Driver:
def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
self.grammar = grammar
if logger is None:
@@ -189,30 +186,30 @@ def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
assert p.rootnode is not None
return p.rootnode

-def parse_stream_raw(self, stream: IO[Text], debug: bool = False) -> NL:
+def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
return self.parse_tokens(tokens, debug)

-def parse_stream(self, stream: IO[Text], debug: bool = False) -> NL:
+def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)

def parse_file(
-self, filename: Path, encoding: Optional[Text] = None, debug: bool = False
+self, filename: Path, encoding: Optional[str] = None, debug: bool = False
) -> NL:
"""Parse a file and return the syntax tree."""
-with io.open(filename, "r", encoding=encoding) as stream:
+with open(filename, encoding=encoding) as stream:
return self.parse_stream(stream, debug)

-def parse_string(self, text: Text, debug: bool = False) -> NL:
+def parse_string(self, text: str, debug: bool = False) -> NL:
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(
io.StringIO(text).readline, grammar=self.grammar
)
return self.parse_tokens(tokens, debug)

-def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
+def _partially_consume_prefix(self, prefix: str, column: int) -> Tuple[str, str]:
lines: List[str] = []
current_line = ""
current_column = 0
@@ -240,7 +237,7 @@ def _partially_consume_prefix(self, prefix: Text, column: int) -> Tuple[Text, Text]:
return "".join(lines), current_line


-def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:
+def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
@@ -252,8 +249,8 @@ def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> Text:


def load_grammar(
-gt: Text = "Grammar.txt",
-gp: Optional[Text] = None,
+gt: str = "Grammar.txt",
+gp: Optional[str] = None,
save: bool = True,
force: bool = False,
logger: Optional[Logger] = None,
@@ -276,7 +273,7 @@ def load_grammar(
return g


-def _newer(a: Text, b: Text) -> bool:
+def _newer(a: str, b: str) -> bool:
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
@@ -286,7 +283,7 @@ def _newer(a: Text, b: Text) -> bool:


def load_packaged_grammar(
-package: str, grammar_source: Text, cache_dir: Optional[Path] = None
+package: str, grammar_source: str, cache_dir: Optional[Path] = None
) -> grammar.Grammar:
"""Normally, loads a pickled grammar by doing
pkgutil.get_data(package, pickled_grammar)
@@ -309,7 +306,7 @@ def load_packaged_grammar(
return g


-def main(*args: Text) -> bool:
+def main(*args: str) -> bool:
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
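driver.py bundles three rewrites: typing.Text, a Python 2 compatibility alias, becomes plain str; the now-unused Text/TypeVar/Generic imports are dropped; and io.open(filename, "r", ...) becomes the builtin open(filename, ...), since "r" is the default mode. A small sketch of why these are behavior-preserving:

```python
import io
from typing import Text

# typing.Text is literally the str type, retained only for Python 2 era code:
assert Text is str

# On Python 3 the builtin open is the same function as io.open:
assert open is io.open
```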
6 changes: 3 additions & 3 deletions src/blib2to3/pgen2/grammar.py
@@ -16,19 +16,19 @@
import os
import pickle
import tempfile
-from typing import Any, Dict, List, Optional, Text, Tuple, TypeVar, Union
+from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union

# Local imports
from . import token

_P = TypeVar("_P", bound="Grammar")
-Label = Tuple[int, Optional[Text]]
+Label = Tuple[int, Optional[str]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
Path = Union[str, "os.PathLike[str]"]


-class Grammar(object):
+class Grammar:
"""Pgen parsing tables conversion class.
Once initialized, this class supplies the grammar tables for the
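grammar.py adds one more pattern: every Python 3 class is new-style, so the explicit object base in class Grammar(object): is a no-op that pyupgrade deletes. An illustrative check with throwaway class names:

```python
class WithBase(object):
    pass

class WithoutBase:
    pass

# The inheritance chain is identical either way: both MROs end at object.
assert WithBase.__mro__[1:] == WithoutBase.__mro__[1:] == (object,)
```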
8 changes: 4 additions & 4 deletions src/blib2to3/pgen2/literals.py
@@ -5,10 +5,10 @@

import re

-from typing import Dict, Match, Text
+from typing import Dict, Match


-simple_escapes: Dict[Text, Text] = {
+simple_escapes: Dict[str, str] = {
"a": "\a",
"b": "\b",
"f": "\f",
@@ -22,7 +22,7 @@
}


-def escape(m: Match[Text]) -> Text:
+def escape(m: Match[str]) -> str:
all, tail = m.group(0, 1)
assert all.startswith("\\")
esc = simple_escapes.get(tail)
@@ -44,7 +44,7 @@ def escape(m: Match[Text]) -> Text:
return chr(i)


-def evalString(s: Text) -> Text:
+def evalString(s: str) -> str:
assert s.startswith("'") or s.startswith('"'), repr(s[:1])
q = s[0]
if s[:3] == q * 3:
22 changes: 10 additions & 12 deletions src/blib2to3/pgen2/parse.py
@@ -9,7 +9,6 @@
how this parsing engine works.
"""
import copy
-from contextlib import contextmanager

# Local imports
@@ -18,7 +17,6 @@
cast,
Any,
Optional,
-Text,
Union,
Tuple,
Dict,
@@ -35,7 +33,7 @@
from blib2to3.pgen2.driver import TokenProxy


-Results = Dict[Text, NL]
+Results = Dict[str, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = List[List[Tuple[int, int]]]
DFAS = Tuple[DFA, Dict[int, int]]
@@ -100,7 +98,7 @@ def backtrack(self) -> Iterator[None]:
finally:
self.parser.is_backtracking = is_backtracking

-def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
+def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
func: Callable[..., Any]
if raw:
func = self.parser._addtoken
Expand All @@ -114,7 +112,7 @@ def add_token(self, tok_type: int, tok_val: Text, raw: bool = False) -> None:
args.insert(0, ilabel)
func(*args)

-def determine_route(self, value: Optional[Text] = None, force: bool = False) -> Optional[int]:
+def determine_route(self, value: Optional[str] = None, force: bool = False) -> Optional[int]:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
@@ -131,18 +129,18 @@ class ParseError(Exception):
"""Exception to signal the parser is stuck."""

def __init__(
-self, msg: Text, type: Optional[int], value: Optional[Text], context: Context
+self, msg: str, type: Optional[int], value: Optional[str], context: Context
) -> None:
Exception.__init__(
self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
)
self.msg = msg
self.type = type
self.value = value
self.context = context


-class Parser(object):
+class Parser:
"""Parser engine.
The proper usage sequence is:
@@ -236,7 +234,7 @@ def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
self.used_names: Set[str] = set()
self.proxy = proxy

-def addtoken(self, type: int, value: Text, context: Context) -> bool:
+def addtoken(self, type: int, value: str, context: Context) -> bool:
"""Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
@@ -284,7 +282,7 @@ def addtoken(self, type: int, value: Text, context: Context) -> bool:

return self._addtoken(ilabel, type, value, context)

-def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
+def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
@@ -329,7 +327,7 @@ def _addtoken(self, ilabel: int, type: int, value: Text, context: Context) -> bool:
# No success finding a transition
raise ParseError("bad input", type, value, context)

-def classify(self, type: int, value: Text, context: Context) -> List[int]:
+def classify(self, type: int, value: str, context: Context) -> List[int]:
"""Turn a token into a label. (Internal)
Depending on whether the value is a soft-keyword or not,
@@ -352,7 +350,7 @@ def classify(self, type: int, value: Text, context: Context) -> List[int]:
raise ParseError("bad token", type, value, context)
return [ilabel]

-def shift(self, type: int, value: Text, newstate: int, context: Context) -> None:
+def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
"""Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
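The ParseError hunk shows how %r placeholders carry over to f-strings: the !r conversion applies repr(), so the new message is character-for-character the same. A minimal check with made-up token values (type deliberately shadows the builtin here, mirroring the parameter name in parse.py):

```python
msg, type, value, context = "bad input", 270, "if", ("", (1, 0))

old = "%s: type=%r, value=%r, context=%r" % (msg, type, value, context)
new = f"{msg}: type={type!r}, value={value!r}, context={context!r}"
assert old == new
```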
(Diffs for the remaining 5 changed files not shown.)