From 2f9de335db92e79244f24f91953e0cd225c7b8d1 Mon Sep 17 00:00:00 2001 From: Charlie Marsh Date: Wed, 22 Feb 2023 14:36:13 -0500 Subject: [PATCH] Upgrade RustPython to match new flattened exports (#3141) --- Cargo.lock | 8 ++-- Cargo.toml | 4 +- .../test/fixtures/flake8_return/RET503.py | 4 ++ crates/ruff/src/ast/helpers.rs | 29 +++++------- crates/ruff/src/ast/operations.rs | 8 +--- crates/ruff/src/autofix/helpers.rs | 18 ++----- crates/ruff/src/checkers/ast.rs | 11 ++--- crates/ruff/src/checkers/logical_lines.rs | 13 +++-- crates/ruff/src/checkers/tokens.rs | 3 +- crates/ruff/src/directives.rs | 44 ++++++++--------- crates/ruff/src/doc_lines.rs | 6 ++- crates/ruff/src/lex/docstring_detection.rs | 2 +- crates/ruff/src/linter.rs | 2 +- crates/ruff/src/rules/eradicate/detection.rs | 3 +- .../src/rules/flake8_annotations/fixes.rs | 8 +--- crates/ruff/src/rules/flake8_commas/rules.rs | 2 +- .../rules/flake8_implicit_str_concat/rules.rs | 3 +- crates/ruff/src/rules/flake8_quotes/rules.rs | 3 +- crates/ruff/src/rules/isort/comments.rs | 6 +-- crates/ruff/src/rules/isort/helpers.rs | 18 +++---- .../rules/isort/rules/add_required_imports.rs | 47 ++++++++++++------- crates/ruff/src/rules/mccabe/rules.rs | 11 +++-- .../src/rules/pycodestyle/logical_lines.rs | 3 +- .../pycodestyle/rules/compound_statements.rs | 4 +- .../src/rules/pycodestyle/rules/errors.rs | 2 +- .../rules/whitespace_before_comment.rs | 5 +- crates/ruff/src/rules/pyflakes/fixes.rs | 9 ++-- .../rules/pyflakes/rules/unused_variable.rs | 24 ++++------ .../pylint/rules/bad_string_format_type.rs | 11 ++--- .../rules/pylint/rules/too_many_branches.rs | 2 +- .../rules/too_many_return_statements.rs | 2 +- .../rules/pylint/rules/too_many_statements.rs | 2 +- crates/ruff/src/rules/pyupgrade/fixes.rs | 6 +-- .../pyupgrade/rules/extraneous_parentheses.rs | 3 +- .../src/rules/pyupgrade/rules/f_strings.rs | 9 ++-- .../rules/pyupgrade/rules/native_literals.rs | 6 +-- .../pyupgrade/rules/outdated_version_block.rs | 9 ++-- .../rules/printf_string_formatting.rs | 13 ++--- .../pyupgrade/rules/redundant_open_modes.rs | 8 +--- crates/ruff/src/rustpython_helpers.rs | 7 ++- crates/ruff/src/source_code/generator.rs | 2 +- crates/ruff/src/source_code/indexer.rs | 14 +++--- crates/ruff/src/source_code/mod.rs | 4 +- crates/ruff/src/source_code/stylist.rs | 8 ++-- crates/ruff_dev/src/print_ast.rs | 2 +- crates/ruff_dev/src/print_tokens.rs | 5 +- .../src/core/rustpython_helpers.rs | 7 ++- crates/ruff_python_formatter/src/trivia.rs | 6 ++- rustfmt.toml | 4 -- 49 files changed, 195 insertions(+), 235 deletions(-) delete mode 100644 rustfmt.toml diff --git a/Cargo.lock b/Cargo.lock index ce89e2e9e4e46d..2f11fdac042df2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2150,7 +2150,7 @@ dependencies = [ [[package]] name = "rustpython-ast" version = "0.2.0" -source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d" +source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e" dependencies = [ "num-bigint", "rustpython-compiler-core", @@ -2159,7 +2159,7 @@ dependencies = [ [[package]] name = "rustpython-common" version = "0.2.0" -source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d" +source = 
"git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e" dependencies = [ "ascii", "bitflags", @@ -2184,7 +2184,7 @@ dependencies = [ [[package]] name = "rustpython-compiler-core" version = "0.2.0" -source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d" +source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e" dependencies = [ "bincode", "bitflags", @@ -2201,7 +2201,7 @@ dependencies = [ [[package]] name = "rustpython-parser" version = "0.2.0" -source = "git+https://github.com/RustPython/RustPython.git?rev=6d71f758170d504817cc47720762c41d9031506d#6d71f758170d504817cc47720762c41d9031506d" +source = "git+https://github.com/RustPython/RustPython.git?rev=edf5995a1e4c366976304ca05432dd27c913054e#edf5995a1e4c366976304ca05432dd27c913054e" dependencies = [ "ahash", "anyhow", diff --git a/Cargo.toml b/Cargo.toml index 63abbc554d720e..b0a30c521da958 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,8 +13,8 @@ libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "f2f0b7a487a87 once_cell = { version = "1.16.0" } regex = { version = "1.6.0" } rustc-hash = { version = "1.1.0" } -rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" } -rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "6d71f758170d504817cc47720762c41d9031506d" } +rustpython-common = { git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" } +rustpython-parser = { features = ["lalrpop"], git = "https://github.com/RustPython/RustPython.git", rev = "edf5995a1e4c366976304ca05432dd27c913054e" } schemars = { version = "0.8.11" } serde = { version = "1.0.147", features = ["derive"] } serde_json = { version = "1.0.87" } diff --git a/crates/ruff/resources/test/fixtures/flake8_return/RET503.py b/crates/ruff/resources/test/fixtures/flake8_return/RET503.py index d584f4c3867875..49a2c2213cfd01 100644 --- a/crates/ruff/resources/test/fixtures/flake8_return/RET503.py +++ b/crates/ruff/resources/test/fixtures/flake8_return/RET503.py @@ -289,3 +289,7 @@ def x(y): return 1 case 1: print() # error + + +def foo(baz: str) -> str: + return baz diff --git a/crates/ruff/src/ast/helpers.rs b/crates/ruff/src/ast/helpers.rs index 8b72d6fe395db3..c8c906e58afeb2 100644 --- a/crates/ruff/src/ast/helpers.rs +++ b/crates/ruff/src/ast/helpers.rs @@ -9,10 +9,7 @@ use rustpython_parser::ast::{ Arguments, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Keyword, KeywordData, Located, Location, MatchCase, Pattern, PatternKind, Stmt, StmtKind, }; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; -use rustpython_parser::token::StringKind; +use rustpython_parser::{lexer, Mode, StringKind, Tok}; use smallvec::{smallvec, SmallVec}; use crate::ast::types::{Binding, BindingKind, CallPath, Range}; @@ -656,7 +653,7 @@ pub fn has_comments(located: &Located, locator: &Locator) -> bool { /// Returns `true` if a [`Range`] includes at least one comment. 
pub fn has_comments_in(range: Range, locator: &Locator) -> bool { - for tok in lexer::make_tokenizer_located(locator.slice(&range), Mode::Module, range.location) { + for tok in lexer::lex_located(locator.slice(&range), Mode::Module, range.location) { match tok { Ok((_, tok, _)) => { if matches!(tok, Tok::Comment(..)) { @@ -871,8 +868,7 @@ pub fn match_parens(start: Location, locator: &Locator) -> Option { let mut fix_start = None; let mut fix_end = None; let mut count: usize = 0; - for (start, tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, start).flatten() - { + for (start, tok, end) in lexer::lex_located(contents, Mode::Module, start).flatten() { if matches!(tok, Tok::Lpar) { if count == 0 { fix_start = Some(start); @@ -904,8 +900,7 @@ pub fn identifier_range(stmt: &Stmt, locator: &Locator) -> Range { | StmtKind::AsyncFunctionDef { .. } ) { let contents = locator.slice(&Range::from_located(stmt)); - for (start, tok, end) in - lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten() + for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten() { if matches!(tok, Tok::Name { .. }) { return Range::new(start, end); @@ -937,7 +932,7 @@ pub fn find_names<'a, T, U>( locator: &'a Locator, ) -> impl Iterator + 'a { let contents = locator.slice(&Range::from_located(located)); - lexer::make_tokenizer_located(contents, Mode::Module, located.location) + lexer::lex_located(contents, Mode::Module, located.location) .flatten() .filter(|(_, tok, _)| matches!(tok, Tok::Name { .. })) .map(|(start, _, end)| Range { @@ -955,7 +950,7 @@ pub fn excepthandler_name_range(handler: &Excepthandler, locator: &Locator) -> O (Some(_), Some(type_)) => { let type_end_location = type_.end_location.unwrap(); let contents = locator.slice(&Range::new(type_end_location, body[0].location)); - let range = lexer::make_tokenizer_located(contents, Mode::Module, type_end_location) + let range = lexer::lex_located(contents, Mode::Module, type_end_location) .flatten() .tuple_windows() .find(|(tok, next_tok)| { @@ -982,7 +977,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range { location: handler.location, end_location: end, }); - let range = lexer::make_tokenizer_located(contents, Mode::Module, handler.location) + let range = lexer::lex_located(contents, Mode::Module, handler.location) .flatten() .find(|(_, kind, _)| matches!(kind, Tok::Except { .. })) .map(|(location, _, end_location)| Range { @@ -996,7 +991,7 @@ pub fn except_range(handler: &Excepthandler, locator: &Locator) -> Range { /// Find f-strings that don't contain any formatted values in a `JoinedStr`. 
pub fn find_useless_f_strings(expr: &Expr, locator: &Locator) -> Vec<(Range, Range)> { let contents = locator.slice(&Range::from_located(expr)); - lexer::make_tokenizer_located(contents, Mode::Module, expr.location) + lexer::lex_located(contents, Mode::Module, expr.location) .flatten() .filter_map(|(location, tok, end_location)| match tok { Tok::String { @@ -1050,7 +1045,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option { .expect("Expected orelse to be non-empty") .location, }); - let range = lexer::make_tokenizer_located(contents, Mode::Module, body_end) + let range = lexer::lex_located(contents, Mode::Module, body_end) .flatten() .find(|(_, kind, _)| matches!(kind, Tok::Else)) .map(|(location, _, end_location)| Range { @@ -1066,7 +1061,7 @@ pub fn else_range(stmt: &Stmt, locator: &Locator) -> Option { /// Return the `Range` of the first `Tok::Colon` token in a `Range`. pub fn first_colon_range(range: Range, locator: &Locator) -> Option { let contents = locator.slice(&range); - let range = lexer::make_tokenizer_located(contents, Mode::Module, range.location) + let range = lexer::lex_located(contents, Mode::Module, range.location) .flatten() .find(|(_, kind, _)| matches!(kind, Tok::Colon)) .map(|(location, _, end_location)| Range { @@ -1096,7 +1091,7 @@ pub fn elif_else_range(stmt: &Stmt, locator: &Locator) -> Option { _ => return None, }; let contents = locator.slice(&Range::new(start, end)); - let range = lexer::make_tokenizer_located(contents, Mode::Module, start) + let range = lexer::lex_located(contents, Mode::Module, start) .flatten() .find(|(_, kind, _)| matches!(kind, Tok::Elif | Tok::Else)) .map(|(location, _, end_location)| Range { @@ -1212,8 +1207,8 @@ pub fn is_logger_candidate(func: &Expr) -> bool { #[cfg(test)] mod tests { use anyhow::Result; + use rustpython_parser as parser; use rustpython_parser::ast::Location; - use rustpython_parser::parser; use crate::ast::helpers::{ elif_else_range, else_range, first_colon_range, identifier_range, match_trailing_content, diff --git a/crates/ruff/src/ast/operations.rs b/crates/ruff/src/ast/operations.rs index b35c4397655f2d..15629d7548ec24 100644 --- a/crates/ruff/src/ast/operations.rs +++ b/crates/ruff/src/ast/operations.rs @@ -1,9 +1,7 @@ use bitflags::bitflags; use rustc_hash::FxHashMap; use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Stmt, StmtKind}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::helpers::any_over_expr; use crate::ast::types::{BindingKind, Scope}; @@ -285,9 +283,7 @@ pub type LocatedCmpop = Located; /// `CPython` doesn't either. This method iterates over the token stream and /// re-identifies [`Cmpop`] nodes, annotating them with valid ranges. 
pub fn locate_cmpops(contents: &str) -> Vec { - let mut tok_iter = lexer::make_tokenizer(contents, Mode::Module) - .flatten() - .peekable(); + let mut tok_iter = lexer::lex(contents, Mode::Module).flatten().peekable(); let mut ops: Vec = vec![]; let mut count: usize = 0; loop { diff --git a/crates/ruff/src/autofix/helpers.rs b/crates/ruff/src/autofix/helpers.rs index 33bdf33b7b11d0..59e31ebd56f86b 100644 --- a/crates/ruff/src/autofix/helpers.rs +++ b/crates/ruff/src/autofix/helpers.rs @@ -4,9 +4,7 @@ use libcst_native::{ Codegen, CodegenState, ImportNames, ParenthesizableWhitespace, SmallStatement, Statement, }; use rustpython_parser::ast::{ExcepthandlerKind, Expr, Keyword, Location, Stmt, StmtKind}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::helpers; use crate::ast::helpers::to_absolute; @@ -372,9 +370,7 @@ pub fn remove_argument( if n_arguments == 1 { // Case 1: there is only one argument. let mut count: usize = 0; - for (start, tok, end) in - lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten() - { + for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() { if matches!(tok, Tok::Lpar) { if count == 0 { fix_start = Some(if remove_parentheses { @@ -406,9 +402,7 @@ pub fn remove_argument( { // Case 2: argument or keyword is _not_ the last node. let mut seen_comma = false; - for (start, tok, end) in - lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten() - { + for (start, tok, end) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() { if seen_comma { if matches!(tok, Tok::NonLogicalNewline) { // Also delete any non-logical newlines after the comma. @@ -431,9 +425,7 @@ pub fn remove_argument( } else { // Case 3: argument or keyword is the last node, so we have to find the last // comma in the stmt. 
- for (start, tok, _) in - lexer::make_tokenizer_located(contents, Mode::Module, stmt_at).flatten() - { + for (start, tok, _) in lexer::lex_located(contents, Mode::Module, stmt_at).flatten() { if start == expr_at { fix_end = Some(expr_end); break; @@ -455,8 +447,8 @@ pub fn remove_argument( #[cfg(test)] mod tests { use anyhow::Result; + use rustpython_parser as parser; use rustpython_parser::ast::Location; - use rustpython_parser::parser; use crate::autofix::helpers::{next_stmt_break, trailing_semicolon}; use crate::source_code::Locator; diff --git a/crates/ruff/src/checkers/ast.rs b/crates/ruff/src/checkers/ast.rs index 19cc9a551d4954..5f45ce71059ee3 100644 --- a/crates/ruff/src/checkers/ast.rs +++ b/crates/ruff/src/checkers/ast.rs @@ -6,19 +6,18 @@ use std::path::Path; use itertools::Itertools; use log::error; use nohash_hasher::IntMap; +use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS}; +use ruff_python::typing::TYPING_EXTENSIONS; use rustc_hash::{FxHashMap, FxHashSet}; use rustpython_common::cformat::{CFormatError, CFormatErrorType}; +use rustpython_parser as parser; use rustpython_parser::ast::{ Arg, Arguments, Comprehension, Constant, Excepthandler, ExcepthandlerKind, Expr, ExprContext, ExprKind, KeywordData, Located, Location, Operator, Pattern, PatternKind, Stmt, StmtKind, Suite, }; -use rustpython_parser::parser; use smallvec::smallvec; -use ruff_python::builtins::{BUILTINS, MAGIC_GLOBALS}; -use ruff_python::typing::TYPING_EXTENSIONS; - use crate::ast::helpers::{ binding_range, collect_call_path, extract_handler_names, from_relative_import, to_module_path, }; @@ -2060,8 +2059,8 @@ where value, .. } => { - // If we're in a class or module scope, then the annotation needs to be available - // at runtime. + // If we're in a class or module scope, then the annotation needs to be + // available at runtime. 
// See: https://docs.python.org/3/reference/simple_stmts.html#annotated-assignment-statements if !self.annotations_future_enabled && matches!( diff --git a/crates/ruff/src/checkers/logical_lines.rs b/crates/ruff/src/checkers/logical_lines.rs index e2702b7f9e39d8..6c2f1a8bf330e8 100644 --- a/crates/ruff/src/checkers/logical_lines.rs +++ b/crates/ruff/src/checkers/logical_lines.rs @@ -152,9 +152,8 @@ pub fn check_logical_lines( #[cfg(test)] mod tests { - use rustpython_parser::lexer; use rustpython_parser::lexer::LexResult; - use rustpython_parser::mode::Mode; + use rustpython_parser::{lexer, Mode}; use crate::checkers::logical_lines::iter_logical_lines; use crate::source_code::Locator; @@ -165,7 +164,7 @@ mod tests { x = 1 y = 2 z = x + 1"#; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let locator = Locator::new(contents); let actual: Vec = iter_logical_lines(&lxr, &locator) .into_iter() @@ -186,7 +185,7 @@ x = [ ] y = 2 z = x + 1"#; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let locator = Locator::new(contents); let actual: Vec = iter_logical_lines(&lxr, &locator) .into_iter() @@ -200,7 +199,7 @@ z = x + 1"#; assert_eq!(actual, expected); let contents = "x = 'abc'"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let locator = Locator::new(contents); let actual: Vec = iter_logical_lines(&lxr, &locator) .into_iter() @@ -213,7 +212,7 @@ z = x + 1"#; def f(): x = 1 f()"#; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let locator = Locator::new(contents); let actual: Vec = iter_logical_lines(&lxr, &locator) .into_iter() @@ -228,7 +227,7 @@ def f(): # Comment goes here. x = 1 f()"#; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let locator = Locator::new(contents); let actual: Vec = iter_logical_lines(&lxr, &locator) .into_iter() diff --git a/crates/ruff/src/checkers/tokens.rs b/crates/ruff/src/checkers/tokens.rs index 5c4d337f721444..6fdfc8ceb9182a 100644 --- a/crates/ruff/src/checkers/tokens.rs +++ b/crates/ruff/src/checkers/tokens.rs @@ -1,6 +1,7 @@ //! Lint rules based on token traversal. 
-use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::lex::docstring_detection::StateMachine; use crate::registry::{Diagnostic, Rule}; diff --git a/crates/ruff/src/directives.rs b/crates/ruff/src/directives.rs index 094074e478cde3..891f79a6bef7dd 100644 --- a/crates/ruff/src/directives.rs +++ b/crates/ruff/src/directives.rs @@ -3,7 +3,8 @@ use bitflags::bitflags; use nohash_hasher::{IntMap, IntSet}; use rustpython_parser::ast::Location; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::registry::LintSource; use crate::settings::Settings; @@ -150,15 +151,14 @@ pub fn extract_isort_directives(lxr: &[LexResult]) -> IsortDirectives { #[cfg(test)] mod tests { use nohash_hasher::{IntMap, IntSet}; - use rustpython_parser::lexer; use rustpython_parser::lexer::LexResult; - use rustpython_parser::mode::Mode; + use rustpython_parser::{lexer, Mode}; use crate::directives::{extract_isort_directives, extract_noqa_line_for}; #[test] fn noqa_extraction() { - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = 1 y = 2 z = x + 1", @@ -167,7 +167,7 @@ z = x + 1", .collect(); assert_eq!(extract_noqa_line_for(&lxr), IntMap::default()); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( " x = 1 y = 2 @@ -177,7 +177,7 @@ z = x + 1", .collect(); assert_eq!(extract_noqa_line_for(&lxr), IntMap::default()); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = 1 y = 2 z = x + 1 @@ -187,7 +187,7 @@ z = x + 1 .collect(); assert_eq!(extract_noqa_line_for(&lxr), IntMap::default()); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = 1 y = 2 @@ -198,7 +198,7 @@ z = x + 1 .collect(); assert_eq!(extract_noqa_line_for(&lxr), IntMap::default()); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = '''abc def ghi @@ -213,7 +213,7 @@ z = x + 1", IntMap::from_iter([(1, 4), (2, 4), (3, 4)]) ); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = 1 y = '''abc def @@ -228,7 +228,7 @@ z = 2", IntMap::from_iter([(2, 5), (3, 5), (4, 5)]) ); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( "x = 1 y = '''abc def @@ -242,7 +242,7 @@ ghi IntMap::from_iter([(2, 5), (3, 5), (4, 5)]) ); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( r#"x = \ 1"#, Mode::Module, @@ -250,7 +250,7 @@ ghi .collect(); assert_eq!(extract_noqa_line_for(&lxr), IntMap::from_iter([(1, 2)])); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( r#"from foo import \ bar as baz, \ qux as quux"#, @@ -262,7 +262,7 @@ ghi IntMap::from_iter([(1, 3), (2, 3)]) ); - let lxr: Vec = lexer::make_tokenizer( + let lxr: Vec = lexer::lex( r#" # Foo from foo import \ @@ -286,7 +286,7 @@ y = \ let contents = "x = 1 y = 2 z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); let contents = "# isort: off @@ -294,7 +294,7 @@ x = 1 y = 2 # isort: on z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!( extract_isort_directives(&lxr).exclusions, IntSet::from_iter([2, 3, 4]) @@ -307,7 +307,7 @@ y = 2 # isort: on z = x + 1 # isort: on"; - let lxr: Vec = lexer::make_tokenizer(contents, 
Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!( extract_isort_directives(&lxr).exclusions, IntSet::from_iter([2, 3, 4, 5]) @@ -317,7 +317,7 @@ z = x + 1 x = 1 y = 2 z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!( extract_isort_directives(&lxr).exclusions, IntSet::from_iter([2, 3, 4]) @@ -327,7 +327,7 @@ z = x + 1"; x = 1 y = 2 z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); let contents = "# isort: off @@ -336,7 +336,7 @@ x = 1 y = 2 # isort: skip_file z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).exclusions, IntSet::default()); } @@ -345,20 +345,20 @@ z = x + 1"; let contents = "x = 1 y = 2 z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).splits, Vec::::new()); let contents = "x = 1 y = 2 # isort: split z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).splits, vec![3]); let contents = "x = 1 y = 2 # isort: split z = x + 1"; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); assert_eq!(extract_isort_directives(&lxr).splits, vec![2]); } } diff --git a/crates/ruff/src/doc_lines.rs b/crates/ruff/src/doc_lines.rs index f185a65f64d74b..bd843f3dfabd78 100644 --- a/crates/ruff/src/doc_lines.rs +++ b/crates/ruff/src/doc_lines.rs @@ -1,10 +1,12 @@ //! Doc line extraction. In this context, a doc line is a line consisting of a //! standalone comment or a constant string statement. -use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite}; -use rustpython_parser::lexer::{LexResult, Tok}; use std::iter::FusedIterator; +use rustpython_parser::ast::{Constant, ExprKind, Stmt, StmtKind, Suite}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; + use crate::ast::visitor; use crate::ast::visitor::Visitor; diff --git a/crates/ruff/src/lex/docstring_detection.rs b/crates/ruff/src/lex/docstring_detection.rs index 01509c20c49f80..6bbed700f969c7 100644 --- a/crates/ruff/src/lex/docstring_detection.rs +++ b/crates/ruff/src/lex/docstring_detection.rs @@ -4,7 +4,7 @@ //! //! TODO(charlie): Consolidate with the existing AST-based docstring extraction. 
-use rustpython_parser::lexer::Tok; +use rustpython_parser::Tok; #[derive(Default)] enum State { diff --git a/crates/ruff/src/linter.rs b/crates/ruff/src/linter.rs index db6e361757f858..2737d64f58256e 100644 --- a/crates/ruff/src/linter.rs +++ b/crates/ruff/src/linter.rs @@ -5,8 +5,8 @@ use anyhow::{anyhow, Result}; use colored::Colorize; use log::error; use rustc_hash::FxHashMap; -use rustpython_parser::error::ParseError; use rustpython_parser::lexer::LexResult; +use rustpython_parser::ParseError; use crate::autofix::fix_file; use crate::checkers::ast::check_ast; diff --git a/crates/ruff/src/rules/eradicate/detection.rs b/crates/ruff/src/rules/eradicate/detection.rs index 6caee16e7dea00..be758b1fd0d6d7 100644 --- a/crates/ruff/src/rules/eradicate/detection.rs +++ b/crates/ruff/src/rules/eradicate/detection.rs @@ -1,6 +1,7 @@ /// See: [eradicate.py](https://github.com/myint/eradicate/blob/98f199940979c94447a461d50d27862b118b282d/eradicate.py) use once_cell::sync::Lazy; use regex::Regex; +use rustpython_parser as parser; static ALLOWLIST_REGEX: Lazy = Lazy::new(|| { Regex::new( @@ -77,7 +78,7 @@ pub fn comment_contains_code(line: &str, task_tags: &[String]) -> bool { } // Finally, compile the source code. - rustpython_parser::parser::parse_program(&line, "").is_ok() + parser::parse_program(&line, "").is_ok() } /// Returns `true` if a line is probably part of some multiline code. diff --git a/crates/ruff/src/rules/flake8_annotations/fixes.rs b/crates/ruff/src/rules/flake8_annotations/fixes.rs index f3a24425a1659c..4b10f31dee0d37 100644 --- a/crates/ruff/src/rules/flake8_annotations/fixes.rs +++ b/crates/ruff/src/rules/flake8_annotations/fixes.rs @@ -1,8 +1,6 @@ use anyhow::{bail, Result}; use rustpython_parser::ast::Stmt; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::fix::Fix; @@ -17,9 +15,7 @@ pub fn add_return_none_annotation(locator: &Locator, stmt: &Stmt) -> Result let mut seen_lpar = false; let mut seen_rpar = false; let mut count: usize = 0; - for (start, tok, ..) in - lexer::make_tokenizer_located(contents, Mode::Module, range.location).flatten() - { + for (start, tok, ..) 
in lexer::lex_located(contents, Mode::Module, range.location).flatten() { if seen_lpar && seen_rpar { if matches!(tok, Tok::Colon) { return Ok(Fix::insertion(" -> None".to_string(), start)); diff --git a/crates/ruff/src/rules/flake8_commas/rules.rs b/crates/ruff/src/rules/flake8_commas/rules.rs index c3baa0486c53d0..b4c8a3524d9027 100644 --- a/crates/ruff/src/rules/flake8_commas/rules.rs +++ b/crates/ruff/src/rules/flake8_commas/rules.rs @@ -1,7 +1,7 @@ use itertools::Itertools; use ruff_macros::{define_violation, derive_message_formats}; use rustpython_parser::lexer::{LexResult, Spanned}; -use rustpython_parser::token::Tok; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::fix::Fix; diff --git a/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs b/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs index b2415cc7618a4b..22a9abc2be4e2c 100644 --- a/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs +++ b/crates/ruff/src/rules/flake8_implicit_str_concat/rules.rs @@ -1,7 +1,8 @@ use itertools::Itertools; use ruff_macros::{define_violation, derive_message_formats}; use rustpython_parser::ast::{Constant, Expr, ExprKind, Operator}; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::registry::Diagnostic; diff --git a/crates/ruff/src/rules/flake8_quotes/rules.rs b/crates/ruff/src/rules/flake8_quotes/rules.rs index 0823ab1f2ac887..3643058ebc42d9 100644 --- a/crates/ruff/src/rules/flake8_quotes/rules.rs +++ b/crates/ruff/src/rules/flake8_quotes/rules.rs @@ -1,6 +1,7 @@ use ruff_macros::{define_violation, derive_message_formats}; use rustpython_parser::ast::Location; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use super::settings::Quote; use crate::ast::types::Range; diff --git a/crates/ruff/src/rules/isort/comments.rs b/crates/ruff/src/rules/isort/comments.rs index 01f4cf98ac3c88..f18f1fcda2b9f1 100644 --- a/crates/ruff/src/rules/isort/comments.rs +++ b/crates/ruff/src/rules/isort/comments.rs @@ -1,9 +1,7 @@ use std::borrow::Cow; use rustpython_parser::ast::Location; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::source_code::Locator; @@ -18,7 +16,7 @@ pub struct Comment<'a> { /// Collect all comments in an import block. pub fn collect_comments<'a>(range: &Range, locator: &'a Locator) -> Vec> { let contents = locator.slice(range); - lexer::make_tokenizer_located(contents, Mode::Module, range.location) + lexer::lex_located(contents, Mode::Module, range.location) .flatten() .filter_map(|(start, tok, end)| { if let Tok::Comment(value) = tok { diff --git a/crates/ruff/src/rules/isort/helpers.rs b/crates/ruff/src/rules/isort/helpers.rs index c0ad42a2e8d076..2cc716c0b275d0 100644 --- a/crates/ruff/src/rules/isort/helpers.rs +++ b/crates/ruff/src/rules/isort/helpers.rs @@ -1,23 +1,18 @@ use rustpython_parser::ast::{Location, Stmt}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; +use super::types::TrailingComma; use crate::ast::helpers::is_docstring_stmt; use crate::ast::types::Range; use crate::source_code::Locator; -use super::types::TrailingComma; - /// Return `true` if a `StmtKind::ImportFrom` statement ends with a magic /// trailing comma. 
pub fn trailing_comma(stmt: &Stmt, locator: &Locator) -> TrailingComma { let contents = locator.slice(&Range::from_located(stmt)); let mut count: usize = 0; let mut trailing_comma = TrailingComma::Absent; - for (_, tok, _) in - lexer::make_tokenizer_located(contents, Mode::Module, stmt.location).flatten() - { + for (_, tok, _) in lexer::lex_located(contents, Mode::Module, stmt.location).flatten() { if matches!(tok, Tok::Lpar) { count += 1; } @@ -114,7 +109,7 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location { // Find the first token that isn't a comment or whitespace. let contents = locator.skip(splice); - for (.., tok, end) in lexer::make_tokenizer_located(contents, Mode::Module, splice).flatten() { + for (.., tok, end) in lexer::lex_located(contents, Mode::Module, splice).flatten() { if matches!(tok, Tok::Comment(..) | Tok::Newline) { splice = end; } else { @@ -128,12 +123,11 @@ pub fn find_splice_location(body: &[Stmt], locator: &Locator) -> Location { #[cfg(test)] mod tests { use anyhow::Result; + use rustpython_parser as parser; use rustpython_parser::ast::Location; - use rustpython_parser::parser; - - use crate::source_code::Locator; use super::find_splice_location; + use crate::source_code::Locator; fn splice_contents(contents: &str) -> Result { let program = parser::parse_program(contents, "")?; diff --git a/crates/ruff/src/rules/isort/rules/add_required_imports.rs b/crates/ruff/src/rules/isort/rules/add_required_imports.rs index 557b83a80e91b4..beeb65c674ee9f 100644 --- a/crates/ruff/src/rules/isort/rules/add_required_imports.rs +++ b/crates/ruff/src/rules/isort/rules/add_required_imports.rs @@ -2,6 +2,7 @@ use std::fmt; use log::error; use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser as parser; use rustpython_parser::ast::{Location, StmtKind, Suite}; use super::super::helpers; @@ -16,13 +17,15 @@ use crate::violation::AlwaysAutofixableViolation; define_violation!( /// ## What it does - /// Adds any required imports, as specified by the user, to the top of the file. + /// Adds any required imports, as specified by the user, to the top of the + /// file. /// /// ## Why is this bad? - /// In some projects, certain imports are required to be present in all files. For - /// example, some projects assume that `from __future__ import annotations` is enabled, - /// and thus require that import to be present in all files. Omitting a "required" import - /// (as specified by the user) can cause errors or unexpected behavior. + /// In some projects, certain imports are required to be present in all + /// files. For example, some projects assume that `from __future__ + /// import annotations` is enabled, and thus require that import to be + /// present in all files. Omitting a "required" import (as specified by + /// the user) can cause errors or unexpected behavior. 
/// /// ## Example /// ```python @@ -210,18 +213,26 @@ pub fn add_required_imports( .required_imports .iter() .flat_map(|required_import| { - let Ok(body) = rustpython_parser::parser::parse_program(required_import, "") else { + let Ok(body) = parser::parse_program(required_import, "") else { error!("Failed to parse required import: `{}`", required_import); return vec![]; }; if body.is_empty() || body.len() > 1 { - error!("Expected require import to contain a single statement: `{}`", required_import); + error!( + "Expected require import to contain a single statement: `{}`", + required_import + ); return vec![]; } match &body[0].node { - StmtKind::ImportFrom { module, names, level } => { - names.iter().filter_map(|name| { + StmtKind::ImportFrom { + module, + names, + level, + } => names + .iter() + .filter_map(|name| { add_required_import( &AnyImport::ImportFrom(ImportFrom { module: module.as_ref().map(String::as_str), @@ -238,10 +249,11 @@ pub fn add_required_imports( settings, autofix, ) - }).collect() - } - StmtKind::Import { names } => { - names.iter().filter_map(|name| { + }) + .collect(), + StmtKind::Import { names } => names + .iter() + .filter_map(|name| { add_required_import( &AnyImport::Import(Import { name: Alias { @@ -256,10 +268,13 @@ pub fn add_required_imports( settings, autofix, ) - }).collect() - } + }) + .collect(), _ => { - error!("Expected required import to be in import-from style: `{}`", required_import); + error!( + "Expected required import to be in import-from style: `{}`", + required_import + ); vec![] } } diff --git a/crates/ruff/src/rules/mccabe/rules.rs b/crates/ruff/src/rules/mccabe/rules.rs index d8aa1f9da7d9e9..7bf0af12ba9048 100644 --- a/crates/ruff/src/rules/mccabe/rules.rs +++ b/crates/ruff/src/rules/mccabe/rules.rs @@ -10,10 +10,11 @@ define_violation!( /// ## What it does /// Checks for functions with a high `McCabe` complexity. /// - /// The `McCabe` complexity of a function is a measure of the complexity of the - /// control flow graph of the function. It is calculated by adding one to the - /// number of decision points in the function. A decision point is a place in - /// the code where the program has a choice of two or more paths to follow. + /// The `McCabe` complexity of a function is a measure of the complexity of + /// the control flow graph of the function. It is calculated by adding + /// one to the number of decision points in the function. A decision + /// point is a place in the code where the program has a choice of two + /// or more paths to follow. /// /// ## Why is this bad? /// Functions with a high complexity are hard to understand and maintain. 
@@ -147,7 +148,7 @@ pub fn function_is_too_complex( #[cfg(test)] mod tests { use anyhow::Result; - use rustpython_parser::parser; + use rustpython_parser as parser; use super::get_complexity_number; diff --git a/crates/ruff/src/rules/pycodestyle/logical_lines.rs b/crates/ruff/src/rules/pycodestyle/logical_lines.rs index a3fd93f0c73fc9..cc84fefd099b3f 100644 --- a/crates/ruff/src/rules/pycodestyle/logical_lines.rs +++ b/crates/ruff/src/rules/pycodestyle/logical_lines.rs @@ -1,6 +1,7 @@ use bitflags::bitflags; use rustpython_parser::ast::Location; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::source_code::Locator; diff --git a/crates/ruff/src/rules/pycodestyle/rules/compound_statements.rs b/crates/ruff/src/rules/pycodestyle/rules/compound_statements.rs index 0ef79114e61352..8d193a6378d6e8 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/compound_statements.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/compound_statements.rs @@ -1,6 +1,6 @@ -use rustpython_parser::lexer::{LexResult, Tok}; - use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::fix::Fix; diff --git a/crates/ruff/src/rules/pycodestyle/rules/errors.rs b/crates/ruff/src/rules/pycodestyle/rules/errors.rs index 24f0badca8ef26..498485fb5566e4 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/errors.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/errors.rs @@ -1,5 +1,5 @@ use ruff_macros::{define_violation, derive_message_formats}; -use rustpython_parser::error::ParseError; +use rustpython_parser::ParseError; use crate::ast::types::Range; use crate::registry::Diagnostic; diff --git a/crates/ruff/src/rules/pycodestyle/rules/whitespace_before_comment.rs b/crates/ruff/src/rules/pycodestyle/rules/whitespace_before_comment.rs index 92dac92e0fe6bc..60f059d27ccfce 100644 --- a/crates/ruff/src/rules/pycodestyle/rules/whitespace_before_comment.rs +++ b/crates/ruff/src/rules/pycodestyle/rules/whitespace_before_comment.rs @@ -1,9 +1,8 @@ #![allow(dead_code)] -use rustpython_parser::ast::Location; -use rustpython_parser::lexer::Tok; - use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::ast::Location; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::registry::DiagnosticKind; diff --git a/crates/ruff/src/rules/pyflakes/fixes.rs b/crates/ruff/src/rules/pyflakes/fixes.rs index b6c47ca2f67611..c3e34669585390 100644 --- a/crates/ruff/src/rules/pyflakes/fixes.rs +++ b/crates/ruff/src/rules/pyflakes/fixes.rs @@ -1,11 +1,8 @@ use anyhow::{bail, Result}; use libcst_native::{Call, Codegen, CodegenState, Dict, DictElement, Expression}; -use rustpython_parser::ast::{Excepthandler, Expr}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - use ruff_python::string::strip_quotes_and_prefixes; +use rustpython_parser::ast::{Excepthandler, Expr}; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::cst::matchers::{match_expr, match_module}; @@ -124,7 +121,7 @@ pub fn remove_exception_handler_assignment( // End of the token just before the `as` to the semicolon. 
let mut prev = None; for (start, tok, end) in - lexer::make_tokenizer_located(contents, Mode::Module, excepthandler.location).flatten() + lexer::lex_located(contents, Mode::Module, excepthandler.location).flatten() { if matches!(tok, Tok::As) { fix_start = prev; diff --git a/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs b/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs index d090b5e23b62f0..532f97d0f90dab 100644 --- a/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs +++ b/crates/ruff/src/rules/pyflakes/rules/unused_variable.rs @@ -1,11 +1,8 @@ use itertools::Itertools; use log::error; -use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::ast::{ExprKind, Located, Stmt, StmtKind}; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::helpers::contains_effect; use crate::ast::types::{BindingKind, Range, RefEquality, ScopeKind}; @@ -21,8 +18,8 @@ define_violation!( /// Checks for the presence of unused variables in function scopes. /// /// ## Why is this bad? - /// A variable that is defined but not used is likely a mistake, and should be - /// removed to avoid confusion. + /// A variable that is defined but not used is likely a mistake, and should + /// be removed to avoid confusion. /// /// If a variable is intentionally defined-but-not-used, it should be /// prefixed with an underscore, or some other value that adheres to the @@ -62,8 +59,8 @@ impl AlwaysAutofixableViolation for UnusedVariable { } } -/// Return the start and end [`Location`] of the token after the next match of the predicate, -/// skipping over any bracketed expressions. +/// Return the start and end [`Location`] of the token after the next match of +/// the predicate, skipping over any bracketed expressions. fn match_token_after(located: &Located, locator: &Locator, f: F) -> Range where F: Fn(Tok) -> bool, @@ -76,7 +73,7 @@ where let mut brace_count = 0; for ((_, tok, _), (start, _, end)) in - lexer::make_tokenizer_located(contents, Mode::Module, located.location) + lexer::lex_located(contents, Mode::Module, located.location) .flatten() .tuple_windows() { @@ -125,8 +122,8 @@ where unreachable!("No token after matched"); } -/// Return the start and end [`Location`] of the token matching the predicate, skipping over -/// any bracketed expressions. +/// Return the start and end [`Location`] of the token matching the predicate, +/// skipping over any bracketed expressions. 
fn match_token(located: &Located, locator: &Locator, f: F) -> Range where F: Fn(Tok) -> bool, @@ -138,8 +135,7 @@ where let mut sqb_count = 0; let mut brace_count = 0; - for (start, tok, end) in - lexer::make_tokenizer_located(contents, Mode::Module, located.location).flatten() + for (start, tok, end) in lexer::lex_located(contents, Mode::Module, located.location).flatten() { match tok { Tok::Lpar => { diff --git a/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs b/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs index 953ae3773bf823..8cc696a363b1a3 100644 --- a/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs +++ b/crates/ruff/src/rules/pylint/rules/bad_string_format_type.rs @@ -1,13 +1,10 @@ use std::str::FromStr; +use ruff_macros::{define_violation, derive_message_formats}; use rustc_hash::FxHashMap; use rustpython_common::cformat::{CFormatPart, CFormatSpec, CFormatStrOrBytes, CFormatString}; use rustpython_parser::ast::{Constant, Expr, ExprKind, Location, Operator}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - -use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::checkers::ast::Checker; @@ -248,9 +245,7 @@ pub fn bad_string_format_type(checker: &mut Checker, expr: &Expr, right: &Expr) // Grab each string segment (in case there's an implicit concatenation). let content = checker.locator.slice(&Range::from_located(expr)); let mut strings: Vec<(Location, Location)> = vec![]; - for (start, tok, end) in - lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten() - { + for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() { if matches!(tok, Tok::String { .. 
}) { strings.push((start, end)); } else if matches!(tok, Tok::Percent) { diff --git a/crates/ruff/src/rules/pylint/rules/too_many_branches.rs b/crates/ruff/src/rules/pylint/rules/too_many_branches.rs index b3ac5aabc8b6d7..202c1837414079 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_branches.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_branches.rs @@ -120,7 +120,7 @@ pub fn too_many_branches( #[cfg(test)] mod tests { use anyhow::Result; - use rustpython_parser::parser; + use rustpython_parser as parser; use super::num_branches; diff --git a/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs b/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs index c336a17f4e03ca..57b2fa1f4dfc20 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_return_statements.rs @@ -55,7 +55,7 @@ pub fn too_many_return_statements( #[cfg(test)] mod tests { use anyhow::Result; - use rustpython_parser::parser; + use rustpython_parser as parser; use super::num_returns; diff --git a/crates/ruff/src/rules/pylint/rules/too_many_statements.rs b/crates/ruff/src/rules/pylint/rules/too_many_statements.rs index 1f61a68c29b934..d72f1c52249841 100644 --- a/crates/ruff/src/rules/pylint/rules/too_many_statements.rs +++ b/crates/ruff/src/rules/pylint/rules/too_many_statements.rs @@ -123,7 +123,7 @@ pub fn too_many_statements( #[cfg(test)] mod tests { use anyhow::Result; - use rustpython_parser::parser; + use rustpython_parser as parser; use super::num_statements; diff --git a/crates/ruff/src/rules/pyupgrade/fixes.rs b/crates/ruff/src/rules/pyupgrade/fixes.rs index 8f06ff35c23930..0039fc5b8dd203 100644 --- a/crates/ruff/src/rules/pyupgrade/fixes.rs +++ b/crates/ruff/src/rules/pyupgrade/fixes.rs @@ -4,9 +4,7 @@ use libcst_native::{ SmallStatement, Statement, Suite, }; use rustpython_parser::ast::{Expr, Keyword, Location}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::autofix::helpers::remove_argument; @@ -111,7 +109,7 @@ pub fn remove_import_members(contents: &str, members: &[&str]) -> String { // Find all Tok::Name tokens that are not preceded by Tok::As, and all // Tok::Comma tokens. 
let mut prev_tok = None; - for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module) + for (start, tok, end) in lexer::lex(contents, Mode::Module) .flatten() .skip_while(|(_, tok, _)| !matches!(tok, Tok::Import)) { diff --git a/crates/ruff/src/rules/pyupgrade/rules/extraneous_parentheses.rs b/crates/ruff/src/rules/pyupgrade/rules/extraneous_parentheses.rs index 3aba3417e25497..b64dd8f497f5bb 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/extraneous_parentheses.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/extraneous_parentheses.rs @@ -1,5 +1,6 @@ use ruff_macros::{define_violation, derive_message_formats}; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::ast::types::Range; use crate::fix::Fix; diff --git a/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs b/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs index 4486ebb0db5240..7a6706c73efc64 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/f_strings.rs @@ -1,13 +1,10 @@ +use ruff_macros::{define_violation, derive_message_formats}; use rustc_hash::FxHashMap; use rustpython_common::format::{ FieldName, FieldNamePart, FieldType, FormatPart, FormatString, FromTemplate, }; use rustpython_parser::ast::{Constant, Expr, ExprKind, KeywordData}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - -use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::checkers::ast::Checker; @@ -131,7 +128,7 @@ fn try_convert_to_f_string(checker: &Checker, expr: &Expr) -> Option { let contents = checker.locator.slice(&Range::from_located(value)); // Tokenize: we need to avoid trying to fix implicit string concatenations. - if lexer::make_tokenizer(contents, Mode::Module) + if lexer::lex(contents, Mode::Module) .flatten() .filter(|(_, tok, _)| matches!(tok, Tok::String { .. })) .count() diff --git a/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs b/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs index 54911b6ba21c16..493568ee9eddfa 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/native_literals.rs @@ -2,9 +2,7 @@ use std::fmt; use ruff_macros::{define_violation, derive_message_formats}; use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use serde::{Deserialize, Serialize}; use crate::ast::types::Range; @@ -119,7 +117,7 @@ pub fn native_literals( // safely remove the outer call in this situation. We're following pyupgrade // here and skip. let arg_code = checker.locator.slice(&Range::from_located(arg)); - if lexer::make_tokenizer_located(arg_code, Mode::Module, arg.location) + if lexer::lex_located(arg_code, Mode::Module, arg.location) .flatten() .filter(|(_, tok, _)| matches!(tok, Tok::String { .. 
})) .count() diff --git a/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs b/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs index 0f621274f9e3c2..7acc45c67d2b65 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/outdated_version_block.rs @@ -2,12 +2,9 @@ use std::cmp::Ordering; use log::error; use num_bigint::{BigInt, Sign}; -use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - use ruff_macros::{define_violation, derive_message_formats}; +use rustpython_parser::ast::{Cmpop, Constant, Expr, ExprKind, Located, Location, Stmt}; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::{Range, RefEquality}; use crate::ast::whitespace::indentation; @@ -69,7 +66,7 @@ fn metadata(locator: &Locator, located: &Located) -> Option let mut else_ = None; for (start, tok, _) in - lexer::make_tokenizer_located(text, Mode::Module, Location::new(located.location.row(), 0)) + lexer::lex_located(text, Mode::Module, Location::new(located.location.row(), 0)) .flatten() .filter(|(_, tok, _)| { !matches!( diff --git a/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs b/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs index 20f114b477e590..149b0db234079d 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/printf_string_formatting.rs @@ -1,16 +1,13 @@ use std::str::FromStr; +use ruff_macros::{define_violation, derive_message_formats}; +use ruff_python::identifiers::is_identifier; +use ruff_python::keyword::KWLIST; use rustpython_common::cformat::{ CConversionFlags, CFormatPart, CFormatPrecision, CFormatQuantity, CFormatString, }; use rustpython_parser::ast::{Constant, Expr, ExprKind, Location}; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; - -use ruff_macros::{define_violation, derive_message_formats}; -use ruff_python::identifiers::is_identifier; -use ruff_python::keyword::KWLIST; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::ast::whitespace::indentation; @@ -321,7 +318,7 @@ pub(crate) fn printf_string_formatting( // Grab each string segment (in case there's an implicit concatenation). 
let mut strings: Vec<(Location, Location)> = vec![]; let mut extension = None; - for (start, tok, end) in lexer::make_tokenizer_located( + for (start, tok, end) in lexer::lex_located( checker.locator.slice(&Range::from_located(expr)), Mode::Module, expr.location, diff --git a/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs b/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs index 2ea1dd7b5e3c8a..204b7e5224f1c2 100644 --- a/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs +++ b/crates/ruff/src/rules/pyupgrade/rules/redundant_open_modes.rs @@ -4,9 +4,7 @@ use anyhow::{anyhow, Result}; use log::error; use ruff_macros::{define_violation, derive_message_formats}; use rustpython_parser::ast::{Constant, Expr, ExprKind, Keyword, Location}; -use rustpython_parser::lexer; -use rustpython_parser::mode::Mode; -use rustpython_parser::token::Tok; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::helpers::find_keyword; use crate::ast::types::Range; @@ -143,9 +141,7 @@ fn create_remove_param_fix(locator: &Locator, expr: &Expr, mode_param: &Expr) -> let mut fix_end: Option = None; let mut is_first_arg: bool = false; let mut delete_first_arg: bool = false; - for (start, tok, end) in - lexer::make_tokenizer_located(content, Mode::Module, expr.location).flatten() - { + for (start, tok, end) in lexer::lex_located(content, Mode::Module, expr.location).flatten() { if start == mode_param.location { if is_first_arg { delete_first_arg = true; diff --git a/crates/ruff/src/rustpython_helpers.rs b/crates/ruff/src/rustpython_helpers.rs index 260fc51491401b..4f5638a423cfa3 100644 --- a/crates/ruff/src/rustpython_helpers.rs +++ b/crates/ruff/src/rustpython_helpers.rs @@ -1,13 +1,12 @@ +use rustpython_parser as parser; use rustpython_parser::ast::{Mod, Suite}; -use rustpython_parser::error::ParseError; use rustpython_parser::lexer::LexResult; -use rustpython_parser::mode::Mode; -use rustpython_parser::{lexer, parser}; +use rustpython_parser::{lexer, Mode, ParseError}; /// Collect tokens up to and including the first error. pub fn tokenize(contents: &str) -> Vec { let mut tokens: Vec = vec![]; - for tok in lexer::make_tokenizer(contents, Mode::Module) { + for tok in lexer::lex(contents, Mode::Module) { let is_err = tok.is_err(); tokens.push(tok); if is_err { diff --git a/crates/ruff/src/source_code/generator.rs b/crates/ruff/src/source_code/generator.rs index d13f4ee32b77fc..33071934bf7efa 100644 --- a/crates/ruff/src/source_code/generator.rs +++ b/crates/ruff/src/source_code/generator.rs @@ -1258,7 +1258,7 @@ impl<'a> Generator<'a> { #[cfg(test)] mod tests { - use rustpython_parser::parser; + use rustpython_parser as parser; use crate::source_code::stylist::{Indentation, LineEnding, Quote}; use crate::source_code::Generator; diff --git a/crates/ruff/src/source_code/indexer.rs b/crates/ruff/src/source_code/indexer.rs index ef410eeffdb1a7..ecc52c7bd87e77 100644 --- a/crates/ruff/src/source_code/indexer.rs +++ b/crates/ruff/src/source_code/indexer.rs @@ -2,7 +2,8 @@ //! are omitted from the AST (e.g., commented lines). 
use rustpython_parser::ast::Location; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; pub struct Indexer { commented_lines: Vec, @@ -49,16 +50,15 @@ impl From<&[LexResult]> for Indexer { #[cfg(test)] mod tests { - use rustpython_parser::lexer; use rustpython_parser::lexer::LexResult; - use rustpython_parser::mode::Mode; + use rustpython_parser::{lexer, Mode}; use crate::source_code::Indexer; #[test] fn continuation() { let contents = r#"x = 1"#; - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let indexer: Indexer = lxr.as_slice().into(); assert_eq!(indexer.continuation_lines(), Vec::::new().as_slice()); @@ -70,7 +70,7 @@ x = 1 y = 2 "# .trim(); - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let indexer: Indexer = lxr.as_slice().into(); assert_eq!(indexer.continuation_lines(), Vec::::new().as_slice()); @@ -90,7 +90,7 @@ if True: ) "# .trim(); - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let indexer: Indexer = lxr.as_slice().into(); assert_eq!(indexer.continuation_lines(), [1, 5, 6, 11]); @@ -110,7 +110,7 @@ x = 1; \ import os "# .trim(); - let lxr: Vec = lexer::make_tokenizer(contents, Mode::Module).collect(); + let lxr: Vec = lexer::lex(contents, Mode::Module).collect(); let indexer: Indexer = lxr.as_slice().into(); assert_eq!(indexer.continuation_lines(), [9, 12]); } diff --git a/crates/ruff/src/source_code/mod.rs b/crates/ruff/src/source_code/mod.rs index 7a1ac1f535a57f..c1d6eff74cfd05 100644 --- a/crates/ruff/src/source_code/mod.rs +++ b/crates/ruff/src/source_code/mod.rs @@ -6,8 +6,8 @@ mod stylist; pub(crate) use generator::Generator; pub(crate) use indexer::Indexer; pub(crate) use locator::Locator; -use rustpython_parser::error::ParseError; -use rustpython_parser::parser; +use rustpython_parser as parser; +use rustpython_parser::ParseError; pub(crate) use stylist::{LineEnding, Stylist}; /// Run round-trip source code generation on a given Python code. diff --git a/crates/ruff/src/source_code/stylist.rs b/crates/ruff/src/source_code/stylist.rs index 34692a2695b535..73a022d8482123 100644 --- a/crates/ruff/src/source_code/stylist.rs +++ b/crates/ruff/src/source_code/stylist.rs @@ -5,9 +5,7 @@ use std::ops::Deref; use once_cell::unsync::OnceCell; use rustpython_parser::ast::Location; -use rustpython_parser::lexer; -use rustpython_parser::lexer::Tok; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode, Tok}; use crate::ast::types::Range; use crate::rules::pydocstyle::helpers::leading_quote; @@ -166,7 +164,7 @@ impl Deref for LineEnding { /// Detect the indentation style of the given tokens. fn detect_indentation(contents: &str, locator: &Locator) -> Option { - for (_start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() { + for (_start, tok, end) in lexer::lex(contents, Mode::Module).flatten() { if let Tok::Indent { .. } = tok { let start = Location::new(end.row(), 0); let whitespace = locator.slice(&Range::new(start, end)); @@ -178,7 +176,7 @@ fn detect_indentation(contents: &str, locator: &Locator) -> Option /// Detect the quotation style of the given tokens. 
fn detect_quote(contents: &str, locator: &Locator) -> Option { - for (start, tok, end) in lexer::make_tokenizer(contents, Mode::Module).flatten() { + for (start, tok, end) in lexer::lex(contents, Mode::Module).flatten() { if let Tok::String { .. } = tok { let content = locator.slice(&Range::new(start, end)); if let Some(pattern) = leading_quote(content) { diff --git a/crates/ruff_dev/src/print_ast.rs b/crates/ruff_dev/src/print_ast.rs index bd32ed687c78fb..d4dd75b32688bf 100644 --- a/crates/ruff_dev/src/print_ast.rs +++ b/crates/ruff_dev/src/print_ast.rs @@ -5,7 +5,7 @@ use std::fs; use std::path::PathBuf; use anyhow::Result; -use rustpython_parser::parser; +use rustpython_parser as parser; #[derive(clap::Args)] pub struct Args { diff --git a/crates/ruff_dev/src/print_tokens.rs b/crates/ruff_dev/src/print_tokens.rs index b10e2787ced89d..af41f32b62aec0 100644 --- a/crates/ruff_dev/src/print_tokens.rs +++ b/crates/ruff_dev/src/print_tokens.rs @@ -5,8 +5,7 @@ use std::fs; use std::path::PathBuf; use anyhow::Result; -use rustpython_parser::lexer; -use rustpython_parser::mode::Mode; +use rustpython_parser::{lexer, Mode}; #[derive(clap::Args)] pub struct Args { @@ -17,7 +16,7 @@ pub struct Args { pub fn main(args: &Args) -> Result<()> { let contents = fs::read_to_string(&args.file)?; - for (_, tok, _) in lexer::make_tokenizer(&contents, Mode::Module).flatten() { + for (_, tok, _) in lexer::lex(&contents, Mode::Module).flatten() { println!("{tok:#?}"); } Ok(()) diff --git a/crates/ruff_python_formatter/src/core/rustpython_helpers.rs b/crates/ruff_python_formatter/src/core/rustpython_helpers.rs index 260fc51491401b..4f5638a423cfa3 100644 --- a/crates/ruff_python_formatter/src/core/rustpython_helpers.rs +++ b/crates/ruff_python_formatter/src/core/rustpython_helpers.rs @@ -1,13 +1,12 @@ +use rustpython_parser as parser; use rustpython_parser::ast::{Mod, Suite}; -use rustpython_parser::error::ParseError; use rustpython_parser::lexer::LexResult; -use rustpython_parser::mode::Mode; -use rustpython_parser::{lexer, parser}; +use rustpython_parser::{lexer, Mode, ParseError}; /// Collect tokens up to and including the first error. pub fn tokenize(contents: &str) -> Vec { let mut tokens: Vec = vec![]; - for tok in lexer::make_tokenizer(contents, Mode::Module) { + for tok in lexer::lex(contents, Mode::Module) { let is_err = tok.is_err(); tokens.push(tok); if is_err { diff --git a/crates/ruff_python_formatter/src/trivia.rs b/crates/ruff_python_formatter/src/trivia.rs index 1bed6d98f54cb6..e10834d2f11e76 100644 --- a/crates/ruff_python_formatter/src/trivia.rs +++ b/crates/ruff_python_formatter/src/trivia.rs @@ -1,6 +1,7 @@ use rustc_hash::FxHashMap; use rustpython_parser::ast::Location; -use rustpython_parser::lexer::{LexResult, Tok}; +use rustpython_parser::lexer::LexResult; +use rustpython_parser::Tok; use crate::core::types::Range; use crate::cst::{Alias, Excepthandler, ExcepthandlerKind, Expr, ExprKind, Stmt, StmtKind}; @@ -45,7 +46,8 @@ pub struct TriviaToken { #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub enum TriviaKind { - /// A Comment that is separated by at least one line break from the preceding token. + /// A Comment that is separated by at least one line break from the + /// preceding token. /// /// # Examples /// diff --git a/rustfmt.toml b/rustfmt.toml deleted file mode 100644 index 5325a858f8cfe5..00000000000000 --- a/rustfmt.toml +++ /dev/null @@ -1,4 +0,0 @@ -edition = "2021" -max_width = 100 -reorder_imports = true -reorder_modules = true
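
For readers tracing the mechanical rename above, here is a minimal sketch (not part of the patch) of a typical call site before and after the upgrade. The `count_comments` helper is hypothetical; the imports and token patterns mirror those used throughout the diff (e.g. `has_comments_in` in `crates/ruff/src/ast/helpers.rs`):

```rust
// Before (rev 6d71f75...):
//
//   use rustpython_parser::lexer;
//   use rustpython_parser::lexer::Tok;
//   use rustpython_parser::mode::Mode;
//
//   lexer::make_tokenizer(contents, Mode::Module)

// After (rev edf5995...): `Mode`, `Tok`, `StringKind`, and `ParseError` are
// re-exported from the crate root, and the lexer entry points are renamed
// `make_tokenizer` -> `lex` and `make_tokenizer_located` -> `lex_located`.
use rustpython_parser::{lexer, Mode, Tok};

/// Count comment tokens in a snippet (hypothetical helper, for illustration).
fn count_comments(contents: &str) -> usize {
    lexer::lex(contents, Mode::Module)
        .flatten() // skip lexical errors, as the call sites in the patch do
        .filter(|(_, tok, _)| matches!(tok, Tok::Comment(..)))
        .count()
}
```

The parser entry points move the same way: call sites now alias the crate itself (`use rustpython_parser as parser;`) and call `parser::parse_program(...)` where they previously called `rustpython_parser::parser::parse_program(...)`.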