From ec4ffe623f8f69a18ce681d62bce91a470d1093c Mon Sep 17 00:00:00 2001 From: Ben Brandt Date: Sat, 3 Feb 2024 14:57:46 +0100 Subject: [PATCH] Move clippy config to cargo.toml --- Cargo.toml | 14 ++++++++++++++ benches/chunk_size.rs | 2 ++ src/lib.rs | 12 ------------ tests/text_splitter.rs | 12 ------------ tests/text_splitter_snapshots.rs | 16 ++++++++-------- 5 files changed, 24 insertions(+), 32 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index bf99bf03..9038528f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,6 +56,20 @@ harness = false tokenizers = ["dep:tokenizers"] tiktoken-rs = ["dep:tiktoken-rs"] +[lints.rust] +future_incompatible = "warn" +missing_debug_implementations = "warn" +missing_docs = "warn" +nonstandard_style = "warn" +rust_2018_compatibility = "warn" +rust_2018_idioms = "warn" +rust_2021_compatibility = "warn" +unused = "warn" + +[lints.clippy] +cargo = "warn" +pedantic = "warn" + # Tokenizers and indirect deps can cause slow runtime [profile.dev.package."*"] opt-level = 1 diff --git a/benches/chunk_size.rs b/benches/chunk_size.rs index 8841ad7e..01f7d694 100644 --- a/benches/chunk_size.rs +++ b/benches/chunk_size.rs @@ -1,3 +1,5 @@ +#![allow(missing_docs)] + use std::fs; use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; diff --git a/src/lib.rs b/src/lib.rs index 63c65647..93e78ad4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,16 +1,4 @@ #![doc = include_str!("../README.md")] -#![warn( - clippy::cargo, - clippy::pedantic, - future_incompatible, - missing_debug_implementations, - missing_docs, - nonstandard_style, - rust_2018_compatibility, - rust_2018_idioms, - rust_2021_compatibility, - unused -)] #![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg))] use core::{ diff --git a/tests/text_splitter.rs b/tests/text_splitter.rs index 8aad0145..ea7ee6a6 100644 --- a/tests/text_splitter.rs +++ b/tests/text_splitter.rs @@ -1,15 +1,3 @@ -#![warn( - clippy::pedantic, - future_incompatible, - missing_debug_implementations, - 
missing_docs, - nonstandard_style, - rust_2018_compatibility, - rust_2018_idioms, - rust_2021_compatibility, - unused -)] - use text_splitter::TextSplitter; #[test] diff --git a/tests/text_splitter_snapshots.rs b/tests/text_splitter_snapshots.rs index e7275a63..14ee5b12 100644 --- a/tests/text_splitter_snapshots.rs +++ b/tests/text_splitter_snapshots.rs @@ -61,7 +61,7 @@ fn characters_default() { let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); assert_eq!(chunks.join(""), text); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(Characters.chunk_size(chunk, &chunk_size).fits().is_le()); } insta::assert_yaml_snapshot!(chunks); @@ -78,7 +78,7 @@ fn characters_trim() { let splitter = TextSplitter::default().with_trim_chunks(true); let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(Characters.chunk_size(chunk, &chunk_size).fits().is_le()); } insta::assert_yaml_snapshot!(chunks); @@ -96,7 +96,7 @@ fn characters_range() { let chunks = splitter.chunks(&text, range.clone()).collect::<Vec<_>>(); assert_eq!(chunks.join(""), text); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(Characters.chunk_size(chunk, &range).fits().is_le()); } insta::assert_yaml_snapshot!(chunks); @@ -113,7 +113,7 @@ fn characters_range_trim() { let splitter = TextSplitter::default().with_trim_chunks(true); let chunks = splitter.chunks(&text, range.clone()).collect::<Vec<_>>(); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(Characters.chunk_size(chunk, &range).fits().is_le()); } insta::assert_yaml_snapshot!(chunks); @@ -136,7 +136,7 @@ fn huggingface_default() { let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); assert_eq!(chunks.join(""), text); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(HUGGINGFACE_TOKENIZER .chunk_size(chunk, &chunk_size) .fits() @@ -157,7 +157,7 @@ fn huggingface_trim() { let splitter = 
TextSplitter::new(&*HUGGINGFACE_TOKENIZER).with_trim_chunks(true); let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(HUGGINGFACE_TOKENIZER .chunk_size(chunk, &chunk_size) .fits() @@ -182,7 +182,7 @@ fn tiktoken_default() { let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); assert_eq!(chunks.join(""), text); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(TIKTOKEN_TOKENIZER .chunk_size(chunk, &chunk_size) .fits() @@ -203,7 +203,7 @@ fn tiktoken_trim() { let splitter = TextSplitter::new(&*TIKTOKEN_TOKENIZER).with_trim_chunks(true); let chunks = splitter.chunks(&text, chunk_size).collect::<Vec<_>>(); - for chunk in chunks.iter() { + for chunk in &chunks { assert!(TIKTOKEN_TOKENIZER .chunk_size(chunk, &chunk_size) .fits()