diff --git a/.github/workflows/typos.yml b/.github/workflows/typos.yml new file mode 100644 index 0000000..3055f87 --- /dev/null +++ b/.github/workflows/typos.yml @@ -0,0 +1,19 @@ +# Copied from https://github.com/rerun-io/rerun_template + +# https://github.com/crate-ci/typos +# Add exceptions to `.typos.toml` +# install and run locally: cargo install typos-cli && typos + +name: Spell Check +on: [pull_request] + +jobs: + run: + name: Spell Check + runs-on: ubuntu-latest + steps: + - name: Checkout Actions Repository + uses: actions/checkout@v4 + + - name: Check spelling of entire workspace + uses: crate-ci/typos@master diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ea429e..7c1ea6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,7 +58,7 @@ depending of the size of the first write call. This increases compression ratio and speed for use cases where the data is larger than 64kb. ``` -- Add fluent API style contruction for FrameInfo [#99](https://github.com/PSeitz/lz4_flex/pull/99) (thanks @CosmicHorrorDev) +- Add fluent API style construction for FrameInfo [#99](https://github.com/PSeitz/lz4_flex/pull/99) (thanks @CosmicHorrorDev) ``` This adds in fluent API style construction for FrameInfo. Now you can do @@ -186,7 +186,7 @@ Fix no_std support for safe-decode 0.9.0 (2021-09-25) ================== -Fix unsoundness in the the api in regards to unitialized data. (thanks to @arthurprs) +Fix unsoundness in the api in regards to uninitialized data. (thanks to @arthurprs) * https://github.com/PSeitz/lz4_flex/pull/22 0.8.0 (2021-05-17) diff --git a/README.md b/README.md index aaba466..954c7f8 100644 --- a/README.md +++ b/README.md @@ -110,7 +110,7 @@ Tested on AMD Ryzen 7 5900HX, rustc 1.69.0 (84c898d65 2023-04-16), Manjaro, CPU This fuzz target generates corrupted data for the decompressor. 
`cargo +nightly fuzz run fuzz_decomp_corrupt_block` and `cargo +nightly fuzz run fuzz_decomp_corrupt_frame` -This fuzz target asserts that a compression and decompression rountrip returns the original input. +This fuzz target asserts that a compression and decompression roundtrip returns the original input. `cargo +nightly fuzz run fuzz_roundtrip` and `cargo +nightly fuzz run fuzz_roundtrip_frame` This fuzz target asserts compression with cpp and decompression with lz4_flex returns the original input. diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 0000000..d8271a7 --- /dev/null +++ b/_typos.toml @@ -0,0 +1,2 @@ +[files] +extend-exclude = ["benches/*.txt", "benches/*.json", "benches/*.xml", "tests/tests.rs"] diff --git a/src/block/compress.rs b/src/block/compress.rs index d4308b4..92899fc 100644 --- a/src/block/compress.rs +++ b/src/block/compress.rs @@ -1,7 +1,7 @@ //! The compression algorithm. //! //! We make use of hash tables to find duplicates. This gives a reasonable compression ratio with a -//! high performance. It has fixed memory usage, which contrary to other approachs, makes it less +//! high performance. It has fixed memory usage, which contrary to other approaches, makes it less //! memory hungry. use crate::block::hashtable::HashTable; @@ -929,7 +929,7 @@ mod tests { // and no literal, so a block of 12 bytes can be compressed. 
let aaas: &[u8] = b"aaaaaaaaaaaaaaa"; - // uncompressible + // incompressible let out = compress(&aaas[..12]); assert_gt!(out.len(), 12); // compressible @@ -940,12 +940,12 @@ mod tests { let out = compress(&aaas[..15]); assert_le!(out.len(), 15); - // dict uncompressible + // dict incompressible let out = compress_with_dict(&aaas[..11], aaas); assert_gt!(out.len(), 11); // compressible let out = compress_with_dict(&aaas[..12], aaas); - // According to the spec this _could_ compres, but it doesn't in this lib + // According to the spec this _could_ compress, but it doesn't in this lib // as it aborts compression for any input len < LZ4_MIN_LENGTH assert_gt!(out.len(), 12); let out = compress_with_dict(&aaas[..13], aaas); diff --git a/src/block/decompress.rs b/src/block/decompress.rs index e270e70..dac5541 100644 --- a/src/block/decompress.rs +++ b/src/block/decompress.rs @@ -311,7 +311,7 @@ pub(crate) fn decompress_internal( // to enable an optimized copy of 18 bytes. if offset >= match_length { unsafe { - // _copy_, not copy_non_overlaping, as it may overlap. + // _copy_, not copy_non_overlapping, as it may overlap. // Compiles to the same assembly on x68_64. core::ptr::copy(start_ptr, output_ptr, 18); output_ptr = output_ptr.add(match_length); diff --git a/src/block/decompress_safe.rs b/src/block/decompress_safe.rs index e3b03b0..b435608 100644 --- a/src/block/decompress_safe.rs +++ b/src/block/decompress_safe.rs @@ -329,7 +329,7 @@ pub fn decompress_into_with_dict( } /// Decompress all bytes of `input` into a new vec. The first 4 bytes are the uncompressed size in -/// litte endian. Can be used in conjunction with `compress_prepend_size` +/// little endian. 
Can be used in conjunction with `compress_prepend_size` #[inline] pub fn decompress_size_prepended(input: &[u8]) -> Result<Vec<u8>, DecompressError> { let (uncompressed_size, input) = super::uncompressed_size(input)?; diff --git a/src/block/hashtable.rs b/src/block/hashtable.rs index 0e40c63..89e83dc 100644 --- a/src/block/hashtable.rs +++ b/src/block/hashtable.rs @@ -62,7 +62,7 @@ impl HashTable4KU16 { #[inline] pub fn new() -> Self { // This generates more efficient assembly in contrast to Box::new(slice), because of an - // optmized call alloc_zeroed, vs. alloc + memset + // optimized call alloc_zeroed, vs. alloc + memset // try_into is optimized away let dict = alloc::vec![0; HASHTABLE_SIZE_4K] .into_boxed_slice() diff --git a/src/sink.rs b/src/sink.rs index 721b6a6..59ec997 100644 --- a/src/sink.rs +++ b/src/sink.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use crate::fastcpy::slice_copy; -/// Returns a Sink implementation appropriate for outputing up to `required_capacity` +/// Returns a Sink implementation appropriate for outputting up to `required_capacity` /// bytes at `vec[offset..offset+required_capacity]`. /// It can be either a `SliceSink` (pre-filling the vec with zeroes if necessary) /// when the `safe-decode` feature is enabled, or `VecSink` otherwise. @@ -22,7 +22,7 @@ pub fn vec_sink_for_compression( } } -/// Returns a Sink implementation appropriate for outputing up to `required_capacity` +/// Returns a Sink implementation appropriate for outputting up to `required_capacity` /// bytes at `vec[offset..offset+required_capacity]`. /// It can be either a `SliceSink` (pre-filling the vec with zeroes if necessary) /// when the `safe-decode` feature is enabled, or `VecSink` otherwise.