Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Implements Pod and Zeroable for AVX512 types that don't require nightly. Additionally allows for the utilization of must_cast_mut and must_cast_slice_mut in const-contexts on Rust 1.83+ #285

Merged
merged 2 commits into from
Dec 11, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,10 @@ min_const_generics = [] # MSRV 1.51: support arrays via min_const_generics

wasm_simd = [] # MSRV 1.54.0: support wasm simd types
aarch64_simd = [] # MSRV 1.59.0: support aarch64 simd types
avx512_simd = [] # MSRV 1.72.0: support avx512 simd types

must_cast = [] # MSRV 1.64.0: support the `must` module.
must_cast_extra = ["must_cast"] # MSRV 1.83.0: support mutable references in const

# Adds `TransparentWrapper` impls for stdlib types newer than bytemuck's base MSRV.
# Current MSRV 1.74.0: `core::num::Saturating`.
Expand All @@ -62,12 +64,14 @@ track_caller = []
latest_stable_rust = [
# Keep this list sorted.
"aarch64_simd",
"avx512_simd",
"align_offset",
"alloc_uninit",
"const_zeroed",
"derive",
"min_const_generics",
"must_cast",
"must_cast_extra",
"track_caller",
"wasm_simd",
"zeroable_atomics",
Expand Down
11 changes: 11 additions & 0 deletions changelog.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,16 @@
# `bytemuck` changelog

## Unreleased

* Implement `Pod` and `Zeroable` for `core::arch::{x86, x86_64}::__m512`, `__m512d` and `__m512i` without nightly.
Requires Rust 1.72, and is gated through the `avx512_simd` cargo feature.

* Allow the use of `must_cast_mut` and `must_cast_slice_mut` in const contexts.
Requires Rust 1.83, and is gated through the `must_cast_extra` cargo feature.

* internal: introduced the `maybe_const_fn` macro, which defines a function as
  `const fn` or as a plain `fn` depending on a given `cfg` predicate.

## 1.20

* New functions to allocate zeroed `Arc` and `Rc`. Requires Rust 1.82
Expand Down
19 changes: 19 additions & 0 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,25 @@ macro_rules! impl_unsafe_marker_for_simd {
};
}

/// A macro for conditionally const-ifying a function.
///
/// The first attribute on the wrapped function must be a `#[cfg(...)]`: when
/// the predicate holds the function is emitted as a `const fn`, otherwise it
/// is emitted as a plain `fn` with the same attributes, visibility,
/// signature, and body.
/// #[allow(unused)] because currently it is only used with the `must_cast` feature.
#[allow(unused)]
macro_rules! maybe_const_fn {
  (
    #[cfg($cfg_predicate:meta)]
    $(#[$attr:meta])*
    // NOTE: `$(unsafe $($unsafe:lifetime)?)?` optionally captures a literal
    // `unsafe` keyword. The inner `$($unsafe:lifetime)?` is presumably never
    // matched in practice (no lifetime follows `unsafe` in a fn header); it
    // exists so the outer repetition has a metavariable to key off when it is
    // re-emitted below.
    $vis:vis $(unsafe $($unsafe:lifetime)?)? fn $name:ident $($rest:tt)*
  ) => {
    // Predicate holds: emit the `const` flavor of the function.
    #[cfg($cfg_predicate)]
    $(#[$attr])*
    $vis const $(unsafe $($unsafe)?)? fn $name $($rest)*

    // Predicate does not hold: emit the non-`const` fallback.
    #[cfg(not($cfg_predicate))]
    $(#[$attr])*
    $vis $(unsafe $($unsafe)?)? fn $name $($rest)*
  };
}

#[cfg(feature = "extern_crate_std")]
extern crate std;

Expand Down
164 changes: 85 additions & 79 deletions src/must.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,36 +70,39 @@ pub const fn must_cast_ref<A: NoUninit, B: AnyBitPattern>(a: &A) -> &B {
unsafe { &*(a as *const A as *const B) }
}

/// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
///
/// As [`must_cast_ref`], but `mut`.
///
/// ## Examples
/// ```
/// let mut i = 12_u16;
/// // compiles:
/// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
/// ```
/// ```compile_fail,E0080
/// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
/// // fails to compile (alignment requirements increased):
/// let i : &mut u16 = bytemuck::must_cast_mut(bytes);
/// ```
/// ```compile_fail,E0080
/// # let mut i = 12_u16;
/// // fails to compile (size mismatch):
/// let bytes : &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
/// ```
#[inline]
pub fn must_cast_mut<
  A: NoUninit + AnyBitPattern,
  B: NoUninit + AnyBitPattern,
>(
  a: &mut A,
) -> &mut B {
  // Compile-time assertions: `A` and `B` must have equal size and `A` must be
  // at least as aligned as `B`; otherwise evaluating these constants fails
  // and compilation stops (see the `compile_fail` examples above).
  let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
  let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
  // SAFETY: the assertions above make the pointer cast layout-valid, and the
  // exclusive borrow of `a` is handed off to the returned reference.
  unsafe { &mut *(a as *mut A as *mut B) }
}
// Defined through `maybe_const_fn!`: this is a `const fn` when the
// `must_cast_extra` feature (MSRV 1.83, per Cargo.toml) is enabled, and a
// plain `fn` otherwise.
maybe_const_fn! {
  #[cfg(feature = "must_cast_extra")]
  /// Convert a `&mut A` into `&mut B` if infallible, or fail to compile.
  ///
  /// As [`must_cast_ref`], but `mut`.
  ///
  /// ## Examples
  /// ```
  /// let mut i = 12_u16;
  /// // compiles:
  /// let bytes: &mut [u8; 2] = bytemuck::must_cast_mut(&mut i);
  /// ```
  /// ```compile_fail,E0080
  /// # let mut bytes: &mut [u8; 2] = &mut [1, 2];
  /// // fails to compile (alignment requirements increased):
  /// let i : &mut u16 = bytemuck::must_cast_mut(bytes);
  /// ```
  /// ```compile_fail,E0080
  /// # let mut i = 12_u16;
  /// // fails to compile (size mismatch):
  /// let bytes : &mut [u8; 3] = bytemuck::must_cast_mut(&mut i);
  /// ```
  #[inline]
  pub fn must_cast_mut<
    A: NoUninit + AnyBitPattern,
    B: NoUninit + AnyBitPattern,
  >(
    a: &mut A,
  ) -> &mut B {
    // Compile-time assertions: `A` and `B` must have equal size and `A` must
    // be at least as aligned as `B`; otherwise evaluating these constants
    // fails and compilation stops.
    let _ = Cast::<A, B>::ASSERT_SIZE_EQUAL;
    let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
    // SAFETY: the assertions above make the pointer cast layout-valid, and
    // the exclusive borrow of `a` is handed off to the returned reference.
    unsafe { &mut *(a as *mut A as *mut B) }
  }
}

/// Convert `&[A]` into `&[B]` (possibly with a change in length) if
Expand Down Expand Up @@ -154,53 +157,56 @@ pub const fn must_cast_slice<A: NoUninit, B: AnyBitPattern>(a: &[A]) -> &[B] {
unsafe { core::slice::from_raw_parts(a.as_ptr() as *const B, new_len) }
}

/// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
/// infallible, or fail to compile.
///
/// As [`must_cast_slice`], but `&mut`.
///
/// ## Examples
/// ```
/// let mut indices = [1, 2, 3];
/// let indices: &mut [u16] = &mut indices;
/// // compiles:
/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indices);
/// ```
/// ```
/// let zsts: &mut [()] = &mut [(), (), ()];
/// // compiles:
/// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(zsts);
/// ```
/// ```compile_fail,E0080
/// # let mut bytes = [1, 0, 2, 0, 3, 0];
/// # let bytes : &mut [u8] = &mut bytes[..];
/// // fails to compile (bytes.len() might not be a multiple of 2):
/// let byte_pairs : &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
/// ```
/// ```compile_fail,E0080
/// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
/// # let byte_pairs : &mut [[u8; 2]] = &mut byte_pairs[..];
/// // fails to compile (alignment requirements increased):
/// let indices : &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
/// ```
/// ```compile_fail,E0080
/// let bytes: &mut [u8] = &mut [];
/// // fails to compile: (bytes.len() might not be 0)
/// let zsts: &mut [()] = bytemuck::must_cast_slice_mut(bytes);
/// ```
#[inline]
pub fn must_cast_slice_mut<
  A: NoUninit + AnyBitPattern,
  B: NoUninit + AnyBitPattern,
>(
  a: &mut [A],
) -> &mut [B] {
  // Compile-time assertions: element sizes must be compatible (or `A` a ZST)
  // and `A` must be at least as aligned as `B`; otherwise evaluating these
  // constants fails and compilation stops (see `compile_fail` examples).
  let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF_OR_INPUT_ZST;
  let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
  // When element sizes differ, scale the length by the size ratio so the
  // output slice covers exactly the same bytes as the input.
  let new_len = if size_of::<A>() == size_of::<B>() {
    a.len()
  } else {
    a.len() * (size_of::<A>() / size_of::<B>())
  };
  // SAFETY: the assertions above guarantee the pointer and `new_len` describe
  // a valid `[B]` over the same memory, and the exclusive borrow of `a` is
  // handed off to the returned slice.
  unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
}
// Defined through `maybe_const_fn!`: this is a `const fn` when the
// `must_cast_extra` feature (MSRV 1.83, per Cargo.toml) is enabled, and a
// plain `fn` otherwise.
maybe_const_fn! {
  #[cfg(feature = "must_cast_extra")]
  /// Convert `&mut [A]` into `&mut [B]` (possibly with a change in length) if
  /// infallible, or fail to compile.
  ///
  /// As [`must_cast_slice`], but `&mut`.
  ///
  /// ## Examples
  /// ```
  /// let mut indices = [1, 2, 3];
  /// let indices: &mut [u16] = &mut indices;
  /// // compiles:
  /// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(indices);
  /// ```
  /// ```
  /// let zsts: &mut [()] = &mut [(), (), ()];
  /// // compiles:
  /// let bytes: &mut [u8] = bytemuck::must_cast_slice_mut(zsts);
  /// ```
  /// ```compile_fail,E0080
  /// # let mut bytes = [1, 0, 2, 0, 3, 0];
  /// # let bytes : &mut [u8] = &mut bytes[..];
  /// // fails to compile (bytes.len() might not be a multiple of 2):
  /// let byte_pairs : &mut [[u8; 2]] = bytemuck::must_cast_slice_mut(bytes);
  /// ```
  /// ```compile_fail,E0080
  /// # let mut byte_pairs = [[1, 0], [2, 0], [3, 0]];
  /// # let byte_pairs : &mut [[u8; 2]] = &mut byte_pairs[..];
  /// // fails to compile (alignment requirements increased):
  /// let indices : &mut [u16] = bytemuck::must_cast_slice_mut(byte_pairs);
  /// ```
  /// ```compile_fail,E0080
  /// let bytes: &mut [u8] = &mut [];
  /// // fails to compile: (bytes.len() might not be 0)
  /// let zsts: &mut [()] = bytemuck::must_cast_slice_mut(bytes);
  /// ```
  #[inline]
  pub fn must_cast_slice_mut<
    A: NoUninit + AnyBitPattern,
    B: NoUninit + AnyBitPattern,
  >(
    a: &mut [A],
  ) -> &mut [B] {
    // Compile-time assertions: element sizes must be compatible (or `A` a
    // ZST) and `A` must be at least as aligned as `B`; otherwise evaluating
    // these constants fails and compilation stops.
    let _ = Cast::<A, B>::ASSERT_SIZE_MULTIPLE_OF_OR_INPUT_ZST;
    let _ = Cast::<A, B>::ASSERT_ALIGN_GREATER_THAN_EQUAL;
    // When element sizes differ, scale the length by the size ratio so the
    // output slice covers exactly the same bytes as the input.
    let new_len = if size_of::<A>() == size_of::<B>() {
      a.len()
    } else {
      a.len() * (size_of::<A>() / size_of::<B>())
    };
    // SAFETY: the assertions above guarantee the pointer and `new_len`
    // describe a valid `[B]` over the same memory, and the exclusive borrow
    // of `a` is handed off to the returned slice.
    unsafe { core::slice::from_raw_parts_mut(a.as_mut_ptr() as *mut B, new_len) }
  }
}
20 changes: 16 additions & 4 deletions src/pod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -152,18 +152,30 @@ where
{
}

// AVX-512 vector types: `Pod` impls for `__m512`/`__m512d`/`__m512i` are
// available under either the stable `avx512_simd` feature (MSRV 1.72, per
// Cargo.toml) or the nightly-only `nightly_stdsimd` feature.
impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86", any(feature = "nightly_stdsimd", feature = "avx512_simd")))]
  unsafe impl Pod for x86::{
    __m512, __m512d, __m512i
  }
);

impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86_64", any(feature = "nightly_stdsimd", feature = "avx512_simd")))]
  unsafe impl Pod for x86_64::{
    __m512, __m512d, __m512i
  }
);

// The `bf16` vector types (`__m128bh`/`__m256bh`/`__m512bh`) remain gated on
// `nightly_stdsimd` only.
impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
  unsafe impl Pod for x86::{
    __m128bh, __m256bh, __m512bh
  }
);

impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
  unsafe impl Pod for x86_64::{
    __m128bh, __m256bh, __m512bh
  }
);
20 changes: 16 additions & 4 deletions src/zeroable.rs
Original file line number Diff line number Diff line change
Expand Up @@ -232,18 +232,30 @@ where
{
}

// AVX-512 vector types: `Zeroable` impls for `__m512`/`__m512d`/`__m512i` are
// available under either the stable `avx512_simd` feature (MSRV 1.72, per
// Cargo.toml) or the nightly-only `nightly_stdsimd` feature.
impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86", any(feature = "nightly_stdsimd", feature = "avx512_simd")))]
  unsafe impl Zeroable for x86::{
    __m512, __m512d, __m512i
  }
);

impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86_64", any(feature = "nightly_stdsimd", feature = "avx512_simd")))]
  unsafe impl Zeroable for x86_64::{
    __m512, __m512d, __m512i
  }
);

// The `bf16` vector types (`__m128bh`/`__m256bh`/`__m512bh`) remain gated on
// `nightly_stdsimd` only.
impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86", feature = "nightly_stdsimd"))]
  unsafe impl Zeroable for x86::{
    __m128bh, __m256bh, __m512bh
  }
);

impl_unsafe_marker_for_simd!(
  #[cfg(all(target_arch = "x86_64", feature = "nightly_stdsimd"))]
  unsafe impl Zeroable for x86_64::{
    __m128bh, __m256bh, __m512bh
  }
);
Loading