diff --git a/.travis.yml b/.travis.yml
index cc4a61fbea..df9328c490 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -12,6 +12,11 @@ matrix:
       rust: stable
       if: branch != master

+    # does not support atomic loads
+    - env: TARGET=riscv32imc-unknown-none-elf
+      rust: stable
+      if: branch != master
+
     # build docs on master
     - env: TARGET=x86_64-unknown-linux-gnu
       rust: nightly
diff --git a/build.rs b/build.rs
index 493ae4d5d6..098d45b180 100644
--- a/build.rs
+++ b/build.rs
@@ -19,5 +19,26 @@ fn main() -> Result<(), Box<dyn Error>> {
         println!("cargo:rustc-cfg=armv8m_main");
     }

+    // built-in targets with no atomic / CAS support as of nightly-2019-12-17
+    // see the `no-atomics.sh` / `no-cas.sh` script sitting next to this file
+    match &target[..] {
+        "thumbv6m-none-eabi"
+        | "msp430-none-elf"
+        | "riscv32i-unknown-none-elf"
+        | "riscv32imc-unknown-none-elf" => {}
+
+        _ => {
+            println!("cargo:rustc-cfg=has_cas");
+        }
+    };
+
+    match &target[..] {
+        "msp430-none-elf" | "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => {}
+
+        _ => {
+            println!("cargo:rustc-cfg=has_atomics");
+        }
+    };
+
     Ok(())
 }
diff --git a/no-atomics.sh b/no-atomics.sh
new file mode 100644
index 0000000000..697933eee8
--- /dev/null
+++ b/no-atomics.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euo pipefail
+
+main() {
+    IFS='
+'
+    for t in $(rustc --print target-list); do
+        rustc +nightly --print cfg --target $t | grep 'target_has_atomic_load_store=' >/dev/null || echo $t
+    done
+
+}
+
+main
diff --git a/no-cas.sh b/no-cas.sh
new file mode 100644
index 0000000000..9ec576fcf3
--- /dev/null
+++ b/no-cas.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -euo pipefail
+
+main() {
+    IFS='
+'
+    for t in $(rustc --print target-list); do
+        rustc +nightly --print cfg --target $t | grep 'target_has_atomic=' >/dev/null || echo $t
+    done
+
+}
+
+main
diff --git a/src/i.rs b/src/i.rs
index f870a823ea..b8a88a907c 100644
--- a/src/i.rs
+++ b/src/i.rs
@@ -2,6 +2,7 @@

 use core::{marker::PhantomData, mem::MaybeUninit};

+#[cfg(has_atomics)]
 use crate::spsc::{Atomic, MultiCore};

 /// `const-fn` version of [`BinaryHeap`](../binary_heap/struct.BinaryHeap.html)
@@ -16,6 +17,7 @@ pub struct LinearMap<A> {
 }

 /// `const-fn` version of [`spsc::Queue`](../spsc/struct.Queue.html)
+#[cfg(has_atomics)]
 pub struct Queue<A, U, C> {
     // this is from where we dequeue items
     pub(crate) head: Atomic<U, C>,
diff --git a/src/lib.rs b/src/lib.rs
index 6f133b64dd..243a1fc172 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -89,10 +89,11 @@ mod ser;

 pub mod binary_heap;
 pub mod i;
-#[cfg(not(armv6m))]
+#[cfg(has_cas)]
 pub mod mpmc;
-#[cfg(not(armv6m))]
+#[cfg(has_cas)]
 pub mod pool;
+#[cfg(has_atomics)]
 pub mod spsc;

 mod sealed;
diff --git a/src/mpmc.rs b/src/mpmc.rs
index dcc950bd6e..dee673e5ca 100644
--- a/src/mpmc.rs
+++ b/src/mpmc.rs
@@ -1,5 +1,7 @@
 //! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue
 //!
+//! NOTE: This module is not available on targets that do *not* support CAS operations, e.g. ARMv6-M
+//!
 //! # Example
 //!
 //! This queue can be constructed in "const context". Placing it in a `static` variable lets *all*
diff --git a/src/pool/mod.rs b/src/pool/mod.rs
index 959129e459..fe7eea283b 100644
--- a/src/pool/mod.rs
+++ b/src/pool/mod.rs
@@ -1,5 +1,7 @@
 //! A heap-less, interrupt-safe, lock-free memory pool (\*)
 //!
+//! NOTE: This module is not available on targets that do *not* support CAS operations, e.g. ARMv6-M
+//!
 //! (\*) Currently, the implementation is only lock-free *and* `Sync` on ARMv7-M devices
 //!
 //! # Examples
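For reference, the two `match` blocks in the `build.rs` hunk above sit inside the crate's existing build script, which already holds the target triple in a `target` variable; Cargo passes that triple to build scripts through the `TARGET` environment variable. The stand-alone sketch below shows the same cfg-emission pattern end to end. Only the two target lists come from the diff; the surrounding boilerplate (reading `TARGET`, the error type) is an assumption for illustration.

```rust
// Hypothetical stand-alone build.rs mirroring the cfg-emission pattern above.
use std::{env, error::Error};

fn main() -> Result<(), Box<dyn Error>> {
    // Cargo sets TARGET to the triple being compiled for when it runs a build script.
    let target = env::var("TARGET")?;

    // Targets without CAS (compare-and-swap) instructions: no `has_cas` cfg,
    // so `mpmc` and `pool` are compiled out (see the src/lib.rs hunk above).
    match &target[..] {
        "thumbv6m-none-eabi"
        | "msp430-none-elf"
        | "riscv32i-unknown-none-elf"
        | "riscv32imc-unknown-none-elf" => {}
        _ => println!("cargo:rustc-cfg=has_cas"),
    }

    // Targets without atomic load/store: no `has_atomics` cfg, so `spsc` is compiled out.
    match &target[..] {
        "msp430-none-elf" | "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => {}
        _ => println!("cargo:rustc-cfg=has_atomics"),
    }

    Ok(())
}
```

Downstream code then gates on these flags with `#[cfg(has_cas)]` / `#[cfg(has_atomics)]`, as the src/lib.rs hunk above and the src/sealed.rs hunk below do.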
diff --git a/src/sealed.rs b/src/sealed.rs
index 46147c50e4..3f68564b55 100644
--- a/src/sealed.rs
+++ b/src/sealed.rs
@@ -1,197 +1,210 @@
 /// Sealed traits and implementations for `spsc`
 pub mod spsc {
+    #[cfg(has_atomics)]
+    use crate::spsc::{MultiCore, SingleCore};

-use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
-use crate::spsc::{MultiCore, SingleCore};
+    #[cfg(has_atomics)]
+    use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};

-pub unsafe trait XCore {
-    fn is_multi_core() -> bool;
-}
+    pub unsafe trait XCore {
+        fn is_multi_core() -> bool;
+    }

-unsafe impl XCore for SingleCore {
-    fn is_multi_core() -> bool {
-        false
-    }
-}
+    #[cfg(has_atomics)]
+    unsafe impl XCore for SingleCore {
+        fn is_multi_core() -> bool {
+            false
+        }
+    }

-unsafe impl XCore for MultiCore {
-    fn is_multi_core() -> bool {
-        true
-    }
-}
+    #[cfg(has_atomics)]
+    unsafe impl XCore for MultiCore {
+        fn is_multi_core() -> bool {
+            true
+        }
+    }

-pub unsafe trait Uxx: Into<usize> + Send {
-    #[doc(hidden)]
-    fn saturate(x: usize) -> Self;
+    pub unsafe trait Uxx: Into<usize> + Send {
+        #[doc(hidden)]
+        fn saturate(x: usize) -> Self;

-    #[doc(hidden)]
-    fn truncate(x: usize) -> Self;
+        #[doc(hidden)]
+        fn truncate(x: usize) -> Self;

-    #[doc(hidden)]
-    unsafe fn load_acquire<C>(x: *const Self) -> Self
-    where
-        C: XCore;
+        #[cfg(has_atomics)]
+        #[doc(hidden)]
+        unsafe fn load_acquire<C>(x: *const Self) -> Self
+        where
+            C: XCore;

-    #[doc(hidden)]
-    fn load_relaxed(x: *const Self) -> Self;
+        #[cfg(has_atomics)]
+        #[doc(hidden)]
+        fn load_relaxed(x: *const Self) -> Self;

-    #[doc(hidden)]
-    unsafe fn store_release<C>(x: *const Self, val: Self)
-    where
-        C: XCore;
-}
+        #[cfg(has_atomics)]
+        #[doc(hidden)]
+        unsafe fn store_release<C>(x: *const Self, val: Self)
+        where
+            C: XCore;
+    }

-unsafe impl Uxx for u8 {
-    fn saturate(x: usize) -> Self {
-        let max = Self::max_value() as usize;
-        if x >= usize::from(max) {
-            max as Self
-        } else {
-            x as Self
-        }
-    }
+    unsafe impl Uxx for u8 {
+        fn saturate(x: usize) -> Self {
+            let max = Self::max_value() as usize;
+            if x >= usize::from(max) {
+                max as Self
+            } else {
+                x as Self
+            }
+        }

-    fn truncate(x: usize) -> Self {
-        x as Self
-    }
+        fn truncate(x: usize) -> Self {
+            x as Self
+        }

-    unsafe fn load_acquire<C>(x: *const Self) -> Self
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicU8)).load(Ordering::Acquire)
-        } else {
-            let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
-            atomic::compiler_fence(Ordering::Acquire); // ▼
-            y
-        }
-    }
+        #[cfg(has_atomics)]
+        unsafe fn load_acquire<C>(x: *const Self) -> Self
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicU8)).load(Ordering::Acquire)
+            } else {
+                let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
+                atomic::compiler_fence(Ordering::Acquire); // ▼
+                y
+            }
+        }

-    fn load_relaxed(x: *const Self) -> Self {
-        unsafe { (*(x as *const AtomicU8)).load(Ordering::Relaxed) }
-    }
+        #[cfg(has_atomics)]
+        fn load_relaxed(x: *const Self) -> Self {
+            unsafe { (*(x as *const AtomicU8)).load(Ordering::Relaxed) }
+        }

-    unsafe fn store_release<C>(x: *const Self, val: Self)
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicU8)).store(val, Ordering::Release)
-        } else {
-            atomic::compiler_fence(Ordering::Release); // ▲
-            (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
-        }
-    }
-}
+        #[cfg(has_atomics)]
+        unsafe fn store_release<C>(x: *const Self, val: Self)
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicU8)).store(val, Ordering::Release)
+            } else {
+                atomic::compiler_fence(Ordering::Release); // ▲
+                (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
+            }
+        }
+    }

-unsafe impl Uxx for u16 {
-    fn saturate(x: usize) -> Self {
-        let max = Self::max_value() as usize;
-        if x >= usize::from(max) {
-            max as Self
-        } else {
-            x as Self
-        }
-    }
+    unsafe impl Uxx for u16 {
+        fn saturate(x: usize) -> Self {
+            let max = Self::max_value() as usize;
+            if x >= usize::from(max) {
+                max as Self
+            } else {
+                x as Self
+            }
+        }

-    fn truncate(x: usize) -> Self {
-        x as Self
-    }
+        fn truncate(x: usize) -> Self {
+            x as Self
+        }

-    unsafe fn load_acquire<C>(x: *const Self) -> Self
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicU16)).load(Ordering::Acquire)
-        } else {
-            let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
-            atomic::compiler_fence(Ordering::Acquire); // ▼
-            y
-        }
-    }
+        #[cfg(has_atomics)]
+        unsafe fn load_acquire<C>(x: *const Self) -> Self
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicU16)).load(Ordering::Acquire)
+            } else {
+                let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
+                atomic::compiler_fence(Ordering::Acquire); // ▼
+                y
+            }
+        }

-    fn load_relaxed(x: *const Self) -> Self {
-        unsafe { (*(x as *const AtomicU16)).load(Ordering::Relaxed) }
-    }
+        #[cfg(has_atomics)]
+        fn load_relaxed(x: *const Self) -> Self {
+            unsafe { (*(x as *const AtomicU16)).load(Ordering::Relaxed) }
+        }

-    unsafe fn store_release<C>(x: *const Self, val: Self)
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicU16)).store(val, Ordering::Release)
-        } else {
-            atomic::compiler_fence(Ordering::Release); // ▲
-            (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
-        }
-    }
-}
+        #[cfg(has_atomics)]
+        unsafe fn store_release<C>(x: *const Self, val: Self)
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicU16)).store(val, Ordering::Release)
+            } else {
+                atomic::compiler_fence(Ordering::Release); // ▲
+                (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
+            }
+        }
+    }

-unsafe impl Uxx for usize {
-    fn saturate(x: usize) -> Self {
-        x
-    }
+    unsafe impl Uxx for usize {
+        fn saturate(x: usize) -> Self {
+            x
+        }

-    fn truncate(x: usize) -> Self {
-        x
-    }
+        fn truncate(x: usize) -> Self {
+            x
+        }

-    unsafe fn load_acquire<C>(x: *const Self) -> Self
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicUsize)).load(Ordering::Acquire)
-        } else {
-            let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
-            atomic::compiler_fence(Ordering::Acquire); // ▼
-            y
-        }
-    }
+        #[cfg(has_atomics)]
+        unsafe fn load_acquire<C>(x: *const Self) -> Self
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicUsize)).load(Ordering::Acquire)
+            } else {
+                let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
+                atomic::compiler_fence(Ordering::Acquire); // ▼
+                y
+            }
+        }

-    fn load_relaxed(x: *const Self) -> Self {
-        unsafe { (*(x as *const AtomicUsize)).load(Ordering::Relaxed) }
-    }
+        #[cfg(has_atomics)]
+        fn load_relaxed(x: *const Self) -> Self {
+            unsafe { (*(x as *const AtomicUsize)).load(Ordering::Relaxed) }
+        }

-    unsafe fn store_release<C>(x: *const Self, val: Self)
-    where
-        C: XCore,
-    {
-        if C::is_multi_core() {
-            (*(x as *const AtomicUsize)).store(val, Ordering::Release)
-        } else {
-            atomic::compiler_fence(Ordering::Release); // ▲
-            (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
-        }
-    }
-}
+        #[cfg(has_atomics)]
+        unsafe fn store_release<C>(x: *const Self, val: Self)
+        where
+            C: XCore,
+        {
+            if C::is_multi_core() {
+                (*(x as *const AtomicUsize)).store(val, Ordering::Release)
+            } else {
+                atomic::compiler_fence(Ordering::Release); // ▲
+                (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
+            }
+        }
+    }
 }

 /// Sealed traits and implementations for `binary_heap`
 pub mod binary_heap {
+    use crate::binary_heap::{Max, Min};
+    use core::cmp::Ordering;

-use core::cmp::Ordering;
-use crate::binary_heap::{Min, Max};

-/// The binary heap kind: min-heap or max-heap
-pub unsafe trait Kind {
-    #[doc(hidden)]
-    fn ordering() -> Ordering;
-}
+    /// The binary heap kind: min-heap or max-heap
+    pub unsafe trait Kind {
+        #[doc(hidden)]
+        fn ordering() -> Ordering;
+    }

-unsafe impl Kind for Min {
-    fn ordering() -> Ordering {
-        Ordering::Less
-    }
-}
+    unsafe impl Kind for Min {
+        fn ordering() -> Ordering {
+            Ordering::Less
+        }
+    }

-unsafe impl Kind for Max {
-    fn ordering() -> Ordering {
-        Ordering::Greater
-    }
-}
+    unsafe impl Kind for Max {
+        fn ordering() -> Ordering {
+            Ordering::Greater
+        }
+    }
 }
diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs
index 3d228a75c9..5199fd0710 100644
--- a/src/spsc/mod.rs
+++ b/src/spsc/mod.rs
@@ -1,5 +1,8 @@
 //! Fixed capacity Single Producer Single Consumer (SPSC) queue
 //!
+//! NOTE: This module is not available on targets that do *not* support atomic loads, e.g. RISC-V
+//! cores w/o the A (Atomic) extension
+//!
 //! # Examples
 //!
 //! - `Queue` can be used as a plain queue
@@ -731,7 +734,7 @@ mod tests {
         assert_eq!(items.next(), None);
         assert_eq!(items.next_back(), None);
     }
-    
+
     #[test]
     fn iter_overflow() {
         let mut rb: Queue = Queue::u8();
@@ -778,7 +781,7 @@ mod tests {
         assert_eq!(items.next(), None);
         assert_eq!(items.next_back(), None);
     }
-    
+
     #[test]
     fn sanity() {
         let mut rb: Queue = Queue::new();
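To see what these gates preserve on targets that do have atomic loads, here is a minimal round trip through the SPSC queue whose docs are amended in the last hunk. This is a usage sketch under the heapless 0.5 API as it exists around this change (`heapless::consts` capacities, `Queue::new`, `split`); none of it comes from the diff itself.

```rust
// Compiles only where the `has_atomics` cfg from build.rs holds, i.e. on every
// built-in target except the three listed in the second match of the build.rs hunk.
use heapless::consts::U4;
use heapless::spsc::Queue;

fn main() {
    // Capacity is a type-level constant; no heap allocation is involved.
    let mut rb: Queue<u8, U4> = Queue::new();

    // Split into the single-producer and single-consumer endpoints.
    let (mut producer, mut consumer) = rb.split();

    producer.enqueue(42).unwrap();
    assert_eq!(consumer.dequeue(), Some(42));
}
```

On a CAS-capable target the `mpmc` and `pool` modules remain available as well; on the newly supported `riscv32imc-unknown-none-elf` target only the atomic-free parts of the crate are built.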