
Commit

Merge pull request eclipse-iceoryx#202 from elfenpiff/iox2-200-32-bit-Atomic64

[eclipse-iceoryx#200] 32 bit atomic64
elfenpiff authored Apr 30, 2024
2 parents 3325ac9 + 9ad018d commit 82b1baf
Showing 6 changed files with 994 additions and 5 deletions.
1 change: 1 addition & 0 deletions doc/release-notes/iceoryx2-unreleased.md
@@ -40,6 +40,7 @@
sample.send()?;
}
```
* Introduce `IoxAtomic` that supports up to 128-bit atomics on 32-bit architectures with a ReadWriteLock [#200](https://github.com/eclipse-iceoryx/iceoryx2/issues/200)
* Example that demonstrates publish-subscribe communication with dynamic data [#205](https://github.com/eclipse-iceoryx/iceoryx2/issues/205)

### Bugfixes
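The new `IoxAtomic` is exposed through drop-in aliases such as `IoxAtomicU64` (see `iox_atomic.rs` below). A minimal usage sketch, assuming the crate is imported as `iceoryx2_pal_concurrency_sync` (import path inferred from the package directory `iceoryx2-pal/concurrency-sync`):

```rust
use core::sync::atomic::Ordering;

// On 64-bit targets this alias resolves to core::sync::atomic::AtomicU64;
// on 32-bit targets it resolves to the lock-based IoxAtomic<u64>.
use iceoryx2_pal_concurrency_sync::iox_atomic::IoxAtomicU64;

fn main() {
    let counter = IoxAtomicU64::new(0);
    counter.fetch_add(5, Ordering::Relaxed);
    assert_eq!(counter.load(Ordering::Relaxed), 5);
}
```

Because the alias has the same surface as the `core` atomics, calling code compiles unchanged on both architectures; only lock-freedom is lost on 32-bit targets.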
1 change: 1 addition & 0 deletions iceoryx2-pal/concurrency-sync/Cargo.toml
@@ -14,3 +14,4 @@ version = { workspace = true }

[dev-dependencies]
iceoryx2-bb-testing = { workspace = true }
generic-tests = { workspace = true }
356 changes: 356 additions & 0 deletions iceoryx2-pal/concurrency-sync/src/iox_atomic.rs
@@ -0,0 +1,356 @@
// Copyright (c) 2024 Contributors to the Eclipse Foundation
//
// See the NOTICE file(s) distributed with this work for additional
// information regarding copyright ownership.
//
// This program and the accompanying materials are made available under the
// terms of the Apache Software License 2.0 which is available at
// https://www.apache.org/licenses/LICENSE-2.0, or the MIT license
// which is available at https://opensource.org/licenses/MIT.
//
// SPDX-License-Identifier: Apache-2.0 OR MIT

use core::{
    cell::UnsafeCell,
    ops::{AddAssign, BitAndAssign, BitOrAssign, BitXorAssign, Not, SubAssign},
    sync::atomic::Ordering,
};

use crate::{rwlock::RwLockWriterPreference, WaitAction};

/// Behaves like [`core::sync::atomic::AtomicBool`]
pub type IoxAtomicBool = core::sync::atomic::AtomicBool;

/// Behaves like [`core::sync::atomic::AtomicUsize`]
pub type IoxAtomicUsize = core::sync::atomic::AtomicUsize;

/// Behaves like [`core::sync::atomic::AtomicU8`]
pub type IoxAtomicU8 = core::sync::atomic::AtomicU8;

/// Behaves like [`core::sync::atomic::AtomicU16`]
pub type IoxAtomicU16 = core::sync::atomic::AtomicU16;

/// Behaves like [`core::sync::atomic::AtomicU32`]
pub type IoxAtomicU32 = core::sync::atomic::AtomicU32;

/// Behaves like [`core::sync::atomic::AtomicI8`]
pub type IoxAtomicI8 = core::sync::atomic::AtomicI8;

/// Behaves like [`core::sync::atomic::AtomicI16`]
pub type IoxAtomicI16 = core::sync::atomic::AtomicI16;

/// Behaves like [`core::sync::atomic::AtomicI32`]
pub type IoxAtomicI32 = core::sync::atomic::AtomicI32;

#[cfg(target_pointer_width = "64")]
/// Behaves like [`core::sync::atomic::AtomicI64`]
pub type IoxAtomicI64 = core::sync::atomic::AtomicI64;

#[cfg(target_pointer_width = "64")]
/// Behaves like [`core::sync::atomic::AtomicU64`]
pub type IoxAtomicU64 = core::sync::atomic::AtomicU64;

#[cfg(target_pointer_width = "32")]
/// Non lock-free implementation that behaves like [`core::sync::atomic::AtomicI64`]
pub type IoxAtomicI64 = IoxAtomic<i64>;

#[cfg(target_pointer_width = "32")]
/// Non lock-free implementation that behaves like [`core::sync::atomic::AtomicU64`]
pub type IoxAtomicU64 = IoxAtomic<u64>;

type LockType = RwLockWriterPreference;

#[doc(hidden)]
pub mod internal {
    use core::ops::BitAnd;

    use super::*;

    pub trait AtomicInteger:
        Copy
        + Send
        + Eq
        + AddAssign
        + SubAssign
        + BitAndAssign
        + BitOrAssign
        + BitXorAssign
        + BitAnd<Output = Self>
        + Ord
        + Not<Output = Self>
    {
        fn overflowing_add(self, rhs: Self) -> (Self, bool);
        fn overflowing_sub(self, rhs: Self) -> (Self, bool);
    }

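    // The calls below resolve to the inherent `overflowing_add`/`overflowing_sub`
    // methods of the primitive integer types, not recursively to this trait's
    // methods, because inherent methods take precedence during method resolution.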
    impl AtomicInteger for u64 {
        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
            self.overflowing_add(rhs)
        }

        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
            self.overflowing_sub(rhs)
        }
    }

    impl AtomicInteger for u128 {
        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
            self.overflowing_add(rhs)
        }

        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
            self.overflowing_sub(rhs)
        }
    }

    impl AtomicInteger for i64 {
        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
            self.overflowing_add(rhs)
        }

        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
            self.overflowing_sub(rhs)
        }
    }

    impl AtomicInteger for i128 {
        fn overflowing_add(self, rhs: Self) -> (Self, bool) {
            self.overflowing_add(rhs)
        }

        fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
            self.overflowing_sub(rhs)
        }
    }
}

/// iceoryx2 implementation of an atomic that has an internal [`RwLockWriterPreference`].
/// It enables atomic operations on platforms that do not support them with the restriction that
/// those operations are no longer lock-free.
#[repr(C)]
pub struct IoxAtomic<T: internal::AtomicInteger> {
    data: UnsafeCell<T>,
    lock: LockType,
}

impl<T: internal::AtomicInteger> IoxAtomic<T> {
    /// See [`core::sync::atomic::AtomicU64::new()`]
    pub fn new(v: T) -> Self {
        Self {
            data: UnsafeCell::new(v),
            lock: LockType::new(),
        }
    }

    fn read_lock(&self) {
        self.lock.read_lock(|_, _| WaitAction::Continue);
    }

    fn write_lock(&self) {
        self.lock
            .write_lock(|_, _| WaitAction::Continue, |_| {}, |_| {});
    }

    fn unlock(&self) {
        self.lock.unlock(|_| {}, |_| {});
    }

    /// See [`core::sync::atomic::AtomicU64::as_ptr()`]
    pub const fn as_ptr(&self) -> *mut T {
        self.data.get()
    }

    /// See [`core::sync::atomic::AtomicU64::compare_exchange()`]
    pub fn compare_exchange(
        &self,
        current: T,
        new: T,
        _success: Ordering,
        _failure: Ordering,
    ) -> Result<T, T> {
        self.write_lock();
        let data = unsafe { *self.data.get() };
        if data != current {
            core::sync::atomic::fence(Ordering::SeqCst);
            self.unlock();
            return Err(data);
        }

        unsafe { *self.data.get() = new };
        core::sync::atomic::fence(Ordering::SeqCst);
        self.unlock();
        Ok(data)
    }

    /// See [`core::sync::atomic::AtomicU64::compare_exchange_weak()`]
    pub fn compare_exchange_weak(
        &self,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        self.compare_exchange(current, new, success, failure)
    }

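    // Note: the caller-provided `Ordering` is ignored throughout this
    // implementation; the write lock plus the `SeqCst` fences make every
    // operation behave as if `Ordering::SeqCst` had been requested.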
    fn fetch_op<F: FnOnce() -> T>(&self, op: F, _order: Ordering) -> T {
        self.write_lock();
        let data = op();
        core::sync::atomic::fence(Ordering::SeqCst);
        self.unlock();
        data
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_add()`]
    pub fn fetch_add(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() = old.overflowing_add(value).0 };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_and()`]
    pub fn fetch_and(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() &= value };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_max()`]
    pub fn fetch_max(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() = old.max(value) };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_min()`]
    pub fn fetch_min(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() = old.min(value) };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_nand()`]
    pub fn fetch_nand(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() = !(old & value) };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_or()`]
    pub fn fetch_or(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() |= value };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_sub()`]
    pub fn fetch_sub(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() = old.overflowing_sub(value).0 };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_update()`]
    pub fn fetch_update<F: FnMut(T) -> Option<T>>(
        &self,
        _set_order: Ordering,
        _fetch_order: Ordering,
        mut f: F,
    ) -> Result<T, T> {
        self.write_lock();
        let data = unsafe { *self.data.get() };

        match f(data) {
            Some(v) => {
                unsafe { *self.data.get() = v };
                core::sync::atomic::fence(Ordering::SeqCst);
                self.unlock();
                Ok(data)
            }
            None => {
                core::sync::atomic::fence(Ordering::SeqCst);
                self.unlock();
                Err(data)
            }
        }
    }

    /// See [`core::sync::atomic::AtomicU64::fetch_xor()`]
    pub fn fetch_xor(&self, value: T, order: Ordering) -> T {
        self.fetch_op(
            || {
                let old = unsafe { *self.data.get() };
                unsafe { *self.data.get() ^= value };
                old
            },
            order,
        )
    }

    /// See [`core::sync::atomic::AtomicU64::into_inner()`]
    pub fn into_inner(self) -> T {
        unsafe { *self.data.get() }
    }

    /// See [`core::sync::atomic::AtomicU64::load()`]
    pub fn load(&self, _order: Ordering) -> T {
        self.read_lock();
        let data = unsafe { *self.data.get() };
        core::sync::atomic::fence(Ordering::SeqCst);
        self.unlock();
        data
    }

    /// See [`core::sync::atomic::AtomicU64::store()`]
    pub fn store(&self, value: T, _order: Ordering) {
        self.write_lock();
        unsafe { *self.data.get() = value };
        core::sync::atomic::fence(Ordering::SeqCst);
        self.unlock();
    }

    /// See [`core::sync::atomic::AtomicU64::swap()`]
    pub fn swap(&self, value: T, _order: Ordering) -> T {
        self.write_lock();
        let data = unsafe { *self.data.get() };
        unsafe { *self.data.get() = value };
        core::sync::atomic::fence(Ordering::SeqCst);
        self.unlock();
        data
    }
}
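A short sketch of the semantics the file above implements, written as a standalone program (the crate import path `iceoryx2_pal_concurrency_sync` is assumed; `IoxAtomic` itself is compiled on all targets, only the `IoxAtomicU64`/`IoxAtomicI64` aliases are cfg-gated):

```rust
use core::sync::atomic::Ordering;

use iceoryx2_pal_concurrency_sync::iox_atomic::IoxAtomic;

fn main() {
    let value = IoxAtomic::<u64>::new(10);

    // Success path: the stored value matches `current`, so `new` is written
    // and the previous value comes back in Ok.
    assert_eq!(
        value.compare_exchange(10, 20, Ordering::SeqCst, Ordering::SeqCst),
        Ok(10)
    );

    // Failure path: the stored value is now 20, so nothing is written and
    // Err carries the actual value.
    assert_eq!(
        value.compare_exchange(10, 30, Ordering::SeqCst, Ordering::SeqCst),
        Err(20)
    );

    // fetch_add wraps on overflow (overflowing_add), matching the
    // core::sync::atomic types: 20 + u64::MAX wraps around to 19.
    assert_eq!(value.fetch_add(u64::MAX, Ordering::Relaxed), 20);
    assert_eq!(value.load(Ordering::Relaxed), 19);
}
```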
1 change: 1 addition & 0 deletions iceoryx2-pal/concurrency-sync/src/lib.rs
@@ -16,6 +16,7 @@ const SPIN_REPETITIONS: u64 = 10000;

pub mod barrier;
pub mod condition_variable;
pub mod iox_atomic;
pub mod mutex;
pub mod rwlock;
pub mod semaphore;
10 changes: 5 additions & 5 deletions iceoryx2-pal/concurrency-sync/src/rwlock.rs
@@ -26,15 +26,15 @@ pub struct RwLockReaderPreference {

 impl Default for RwLockReaderPreference {
     fn default() -> Self {
-        Self {
-            reader_count: AtomicU32::new(UNLOCKED),
-        }
+        Self::new()
     }
 }

 impl RwLockReaderPreference {
-    pub fn new() -> Self {
-        Self::default()
+    pub const fn new() -> Self {
+        Self {
+            reader_count: AtomicU32::new(UNLOCKED),
+        }
     }

     pub fn try_read_lock(&self) -> WaitResult {
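The point of this hunk is the `const` constructor: `Default::default()` is a trait method and cannot be `const`, so the old `new() -> Self::default()` delegation blocked compile-time construction; inverting it lets the lock initialize a `static` directly. A small sketch (crate import path assumed as above):

```rust
use iceoryx2_pal_concurrency_sync::rwlock::RwLockReaderPreference;

// A const constructor lets the lock live in a static without lazy or
// runtime initialization.
static LOCK: RwLockReaderPreference = RwLockReaderPreference::new();

fn main() {
    let _ = LOCK.try_read_lock();
}
```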