This repository has been archived by the owner on Mar 21, 2024. It is now read-only.

Commit

Use initialized memory rather than an initialized object for MSVC atomics
wmaxey committed Nov 13, 2020
1 parent 7b2a928 commit 2610560
Showing 1 changed file with 44 additions and 32 deletions.
76 changes: 44 additions & 32 deletions libcxx/include/support/win32/atomic_msvc.h
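
Apart from the first hunk, which just gives `success` an initial value, every hunk below applies the same transformation: the old code declared a local result object (`_Type __ret;`), which default-initializes `_Type`, requires it to be default-constructible, and leaves trivial types holding indeterminate bytes until the intrinsic writes them; the new code builds the result in a zero-initialized, suitably aligned byte buffer and reads it back through a `_Type*`. A minimal sketch of the new shape, using a hypothetical `load_relaxed` helper in place of the per-size MSVC interlocked wrappers (illustration only, not code from this commit):

#include <cassert>
#include <cstring>

// Hypothetical stand-in for the per-size MSVC interlocked wrappers in
// atomic_msvc.h; all it shows is "write the result through a pointer".
template <class T>
void load_relaxed(const T* src, T* dst) {
    std::memcpy(dst, src, sizeof(T));
}

template <class T>
T load(const T* src) {
    // Old shape: T ret; load_relaxed(src, &ret); return ret;
    // New shape, mirroring the commit: initialized raw storage, so no T object
    // is default-constructed before the helper fills it in.
    alignas(T) unsigned char buf[sizeof(T)] = {};
    auto* dest = reinterpret_cast<T*>(buf);
    load_relaxed(src, dest);
    return *dest;
}

int main() {
    int x = 42;
    assert(load(&x) == 42);
    return 0;
}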
@@ -146,7 +146,7 @@ bool __atomic_compare_exchange_relaxed(const volatile _Type *__ptr, _Type *__exp
 }
 template<class _Type>
 bool __atomic_compare_exchange(_Type volatile *__ptr, _Type *__expected, const _Type *__desired, bool, int __success_memorder, int __failure_memorder) {
-    bool success;
+    bool success = false;
     switch (detail::__stronger_order_cuda(__success_memorder, __failure_memorder)) {
     case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); success = __atomic_compare_exchange_relaxed(__ptr, __expected, __desired); break;
     case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
@@ -212,20 +212,22 @@ void __atomic_fetch_add_relaxed(const volatile _Type *__ptr, const _Delta *__val
 }
 template<class _Type, class _Delta>
 _Type __atomic_fetch_add(_Type volatile *__ptr, _Delta __val, int __memorder) {
-    _Type __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
     switch (__memorder) {
-    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, &__ret);break;
+    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, __dest);break;
     case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
-    case __ATOMIC_ACQUIRE: __atomic_fetch_add_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_RELAXED: __atomic_fetch_add_relaxed(__ptr, &__val, &__ret); break;
+    case __ATOMIC_ACQUIRE: __atomic_fetch_add_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_add_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_RELAXED: __atomic_fetch_add_relaxed(__ptr, &__val, __dest); break;
     default: assert(0);
     }
-    return __ret;
+    return *__dest;
 }
 template<class _Type, class _Delta>
 _Type __atomic_fetch_sub(_Type volatile *__ptr, _Delta __val, int __memorder) {
-    return __atomic_fetch_add(__ptr, -__val, __memorder);
+    return __atomic_fetch_add(__ptr, 0-__val, __memorder);
 }
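
In `__atomic_fetch_sub`, `-__val` becomes `0-__val`. For an unsigned `_Delta` both expressions wrap modulo 2^N and produce the same value; spelling the negation as a subtraction presumably avoids a compiler diagnostic about applying unary minus to an unsigned operand (MSVC's C4146), though the commit itself does not say so. A small check of the value equivalence (illustration only, not code from this commit):

#include <cassert>

int main() {
    unsigned int d = 5u;
    unsigned int a = 0 - d;                          // spelling used by the commit
    unsigned int b = static_cast<unsigned int>(-d);  // unary minus, same wrapped value
    assert(a == b);                                  // both are 2^32 - 5 for 32-bit unsigned int
    return 0;
}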


@@ -251,16 +253,18 @@ void __atomic_fetch_and_relaxed(const volatile _Type *__ptr, const _Delta *__val
 }
 template<class _Type, class _Delta>
 _Type __atomic_fetch_and(_Type volatile *__ptr, _Delta __val, int __memorder) {
-    _Type __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
     switch (__memorder) {
-    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, &__ret);break;
+    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, __dest);break;
     case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
-    case __ATOMIC_ACQUIRE: __atomic_fetch_and_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_RELAXED: __atomic_fetch_and_relaxed(__ptr, &__val, &__ret); break;
+    case __ATOMIC_ACQUIRE: __atomic_fetch_and_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_and_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_RELAXED: __atomic_fetch_and_relaxed(__ptr, &__val, __dest); break;
     default: assert(0);
     }
-    return __ret;
+    return *__dest;
 }

template<class _Type, class _Delta, detail::_enable_if_sized_as<_Type,1> = 0>
@@ -285,16 +289,18 @@ void __atomic_fetch_xor_relaxed(const volatile _Type *__ptr, const _Delta *__val
 }
 template<class _Type, class _Delta>
 _Type __atomic_fetch_xor(_Type volatile *__ptr, _Delta __val, int __memorder) {
-    _Type __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
     switch (__memorder) {
-    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, &__ret);break;
+    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, __dest);break;
     case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
-    case __ATOMIC_ACQUIRE: __atomic_fetch_xor_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_RELAXED: __atomic_fetch_xor_relaxed(__ptr, &__val, &__ret); break;
+    case __ATOMIC_ACQUIRE: __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_RELAXED: __atomic_fetch_xor_relaxed(__ptr, &__val, __dest); break;
     default: assert(0);
     }
-    return __ret;
+    return *__dest;
 }

template<class _Type, class _Delta, detail::_enable_if_sized_as<_Type,1> = 0>
@@ -319,23 +325,27 @@ void __atomic_fetch_or_relaxed(const volatile _Type *__ptr, const _Delta *__val,
 }
 template<class _Type, class _Delta>
 _Type __atomic_fetch_or(_Type volatile *__ptr, _Delta __val, int __memorder) {
-    _Type __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
     switch (__memorder) {
-    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, &__ret);break;
+    case __ATOMIC_RELEASE: _Compiler_or_memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, __dest);break;
     case __ATOMIC_ACQ_REL: _Compiler_or_memory_barrier();
-    case __ATOMIC_ACQUIRE: __atomic_fetch_or_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, &__ret); _Compiler_or_memory_barrier(); break;
-    case __ATOMIC_RELAXED: __atomic_fetch_or_relaxed(__ptr, &__val, &__ret); break;
+    case __ATOMIC_ACQUIRE: __atomic_fetch_or_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_SEQ_CST: _Memory_barrier(); __atomic_fetch_or_relaxed(__ptr, &__val, __dest); _Compiler_or_memory_barrier(); break;
+    case __ATOMIC_RELAXED: __atomic_fetch_or_relaxed(__ptr, &__val, __dest); break;
     default: assert(0);
     }
-    return __ret;
+    return *__dest;
 }
 
 template<class _Type>
 _Type __atomic_load_n(const _Type volatile *__ptr, int __memorder) {
-    _Type __ret;
-    __atomic_load(__ptr, &__ret, __memorder);
-    return __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
+    __atomic_load(__ptr, __dest, __memorder);
+    return *__dest;
 }

template<class _Type>
@@ -350,9 +360,11 @@ bool __atomic_compare_exchange_n(_Type volatile *__ptr, _Type *__expected, _Type
 
 template<class _Type>
 _Type __atomic_exchange_n(_Type volatile *__ptr, _Type __val, int __memorder) {
-    _Type __ret;
-    __atomic_exchange(__ptr, &__val, &__ret, __memorder);
-    return __ret;
+    alignas(_Type) unsigned char __buf[sizeof(_Type)] = {};
+    auto* __dest = reinterpret_cast<_Type*>(__buf);
+
+    __atomic_exchange(__ptr, &__val, __dest, __memorder);
+    return *__dest;
 }

template<class _Type, class _Delta>
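
One observable effect of the buffer-based shape in `__atomic_load_n` and `__atomic_exchange_n` above, whatever the primary motivation of the change: the helpers no longer declare a `_Type` local, so `_Type` only has to be copyable, not default-constructible. A self-contained illustration with a hypothetical, non-atomic `exchange_value` helper (not the real `__atomic_exchange_n`):

#include <cstring>

// Trivially copyable, but deliberately not default-constructible.
struct NoDefault {
    int v;
    explicit NoDefault(int x) : v(x) {}
};

// Shaped like the new __atomic_exchange_n; the old `T ret;` form would not
// compile for NoDefault because there is no default constructor to call.
template <class T>
T exchange_value(T* slot, T val) {
    alignas(T) unsigned char buf[sizeof(T)] = {};
    auto* old = reinterpret_cast<T*>(buf);
    std::memcpy(old, slot, sizeof(T));   // stand-in for the interlocked exchange
    std::memcpy(slot, &val, sizeof(T));
    return *old;
}

int main() {
    NoDefault a(1), b(2);
    NoDefault prev = exchange_value(&a, b);
    return (prev.v == 1 && a.v == 2) ? 0 : 1;
}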
