/*
 * Copyright (c) 2017 ARM Limited
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MSTD_ATOMIC_
#define MSTD_ATOMIC_

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <mstd_type_traits>

#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"
#include "platform/CriticalSectionLock.h"

/*
 * mstd::atomic template and types are designed to be as close as possible to C++11
 * std::atomic. Key differences:
 *
 * - Operations are specified as atomic with respect to interrupts as well as
 *   threads.
 * - "Lock-free" indicates that exclusive accesses are used; otherwise a
 *   critical section is taken.
 * - Default initialization follows C17 and proposed C++2x rules - i.e. that
 *   like normal objects they are zero-initialized if static or thread-local,
 *   else in an indeterminate state when automatic. There is no ATOMIC_VAR_INIT()
 *   equivalent.
 */
#ifndef MBED_EXCLUSIVE_ACCESS
#define MSTD_ATOMIC_BOOL_LOCK_FREE      0
#define MSTD_ATOMIC_CHAR_LOCK_FREE      0
#define MSTD_ATOMIC_CHAR16_T_LOCK_FREE  0
#define MSTD_ATOMIC_CHAR32_T_LOCK_FREE  0
#define MSTD_ATOMIC_WCHAR_T_LOCK_FREE   0
#define MSTD_ATOMIC_SHORT_LOCK_FREE     0
#define MSTD_ATOMIC_INT_LOCK_FREE       0
#define MSTD_ATOMIC_LONG_LOCK_FREE      0
#define MSTD_ATOMIC_LLONG_LOCK_FREE     0
#define MSTD_ATOMIC_POINTER_LOCK_FREE   0
#else
#define MSTD_ATOMIC_BOOL_LOCK_FREE      2
#define MSTD_ATOMIC_CHAR_LOCK_FREE      2
#define MSTD_ATOMIC_CHAR16_T_LOCK_FREE  2
#define MSTD_ATOMIC_CHAR32_T_LOCK_FREE  2
#define MSTD_ATOMIC_WCHAR_T_LOCK_FREE   2
#define MSTD_ATOMIC_SHORT_LOCK_FREE     2
#define MSTD_ATOMIC_INT_LOCK_FREE       2
#define MSTD_ATOMIC_LONG_LOCK_FREE      2
#define MSTD_ATOMIC_LLONG_LOCK_FREE     0
#define MSTD_ATOMIC_POINTER_LOCK_FREE   2
#endif
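
/* Illustrative sketch (not part of the original header): the macros follow the
 * standard ATOMIC_xxx_LOCK_FREE convention - 0 means a critical section is
 * always taken, 2 means exclusive accesses are always used - so code with hard
 * interrupt-latency requirements can check them at compile time.
 *
 * @code
 * // Refuse to build a latency-critical path on cores built without
 * // MBED_EXCLUSIVE_ACCESS (i.e. without LDREX/STREX support).
 * static_assert(MSTD_ATOMIC_INT_LOCK_FREE == 2,
 *               "atomic int must be lock-free for this ISR path");
 * @endcode
 */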

namespace mstd {

/** Atomic template
 *
 * `mstd::atomic` is intended to work as per C++14 `std::atomic`. `T` must be a
 * _TriviallyCopyable_, _CopyConstructible_ and _CopyAssignable_ type.
 * - All standard methods of `std::atomic` are supplied:
 *   + For any `T`: `load`, `store`, `exchange`, `compare_exchange_weak`,
 *     `compare_exchange_strong`, `operator T`, `operator=(T)`, `Atomic(T)`;
 *   + For integers and pointers: `++`, `+=`, `--`, `-=`, `fetch_add`, `fetch_sub`;
 *   + For integers: `&=`, `|=`, `^=`, `fetch_and`, `fetch_or`, `fetch_xor`.
 * - Operations are guaranteed atomic with respect to interrupts, and the
 *   operations can be used in interrupts - `std::atomic` implementations don't specify this.
 * - Implementation is optimized for uniprocessor use (no DMB instructions),
 *   unlike typical `std::atomic` implementations.
 * - Lock-free versions (LDREX/STREX) are used for user types if small enough and available,
 *   otherwise critical sections are used.
 * - If used with large objects, interrupt latency may be impacted.
 * - Valid initialisation forms are:
 *   + `atomic<int> foo;` (zero initialized if static or thread-local, else value indeterminate)
 *   + `atomic_init(&foo, 2);` (initialize a default-initialized variable, once only, not atomic)
 *   + `atomic<int> foo(2);` (value initialization)
 *   + `atomic<int> foo = { 2 };` (also legal C11 with `_Atomic int`)
 *   + `atomic<int> foo = 2;` (C++17 or later only - also legal C11 with `_Atomic int`)
 *   Note that the lack of a copy constructor limits the simple-looking assignment
 *   initialization to C++17 or later only.
 * - The value constructor is not available for small custom types.
 * - `MSTD_ATOMIC_XXX_LOCK_FREE` replaces `ATOMIC_XXX_LOCK_FREE` - "locking" forms
 *   take a critical section, non-locking forms do not.
 * - For `bool`, integer types and pointers, storage is compatible with the
 *   plain types. If necessary, they can be substituted as plain types for C
 *   compatibility in headers, and accessed using core_util_atomic functions.
 * @code
 * struct foo {
 * #ifdef __cplusplus
 *     mstd::atomic_uint32_t counter; // Use C++ templates from C++ code
 * #else
 *     uint32_t counter; // Could use core_util_atomic_xxx_u32 from C code, or just have this for structure layout.
 * #endif
 * };
 * @endcode
 */
template<typename T>
class atomic;

/* Pull C enum from mbed_critical.h into mstd namespace */
using memory_order = ::mbed_memory_order;
constexpr memory_order memory_order_relaxed = mbed_memory_order_relaxed;
constexpr memory_order memory_order_consume = mbed_memory_order_consume;
constexpr memory_order memory_order_acquire = mbed_memory_order_acquire;
constexpr memory_order memory_order_release = mbed_memory_order_release;
constexpr memory_order memory_order_acq_rel = mbed_memory_order_acq_rel;
constexpr memory_order memory_order_seq_cst = mbed_memory_order_seq_cst;

namespace impl {

/* For types up to uint64_t size, we use the mbed_critical.h functions with
 * uintX_t containers. Otherwise, we do it ourselves, with no special alignment.
 */
// *INDENT-OFF*
template<typename T>
using atomic_container = conditional<sizeof(T) <= sizeof(uint8_t),  uint8_t,
                         conditional_t<sizeof(T) <= sizeof(uint16_t), uint16_t,
                         conditional_t<sizeof(T) <= sizeof(uint32_t), uint32_t,
                         uint64_t>>>;
// *INDENT-ON*

template<typename T>
using atomic_container_t = typename atomic_container<T>::type;

template<typename A>
struct atomic_container_is_lock_free;
template<> struct atomic_container_is_lock_free<uint8_t>  : bool_constant<MSTD_ATOMIC_CHAR_LOCK_FREE  == 2> { };
template<> struct atomic_container_is_lock_free<uint16_t> : bool_constant<MSTD_ATOMIC_SHORT_LOCK_FREE == 2> { };
template<> struct atomic_container_is_lock_free<uint32_t> : bool_constant<MSTD_ATOMIC_INT_LOCK_FREE   == 2> { };
template<> struct atomic_container_is_lock_free<uint64_t> : bool_constant<MSTD_ATOMIC_LLONG_LOCK_FREE == 2> { };

template<typename T>
using atomic_is_lock_free = atomic_container_is_lock_free<atomic_container_t<T>>;

/* If one order is given for compare_exchange, it's reduced for the failure case, which doesn't store */
MBED_FORCEINLINE constexpr memory_order memorder_for_failure(memory_order order)
{
    return order == memory_order_acq_rel ? memory_order_acquire :
           order == memory_order_release ? memory_order_relaxed : order;
}
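
/* Illustrative sketch (not in the original header): the single-order
 * compare_exchange overloads derive the failure order via memorder_for_failure,
 * matching the C++11 rule that a failed compare-exchange performs no store and
 * therefore cannot have release semantics.
 *
 * @code
 * using namespace mstd;
 * static_assert(impl::memorder_for_failure(memory_order_acq_rel) == memory_order_acquire, "");
 * static_assert(impl::memorder_for_failure(memory_order_release) == memory_order_relaxed, "");
 * static_assert(impl::memorder_for_failure(memory_order_seq_cst) == memory_order_seq_cst, "");
 * @endcode
 */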

/* Base template for a raw Atomic (arbitrary type T), using atomic storage size N.
 * This generic implementation uses critical sections and has no alignment requirements.
 * There are specialisations for smaller sizes below.
 */
template<typename T, size_t N = sizeof(T), typename = void>
struct AtomicBaseRaw {
    using value_type = T;
    AtomicBaseRaw() noexcept = default;
    constexpr AtomicBaseRaw(T v) noexcept : data(std::move(v))
    {
    }
    bool is_lock_free() const volatile noexcept
    {
        return false;
    }
    T load(memory_order order = memory_order_seq_cst) const volatile noexcept
    {
        MBED_CHECK_LOAD_ORDER(order);
        // Cope with T not having default constructor
        union {
            char c[sizeof(T)];
            T val;
        } ret;
        {
            CriticalSectionLock lock;
            memcpy(std::addressof(ret.val), const_cast<const T *>(std::addressof(data)), sizeof(T));
        }
        return std::move(ret.val);
    }
    T load(memory_order order = memory_order_seq_cst) const noexcept
    {
        MBED_CHECK_LOAD_ORDER(order);
        CriticalSectionLock lock;
        return data;
    }
    void store(T desired, memory_order order = memory_order_seq_cst) volatile noexcept
    {
        MBED_CHECK_STORE_ORDER(order);
        CriticalSectionLock lock;
        memcpy(const_cast<T *>(std::addressof(data)), std::addressof(desired), sizeof(T));
    }
    void store(T desired, memory_order order = memory_order_seq_cst) noexcept
    {
        MBED_CHECK_STORE_ORDER(order);
        CriticalSectionLock lock;
        data = std::move(desired); // MoveAssignable
    }
    T exchange(T desired, memory_order = memory_order_seq_cst) volatile noexcept
    {
        // Cope with T not having default constructor
        union {
            char c[sizeof(T)];
            T val;
        } old;
        {
            CriticalSectionLock lock;
            memcpy(std::addressof(old.val), const_cast<const T *>(std::addressof(data)), sizeof(T));
            memcpy(const_cast<T *>(std::addressof(data)), std::addressof(desired), sizeof(T));
        }
        return old.val;
    }
    T exchange(T desired, memory_order = memory_order_seq_cst) noexcept
    {
        CriticalSectionLock lock;
        T old = std::move(data);   // MoveConstructible
        data = std::move(desired); // MoveAssignable
        return old;
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        MBED_CHECK_CAS_ORDER(success, failure);
        CriticalSectionLock lock;
        if (memcmp(const_cast<const T *>(std::addressof(data)), std::addressof(expected), sizeof(T)) == 0) {
            memcpy(const_cast<T *>(std::addressof(data)), std::addressof(desired), sizeof(T));
            return true;
        } else {
            memcpy(std::addressof(expected), const_cast<const T *>(std::addressof(data)), sizeof(T));
            return false;
        }
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) noexcept
    {
        MBED_CHECK_CAS_ORDER(success, failure);
        CriticalSectionLock lock;
        if (memcmp(std::addressof(data), std::addressof(expected), sizeof(T)) == 0) {
            data = std::move(desired); // MoveAssignable
            return true;
        } else {
            expected = data; // CopyAssignable
            return false;
        }
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        return compare_exchange_strong(expected, desired, success, failure);
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) noexcept
    {
        return compare_exchange_strong(expected, desired, success, failure);
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order order = memory_order_seq_cst) volatile noexcept
    {
        return compare_exchange_strong(expected, desired, order, memorder_for_failure(order));
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order order = memory_order_seq_cst) noexcept
    {
        return compare_exchange_strong(expected, desired, order, memorder_for_failure(order));
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order order = memory_order_seq_cst) volatile noexcept
    {
        return compare_exchange_weak(expected, desired, order, memorder_for_failure(order));
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order order = memory_order_seq_cst) noexcept
    {
        return compare_exchange_weak(expected, desired, order, memorder_for_failure(order));
    }
protected:
    union {
        // Having the union makes us just get zero-initialised, as per std::atomic, or our specializations,
        // rather than actually running T's default constructor.
        char dummy_for_zero_init;
        T data;
    };
    void init(T desired) volatile noexcept
    {
        memcpy(const_cast<T *>(std::addressof(data)), std::addressof(desired), sizeof(T));
    }
    void init(T desired) noexcept
    {
        data = std::move(desired);
    }
};
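
/* Illustrative sketch (not in the original header): a type larger than
 * uint64_t falls through to this generic implementation, so it still works,
 * but every access disables interrupts for the duration of a memcpy of
 * sizeof(T) bytes - this is the "large objects may impact interrupt latency"
 * case noted above. The type and function names here are hypothetical.
 *
 * @code
 * struct Calibration {            // 24 bytes - too big for LDREX/STREX
 *     float offset[3];
 *     float gain[3];
 * };
 * mstd::atomic<Calibration> cal;  // uses AtomicBaseRaw: not lock-free
 *
 * void update(const Calibration &c)
 * {
 *     cal.store(c);               // whole-struct copy inside a critical section
 * }
 * @endcode
 */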

template<typename T, typename A, size_t N = sizeof(A) - sizeof(T)>
struct AtomicSmallStoragePadded {
    union {
        A u;
        struct {
            T data;
            char pad[N];
        };
    };
    AtomicSmallStoragePadded() noexcept = default;
    constexpr AtomicSmallStoragePadded(T v) noexcept : data(std::move(v)), pad{0}
    {
    }
    constexpr AtomicSmallStoragePadded(A v) noexcept : u(v)
    {
    }
};

template<typename T, typename A>
struct AtomicSmallStorageUnpadded {
    union {
        A u;
        T data;
    };
    AtomicSmallStorageUnpadded() noexcept = default;
    constexpr AtomicSmallStorageUnpadded(T v) noexcept : data(std::move(v))
    {
    }
    constexpr AtomicSmallStorageUnpadded(A v) noexcept : u(v)
    {
    }
};

// *INDENT-OFF*
template<typename T, typename A = atomic_container_t<T>>
using AtomicSmallStorage = std::conditional_t<sizeof(T) == sizeof(A),
                                              AtomicSmallStorageUnpadded<T, A>,
                                              AtomicSmallStoragePadded<T, A>>;
// *INDENT-ON*
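
/* Illustrative note (not in the original header): the padding matters because
 * compare-exchange operates on the whole container A, so any bytes of A beyond
 * sizeof(T) take part in the comparison and must hold a known value (zero)
 * rather than being indeterminate. The Rgb type is hypothetical.
 *
 * @code
 * struct Rgb { uint8_t r, g, b; };                      // sizeof == 3
 * using Storage = mstd::impl::AtomicSmallStorage<Rgb>;  // padded inside a uint32_t
 * static_assert(sizeof(Storage) == sizeof(uint32_t), "fills the container");
 * @endcode
 */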

/* Base implementation specialisation for arbitrary small type T of size N,
 * using corresponding atomic_xxx functions acting on uintX_t data type A of that size.
 * This does involve type punning on a union, so isn't strictly legal, but it's no worse than
 * what has always been done with the pointer atomics.
 * Always pad when necessary so that compare-exchange works.
 *
 * It's only worth using the specific small form if there is a real lock-free implementation.
 * Otherwise the overhead of shuffling in and out of the integer container is larger than just
 * doing it directly.
 */
template<typename T, size_t N>
class AtomicBaseRaw<T, N, enable_if_t<atomic_is_lock_free<T>::value>> {
    AtomicSmallStorage<T> storage;
public:
    using value_type = T;
    AtomicBaseRaw() noexcept = default;
    constexpr AtomicBaseRaw(T v) : storage(std::move(v))
    {
    }
    bool is_lock_free() const volatile noexcept
    {
        return atomic_is_lock_free<T>::value;
    }
    T load() const volatile noexcept
    {
        AtomicSmallStorage<T> loaded(core_util_atomic_load(&storage.u));
        return loaded.data;
    }
    T load(memory_order order) const volatile noexcept
    {
        AtomicSmallStorage<T> loaded(core_util_atomic_load_explicit(&storage.u, order));
        return loaded.data;
    }
    T load() const noexcept
    {
        AtomicSmallStorage<T> loaded(core_util_atomic_load(&storage.u));
        return loaded.data;
    }
    T load(memory_order order) const noexcept
    {
        AtomicSmallStorage<T> loaded(core_util_atomic_load_explicit(&storage.u, order));
        return loaded.data;
    }
    void store(T desired) volatile noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        core_util_atomic_store(&storage.u, tostore.u);
    }
    void store(T desired, memory_order order) volatile noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        core_util_atomic_store_explicit(&storage.u, tostore.u, order);
    }
    void store(T desired) noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        core_util_atomic_store(&storage.u, tostore.u);
    }
    void store(T desired, memory_order order) noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        core_util_atomic_store_explicit(&storage.u, tostore.u, order);
    }
    T exchange(T desired) volatile noexcept
    {
        AtomicSmallStorage<T> exchanged(desired);
        exchanged.u = core_util_atomic_exchange(&storage.u, exchanged.u);
        return exchanged.data;
    }
    T exchange(T desired, memory_order order) volatile noexcept
    {
        AtomicSmallStorage<T> exchanged(desired);
        exchanged.u = core_util_atomic_exchange_explicit(&storage.u, exchanged.u, order);
        return exchanged.data;
    }
    bool compare_exchange_strong(T &expected, T desired) volatile noexcept
    {
        AtomicSmallStorage<T> expcur(expected);
        AtomicSmallStorage<T> tostore(desired);
        bool result = core_util_atomic_compare_exchange_strong(&storage.u, &expcur.u, tostore.u);
        if (!result) {
            expected = expcur.data;
        }
        return result;
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        AtomicSmallStorage<T> expcur(expected);
        AtomicSmallStorage<T> tostore(desired);
        bool result = core_util_atomic_compare_exchange_strong_explicit(&storage.u, &expcur.u, tostore.u, success, failure);
        if (!result) {
            expected = expcur.data;
        }
        return result;
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order order) volatile noexcept
    {
        return compare_exchange_strong(expected, desired, order, memorder_for_failure(order));
    }
    bool compare_exchange_weak(T &expected, T desired) volatile noexcept
    {
        AtomicSmallStorage<T> expcur(expected);
        AtomicSmallStorage<T> tostore(desired);
        bool result = core_util_atomic_compare_exchange_weak(&storage.u, &expcur.u, tostore.u);
        if (!result) {
            expected = expcur.data;
        }
        return result;
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        AtomicSmallStorage<T> expcur(expected);
        AtomicSmallStorage<T> tostore(desired);
        bool result = core_util_atomic_compare_exchange_weak_explicit(&storage.u, &expcur.u, tostore.u, success, failure);
        if (!result) {
            expected = expcur.data;
        }
        return result;
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order order) volatile noexcept
    {
        return compare_exchange_weak(expected, desired, order, memorder_for_failure(order));
    }
protected:
    void init(T desired) volatile noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        memcpy(const_cast<atomic_container_t<T> *>(&storage.u), &tostore.u, sizeof storage.u);
    }
    void init(T desired) noexcept
    {
        AtomicSmallStorage<T> tostore(desired);
        storage.u = std::move(tostore.u);
    }
};
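
/* Illustrative sketch (not in the original header): with MBED_EXCLUSIVE_ACCESS
 * available, a small trivially-copyable struct picks this specialisation, and
 * compare-exchange becomes an LDREX/STREX loop on the uint32_t container.
 * The Pair type and bump_y function are hypothetical.
 *
 * @code
 * struct Pair { int16_t x, y; };   // sizeof == 4: fits a uint32_t container
 * mstd::atomic<Pair> point;
 *
 * void bump_y()
 * {
 *     Pair cur = point.load();
 *     Pair next;
 *     do {
 *         next = cur;
 *         next.y++;
 *     } while (!point.compare_exchange_weak(cur, next));
 * }
 * @endcode
 */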

/* Template for an integer or pointer Atomic of type T, using atomic storage A.
 * Same functionality as AtomicBaseRaw, but can use a simpler implementation using casts
 * to convert between type T and the underlying storage type. This doesn't change
 * functionality, but compilers can generally optimise it better (which will particularly
 * help in debug builds).
 * C casts must be used as reinterpret_cast can't handle integer<->integer, and static_cast
 * can't handle pointer<->integer. Note that we always have A be unsigned, so that our
 * arithmetic is unsigned and defined on overflow. Compilers can and do treat signed
 * arithmetic overflow as undefined, but not cast overflow. (C++20 explicitly defines
 * assignment of unsigned to signed as 2's-complement).
 */
template<typename T, typename A = atomic_container_t<T>>
struct AtomicBaseInt {
    static_assert(sizeof(T) == sizeof(A), "AtomicBaseInt size mismatch");
    using value_type = T;
    AtomicBaseInt() noexcept = default;
    constexpr AtomicBaseInt(T v) noexcept : u(A(v))
    {
    }
    bool is_lock_free() const volatile noexcept
    {
        return atomic_container_is_lock_free<A>::value;
    }
    T load() const volatile noexcept
    {
        return T(core_util_atomic_load(&u));
    }
    T load(memory_order order) const volatile noexcept
    {
        return T(core_util_atomic_load_explicit(&u, order));
    }
    T load() const noexcept
    {
        return T(core_util_atomic_load(&u));
    }
    T load(memory_order order) const noexcept
    {
        return T(core_util_atomic_load_explicit(&u, order));
    }
    void store(T desired) volatile noexcept
    {
        core_util_atomic_store(&u, A(desired));
    }
    void store(T desired, memory_order order) volatile noexcept
    {
        core_util_atomic_store_explicit(&u, A(desired), order);
    }
    void store(T desired) noexcept
    {
        core_util_atomic_store(&u, A(desired));
    }
    void store(T desired, memory_order order) noexcept
    {
        core_util_atomic_store_explicit(&u, A(desired), order);
    }
    T exchange(T desired) volatile noexcept
    {
        A d = A(desired);
        return T(core_util_atomic_exchange(&u, d));
    }
    T exchange(T desired, memory_order order) volatile noexcept
    {
        A d = A(desired);
        return T(core_util_atomic_exchange_explicit(&u, d, order));
    }
    bool compare_exchange_strong(T &expected, T desired) volatile noexcept
    {
        A *expcur = reinterpret_cast<A *>(&expected);
        return core_util_atomic_compare_exchange_strong(&u, expcur, A(desired));
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        A *expcur = reinterpret_cast<A *>(&expected);
        return core_util_atomic_compare_exchange_strong_explicit(&u, expcur, A(desired), success, failure);
    }
    bool compare_exchange_strong(T &expected, T desired, memory_order order) volatile noexcept
    {
        return compare_exchange_strong(expected, desired, order, memorder_for_failure(order));
    }
    bool compare_exchange_weak(T &expected, T desired) volatile noexcept
    {
        A *expcur = reinterpret_cast<A *>(&expected);
        return core_util_atomic_compare_exchange_weak(&u, expcur, A(desired));
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order success, memory_order failure) volatile noexcept
    {
        A *expcur = reinterpret_cast<A *>(&expected);
        return core_util_atomic_compare_exchange_weak_explicit(&u, expcur, A(desired), success, failure);
    }
    bool compare_exchange_weak(T &expected, T desired, memory_order order) volatile noexcept
    {
        return compare_exchange_weak(expected, desired, order, memorder_for_failure(order));
    }
protected:
    A u;
    void init(T desired) volatile noexcept
    {
        u = A(desired);
    }
    void init(T desired) noexcept
    {
        u = A(desired);
    }
};
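
/* Worked example (not in the original header) of why A is unsigned: for
 * atomic<int8_t>, arithmetic is performed on a uint8_t container, where
 * overflow is well-defined wraparound, and the result is cast back.
 *
 * @code
 * mstd::atomic<int8_t> v(127);
 * v.fetch_add(1);          // container: 0x7F + 1 = 0x80, defined (unsigned)
 * int8_t now = v.load();   // reads back as -128 (2's-complement conversion)
 * @endcode
 */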

/* Template for an integer or pointer Atomic, including increment and
 * decrement functionality. If StrideT is void, then the increment and
 * decrement operators are ill-formed, as desired for atomic<bool>.
 */
template<typename T, typename StrideT = char, typename DiffT = T, typename A = atomic_container_t<T>>
struct AtomicWithAdd : public AtomicBaseInt<T, A> {
    using difference_type = DiffT;
    using AtomicBaseInt<T, A>::AtomicBaseInt;
    T operator++() volatile noexcept
    {
        A d = static_cast<A>(sizeof(StrideT));
        return T(core_util_atomic_incr(&this->u, d));
    }
    T operator++(int) volatile noexcept
    {
        A d = static_cast<A>(sizeof(StrideT));
        return T(core_util_atomic_fetch_add(&this->u, d));
    }
    T operator--() volatile noexcept
    {
        A d = static_cast<A>(sizeof(StrideT));
        return T(core_util_atomic_decr(&this->u, d));
    }
    T operator--(int) volatile noexcept
    {
        A d = static_cast<A>(sizeof(StrideT));
        return T(core_util_atomic_fetch_sub(&this->u, d));
    }
    T fetch_add(DiffT arg) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_fetch_add(&this->u, d));
    }
    T fetch_add(DiffT arg, memory_order order) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_fetch_add_explicit(&this->u, d, order));
    }
    T operator+=(DiffT arg) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_incr(&this->u, d));
    }
    T fetch_sub(DiffT arg) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_fetch_sub(&this->u, d));
    }
    T fetch_sub(DiffT arg, memory_order order) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_fetch_sub_explicit(&this->u, d, order));
    }
    T operator-=(DiffT arg) volatile noexcept
    {
        A d = static_cast<A>(arg * sizeof(StrideT));
        return T(core_util_atomic_decr(&this->u, d));
    }
};

/* Template for an integer Atomic with bitwise operations */
template<typename T, typename A = atomic_container_t<T>>
struct AtomicWithBitwise : public AtomicWithAdd<T, char, T, A> {
    using AtomicWithAdd<T, char, T, A>::AtomicWithAdd;
    T fetch_and(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_and(&this->u, d));
    }
    T fetch_and(T arg, memory_order order) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_and_explicit(&this->u, d, order));
    }
    T operator&=(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_and_fetch(&this->u, d));
    }
    T fetch_or(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_or(&this->u, d));
    }
    T fetch_or(T arg, memory_order order) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_or_explicit(&this->u, d, order));
    }
    T operator|=(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_or_fetch(&this->u, d));
    }
    T fetch_xor(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_xor(&this->u, d));
    }
    T fetch_xor(T arg, memory_order order) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_fetch_xor_explicit(&this->u, d, order));
    }
    T operator^=(T arg) volatile noexcept
    {
        A d = static_cast<A>(arg);
        return static_cast<T>(core_util_atomic_xor_fetch(&this->u, d));
    }
};

/* Selector between the implementations:
 *   bool                 -> AtomicBaseInt
 *   other integral types -> AtomicWithBitwise
 *   everything else      -> AtomicBaseRaw
 * (Pointers are specialized in the public API)
 */
// *INDENT-OFF*
template<typename T, typename = void>
struct AtomicSelector : type_identity<AtomicBaseRaw<T>> { };

template<typename T>
struct AtomicSelector<T, enable_if_t<is_same<T, bool>::value>>
    : type_identity<AtomicBaseInt<T>> { };

template<typename T>
struct AtomicSelector<T, enable_if_t<is_integral<T>::value && !is_same<T, bool>::value>>
    : type_identity<AtomicWithBitwise<T>> { };
// *INDENT-ON*

template<typename T>
using Atomic = typename AtomicSelector<T>::type;

} // namespace impl

template<typename T>
void atomic_init(volatile atomic<T> *obj, typename atomic<T>::value_type desired) noexcept;
template<typename T>
void atomic_init(atomic<T> *obj, typename atomic<T>::value_type desired) noexcept;
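
/* Illustrative sketch (not in the original header): because operations are
 * atomic with respect to interrupts, the bitwise forms can publish event bits
 * from an ISR without any extra locking. The names below are hypothetical.
 *
 * @code
 * mstd::atomic<uint32_t> events;     // static: zero-initialized
 *
 * void rx_irq_handler()              // hypothetical ISR
 * {
 *     events.fetch_or(1u << 3);      // set "RX ready" bit atomically
 * }
 *
 * uint32_t take_events()
 * {
 *     return events.exchange(0);     // consume all pending bits
 * }
 * @endcode
 */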

/* Base template - let impl::Atomic dispatch to raw, base integer or
 * integer-with-bitwise */
template<typename T>
struct atomic : public impl::Atomic<T> {
    // Constraints from LWG 3012
    static_assert(is_trivially_copyable<T>::value, "Atomic types must be TriviallyCopyable");
    static_assert(is_copy_constructible<T>::value, "Atomic types must be CopyConstructible");
    static_assert(is_move_constructible<T>::value, "Atomic types must be MoveConstructible");
    static_assert(is_copy_assignable<T>::value, "Atomic types must be CopyAssignable");
    static_assert(is_move_assignable<T>::value, "Atomic types must be MoveAssignable");
    atomic() noexcept = default;
    atomic(const atomic &) = delete;
    constexpr atomic(T v) noexcept : impl::Atomic<T>(std::move(v))
    {
    }
    operator T() const volatile noexcept
    {
        return this->load();
    }
    operator T() const noexcept
    {
        return this->load();
    }
    T operator=(T desired) volatile noexcept
    {
        this->store(desired);
        return desired;
    }
    T operator=(T desired) noexcept
    {
        this->store(desired);
        return desired;
    }
    atomic &operator=(const atomic &) = delete;
private:
    friend void atomic_init<>(volatile atomic *obj, typename atomic::value_type desired) noexcept;
    friend void atomic_init<>(atomic *obj, typename atomic::value_type desired) noexcept;
};

/* Pointer specialisation - support increment and decrement by ptrdiff_t,
 * as long as sizeof(T) is valid to act as the stride. Annoyingly, C++11
 * doesn't provide operator->, so neither do we, so you have to say
 * "aptr.load()->member" to use it to access a structure. *aptr is fine though.
 */
template<typename T>
struct atomic<T *> : public impl::AtomicWithAdd<T *, T, ptrdiff_t> {
    atomic() noexcept = default;
    atomic(const atomic &) = delete;
    constexpr atomic(T *v) noexcept : impl::AtomicWithAdd<T *, T, ptrdiff_t>(v)
    {
    }
    operator T *() const volatile noexcept
    {
        return this->load();
    }
    operator T *() const noexcept
    {
        return this->load();
    }
    T *operator=(T *desired) volatile noexcept
    {
        this->store(desired);
        return desired;
    }
    T *operator=(T *desired) noexcept
    {
        this->store(desired);
        return desired;
    }
    atomic &operator=(const atomic &) = delete;
private:
    friend void atomic_init<>(volatile atomic *obj, typename atomic::value_type desired) noexcept;
    friend void atomic_init<>(atomic *obj, typename atomic::value_type desired) noexcept;
};
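
/* Illustrative sketch (not in the original header): atomic pointer arithmetic
 * strides by sizeof(T), and member access goes through load() since there is
 * no operator->. The Msg type and functions are hypothetical.
 *
 * @code
 * struct Msg { uint32_t id; };
 * Msg pool[8];
 * mstd::atomic<Msg *> head(pool);
 *
 * Msg *claim()
 * {
 *     return head.fetch_add(1);    // advances by sizeof(Msg) bytes
 * }
 *
 * uint32_t peek_id()
 * {
 *     return head.load()->id;      // the "aptr.load()->member" form
 * }
 * @endcode
 */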

using atomic_bool = atomic<bool>;
using atomic_char = atomic<char>;
using atomic_schar = atomic<signed char>;
using atomic_uchar = atomic<unsigned char>;
using atomic_char16_t = atomic<char16_t>;
using atomic_char32_t = atomic<char32_t>;
using atomic_wchar_t = atomic<wchar_t>;
using atomic_short = atomic<short>;
using atomic_ushort = atomic<unsigned short>;
using atomic_int = atomic<int>;
using atomic_uint = atomic<unsigned int>;
using atomic_long = atomic<long>;
using atomic_ulong = atomic<unsigned long>;
using atomic_llong = atomic<long long>;
using atomic_ullong = atomic<unsigned long long>;
using atomic_int8_t = atomic<int8_t>;
using atomic_uint8_t = atomic<uint8_t>;
using atomic_int16_t = atomic<int16_t>;
using atomic_uint16_t = atomic<uint16_t>;
using atomic_int32_t = atomic<int32_t>;
using atomic_uint32_t = atomic<uint32_t>;
using atomic_int64_t = atomic<int64_t>;
using atomic_uint64_t = atomic<uint64_t>;
using atomic_int_least8_t = atomic<int_least8_t>;
using atomic_uint_least8_t = atomic<uint_least8_t>;
using atomic_int_least16_t = atomic<int_least16_t>;
using atomic_uint_least16_t = atomic<uint_least16_t>;
using atomic_int_least32_t = atomic<int_least32_t>;
using atomic_uint_least32_t = atomic<uint_least32_t>;
using atomic_int_least64_t = atomic<int_least64_t>;
using atomic_uint_least64_t = atomic<uint_least64_t>;
using atomic_int_fast8_t = atomic<int_fast8_t>;
using atomic_uint_fast8_t = atomic<uint_fast8_t>;
using atomic_int_fast16_t = atomic<int_fast16_t>;
using atomic_uint_fast16_t = atomic<uint_fast16_t>;
using atomic_int_fast32_t = atomic<int_fast32_t>;
using atomic_uint_fast32_t = atomic<uint_fast32_t>;
using atomic_int_fast64_t = atomic<int_fast64_t>;
using atomic_uint_fast64_t = atomic<uint_fast64_t>;
using atomic_intptr_t = atomic<intptr_t>;
using atomic_uintptr_t = atomic<uintptr_t>;
using atomic_size_t = atomic<size_t>;
using atomic_ptrdiff_t = atomic<ptrdiff_t>;
using atomic_intmax_t = atomic<intmax_t>;
using atomic_uintmax_t = atomic<uintmax_t>;

template<typename T>
void atomic_init(atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    obj->init(desired);
}

template<typename T>
void atomic_init(volatile atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    obj->init(desired);
}

template<typename T>
bool atomic_is_lock_free(const atomic<T> *obj) noexcept
{
    return obj->is_lock_free();
}

template<typename T>
bool atomic_is_lock_free(const volatile atomic<T> *obj) noexcept
{
    return obj->is_lock_free();
}

template<typename T>
void atomic_store(atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    obj->store(desired);
}

template<typename T>
void atomic_store(volatile atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    obj->store(desired);
}

template<typename T>
void atomic_store_explicit(atomic<T> *obj, typename atomic<T>::value_type desired, memory_order order) noexcept
{
    obj->store(desired, order);
}

template<typename T>
void atomic_store_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type desired, memory_order order) noexcept
{
    obj->store(desired, order);
}

template<typename T>
T atomic_load(const atomic<T> *obj) noexcept
{
    return obj->load();
}

template<typename T>
T atomic_load(const volatile atomic<T> *obj) noexcept
{
    return obj->load();
}

template<typename T>
T atomic_load_explicit(const atomic<T> *obj, memory_order order) noexcept
{
    return obj->load(order);
}

template<typename T>
T atomic_load_explicit(const volatile atomic<T> *obj, memory_order order) noexcept
{
    return obj->load(order);
}

template<typename T>
T atomic_exchange(atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    return obj->exchange(desired);
}

template<typename T>
T atomic_exchange(volatile atomic<T> *obj, typename atomic<T>::value_type desired) noexcept
{
    return obj->exchange(desired);
}

template<typename T>
T atomic_exchange_explicit(atomic<T> *obj, typename atomic<T>::value_type desired, memory_order order) noexcept
{
    return obj->exchange(desired, order);
}

template<typename T>
T atomic_exchange_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type desired, memory_order order) noexcept
{
    return obj->exchange(desired, order);
}

template<typename T>
bool atomic_compare_exchange_weak(atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired) noexcept
{
    return obj->compare_exchange_weak(*currentExpected, desired);
}

template<typename T>
bool atomic_compare_exchange_weak(volatile atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired) noexcept
{
    return obj->compare_exchange_weak(*currentExpected, desired);
}

template<typename T>
bool atomic_compare_exchange_strong(atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired) noexcept
{
    return obj->compare_exchange_strong(*currentExpected, desired);
}

template<typename T>
bool atomic_compare_exchange_strong(volatile atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired) noexcept
{
    return obj->compare_exchange_strong(*currentExpected, desired);
}

template<typename T>
bool atomic_compare_exchange_weak_explicit(atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired, memory_order success, memory_order failure) noexcept
{
    return obj->compare_exchange_weak(*currentExpected, desired, success, failure);
}

template<typename T>
bool atomic_compare_exchange_weak_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired, memory_order success, memory_order failure) noexcept
{
    return obj->compare_exchange_weak(*currentExpected, desired, success, failure);
}

template<typename T>
bool atomic_compare_exchange_strong_explicit(atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired, memory_order success, memory_order failure) noexcept
{
    return obj->compare_exchange_strong(*currentExpected, desired, success, failure);
}

template<typename T>
bool atomic_compare_exchange_strong_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type *currentExpected, typename atomic<T>::value_type desired, memory_order success, memory_order failure) noexcept
{
    return obj->compare_exchange_strong(*currentExpected, desired, success, failure);
}
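
/* Illustrative sketch (not in the original header): the free functions mirror
 * the C11/C++11 non-member API, which helps when porting code written against
 * <stdatomic.h>. The seq counter and next_seq function are hypothetical.
 *
 * @code
 * mstd::atomic_uint32_t seq;
 *
 * uint32_t next_seq()
 * {
 *     uint32_t expected = mstd::atomic_load(&seq);
 *     while (!mstd::atomic_compare_exchange_weak(&seq, &expected, expected + 1)) {
 *     }
 *     return expected + 1;
 * }
 * @endcode
 */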

template<typename T>
T atomic_fetch_add(atomic<T> *obj, typename atomic<T>::difference_type arg) noexcept
{
    return obj->fetch_add(arg);
}

template<typename T>
T atomic_fetch_add(volatile atomic<T> *obj, typename atomic<T>::difference_type arg) noexcept
{
    return obj->fetch_add(arg);
}

template<typename T>
T atomic_fetch_add_explicit(atomic<T> *obj, typename atomic<T>::difference_type arg, memory_order order) noexcept
{
    return obj->fetch_add(arg, order);
}

template<typename T>
T atomic_fetch_add_explicit(volatile atomic<T> *obj, typename atomic<T>::difference_type arg, memory_order order) noexcept
{
    return obj->fetch_add(arg, order);
}

template<typename T>
T atomic_fetch_sub(atomic<T> *obj, typename atomic<T>::difference_type arg) noexcept
{
    return obj->fetch_sub(arg);
}

template<typename T>
T atomic_fetch_sub(volatile atomic<T> *obj, typename atomic<T>::difference_type arg) noexcept
{
    return obj->fetch_sub(arg);
}

template<typename T>
T atomic_fetch_sub_explicit(atomic<T> *obj, typename atomic<T>::difference_type arg, memory_order order) noexcept
{
    return obj->fetch_sub(arg, order);
}

template<typename T>
T atomic_fetch_sub_explicit(volatile atomic<T> *obj, typename atomic<T>::difference_type arg, memory_order order) noexcept
{
    return obj->fetch_sub(arg, order);
}

template<typename T>
T atomic_fetch_and(atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_and(arg);
}

template<typename T>
T atomic_fetch_and(volatile atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_and(arg);
}

template<typename T>
T atomic_fetch_and_explicit(atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_and(arg, order);
}

template<typename T>
T atomic_fetch_and_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_and(arg, order);
}

template<typename T>
T atomic_fetch_or(atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_or(arg);
}

template<typename T>
T atomic_fetch_or(volatile atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_or(arg);
}

template<typename T>
T atomic_fetch_or_explicit(atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_or(arg, order);
}

template<typename T>
T atomic_fetch_or_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_or(arg, order);
}

template<typename T>
T atomic_fetch_xor(atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_xor(arg);
}

template<typename T>
T atomic_fetch_xor(volatile atomic<T> *obj, typename atomic<T>::value_type arg) noexcept
{
    return obj->fetch_xor(arg);
}

template<typename T>
T atomic_fetch_xor_explicit(atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_xor(arg, order);
}

template<typename T>
T atomic_fetch_xor_explicit(volatile atomic<T> *obj, typename atomic<T>::value_type arg, memory_order order) noexcept
{
    return obj->fetch_xor(arg, order);
}

struct atomic_flag {
    atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag &) = delete;
    atomic_flag &operator=(const atomic_flag &) = delete;
    atomic_flag &operator=(const atomic_flag &) volatile = delete;
    bool test_and_set() volatile noexcept
    {
        return core_util_atomic_flag_test_and_set(&_flag);
    }
    bool test_and_set(memory_order order) volatile noexcept
    {
        return core_util_atomic_flag_test_and_set_explicit(&_flag, order);
    }
    void clear() volatile noexcept
    {
        core_util_atomic_flag_clear(&_flag);
    }
    void clear() noexcept
    {
        core_util_atomic_flag_clear(&_flag);
    }
    void clear(memory_order order) volatile noexcept
    {
        core_util_atomic_flag_clear_explicit(&_flag, order);
    }
    void clear(memory_order order) noexcept
    {
        core_util_atomic_flag_clear_explicit(&_flag, order);
    }
private:
    core_util_atomic_flag _flag;
};
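
/* Illustrative sketch (not in the original header): atomic_flag is the classic
 * test-and-set building block. Note that on this uniprocessor-optimized
 * platform, spinning on the flag against an ISR cannot make progress, so a
 * try-lock pattern (or CriticalSectionLock) is the appropriate use.
 * The busy flag and functions are hypothetical.
 *
 * @code
 * mstd::atomic_flag busy;   // static: zero-initialized, i.e. clear
 *
 * bool try_start()
 * {
 *     return !busy.test_and_set(mstd::memory_order_acquire);
 * }
 *
 * void finish()
 * {
 *     busy.clear(mstd::memory_order_release);
 * }
 * @endcode
 */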

MBED_FORCEINLINE bool atomic_flag_test_and_set(volatile atomic_flag *flag) noexcept
{
    return flag->test_and_set();
}

MBED_FORCEINLINE bool atomic_flag_test_and_set(atomic_flag *flag) noexcept
{
    return flag->test_and_set();
}

MBED_FORCEINLINE bool atomic_flag_test_and_set_explicit(volatile atomic_flag *flag, memory_order order) noexcept
{
    return flag->test_and_set(order);
}

MBED_FORCEINLINE bool atomic_flag_test_and_set_explicit(atomic_flag *flag, memory_order order) noexcept
{
    return flag->test_and_set(order);
}

MBED_FORCEINLINE void atomic_flag_clear(volatile atomic_flag *flag) noexcept
{
    flag->clear();
}

MBED_FORCEINLINE void atomic_flag_clear(atomic_flag *flag) noexcept
{
    flag->clear();
}

MBED_FORCEINLINE void atomic_flag_clear_explicit(volatile atomic_flag *flag, memory_order order) noexcept
{
    flag->clear(order);
}

MBED_FORCEINLINE void atomic_flag_clear_explicit(atomic_flag *flag, memory_order order) noexcept
{
    flag->clear(order);
}

#define MSTD_ATOMIC_FLAG_INIT { CORE_UTIL_ATOMIC_FLAG_INIT }

template<typename T>
T kill_dependency(T y) noexcept
{
    return y;
}

MBED_FORCEINLINE void atomic_signal_fence(memory_order order) noexcept
{
    if (order != memory_order_relaxed) {
        MBED_COMPILER_BARRIER();
    }
}

MBED_FORCEINLINE void atomic_thread_fence(memory_order order) noexcept
{
    if (order != memory_order_relaxed) {
        MBED_BARRIER();
    }
}

} // namespace mstd

#endif