mirror of https://github.com/ARMmbed/mbed-os.git
mbed_atomic templates: add noexcept

Add noexcept for consistency with upcoming Atomic.h

pull/10274/head
parent f9f887d88e
commit 3fd7e11595
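The motivation in one sketch: std::atomic's operations are all noexcept, and a wrapper class built on these templates wants to make the same no-throw promise. The following is a minimal illustration of the kind of wrapper the commit message alludes to; it is not the actual Atomic.h, and the member set and names are assumptions:

    template<typename T>
    class Atomic {
    public:
        T load() const noexcept
        {
            // This noexcept is honest only because core_util_atomic_load is
            // itself declared noexcept after this commit; otherwise the
            // wrapper would promise a guarantee its implementation lacks.
            return core_util_atomic_load(&_value);
        }
        void store(T desiredValue) noexcept
        {
            core_util_atomic_store(&_value, desiredValue);
        }
    private:
        volatile T _value;
    };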
@@ -1089,49 +1089,49 @@ DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
  */
 #define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
 template<> \
-inline T core_util_atomic_load(const volatile T *valuePtr) \
+inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
 { \
     return core_util_atomic_load_##fn_suffix(valuePtr); \
 } \
 \
 template<> \
-inline T core_util_atomic_load(const T *valuePtr) \
+inline T core_util_atomic_load(const T *valuePtr) noexcept \
 { \
     return core_util_atomic_load_##fn_suffix(valuePtr); \
 } \
 \
 template<> \
-inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
+inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 } \
 \
 template<> \
-inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
+inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
 }
 
 template<typename T>
-inline T *core_util_atomic_load(T *const volatile *valuePtr)
+inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
 {
     return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
 }
 
 template<typename T>
-inline T *core_util_atomic_load(T *const *valuePtr)
+inline T *core_util_atomic_load(T *const *valuePtr) noexcept
 {
     return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
 }
 
 template<typename T>
-inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
+inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
+inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
 }
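For reference, this is roughly what one expansion of the macro above reads like after the change, taking the uint32_t/u32 instantiation as the example (a sketch; only the first of the four specializations the macro generates is shown):

    template<>
    inline uint32_t core_util_atomic_load(const volatile uint32_t *valuePtr) noexcept
    {
        // Dispatches to the C function selected by the fn_suffix argument.
        return core_util_atomic_load_u32(valuePtr);
    }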
@@ -1148,49 +1148,49 @@ DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
 
 #define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
 template<> \
-inline void core_util_atomic_store(volatile T *valuePtr, T val) \
+inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
 { \
     core_util_atomic_store_##fn_suffix(valuePtr, val); \
 } \
 \
 template<> \
-inline void core_util_atomic_store(T *valuePtr, T val) \
+inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
 { \
     core_util_atomic_store_##fn_suffix(valuePtr, val); \
 } \
 \
 template<> \
-inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
+inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 } \
 \
 template<> \
-inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
+inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
 { \
     core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
 }
 
 template<typename T>
-inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
+inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
 {
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
 }
 
 template<typename T>
-inline void core_util_atomic_store(T **valuePtr, T *val)
+inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
 {
     core_util_atomic_store_ptr((void **) valuePtr, val);
 }
 
 template<typename T>
-inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
+inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
 {
     core_util_atomic_store_ptr((void *volatile *) valuePtr, val, order);
 }
 
 template<typename T>
-inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
+inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
 {
     core_util_atomic_store_ptr((void **) valuePtr, val, order);
 }
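A sketch of the pointer-store templates above in use. The names Config, config and publish are hypothetical; the point is that every T* funnels through the single void* implementation via the casts shown in the diff:

    struct Config { int baud; };
    Config *volatile config = nullptr;

    void publish(Config *fresh) noexcept
    {
        // The plain form is sequentially consistent; a release-only
        // publication would use core_util_atomic_store_explicit with
        // mbed_memory_order_release instead.
        core_util_atomic_store(&config, fresh);
    }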
@@ -1207,19 +1207,19 @@ DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
 
 #define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
 template<> inline \
-bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
+bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
 { \
     return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
 }
 
 template<typename T>
-inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
+inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
 {
     return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
 }
 
 template<typename T>
-inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
+inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
 {
     return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
 }
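A sketch of a typical caller of the weak CAS template above: a lock-free increment (counter and increment are hypothetical names). On failure the function writes the value it actually observed back through expectedCurrentValue, so the retry loop needs no separate re-read:

    volatile uint32_t counter = 0;

    void increment() noexcept
    {
        uint32_t expected = core_util_atomic_load(&counter);
        uint32_t desired = expected + 1;
        // The weak form may fail spuriously, so it belongs in a loop; each
        // failed attempt has refreshed 'expected' with the current value.
        while (!core_util_atomic_compare_exchange_weak(&counter, &expected, desired)) {
            desired = expected + 1;
        }
    }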
@@ -1240,63 +1240,63 @@ DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
 
 #define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
 template<> \
-inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
+inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
 { \
     return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
 } \
 \
 template<> \
 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
-                                            mbed_memory_order order) \
+                                            mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
 }
 
 
 template<>
-inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
+inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
 {
     return core_util_atomic_exchange_bool(valuePtr, arg);
 }
 
 template<>
-inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
+inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
 {
     return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
+inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
 {
     return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
 }
 
 template<typename T>
-inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
+inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg, order);
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
+inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
 {
     return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
+inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
 {
     return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
 }
 
 template<typename T>
-inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
+inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
 {
     return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
 }
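The element-count scaling in the pointer fetch templates above is worth spelling out: arg is multiplied by sizeof(T) before reaching the byte-oriented _ptr implementation, mirroring std::atomic<T*> semantics. A sketch (buffer, slot and claim_next are hypothetical):

    uint32_t buffer[8];
    uint32_t *volatile slot = buffer;

    uint32_t *claim_next() noexcept
    {
        // Advances 'slot' by one element (sizeof(uint32_t) bytes) and
        // returns the pre-increment pointer, like std::atomic<T*>::fetch_add.
        return core_util_atomic_fetch_add(&slot, 1);
    }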
@@ -1316,14 +1316,14 @@ inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t a
 
 #define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
 template<> \
-inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
+inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
 { \
     return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
 } \
 \
 template<> \
 inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
-                                            mbed_memory_order order) \
+                                            mbed_memory_order order) noexcept \
 { \
     return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
 }
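What the manual pre-op macro above builds: a pre-op (returning the new value) synthesized from a post-op (returning the old value) plus the trailing OP token. Assuming an invocation along the lines of DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg), which is inferred rather than shown in this hunk, the uint8_t expansion would read roughly:

    // Sketch for name=incr, T=uint8_t, postname=fetch_add, OP='+ arg'
    template<>
    inline uint8_t core_util_atomic_incr(volatile uint8_t *valuePtr, uint8_t arg) noexcept
    {
        // Old value from the post-op, plus arg, yields the new value.
        return core_util_atomic_fetch_add_u8(valuePtr, arg) + arg;
    }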
@@ -1344,16 +1344,16 @@ namespace impl {
 // Use custom assembler forms for pre-ops where available, else construct from post-ops
 #if MBED_EXCLUSIVE_ACCESS
 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
-    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg); \
-    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); \
+    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
+    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
    DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
     DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
     DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
 #else
 #define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
-    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg); \
-    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order); \
+    template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
+    template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
     DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP)
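Since the noexcept operator inspects declarations, the guarantee being added throughout this commit is checkable at compile time. A sketch of such a check (illustrative; 'probe' is a hypothetical variable in a translation unit that includes mbed_atomic.h):

    #include <cstdint>

    static volatile uint32_t probe;
    // noexcept(...) is an unevaluated operand: no load actually happens here.
    static_assert(noexcept(core_util_atomic_load(&probe)),
                  "atomic templates are declared no-throw after this commit");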
@@ -895,92 +895,92 @@ MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint6
 // A proper mbed::Atomic<T> class would solve the issue.
 
 /** \copydoc core_util_atomic_load_u8 */
-template<typename T> T core_util_atomic_load(const volatile T *valuePtr);
+template<typename T> T core_util_atomic_load(const volatile T *valuePtr) noexcept;
 /** \copydoc core_util_atomic_load_u8 */
-template<typename T> T core_util_atomic_load(const T *valuePtr);
+template<typename T> T core_util_atomic_load(const T *valuePtr) noexcept;
 /** \copydoc core_util_atomic_store_u8 */
-template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue);
+template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue) noexcept;
 /** \copydoc core_util_atomic_store_u8 */
-template<typename T> void core_util_atomic_store(T *valuePtr, T desiredValue);
+template<typename T> void core_util_atomic_store(T *valuePtr, T desiredValue) noexcept;
 /** \copydoc core_util_atomic_exchange_u8 */
-template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue);
+template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue) noexcept;
 /** \copydoc core_util_atomic_cas_u8 */
-template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
+template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept;
 /** \copydoc core_util_atomic_compare_exchange_weak_u8 */
-template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
+template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept;
 /** \copydoc core_util_fetch_add_u8 */
-template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg);
+template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg) noexcept;
 /** \copydoc core_util_fetch_sub_u8 */
-template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg);
+template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg) noexcept;
 /** \copydoc core_util_fetch_and_u8 */
-template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg);
+template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg) noexcept;
 /** \copydoc core_util_fetch_or_u8 */
-template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg);
+template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg) noexcept;
 /** \copydoc core_util_fetch_xor_u8 */
-template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg);
+template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg) noexcept;
 
 /** \copydoc core_util_atomic_load_explicit_u8 */
-template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order);
+template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_load_explicit_u8 */
-template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order);
+template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_store_explicit_u8 */
-template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order);
+template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_store_explicit_u8 */
-template<typename T> void core_util_atomic_store_explicit(T *valuePtr, T desiredValue, mbed_memory_order order);
+template<typename T> void core_util_atomic_store_explicit(T *valuePtr, T desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_exchange_explicit_u8 */
-template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order);
+template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_cas_explicit_u8 */
-template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
+template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
 /** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
-template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
+template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
 /** \copydoc core_util_fetch_add_explicit_u8 */
-template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
+template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept;
 /** \copydoc core_util_fetch_sub_explicit_u8 */
-template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
+template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept;
 /** \copydoc core_util_fetch_and_explicit_u8 */
-template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
+template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept;
 /** \copydoc core_util_fetch_or_explicit_u8 */
-template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
+template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept;
 /** \copydoc core_util_fetch_xor_explicit_u8 */
-template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
+template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept;
 
 /** \copydoc core_util_atomic_load_ptr */
-template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr);
+template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept;
 /** \copydoc core_util_atomic_load_ptr */
-template<typename T> inline T *core_util_atomic_load(T *const *valuePtr);
+template<typename T> inline T *core_util_atomic_load(T *const *valuePtr) noexcept;
 /** \copydoc core_util_atomic_store_ptr */
-template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue);
+template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue) noexcept;
 /** \copydoc core_util_atomic_store_ptr */
-template<typename T> inline void core_util_atomic_store(T **valuePtr, T *desiredValue);
+template<typename T> inline void core_util_atomic_store(T **valuePtr, T *desiredValue) noexcept;
 /** \copydoc core_util_atomic_exchange_ptr */
-template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue);
+template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue) noexcept;
 /** \copydoc core_util_atomic_cas_ptr */
-template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
+template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept;
 /** \copydoc core_util_atomic_compare_exchange_weak_ptr */
-template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
+template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept;
 /** \copydoc core_util_fetch_add_ptr */
-template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg);
+template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept;
 /** \copydoc core_util_fetch_sub_ptr */
-template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg);
+template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept;
 
 /** \copydoc core_util_atomic_load_explicit_ptr */
-template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order);
+template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_load_explicit_ptr */
-template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order);
+template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_store_explicit_ptr */
-template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
+template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_store_explicit_ptr */
-template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue, mbed_memory_order order);
+template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_exchange_explicit_ptr */
-template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
+template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order) noexcept;
 /** \copydoc core_util_atomic_cas_explicit_ptr */
-template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
+template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
 /** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */
-template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
+template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
 /** \copydoc core_util_fetch_add_explicit_ptr */
-template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
+template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;
 /** \copydoc core_util_fetch_sub_explicit_ptr */
-template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
+template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;
 
 #endif // __cplusplus
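A sketch of the integer fetch declarations above in use, clearing a flag bit and testing its previous state (flags, FLAG_READY and clear_ready are hypothetical names):

    volatile uint8_t flags = 0xFFu;
    const uint8_t FLAG_READY = 0x01u;

    bool clear_ready() noexcept
    {
        // fetch_and returns the value held *before* the AND, so the caller
        // can tell whether the flag was set at the moment it was cleared.
        uint8_t previous = core_util_atomic_fetch_and(&flags, static_cast<uint8_t>(~FLAG_READY));
        return (previous & FLAG_READY) != 0;
    }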