Merge pull request #10274 from kjbracey-arm/atomic_template

Add Atomic<T> template
Kevin Bracey 2019-07-12 14:26:03 +03:00 committed by GitHub
commit d53bd6120d
8 changed files with 1650 additions and 193 deletions


@@ -26,69 +26,84 @@
using utest::v1::Case;
namespace {
/* Lock-free operations will be much faster - keep runtime down */
#if MBED_ATOMIC_INT_LOCK_FREE
#define ADD_ITERATIONS (SystemCoreClock / 1000)
#else
#define ADD_ITERATIONS (SystemCoreClock / 8000)
#endif
#define ADD_UNLOCKED_ITERATIONS (SystemCoreClock / 1000)
#define ADD_LOCKED_ITERATIONS (SystemCoreClock / 8000)
template <typename T>
void add_incrementer(T *ptr)
template <typename A>
static inline long add_iterations(A &a)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
core_util_atomic_fetch_add(ptr, T(1));
}
return a.is_lock_free() ? ADD_UNLOCKED_ITERATIONS : ADD_LOCKED_ITERATIONS;
}
template <typename T>
void add_release_incrementer(T *ptr)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
core_util_atomic_fetch_add_explicit(ptr, T(1), mbed_memory_order_release);
template <typename A>
struct add_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
++(*ptr);
}
}
}
};
template <typename T>
void sub_incrementer(T *ptr)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
core_util_atomic_fetch_sub(ptr, T(-1));
template <typename A>
struct add_release_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
ptr->fetch_add(1, mbed::memory_order_release);
}
}
}
};
template <typename T>
void bitops_incrementer(T *ptr)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
core_util_atomic_fetch_add(ptr, T(1));
core_util_atomic_fetch_and(ptr, T(-1));
core_util_atomic_fetch_or(ptr, T(0));
template <typename A>
struct sub_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
ptr->fetch_sub(-1);
}
}
}
};
template <typename T>
void weak_incrementer(T *ptr)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
T val = core_util_atomic_load(ptr);
do {
} while (!core_util_atomic_compare_exchange_weak(ptr, &val, T(val + 1)));
template <typename A>
struct bitops_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
(*ptr) += 1;
(*ptr) &= -1;
(*ptr) |= 0;
}
}
}
};
template <typename T>
void strong_incrementer(T *ptr)
{
for (long i = ADD_ITERATIONS; i > 0; i--) {
T val = core_util_atomic_load(ptr);
do {
} while (!core_util_atomic_compare_exchange_strong(ptr, &val, T(val + 1)));
template <typename A>
struct weak_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
typename A::value_type val = ptr->load();
do {
} while (!ptr->compare_exchange_weak(val, val + 1));
}
}
}
};
template <typename A>
struct strong_incrementer {
static void op(A *ptr)
{
for (long i = add_iterations(*ptr); i > 0; i--) {
typename A::value_type val = ptr->load();
do {
} while (!ptr->compare_exchange_strong(val, val + 1));
}
}
};
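The weak/strong pair above exercises the canonical compare-exchange retry loop. A minimal standalone sketch of the same pattern, written against std::atomic so it compiles outside mbed (mbed's Atomic<T> mirrors this interface):

#include <atomic>
#include <cstdint>

// compare_exchange_weak may fail spuriously (e.g. if the exclusive
// reservation is lost between LDREX and STREX on Cortex-M), so it must sit
// in a loop; compare_exchange_strong retries internally and fails only on a
// genuine value mismatch.
template <typename T>
void cas_increment(std::atomic<T> &a)
{
    T val = a.load();
    // On failure, val is reloaded with the current value, so the loop body
    // is empty - exactly as in weak_incrementer::op above.
    while (!a.compare_exchange_weak(val, val + 1)) {
    }
}

int main()
{
    std::atomic<uint32_t> counter{0};
    for (int i = 0; i < 1000; ++i) {
        cas_increment(counter);
    }
    return counter.load() == 1000 ? 0 : 1;
}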
/*
@@ -100,32 +115,34 @@ void strong_incrementer(T *ptr)
* Using core_util_atomic_ templates, and exercising
* load and store briefly.
*/
template<typename T, void (*Fn)(T *)>
template<typename T, template<typename A> class Fn>
void test_atomic_add()
{
struct {
volatile T nonatomic1;
T atomic1;
T atomic2;
Atomic<T> atomic1;
volatile Atomic<T> atomic2; // use volatile just to exercise the templates' volatile methods
volatile T nonatomic2;
} data;
} data = { 0, { 0 }, { 1 }, 0 }; // test initialisation
data.nonatomic1 = 0;
core_util_atomic_store(&data.atomic1, T(0));
core_util_atomic_store(&data.atomic2, T(0));
data.nonatomic2 = 0;
TEST_ASSERT_EQUAL(sizeof(T), sizeof data.nonatomic1);
TEST_ASSERT_EQUAL(sizeof(T), sizeof data.atomic1);
TEST_ASSERT_EQUAL(4 * sizeof(T), sizeof data);
// test store
data.atomic2 = 0;
Thread t1(osPriorityNormal, THREAD_STACK);
Thread t2(osPriorityNormal, THREAD_STACK);
Thread t3(osPriorityNormal, THREAD_STACK);
Thread t4(osPriorityNormal, THREAD_STACK);
TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn, &data.atomic1)));
TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn, &data.atomic1)));
TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn, &data.atomic2)));
TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn, &data.atomic2)));
TEST_ASSERT_EQUAL(osOK, t1.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
TEST_ASSERT_EQUAL(osOK, t2.start(callback(Fn<decltype(data.atomic1)>::op, &data.atomic1)));
TEST_ASSERT_EQUAL(osOK, t3.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
TEST_ASSERT_EQUAL(osOK, t4.start(callback(Fn<decltype(data.atomic2)>::op, &data.atomic2)));
for (long i = ADD_ITERATIONS; i > 0; i--) {
for (long i = ADD_UNLOCKED_ITERATIONS; i > 0; i--) {
data.nonatomic1++;
data.nonatomic2++;
}
@@ -135,10 +152,83 @@ void test_atomic_add()
t3.join();
t4.join();
TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic1);
TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic1));
TEST_ASSERT_EQUAL(T(2 * ADD_ITERATIONS), core_util_atomic_load(&data.atomic2));
TEST_ASSERT_EQUAL(T(ADD_ITERATIONS), data.nonatomic2);
TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic1);
TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic1)), data.atomic1);
TEST_ASSERT_EQUAL(T(2 * add_iterations(data.atomic2)), data.atomic2);
TEST_ASSERT_EQUAL(T(ADD_UNLOCKED_ITERATIONS), data.nonatomic2);
}
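The incrementers changed from function templates to class templates with a static op because a function template cannot be passed as a template template parameter; wrapping it in a class lets one test body instantiate the operation for both Atomic<T> and volatile Atomic<T>. A reduced, hypothetical illustration of the dispatch (print_op and run are not mbed names):

#include <cstdio>

template <typename A>
struct print_op {
    static void op(A *ptr)
    {
        std::printf("%d\n", int(*ptr));
    }
};

template <typename T, template <typename A> class Fn>
void run()
{
    T plain = 1;
    volatile T vol = 2;
    Fn<T>::op(&plain);        // Fn instantiated for T
    Fn<volatile T>::op(&vol); // and separately for volatile T
}

int main()
{
    run<int, print_op>();
    return 0;
}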
// This should fit into a uint32_t container, and there
// will be 1 byte of padding to ignore.
struct small {
uint8_t a;
uint8_t b;
uint8_t c;
};
// A weird 11-byte structure. Should work via critical sections.
struct large {
uint8_t a;
uint8_t b;
uint8_t c;
uint8_t dummy[8];
};
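A sketch of the distinction the two structs probe, shown with std::atomic since the exact outcome depends on the toolchain (mbed's Atomic<T> follows the same rule: a type that fits a 1-, 2- or 4-byte exclusive access goes lock-free, anything larger falls back to a critical section):

#include <atomic>
#include <cstdint>
#include <cstdio>

struct small_s { uint8_t a, b, c; };           // padded into a 4-byte container
struct large_s { uint8_t a, b, c, dummy[8]; }; // 11 bytes - no container fits

int main()
{
    std::atomic<small_s> s{};
    std::atomic<large_s> l{};
    std::printf("small lock-free: %d\n", int(s.is_lock_free())); // typically 1
    std::printf("large lock-free: %d\n", int(l.is_lock_free())); // typically 0
    return 0;
}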
template<typename A>
void struct_incrementer_a(A *data)
{
for (long i = add_iterations(*data); i > 0; i--) {
typename A::value_type curval = *data, newval;
do {
newval = curval;
newval.a++;
} while (!data->compare_exchange_weak(curval, newval));
}
}
template<typename A>
void struct_incrementer_b(A *data)
{
for (long i = add_iterations(*data); i > 0; i--) {
typename A::value_type curval = *data, newval;
do {
newval = curval;
newval.b++;
} while (!data->compare_exchange_weak(curval, newval));
}
}
template<typename T, size_t N>
void test_atomic_struct()
{
TEST_ASSERT_EQUAL(N, sizeof(Atomic<T>));
// Small structures don't have a value constructor implemented, so use atomic_init.
Atomic<T> data;
atomic_init(&data, T{0, 0, 0});
Thread t1(osPriorityNormal, THREAD_STACK);
Thread t2(osPriorityNormal, THREAD_STACK);
TEST_ASSERT_EQUAL(osOK, t1.start(callback(struct_incrementer_a<Atomic<T> >, &data)));
TEST_ASSERT_EQUAL(osOK, t2.start(callback(struct_incrementer_b<Atomic<T> >, &data)));
for (long i = add_iterations(data); i > 0; i--) {
T curval = data, newval;
do {
newval = curval;
newval.c++;
} while (!data.compare_exchange_weak(curval, newval));
}
t1.join();
t2.join();
T final_val = data;
TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.a);
TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.b);
TEST_ASSERT_EQUAL(uint8_t(add_iterations(data)), final_val.c);
}
} // namespace
@@ -174,7 +264,9 @@ Case cases[] = {
Case("Test atomic compare exchange strong 8-bit", test_atomic_add<uint8_t, strong_incrementer>),
Case("Test atomic compare exchange strong 16-bit", test_atomic_add<uint16_t, strong_incrementer>),
Case("Test atomic compare exchange strong 32-bit", test_atomic_add<uint32_t, strong_incrementer>),
Case("Test atomic compare exchange strong 64-bit", test_atomic_add<uint64_t, strong_incrementer>)
Case("Test atomic compare exchange strong 64-bit", test_atomic_add<uint64_t, strong_incrementer>),
Case("Test small atomic custom structure", test_atomic_struct<small, 4>),
Case("Test large atomic custom structure", test_atomic_struct<large, 11>)
};
utest::v1::Specification specification(test_setup, cases);


@@ -9,10 +9,10 @@ project(${PROJECT_NAME})
macro(use_cxx14)
if (CMAKE_VERSION VERSION_LESS 3.1)
if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++98")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++14")
endif()
else()
set(CMAKE_CXX_STANDARD 98)
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
endif()
endmacro()


@@ -55,7 +55,7 @@ int ATHandler_stub::int_count = kRead_int_table_size;
bool ATHandler_stub::process_oob_urc = false;
int ATHandler_stub::read_string_index = kRead_string_table_size;
const char *ATHandler_stub::read_string_table[kRead_string_table_size] = {'\0'};
const char *ATHandler_stub::read_string_table[kRead_string_table_size];
int ATHandler_stub::resp_stop_success_count = kResp_stop_count_default;
bool ATHandler_stub::get_debug_flag = false;


@@ -41,7 +41,7 @@ const uint8_t SMS_MAX_GSM7_CONCATENATED_SINGLE_SMS_SIZE = 153;
#define NVAM '?' // Not Valid ascii, ISO-8859-1 mark
// mapping table from 7-bit GSM to ascii (ISO-8859-1)
static const char gsm_to_ascii[] = {
static const unsigned char gsm_to_ascii[] = {
64, // 0
163, // 1
36, // 2
@@ -1153,7 +1153,7 @@ uint16_t AT_CellularSMS::pack_7_bit_gsm_and_hex(const char *str, uint16_t len, c
char *gsm_str = new char[len];
for (uint16_t y = 0; y < len; y++) {
for (int x = 0; x < GSM_TO_ASCII_TABLE_SIZE; x++) {
if (gsm_to_ascii[x] == str[y]) {
if (gsm_to_ascii[x] == static_cast<unsigned char>(str[y])) {
gsm_str[y] = x;
}
}
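A plausible reading of why these two changes travel together with this PR's C++14 switch: brace-initialising a (signed) char array with values above 127 becomes a narrowing error, and once the table is unsigned char, an uncast comparison against a plain char byte promotes 163 against -93 and never matches. A small sketch of both effects:

#include <cstdio>

// static const char bad[] = { 163 }; // ill-formed: narrowing in list-init
static const unsigned char table[] = { 163 };

int main()
{
    char probe = char(163); // -93 on targets where plain char is signed
    std::printf("%d\n", table[0] == probe);                             // 0 where char is signed: 163 != -93
    std::printf("%d\n", table[0] == static_cast<unsigned char>(probe)); // 1: 163 == 163
    return 0;
}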

mbed.h

@@ -88,7 +88,7 @@
#include "drivers/InterruptIn.h"
#include "platform/mbed_wait_api.h"
#include "hal/sleep_api.h"
#include "platform/mbed_atomic.h"
#include "platform/Atomic.h"
#include "platform/mbed_power_mgmt.h"
#include "platform/mbed_rtc_time.h"
#include "platform/mbed_poll.h"

platform/Atomic.h (new file, 1236 additions)

File diff suppressed because it is too large.


@@ -132,14 +132,49 @@ extern "C" {
#endif
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
__asm { \
LDREX##M newValue, [valuePtr] ; \
OP newValue, arg ; \
STREX##M fail, newValue, [valuePtr] \
}
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
__asm volatile ( \
"LDREX"#M "\t%[newValue], %[value]\n\t" \
#OP "\t%[newValue], %[arg]\n\t" \
"STREX"#M "\t%[fail], %[newValue], %[value]\n\t" \
: [newValue] "=&" MBED_DOP_REG (newValue), \
[fail] "=&r" (fail), \
[value] "+Q" (*valuePtr) \
: [arg] Constants MBED_DOP_REG (arg) \
: "cc" \
)
#elif defined __ICCARM__
/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
/* IAR does not support "ADDS reg, reg", so write as 3-operand */
#define DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M) \
asm volatile ( \
"LDREX"#M "\t%[newValue], [%[valuePtr]]\n" \
#OP "\t%[newValue], %[newValue], %[arg]\n" \
"STREX"#M "\t%[fail], %[newValue], [%[valuePtr]]\n" \
: [newValue] "=&r" (newValue), \
[fail] "=&r" (fail) \
: [valuePtr] "r" (valuePtr), \
[arg] "r" (arg) \
: "memory", "cc" \
)
#endif
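A portable model of the loop these macros emit, using std::atomic: LDREX corresponds to the initial load, the OP to the computation, and a failing STREX (reservation lost) to a spurious compare_exchange_weak failure, so the operation retries until the store-exclusive succeeds. A behavioural sketch, not the generated code:

#include <atomic>
#include <cstdint>

uint32_t newval_add_u32(std::atomic<uint32_t> &value, uint32_t arg)
{
    uint32_t oldValue = value.load(std::memory_order_relaxed); // LDREX
    uint32_t newValue;
    do {
        newValue = oldValue + arg;                             // ADDS
    } while (!value.compare_exchange_weak(oldValue, newValue,  // STREX; retry on fail
                                          std::memory_order_relaxed));
    return newValue; // the NEWVAL_2OP forms return the updated value
}

int main()
{
    std::atomic<uint32_t> v{40};
    return newval_add_u32(v, 2) == 42 ? 0 : 1;
}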
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
__asm { \
LDREX##M oldValue, [valuePtr] ; \
OP newValue, oldValue, arg ; \
STREX##M fail, newValue, [valuePtr] \
}
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
__asm volatile ( \
".syntax unified\n\t" \
"LDREX"#M "\t%[oldValue], %[value]\n\t" \
@@ -154,7 +189,7 @@ extern "C" {
)
#elif defined __ICCARM__
/* In IAR "r" means low register if Thumbv1 (there's no way to specify any register...) */
#define DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M) \
asm volatile ( \
"LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
#OP "\t%[newValue], %[oldValue], %[arg]\n" \
@@ -172,7 +207,7 @@ extern "C" {
* are only 2-operand versions of the instructions.
*/
#ifdef __CC_ARM
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
__asm { \
LDREX##M oldValue, [valuePtr] ; \
MOV newValue, oldValue ; \
@@ -180,7 +215,7 @@ extern "C" {
STREX##M fail, newValue, [valuePtr] \
}
#elif defined __clang__ || defined __GNUC__
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
__asm volatile ( \
".syntax unified\n\t" \
"LDREX"#M "\t%[oldValue], %[value]\n\t" \
@@ -195,7 +230,7 @@ extern "C" {
: "cc" \
)
#elif defined __ICCARM__
#define DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M) \
#define DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M) \
asm volatile ( \
"LDREX"#M "\t%[oldValue], [%[valuePtr]]\n" \
"MOV" "\t%[newValue], %[oldValue]\n" \
@@ -444,17 +479,41 @@ MBED_FORCEINLINE bool core_util_atomic_cas_explicit_##fn_suffix(volatile T *ptr,
}
#define DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, T, fn_suffix, M) \
#define DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
uint32_t fail, newValue; \
MBED_BARRIER(); \
do { \
DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_BARRIER(); \
return (T) newValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
volatile T *valuePtr, T arg, mbed_memory_order order) \
{ \
uint32_t fail, newValue; \
MBED_RELEASE_BARRIER(order); \
do { \
DO_MBED_LOCKFREE_NEWVAL_2OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_ACQUIRE_BARRIER(order); \
return (T) newValue; \
} \
#define DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
T oldValue; \
uint32_t fail, newValue; \
MBED_BARRIER(); \
do { \
DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_BARRIER(); \
return (T) retValue; \
return oldValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
@@ -464,22 +523,22 @@ MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(
uint32_t fail, newValue; \
MBED_RELEASE_BARRIER(order); \
do { \
DO_MBED_LOCKFREE_2OP_ASM(OP, Constants, M); \
DO_MBED_LOCKFREE_OLDVAL_2OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_ACQUIRE_BARRIER(order); \
return (T) retValue; \
return oldValue; \
} \
#define DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, T, fn_suffix, M) \
#define DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, T, fn_suffix, M) \
inline T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) { \
T oldValue; \
uint32_t fail, newValue; \
MBED_BARRIER(); \
do { \
DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_BARRIER(); \
return (T) retValue; \
return oldValue; \
} \
\
MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix( \
@@ -489,10 +548,10 @@ MBED_FORCEINLINE T core_util_atomic_##name##_explicit_##fn_suffix(
uint32_t fail, newValue; \
MBED_RELEASE_BARRIER(order); \
do { \
DO_MBED_LOCKFREE_3OP_ASM(OP, Constants, M); \
DO_MBED_LOCKFREE_OLDVAL_3OP_ASM(OP, Constants, M); \
} while (fail); \
MBED_ACQUIRE_BARRIER(order); \
return (T) retValue; \
return oldValue; \
} \
inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *valuePtr)
@@ -526,15 +585,20 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
DO_MBED_LOCKFREE_EXCHG_OP(uint16_t, u16, H) \
DO_MBED_LOCKFREE_EXCHG_OP(uint32_t, u32, )
#define DO_MBED_LOCKFREE_3OPS(name, OP, Constants, retValue) \
DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint8_t, u8, B) \
DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint16_t, u16, H) \
DO_MBED_LOCKFREE_3OP(name, OP, Constants, retValue, uint32_t, u32, )
#define DO_MBED_LOCKFREE_NEWVAL_2OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_NEWVAL_2OP(name, OP, Constants, uint32_t, u32, )
#define DO_MBED_LOCKFREE_2OPS(name, OP, Constants, retValue) \
DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint8_t, u8, B) \
DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint16_t, u16, H) \
DO_MBED_LOCKFREE_2OP(name, OP, Constants, retValue, uint32_t, u32, )
#define DO_MBED_LOCKFREE_OLDVAL_3OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_OLDVAL_3OP(name, OP, Constants, uint32_t, u32, )
#define DO_MBED_LOCKFREE_OLDVAL_2OPS(name, OP, Constants) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint8_t, u8, B) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint16_t, u16, H) \
DO_MBED_LOCKFREE_OLDVAL_2OP(name, OP, Constants, uint32_t, u32, )
#define DO_MBED_LOCKFREE_CAS_STRONG_OPS() \
DO_MBED_LOCKFREE_CAS_STRONG_OP(uint8_t, u8, B) \
@@ -546,6 +610,11 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
DO_MBED_LOCKFREE_CAS_WEAK_OP(uint16_t, u16, H) \
DO_MBED_LOCKFREE_CAS_WEAK_OP(uint32_t, u32, )
// Note that these macros define a number of functions that are
// not in mbed_atomic.h, like core_util_atomic_and_fetch_u16.
// These are not documented via the Doxygen in mbed_atomic.h, so
// for now they should be regarded as internal only. They are used by the
// Atomic<T> template as an optimisation, though.
// We always use the "S" form of operations - avoids yet another
// possible unneeded distinction between Thumbv1 and Thumbv2, and
@@ -559,33 +628,42 @@ MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_
// of the 16-bit forms. Shame we can't specify "don't care"
// for the "S", or get the GNU multi-alternative to
// choose ADDS/ADD appropriately.
DO_MBED_LOCKFREE_3OPS(incr, ADDS, "IL", newValue)
DO_MBED_LOCKFREE_3OPS(decr, SUBS, "IL", newValue)
DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "IL", oldValue)
DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "IL", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IL")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "IL")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IL")
// K constraint is inverted 12-bit modified immediate constant
// (relying on assembler substituting BIC for AND)
DO_MBED_LOCKFREE_3OPS(fetch_and, ANDS, "IK", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_and, ANDS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "IK")
#if MBED_EXCLUSIVE_ACCESS_ARM
// ARM does not have ORN instruction, so take plain immediates.
DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "I", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "I")
#else
// Thumb-2 has ORN instruction, and assembler substitutes ORN for ORR.
DO_MBED_LOCKFREE_3OPS(fetch_or, ORRS, "IK", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_or, ORRS, "IK")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "IK")
#endif
// I constraint is 12-bit modified immediate operand
DO_MBED_LOCKFREE_3OPS(fetch_xor, EORS, "I", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_xor, EORS, "I")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "I")
#else // MBED_EXCLUSIVE_ACCESS_THUMB1
// I constraint is 0-255; J is -255 to -1, suitable for
// 2-op ADD/SUB (relying on assembler to swap ADD/SUB)
// L constraint is -7 to +7, suitable for 3-op ADD/SUB
// (relying on assembler to swap ADD/SUB)
DO_MBED_LOCKFREE_3OPS(incr, ADDS, "L", newValue)
DO_MBED_LOCKFREE_3OPS(decr, SUBS, "L", newValue)
DO_MBED_LOCKFREE_3OPS(fetch_add, ADDS, "L", oldValue)
DO_MBED_LOCKFREE_3OPS(fetch_sub, SUBS, "L", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_and, ANDS, "", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_or, ORRS, "", oldValue)
DO_MBED_LOCKFREE_2OPS(fetch_xor, EORS, "", oldValue)
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_add, ADDS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(incr, ADDS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_3OPS(fetch_sub, SUBS, "L")
DO_MBED_LOCKFREE_NEWVAL_2OPS(decr, SUBS, "IJ")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_and, ANDS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(and_fetch, ANDS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_or, ORRS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(or_fetch, ORRS, "")
DO_MBED_LOCKFREE_OLDVAL_2OPS(fetch_xor, EORS, "")
DO_MBED_LOCKFREE_NEWVAL_2OPS(xor_fetch, EORS, "")
#endif
DO_MBED_LOCKFREE_EXCHG_OPS()
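As the note above says, the NEWVAL forms (and_fetch and friends) are internal counterparts of the documented fetch_ operations; the only contract difference is whether the value before or after the operation is returned. A minimal demonstration with std::atomic:

#include <atomic>
#include <cstdint>

int main()
{
    std::atomic<uint32_t> v{0xFFu};
    uint32_t before = v.fetch_and(0x0Fu); // OLDVAL form: returns 0xFF
    uint32_t after = v.load();            // a NEWVAL (and_fetch) form would return this: 0x0F
    return (before == 0xFFu && after == 0x0Fu) ? 0 : 1;
}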
@@ -1011,49 +1089,49 @@ DO_MBED_LOCKED_CAS_ORDERINGS(compare_exchange_weak)
*/
#define DO_MBED_ATOMIC_LOAD_TEMPLATE(T, fn_suffix) \
template<> \
inline T core_util_atomic_load(const volatile T *valuePtr) \
inline T core_util_atomic_load(const volatile T *valuePtr) noexcept \
{ \
return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load(const T *valuePtr) \
inline T core_util_atomic_load(const T *valuePtr) noexcept \
{ \
return core_util_atomic_load_##fn_suffix(valuePtr); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) \
inline T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept \
{ \
return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
} \
\
template<> \
inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) \
inline T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept \
{ \
return core_util_atomic_load_explicit_##fn_suffix(valuePtr, order); \
}
template<typename T>
inline T *core_util_atomic_load(T *const volatile *valuePtr)
inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept
{
return (T *) core_util_atomic_load_ptr((void *const volatile *) valuePtr);
}
template<typename T>
inline T *core_util_atomic_load(T *const *valuePtr)
inline T *core_util_atomic_load(T *const *valuePtr) noexcept
{
return (T *) core_util_atomic_load_ptr((void *const *) valuePtr);
}
template<typename T>
inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order)
inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept
{
return (T *) core_util_atomic_load_explicit_ptr((void *const volatile *) valuePtr, order);
}
template<typename T>
inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order)
inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept
{
return (T *) core_util_atomic_load_explicit_ptr((void *const *) valuePtr, order);
}
@@ -1070,49 +1148,49 @@ DO_MBED_ATOMIC_LOAD_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_STORE_TEMPLATE(T, fn_suffix) \
template<> \
inline void core_util_atomic_store(volatile T *valuePtr, T val) \
inline void core_util_atomic_store(volatile T *valuePtr, T val) noexcept \
{ \
core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store(T *valuePtr, T val) \
inline void core_util_atomic_store(T *valuePtr, T val) noexcept \
{ \
core_util_atomic_store_##fn_suffix(valuePtr, val); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) \
inline void core_util_atomic_store_explicit(volatile T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
} \
\
template<> \
inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) \
inline void core_util_atomic_store_explicit(T *valuePtr, T val, mbed_memory_order order) noexcept \
{ \
core_util_atomic_store_explicit_##fn_suffix(valuePtr, val, order); \
}
template<typename T>
inline void core_util_atomic_store(T *volatile *valuePtr, T *val)
inline void core_util_atomic_store(T *volatile *valuePtr, T *val) noexcept
{
core_util_atomic_store_ptr((void *volatile *) valuePtr, val);
}
template<typename T>
inline void core_util_atomic_store(T **valuePtr, T *val)
inline void core_util_atomic_store(T **valuePtr, T *val) noexcept
{
core_util_atomic_store_ptr((void **) valuePtr, val);
}
template<typename T>
inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order)
inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *val, mbed_memory_order order) noexcept
{
core_util_atomic_store_explicit_ptr((void *volatile *) valuePtr, val, order);
}
template<typename T>
inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order)
inline void core_util_atomic_store_explicit(T **valuePtr, T *val, mbed_memory_order order) noexcept
{
core_util_atomic_store_explicit_ptr((void **) valuePtr, val, order);
}
@@ -1129,19 +1207,19 @@ DO_MBED_ATOMIC_STORE_TEMPLATE(bool, bool)
#define DO_MBED_ATOMIC_CAS_TEMPLATE(tname, fname, T, fn_suffix) \
template<> inline \
bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
bool core_util_atomic_##tname(volatile T *ptr, T *expectedCurrentValue, T desiredValue) noexcept \
{ \
return core_util_atomic_##fname##_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}
template<typename T>
inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
return core_util_atomic_cas_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}
template<typename T>
inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue)
inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue) noexcept
{
return core_util_atomic_compare_exchange_weak_ptr((void *volatile *) ptr, (void **) expectedCurrentValue, desiredValue);
}
@@ -1162,63 +1240,63 @@ DO_MBED_ATOMIC_CAS_TEMPLATES(compare_exchange_weak, compare_exchange_weak)
#define DO_MBED_ATOMIC_OP_TEMPLATE(name, T, fn_suffix) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
return core_util_atomic_##name##_##fn_suffix(valuePtr, arg); \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
mbed_memory_order order) \
mbed_memory_order order) noexcept \
{ \
return core_util_atomic_##name##_explicit_##fn_suffix(valuePtr, arg, order); \
}
template<>
inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg)
inline bool core_util_atomic_exchange(volatile bool *valuePtr, bool arg) noexcept
{
return core_util_atomic_exchange_bool(valuePtr, arg);
}
template<>
inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order)
inline bool core_util_atomic_exchange_explicit(volatile bool *valuePtr, bool arg, mbed_memory_order order) noexcept
{
return core_util_atomic_exchange_explicit_bool(valuePtr, arg, order);
}
template<typename T>
inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg)
inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *arg) noexcept
{
return (T *) core_util_atomic_exchange_ptr((void *volatile *) valuePtr, arg);
}
template<typename T>
inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order)
inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *arg, mbed_memory_order order) noexcept
{
return (T *) core_util_atomic_exchange_explicit_ptr((void *volatile *) valuePtr, arg, order);
}
template<typename T>
inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg)
inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
return (T *) core_util_atomic_fetch_add_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}
template<typename T>
inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
return (T *) core_util_atomic_fetch_add_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}
template<typename T>
inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg)
inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept
{
return (T *) core_util_atomic_fetch_sub_ptr((void *volatile *) valuePtr, arg * sizeof(T));
}
template<typename T>
inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept
{
return (T *) core_util_atomic_fetch_sub_explicit_ptr((void *volatile *) valuePtr, arg * sizeof(T), order);
}
@@ -1236,6 +1314,20 @@ inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t a
DO_MBED_ATOMIC_OP_TEMPLATE(name, int32_t, s32) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, int64_t, s64)
#define DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, T, fn_suffix, postname, OP) \
template<> \
inline T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept \
{ \
return core_util_atomic_##postname##_##fn_suffix(valuePtr, arg) OP; \
} \
\
template<> \
inline T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, \
mbed_memory_order order) noexcept \
{ \
return core_util_atomic_##postname##_explicit_##fn_suffix(valuePtr, arg, order) OP; \
}
DO_MBED_ATOMIC_OP_U_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_S_TEMPLATES(exchange)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_add)
@@ -1246,25 +1338,61 @@ DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_and)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_or)
DO_MBED_ATOMIC_OP_U_TEMPLATES(fetch_xor)
namespace mbed {
namespace impl {
// Use custom assembler forms for pre-ops where available, else construct from post-ops
#if MBED_EXCLUSIVE_ACCESS
#define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint8_t, u8) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint16_t, u16) \
DO_MBED_ATOMIC_OP_TEMPLATE(name, uint32_t, u32) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
#else
#define DO_MBED_ATOMIC_PRE_OP_TEMPLATES(name, postname, OP) \
template<typename T> T core_util_atomic_##name(volatile T *valuePtr, T arg) noexcept; \
template<typename T> T core_util_atomic_##name##_explicit(volatile T *valuePtr, T arg, mbed_memory_order order) noexcept; \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint8_t, u8, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint16_t, u16, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint32_t, u32, postname, OP) \
DO_MBED_ATOMIC_MANUAL_PRE_OP_TEMPLATE(name, uint64_t, u64, postname, OP)
#endif
// *INDENT-OFF*
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(incr, fetch_add, + arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(decr, fetch_sub, - arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(and_fetch, fetch_and, & arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(or_fetch, fetch_or, | arg)
DO_MBED_ATOMIC_PRE_OP_TEMPLATES(xor_fetch, fetch_xor, ^ arg)
// *INDENT-ON*
}
}
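The manual fallback above is just the identity "pre-op equals post-op combined with the argument": for example, incr(p, n) is fetch_add(p, n) + n. Sketched with std::atomic (the helper name is illustrative, not an mbed internal):

#include <atomic>
#include <cstdint>

template <typename T>
T incr_from_fetch_add(std::atomic<T> &v, T arg)
{
    return v.fetch_add(arg) + arg; // old value + arg == new value
}

int main()
{
    std::atomic<uint32_t> v{10};
    uint32_t result = incr_from_fetch_add(v, 5u);
    return (result == 15 && v.load() == 15) ? 0 : 1;
}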
#endif // __cplusplus
#undef MBED_DOP_REG
#undef MBED_CMP_IMM
#undef MBED_SUB3_IMM
#undef DO_MBED_LOCKFREE_EXCHG_ASM
#undef DO_MBED_LOCKFREE_3OP_ASM
#undef DO_MBED_LOCKFREE_2OP_ASM
#undef DO_MBED_LOCKFREE_NEWVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_3OP_ASM
#undef DO_MBED_LOCKFREE_OLDVAL_2OP_ASM
#undef DO_MBED_LOCKFREE_CAS_WEAK_ASM
#undef DO_MBED_LOCKFREE_CAS_STRONG_ASM
#undef DO_MBED_LOCKFREE_LOADSTORE
#undef DO_MBED_LOCKFREE_EXCHG_OP
#undef DO_MBED_LOCKFREE_CAS_WEAK_OP
#undef DO_MBED_LOCKFREE_CAS_STRONG_OP
#undef DO_MBED_LOCKFREE_2OP
#undef DO_MBED_LOCKFREE_3OP
#undef DO_MBED_LOCKFREE_NEWVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_2OP
#undef DO_MBED_LOCKFREE_OLDVAL_3OP
#undef DO_MBED_LOCKFREE_EXCHG_OPS
#undef DO_MBED_LOCKFREE_2OPS
#undef DO_MBED_LOCKFREE_3OPS
#undef DO_MBED_LOCKFREE_NEWVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_2OPS
#undef DO_MBED_LOCKFREE_OLDVAL_3OPS
#undef DO_MBED_LOCKFREE_CAS_WEAK_OPS
#undef DO_MBED_LOCKFREE_CAS_STRONG_OPS
#undef DO_MBED_SIGNED_CAS_OP


@@ -880,6 +880,8 @@ MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint6
#ifdef __cplusplus
} // extern "C"
#include "mbed_cxxsupport.h"
// For each operation, two overloaded templates:
// * one for non-pointer types, which has implementations based on the
// u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation.
@@ -887,100 +889,99 @@ MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint6
//
// Templates use standard C/C++ naming - old incr/decr/cas forms are not provided.
//
// Note that C++ template selection somewhat inhibits the ease of use of these templates.
// Ambiguities arise with setting pointers to NULL, or adding constants to integers.
// It may be necessary to cast the argument or desired value to the correct type, or
explicitly specify the type - eg core_util_atomic_store<FileHandle>(&fh, NULL) or
// core_util_atomic_store(&val, (uint8_t)1).
// A proper mbed::Atomic<T> class would solve the issue.
// The `type_identity_t<T>` used here means "same type as T", blocking template
// argument deduction. It forces type selection based on the type of the actual pointer
// to the atomic. If just `T` was used, the following would be ambiguous:
// core_util_atomic_store(&my_uint8_t, 1) - it wouldn't be able to select between T
// being uint8_t and int.
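A reduced illustration of the deduction problem the comment describes, using C++20's std::type_identity_t (mbed::type_identity_t is the same idea, implementable in C++14); the store_ambiguous/store_ok names are hypothetical, for illustration only:

#include <cstdint>
#include <type_traits>

template <typename T>
void store_ambiguous(volatile T *ptr, T value)
{
    *ptr = value;
}

template <typename T>
void store_ok(volatile T *ptr, std::type_identity_t<T> value)
{
    *ptr = value; // T is deduced from ptr alone; value is a non-deduced context
}

int main()
{
    volatile uint8_t u8 = 0;
    // store_ambiguous(&u8, 1); // error: T deduced as both uint8_t and int
    store_ok(&u8, 1);           // fine: T = uint8_t, 1 converts to uint8_t
    return u8 == 1 ? 0 : 1;
}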
/** \copydoc core_util_atomic_load_u8 */
template<typename T> T core_util_atomic_load(const volatile T *valuePtr);
template<typename T> T core_util_atomic_load(const volatile T *valuePtr) noexcept;
/** \copydoc core_util_atomic_load_u8 */
template<typename T> T core_util_atomic_load(const T *valuePtr);
template<typename T> T core_util_atomic_load(const T *valuePtr) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue);
template<typename T> void core_util_atomic_store(volatile T *valuePtr, mbed::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> void core_util_atomic_store(T *valuePtr, T desiredValue);
template<typename T> void core_util_atomic_store(T *valuePtr, mbed::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_exchange_u8 */
template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue);
template<typename T> T core_util_atomic_exchange(volatile T *ptr, mbed::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_cas_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, mbed::type_identity_t<T> *expectedCurrentValue, mbed::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, mbed::type_identity_t<T> *expectedCurrentValue, mbed::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_fetch_add_u8 */
template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg);
template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, mbed::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_fetch_sub_u8 */
template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg);
template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, mbed::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_fetch_and_u8 */
template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg);
template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, mbed::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_fetch_or_u8 */
template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg);
template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, mbed::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_fetch_xor_u8 */
template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg);
template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, mbed::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order);
template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order);
template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order);
template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, mbed::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> void core_util_atomic_store_explicit(T *valuePtr, T desiredValue, mbed_memory_order order);
template<typename T> void core_util_atomic_store_explicit(T *valuePtr, mbed::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_exchange_explicit_u8 */
template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order);
template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, mbed::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_cas_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, mbed::type_identity_t<T> *expectedCurrentValue, mbed::type_identity_t<T> desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, mbed::type_identity_t<T> *expectedCurrentValue, mbed::type_identity_t<T> desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_fetch_add_explicit_u8 */
template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, mbed::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_fetch_sub_explicit_u8 */
template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, mbed::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_fetch_and_explicit_u8 */
template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, mbed::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_fetch_or_explicit_u8 */
template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, mbed::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_fetch_xor_explicit_u8 */
template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, mbed::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_load_ptr */
template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr);
template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept;
/** \copydoc core_util_atomic_load_ptr */
template<typename T> inline T *core_util_atomic_load(T *const *valuePtr);
template<typename T> inline T *core_util_atomic_load(T *const *valuePtr) noexcept;
/** \copydoc core_util_atomic_store_ptr */
template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue);
template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, mbed::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_store_ptr */
template<typename T> inline void core_util_atomic_store(T **valuePtr, T *desiredValue);
template<typename T> inline void core_util_atomic_store(T **valuePtr, mbed::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_exchange_ptr */
template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue);
template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, mbed::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_cas_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, mbed::type_identity_t<T> **expectedCurrentValue, mbed::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, mbed::type_identity_t<T> **expectedCurrentValue, mbed::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_fetch_add_ptr */
template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg);
template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept;
/** \copydoc core_util_fetch_sub_ptr */
template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg);
template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept;
/** \copydoc core_util_atomic_load_explicit_ptr */
template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order);
template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_load_explicit_ptr */
template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order);
template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_ptr */
template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, mbed::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_ptr */
template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, T *desiredValue, mbed_memory_order order);
template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, mbed::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_exchange_explicit_ptr */
template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, mbed::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_cas_explicit_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, mbed::type_identity_t<T> **expectedCurrentValue, mbed::type_identity_t<T> *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, mbed::type_identity_t<T> **expectedCurrentValue, mbed::type_identity_t<T> *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_fetch_add_explicit_ptr */
template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_fetch_sub_explicit_ptr */
template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;
#endif // __cplusplus