From 94f56463036e096492d391c8f4f4f4c46f25e363 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Mon, 4 Feb 2019 15:08:16 +0200 Subject: [PATCH 1/5] Atomics: make barrier placement uniform --- platform/mbed_critical.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index 4b4a4f224a..905d755466 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -109,8 +109,8 @@ void core_util_critical_section_exit(void) bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr) { - uint8_t currentValue; MBED_BARRIER(); + uint8_t currentValue; do { currentValue = __LDREXB(&flagPtr->_flag); } while (__STREXB(true, &flagPtr->_flag)); @@ -188,8 +188,8 @@ uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta) uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) { - uint32_t newValue; MBED_BARRIER(); + uint32_t newValue; do { newValue = __LDREXW(valuePtr) + delta; } while (__STREXW(newValue, valuePtr)); @@ -200,8 +200,8 @@ uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta) uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) { - uint8_t newValue; MBED_BARRIER(); + uint8_t newValue; do { newValue = __LDREXB(valuePtr) - delta; } while (__STREXB(newValue, valuePtr)); @@ -211,8 +211,8 @@ uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta) uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) { - uint16_t newValue; MBED_BARRIER(); + uint16_t newValue; do { newValue = __LDREXH(valuePtr) - delta; } while (__STREXH(newValue, valuePtr)); @@ -222,8 +222,8 @@ uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta) uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) { - uint32_t newValue; MBED_BARRIER(); + uint32_t newValue; do { newValue = __LDREXW(valuePtr) - delta; } while 
(__STREXW(newValue, valuePtr)); From f8e3f5dc2c55670dc6aee73578cbad1cac6f6c07 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Mon, 4 Feb 2019 13:06:58 +0200 Subject: [PATCH 2/5] Add 64-bit atomics --- UNITTESTS/stubs/mbed_critical_stub.c | 25 ++++ platform/mbed_critical.c | 52 +++++++ platform/mbed_critical.h | 201 +++++---------------------- 3 files changed, 113 insertions(+), 165 deletions(-) diff --git a/UNITTESTS/stubs/mbed_critical_stub.c b/UNITTESTS/stubs/mbed_critical_stub.c index 2ccf87bc4a..0c6774301c 100644 --- a/UNITTESTS/stubs/mbed_critical_stub.c +++ b/UNITTESTS/stubs/mbed_critical_stub.c @@ -103,6 +103,31 @@ uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) } +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + return 0; +} + +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ +} + +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) +{ + return false; +} + +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + return 0; +} + + bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) { return false; diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index 905d755466..9ac527cda2 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -358,6 +358,58 @@ uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta) #endif +/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr) +{ + core_util_critical_section_enter(); + uint64_t currentValue = *valuePtr; + core_util_critical_section_exit(); + return currentValue; +} + +void core_util_atomic_store_u64(volatile uint64_t 
*valuePtr, uint64_t desiredValue) +{ + core_util_critical_section_enter(); + *valuePtr = desiredValue; + core_util_critical_section_exit(); +} + +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) +{ + bool success; + uint64_t currentValue; + core_util_critical_section_enter(); + currentValue = *ptr; + if (currentValue == *expectedCurrentValue) { + *ptr = desiredValue; + success = true; + } else { + *expectedCurrentValue = currentValue; + success = false; + } + core_util_critical_section_exit(); + return success; +} + +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + uint64_t newValue; + core_util_critical_section_enter(); + newValue = *valuePtr + delta; + *valuePtr = newValue; + core_util_critical_section_exit(); + return newValue; +} + +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) +{ + uint64_t newValue; + core_util_critical_section_enter(); + newValue = *valuePtr - delta; + *valuePtr = newValue; + core_util_critical_section_exit(); + return newValue; +} bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) { diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index e254534241..69759685dd 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -202,175 +202,16 @@ MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag */ bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue); -/** - * Atomic compare and set. It compares the contents of a memory location to a - * given value and, only if they are the same, modifies the contents of that - * memory location to a given new value. This is done as a single atomic - * operation. 
The atomicity guarantees that the new value is calculated based on - * up-to-date information; if the value had been updated by another thread in - * the meantime, the write would fail due to a mismatched expectedCurrentValue. - * - * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect - * you to the article on compare-and swap]. - * - * @param ptr The target memory location. - * @param[in,out] expectedCurrentValue A pointer to some location holding the - * expected current value of the data being set atomically. - * The computed 'desiredValue' should be a function of this current value. - * @note: This is an in-out parameter. In the - * failure case of atomic_cas (where the - * destination isn't set), the pointee of expectedCurrentValue is - * updated with the current value. - * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. - * - * @return true if the memory location was atomically - * updated with the desired value (after verifying - * that it contained the expectedCurrentValue), - * false otherwise. In the failure case, - * exepctedCurrentValue is updated with the new - * value of the target memory location. - * - * pseudocode: - * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { - * if *p != *old { - * *old = *p - * return false - * } - * *p = new - * return true - * } - * - * @note: In the failure case (where the destination isn't set), the value - * pointed to by expectedCurrentValue is instead updated with the current value. - * This property helps writing concise code for the following incr: - * - * function incr(p : pointer to int, a : int) returns int { - * done = false - * value = *p // This fetch operation need not be atomic. 
- * while not done { - * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success - * } - * return value + a - * } - * - * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it - * always succeeds if the current value is expected, as per the pseudocode - * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. - */ +/** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue); -/** - * Atomic compare and set. It compares the contents of a memory location to a - * given value and, only if they are the same, modifies the contents of that - * memory location to a given new value. This is done as a single atomic - * operation. The atomicity guarantees that the new value is calculated based on - * up-to-date information; if the value had been updated by another thread in - * the meantime, the write would fail due to a mismatched expectedCurrentValue. - * - * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect - * you to the article on compare-and swap]. - * - * @param ptr The target memory location. - * @param[in,out] expectedCurrentValue A pointer to some location holding the - * expected current value of the data being set atomically. - * The computed 'desiredValue' should be a function of this current value. - * @note: This is an in-out parameter. In the - * failure case of atomic_cas (where the - * destination isn't set), the pointee of expectedCurrentValue is - * updated with the current value. - * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. - * - * @return true if the memory location was atomically - * updated with the desired value (after verifying - * that it contained the expectedCurrentValue), - * false otherwise. In the failure case, - * exepctedCurrentValue is updated with the new - * value of the target memory location. 
- * - * pseudocode: - * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { - * if *p != *old { - * *old = *p - * return false - * } - * *p = new - * return true - * } - * - * @note: In the failure case (where the destination isn't set), the value - * pointed to by expectedCurrentValue is instead updated with the current value. - * This property helps writing concise code for the following incr: - * - * function incr(p : pointer to int, a : int) returns int { - * done = false - * value = *p // This fetch operation need not be atomic. - * while not done { - * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success - * } - * return value + a - * - * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it - * always succeeds if the current value is expected, as per the pseudocode - * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. - * } - */ +/** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue); -/** - * Atomic compare and set. It compares the contents of a memory location to a - * given value and, only if they are the same, modifies the contents of that - * memory location to a given new value. This is done as a single atomic - * operation. The atomicity guarantees that the new value is calculated based on - * up-to-date information; if the value had been updated by another thread in - * the meantime, the write would fail due to a mismatched expectedCurrentValue. - * - * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect - * you to the article on compare-and swap]. - * - * @param ptr The target memory location. - * @param[in,out] expectedCurrentValue A pointer to some location holding the - * expected current value of the data being set atomically. - * The computed 'desiredValue' should be a function of this current value. 
- * @note: This is an in-out parameter. In the - * failure case of atomic_cas (where the - * destination isn't set), the pointee of expectedCurrentValue is - * updated with the current value. - * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'. - * - * @return true if the memory location was atomically - * updated with the desired value (after verifying - * that it contained the expectedCurrentValue), - * false otherwise. In the failure case, - * exepctedCurrentValue is updated with the new - * value of the target memory location. - * - * pseudocode: - * function cas(p : pointer to int, old : pointer to int, new : int) returns bool { - * if *p != *old { - * *old = *p - * return false - * } - * *p = new - * return true - * } - * - * @note: In the failure case (where the destination isn't set), the value - * pointed to by expectedCurrentValue is instead updated with the current value. - * This property helps writing concise code for the following incr: - * - * function incr(p : pointer to int, a : int) returns int { - * done = false - * value = *p // This fetch operation need not be atomic. - * while not done { - * done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success - * } - * return value + a - * } - * - * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it - * always succeeds if the current value is expected, as per the pseudocode - * above; it will not spuriously fail as "atomic_compare_exchange_weak" may. - */ +/** \copydoc core_util_atomic_cas_u8 */ +bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); + +/** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); /** @@ -409,6 +250,13 @@ MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *val return value; } +/** + * Atomic load. 
+ * @param valuePtr Target memory location. + * @return The loaded value. + */ +uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); + /** * Atomic load. * @param valuePtr Target memory location. @@ -457,6 +305,13 @@ MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, ui MBED_BARRIER(); } +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + /** * Atomic store. * @param valuePtr Target memory location. @@ -493,6 +348,14 @@ uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta); */ uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); + /** * Atomic increment. * @param valuePtr Target memory location being incremented. @@ -528,6 +391,14 @@ uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta); */ uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. + */ +uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); + /** * Atomic decrement. * @param valuePtr Target memory location being decremented. 
From a71984093ab814a741a1548e5c075e1e94721360 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Mon, 4 Feb 2019 13:12:55 +0200 Subject: [PATCH 3/5] Add bool atomics --- platform/mbed_critical.c | 3 +++ platform/mbed_critical.h | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index 9ac527cda2..aebebe3280 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -100,6 +100,9 @@ void core_util_critical_section_exit(void) } } +/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */ +MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte"); + #if MBED_EXCLUSIVE_ACCESS /* Supress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */ diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index 69759685dd..c80b73d057 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -211,6 +211,12 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV /** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) +{ + return (bool)core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue); +} + /** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue); @@ -257,6 +263,18 @@ MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *val */ uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. 
+ */ +MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr) +{ + bool value = *valuePtr; + MBED_BARRIER(); + return value; +} + /** * Atomic load. * @param valuePtr Target memory location. @@ -312,6 +330,18 @@ MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, ui */ void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue) +{ + MBED_BARRIER(); + *valuePtr = desiredValue; + MBED_BARRIER(); +} + /** * Atomic store. * @param valuePtr Target memory location. From 694adaae8b3497373031b6b04bc996b3e07f1534 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Mon, 4 Feb 2019 13:14:07 +0200 Subject: [PATCH 4/5] Add atomic exchange --- UNITTESTS/stubs/mbed_critical_stub.c | 26 ++++++++++ platform/mbed_critical.c | 77 ++++++++++++++++++++++++++++ platform/mbed_critical.h | 51 ++++++++++++++++++ 3 files changed, 154 insertions(+) diff --git a/UNITTESTS/stubs/mbed_critical_stub.c b/UNITTESTS/stubs/mbed_critical_stub.c index 0c6774301c..b13bf61bff 100644 --- a/UNITTESTS/stubs/mbed_critical_stub.c +++ b/UNITTESTS/stubs/mbed_critical_stub.c @@ -71,6 +71,22 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV } +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) +{ + return 0; +} + +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) +{ + return 0; +} + +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) +{ + return 0; +} + + uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) { return 0; @@ -112,6 +128,11 @@ void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredVal { } +uint64_t core_util_atomic_exchange_u64(volatile 
uint64_t *valuePtr, uint64_t desiredValue) +{ + return 0; +} + bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) { return false; @@ -133,6 +154,11 @@ bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, return false; } +void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) +{ + return NULL; +} + void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) { return NULL; diff --git a/platform/mbed_critical.c b/platform/mbed_critical.c index aebebe3280..8b847730c6 100644 --- a/platform/mbed_critical.c +++ b/platform/mbed_critical.c @@ -167,6 +167,39 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV return true; } +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue) +{ + MBED_BARRIER(); + uint8_t currentValue; + do { + currentValue = __LDREXB(valuePtr); + } while (__STREXB(desiredValue, valuePtr)); + MBED_BARRIER(); + return currentValue; +} + +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue) +{ + MBED_BARRIER(); + uint16_t currentValue; + do { + currentValue = __LDREXH(valuePtr); + } while (__STREXH(desiredValue, valuePtr)); + MBED_BARRIER(); + return currentValue; +} + +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue) +{ + MBED_BARRIER(); + uint32_t currentValue; + do { + currentValue = __LDREXW(valuePtr); + } while (__STREXW(desiredValue, valuePtr)); + MBED_BARRIER(); + return currentValue; +} + uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) { MBED_BARRIER(); @@ -298,6 +331,34 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV } +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue) +{ + core_util_critical_section_enter(); + uint8_t currentValue = *ptr; + *ptr = desiredValue; + 
core_util_critical_section_exit(); + return currentValue; +} + +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue) +{ + core_util_critical_section_enter(); + uint16_t currentValue = *ptr; + *ptr = desiredValue; + core_util_critical_section_exit(); + return currentValue; +} + +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue) +{ + core_util_critical_section_enter(); + uint32_t currentValue = *ptr; + *ptr = desiredValue; + core_util_critical_section_exit(); + return currentValue; +} + + uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta) { uint8_t newValue; @@ -377,6 +438,15 @@ void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredVal core_util_critical_section_exit(); } +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue) +{ + core_util_critical_section_enter(); + uint64_t currentValue = *valuePtr; + *valuePtr = desiredValue; + core_util_critical_section_exit(); + return currentValue; +} + bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue) { bool success; @@ -414,6 +484,8 @@ uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta) return newValue; } +MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit"); + bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue) { return core_util_atomic_cas_u32( @@ -422,6 +494,11 @@ bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, (uint32_t)desiredValue); } +void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue) +{ + return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); +} + void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta) { return (void *)core_util_atomic_incr_u32((volatile uint32_t 
*)valuePtr, (uint32_t)delta); diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index c80b73d057..715bd13e56 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -354,6 +354,57 @@ MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void MBED_BARRIER(); } +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue) +{ + return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue); +} + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue); + /** * Atomic increment. * @param valuePtr Target memory location being incremented. 
From 9e7c65925d986c018dcebaa258aae30ffb9f3121 Mon Sep 17 00:00:00 2001 From: Kevin Bracey Date: Tue, 5 Feb 2019 10:39:26 +0200 Subject: [PATCH 5/5] Add signed atomics All signed implementations are inline - either directly or as inline calls to out-of-line unsigned definitions. --- platform/mbed_critical.h | 247 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) diff --git a/platform/mbed_critical.h b/platform/mbed_critical.h index 715bd13e56..5de8de85db 100644 --- a/platform/mbed_critical.h +++ b/platform/mbed_critical.h @@ -211,6 +211,29 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV /** \copydoc core_util_atomic_cas_u8 */ bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue); +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue) +{ + return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, (uint8_t)desiredValue); +} + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue) +{ + return core_util_atomic_cas_u16((volatile uint16_t *)ptr, (uint16_t *)expectedCurrentValue, (uint16_t)desiredValue); +} +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue) +{ + return core_util_atomic_cas_u32((volatile uint32_t *)ptr, (uint32_t *)expectedCurrentValue, (uint32_t)desiredValue); +} + +/** \copydoc core_util_atomic_cas_u8 */ +MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue) +{ + return core_util_atomic_cas_u64((volatile uint64_t *)ptr, (uint64_t *)expectedCurrentValue, 
(uint64_t)desiredValue); +} + /** \copydoc core_util_atomic_cas_u8 */ MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue) { @@ -263,6 +286,52 @@ MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *val */ uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr); +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr) +{ + int8_t value = *valuePtr; + MBED_BARRIER(); + return value; +} + +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr) +{ + int16_t value = *valuePtr; + MBED_BARRIER(); + return value; +} + +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr) +{ + int32_t value = *valuePtr; + MBED_BARRIER(); + return value; +} + +/** + * Atomic load. + * @param valuePtr Target memory location. + * @return The loaded value. + */ +MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr) +{ + return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr); +} + /** * Atomic load. * @param valuePtr Target memory location. @@ -330,6 +399,52 @@ MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, ui */ void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue) +{ + MBED_BARRIER(); + *valuePtr = desiredValue; + MBED_BARRIER(); +} + +/** + * Atomic store. 
+ * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue) +{ + MBED_BARRIER(); + *valuePtr = desiredValue; + MBED_BARRIER(); +} + +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue) +{ + MBED_BARRIER(); + *valuePtr = desiredValue; + MBED_BARRIER(); +} + +/** + * Atomic store. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + */ +MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue) +{ + core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +} + /** * Atomic store. * @param valuePtr Target memory location. @@ -386,6 +501,50 @@ uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t des */ uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue); +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue) +{ + return (int8_t)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, (uint8_t)desiredValue); +} + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue) +{ + return (int16_t)core_util_atomic_exchange_u16((volatile uint16_t *)valuePtr, (uint16_t)desiredValue); +} + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. 
+ */ +MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue) +{ + return (int32_t)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue); +} + +/** + * Atomic exchange. + * @param valuePtr Target memory location. + * @param desiredValue The value to store. + * @return The previous value. + */ +MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue) +{ + return (int64_t)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue); +} + /** * Atomic exchange. * @param valuePtr Target memory location. @@ -437,6 +596,50 @@ uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta); */ uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta); +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta) +{ + return (int8_t)core_util_atomic_incr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); +} + +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta) +{ + return (int16_t)core_util_atomic_incr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); +} + +/** + * Atomic increment. + * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta) +{ + return (int32_t)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +} + +/** + * Atomic increment. 
+ * @param valuePtr Target memory location being incremented. + * @param delta The amount being incremented. + * @return The new incremented value. + */ +MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta) +{ + return (int64_t)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +} + /** * Atomic increment. * @param valuePtr Target memory location being incremented. @@ -480,6 +683,50 @@ uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta); */ uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta); +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. + */ +MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta) +{ + return (int8_t)core_util_atomic_decr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta); +} + +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. + */ +MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta) +{ + return (int16_t)core_util_atomic_decr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta); +} + +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. + */ +MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta) +{ + return (int32_t)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta); +} + +/** + * Atomic decrement. + * @param valuePtr Target memory location being decremented. + * @param delta The amount being decremented. + * @return The new decremented value. 
+ */ +MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta) +{ + return (int64_t)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta); +} + /** * Atomic decrement. * @param valuePtr Target memory location being decremented.