mirror of https://github.com/ARMmbed/mbed-os.git
Merge pull request #9247 from kjbracey-arm/atomic_load_store
Add atomic loads and stores and barriers (pull/9421/head)
commit a23a850a42
@@ -54,10 +54,6 @@ bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
     return false;
 }
 
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
-{
-}
-
 bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
 {
     return false;
@@ -100,11 +100,6 @@ void core_util_critical_section_exit(void)
     }
 }
 
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
-{
-    flagPtr->_flag = false;
-}
-
 #if MBED_EXCLUSIVE_ACCESS
 
 /* Supress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */
@@ -115,14 +110,17 @@ void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
 bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
 {
     uint8_t currentValue;
+    MBED_BARRIER();
     do {
         currentValue = __LDREXB(&flagPtr->_flag);
     } while (__STREXB(true, &flagPtr->_flag));
+    MBED_BARRIER();
     return currentValue;
 }
 
 bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint8_t currentValue = __LDREXB(ptr);
         if (currentValue != *expectedCurrentValue) {
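The exclusive monitor set by LDREX/STREX guarantees atomicity here, while the added MBED_BARRIER() pairs supply the ordering, so the barriers can sit outside the retry loop. For illustration, the same pattern applied to a read-modify-write that is not part of this PR — a hypothetical fetch-or helper (name assumed):

uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t mask)
{
    uint8_t oldValue;
    MBED_BARRIER();                                 // order earlier accesses before the RMW
    do {
        oldValue = __LDREXB(valuePtr);              // load-exclusive arms the monitor
    } while (__STREXB(oldValue | mask, valuePtr));  // non-zero (retry) if the monitor was lost
    MBED_BARRIER();                                 // order the RMW before later accesses
    return oldValue;
}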
@@ -131,11 +129,13 @@ bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
             return false;
         }
     } while (__STREXB(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint16_t currentValue = __LDREXH(ptr);
         if (currentValue != *expectedCurrentValue) {
@@ -144,12 +144,14 @@ bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
             return false;
         }
     } while (__STREXH(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 
 bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint32_t currentValue = __LDREXW(ptr);
         if (currentValue != *expectedCurrentValue) {
@@ -158,33 +160,40 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
             return false;
         }
     } while (__STREXW(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
 {
+    MBED_BARRIER();
     uint8_t newValue;
     do {
         newValue = __LDREXB(valuePtr) + delta;
     } while (__STREXB(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
 {
+    MBED_BARRIER();
     uint16_t newValue;
     do {
         newValue = __LDREXH(valuePtr) + delta;
     } while (__STREXH(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 {
     uint32_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXW(valuePtr) + delta;
     } while (__STREXW(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
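On failure, the CAS functions write the observed value back through expectedCurrentValue (per their documentation), which is what makes the canonical retry loop cheap. A usage sketch, not part of the diff (function name assumed), keeping a running maximum:

void atomic_store_max_u32(volatile uint32_t *maxPtr, uint32_t sample)
{
    uint32_t current = core_util_atomic_load_u32(maxPtr);
    while (sample > current) {
        if (core_util_atomic_cas_u32(maxPtr, &current, sample)) {
            break;  // CAS succeeded: sample is now the stored maximum
        }
        // CAS failed: 'current' was refreshed with the observed value; re-test
    }
}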
@@ -192,27 +201,33 @@ uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
 {
     uint8_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXB(valuePtr) - delta;
     } while (__STREXB(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
 {
     uint16_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXH(valuePtr) - delta;
     } while (__STREXH(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 {
     uint32_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXW(valuePtr) - delta;
     } while (__STREXW(newValue, valuePtr));
+    MBED_BARRIER();
    return newValue;
 }
 
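Because the increment and decrement functions return the new value, the common reference-count idiom needs no extra load. A sketch (type and names assumed, not from this PR):

typedef struct {
    volatile uint32_t refs;
    void (*destroy)(void *self);
} refcounted_t;

void ref_acquire(refcounted_t *obj)
{
    core_util_atomic_incr_u32(&obj->refs, 1);
}

void ref_release(refcounted_t *obj)
{
    if (core_util_atomic_decr_u32(&obj->refs, 1) == 0) {
        obj->destroy(obj);  // new value hit zero: last reference released
    }
}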
@@ -22,6 +22,7 @@
 #include <stdbool.h>
 #include <stdint.h>
 #include <stddef.h>
+#include "mbed_toolchain.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -89,6 +90,19 @@ void core_util_critical_section_exit(void);
  */
 bool core_util_in_critical_section(void);
 
+/**@}*/
+
+/**
+ * \defgroup platform_atomic atomic functions
+ *
+ * Atomic functions function analogously to C11 and C++11 - loads have
+ * acquire semantics, stores have release semantics, and atomic operations
+ * are sequentially consistent. Atomicity is enforced both between threads and
+ * interrupt handlers.
+ *
+ * @{
+ */
+
 /**
  * A lock-free, primitive atomic flag.
  *
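The new documentation group pins the semantics to the C11/C++11 memory model. Roughly, in C11 terms (an illustrative mapping, not code from this PR):

#include <stdatomic.h>
#include <stdint.h>

/* ~ core_util_atomic_load_u32: a load with acquire semantics */
uint32_t load_like_mbed(const volatile atomic_uint_fast32_t *p)
{
    return (uint32_t)atomic_load_explicit(p, memory_order_acquire);
}

/* ~ core_util_atomic_store_u32: a store with release semantics */
void store_like_mbed(volatile atomic_uint_fast32_t *p, uint32_t v)
{
    atomic_store_explicit(p, v, memory_order_release);
}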
@@ -124,7 +138,11 @@ bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
  *
  * @param flagPtr Target flag being cleared.
  */
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr);
+MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+}
 
 /**
  * Atomic compare and set. It compares the contents of a memory location to a
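With the clear operation now inline, a test-and-set/clear pair compiles down to a handful of instructions. A usage sketch of the pair as a minimal spinlock (assuming the CORE_UTIL_ATOMIC_FLAG_INIT initializer from mbed_critical.h; not suitable where an ISR might contend for the same lock):

static core_util_atomic_flag spin = CORE_UTIL_ATOMIC_FLAG_INIT;
static uint32_t shared_counter;

void counter_increment_locked(void)
{
    while (core_util_atomic_flag_test_and_set(&spin)) {
        // flag was already set: spin until the holder clears it
    }
    shared_counter++;                     // critical section
    core_util_atomic_flag_clear(&spin);   // release the lock
}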
@@ -354,6 +372,102 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
  */
 bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
 
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr)
+{
+    uint8_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr)
+{
+    uint16_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
+{
+    uint32_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr)
+{
+    void *value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic store.
+ * @param valuePtr     Target memory location.
+ * @param desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param valuePtr     Target memory location.
+ * @param desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param valuePtr     Target memory location.
+ * @param desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param valuePtr     Target memory location.
+ * @param desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
 /**
  * Atomic increment.
  * @param valuePtr Target memory location being incremented.
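The acquire/release pairing of these loads and stores is what enables a flag-plus-payload handoff without a critical section. An illustrative sketch (names assumed, not from the PR):

#include <stdbool.h>
#include <stdint.h>

static uint32_t payload;            // plain data, published via 'ready'
static volatile uint8_t ready;

void publish(uint32_t value)        // e.g. called from an ISR
{
    payload = value;                          // ordinary write first...
    core_util_atomic_store_u8(&ready, 1);     // ...then the release store publishes it
}

bool try_consume(uint32_t *out)
{
    if (!core_util_atomic_load_u8(&ready)) {  // acquire load
        return false;
    }
    *out = payload;                           // ordered after the acquire load
    return true;
}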
@@ -152,6 +152,77 @@
 #endif
 #endif
 
+/** MBED_COMPILER_BARRIER
+ * Stop the compiler moving memory accesses.
+ *
+ * The barrier stops memory accesses from being moved from one side of the
+ * barrier to the other for safety against other threads and interrupts.
+ *
+ * This macro should only be used if we know only one CPU is accessing the data,
+ * or we are otherwise synchronising CPUs via acquire/release instructions.
+ * Otherwise, use MBED_BARRIER, which will act as a compiler barrier and also
+ * a CPU barrier if necessary.
+ *
+ * @internal
+ * This is not for use by normal code - it is a building block for the
+ * higher-level functions in mbed_critical.h. Higher-level lock/unlock or
+ * acquire/release APIs always provide ordering semantics, using this if
+ * necessary.
+ *
+ * @code
+ * #include "mbed_toolchain.h"
+ *
+ * void atomic_flag_clear_armv8(atomic_flag *flagPtr)
+ * {
+ *     // ARMv8 LDA and STL instructions provide sequential consistency against
+ *     // other CPUs, so no CPU barrier is needed. But we still need compiler
+ *     // barriers to give us sequentially-consistent release semantics with
+ *     // respect to compiler reordering - __STLB does not currently
+ *     // include this.
+ *     MBED_COMPILER_BARRIER();
+ *     __STLB(&flagPtr->_flag, false);
+ *     MBED_COMPILER_BARRIER();
+ * }
+ */
+#ifdef __CC_ARM
+#define MBED_COMPILER_BARRIER() __memory_changed()
+#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
+#define MBED_COMPILER_BARRIER() asm volatile("" : : : "memory")
+#else
+#error "Missing MBED_COMPILER_BARRIER implementation"
+#endif
+
+/** MBED_BARRIER
+ * Stop the compiler, and CPU if SMP, from moving memory accesses.
+ *
+ * The barrier stops memory accesses from being moved from one side of the
+ * barrier to the other for safety against other threads and interrupts,
+ * potentially on other CPUs.
+ *
+ * In a single-CPU system, this is just a compiler barrier.
+ * If we supported multiple CPUs, this would be a DMB (with implied compiler
+ * barrier).
+ *
+ * @internal
+ * This is not for use by normal code - it is a building block for the
+ * higher-level functions in mbed_critical.h. Higher-level lock/unlock or
+ * acquire/release APIs always provide ordering semantics, using this if
+ * necessary.
+ * @code
+ * #include "mbed_toolchain.h"
+ *
+ * void atomic_flag_clear_armv7(atomic_flag *flagPtr)
+ * {
+ *     // ARMv7 LDR and STR instructions do not provide any ordering
+ *     // consistency against other CPUs, so explicit barrier DMBs are needed
+ *     // for a multi-CPU system, otherwise just compiler barriers for single-CPU.
+ *     MBED_BARRIER();
+ *     flagPtr->_flag = false;
+ *     MBED_BARRIER();
+ * }
+ */
+#define MBED_BARRIER() MBED_COMPILER_BARRIER()
+
 /** MBED_PURE
  * Hint to the compiler that a function depends only on parameters
  *
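As the armv7 example above notes, on today's single-core targets MBED_BARRIER() reduces to MBED_COMPILER_BARRIER(). One consequence worth illustrating (a sketch, names assumed): a compiler barrier inside a polling loop is enough to force the flag to be re-read on every iteration, even without volatile.

#include <stdint.h>
#include "mbed_toolchain.h"

static uint8_t dma_done;      // written from a DMA-complete interrupt handler

void wait_for_dma(void)
{
    while (!dma_done) {
        MBED_BARRIER();       // compiler must assume memory changed: reload dma_done
    }
}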