mirror of https://github.com/ARMmbed/mbed-os.git
				
				
				
Add atomic loads and stores and barriers

Add atomic load and store functions, and add barriers to the existing atomic functions. The file currently has no explicit barriers: we don't support SMP, so we don't need CPU barriers, but we do need to worry about compiler barriers - particularly if link-time optimisation is activated, since the compiler can then see inside these functions. The assembler or intrinsics that access PRIMASK for enter/exit critical act as barriers, but LDREX, STREX and simple volatile pointer loads and stores do not.

Branch: pull/9787/head
parent 46bb7540be
commit 703e44031c
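An editorial sketch (not part of the commit) of the hazard the message describes. volatile only orders volatile accesses against other volatile accesses, so once link-time optimisation lets the compiler inline an atomic helper, ordinary loads and stores can be moved across it unless an explicit compiler barrier intervenes; all names below are illustrative:

#include <stdint.h>

/* Illustrative compiler barrier - the same construct the commit's
 * MBED_COMPILER_BARRIER() uses for GCC/Clang/IAR. */
#define COMPILER_BARRIER() asm volatile("" : : : "memory")

static uint32_t payload;          /* ordinary data */
static volatile uint8_t ready;    /* guard flag    */

void publish(uint32_t value)
{
    payload = value;       /* without a barrier, LTO may sink this     */
    COMPILER_BARRIER();    /* force the payload write to happen first  */
    ready = 1;             /* volatile store that signals readiness    */
    COMPILER_BARRIER();    /* keep later accesses after the store      */
}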
				
			
@@ -54,10 +54,6 @@ bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
     return false;
 }
 
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
-{
-}
-
 bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
 {
     return false;
				
			
@@ -100,11 +100,6 @@ void core_util_critical_section_exit(void)
     }
 }
 
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
-{
-    flagPtr->_flag = false;
-}
-
 #if MBED_EXCLUSIVE_ACCESS
 
 /* Suppress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */
				
			
@@ -115,14 +110,17 @@ void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
 bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
 {
     uint8_t currentValue;
+    MBED_BARRIER();
     do {
         currentValue = __LDREXB(&flagPtr->_flag);
     } while (__STREXB(true, &flagPtr->_flag));
+    MBED_BARRIER();
     return currentValue;
 }
 
 bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint8_t currentValue = __LDREXB(ptr);
         if (currentValue != *expectedCurrentValue) {
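The flag functions above pair into the usual test-and-set spinlock. A usage sketch, assuming the CORE_UTIL_ATOMIC_FLAG_INIT initializer from mbed_critical.h; the loop relies on test_and_set returning the previous value:

#include "mbed_critical.h"

static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;

void locked_update(void (*update)(void))
{
    /* test_and_set returns the previous value: true means another
     * context still holds the flag, so keep spinning. */
    while (core_util_atomic_flag_test_and_set(&lock)) {
    }
    update();                            /* critical section */
    core_util_atomic_flag_clear(&lock);  /* release the flag */
}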
				
			
@@ -131,11 +129,13 @@ bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValu
             return false;
         }
     } while (__STREXB(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint16_t currentValue = __LDREXH(ptr);
         if (currentValue != *expectedCurrentValue) {
				
			
@@ -144,12 +144,14 @@ bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentV
             return false;
         }
     } while (__STREXH(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 
 bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
 {
+    MBED_BARRIER();
     do {
         uint32_t currentValue = __LDREXW(ptr);
         if (currentValue != *expectedCurrentValue) {
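A sketch of the retry loop these CAS functions are designed for - an atomic maximum, illustrative only. On failure the mbed CAS functions write the observed value back through expectedCurrentValue, so each iteration re-tests against fresh data:

#include "mbed_critical.h"

void atomic_store_max_u32(volatile uint32_t *dest, uint32_t value)
{
    uint32_t observed = core_util_atomic_load_u32(dest);
    while (observed < value) {
        /* Success: *dest was 'observed' and is now 'value'.
         * Failure: 'observed' has been refreshed; loop re-tests. */
        if (core_util_atomic_cas_u32(dest, &observed, value)) {
            break;
        }
    }
}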
				
			
@@ -158,33 +160,40 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV
             return false;
         }
     } while (__STREXW(desiredValue, ptr));
+    MBED_BARRIER();
     return true;
 }
 
 uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
 {
+    MBED_BARRIER();
     uint8_t newValue;
     do {
         newValue = __LDREXB(valuePtr) + delta;
     } while (__STREXB(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
 {
+    MBED_BARRIER();
     uint16_t newValue;
     do {
         newValue = __LDREXH(valuePtr) + delta;
     } while (__STREXH(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 {
     uint32_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXW(valuePtr) + delta;
     } while (__STREXW(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
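These LDREX/STREX loops are what make the read-modify-write atomic: __STREX fails (returns non-zero) if the exclusive monitor was disturbed between the exclusive load and store - for example by an interrupt writing the same location - and the loop then re-reads. A usage sketch with an illustrative interrupt handler name:

#include "mbed_critical.h"

static volatile uint32_t event_count;

void my_irq_handler(void)    /* hypothetical ISR */
{
    /* Safe even if this preempts an increment in thread context:
     * the interrupted STREX fails and the thread-level loop retries. */
    core_util_atomic_incr_u32(&event_count, 1);
}

uint32_t events_so_far(void)
{
    return core_util_atomic_load_u32(&event_count);
}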
				
			
@@ -192,27 +201,33 @@ uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
 {
     uint8_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXB(valuePtr) - delta;
     } while (__STREXB(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
 {
     uint16_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXH(valuePtr) - delta;
     } while (__STREXH(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
 uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
 {
     uint32_t newValue;
+    MBED_BARRIER();
     do {
         newValue = __LDREXW(valuePtr) - delta;
     } while (__STREXW(newValue, valuePtr));
+    MBED_BARRIER();
     return newValue;
 }
 
				
			
@@ -22,6 +22,7 @@
 #include <stdbool.h>
 #include <stdint.h>
 #include <stddef.h>
+#include "mbed_toolchain.h"
 
 #ifdef __cplusplus
 extern "C" {
				
			
@@ -89,6 +90,19 @@ void core_util_critical_section_exit(void);
  */
 bool core_util_in_critical_section(void);
 
+/**@}*/
+
+/**
+ * \defgroup platform_atomic atomic functions
+ *
+ * Atomic functions function analogously to C11 and C++11 - loads have
+ * acquire semantics, stores have release semantics, and atomic operations
+ * are sequentially consistent. Atomicity is enforced both between threads and
+ * interrupt handlers.
+ *
+ * @{
+ */
+
 /**
  * A lock-free, primitive atomic flag.
  *
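The semantics this new doxygen group promises line up with C11 <stdatomic.h>; roughly, in C11 terms (an editorial sketch, not mbed code):

#include <stdatomic.h>
#include <stdint.h>

uint32_t load_like_mbed(const volatile _Atomic uint32_t *p)
{
    return atomic_load_explicit(p, memory_order_acquire);  /* load = acquire */
}

void store_like_mbed(volatile _Atomic uint32_t *p, uint32_t v)
{
    atomic_store_explicit(p, v, memory_order_release);     /* store = release */
}

uint32_t incr_like_mbed(volatile _Atomic uint32_t *p, uint32_t delta)
{
    /* read-modify-write = sequentially consistent; like the mbed
     * incr functions, return the new value rather than the old one */
    return atomic_fetch_add(p, delta) + delta;
}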
				
			
@@ -124,7 +138,11 @@ bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
  *
  * @param  flagPtr Target flag being cleared.
  */
-void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr);
+MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
+{
+    MBED_BARRIER();
+    flagPtr->_flag = false;
+}
 
 /**
  * Atomic compare and set. It compares the contents of a memory location to a
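Moving the clear into the header as MBED_FORCEINLINE keeps it cheap, and the barrier ahead of the store is what gives it release semantics; in C11 terms it is roughly (sketch only):

#include <stdatomic.h>

void clear_like_mbed(volatile atomic_flag *flagPtr)
{
    /* release: prior accesses complete before the flag is cleared */
    atomic_flag_clear_explicit(flagPtr, memory_order_release);
}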
				
			
@@ -354,6 +372,102 @@ bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentV
  */
 bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
 
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr)
+{
+    uint8_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr)
+{
+    uint16_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
+{
+    uint32_t value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic load.
+ * @param  valuePtr Target memory location.
+ * @return          The loaded value.
+ */
+MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr)
+{
+    void *value = *valuePtr;
+    MBED_BARRIER();
+    return value;
+}
+
+/**
+ * Atomic store.
+ * @param  valuePtr     Target memory location.
+ * @param  desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param  valuePtr     Target memory location.
+ * @param  desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param  valuePtr     Target memory location.
+ * @param  desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
+/**
+ * Atomic store.
+ * @param  valuePtr     Target memory location.
+ * @param  desiredValue The value to store.
+ */
+MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue)
+{
+    MBED_BARRIER();
+    *valuePtr = desiredValue;
+    MBED_BARRIER();
+}
+
 /**
  * Atomic increment.
  * @param  valuePtr Target memory location being incremented.
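A sketch of the publish/consume pattern these new loads and stores support, using only functions added in this hunk (variable names illustrative):

#include "mbed_critical.h"

static uint32_t message;                /* ordinary data  */
static volatile uint8_t message_ready;  /* guard variable */

void producer(uint32_t m)
{
    message = m;                                   /* plain write         */
    core_util_atomic_store_u8(&message_ready, 1);  /* release: flag last  */
}

bool consumer(uint32_t *out)
{
    if (core_util_atomic_load_u8(&message_ready)) {  /* acquire: flag first */
        *out = message;    /* ordered after the flag load */
        return true;
    }
    return false;
}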
				
			
@@ -152,6 +152,77 @@
 #endif
 #endif
 
+/** MBED_COMPILER_BARRIER
+ * Stop the compiler moving memory accesses.
+ *
+ * The barrier stops memory accesses from being moved from one side of the
+ * barrier to the other for safety against other threads and interrupts.
+ *
+ * This macro should only be used if we know only one CPU is accessing the data,
+ * or we are otherwise synchronising CPUs via acquire/release instructions.
+ * Otherwise, use MBED_BARRIER, which will act as a compiler barrier and also
+ * a CPU barrier if necessary.
+ *
+ * @internal
+ * This is not for use by normal code - it is a building block for the
+ * higher-level functions in mbed_critical.h. Higher-level lock/unlock or
+ * acquire/release APIs always provide ordering semantics, using this if
+ * necessary.
+ *
+ * @code
+ *  #include "mbed_toolchain.h"
+ *
+ *  void atomic_flag_clear_armv8(atomic_flag *flagPtr)
+ *  {
+ *      // ARMv8 LDA and STL instructions provide sequential consistency against
+ *      // other CPUs, so no CPU barrier is needed. But we still need compiler
+ *      // barriers to give us sequentially-consistent release semantics with
+ *      // respect to compiler reordering - __STLB does not currently
+ *      // include this.
+ *      MBED_COMPILER_BARRIER();
+ *      __STLB(&flagPtr->_flag, false);
+ *      MBED_COMPILER_BARRIER();
+ *  }
+ */
+#ifdef __CC_ARM
+#define MBED_COMPILER_BARRIER() __memory_changed()
+#elif defined(__GNUC__) || defined(__clang__) || defined(__ICCARM__)
+#define MBED_COMPILER_BARRIER() asm volatile("" : : : "memory")
+#else
+#error "Missing MBED_COMPILER_BARRIER implementation"
+#endif
+
+/** MBED_BARRIER
+ * Stop the compiler, and CPU if SMP, from moving memory accesses.
+ *
+ * The barrier stops memory accesses from being moved from one side of the
+ * barrier to the other for safety against other threads and interrupts,
+ * potentially on other CPUs.
+ *
+ * In a single-CPU system, this is just a compiler barrier.
+ * If we supported multiple CPUs, this would be a DMB (with implied compiler
+ * barrier).
+ *
+ * @internal
+ * This is not for use by normal code - it is a building block for the
+ * higher-level functions in mbed_critical.h. Higher-level lock/unlock or
+ * acquire/release APIs always provide ordering semantics, using this if
+ * necessary.
+ * @code
+ *  #include "mbed_toolchain.h"
+ *
+ *  void atomic_flag_clear_armv7(atomic_flag *flagPtr)
+ *  {
+ *      // ARMv7 LDR and STR instructions do not provide any ordering
+ *      // consistency against other CPUs, so explicit barrier DMBs are needed
+ *      // for a multi-CPU system, otherwise just compiler barriers for single-CPU.
+ *      MBED_BARRIER();
+ *      flagPtr->_flag = false;
+ *      MBED_BARRIER();
+ *  }
+ */
+#define MBED_BARRIER() MBED_COMPILER_BARRIER()
+
 /** MBED_PURE
  *  Hint to the compiler that a function depends only on parameters
  *