Assembler atomics

Reimplement atomic code in inline assembly. This can improve
optimisation and avoid potential architectural problems with using
LDREX/STREX intrinsics.

API further extended:
* Bitwise operations (fetch_and/fetch_or/fetch_xor)
* fetch_add and fetch_sub (like incr/decr, but returning the old value,
  aligning with C++11)
* compare_exchange_weak
* Explicit memory order specification
* Basic freestanding template overloads for C++

This gives our existing C implementation essentially all the functionality
needed by C++11.
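
For illustration, a minimal sketch of the extended API in use (the
surrounding function and variable names are hypothetical; the core_util_*
calls are those declared in mbed_atomic.h):

#include "platform/mbed_atomic.h"

static volatile uint32_t event_count;

void on_event(void)
{
    /* fetch_add returns the old value, matching C++11 atomic_fetch_add */
    uint32_t old = core_util_atomic_fetch_add_u32(&event_count, 1);
    (void)old;
}

uint32_t peek_count(void)
{
    /* explicit memory order specification, as per C++11 */
    return core_util_atomic_load_explicit_u32(&event_count,
                                              mbed_memory_order_relaxed);
}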

An actual Atomic<T> template based upon these C functions could follow.
pull/10147/head
Kevin Bracey 2019-01-22 16:09:25 +02:00
parent beed42e666
commit 87396e0bf6
46 changed files with 2623 additions and 1222 deletions

View File

@@ -35,7 +35,7 @@ set(unittest-test-sources
stubs/LoRaPHY_stub.cpp
stubs/LoRaMac_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_critical_stub.c
stubs/mbed_atomic_stub.c
stubs/LoRaMacCrypto_stub.cpp
stubs/LoRaMacChannelPlan_stub.cpp
stubs/LoRaWANTimer_stub.cpp

View File

@@ -22,6 +22,7 @@ set(unittest-test-sources
features/netsocket/DTLSSocket/test_DTLSSocket.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c

View File

@@ -21,6 +21,7 @@ set(unittest-test-sources
features/netsocket/DTLSSocketWrapper/test_DTLSSocketWrapper.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c

View File

@@ -18,6 +18,7 @@ set(unittest-test-sources
features/netsocket/InternetSocket/test_InternetSocket.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
stubs/EventQueue_stub.cpp

View File

@@ -22,6 +22,7 @@ set(unittest-sources
set(unittest-test-sources
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
stubs/EventQueue_stub.cpp

View File

@@ -19,6 +19,7 @@ set(unittest-test-sources
features/netsocket/TCPSocket/test_TCPSocket.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
stubs/EventQueue_stub.cpp

View File

@@ -21,6 +21,7 @@ set(unittest-test-sources
features/netsocket/TLSSocket/test_TLSSocket.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c

View File

@@ -20,6 +20,7 @@ set(unittest-test-sources
features/netsocket/TLSSocketWrapper/test_TLSSocketWrapper.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
../features/nanostack/coap-service/test/coap-service/unittest/stub/mbedtls_stub.c

View File

@@ -19,6 +19,7 @@ set(unittest-test-sources
features/netsocket/UDPSocket/test_UDPSocket.cpp
stubs/Mutex_stub.cpp
stubs/mbed_assert_stub.c
stubs/mbed_atomic_stub.c
stubs/mbed_critical_stub.c
stubs/equeue_stub.c
stubs/EventQueue_stub.cpp

View File

@@ -0,0 +1,233 @@
/*
* Copyright (c) 2017, Arm Limited and affiliates.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "platform/mbed_atomic.h"
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
return false;
}
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
{
return false;
}
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
{
return false;
}
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
return false;
}
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue)
{
return 0;
}
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue)
{
return 0;
}
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue)
{
return 0;
}
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
return 0;
}
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
return 0;
}
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
return 0;
}
uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
return 0;
}
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
return 0;
}
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
return 0;
}
uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg)
{
return 0;
}
uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg)
{
return 0;
}
uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg)
{
return 0;
}
uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg)
{
return 0;
}
uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg)
{
return 0;
}
uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg)
{
return 0;
}
uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg)
{
return 0;
}
uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg)
{
return 0;
}
uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg)
{
return 0;
}
uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg)
{
return 0;
}
uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg)
{
return 0;
}
uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg)
{
return 0;
}
uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg)
{
return 0;
}
uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg)
{
return 0;
}
uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg)
{
return 0;
}
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
{
return 0;
}
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
}
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
return 0;
}
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
{
return false;
}
bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
{
return false;
}
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
return 0;
}
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
return 0;
}
uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg)
{
return 0;
}
uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg)
{
return 0;
}
uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg)
{
return 0;
}
uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg)
{
return 0;
}
uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg)
{
return 0;
}
/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */
extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);
extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);

View File

@@ -24,8 +24,6 @@
#include "platform/mbed_critical.h"
#include "platform/mbed_toolchain.h"
static volatile uint32_t critical_section_reentrancy_counter = 0;
bool core_util_are_interrupts_enabled(void)
{
return false;
@@ -48,124 +46,3 @@ void core_util_critical_section_enter(void)
void core_util_critical_section_exit(void)
{
}
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
return false;
}
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
{
return false;
}
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
{
return false;
}
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
return false;
}
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue)
{
return 0;
}
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue)
{
return 0;
}
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue)
{
return 0;
}
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
return 0;
}
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
return 0;
}
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
return 0;
}
uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
return 0;
}
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
return 0;
}
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
return 0;
}
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
{
return 0;
}
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
}
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
return 0;
}
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
{
return false;
}
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
return 0;
}
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
return 0;
}
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
return false;
}
void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
{
return NULL;
}
void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
return NULL;
}
void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
return NULL;
}

View File

@@ -19,7 +19,7 @@
#include "psa_defs.h"
#include "cmsis_os2.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include "spm_internal.h"
#include "spm_panic.h"
#include "handles_manager.h"

View File

@@ -16,7 +16,7 @@
*/
#include "cmsis_os2.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include "psa_defs.h"
#include "spm_internal.h"
#include "spm_panic.h"

View File

@@ -15,7 +15,7 @@
*/
#include "DataFlashBlockDevice.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include <inttypes.h>

View File

@@ -17,7 +17,7 @@
#if DEVICE_FLASH
#include "FlashIAPBlockDevice.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include "mbed_error.h"
using namespace mbed;

View File

@@ -26,7 +26,7 @@
#include "features/netsocket/nsapi_types.h"
#include "mbed_trace.h"
#include "platform/Callback.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_debug.h"
#include "platform/mbed_wait_api.h"

View File

@@ -42,7 +42,7 @@
#include <stdint.h>
#include "events/EventQueue.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include "platform/Callback.h"
#include "platform/NonCopyable.h"
#include "platform/ScopedLock.h"

View File

@@ -15,6 +15,7 @@
*/
#include "InternetSocket.h"
#include "platform/mbed_critical.h"
#include "platform/Callback.h"
using namespace mbed;

View File

@@ -25,7 +25,7 @@
#include "rtos/Mutex.h"
#include "rtos/EventFlags.h"
#include "Callback.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include "mbed_toolchain.h"
#include "SocketStats.h"

View File

@@ -16,7 +16,7 @@
#include "BufferedBlockDevice.h"
#include "platform/mbed_assert.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include <algorithm>
#include <string.h>

View File

@@ -15,7 +15,7 @@
*/
#include "ChainingBlockDevice.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_assert.h"
namespace mbed {

View File

@@ -15,7 +15,7 @@
*/
#include "ExhaustibleBlockDevice.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_assert.h"
namespace mbed {

View File

@@ -16,7 +16,7 @@
#include "FlashSimBlockDevice.h"
#include "platform/mbed_assert.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include <algorithm>
#include <stdlib.h>
#include <string.h>

View File

@@ -15,7 +15,7 @@
*/
#include "HeapBlockDevice.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include <stdlib.h>
#include <string.h>

View File

@@ -15,7 +15,7 @@
*/
#include "MBRBlockDevice.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_toolchain.h"
#include "platform/mbed_assert.h"
#include <algorithm>

View File

@@ -22,7 +22,7 @@
#include "FlashIAP.h"
#include "SystemStorage.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#include "mbed_assert.h"
#include "mbed_error.h"
#include "mbed_wait_api.h"

mbed.h
View File

@@ -86,6 +86,7 @@
#include "drivers/InterruptIn.h"
#include "platform/mbed_wait_api.h"
#include "hal/sleep_api.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_power_mgmt.h"
#include "platform/mbed_rtc_time.h"
#include "platform/mbed_poll.h"

View File

@@ -17,6 +17,7 @@
#ifndef MBED_CIRCULARBUFFER_H
#define MBED_CIRCULARBUFFER_H
#include <stdint.h>
#include "platform/mbed_critical.h"
#include "platform/mbed_assert.h"

View File

@@ -19,7 +19,7 @@
#include <limits.h>
#include "platform/mbed_power_mgmt.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
namespace mbed {

View File

@@ -23,7 +23,7 @@
#include <stdint.h>
#include <stddef.h>
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
namespace mbed {

View File

@@ -28,7 +28,7 @@
#include <stdint.h>
#include <new>
#include "platform/mbed_assert.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_atomic.h"
#ifdef MBED_CONF_RTOS_PRESENT
#include "cmsis_os2.h"
#endif

View File

@@ -0,0 +1,169 @@
/*
* Copyright (c) 2019, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "platform/mbed_assert.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"
/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */
MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte");
/* Inline implementations in the header use uint32_t versions to manipulate pointers */
MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit");
#define DO_MBED_LOCKED_OP(name, OP, retValue, T, fn_suffix) \
T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg) \
{ \
T oldValue, newValue; \
core_util_critical_section_enter(); \
oldValue = *valuePtr; \
newValue = OP; \
*valuePtr = newValue; \
core_util_critical_section_exit(); \
return retValue; \
}
#define DO_MBED_LOCKED_CAS_OP(T, fn_suffix) \
bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue) \
{ \
bool success; \
T currentValue; \
core_util_critical_section_enter(); \
currentValue = *ptr; \
if (currentValue == *expectedCurrentValue) { \
*ptr = desiredValue; \
success = true; \
} else { \
*expectedCurrentValue = currentValue; \
success = false; \
} \
core_util_critical_section_exit(); \
return success; \
} \
\
bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr, \
T *expectedCurrentValue, T desiredValue) \
{ \
return core_util_atomic_cas_##fn_suffix(ptr, expectedCurrentValue, desiredValue); \
}
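/* Editor's illustration (not part of this file): expanding
 * DO_MBED_LOCKED_OP(fetch_add, oldValue + arg, oldValue, uint64_t, u64)
 * yields a critical-section-protected read-modify-write:
 *
 *   uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg)
 *   {
 *       uint64_t oldValue, newValue;
 *       core_util_critical_section_enter();
 *       oldValue = *valuePtr;
 *       newValue = oldValue + arg;   // OP
 *       *valuePtr = newValue;
 *       core_util_critical_section_exit();
 *       return oldValue;             // retValue
 *   }
 */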
#if MBED_EXCLUSIVE_ACCESS
/* These are the C99 external definitions for the inline functions */
/* We maintain external definitions rather than using "static inline" for backwards binary compatibility
* and to give the compiler plenty of leeway to choose to not inline in both C and C++ modes
*/
extern inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr);
extern inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t newValue);
extern inline uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t newValue);
extern inline uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t newValue);
extern inline uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
extern inline bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
extern inline bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
#else
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
core_util_critical_section_enter();
uint8_t currentValue = flagPtr->_flag;
flagPtr->_flag = true;
core_util_critical_section_exit();
return currentValue;
}
#endif
/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
{
core_util_critical_section_enter();
uint64_t currentValue = *valuePtr;
core_util_critical_section_exit();
return currentValue;
}
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
core_util_critical_section_enter();
*valuePtr = desiredValue;
core_util_critical_section_exit();
}
/* Now locked operations for whichever we don't have lock-free ones for */
#if MBED_EXCLUSIVE_ACCESS
/* Just need 64-bit locked operations */
#define DO_MBED_LOCKED_OPS(name, OP, retValue) \
DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_OPS() \
DO_MBED_LOCKED_CAS_OP(uint64_t, u64)
#else
/* All the operations are locked */
#define DO_MBED_LOCKED_OPS(name, OP, retValue) \
DO_MBED_LOCKED_OP(name, OP, retValue, uint8_t, u8) \
DO_MBED_LOCKED_OP(name, OP, retValue, uint16_t, u16) \
DO_MBED_LOCKED_OP(name, OP, retValue, uint32_t, u32) \
DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64)
#define DO_MBED_LOCKED_CAS_OPS() \
DO_MBED_LOCKED_CAS_OP(uint8_t, u8) \
DO_MBED_LOCKED_CAS_OP(uint16_t, u16) \
DO_MBED_LOCKED_CAS_OP(uint32_t, u32) \
DO_MBED_LOCKED_CAS_OP(uint64_t, u64)
#endif
// *INDENT-OFF*
DO_MBED_LOCKED_OPS(exchange, arg, oldValue)
DO_MBED_LOCKED_OPS(incr, oldValue + arg, newValue)
DO_MBED_LOCKED_OPS(decr, oldValue - arg, newValue)
DO_MBED_LOCKED_OPS(fetch_add, oldValue + arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_sub, oldValue - arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_and, oldValue & arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_or, oldValue | arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_xor, oldValue ^ arg, oldValue)
DO_MBED_LOCKED_CAS_OPS()
// *INDENT-ON*
/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */
extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);
extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);

File diff suppressed because it is too large.

platform/mbed_atomic.h (new file, 981 lines)
View File

@@ -0,0 +1,981 @@
/*
* Copyright (c) 2015-2016, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __MBED_UTIL_ATOMIC_H__
#define __MBED_UTIL_ATOMIC_H__
#include "cmsis.h"
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "mbed_toolchain.h"
/** \addtogroup platform */
/** @{*/
/**
* \defgroup platform_atomic atomic functions
*
* Atomic functions function analogously to C11 and C++11 - loads have
* acquire semantics, stores have release semantics, and atomic operations
* are sequentially consistent. Atomicity is enforced both between threads and
* interrupt handlers.
*
* @{
*/
#ifdef __cplusplus
extern "C" {
#endif
/**
* Memory order constraints for atomic operations. Intended semantics
* are as per C++11.
*/
typedef enum mbed_memory_order {
/* Bits 0 = consume
* 1 = acquire (explicitly requested, or implied by seq.cst)
* 2 = release (explicitly requested, or implied by seq.cst)
* 4 = sequentially consistent
*/
mbed_memory_order_relaxed = 0x00,
mbed_memory_order_consume = 0x01,
mbed_memory_order_acquire = 0x02,
mbed_memory_order_release = 0x04,
mbed_memory_order_acq_rel = 0x06,
mbed_memory_order_seq_cst = 0x16
} mbed_memory_order;
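/* Editor's sketch illustrating the bit encoding documented above
 * (hypothetical asserts, not part of this header):
 *
 *   MBED_STATIC_ASSERT(mbed_memory_order_acq_rel ==
 *                      (mbed_memory_order_acquire | mbed_memory_order_release),
 *                      "acq_rel is acquire plus release");
 *   MBED_STATIC_ASSERT((mbed_memory_order_seq_cst & mbed_memory_order_acq_rel) ==
 *                      mbed_memory_order_acq_rel,
 *                      "seq_cst implies both acquire and release");
 */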
// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros
#ifndef MBED_EXCLUSIVE_ACCESS
#ifndef __EXCLUSIVE_ACCESS
#if defined __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
#if ((__ARM_ARCH_7M__ == 1U) || \
(__ARM_ARCH_7EM__ == 1U) || \
(__ARM_ARCH_8M_BASE__ == 1U) || \
(__ARM_ARCH_8M_MAIN__ == 1U)) || \
(__ARM_ARCH_7A__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 1U
#define MBED_EXCLUSIVE_ACCESS_THUMB1 (__ARM_ARCH_8M_BASE__ == 1U)
#ifdef __ICCARM__
#if __CPU_MODE__ == 2
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#else
#if !defined (__thumb__)
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#endif
#elif (__ARM_ARCH_6M__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 0U
#else
#error "Unknown ARM architecture for exclusive access"
#endif // __ARM_ARCH_xxx
#else // __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
// Seem to be compiling for non-ARM, so stick with critical section implementations
#define MBED_EXCLUSIVE_ACCESS 0U
#endif
#else
#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS
#endif
#endif
#if MBED_EXCLUSIVE_ACCESS
#define MBED_INLINE_IF_EX inline
#else
#define MBED_INLINE_IF_EX
#endif
/**
* A lock-free, primitive atomic flag.
*
* Emulate C11's atomic_flag. The flag is initially in an indeterminate state
* unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT.
*/
typedef struct core_util_atomic_flag {
uint8_t _flag;
} core_util_atomic_flag;
/**
* Initializer for a core_util_atomic_flag.
*
* Example:
* ~~~
* core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT;
* ~~~
*/
#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 }
/**
* Atomic test and set.
*
* Atomically tests then sets the flag to true, returning the previous value.
*
* @param flagPtr Target flag being tested and set.
* @return The previous value.
*/
MBED_INLINE_IF_EX bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr);
/** \copydoc core_util_atomic_flag_test_and_set
* @param order memory ordering constraint
*/
MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *valuePtr, mbed_memory_order order);
/**
* Atomic clear.
*
* @param flagPtr Target flag being cleared.
*/
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr);
/** \copydoc core_util_atomic_flag_clear
* @param order memory ordering constraint
*/
MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order);
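/* Editor's illustration (hypothetical helper, not part of this header):
 * a simple non-blocking "try lock" built from the flag primitives.
 *
 *   static core_util_atomic_flag busy = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 *   bool try_do_work(void)
 *   {
 *       if (core_util_atomic_flag_test_and_set(&busy)) {
 *           return false;                    // flag already set - someone else is working
 *       }
 *       do_work();                           // hypothetical protected operation
 *       core_util_atomic_flag_clear(&busy);  // release the flag
 *       return true;
 *   }
 */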
/**
* Atomic compare and set. It compares the contents of a memory location to a
* given value and, only if they are the same, modifies the contents of that
* memory location to a given new value. This is done as a single atomic
* operation. The atomicity guarantees that the new value is calculated based on
* up-to-date information; if the value had been updated by another thread in
* the meantime, the write would fail due to a mismatched expectedCurrentValue.
*
* Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
* you to the article on compare-and-swap].
*
* @param ptr The target memory location.
* @param[in,out] expectedCurrentValue A pointer to some location holding the
* expected current value of the data being set atomically.
* The computed 'desiredValue' should be a function of this current value.
* @note: This is an in-out parameter. In the
* failure case of atomic_cas (where the
* destination isn't set), the pointee of expectedCurrentValue is
* updated with the current value.
* @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
*
* @return true if the memory location was atomically
* updated with the desired value (after verifying
* that it contained the expectedCurrentValue),
* false otherwise. In the failure case,
* expectedCurrentValue is updated with the new
* value of the target memory location.
*
* pseudocode:
* function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
* if *p != *old {
* *old = *p
* return false
* }
* *p = new
* return true
* }
*
* @note: In the failure case (where the destination isn't set), the value
* pointed to by expectedCurrentValue is instead updated with the current value.
* This property helps in writing concise code for the following incr:
*
* function incr(p : pointer to int, a : int) returns int {
* done = false
* value = atomic_load(p)
* while not done {
* done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success
* }
* return value + a
* }
*
* However, if the call is made in a loop like this, the atomic_compare_exchange_weak
* functions are to be preferred.
*
* @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it
* always succeeds if the current value is expected, as per the pseudocode
* above; it will not spuriously fail as "atomic_compare_exchange_weak" may.
* This call would normally be used when a fail return does not retry.
*/
MBED_INLINE_IF_EX bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
/** \copydoc core_util_atomic_cas_u8
* @param success memory ordering constraint for successful exchange
* @param failure memory ordering constraint for failure
*/
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_cas_u8 */
inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure);
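/* Editor's sketch (hypothetical function, not part of this header): the
 * documented CAS retry pattern in real C, using the u32 variants.
 *
 *   uint32_t atomic_max_u32(volatile uint32_t *p, uint32_t v)
 *   {
 *       uint32_t current = core_util_atomic_load_u32(p);
 *       while (current < v && !core_util_atomic_cas_u32(p, &current, v)) {
 *           // on failure, current is reloaded with the latest value; retry
 *       }
 *       return (current < v) ? v : current;
 *   }
 *
 * As noted above, a loop like this would normally prefer the
 * compare_exchange_weak variants.
 */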
/**
* Atomic compare and set. It compares the contents of a memory location to a
* given value and, only if they are the same, modifies the contents of that
* memory location to a given new value. This is done as a single atomic
* operation. The atomicity guarantees that the new value is calculated based on
* up-to-date information; if the value had been updated by another thread in
* the meantime, the write would fail due to a mismatched expectedCurrentValue.
*
* Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
* you to the article on compare-and-swap].
*
* @param ptr The target memory location.
* @param[in,out] expectedCurrentValue A pointer to some location holding the
* expected current value of the data being set atomically.
* The computed 'desiredValue' should be a function of this current value.
* @note: This is an in-out parameter. In the
* failure case of atomic_cas (where the
* destination isn't set), the pointee of expectedCurrentValue is
* updated with the current value.
* @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
*
* @return true if the memory location was atomically
* updated with the desired value (after verifying
* that it contained the expectedCurrentValue),
* false otherwise. In the failure case,
* expectedCurrentValue is updated with the new
* value of the target memory location.
*
* pseudocode:
* function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
* if *p != *old or spurious failure {
* *old = *p
* return false
* }
* *p = new
* return true
* }
*
* @note: In the failure case (where the destination isn't set), the value
* pointed to by expectedCurrentValue is instead updated with the current value.
* This property helps in writing concise code for the following incr:
*
* function incr(p : pointer to int, a : int) returns int {
* done = false
* value = *p // This fetch operation need not be atomic.
* while not done {
* done = atomic_compare_exchange_weak(p, &value, value + a) // *value gets updated automatically until success
* }
* return value + a
* }
*
* @note: This corresponds to the C11 "atomic_compare_exchange_weak" - it
* may spuriously fail even if the current value is expected, as per the
* pseudocode above, unlike "atomic_compare_exchange_strong".
* This call would normally be used when a fail return will cause a retry anyway,
* saving the need for an extra loop inside the cas operation.
*/
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_u8
* @param success memory ordering constraint for successful exchange
* @param failure memory ordering constraint for failure
*/
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure);
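/* Editor's sketch (hypothetical function, not part of this header): the same
 * retry pattern with the weak form, which the documentation above recommends
 * for loops because a spurious failure simply causes another iteration.
 *
 *   uint32_t atomic_fetch_double_u32(volatile uint32_t *p)
 *   {
 *       uint32_t value = core_util_atomic_load_u32(p);
 *       while (!core_util_atomic_compare_exchange_weak_u32(p, &value, value * 2)) {
 *           // value reloaded on failure (real or spurious); retry
 *       }
 *       return value;    // the old value, as in a fetch_* operation
 *   }
 */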
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr);
/**
* \copydoc core_util_atomic_load_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_load_explicit_u8(const volatile uint8_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_load_explicit_u16(const volatile uint16_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_load_explicit_u32(const volatile uint32_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_load_explicit_s8(const volatile int8_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_load_explicit_s16(const volatile int16_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_load_explicit_s32(const volatile int32_t *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_load_explicit_bool(const volatile bool *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr);
/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order);
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue);
/**
* \copydoc core_util_atomic_store_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE void core_util_atomic_store_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue);
/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order);
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue);
/** \copydoc core_util_atomic_exchange_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_exchange_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_exchange_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_exchange_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_exchange_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_exchange_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_exchange_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_u8 */
inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta);
/** \copydoc core_util_atomic_incr_u8 */
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta);
/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta);
/** \copydoc core_util_atomic_incr_u8 */
inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta);
/** \copydoc core_util_atomic_decr_u8 */
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta);
/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta);
/** \copydoc core_util_atomic_decr_u8 */
inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
/**
* Atomic add.
* @param valuePtr Target memory location being modified.
* @param arg The argument for the addition.
* @return The original value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg);
/** \copydoc core_util_atomic_fetch_add_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_add_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_add_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_s8(volatile int8_t *valuePtr, int8_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_s16(volatile int16_t *valuePtr, int16_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_s32(volatile int32_t *valuePtr, int32_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_s64(volatile int64_t *valuePtr, int64_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
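/* Editor's note (hypothetical values for illustration): incr/decr return the
 * NEW value, while fetch_add/fetch_sub return the OLD value, matching C++11:
 *
 *   volatile uint32_t refs = 1;
 *   uint32_t after  = core_util_atomic_incr_u32(&refs, 1);       // after == 2
 *   uint32_t before = core_util_atomic_fetch_sub_u32(&refs, 1);  // before == 2, refs back to 1
 */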
/**
* Atomic subtract.
* @param valuePtr Target memory location being modified.
* @param arg The argument for the subtraction.
* @return The original value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg);
/** \copydoc core_util_atomic_fetch_sub_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_sub_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_sub_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_s8(volatile int8_t *valuePtr, int8_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_s16(volatile int16_t *valuePtr, int16_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_s32(volatile int32_t *valuePtr, int32_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_s64(volatile int64_t *valuePtr, int64_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
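/* Sketch (illustrative): the _explicit variants take an mbed_memory_order
 * constraint analogous to C11's memory_order. A pure statistics counter that
 * needs atomicity but no inter-thread ordering could relax it, assuming the
 * C11-style mbed_memory_order_relaxed constant and a hypothetical volatile
 * uint32_t free_slots:
 *
 *     core_util_atomic_fetch_sub_explicit_u32(&free_slots, 1,
 *                                             mbed_memory_order_relaxed);
 */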
/**
* Atomic bitwise and.
* @param valuePtr Target memory location being modified.
* @param arg The argument for the bitwise operation.
* @return The original value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg);
/** \copydoc core_util_atomic_fetch_and_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_and_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_and_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg);
/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_and_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_and_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg);
/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_and_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_and_u8 */
uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg);
/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_and_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);
/**
* Atomic bitwise inclusive or.
* @param valuePtr Target memory location being modified.
* @param arg The argument for the bitwise operation.
* @return The original value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg);
/** \copydoc core_util_atomic_fetch_or_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_or_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_or_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg);
/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_or_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_or_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg);
/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_or_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_or_u8 */
uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg);
/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_or_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);
/**
* Atomic bitwise exclusive or.
* @param valuePtr Target memory location being modified.
* @param arg The argument for the bitwise operation.
* @return The original value.
*/
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg);
/** \copydoc core_util_atomic_fetch_xor_u8
* @param order memory ordering constraint
*/
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_xor_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_xor_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg);
/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_xor_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_xor_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg);
/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_xor_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_xor_u8 */
uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg);
/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);
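/* Sketch (illustrative): the bitwise operations return the previous value,
 * so a caller can set flag bits and simultaneously learn which were already
 * set (event_flags and FLAG_RX are hypothetical):
 *
 *     uint32_t previous = core_util_atomic_fetch_or_u32(&event_flags, FLAG_RX);
 *     if (previous & FLAG_RX) {
 *         // the flag was already pending
 *     }
 */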
#ifdef __cplusplus
} // extern "C"
// For each operation, two overloaded templates:
// * one for non-pointer types, which has implementations based on the
// u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation.
// * one for any pointer type, generically implemented based on ptr function above.
//
// Templates use standard C/C++ naming - old incr/decr/cas forms are not provided.
//
// Note that C++ template selection somewhat inhibits the ease of use of these templates.
// Ambiguities arise with setting pointers to NULL, or adding constants to integers.
// It may be necessary to cast the argument or desired value to the correct type, or
// explicitly specify the type - e.g. core_util_atomic_store<FileHandle>(&fh, NULL) or
// core_util_atomic_store(&val, (uint8_t)1).
// A proper mbed::Atomic<T> class would solve the issue.
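// Sketch (illustrative): a typical compare_exchange_weak retry loop using the
// templates (counter is a hypothetical volatile uint32_t). The weak form may
// fail spuriously, so it is always used in a loop:
//
//     uint32_t expected = core_util_atomic_load(&counter);
//     while (!core_util_atomic_compare_exchange_weak(&counter, &expected, expected + 1)) {
//         // expected now holds the observed value; recompute and retry
//     }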
/** \copydoc core_util_atomic_load_u8 */
template<typename T> T core_util_atomic_load(const volatile T *valuePtr);
/** \copydoc core_util_atomic_store_u8 */
template<typename T> void core_util_atomic_store(volatile T *valuePtr, T desiredValue);
/** \copydoc core_util_atomic_exchange_u8 */
template<typename T> T core_util_atomic_exchange(volatile T *ptr, T desiredValue);
/** \copydoc core_util_atomic_cas_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, T *expectedCurrentValue, T desiredValue);
/** \copydoc core_util_atomic_fetch_add_u8 */
template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, T arg);
/** \copydoc core_util_atomic_fetch_sub_u8 */
template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, T arg);
/** \copydoc core_util_atomic_fetch_and_u8 */
template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, T arg);
/** \copydoc core_util_atomic_fetch_or_u8 */
template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, T arg);
/** \copydoc core_util_atomic_fetch_xor_u8 */
template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, T arg);
/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, T desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_explicit_u8 */
template<typename T> T core_util_atomic_exchange_explicit(volatile T *ptr, T desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_cas_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, T *expectedCurrentValue, T desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, T arg, mbed_memory_order order);
/** \copydoc core_util_atomic_load_ptr */
template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr);
/** \copydoc core_util_atomic_store_ptr */
template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, T *desiredValue);
/** \copydoc core_util_atomic_exchange_ptr */
template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, T *desiredValue);
/** \copydoc core_util_atomic_cas_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
/** \copydoc core_util_atomic_compare_exchange_weak_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue);
/** \copydoc core_util_atomic_fetch_add_ptr */
template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg);
/** \copydoc core_util_atomic_fetch_sub_ptr */
template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg);
/** \copydoc core_util_atomic_load_explicit_ptr */
template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order);
/** \copydoc core_util_atomic_store_explicit_ptr */
template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_exchange_explicit_ptr */
template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, T *desiredValue, mbed_memory_order order);
/** \copydoc core_util_atomic_cas_explicit_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_ptr */
template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, T **expectedCurrentValue, T *desiredValue, mbed_memory_order success, mbed_memory_order failure);
/** \copydoc core_util_atomic_fetch_add_explicit_ptr */
template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
/** \copydoc core_util_atomic_fetch_sub_explicit_ptr */
template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);
#endif // __cplusplus
/**@}*/
/**@}*/
/* Hide the implementation away */
#include "platform/internal/mbed_atomic_impl.h"
#endif // __MBED_UTIL_ATOMICL_H__

View File

@ -24,25 +24,6 @@
#include "platform/mbed_critical.h"
#include "platform/mbed_toolchain.h"
// if __EXCLUSIVE_ACCESS rtx macro not defined, we need to get this via own-set architecture macros
#ifndef MBED_EXCLUSIVE_ACCESS
#ifndef __EXCLUSIVE_ACCESS
#if ((__ARM_ARCH_7M__ == 1U) || \
     (__ARM_ARCH_7EM__ == 1U) || \
     (__ARM_ARCH_8M_BASE__ == 1U) || \
     (__ARM_ARCH_8M_MAIN__ == 1U)) || \
     (__ARM_ARCH_7A__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 1U
#elif (__ARM_ARCH_6M__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 0U
#else
#error "Unknown architecture for exclusive access"
#endif
#else
#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS
#endif
#endif
static uint32_t critical_section_reentrancy_counter = 0;
bool core_util_are_interrupts_enabled(void)
@ -99,413 +80,3 @@ void core_util_critical_section_exit(void)
        hal_critical_section_exit();
    }
}
/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */
MBED_STATIC_ASSERT(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte");
#if MBED_EXCLUSIVE_ACCESS
/* Suppress __ldrex and __strex deprecated warnings - "#3731-D: intrinsic is deprecated" */
#if defined (__CC_ARM)
#pragma diag_suppress 3731
#endif
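/* All the exclusive-access implementations below follow the same
 * load-exclusive/store-exclusive retry pattern: LDREX marks the address for
 * exclusive monitoring, and STREX performs the store only if nothing
 * intervened, returning non-zero on failure so that the sequence is retried.
 * The MBED_BARRIER() calls provide the ordering guarantees documented for
 * this API. */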
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    uint8_t currentValue;
    do {
        currentValue = __LDREXB(&flagPtr->_flag);
    } while (__STREXB(true, &flagPtr->_flag));
    MBED_BARRIER();
    return currentValue;
}
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
{
    MBED_BARRIER();
    do {
        uint8_t currentValue = __LDREXB(ptr);
        if (currentValue != *expectedCurrentValue) {
            *expectedCurrentValue = currentValue;
            __CLREX();
            return false;
        }
    } while (__STREXB(desiredValue, ptr));
    MBED_BARRIER();
    return true;
}
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
{
    MBED_BARRIER();
    do {
        uint16_t currentValue = __LDREXH(ptr);
        if (currentValue != *expectedCurrentValue) {
            *expectedCurrentValue = currentValue;
            __CLREX();
            return false;
        }
    } while (__STREXH(desiredValue, ptr));
    MBED_BARRIER();
    return true;
}
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
    MBED_BARRIER();
    do {
        uint32_t currentValue = __LDREXW(ptr);
        if (currentValue != *expectedCurrentValue) {
            *expectedCurrentValue = currentValue;
            __CLREX();
            return false;
        }
    } while (__STREXW(desiredValue, ptr));
    MBED_BARRIER();
    return true;
}
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
{
    MBED_BARRIER();
    uint8_t currentValue;
    do {
        currentValue = __LDREXB(valuePtr);
    } while (__STREXB(desiredValue, valuePtr));
    MBED_BARRIER();
    return currentValue;
}
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
{
    MBED_BARRIER();
    uint16_t currentValue;
    do {
        currentValue = __LDREXH(valuePtr);
    } while (__STREXH(desiredValue, valuePtr));
    MBED_BARRIER();
    return currentValue;
}
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
{
    MBED_BARRIER();
    uint32_t currentValue;
    do {
        currentValue = __LDREXW(valuePtr);
    } while (__STREXW(desiredValue, valuePtr));
    MBED_BARRIER();
    return currentValue;
}
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
    MBED_BARRIER();
    uint8_t newValue;
    do {
        newValue = __LDREXB(valuePtr) + delta;
    } while (__STREXB(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
    MBED_BARRIER();
    uint16_t newValue;
    do {
        newValue = __LDREXH(valuePtr) + delta;
    } while (__STREXH(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
    MBED_BARRIER();
    uint32_t newValue;
    do {
        newValue = __LDREXW(valuePtr) + delta;
    } while (__STREXW(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
    MBED_BARRIER();
    uint8_t newValue;
    do {
        newValue = __LDREXB(valuePtr) - delta;
    } while (__STREXB(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
    MBED_BARRIER();
    uint16_t newValue;
    do {
        newValue = __LDREXH(valuePtr) - delta;
    } while (__STREXH(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
    MBED_BARRIER();
    uint32_t newValue;
    do {
        newValue = __LDREXW(valuePtr) - delta;
    } while (__STREXW(newValue, valuePtr));
    MBED_BARRIER();
    return newValue;
}
#else
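/* No exclusive-access instructions (e.g. ARMv6-M): emulate atomicity by
 * performing a plain read-modify-write inside a brief critical section with
 * interrupts disabled. */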
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    core_util_critical_section_enter();
    uint8_t currentValue = flagPtr->_flag;
    flagPtr->_flag = true;
    core_util_critical_section_exit();
    return currentValue;
}
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
{
    bool success;
    uint8_t currentValue;
    core_util_critical_section_enter();
    currentValue = *ptr;
    if (currentValue == *expectedCurrentValue) {
        *ptr = desiredValue;
        success = true;
    } else {
        *expectedCurrentValue = currentValue;
        success = false;
    }
    core_util_critical_section_exit();
    return success;
}
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
{
    bool success;
    uint16_t currentValue;
    core_util_critical_section_enter();
    currentValue = *ptr;
    if (currentValue == *expectedCurrentValue) {
        *ptr = desiredValue;
        success = true;
    } else {
        *expectedCurrentValue = currentValue;
        success = false;
    }
    core_util_critical_section_exit();
    return success;
}
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
{
    bool success;
    uint32_t currentValue;
    core_util_critical_section_enter();
    currentValue = *ptr;
    if (currentValue == *expectedCurrentValue) {
        *ptr = desiredValue;
        success = true;
    } else {
        *expectedCurrentValue = currentValue;
        success = false;
    }
    core_util_critical_section_exit();
    return success;
}
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *ptr, uint8_t desiredValue)
{
    core_util_critical_section_enter();
    uint8_t currentValue = *ptr;
    *ptr = desiredValue;
    core_util_critical_section_exit();
    return currentValue;
}
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *ptr, uint16_t desiredValue)
{
    core_util_critical_section_enter();
    uint16_t currentValue = *ptr;
    *ptr = desiredValue;
    core_util_critical_section_exit();
    return currentValue;
}
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *ptr, uint32_t desiredValue)
{
    core_util_critical_section_enter();
    uint32_t currentValue = *ptr;
    *ptr = desiredValue;
    core_util_critical_section_exit();
    return currentValue;
}
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
    uint8_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr + delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
    uint16_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr + delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
    uint32_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr + delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta)
{
    uint8_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr - delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
{
    uint16_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr - delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
{
    uint32_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr - delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
#endif
/* No architecture we support has LDREXD/STREXD, so we must always disable IRQs for 64-bit operations */
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
{
    core_util_critical_section_enter();
    uint64_t currentValue = *valuePtr;
    core_util_critical_section_exit();
    return currentValue;
}
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
    core_util_critical_section_enter();
    *valuePtr = desiredValue;
    core_util_critical_section_exit();
}
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
    core_util_critical_section_enter();
    uint64_t currentValue = *valuePtr;
    *valuePtr = desiredValue;
    core_util_critical_section_exit();
    return currentValue;
}
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
{
    bool success;
    uint64_t currentValue;
    core_util_critical_section_enter();
    currentValue = *ptr;
    if (currentValue == *expectedCurrentValue) {
        *ptr = desiredValue;
        success = true;
    } else {
        *expectedCurrentValue = currentValue;
        success = false;
    }
    core_util_critical_section_exit();
    return success;
}
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
    uint64_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr + delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta)
{
    uint64_t newValue;
    core_util_critical_section_enter();
    newValue = *valuePtr - delta;
    *valuePtr = newValue;
    core_util_critical_section_exit();
    return newValue;
}
MBED_STATIC_ASSERT(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit");
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
{
    return core_util_atomic_cas_u32(
               (volatile uint32_t *)ptr,
               (uint32_t *)expectedCurrentValue,
               (uint32_t)desiredValue);
}
void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
{
    return (void *)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
}
void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
    return (void *)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
}
void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
{
    return (void *)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
}

View File

@ -20,9 +20,6 @@
#define __MBED_UTIL_CRITICAL_H__
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "mbed_toolchain.h"
#ifdef __cplusplus
extern "C" {
@ -92,658 +89,11 @@ bool core_util_in_critical_section(void);
/**@}*/
/**
* \defgroup platform_atomic atomic functions
*
* Atomic functions behave analogously to their C11 and C++11 counterparts - loads have
* acquire semantics, stores have release semantics, and atomic operations
* are sequentially consistent. Atomicity is enforced both between threads and
* interrupt handlers.
*
* @{
*/
/**
* A lock-free, primitive atomic flag.
*
* Emulates C11's atomic_flag. The flag is initially in an indeterminate state
* unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT.
*/
typedef struct core_util_atomic_flag {
uint8_t _flag;
} core_util_atomic_flag;
/**
* Initializer for a core_util_atomic_flag.
*
* Example:
* ~~~
* core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT;
* ~~~
*/
#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 }
/**
* Atomic test and set.
*
* Atomically tests then sets the flag to true, returning the previous value.
*
* @param flagPtr Target flag being tested and set.
* @return The previous value.
*/
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr);
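/* Usage sketch (illustrative): a one-shot guard; in_progress is the flag from
 * the initializer example above, and do_work() is hypothetical.
 *
 *     if (!core_util_atomic_flag_test_and_set(&in_progress)) {
 *         do_work();
 *         core_util_atomic_flag_clear(&in_progress);
 *     }
 */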
/**
* Atomic clear.
*
* @param flagPtr Target flag being cleared.
*/
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
{
    MBED_BARRIER();
    flagPtr->_flag = false;
    MBED_BARRIER();
}
/**
* Atomic compare and set. It compares the contents of a memory location to a
* given value and, only if they are the same, modifies the contents of that
* memory location to a given new value. This is done as a single atomic
* operation. The atomicity guarantees that the new value is calculated based on
* up-to-date information; if the value had been updated by another thread in
* the meantime, the write would fail due to a mismatched expectedCurrentValue.
*
* Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
* you to the article on compare-and-swap].
*
* @param ptr The target memory location.
* @param[in,out] expectedCurrentValue A pointer to some location holding the
* expected current value of the data being set atomically.
* The computed 'desiredValue' should be a function of this current value.
* @note: This is an in-out parameter. In the
* failure case of atomic_cas (where the
* destination isn't set), the pointee of expectedCurrentValue is
* updated with the current value.
* @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
*
* @return true if the memory location was atomically
* updated with the desired value (after verifying
* that it contained the expectedCurrentValue),
* false otherwise. In the failure case,
* expectedCurrentValue is updated with the new
* value of the target memory location.
*
* pseudocode:
* function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
*     if *p != *old {
*         *old = *p
*         return false
*     }
*     *p = new
*     return true
* }
*
* @note: In the failure case (where the destination isn't set), the value
* pointed to by expectedCurrentValue is instead updated with the current value.
* This property helps write concise code for the following incr:
*
* function incr(p : pointer to int, a : int) returns int {
*     done = false
*     value = *p // This fetch operation need not be atomic.
*     while not done {
*         done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success
*     }
*     return value + a
* }
*
* @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it
* always succeeds if the current value is expected, as per the pseudocode
* above; it will not spuriously fail as "atomic_compare_exchange_weak" may.
*/
bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue);
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue)
{
    return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, (uint8_t)desiredValue);
}
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue)
{
    return core_util_atomic_cas_u16((volatile uint16_t *)ptr, (uint16_t *)expectedCurrentValue, (uint16_t)desiredValue);
}
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue)
{
    return core_util_atomic_cas_u32((volatile uint32_t *)ptr, (uint32_t *)expectedCurrentValue, (uint32_t)desiredValue);
}
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue)
{
    return core_util_atomic_cas_u64((volatile uint64_t *)ptr, (uint64_t *)expectedCurrentValue, (uint64_t)desiredValue);
}
/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
{
    return core_util_atomic_cas_u8((volatile uint8_t *)ptr, (uint8_t *)expectedCurrentValue, desiredValue);
}
/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);
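/* Sketch (illustrative): the incr pseudocode above, written against the real
 * u32 API (total and delta are hypothetical):
 *
 *     uint32_t value = core_util_atomic_load_u32(&total);
 *     while (!core_util_atomic_cas_u32(&total, &value, value + delta)) {
 *         // value has been refreshed with the current contents; retry
 *     }
 */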
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr)
{
    uint8_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr)
{
    uint16_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
{
    uint32_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr);
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr)
{
    int8_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr)
{
    int16_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr)
{
    int32_t value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr)
{
    return (int64_t)core_util_atomic_load_u64((const volatile uint64_t *)valuePtr);
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr)
{
    bool value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic load.
* @param valuePtr Target memory location.
* @return The loaded value.
*/
MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr)
{
    void *value = *valuePtr;
    MBED_BARRIER();
    return value;
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue)
{
    core_util_atomic_store_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic store.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
*/
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue)
{
    MBED_BARRIER();
    *valuePtr = desiredValue;
    MBED_BARRIER();
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue);
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue);
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue);
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue)
{
    return (int8_t)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, (uint8_t)desiredValue);
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue)
{
    return (int16_t)core_util_atomic_exchange_u16((volatile uint16_t *)valuePtr, (uint16_t)desiredValue);
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue)
{
    return (int32_t)core_util_atomic_exchange_u32((volatile uint32_t *)valuePtr, (uint32_t)desiredValue);
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue)
{
    return (int64_t)core_util_atomic_exchange_u64((volatile uint64_t *)valuePtr, (uint64_t)desiredValue);
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue)
{
    return (bool)core_util_atomic_exchange_u8((volatile uint8_t *)valuePtr, desiredValue);
}
/**
* Atomic exchange.
* @param valuePtr Target memory location.
* @param desiredValue The value to store.
* @return The previous value.
*/
void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta);
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta)
{
    return (int8_t)core_util_atomic_incr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta);
}
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta)
{
    return (int16_t)core_util_atomic_incr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta);
}
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta)
{
    return (int32_t)core_util_atomic_incr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
}
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented.
* @return The new incremented value.
*/
MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta)
{
    return (int64_t)core_util_atomic_incr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
}
/**
* Atomic increment.
* @param valuePtr Target memory location being incremented.
* @param delta The amount being incremented in bytes.
* @return The new incremented value.
*
* @note The type of the pointer argument is not taken into account
* and the pointer is incremented by bytes.
*/
void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
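/* Sketch (illustrative): because the increment is in bytes, stepping a
 * uint32_t pointer forward by one element needs an explicit sizeof
 * (p is a hypothetical uint32_t *volatile):
 *
 *     core_util_atomic_incr_ptr((void *volatile *)&p, sizeof(uint32_t));
 */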
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta);
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta);
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta);
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta);
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta)
{
    return (int8_t)core_util_atomic_decr_u8((volatile uint8_t *)valuePtr, (uint8_t)delta);
}
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta)
{
    return (int16_t)core_util_atomic_decr_u16((volatile uint16_t *)valuePtr, (uint16_t)delta);
}
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta)
{
    return (int32_t)core_util_atomic_decr_u32((volatile uint32_t *)valuePtr, (uint32_t)delta);
}
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented.
* @return The new decremented value.
*/
MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta)
{
    return (int64_t)core_util_atomic_decr_u64((volatile uint64_t *)valuePtr, (uint64_t)delta);
}
/**
* Atomic decrement.
* @param valuePtr Target memory location being decremented.
* @param delta The amount being decremented in bytes.
* @return The new decremented value.
*
* @note The type of the pointer argument is not taken into account
* and the pointer is decremented by bytes.
*/
void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
/**@}*/
#ifdef __cplusplus
} // extern "C"
#endif
/**@}*/
/**@}*/
#endif // __MBED_UTIL_CRITICAL_H__

View File

@ -20,6 +20,7 @@
#include "device.h"
#include "platform/mbed_crash_data_offsets.h"
#include "platform/mbed_retarget.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_error.h"
#include "platform/mbed_error_hist.h"

View File

@ -27,6 +27,7 @@
#include "platform/PlatformMutex.h"
#include "platform/mbed_error.h"
#include "platform/mbed_stats.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_poll.h"
#include "platform/PlatformMutex.h"

View File

@ -17,6 +17,7 @@
#include "mbed_power_mgmt.h"
#include "mbed_interface.h"
#include "mbed_atomic.h"
#include "mbed_critical.h"
#include "mbed_assert.h"
#include "mbed_error.h"

View File

@ -50,6 +50,7 @@
#include "nrf_drv_gpiote.h"
#include "PeripheralPins.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"
#if UART0_ENABLED == 0

View File

@ -17,6 +17,7 @@
#include "cmsis.h"
#include "mbed_assert.h"
#include "mbed_atomic.h"
#include "mbed_critical.h"
#include "mbed_error.h"
#include <limits.h>

View File

@ -17,6 +17,7 @@
#include "cmsis.h"
#include "mbed_assert.h"
#include "mbed_atomic.h"
#include "mbed_critical.h"
#include "mbed_error.h"
#include <limits.h>

View File

@ -17,6 +17,7 @@
#include "cmsis.h"
#include "mbed_assert.h"
#include "mbed_atomic.h"
#include "mbed_critical.h"
#include "mbed_error.h"
#include <limits.h>

View File

@ -15,6 +15,7 @@
*/
#include "flash_api.h"
#include "mbed_toolchain.h"
#include "mbed_critical.h"
#if DEVICE_FLASH

View File

@ -24,7 +24,7 @@
#include "cmsis.h"
#include "trng_api.h"
#include "mbed_error.h"
#include "mbed_critical.h"
#include "mbed_atomic.h"
#if defined (TARGET_STM32WB)
/* Family specific include for WB with HW semaphores */
#include "hw.h"

View File

@ -18,6 +18,7 @@
#ifndef USBDEVICE_H
#define USBDEVICE_H
#include <stddef.h>
#include "USBDevice_Types.h"
#include "USBPhy.h"
#include "mbed_critical.h"