Merge pull request #10366 from kjbracey-arm/feature_CMSIS_5_ca812421

Update CMSIS to 5.5.1
pull/10385/head
Cruz Monrreal 2019-04-11 20:34:00 -05:00 committed by GitHub
commit 582edf503a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
51 changed files with 8867 additions and 1544 deletions

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_armclang.h * @file cmsis_armclang.h
* @brief CMSIS compiler specific macros, functions, instructions * @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2 * @version V1.1.0
* @date 10. January 2018 * @date 18. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -214,7 +214,23 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
\param [in] value Value to count the leading zeros \param [in] value Value to count the leading zeros
\return number of leading zeros in value \return number of leading zeros in value
*/ */
#define __CLZ (uint8_t)__builtin_clz __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if happening to compile on a non-ARM
target, and ensures the compiler doesn't decide to activate any
optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
/** /**
\brief LDR Exclusive (8 bit) \brief LDR Exclusive (8 bit)
@ -375,8 +391,8 @@ __STATIC_FORCEINLINE uint32_t __get_SP_usr()
"MRS %0, cpsr \n" "MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode "CPS #0x1F \n" // no effect in USR mode
"MOV %1, sp \n" "MOV %1, sp \n"
"MSR cpsr_c, %2 \n" // no effect in USR mode "MSR cpsr_c, %0 \n" // no effect in USR mode
"ISB" : "=r"(cpsr), "=r"(result) : "r"(cpsr) : "memory" "ISB" : "=r"(cpsr), "=r"(result) : : "memory"
); );
return result; return result;
} }
@ -391,8 +407,8 @@ __STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
"MRS %0, cpsr \n" "MRS %0, cpsr \n"
"CPS #0x1F \n" // no effect in USR mode "CPS #0x1F \n" // no effect in USR mode
"MOV sp, %1 \n" "MOV sp, %1 \n"
"MSR cpsr_c, %2 \n" // no effect in USR mode "MSR cpsr_c, %0 \n" // no effect in USR mode
"ISB" : "=r"(cpsr) : "r" (topOfProcStack), "r"(cpsr) : "memory" "ISB" : "=r"(cpsr) : "r" (topOfProcStack) : "memory"
); );
} }

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_gcc.h * @file cmsis_gcc.h
* @brief CMSIS compiler specific macros, functions, instructions * @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.2 * @version V1.1.0
* @date 09. April 2018 * @date 20. December 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -171,7 +171,7 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
#else #else
uint32_t result; uint32_t result;
__ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); __ASM volatile ("rev %0, %1" : "=r" (result) : "r" (value) );
return result; return result;
#endif #endif
} }
@ -204,7 +204,7 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
#else #else
int16_t result; int16_t result;
__ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); __ASM volatile ("revsh %0, %1" : "=r" (result) : "r" (value) );
return result; return result;
#endif #endif
} }
@ -267,7 +267,23 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
\param [in] value Value to count the leading zeros \param [in] value Value to count the leading zeros
\return number of leading zeros in value \return number of leading zeros in value
*/ */
#define __CLZ (uint8_t)__builtin_clz __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if happening to compile on a non-ARM
target, and ensures the compiler doesn't decide to activate any
optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
/** /**
\brief LDR Exclusive (8 bit) \brief LDR Exclusive (8 bit)

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_iccarm.h * @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
* @version V5.0.6 * @version V5.0.7
 * @date 02. March 2018 * @date 04. September 2018
******************************************************************************/ ******************************************************************************/
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -109,7 +109,12 @@
#endif #endif
#ifndef __RESTRICT #ifndef __RESTRICT
#define __RESTRICT restrict #if __ICCARM_V8
#define __RESTRICT __restrict
#else
/* Needs IAR language extensions */
#define __RESTRICT restrict
#endif
#endif #endif
#ifndef __STATIC_INLINE #ifndef __STATIC_INLINE

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_ca.h * @file core_ca.h
* @brief CMSIS Cortex-A Core Peripheral Access Layer Header File * @brief CMSIS Cortex-A Core Peripheral Access Layer Header File
* @version V1.0.1 * @version V1.0.2
* @date 07. May 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2017 ARM Limited. All rights reserved. * Copyright (c) 2009-2018 ARM Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -28,13 +28,12 @@
#pragma clang system_header /* treat file as system include file */ #pragma clang system_header /* treat file as system include file */
#endif #endif
#ifdef __cplusplus
extern "C" {
#endif
#ifndef __CORE_CA_H_GENERIC #ifndef __CORE_CA_H_GENERIC
#define __CORE_CA_H_GENERIC #define __CORE_CA_H_GENERIC
#ifdef __cplusplus
extern "C" {
#endif
/******************************************************************************* /*******************************************************************************
* CMSIS definitions * CMSIS definitions

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_armcc.h * @file cmsis_armcc.h
* @brief CMSIS compiler ARMCC (Arm Compiler 5) header file * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file
* @version V5.0.4 * @version V5.0.5
* @date 10. January 2018 * @date 14. December 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -47,6 +47,10 @@
/* __ARM_ARCH_8M_BASE__ not applicable */ /* __ARM_ARCH_8M_BASE__ not applicable */
/* __ARM_ARCH_8M_MAIN__ not applicable */ /* __ARM_ARCH_8M_MAIN__ not applicable */
/* CMSIS compiler control DSP macros */
#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __ARM_FEATURE_DSP 1
#endif
/* CMSIS compiler specific defines */ /* CMSIS compiler specific defines */
#ifndef __ASM #ifndef __ASM

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_armclang.h * @file cmsis_armclang.h
* @brief CMSIS compiler armclang (Arm Compiler 6) header file * @brief CMSIS compiler armclang (Arm Compiler 6) header file
* @version V5.0.4 * @version V5.1.0
* @date 10. January 2018 * @date 14. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -43,9 +43,9 @@
#ifndef __STATIC_INLINE #ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline #define __STATIC_INLINE static __inline
#endif #endif
#ifndef __STATIC_FORCEINLINE #ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline #define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
#endif #endif
#ifndef __NO_RETURN #ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__)) #define __NO_RETURN __attribute__((__noreturn__))
#endif #endif
@ -781,9 +781,11 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
* Otherwise, use general registers, specified by constraint "r" */ * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__) #if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r) #define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_RW_REG(r) "+l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r) #define __CMSIS_GCC_USE_REG(r) "l" (r)
#else #else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r) #define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_RW_REG(r) "+r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r) #define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif #endif
@ -821,14 +823,14 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
so that all instructions following the ISB are fetched from cache or memory, so that all instructions following the ISB are fetched from cache or memory,
after the instruction has been completed. after the instruction has been completed.
*/ */
#define __ISB() __builtin_arm_isb(0xF); #define __ISB() __builtin_arm_isb(0xF)
/** /**
\brief Data Synchronization Barrier \brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier. \details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete. It completes when all explicit memory accesses before this instruction complete.
*/ */
#define __DSB() __builtin_arm_dsb(0xF); #define __DSB() __builtin_arm_dsb(0xF)
/** /**
@ -836,7 +838,7 @@ __STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
\details Ensures the apparent order of the explicit memory operations before \details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion. and after the instruction, without ensuring their completion.
*/ */
#define __DMB() __builtin_arm_dmb(0xF); #define __DMB() __builtin_arm_dmb(0xF)
/** /**
@ -908,7 +910,23 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
\param [in] value Value to count the leading zeros \param [in] value Value to count the leading zeros
\return number of leading zeros in value \return number of leading zeros in value
*/ */
#define __CLZ (uint8_t)__builtin_clz __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if happening to compile on a non-ARM
target, and ensures the compiler doesn't decide to activate any
optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
@ -1321,532 +1339,65 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
#if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) #if (defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1))
__STATIC_FORCEINLINE uint32_t __SADD8(uint32_t op1, uint32_t op2) #define __SADD8 __builtin_arm_sadd8
{ #define __QADD8 __builtin_arm_qadd8
uint32_t result; #define __SHADD8 __builtin_arm_shadd8
#define __UADD8 __builtin_arm_uadd8
__ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __UQADD8 __builtin_arm_uqadd8
return(result); #define __UHADD8 __builtin_arm_uhadd8
} #define __SSUB8 __builtin_arm_ssub8
#define __QSUB8 __builtin_arm_qsub8
__STATIC_FORCEINLINE uint32_t __QADD8(uint32_t op1, uint32_t op2) #define __SHSUB8 __builtin_arm_shsub8
{ #define __USUB8 __builtin_arm_usub8
uint32_t result; #define __UQSUB8 __builtin_arm_uqsub8
#define __UHSUB8 __builtin_arm_uhsub8
__ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __SADD16 __builtin_arm_sadd16
return(result); #define __QADD16 __builtin_arm_qadd16
} #define __SHADD16 __builtin_arm_shadd16
#define __UADD16 __builtin_arm_uadd16
__STATIC_FORCEINLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2) #define __UQADD16 __builtin_arm_uqadd16
{ #define __UHADD16 __builtin_arm_uhadd16
uint32_t result; #define __SSUB16 __builtin_arm_ssub16
#define __QSUB16 __builtin_arm_qsub16
__ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __SHSUB16 __builtin_arm_shsub16
return(result); #define __USUB16 __builtin_arm_usub16
} #define __UQSUB16 __builtin_arm_uqsub16
#define __UHSUB16 __builtin_arm_uhsub16
__STATIC_FORCEINLINE uint32_t __UADD8(uint32_t op1, uint32_t op2) #define __SASX __builtin_arm_sasx
{ #define __QASX __builtin_arm_qasx
uint32_t result; #define __SHASX __builtin_arm_shasx
#define __UASX __builtin_arm_uasx
__ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __UQASX __builtin_arm_uqasx
return(result); #define __UHASX __builtin_arm_uhasx
} #define __SSAX __builtin_arm_ssax
#define __QSAX __builtin_arm_qsax
__STATIC_FORCEINLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2) #define __SHSAX __builtin_arm_shsax
{ #define __USAX __builtin_arm_usax
uint32_t result; #define __UQSAX __builtin_arm_uqsax
#define __UHSAX __builtin_arm_uhsax
__ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __USAD8 __builtin_arm_usad8
return(result); #define __USADA8 __builtin_arm_usada8
} #define __SSAT16 __builtin_arm_ssat16
#define __USAT16 __builtin_arm_usat16
__STATIC_FORCEINLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2) #define __UXTB16 __builtin_arm_uxtb16
{ #define __UXTAB16 __builtin_arm_uxtab16
uint32_t result; #define __SXTB16 __builtin_arm_sxtb16
#define __SXTAB16 __builtin_arm_sxtab16
__ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __SMUAD __builtin_arm_smuad
return(result); #define __SMUADX __builtin_arm_smuadx
} #define __SMLAD __builtin_arm_smlad
#define __SMLADX __builtin_arm_smladx
#define __SMLALD __builtin_arm_smlald
__STATIC_FORCEINLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2) #define __SMLALDX __builtin_arm_smlaldx
{ #define __SMUSD __builtin_arm_smusd
uint32_t result; #define __SMUSDX __builtin_arm_smusdx
#define __SMLSD __builtin_arm_smlsd
__ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) ); #define __SMLSDX __builtin_arm_smlsdx
return(result); #define __SMLSLD __builtin_arm_smlsld
} #define __SMLSLDX __builtin_arm_smlsldx
#define __SEL __builtin_arm_sel
__STATIC_FORCEINLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2) #define __QADD __builtin_arm_qadd
{ #define __QSUB __builtin_arm_qsub
uint32_t result;
__ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
#define __SSAT16(ARG1,ARG2) \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__RES; \
})
#define __USAT16(ARG1,ARG2) \
({ \
uint32_t __RES, __ARG1 = (ARG1); \
__ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__RES; \
})
__STATIC_FORCEINLINE uint32_t __UXTB16(uint32_t op1)
{
uint32_t result;
__ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
return(result);
}
__STATIC_FORCEINLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1)
{
uint32_t result;
__ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
return(result);
}
__STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
uint32_t result;
__ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
return(result);
}
__STATIC_FORCEINLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
union llreg_u{
uint32_t w32[2];
uint64_t w64;
} llr;
llr.w64 = acc;
#ifndef __ARMEB__ /* Little endian */
__ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else /* Big endian */
__ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif
return(llr.w64);
}
__STATIC_FORCEINLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
{
uint32_t result;
__ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE int32_t __QADD( int32_t op1, int32_t op2)
{
int32_t result;
__ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
__STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
int32_t result;
__ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
return(result);
}
#if 0
#define __PKHBT(ARG1,ARG2,ARG3) \
({ \
uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
__ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
__RES; \
})
#define __PKHTB(ARG1,ARG2,ARG3) \
({ \
uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
if (ARG3 == 0) \
__ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
else \
__ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
__RES; \
})
#endif
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_compiler.h * @file cmsis_compiler.h
* @brief CMSIS compiler generic header file * @brief CMSIS compiler generic header file
* @version V5.0.4 * @version V5.1.0
* @date 10. January 2018 * @date 09. October 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -35,9 +35,15 @@
/* /*
* Arm Compiler 6 (armclang) * Arm Compiler 6.6 LTM (armclang)
*/ */
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) #elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100)
#include "cmsis_armclang_ltm.h"
/*
* Arm Compiler above 6.10.1 (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100)
#include "cmsis_armclang.h" #include "cmsis_armclang.h"
@ -115,8 +121,7 @@
#define __ALIGNED(x) __attribute__((aligned(x))) #define __ALIGNED(x) __attribute__((aligned(x)))
#endif #endif
#ifndef __RESTRICT #ifndef __RESTRICT
#warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored. #define __RESTRICT __restrict
#define __RESTRICT
#endif #endif

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_gcc.h * @file cmsis_gcc.h
* @brief CMSIS compiler GCC header file * @brief CMSIS compiler GCC header file
* @version V5.0.4 * @version V5.1.0
* @date 09. April 2018 * @date 20. December 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -1008,7 +1008,23 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
\param [in] value Value to count the leading zeros \param [in] value Value to count the leading zeros
\return number of leading zeros in value \return number of leading zeros in value
*/ */
#define __CLZ (uint8_t)__builtin_clz __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value)
{
/* Even though __builtin_clz produces a CLZ instruction on ARM, formally
__builtin_clz(0) is undefined behaviour, so handle this case specially.
This guarantees ARM-compatible results if happening to compile on a non-ARM
target, and ensures the compiler doesn't decide to activate any
optimisations using the logic "value was passed to __builtin_clz, so it
is non-zero".
ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a
single CLZ instruction.
*/
if (value == 0U)
{
return 32U;
}
return __builtin_clz(value);
}
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file cmsis_iccarm.h * @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
* @version V5.0.7 * @version V5.0.8
* @date 19. June 2018 * @date 04. September 2018
******************************************************************************/ ******************************************************************************/
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
@ -150,7 +150,12 @@
#endif #endif
#ifndef __RESTRICT #ifndef __RESTRICT
#define __RESTRICT restrict #if __ICCARM_V8
#define __RESTRICT __restrict
#else
/* Needs IAR language extensions */
#define __RESTRICT restrict
#endif
#endif #endif
#ifndef __STATIC_INLINE #ifndef __STATIC_INLINE

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_armv8mbl.h * @file core_armv8mbl.h
* @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File * @brief CMSIS Armv8-M Baseline Core Peripheral Access Layer Header File
* @version V5.0.7 * @version V5.0.8
* @date 22. June 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -1223,7 +1223,7 @@ typedef struct
#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */
#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */
#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */
#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */
/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_armv8mml.h * @file core_armv8mml.h
* @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File
* @version V5.0.7 * @version V5.1.0
* @date 06. July 2018 * @date 12. September 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -538,14 +538,6 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
uint32_t RESERVED7[6U];
__IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */
__IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */
__IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */
__IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */
__IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */
uint32_t RESERVED8[1U];
__IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */
} SCB_Type; } SCB_Type;
/* SCB CPUID Register Definitions */ /* SCB CPUID Register Definitions */
@ -921,78 +913,6 @@ typedef struct
#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ #define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */
#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ #define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */
/* Instruction Tightly-Coupled Memory Control Register Definitions */
#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */
#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */
#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */
#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */
#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */
#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */
#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */
#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */
/* Data Tightly-Coupled Memory Control Register Definitions */
#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */
#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */
#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */
#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */
#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */
#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */
#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */
#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */
/* AHBP Control Register Definitions */
#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */
#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */
#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */
#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */
/* L1 Cache Control Register Definitions */
#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */
#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */
#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */
#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */
#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */
#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */
/* AHBS Control Register Definitions */
#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */
#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */
#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */
#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */
#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/
#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */
/* Auxiliary Bus Fault Status Register Definitions */
#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/
#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */
#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/
#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */
#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/
#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */
#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/
#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */
#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/
#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */
#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/
#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */
/*@} end of group CMSIS_SCB */ /*@} end of group CMSIS_SCB */
@ -1097,10 +1017,7 @@ typedef struct
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */
uint32_t RESERVED2[15U]; uint32_t RESERVED2[15U];
__IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */
uint32_t RESERVED3[29U]; uint32_t RESERVED3[32U];
__OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */
__IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */
__IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */
uint32_t RESERVED4[43U]; uint32_t RESERVED4[43U];
__OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */
__IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */
@ -1163,18 +1080,6 @@ typedef struct
#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */
#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */
/* ITM Integration Write Register Definitions */
#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */
#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */
/* ITM Integration Read Register Definitions */
#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */
#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */
/* ITM Integration Mode Control Register Definitions */
#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */
#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */
/* ITM Lock Status Register Definitions */ /* ITM Lock Status Register Definitions */
#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */
#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */
@ -2093,7 +1998,7 @@ typedef struct
#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */
#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */
#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */
#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */
/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */
@ -2122,7 +2027,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup)
reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */
reg_value = (reg_value | reg_value = (reg_value |
((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
(PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */
SCB->AIRCR = reg_value; SCB->AIRCR = reg_value;
} }
@ -2496,7 +2401,7 @@ __STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup)
reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */
reg_value = (reg_value | reg_value = (reg_value |
((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
(PriorityGroupTmp << 8U) ); /* Insert write key and priorty group */ (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */
SCB_NS->AIRCR = reg_value; SCB_NS->AIRCR = reg_value;
} }

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm0.h * @file core_cm0.h
* @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File
* @version V5.0.5 * @version V5.0.6
* @date 28. May 2018 * @date 13. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -316,7 +316,7 @@ typedef struct
__IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[31U]; uint32_t RESERVED0[31U];
__IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RSERVED1[31U]; uint32_t RESERVED1[31U];
__IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[31U]; uint32_t RESERVED2[31U];
__IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
@ -829,8 +829,8 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr
*/ */
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{ {
uint32_t *vectors = (uint32_t *)0x0U; uint32_t vectors = 0x0U;
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
} }
@ -844,8 +844,8 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
*/ */
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{ {
uint32_t *vectors = (uint32_t *)0x0U; uint32_t vectors = 0x0U;
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
} }

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm0plus.h * @file core_cm0plus.h
* @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M0+ Core Peripheral Access Layer Header File
* @version V5.0.6 * @version V5.0.7
* @date 28. May 2018 * @date 13. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -330,7 +330,7 @@ typedef struct
__IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ __IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[31U]; uint32_t RESERVED0[31U];
__IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ __IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RSERVED1[31U]; uint32_t RESERVED1[31U];
__IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ __IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[31U]; uint32_t RESERVED2[31U];
__IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ __IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
@ -948,11 +948,11 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{ {
#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = SCB->VTOR;
#else #else
uint32_t *vectors = (uint32_t *)0x0U; uint32_t vectors = 0x0U;
#endif #endif
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
} }
@ -967,12 +967,11 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{ {
#if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U) #if defined (__VTOR_PRESENT) && (__VTOR_PRESENT == 1U)
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = SCB->VTOR;
#else #else
uint32_t *vectors = (uint32_t *)0x0U; uint32_t vectors = 0x0U;
#endif #endif
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
} }

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm1.h * @file core_cm1.h
* @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File
* @version V1.0.0 * @version V1.0.1
* @date 23. July 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm23.h * @file core_cm23.h
* @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M23 Core Peripheral Access Layer Header File
* @version V5.0.7 * @version V5.0.8
* @date 22. June 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -1298,7 +1298,7 @@ typedef struct
#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */
#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */
#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */
#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */
/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm3.h * @file core_cm3.h
* @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File
* @version V5.0.8 * @version V5.1.0
* @date 04. June 2018 * @date 13. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -342,7 +342,7 @@ typedef struct
__IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[24U]; uint32_t RESERVED0[24U];
__IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RSERVED1[24U]; uint32_t RESERVED1[24U];
__IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[24U]; uint32_t RESERVED2[24U];
__IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
@ -669,6 +669,12 @@ typedef struct
/* Auxiliary Control Register Definitions */ /* Auxiliary Control Register Definitions */
#if defined (__CM3_REV) && (__CM3_REV >= 0x200U) #if defined (__CM3_REV) && (__CM3_REV >= 0x200U)
#define SCnSCB_ACTLR_DISOOFP_Pos 9U /*!< ACTLR: DISOOFP Position */
#define SCnSCB_ACTLR_DISOOFP_Msk (1UL << SCnSCB_ACTLR_DISOOFP_Pos) /*!< ACTLR: DISOOFP Mask */
#define SCnSCB_ACTLR_DISFPCA_Pos 8U /*!< ACTLR: DISFPCA Position */
#define SCnSCB_ACTLR_DISFPCA_Msk (1UL << SCnSCB_ACTLR_DISFPCA_Pos) /*!< ACTLR: DISFPCA Mask */
#define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ #define SCnSCB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */
#define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ #define SCnSCB_ACTLR_DISFOLD_Msk (1UL << SCnSCB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */
@ -758,10 +764,7 @@ typedef struct
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */
uint32_t RESERVED2[15U]; uint32_t RESERVED2[15U];
__IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */
uint32_t RESERVED3[29U]; uint32_t RESERVED3[32U];
__OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */
__IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */
__IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */
uint32_t RESERVED4[43U]; uint32_t RESERVED4[43U];
__OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */
__IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */
@ -812,18 +815,6 @@ typedef struct
#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */
#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */
/* ITM Integration Write Register Definitions */
#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */
#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */
/* ITM Integration Read Register Definitions */
#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */
#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */
/* ITM Integration Mode Control Register Definitions */
#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */
#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */
/* ITM Lock Status Register Definitions */ /* ITM Lock Status Register Definitions */
#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */
#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */
@ -1056,13 +1047,13 @@ typedef struct
/* TPI Integration ETM Data Register Definitions (FIFO0) */ /* TPI Integration ETM Data Register Definitions (FIFO0) */
#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */
#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */
#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */
#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */
#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */
#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */
#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */
#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */
@ -1085,13 +1076,13 @@ typedef struct
/* TPI Integration ITM Data Register Definitions (FIFO1) */ /* TPI Integration ITM Data Register Definitions (FIFO1) */
#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */
#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */
#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */
#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */
#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */
#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */
#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */
#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */
@ -1736,8 +1727,8 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr
*/ */
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
} }
@ -1751,8 +1742,8 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
*/ */
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
} }
@ -1785,6 +1776,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
#endif #endif
/* ########################## FPU functions #################################### */ /* ########################## FPU functions #################################### */
/** /**
\ingroup CMSIS_Core_FunctionInterface \ingroup CMSIS_Core_FunctionInterface

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm33.h * @file core_cm33.h
* @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File
* @version V5.0.9 * @version V5.1.0
* @date 06. July 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.
@ -538,14 +538,6 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
uint32_t RESERVED7[6U];
__IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */
__IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */
__IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */
__IOM uint32_t CACR; /*!< Offset: 0x29C (R/W) L1 Cache Control Register */
__IOM uint32_t AHBSCR; /*!< Offset: 0x2A0 (R/W) AHB Slave Control Register */
uint32_t RESERVED8[1U];
__IOM uint32_t ABFSR; /*!< Offset: 0x2A8 (R/W) Auxiliary Bus Fault Status Register */
} SCB_Type; } SCB_Type;
/* SCB CPUID Register Definitions */ /* SCB CPUID Register Definitions */
@ -921,78 +913,6 @@ typedef struct
#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ #define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */
#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ #define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */
/* Instruction Tightly-Coupled Memory Control Register Definitions */
#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */
#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */
#define SCB_ITCMCR_RETEN_Pos 2U /*!< SCB ITCMCR: RETEN Position */
#define SCB_ITCMCR_RETEN_Msk (1UL << SCB_ITCMCR_RETEN_Pos) /*!< SCB ITCMCR: RETEN Mask */
#define SCB_ITCMCR_RMW_Pos 1U /*!< SCB ITCMCR: RMW Position */
#define SCB_ITCMCR_RMW_Msk (1UL << SCB_ITCMCR_RMW_Pos) /*!< SCB ITCMCR: RMW Mask */
#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */
#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */
/* Data Tightly-Coupled Memory Control Register Definitions */
#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */
#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */
#define SCB_DTCMCR_RETEN_Pos 2U /*!< SCB DTCMCR: RETEN Position */
#define SCB_DTCMCR_RETEN_Msk (1UL << SCB_DTCMCR_RETEN_Pos) /*!< SCB DTCMCR: RETEN Mask */
#define SCB_DTCMCR_RMW_Pos 1U /*!< SCB DTCMCR: RMW Position */
#define SCB_DTCMCR_RMW_Msk (1UL << SCB_DTCMCR_RMW_Pos) /*!< SCB DTCMCR: RMW Mask */
#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */
#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */
/* AHBP Control Register Definitions */
#define SCB_AHBPCR_SZ_Pos 1U /*!< SCB AHBPCR: SZ Position */
#define SCB_AHBPCR_SZ_Msk (7UL << SCB_AHBPCR_SZ_Pos) /*!< SCB AHBPCR: SZ Mask */
#define SCB_AHBPCR_EN_Pos 0U /*!< SCB AHBPCR: EN Position */
#define SCB_AHBPCR_EN_Msk (1UL /*<< SCB_AHBPCR_EN_Pos*/) /*!< SCB AHBPCR: EN Mask */
/* L1 Cache Control Register Definitions */
#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */
#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */
#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */
#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */
#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */
#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */
/* AHBS Control Register Definitions */
#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */
#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */
#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */
#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */
#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/
#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */
/* Auxiliary Bus Fault Status Register Definitions */
#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/
#define SCB_ABFSR_AXIMTYPE_Msk (3UL << SCB_ABFSR_AXIMTYPE_Pos) /*!< SCB ABFSR: AXIMTYPE Mask */
#define SCB_ABFSR_EPPB_Pos 4U /*!< SCB ABFSR: EPPB Position*/
#define SCB_ABFSR_EPPB_Msk (1UL << SCB_ABFSR_EPPB_Pos) /*!< SCB ABFSR: EPPB Mask */
#define SCB_ABFSR_AXIM_Pos 3U /*!< SCB ABFSR: AXIM Position*/
#define SCB_ABFSR_AXIM_Msk (1UL << SCB_ABFSR_AXIM_Pos) /*!< SCB ABFSR: AXIM Mask */
#define SCB_ABFSR_AHBP_Pos 2U /*!< SCB ABFSR: AHBP Position*/
#define SCB_ABFSR_AHBP_Msk (1UL << SCB_ABFSR_AHBP_Pos) /*!< SCB ABFSR: AHBP Mask */
#define SCB_ABFSR_DTCM_Pos 1U /*!< SCB ABFSR: DTCM Position*/
#define SCB_ABFSR_DTCM_Msk (1UL << SCB_ABFSR_DTCM_Pos) /*!< SCB ABFSR: DTCM Mask */
#define SCB_ABFSR_ITCM_Pos 0U /*!< SCB ABFSR: ITCM Position*/
#define SCB_ABFSR_ITCM_Msk (1UL /*<< SCB_ABFSR_ITCM_Pos*/) /*!< SCB ABFSR: ITCM Mask */
/*@} end of group CMSIS_SCB */ /*@} end of group CMSIS_SCB */
@ -1097,10 +1017,7 @@ typedef struct
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */
uint32_t RESERVED2[15U]; uint32_t RESERVED2[15U];
__IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */
uint32_t RESERVED3[29U]; uint32_t RESERVED3[32U];
__OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */
__IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */
__IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */
uint32_t RESERVED4[43U]; uint32_t RESERVED4[43U];
__OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */
__IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */
@ -1163,18 +1080,6 @@ typedef struct
#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */
#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */
/* ITM Integration Write Register Definitions */
#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */
#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */
/* ITM Integration Read Register Definitions */
#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */
#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */
/* ITM Integration Mode Control Register Definitions */
#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */
#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */
/* ITM Lock Status Register Definitions */ /* ITM Lock Status Register Definitions */
#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */
#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */
@ -2168,7 +2073,7 @@ typedef struct
#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ #define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */
#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ #define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ #define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */
#define EXC_RETURN_SPSEL (0x00000002UL) /* bit [1] stack pointer used to restore context: 0=MSP 1=PSP */ #define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */
#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ #define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */
/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ /* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */
@ -2197,7 +2102,7 @@ __STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup)
reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */
reg_value = (reg_value | reg_value = (reg_value |
((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
(PriorityGroupTmp << 8U) ); /* Insert write key and priority group */ (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */
SCB->AIRCR = reg_value; SCB->AIRCR = reg_value;
} }

File diff suppressed because it is too large Load Diff

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm4.h * @file core_cm4.h
* @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File
* @version V5.0.8 * @version V5.1.0
* @date 04. June 2018 * @date 13. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -408,7 +408,7 @@ typedef struct
__IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[24U]; uint32_t RESERVED0[24U];
__IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RSERVED1[24U]; uint32_t RESERVED1[24U];
__IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[24U]; uint32_t RESERVED2[24U];
__IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
@ -822,10 +822,7 @@ typedef struct
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */
uint32_t RESERVED2[15U]; uint32_t RESERVED2[15U];
__IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */
uint32_t RESERVED3[29U]; uint32_t RESERVED3[32U];
__OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */
__IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */
__IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */
uint32_t RESERVED4[43U]; uint32_t RESERVED4[43U];
__OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */
__IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */
@ -876,18 +873,6 @@ typedef struct
#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */
#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */
/* ITM Integration Write Register Definitions */
#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */
#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */
/* ITM Integration Read Register Definitions */
#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */
#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */
/* ITM Integration Mode Control Register Definitions */
#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */
#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */
/* ITM Lock Status Register Definitions */ /* ITM Lock Status Register Definitions */
#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */
#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */
@ -1120,13 +1105,13 @@ typedef struct
/* TPI Integration ETM Data Register Definitions (FIFO0) */ /* TPI Integration ETM Data Register Definitions (FIFO0) */
#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */
#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */
#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */
#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */
#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */
#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */
#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */
#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */
@ -1149,13 +1134,13 @@ typedef struct
/* TPI Integration ITM Data Register Definitions (FIFO1) */ /* TPI Integration ITM Data Register Definitions (FIFO1) */
#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */
#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */
#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */
#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */
#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */
#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */
#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */
#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */
@ -1324,6 +1309,7 @@ typedef struct
__IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */
__IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */ __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and FP Feature Register 0 */
__IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */ __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and FP Feature Register 1 */
__IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and FP Feature Register 2 */
} FPU_Type; } FPU_Type;
/* Floating-Point Context Control Register Definitions */ /* Floating-Point Context Control Register Definitions */
@ -1409,6 +1395,11 @@ typedef struct
#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ #define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */
#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ #define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */
/* Media and FP Feature Register 2 Definitions */
#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */
#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */
/*@} end of group CMSIS_FPU */ /*@} end of group CMSIS_FPU */
@ -1625,7 +1616,7 @@ typedef struct
#ifdef CMSIS_VECTAB_VIRTUAL #ifdef CMSIS_VECTAB_VIRTUAL
#ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
#endif #endif
#include CMSIS_VECTAB_VIRTUAL_HEADER_FILE #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#else #else
@ -1912,8 +1903,8 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr
*/ */
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
} }
@ -1927,8 +1918,8 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
*/ */
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
} }
@ -1953,6 +1944,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
/*@} end of CMSIS_Core_NVICFunctions */ /*@} end of CMSIS_Core_NVICFunctions */
/* ########################## MPU functions #################################### */ /* ########################## MPU functions #################################### */
#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)

View File

@ -1,11 +1,11 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_cm7.h * @file core_cm7.h
* @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File
* @version V5.0.8 * @version V5.1.0
* @date 04. June 2018 * @date 13. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -423,7 +423,7 @@ typedef struct
__IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ __IOM uint32_t ISER[8U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[24U]; uint32_t RESERVED0[24U];
__IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ __IOM uint32_t ICER[8U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RSERVED1[24U]; uint32_t RESERVED1[24U];
__IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ __IOM uint32_t ISPR[8U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[24U]; uint32_t RESERVED2[24U];
__IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ __IOM uint32_t ICPR[8U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
@ -930,6 +930,24 @@ typedef struct
#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ #define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */
/* Auxiliary Control Register Definitions */ /* Auxiliary Control Register Definitions */
#define SCnSCB_ACTLR_DISDYNADD_Pos 26U /*!< ACTLR: DISDYNADD Position */
#define SCnSCB_ACTLR_DISDYNADD_Msk (1UL << SCnSCB_ACTLR_DISDYNADD_Pos) /*!< ACTLR: DISDYNADD Mask */
#define SCnSCB_ACTLR_DISISSCH1_Pos 21U /*!< ACTLR: DISISSCH1 Position */
#define SCnSCB_ACTLR_DISISSCH1_Msk (0x1FUL << SCnSCB_ACTLR_DISISSCH1_Pos) /*!< ACTLR: DISISSCH1 Mask */
#define SCnSCB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */
#define SCnSCB_ACTLR_DISDI_Msk (0x1FUL << SCnSCB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */
#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */
#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (1UL << SCnSCB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */
#define SCnSCB_ACTLR_DISBTACALLOC_Pos 14U /*!< ACTLR: DISBTACALLOC Position */
#define SCnSCB_ACTLR_DISBTACALLOC_Msk (1UL << SCnSCB_ACTLR_DISBTACALLOC_Pos) /*!< ACTLR: DISBTACALLOC Mask */
#define SCnSCB_ACTLR_DISBTACREAD_Pos 13U /*!< ACTLR: DISBTACREAD Position */
#define SCnSCB_ACTLR_DISBTACREAD_Msk (1UL << SCnSCB_ACTLR_DISBTACREAD_Pos) /*!< ACTLR: DISBTACREAD Mask */
#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ #define SCnSCB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */
#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ #define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (1UL << SCnSCB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */
@ -1024,10 +1042,7 @@ typedef struct
__IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */
uint32_t RESERVED2[15U]; uint32_t RESERVED2[15U];
__IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */
uint32_t RESERVED3[29U]; uint32_t RESERVED3[32U];
__OM uint32_t IWR; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */
__IM uint32_t IRR; /*!< Offset: 0xEFC (R/ ) ITM Integration Read Register */
__IOM uint32_t IMCR; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */
uint32_t RESERVED4[43U]; uint32_t RESERVED4[43U];
__OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */
__IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */
@ -1078,18 +1093,6 @@ typedef struct
#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */
#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */
/* ITM Integration Write Register Definitions */
#define ITM_IWR_ATVALIDM_Pos 0U /*!< ITM IWR: ATVALIDM Position */
#define ITM_IWR_ATVALIDM_Msk (1UL /*<< ITM_IWR_ATVALIDM_Pos*/) /*!< ITM IWR: ATVALIDM Mask */
/* ITM Integration Read Register Definitions */
#define ITM_IRR_ATREADYM_Pos 0U /*!< ITM IRR: ATREADYM Position */
#define ITM_IRR_ATREADYM_Msk (1UL /*<< ITM_IRR_ATREADYM_Pos*/) /*!< ITM IRR: ATREADYM Mask */
/* ITM Integration Mode Control Register Definitions */
#define ITM_IMCR_INTEGRATION_Pos 0U /*!< ITM IMCR: INTEGRATION Position */
#define ITM_IMCR_INTEGRATION_Msk (1UL /*<< ITM_IMCR_INTEGRATION_Pos*/) /*!< ITM IMCR: INTEGRATION Mask */
/* ITM Lock Status Register Definitions */ /* ITM Lock Status Register Definitions */
#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ #define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */
#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ #define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */
@ -1325,13 +1328,13 @@ typedef struct
/* TPI Integration ETM Data Register Definitions (FIFO0) */ /* TPI Integration ETM Data Register Definitions (FIFO0) */
#define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */ #define TPI_FIFO0_ITM_ATVALID_Pos 29U /*!< TPI FIFO0: ITM_ATVALID Position */
#define TPI_FIFO0_ITM_ATVALID_Msk (0x3UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */ #define TPI_FIFO0_ITM_ATVALID_Msk (0x1UL << TPI_FIFO0_ITM_ATVALID_Pos) /*!< TPI FIFO0: ITM_ATVALID Mask */
#define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */ #define TPI_FIFO0_ITM_bytecount_Pos 27U /*!< TPI FIFO0: ITM_bytecount Position */
#define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */ #define TPI_FIFO0_ITM_bytecount_Msk (0x3UL << TPI_FIFO0_ITM_bytecount_Pos) /*!< TPI FIFO0: ITM_bytecount Mask */
#define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */ #define TPI_FIFO0_ETM_ATVALID_Pos 26U /*!< TPI FIFO0: ETM_ATVALID Position */
#define TPI_FIFO0_ETM_ATVALID_Msk (0x3UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */ #define TPI_FIFO0_ETM_ATVALID_Msk (0x1UL << TPI_FIFO0_ETM_ATVALID_Pos) /*!< TPI FIFO0: ETM_ATVALID Mask */
#define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */ #define TPI_FIFO0_ETM_bytecount_Pos 24U /*!< TPI FIFO0: ETM_bytecount Position */
#define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */ #define TPI_FIFO0_ETM_bytecount_Msk (0x3UL << TPI_FIFO0_ETM_bytecount_Pos) /*!< TPI FIFO0: ETM_bytecount Mask */
@ -1354,13 +1357,13 @@ typedef struct
/* TPI Integration ITM Data Register Definitions (FIFO1) */ /* TPI Integration ITM Data Register Definitions (FIFO1) */
#define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */ #define TPI_FIFO1_ITM_ATVALID_Pos 29U /*!< TPI FIFO1: ITM_ATVALID Position */
#define TPI_FIFO1_ITM_ATVALID_Msk (0x3UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */ #define TPI_FIFO1_ITM_ATVALID_Msk (0x1UL << TPI_FIFO1_ITM_ATVALID_Pos) /*!< TPI FIFO1: ITM_ATVALID Mask */
#define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */ #define TPI_FIFO1_ITM_bytecount_Pos 27U /*!< TPI FIFO1: ITM_bytecount Position */
#define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */ #define TPI_FIFO1_ITM_bytecount_Msk (0x3UL << TPI_FIFO1_ITM_bytecount_Pos) /*!< TPI FIFO1: ITM_bytecount Mask */
#define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */ #define TPI_FIFO1_ETM_ATVALID_Pos 26U /*!< TPI FIFO1: ETM_ATVALID Position */
#define TPI_FIFO1_ETM_ATVALID_Msk (0x3UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */ #define TPI_FIFO1_ETM_ATVALID_Msk (0x1UL << TPI_FIFO1_ETM_ATVALID_Pos) /*!< TPI FIFO1: ETM_ATVALID Mask */
#define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */ #define TPI_FIFO1_ETM_bytecount_Pos 24U /*!< TPI FIFO1: ETM_bytecount Position */
#define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */ #define TPI_FIFO1_ETM_bytecount_Msk (0x3UL << TPI_FIFO1_ETM_bytecount_Pos) /*!< TPI FIFO1: ETM_bytecount Mask */
@ -1617,6 +1620,9 @@ typedef struct
/* Media and FP Feature Register 2 Definitions */ /* Media and FP Feature Register 2 Definitions */
#define FPU_MVFR2_VFP_Misc_Pos 4U /*!< MVFR2: VFP Misc bits Position */
#define FPU_MVFR2_VFP_Misc_Msk (0xFUL << FPU_MVFR2_VFP_Misc_Pos) /*!< MVFR2: VFP Misc bits Mask */
/*@} end of group CMSIS_FPU */ /*@} end of group CMSIS_FPU */
@ -2120,8 +2126,8 @@ __STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGr
*/ */
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; (* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4)) = vector;
} }
@ -2135,8 +2141,8 @@ __STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
*/ */
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) __STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{ {
uint32_t *vectors = (uint32_t *)SCB->VTOR; uint32_t vectors = (uint32_t )SCB->VTOR;
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; return (uint32_t)(* (int *) (vectors + ((int32_t)IRQn + NVIC_USER_IRQ_OFFSET) * 4));
} }
@ -2161,6 +2167,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
/*@} end of CMSIS_Core_NVICFunctions */ /*@} end of CMSIS_Core_NVICFunctions */
/* ########################## MPU functions #################################### */ /* ########################## MPU functions #################################### */
#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U)
@ -2169,6 +2176,7 @@ __NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
#endif #endif
/* ########################## FPU functions #################################### */ /* ########################## FPU functions #################################### */
/** /**
\ingroup CMSIS_Core_FunctionInterface \ingroup CMSIS_Core_FunctionInterface
@ -2204,7 +2212,6 @@ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
} }
} }
/*@} end of CMSIS_Core_FpuFunctions */ /*@} end of CMSIS_Core_FpuFunctions */
@ -2221,14 +2228,17 @@ __STATIC_INLINE uint32_t SCB_GetFPUType(void)
#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) #define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos)
#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) #define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos )
#define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
/** /**
\brief Enable I-Cache \brief Enable I-Cache
\details Turns on I-Cache \details Turns on I-Cache
*/ */
__STATIC_INLINE void SCB_EnableICache (void) __STATIC_FORCEINLINE void SCB_EnableICache (void)
{ {
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */
__DSB(); __DSB();
__ISB(); __ISB();
SCB->ICIALLU = 0UL; /* invalidate I-Cache */ SCB->ICIALLU = 0UL; /* invalidate I-Cache */
@ -2245,7 +2255,7 @@ __STATIC_INLINE void SCB_EnableICache (void)
\brief Disable I-Cache \brief Disable I-Cache
\details Turns off I-Cache \details Turns off I-Cache
*/ */
__STATIC_INLINE void SCB_DisableICache (void) __STATIC_FORCEINLINE void SCB_DisableICache (void)
{ {
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
__DSB(); __DSB();
@ -2262,7 +2272,7 @@ __STATIC_INLINE void SCB_DisableICache (void)
\brief Invalidate I-Cache \brief Invalidate I-Cache
\details Invalidates I-Cache \details Invalidates I-Cache
*/ */
__STATIC_INLINE void SCB_InvalidateICache (void) __STATIC_FORCEINLINE void SCB_InvalidateICache (void)
{ {
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
__DSB(); __DSB();
@ -2278,14 +2288,16 @@ __STATIC_INLINE void SCB_InvalidateICache (void)
\brief Enable D-Cache \brief Enable D-Cache
\details Turns on D-Cache \details Turns on D-Cache
*/ */
__STATIC_INLINE void SCB_EnableDCache (void) __STATIC_FORCEINLINE void SCB_EnableDCache (void)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr; uint32_t ccsidr;
uint32_t sets; uint32_t sets;
uint32_t ways; uint32_t ways;
SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB(); __DSB();
ccsidr = SCB->CCSIDR; ccsidr = SCB->CCSIDR;
@ -2316,14 +2328,14 @@ __STATIC_INLINE void SCB_EnableDCache (void)
\brief Disable D-Cache \brief Disable D-Cache
\details Turns off D-Cache \details Turns off D-Cache
*/ */
__STATIC_INLINE void SCB_DisableDCache (void) __STATIC_FORCEINLINE void SCB_DisableDCache (void)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr; uint32_t ccsidr;
uint32_t sets; uint32_t sets;
uint32_t ways; uint32_t ways;
SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB(); __DSB();
SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */
@ -2354,14 +2366,14 @@ __STATIC_INLINE void SCB_DisableDCache (void)
\brief Invalidate D-Cache \brief Invalidate D-Cache
\details Invalidates D-Cache \details Invalidates D-Cache
*/ */
__STATIC_INLINE void SCB_InvalidateDCache (void) __STATIC_FORCEINLINE void SCB_InvalidateDCache (void)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr; uint32_t ccsidr;
uint32_t sets; uint32_t sets;
uint32_t ways; uint32_t ways;
SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB(); __DSB();
ccsidr = SCB->CCSIDR; ccsidr = SCB->CCSIDR;
@ -2389,15 +2401,15 @@ __STATIC_INLINE void SCB_InvalidateDCache (void)
\brief Clean D-Cache \brief Clean D-Cache
\details Cleans D-Cache \details Cleans D-Cache
*/ */
__STATIC_INLINE void SCB_CleanDCache (void) __STATIC_FORCEINLINE void SCB_CleanDCache (void)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr; uint32_t ccsidr;
uint32_t sets; uint32_t sets;
uint32_t ways; uint32_t ways;
SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB(); __DSB();
ccsidr = SCB->CCSIDR; ccsidr = SCB->CCSIDR;
@ -2424,14 +2436,14 @@ __STATIC_INLINE void SCB_CleanDCache (void)
\brief Clean & Invalidate D-Cache \brief Clean & Invalidate D-Cache
\details Cleans and Invalidates D-Cache \details Cleans and Invalidates D-Cache
*/ */
__STATIC_INLINE void SCB_CleanInvalidateDCache (void) __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr; uint32_t ccsidr;
uint32_t sets; uint32_t sets;
uint32_t ways; uint32_t ways;
SCB->CSSELR = 0U; /*(0U << 1U) | 0U;*/ /* Level 1 data cache */ SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB(); __DSB();
ccsidr = SCB->CCSIDR; ccsidr = SCB->CCSIDR;
@ -2457,27 +2469,30 @@ __STATIC_INLINE void SCB_CleanInvalidateDCache (void)
/** /**
\brief D-Cache Invalidate by address \brief D-Cache Invalidate by address
\details Invalidates D-Cache for the given address \details Invalidates D-Cache for the given address.
\param[in] addr address (aligned to 32-byte boundary) D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are invalidated.
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes) \param[in] dsize size of memory block (in number of bytes)
*/ */
__STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) __STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
int32_t op_size = dsize; if ( dsize > 0 ) {
uint32_t op_addr = (uint32_t)addr; int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
__DSB(); do {
SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
while (op_size > 0) { __DSB();
SCB->DCIMVAC = op_addr; __ISB();
op_addr += (uint32_t)linesize;
op_size -= linesize;
} }
__DSB();
__ISB();
#endif #endif
} }
@ -2485,26 +2500,29 @@ __STATIC_INLINE void SCB_InvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize
/** /**
\brief D-Cache Clean by address \brief D-Cache Clean by address
\details Cleans D-Cache for the given address \details Cleans D-Cache for the given address
\param[in] addr address (aligned to 32-byte boundary) D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are cleaned.
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes) \param[in] dsize size of memory block (in number of bytes)
*/ */
__STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) __STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
int32_t op_size = dsize; if ( dsize > 0 ) {
uint32_t op_addr = (uint32_t) addr; int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
__DSB(); do {
SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
while (op_size > 0) { __DSB();
SCB->DCCMVAC = op_addr; __ISB();
op_addr += (uint32_t)linesize;
op_size -= linesize;
} }
__DSB();
__ISB();
#endif #endif
} }
@ -2512,30 +2530,32 @@ __STATIC_INLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)
/** /**
\brief D-Cache Clean and Invalidate by address \brief D-Cache Clean and Invalidate by address
\details Cleans and invalidates D_Cache for the given address \details Cleans and invalidates D_Cache for the given address
D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are cleaned and invalidated.
\param[in] addr address (aligned to 32-byte boundary) \param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (in number of bytes) \param[in] dsize size of memory block (in number of bytes)
*/ */
__STATIC_INLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)
{ {
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
int32_t op_size = dsize; if ( dsize > 0 ) {
uint32_t op_addr = (uint32_t) addr; int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
int32_t linesize = 32; /* in Cortex-M7 size of cache line is fixed to 8 words (32 bytes) */ uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
__DSB(); do {
SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
while (op_size > 0) { __DSB();
SCB->DCCIMVAC = op_addr; __ISB();
op_addr += (uint32_t)linesize;
op_size -= linesize;
} }
__DSB();
__ISB();
#endif #endif
} }
/*@} end of CMSIS_Core_CacheFunctions */ /*@} end of CMSIS_Core_CacheFunctions */

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_sc000.h * @file core_sc000.h
* @brief CMSIS SC000 Core Peripheral Access Layer Header File * @brief CMSIS SC000 Core Peripheral Access Layer Header File
* @version V5.0.5 * @version V5.0.6
* @date 28. May 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.

View File

@ -1,8 +1,8 @@
/**************************************************************************//** /**************************************************************************//**
* @file core_sc300.h * @file core_sc300.h
* @brief CMSIS SC300 Core Peripheral Access Layer Header File * @brief CMSIS SC300 Core Peripheral Access Layer Header File
* @version V5.0.6 * @version V5.0.7
* @date 04. June 2018 * @date 12. November 2018
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved. * Copyright (c) 2009-2018 Arm Limited. All rights reserved.

View File

@ -1,11 +1,11 @@
/****************************************************************************** /******************************************************************************
* @file mpu_armv7.h * @file mpu_armv7.h
* @brief CMSIS MPU API for Armv7-M MPU * @brief CMSIS MPU API for Armv7-M MPU
* @version V5.0.5 * @version V5.1.0
* @date 06. September 2018 * @date 08. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2017-2018 Arm Limited. All rights reserved. * Copyright (c) 2017-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -134,7 +134,7 @@
/** /**
* MPU Memory Access Attribute for device memory. * MPU Memory Access Attribute for device memory.
* - TEX: 000b (if non-shareable) or 010b (if shareable) * - TEX: 000b (if shareable) or 010b (if non-shareable)
* - Shareable or non-shareable * - Shareable or non-shareable
* - Non-cacheable * - Non-cacheable
* - Bufferable (if shareable) or non-bufferable (if non-shareable) * - Bufferable (if shareable) or non-bufferable (if non-shareable)
@ -190,20 +190,19 @@ typedef struct {
*/ */
__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
{ {
__DSB();
__ISB();
MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
__DSB();
__ISB();
} }
/** Disable the MPU. /** Disable the MPU.
*/ */
__STATIC_INLINE void ARM_MPU_Disable(void) __STATIC_INLINE void ARM_MPU_Disable(void)
{ {
__DSB(); __DMB();
__ISB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
@ -246,7 +245,7 @@ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t r
* \param src Source data is copied from. * \param src Source data is copied from.
* \param len Amount of data words to be copied. * \param len Amount of data words to be copied.
*/ */
__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) __STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{ {
uint32_t i; uint32_t i;
for (i = 0U; i < len; ++i) for (i = 0U; i < len; ++i)
@ -263,11 +262,11 @@ __STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt)
{ {
const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
while (cnt > MPU_TYPE_RALIASES) { while (cnt > MPU_TYPE_RALIASES) {
orderedCpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize); ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize);
table += MPU_TYPE_RALIASES; table += MPU_TYPE_RALIASES;
cnt -= MPU_TYPE_RALIASES; cnt -= MPU_TYPE_RALIASES;
} }
orderedCpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize); ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize);
} }
#endif #endif

View File

@ -1,11 +1,11 @@
/****************************************************************************** /******************************************************************************
* @file mpu_armv8.h * @file mpu_armv8.h
* @brief CMSIS MPU API for Armv8-M MPU * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU
* @version V5.0.4 * @version V5.1.0
* @date 10. January 2018 * @date 08. March 2019
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (c) 2017-2018 Arm Limited. All rights reserved. * Copyright (c) 2017-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -101,6 +101,21 @@
((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \ ((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
(MPU_RLAR_EN_Msk)) (MPU_RLAR_EN_Msk))
#if defined(MPU_RLAR_PXN_Pos)
/** \brief Region Limit Address Register with PXN value
* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
* \param PXN Privileged execute never. Defines whether code can be executed from this privileged region.
* \param IDX The attribute index to be associated with this memory region.
*/
#define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \
((LIMIT & MPU_RLAR_LIMIT_Msk) | \
((PXN << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \
((IDX << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
(MPU_RLAR_EN_Msk))
#endif
/** /**
* Struct for a single MPU Region * Struct for a single MPU Region
*/ */
@ -114,20 +129,19 @@ typedef struct {
*/ */
__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control) __STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
{ {
__DSB();
__ISB();
MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
__DSB();
__ISB();
} }
/** Disable the MPU. /** Disable the MPU.
*/ */
__STATIC_INLINE void ARM_MPU_Disable(void) __STATIC_INLINE void ARM_MPU_Disable(void)
{ {
__DSB(); __DMB();
__ISB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
@ -140,20 +154,19 @@ __STATIC_INLINE void ARM_MPU_Disable(void)
*/ */
__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control) __STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control)
{ {
__DSB();
__ISB();
MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk; MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk; SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
__DSB();
__ISB();
} }
/** Disable the Non-secure MPU. /** Disable the Non-secure MPU.
*/ */
__STATIC_INLINE void ARM_MPU_Disable_NS(void) __STATIC_INLINE void ARM_MPU_Disable_NS(void)
{ {
__DSB(); __DMB();
__ISB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk #ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk; SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif #endif
@ -267,7 +280,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t
* \param src Source data is copied from. * \param src Source data is copied from.
* \param len Amount of data words to be copied. * \param len Amount of data words to be copied.
*/ */
__STATIC_INLINE void orderedCpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len) __STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{ {
uint32_t i; uint32_t i;
for (i = 0U; i < len; ++i) for (i = 0U; i < len; ++i)
@ -287,7 +300,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_
const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U; const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
if (cnt == 1U) { if (cnt == 1U) {
mpu->RNR = rnr; mpu->RNR = rnr;
orderedCpy(&(mpu->RBAR), &(table->RBAR), rowWordSize); ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize);
} else { } else {
uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U); uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U);
uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES; uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES;
@ -295,7 +308,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_
mpu->RNR = rnrBase; mpu->RNR = rnrBase;
while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) { while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) {
uint32_t c = MPU_TYPE_RALIASES - rnrOffset; uint32_t c = MPU_TYPE_RALIASES - rnrOffset;
orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize); ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize);
table += c; table += c;
cnt -= c; cnt -= c;
rnrOffset = 0U; rnrOffset = 0U;
@ -303,7 +316,7 @@ __STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_
mpu->RNR = rnrBase; mpu->RNR = rnrBase;
} }
orderedCpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize); ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize);
} }
} }

View File

@ -32,82 +32,82 @@
void print_context_info(void); void print_context_info(void);
#if MBED_CONF_PLATFORM_CRASH_CAPTURE_ENABLED #if MBED_CONF_PLATFORM_CRASH_CAPTURE_ENABLED
//Global for populating the context in exception handler //Global for populating the context in exception handler
mbed_fault_context_t *const mbed_fault_context=(mbed_fault_context_t *)(FAULT_CONTEXT_LOCATION); mbed_fault_context_t *const mbed_fault_context = (mbed_fault_context_t *)(FAULT_CONTEXT_LOCATION);
#else #else
mbed_fault_context_t fault_context; mbed_fault_context_t fault_context;
mbed_fault_context_t *const mbed_fault_context=(mbed_fault_context_t *)&fault_context; mbed_fault_context_t *const mbed_fault_context = (mbed_fault_context_t *) &fault_context;
#endif #endif
//This is a handler function called from Fault handler to print the error information out. //This is a handler function called from Fault handler to print the error information out.
//This runs in fault context and uses special functions(defined in mbed_rtx_fault_handler.c) to print the information without using C-lib support. //This runs in fault context and uses special functions(defined in mbed_rtx_fault_handler.c) to print the information without using C-lib support.
void mbed_fault_handler (uint32_t fault_type, void *mbed_fault_context_in) void mbed_fault_handler(uint32_t fault_type, void *mbed_fault_context_in)
{ {
mbed_error_status_t faultStatus = MBED_SUCCESS; mbed_error_status_t faultStatus = MBED_SUCCESS;
mbed_error_printf("\n++ MbedOS Fault Handler ++\n\nFaultType: "); mbed_error_printf("\n++ MbedOS Fault Handler ++\n\nFaultType: ");
switch( fault_type ) { switch (fault_type) {
case MEMMANAGE_FAULT_EXCEPTION: case MEMMANAGE_FAULT_EXCEPTION:
mbed_error_printf("MemManageFault"); mbed_error_printf("MemManageFault");
faultStatus = MBED_ERROR_MEMMANAGE_EXCEPTION; faultStatus = MBED_ERROR_MEMMANAGE_EXCEPTION;
break; break;
case BUS_FAULT_EXCEPTION: case BUS_FAULT_EXCEPTION:
mbed_error_printf("BusFault"); mbed_error_printf("BusFault");
faultStatus = MBED_ERROR_BUSFAULT_EXCEPTION; faultStatus = MBED_ERROR_BUSFAULT_EXCEPTION;
break; break;
case USAGE_FAULT_EXCEPTION: case USAGE_FAULT_EXCEPTION:
mbed_error_printf("UsageFault"); mbed_error_printf("UsageFault");
faultStatus = MBED_ERROR_USAGEFAULT_EXCEPTION; faultStatus = MBED_ERROR_USAGEFAULT_EXCEPTION;
break; break;
//There is no way we can hit this code without getting an exception, so we have the default treated like hardfault //There is no way we can hit this code without getting an exception, so we have the default treated like hardfault
case HARD_FAULT_EXCEPTION: case HARD_FAULT_EXCEPTION:
default: default:
mbed_error_printf("HardFault"); mbed_error_printf("HardFault");
faultStatus = MBED_ERROR_HARDFAULT_EXCEPTION; faultStatus = MBED_ERROR_HARDFAULT_EXCEPTION;
break; break;
} }
mbed_error_printf("\n\nContext:"); mbed_error_printf("\n\nContext:");
print_context_info(); print_context_info();
mbed_error_printf("\n\n-- MbedOS Fault Handler --\n\n"); mbed_error_printf("\n\n-- MbedOS Fault Handler --\n\n");
//Now call mbed_error, to log the error and halt the system //Now call mbed_error, to log the error and halt the system
mbed_error( faultStatus, "Fault exception", mbed_fault_context->PC_reg, NULL, 0 ); mbed_error(faultStatus, "Fault exception", mbed_fault_context->PC_reg, NULL, 0);
} }
MBED_NOINLINE void print_context_info(void) MBED_NOINLINE void print_context_info(void)
{ {
//Context Regs //Context Regs
for(int i=0;i<13;i++) { for (int i = 0; i < 13; i++) {
mbed_error_printf("\nR%-4d: %08" PRIX32, i, ((uint32_t *)(mbed_fault_context))[i]); mbed_error_printf("\nR%-4d: %08" PRIX32, i, ((uint32_t *)(mbed_fault_context))[i]);
} }
mbed_error_printf("\nSP : %08" PRIX32 mbed_error_printf("\nSP : %08" PRIX32
"\nLR : %08" PRIX32 "\nLR : %08" PRIX32
"\nPC : %08" PRIX32 "\nPC : %08" PRIX32
"\nxPSR : %08" PRIX32 "\nxPSR : %08" PRIX32
"\nPSP : %08" PRIX32 "\nPSP : %08" PRIX32
"\nMSP : %08" PRIX32, mbed_fault_context->SP_reg, mbed_fault_context->LR_reg, mbed_fault_context->PC_reg, "\nMSP : %08" PRIX32, mbed_fault_context->SP_reg, mbed_fault_context->LR_reg, mbed_fault_context->PC_reg,
mbed_fault_context->xPSR, mbed_fault_context->PSP, mbed_fault_context->MSP ); mbed_fault_context->xPSR, mbed_fault_context->PSP, mbed_fault_context->MSP);
//Capture CPUID to get core/cpu info //Capture CPUID to get core/cpu info
mbed_error_printf("\nCPUID: %08" PRIX32, SCB->CPUID); mbed_error_printf("\nCPUID: %08" PRIX32, SCB->CPUID);
#if !defined(TARGET_M0) && !defined(TARGET_M0P) #if !defined(TARGET_M0) && !defined(TARGET_M0P)
//Capture fault information registers to infer the cause of exception //Capture fault information registers to infer the cause of exception
mbed_error_printf("\nHFSR : %08" PRIX32 mbed_error_printf("\nHFSR : %08" PRIX32
"\nMMFSR: %08" PRIX32 "\nMMFSR: %08" PRIX32
"\nBFSR : %08" PRIX32 "\nBFSR : %08" PRIX32
"\nUFSR : %08" PRIX32 "\nUFSR : %08" PRIX32
"\nDFSR : %08" PRIX32 "\nDFSR : %08" PRIX32
"\nAFSR : %08" PRIX32 ////Split/Capture CFSR into MMFSR, BFSR, UFSR "\nAFSR : %08" PRIX32 ////Split/Capture CFSR into MMFSR, BFSR, UFSR
,SCB->HFSR, (0xFF & SCB->CFSR), ((0xFF00 & SCB->CFSR) >> 8), ((0xFFFF0000 & SCB->CFSR) >> 16), SCB->DFSR, SCB->AFSR ); , SCB->HFSR, (0xFF & SCB->CFSR), ((0xFF00 & SCB->CFSR) >> 8), ((0xFFFF0000 & SCB->CFSR) >> 16), SCB->DFSR, SCB->AFSR);
//Print MMFAR only if its valid as indicated by MMFSR //Print MMFAR only if its valid as indicated by MMFSR
if ((0xFF & SCB->CFSR) & 0x80) { if ((0xFF & SCB->CFSR) & 0x80) {
mbed_error_printf("\nMMFAR: %08" PRIX32, SCB->MMFAR); mbed_error_printf("\nMMFAR: %08" PRIX32, SCB->MMFAR);
@ -117,39 +117,40 @@ MBED_NOINLINE void print_context_info(void)
mbed_error_printf("\nBFAR : %08" PRIX32, SCB->BFAR); mbed_error_printf("\nBFAR : %08" PRIX32, SCB->BFAR);
} }
#endif #endif
//Print Mode //Print Mode
if (mbed_fault_context->EXC_RETURN & 0x8) { if (mbed_fault_context->EXC_RETURN & 0x8) {
mbed_error_printf("\nMode : Thread"); mbed_error_printf("\nMode : Thread");
//Print Priv level in Thread mode - We capture CONTROL reg which reflects the privilege. //Print Priv level in Thread mode - We capture CONTROL reg which reflects the privilege.
//Note that the CONTROL register captured still reflects the privilege status of the //Note that the CONTROL register captured still reflects the privilege status of the
//thread mode eventhough we are in Handler mode by the time we capture it. //thread mode eventhough we are in Handler mode by the time we capture it.
if(mbed_fault_context->CONTROL & 0x1) { if (mbed_fault_context->CONTROL & 0x1) {
mbed_error_printf("\nPriv : User"); mbed_error_printf("\nPriv : User");
} else { } else {
mbed_error_printf("\nPriv : Privileged"); mbed_error_printf("\nPriv : Privileged");
} }
} else { } else {
mbed_error_printf("\nMode : Handler"); mbed_error_printf("\nMode : Handler");
mbed_error_printf("\nPriv : Privileged"); mbed_error_printf("\nPriv : Privileged");
} }
//Print Return Stack //Print Return Stack
if (mbed_fault_context->EXC_RETURN & 0x4) { if (mbed_fault_context->EXC_RETURN & 0x4) {
mbed_error_printf("\nStack: PSP"); mbed_error_printf("\nStack: PSP");
} else { } else {
mbed_error_printf("\nStack: MSP"); mbed_error_printf("\nStack: MSP");
} }
} }
mbed_error_status_t mbed_get_reboot_fault_context (mbed_fault_context_t *fault_context) mbed_error_status_t mbed_get_reboot_fault_context(mbed_fault_context_t *fault_context)
{ {
mbed_error_status_t status = MBED_MAKE_ERROR(MBED_MODULE_PLATFORM, MBED_ERROR_CODE_ITEM_NOT_FOUND); mbed_error_status_t status = MBED_MAKE_ERROR(MBED_MODULE_PLATFORM, MBED_ERROR_CODE_ITEM_NOT_FOUND);
#if MBED_CONF_PLATFORM_CRASH_CAPTURE_ENABLED #if MBED_CONF_PLATFORM_CRASH_CAPTURE_ENABLED
if(fault_context == NULL) if (fault_context == NULL) {
return MBED_MAKE_ERROR(MBED_MODULE_PLATFORM, MBED_ERROR_CODE_INVALID_ARGUMENT); return MBED_MAKE_ERROR(MBED_MODULE_PLATFORM, MBED_ERROR_CODE_INVALID_ARGUMENT);
}
memcpy(fault_context, mbed_fault_context, sizeof(mbed_fault_context_t)); memcpy(fault_context, mbed_fault_context, sizeof(mbed_fault_context_t));
status = MBED_SUCCESS; status = MBED_SUCCESS;
#endif #endif
return status; return status;
} }

View File

@ -13,39 +13,39 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
#ifndef MBED_FAULT_HANDLER_H #ifndef MBED_FAULT_HANDLER_H
#define MBED_FAULT_HANDLER_H #define MBED_FAULT_HANDLER_H
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif
//Fault context struct //Fault context struct
//WARNING: DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES in except.S files. //WARNING: DO NOT CHANGE THIS STRUCT WITHOUT MAKING CORRESPONDING CHANGES in except.S files.
//Offset of these registers are used by fault handler in except.S //Offset of these registers are used by fault handler in except.S
typedef struct { typedef struct {
uint32_t R0_reg; uint32_t R0_reg;
uint32_t R1_reg; uint32_t R1_reg;
uint32_t R2_reg; uint32_t R2_reg;
uint32_t R3_reg; uint32_t R3_reg;
uint32_t R4_reg; uint32_t R4_reg;
uint32_t R5_reg; uint32_t R5_reg;
uint32_t R6_reg; uint32_t R6_reg;
uint32_t R7_reg; uint32_t R7_reg;
uint32_t R8_reg; uint32_t R8_reg;
uint32_t R9_reg; uint32_t R9_reg;
uint32_t R10_reg; uint32_t R10_reg;
uint32_t R11_reg; uint32_t R11_reg;
uint32_t R12_reg; uint32_t R12_reg;
uint32_t SP_reg; uint32_t SP_reg;
uint32_t LR_reg; uint32_t LR_reg;
uint32_t PC_reg; uint32_t PC_reg;
uint32_t xPSR; uint32_t xPSR;
uint32_t PSP; uint32_t PSP;
uint32_t MSP; uint32_t MSP;
uint32_t EXC_RETURN; uint32_t EXC_RETURN;
uint32_t CONTROL; uint32_t CONTROL;
} mbed_fault_context_t; } mbed_fault_context_t;
//Fault type definitions //Fault type definitions
@ -57,20 +57,20 @@ typedef struct {
//This is a handler function called from Fault handler to print the error information out. //This is a handler function called from Fault handler to print the error information out.
//This runs in fault context and uses special functions(defined in mbed_fault_handler.c) to print the information without using C-lib support. //This runs in fault context and uses special functions(defined in mbed_fault_handler.c) to print the information without using C-lib support.
void mbed_fault_handler (uint32_t fault_type, void *mbed_fault_context_in); void mbed_fault_handler(uint32_t fault_type, void *mbed_fault_context_in);
/** /**
* Call this function to retrieve the fault context after a fatal exception which triggered a system reboot. The function retrieves the fault context stored in crash-report ram area which is preserved over reboot. * Call this function to retrieve the fault context after a fatal exception which triggered a system reboot. The function retrieves the fault context stored in crash-report ram area which is preserved over reboot.
* @param fault_context Pointer to mbed_fault_context_t struct allocated by the caller. This is the mbed_fault_context_t info captured as part of the fatal exception which triggered the reboot. * @param fault_context Pointer to mbed_fault_context_t struct allocated by the caller. This is the mbed_fault_context_t info captured as part of the fatal exception which triggered the reboot.
* @return 0 or MBED_SUCCESS on success. * @return 0 or MBED_SUCCESS on success.
* MBED_ERROR_INVALID_ARGUMENT in case of invalid error_info pointer * MBED_ERROR_INVALID_ARGUMENT in case of invalid error_info pointer
* MBED_ERROR_ITEM_NOT_FOUND if no reboot context is currently captured by teh system * MBED_ERROR_ITEM_NOT_FOUND if no reboot context is currently captured by teh system
* *
*/ */
mbed_error_status_t mbed_get_reboot_fault_context (mbed_fault_context_t *fault_context); mbed_error_status_t mbed_get_reboot_fault_context(mbed_fault_context_t *fault_context);
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif
#endif #endif

View File

@ -156,9 +156,11 @@
#define EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE #define EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE
#define EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE #define EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE
#define EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE #define EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE
#define EVR_RTX_THREAD_DELAY_DISABLE #define EVR_RTX_DELAY_DISABLE
#define EVR_RTX_THREAD_DELAY_UNTIL_DISABLE #define EVR_RTX_DELAY_UNTIL_DISABLE
#define EVR_RTX_THREAD_DELAY_COMPLETED_DISABLE #define EVR_RTX_DELAY_STARTED_DISABLE
#define EVR_RTX_DELAY_UNTIL_STARTED_DISABLE
#define EVR_RTX_DELAY_COMPLETED_DISABLE
#define EVR_RTX_TIMER_CALLBACK_DISABLE #define EVR_RTX_TIMER_CALLBACK_DISABLE
#define EVR_RTX_TIMER_NEW_DISABLE #define EVR_RTX_TIMER_NEW_DISABLE
#define EVR_RTX_TIMER_CREATED_DISABLE #define EVR_RTX_TIMER_CREATED_DISABLE

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -17,7 +17,7 @@
* *
* ----------------------------------------------------------------------------- * -----------------------------------------------------------------------------
* *
* $Revision: V5.4.0 * $Revision: V5.5.0
* *
* Project: CMSIS-RTOS RTX * Project: CMSIS-RTOS RTX
* Title: RTX Configuration definitions * Title: RTX Configuration definitions
@ -353,7 +353,7 @@
#endif #endif
// <h>Global Event Filter Setup // <h>Global Event Filter Setup
// <i> Initial event filter settings applied to all components. // <i> Initial recording level applied to all components.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
@ -364,106 +364,128 @@
#endif #endif
// <h>RTOS Event Filter Setup // <h>RTOS Event Filter Setup
// <i> Event filter settings for RTX components. // <i> Recording levels for RTX components.
// <i> Only applicable if events for the respective component are generated. // <i> Only applicable if events for the respective component are generated.
// <e.7>Memory Management // <h>Memory Management
// <i> Filter enable settings for Memory Management events. // <i> Recording level for Memory Management events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_MEMORY_FILTER #ifndef OS_EVR_MEMORY_LEVEL
#define OS_EVR_MEMORY_FILTER 0x81U #define OS_EVR_MEMORY_LEVEL 0x01U
#endif #endif
// <e.7>Kernel // <h>Kernel
// <i> Filter enable settings for Kernel events. // <i> Recording level for Kernel events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_KERNEL_FILTER #ifndef OS_EVR_KERNEL_LEVEL
#define OS_EVR_KERNEL_FILTER 0x81U #define OS_EVR_KERNEL_LEVEL 0x01U
#endif #endif
// <e.7>Thread // <h>Thread
// <i> Filter enable settings for Thread events. // <i> Recording level for Thread events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_THREAD_FILTER #ifndef OS_EVR_THREAD_LEVEL
#define OS_EVR_THREAD_FILTER 0x85U #define OS_EVR_THREAD_LEVEL 0x05U
#endif #endif
// <e.7>Timer // <h>Generic Wait
// <i> Filter enable settings for Timer events. // <i> Recording level for Generic Wait events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_TIMER_FILTER #ifndef OS_EVR_WAIT_LEVEL
#define OS_EVR_TIMER_FILTER 0x81U #define OS_EVR_WAIT_LEVEL 0x01U
#endif #endif
// <e.7>Event Flags // <h>Thread Flags
// <i> Filter enable settings for Event Flags events. // <i> Recording level for Thread Flags events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_EVFLAGS_FILTER #ifndef OS_EVR_THFLAGS_LEVEL
#define OS_EVR_EVFLAGS_FILTER 0x81U #define OS_EVR_THFLAGS_LEVEL 0x01U
#endif #endif
// <e.7>Mutex // <h>Event Flags
// <i> Filter enable settings for Mutex events. // <i> Recording level for Event Flags events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_MUTEX_FILTER #ifndef OS_EVR_EVFLAGS_LEVEL
#define OS_EVR_MUTEX_FILTER 0x81U #define OS_EVR_EVFLAGS_LEVEL 0x01U
#endif #endif
// <e.7>Semaphore // <h>Timer
// <i> Filter enable settings for Semaphore events. // <i> Recording level for Timer events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_SEMAPHORE_FILTER #ifndef OS_EVR_TIMER_LEVEL
#define OS_EVR_SEMAPHORE_FILTER 0x81U #define OS_EVR_TIMER_LEVEL 0x01U
#endif #endif
// <e.7>Memory Pool // <h>Mutex
// <i> Filter enable settings for Memory Pool events. // <i> Recording level for Mutex events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_MEMPOOL_FILTER #ifndef OS_EVR_MUTEX_LEVEL
#define OS_EVR_MEMPOOL_FILTER 0x81U #define OS_EVR_MUTEX_LEVEL 0x01U
#endif #endif
// <e.7>Message Queue // <h>Semaphore
// <i> Filter enable settings for Message Queue events. // <i> Recording level for Semaphore events.
// <o.0>Error events // <o.0>Error events
// <o.1>API function call events // <o.1>API function call events
// <o.2>Operation events // <o.2>Operation events
// <o.3>Detailed operation events // <o.3>Detailed operation events
// </e> // </h>
#ifndef OS_EVR_MSGQUEUE_FILTER #ifndef OS_EVR_SEMAPHORE_LEVEL
#define OS_EVR_MSGQUEUE_FILTER 0x81U #define OS_EVR_SEMAPHORE_LEVEL 0x01U
#endif
// <h>Memory Pool
// <i> Recording level for Memory Pool events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MEMPOOL_LEVEL
#define OS_EVR_MEMPOOL_LEVEL 0x01U
#endif
// <h>Message Queue
// <i> Recording level for Message Queue events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MSGQUEUE_LEVEL
#define OS_EVR_MSGQUEUE_LEVEL 0x01U
#endif #endif
// </h> // </h>
@ -491,10 +513,16 @@
#define OS_EVR_THREAD 1 #define OS_EVR_THREAD 1
#endif #endif
// <q>Timer // <q>Generic Wait
// <i> Enables Timer event generation. // <i> Enables Generic Wait event generation.
#ifndef OS_EVR_TIMER #ifndef OS_EVR_WAIT
#define OS_EVR_TIMER 1 #define OS_EVR_WAIT 1
#endif
// <q>Thread Flags
// <i> Enables Thread Flags event generation.
#ifndef OS_EVR_THFLAGS
#define OS_EVR_THFLAGS 1
#endif #endif
// <q>Event Flags // <q>Event Flags
@ -502,7 +530,13 @@
#ifndef OS_EVR_EVFLAGS #ifndef OS_EVR_EVFLAGS
#define OS_EVR_EVFLAGS 1 #define OS_EVR_EVFLAGS 1
#endif #endif
// <q>Timer
// <i> Enables Timer event generation.
#ifndef OS_EVR_TIMER
#define OS_EVR_TIMER 1
#endif
// <q>Mutex // <q>Mutex
// <i> Enables Mutex event generation. // <i> Enables Mutex event generation.
#ifndef OS_EVR_MUTEX #ifndef OS_EVR_MUTEX

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -30,10 +30,22 @@
#include "RTX_Config.h" // RTX Configuration #include "RTX_Config.h" // RTX Configuration
#include "rtx_os.h" // RTX OS definitions #include "rtx_os.h" // RTX OS definitions
// Initial Thread configuration covered also Thread Flags and Generic Wait
#ifndef OS_EVR_THFLAGS
#define OS_EVR_THFLAGS OS_EVR_THREAD
#endif
#ifndef OS_EVR_WAIT
#define OS_EVR_WAIT OS_EVR_THREAD
#endif
#ifdef _RTE_
#include "RTE_Components.h" #include "RTE_Components.h"
#endif
#ifdef RTE_Compiler_EventRecorder #ifdef RTE_Compiler_EventRecorder
//lint -emacro((835,845),EventID) [MISRA Note 13]
#include "EventRecorder.h" #include "EventRecorder.h"
#include "EventRecorderConf.h" #include "EventRecorderConf.h"
@ -56,12 +68,14 @@
#define EvtRtxMemoryNo (0xF0U) #define EvtRtxMemoryNo (0xF0U)
#define EvtRtxKernelNo (0xF1U) #define EvtRtxKernelNo (0xF1U)
#define EvtRtxThreadNo (0xF2U) #define EvtRtxThreadNo (0xF2U)
#define EvtRtxTimerNo (0xF3U) #define EvtRtxThreadFlagsNo (0xF4U)
#define EvtRtxEventFlagsNo (0xF4U) #define EvtRtxWaitNo (0xF3U)
#define EvtRtxMutexNo (0xF5U) #define EvtRtxTimerNo (0xF6U)
#define EvtRtxSemaphoreNo (0xF6U) #define EvtRtxEventFlagsNo (0xF5U)
#define EvtRtxMemoryPoolNo (0xF7U) #define EvtRtxMutexNo (0xF7U)
#define EvtRtxMessageQueueNo (0xF8U) #define EvtRtxSemaphoreNo (0xF8U)
#define EvtRtxMemoryPoolNo (0xF9U)
#define EvtRtxMessageQueueNo (0xFAU)
#endif // RTE_Compiler_EventRecorder #endif // RTE_Compiler_EventRecorder
@ -207,11 +221,12 @@ extern void EvrRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id
\brief Event on successful RTOS kernel information retrieve (Op) \brief Event on successful RTOS kernel information retrieve (Op)
\param[in] version pointer to buffer for retrieving version information. \param[in] version pointer to buffer for retrieving version information.
\param[in] id_buf pointer to buffer for retrieving kernel identification string. \param[in] id_buf pointer to buffer for retrieving kernel identification string.
\param[in] id_size size of buffer for kernel identification string.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_INFO_RETRIEVED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_INFO_RETRIEVED_DISABLE))
extern void EvrRtxKernelInfoRetrieved (osVersion_t *version, char *id_buf); extern void EvrRtxKernelInfoRetrieved (const osVersion_t *version, const char *id_buf, uint32_t id_size);
#else #else
#define EvrRtxKernelInfoRetrieved(version, id_buf) #define EvrRtxKernelInfoRetrieved(version, id_buf, id_size)
#endif #endif
/** /**
@ -481,6 +496,17 @@ extern void EvrRtxThreadSetPriority (osThreadId_t thread_id, osPriority_t priori
#define EvrRtxThreadSetPriority(thread_id, priority) #define EvrRtxThreadSetPriority(thread_id, priority)
#endif #endif
/**
\brief Event on thread priority updated (Op)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
\param[in] priority new priority value for the thread function.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_PRIORITY_UPDATED_DISABLE))
extern void EvrRtxThreadPriorityUpdated (osThreadId_t thread_id, osPriority_t priority);
#else
#define EvrRtxThreadPriorityUpdated(thread_id, priority)
#endif
/** /**
\brief Event on thread priority retrieve (API) \brief Event on thread priority retrieve (API)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
@ -592,7 +618,7 @@ extern void EvrRtxThreadJoined (osThreadId_t thread_id);
#endif #endif
/** /**
\brief Event on thread execution block (Op) \brief Event on thread execution block (Detail)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
\param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out.
*/ */
@ -603,7 +629,7 @@ extern void EvrRtxThreadBlocked (osThreadId_t thread_id, uint32_t timeout);
#endif #endif
/** /**
\brief Event on thread execution unblock (Op) \brief Event on thread execution unblock (Detail)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
\param[in] ret_val extended execution status of the thread. \param[in] ret_val extended execution status of the thread.
*/ */
@ -614,7 +640,7 @@ extern void EvrRtxThreadUnblocked (osThreadId_t thread_id, uint32_t ret_val);
#endif #endif
/** /**
\brief Event on running thread pre-emption (Op) \brief Event on running thread pre-emption (Detail)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_PREEMPTED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_PREEMPTED_DISABLE))
@ -684,12 +710,26 @@ extern void EvrRtxThreadEnumerate (osThreadId_t *thread_array, uint32_t array_it
#define EvrRtxThreadEnumerate(thread_array, array_items, count) #define EvrRtxThreadEnumerate(thread_array, array_items, count)
#endif #endif
// ==== Thread Flags Events ====
/**
\brief Event on thread flags error (Error)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId or NULL when ID is unknown.
\param[in] status extended execution status.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_ERROR_DISABLE))
extern void EvrRtxThreadFlagsError (osThreadId_t thread_id, int32_t status);
#else
#define EvrRtxThreadFlagsError(thread_id, status)
#endif
/** /**
\brief Event on thread flags set (API) \brief Event on thread flags set (API)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
\param[in] flags flags of the thread that shall be set. \param[in] flags flags of the thread that shall be set.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DISABLE))
extern void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags); extern void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags);
#else #else
#define EvrRtxThreadFlagsSet(thread_id, flags) #define EvrRtxThreadFlagsSet(thread_id, flags)
@ -700,7 +740,7 @@ extern void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags);
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId. \param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
\param[in] thread_flags thread flags after setting. \param[in] thread_flags thread flags after setting.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DONE_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DONE_DISABLE))
extern void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_flags); extern void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_flags);
#else #else
#define EvrRtxThreadFlagsSetDone(thread_id, thread_flags) #define EvrRtxThreadFlagsSetDone(thread_id, thread_flags)
@ -710,7 +750,7 @@ extern void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_fl
\brief Event on thread flags clear (API) \brief Event on thread flags clear (API)
\param[in] flags flags of the thread that shall be cleared. \param[in] flags flags of the thread that shall be cleared.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DISABLE))
extern void EvrRtxThreadFlagsClear (uint32_t flags); extern void EvrRtxThreadFlagsClear (uint32_t flags);
#else #else
#define EvrRtxThreadFlagsClear(flags) #define EvrRtxThreadFlagsClear(flags)
@ -720,7 +760,7 @@ extern void EvrRtxThreadFlagsClear (uint32_t flags);
\brief Event on successful thread flags clear (Op) \brief Event on successful thread flags clear (Op)
\param[in] thread_flags thread flags before clearing. \param[in] thread_flags thread flags before clearing.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DONE_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DONE_DISABLE))
extern void EvrRtxThreadFlagsClearDone (uint32_t thread_flags); extern void EvrRtxThreadFlagsClearDone (uint32_t thread_flags);
#else #else
#define EvrRtxThreadFlagsClearDone(thread_flags) #define EvrRtxThreadFlagsClearDone(thread_flags)
@ -730,7 +770,7 @@ extern void EvrRtxThreadFlagsClearDone (uint32_t thread_flags);
\brief Event on thread flags retrieve (API) \brief Event on thread flags retrieve (API)
\param[in] thread_flags current thread flags. \param[in] thread_flags current thread flags.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_GET_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_GET_DISABLE))
extern void EvrRtxThreadFlagsGet (uint32_t thread_flags); extern void EvrRtxThreadFlagsGet (uint32_t thread_flags);
#else #else
#define EvrRtxThreadFlagsGet(thread_flags) #define EvrRtxThreadFlagsGet(thread_flags)
@ -742,7 +782,7 @@ extern void EvrRtxThreadFlagsGet (uint32_t thread_flags);
\param[in] options flags options (osFlagsXxxx). \param[in] options flags options (osFlagsXxxx).
\param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_DISABLE))
extern void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout); extern void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout);
#else #else
#define EvrRtxThreadFlagsWait(flags, options, timeout) #define EvrRtxThreadFlagsWait(flags, options, timeout)
@ -754,7 +794,7 @@ extern void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t ti
\param[in] options flags options (osFlagsXxxx). \param[in] options flags options (osFlagsXxxx).
\param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out. \param[in] timeout \ref CMSIS_RTOS_TimeOutValue or 0 in case of no time-out.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_PENDING_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_PENDING_DISABLE))
extern void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint32_t timeout); extern void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint32_t timeout);
#else #else
#define EvrRtxThreadFlagsWaitPending(flags, options, timeout) #define EvrRtxThreadFlagsWaitPending(flags, options, timeout)
@ -762,11 +802,12 @@ extern void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint
/** /**
\brief Event on wait timeout for thread flags (Op) \brief Event on wait timeout for thread flags (Op)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE))
extern void EvrRtxThreadFlagsWaitTimeout (void); extern void EvrRtxThreadFlagsWaitTimeout (osThreadId_t thread_id);
#else #else
#define EvrRtxThreadFlagsWaitTimeout() #define EvrRtxThreadFlagsWaitTimeout(thread_id)
#endif #endif
/** /**
@ -774,11 +815,12 @@ extern void EvrRtxThreadFlagsWaitTimeout (void);
\param[in] flags flags to wait for. \param[in] flags flags to wait for.
\param[in] options flags options (osFlagsXxxx). \param[in] options flags options (osFlagsXxxx).
\param[in] thread_flags thread flags before clearing. \param[in] thread_flags thread flags before clearing.
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE))
extern void EvrRtxThreadFlagsWaitCompleted (uint32_t flags, uint32_t options, uint32_t thread_flags); extern void EvrRtxThreadFlagsWaitCompleted (uint32_t flags, uint32_t options, uint32_t thread_flags, osThreadId_t thread_id);
#else #else
#define EvrRtxThreadFlagsWaitCompleted(flags, options, thread_flags) #define EvrRtxThreadFlagsWaitCompleted(flags, options, thread_flags, thread_id)
#endif #endif
/** /**
@ -786,41 +828,74 @@ extern void EvrRtxThreadFlagsWaitCompleted (uint32_t flags, uint32_t options, ui
\param[in] flags flags to wait for. \param[in] flags flags to wait for.
\param[in] options flags options (osFlagsXxxx). \param[in] options flags options (osFlagsXxxx).
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE))
extern void EvrRtxThreadFlagsWaitNotCompleted (uint32_t flags, uint32_t options); extern void EvrRtxThreadFlagsWaitNotCompleted (uint32_t flags, uint32_t options);
#else #else
#define EvrRtxThreadFlagsWaitNotCompleted(flags, options) #define EvrRtxThreadFlagsWaitNotCompleted(flags, options)
#endif #endif
// ==== Generic Wait Events ====
/** /**
\brief Event on wait for timeout (API) \brief Event on delay error (Error)
\param[in] status extended execution status.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_ERROR_DISABLE))
extern void EvrRtxDelayError (int32_t status);
#else
#define EvrRtxDelayError(status)
#endif
/**
\brief Event on delay for specified time (API)
\param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value. \param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_DISABLE))
extern void EvrRtxThreadDelay (uint32_t ticks); extern void EvrRtxDelay (uint32_t ticks);
#else #else
#define EvrRtxThreadDelay(ticks) #define EvrRtxDelay(ticks)
#endif #endif
/** /**
\brief Event on wait until specified time (API) \brief Event on delay until specified time (API)
\param[in] ticks absolute time in ticks. \param[in] ticks absolute time in ticks.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_UNTIL_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_UNTIL_DISABLE))
extern void EvrRtxThreadDelayUntil (uint32_t ticks); extern void EvrRtxDelayUntil (uint32_t ticks);
#else #else
#define EvrRtxThreadDelayUntil(ticks) #define EvrRtxDelayUntil(ticks)
#endif #endif
/** /**
\brief Event on completed wait (Op) \brief Event on delay started (Op)
\param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value.
*/ */
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_STARTED_DISABLE))
extern void EvrRtxThreadDelayCompleted (void); extern void EvrRtxDelayStarted (uint32_t ticks);
#else #else
#define EvrRtxThreadDelayCompleted() #define EvrRtxDelayStarted(ticks)
#endif #endif
/**
\brief Event on delay until specified time started (Op)
\param[in] ticks \ref CMSIS_RTOS_TimeOutValue "time ticks" value.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_UNTIL_STARTED_DISABLE))
extern void EvrRtxDelayUntilStarted (uint32_t ticks);
#else
#define EvrRtxDelayUntilStarted(ticks)
#endif
/**
\brief Event on delay completed (Op)
\param[in] thread_id thread ID obtained by \ref osThreadNew or \ref osThreadGetId.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_COMPLETED_DISABLE))
extern void EvrRtxDelayCompleted (osThreadId_t thread_id);
#else
#define EvrRtxDelayCompleted(thread_id)
#endif
// ==== Timer Events ==== // ==== Timer Events ====

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -38,22 +38,22 @@ extern "C"
/// Kernel Information /// Kernel Information
#define osRtxVersionAPI 20010003 ///< API version (2.1.3) #define osRtxVersionAPI 20010003 ///< API version (2.1.3)
#define osRtxVersionKernel 50040000 ///< Kernel version (5.4.0) #define osRtxVersionKernel 50050000 ///< Kernel version (5.5.0)
#define osRtxKernelId "RTX V5.4.0" ///< Kernel identification string #define osRtxKernelId "RTX V5.5.0" ///< Kernel identification string
// ==== Common definitions ==== // ==== Common definitions ====
/// Object Identifier definitions /// Object Identifier definitions
#define osRtxIdInvalid 0x00U #define osRtxIdInvalid 0x00U
#define osRtxIdThread 0x01U #define osRtxIdThread 0xF1U
#define osRtxIdTimer 0x02U #define osRtxIdTimer 0xF2U
#define osRtxIdEventFlags 0x03U #define osRtxIdEventFlags 0xF3U
#define osRtxIdMutex 0x04U #define osRtxIdMutex 0xF5U
#define osRtxIdSemaphore 0x05U #define osRtxIdSemaphore 0xF6U
#define osRtxIdMemoryPool 0x06U #define osRtxIdMemoryPool 0xF7U
#define osRtxIdMessage 0x07U #define osRtxIdMessage 0xF9U
#define osRtxIdMessageQueue 0x08U #define osRtxIdMessageQueue 0xFAU
/// Object Flags definitions /// Object Flags definitions
#define osRtxFlagSystemObject 0x01U #define osRtxFlagSystemObject 0x01U

View File

@ -100,6 +100,7 @@ Undef_Cont
SUB LR, LR, R0 SUB LR, LR, R0
LDR R0, [SP, #28] ; Restore stacked SPSR LDR R0, [SP, #28] ; Restore stacked SPSR
MSR SPSR_CXSF, R0 MSR SPSR_CXSF, R0
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers POP {R0-R4, R12} ; Restore stacked APCS registers
ADD SP, SP, #8 ; Adjust SP for already-restored banked registers ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
MOVS PC, LR MOVS PC, LR
@ -129,6 +130,7 @@ PAbt_Handler\
POP {R12, LR} ; Get stack adjustment & discard dummy LR POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stack APCS registers POP {R0-R4, R12} ; Restore stack APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -143,7 +145,6 @@ DAbt_Handler\
SUB LR, LR, #8 ; Pre-adjust LR SUB LR, LR, #8 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
CLREX ; State of exclusive monitors unknown after taken data abort
MRC p15, 0, R0, c5, c0, 0 ; DFSR MRC p15, 0, R0, c5, c0, 0 ; DFSR
MRC p15, 0, R1, c6, c0, 0 ; DFAR MRC p15, 0, R1, c6, c0, 0 ; DFAR
@ -158,6 +159,7 @@ DAbt_Handler\
POP {R12, LR} ; Get stack adjustment & discard dummy LR POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers POP {R0-R4, R12} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -211,6 +213,7 @@ IRQ_End
SUBS R1, R1, #1 ; Decrement IRQ nesting level SUBS R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX ; Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} ; Restore stacked APCS registers POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from IRQ handler RFEFD SP! ; Return from IRQ handler
@ -281,6 +284,7 @@ SVC_ContextCheck
SUB R1, R1, #1 ; Decrement IRQ nesting level SUB R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX ; Clear exclusive monitor
POP {R0-R3, R12, LR} ; Restore stacked APCS registers POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -293,6 +297,7 @@ SVC_User
LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
BLX R12 ; Call SVC Function BLX R12 ; Call SVC Function
SVC_Done SVC_Done
CLREX ; Clear exclusive monitor
POP {R4, R5, R12, LR} POP {R4, R5, R12, LR}
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception

View File

@ -103,6 +103,7 @@ Undef_Cont:
SUB LR, LR, R0 SUB LR, LR, R0
LDR R0, [SP, #28] // Restore stacked SPSR LDR R0, [SP, #28] // Restore stacked SPSR
MSR SPSR_cxsf, R0 MSR SPSR_cxsf, R0
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stacked APCS registers POP {R0-R4, R12} // Restore stacked APCS registers
ADD SP, SP, #8 // Adjust SP for already-restored banked registers ADD SP, SP, #8 // Adjust SP for already-restored banked registers
MOVS PC, LR MOVS PC, LR
@ -134,6 +135,7 @@ PAbt_Handler:
POP {R12, LR} // Get stack adjustment & discard dummy LR POP {R12, LR} // Get stack adjustment & discard dummy LR
ADD SP, SP, R12 // Unadjust stack ADD SP, SP, R12 // Unadjust stack
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stack APCS registers POP {R0-R4, R12} // Restore stack APCS registers
RFEFD SP! // Return from exception RFEFD SP! // Return from exception
@ -149,7 +151,6 @@ DAbt_Handler:
SUB LR, LR, #8 // Pre-adjust LR SUB LR, LR, #8 // Pre-adjust LR
SRSFD SP!, #MODE_ABT // Save LR and SPRS to ABT mode stack SRSFD SP!, #MODE_ABT // Save LR and SPRS to ABT mode stack
PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
CLREX // State of exclusive monitors unknown after taken data abort
MRC p15, 0, R0, c5, c0, 0 // DFSR MRC p15, 0, R0, c5, c0, 0 // DFSR
MRC p15, 0, R1, c6, c0, 0 // DFAR MRC p15, 0, R1, c6, c0, 0 // DFAR
@ -164,6 +165,7 @@ DAbt_Handler:
POP {R12, LR} // Get stack adjustment & discard dummy LR POP {R12, LR} // Get stack adjustment & discard dummy LR
ADD SP, SP, R12 // Unadjust stack ADD SP, SP, R12 // Unadjust stack
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stacked APCS registers POP {R0-R4, R12} // Restore stacked APCS registers
RFEFD SP! // Return from exception RFEFD SP! // Return from exception
@ -217,6 +219,7 @@ IRQ_End:
SUBS R1, R1, #1 // Decrement IRQ nesting level SUBS R1, R1, #1 // Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX // Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} // Restore stacked APCS registers POP {R0-R3, R12, LR} // Restore stacked APCS registers
RFEFD SP! // Return from IRQ handler RFEFD SP! // Return from IRQ handler
@ -286,6 +289,7 @@ SVC_ContextCheck:
SUB R1, R1, #1 // Decrement IRQ nesting level SUB R1, R1, #1 // Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX // Clear exclusive monitor
POP {R0-R3, R12, LR} // Restore stacked APCS registers POP {R0-R3, R12, LR} // Restore stacked APCS registers
RFEFD SP! // Return from exception RFEFD SP! // Return from exception
@ -300,6 +304,7 @@ SVC_User:
BLX R12 // Call SVC Function BLX R12 // Call SVC Function
SVC_Done: SVC_Done:
CLREX // Clear exclusive monitor
POP {R4, R5, R12, LR} POP {R4, R5, R12, LR}
RFEFD SP! // Return from exception RFEFD SP! // Return from exception

View File

@ -100,6 +100,7 @@ Undef_Cont
SUB LR, LR, R0 SUB LR, LR, R0
LDR R0, [SP, #28] ; Restore stacked SPSR LDR R0, [SP, #28] ; Restore stacked SPSR
MSR SPSR_CXSF, R0 MSR SPSR_CXSF, R0
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers POP {R0-R4, R12} ; Restore stacked APCS registers
ADD SP, SP, #8 ; Adjust SP for already-restored banked registers ADD SP, SP, #8 ; Adjust SP for already-restored banked registers
MOVS PC, LR MOVS PC, LR
@ -126,6 +127,7 @@ PAbt_Handler
POP {R12, LR} ; Get stack adjustment & discard dummy LR POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stack APCS registers POP {R0-R4, R12} ; Restore stack APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -137,7 +139,6 @@ DAbt_Handler
SUB LR, LR, #8 ; Pre-adjust LR SUB LR, LR, #8 ; Pre-adjust LR
SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack SRSFD SP!, #MODE_ABT ; Save LR and SPRS to ABT mode stack
PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack PUSH {R0-R4, R12} ; Save APCS corruptible registers to ABT mode stack
CLREX ; State of exclusive monitors unknown after taken data abort
MRC p15, 0, R0, c5, c0, 0 ; DFSR MRC p15, 0, R0, c5, c0, 0 ; DFSR
MRC p15, 0, R1, c6, c0, 0 ; DFAR MRC p15, 0, R1, c6, c0, 0 ; DFAR
@ -152,6 +153,7 @@ DAbt_Handler
POP {R12, LR} ; Get stack adjustment & discard dummy LR POP {R12, LR} ; Get stack adjustment & discard dummy LR
ADD SP, SP, R12 ; Unadjust stack ADD SP, SP, R12 ; Unadjust stack
CLREX ; Clear exclusive monitor
POP {R0-R4, R12} ; Restore stacked APCS registers POP {R0-R4, R12} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -202,6 +204,7 @@ IRQ_End
SUBS R1, R1, #1 ; Decrement IRQ nesting level SUBS R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX ; Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} ; Restore stacked APCS registers POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from IRQ handler RFEFD SP! ; Return from IRQ handler
@ -269,6 +272,7 @@ SVC_ContextCheck
SUB R1, R1, #1 ; Decrement IRQ nesting level SUB R1, R1, #1 ; Decrement IRQ nesting level
STR R1, [R0] STR R1, [R0]
CLREX ; Clear exclusive monitor
POP {R0-R3, R12, LR} ; Restore stacked APCS registers POP {R0-R3, R12, LR} ; Restore stacked APCS registers
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception
@ -281,6 +285,7 @@ SVC_User
LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address LDR R12,[R5,R12,LSL #2] ; Load SVC Function Address
BLX R12 ; Call SVC Function BLX R12 ; Call SVC Function
SVC_Done SVC_Done
CLREX ; Clear exclusive monitor
POP {R4, R5, R12, LR} POP {R4, R5, R12, LR}
RFEFD SP! ; Return from exception RFEFD SP! ; Return from exception

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -33,8 +33,10 @@
static osStatus_t svcRtxDelay (uint32_t ticks) { static osStatus_t svcRtxDelay (uint32_t ticks) {
if (ticks != 0U) { if (ticks != 0U) {
if (!osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) { if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
EvrRtxThreadDelayCompleted(); EvrRtxDelayStarted(ticks);
} else {
EvrRtxDelayCompleted(osRtxThreadGetRunning());
} }
} }
@ -47,13 +49,15 @@ static osStatus_t svcRtxDelayUntil (uint32_t ticks) {
ticks -= osRtxInfo.kernel.tick; ticks -= osRtxInfo.kernel.tick;
if ((ticks == 0U) || (ticks > 0x7FFFFFFFU)) { if ((ticks == 0U) || (ticks > 0x7FFFFFFFU)) {
EvrRtxThreadError(NULL, (int32_t)osErrorParameter); EvrRtxDelayError((int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osErrorParameter; return osErrorParameter;
} }
if (!osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) { if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
EvrRtxThreadDelayCompleted(); EvrRtxDelayUntilStarted(ticks);
} else {
EvrRtxDelayCompleted(osRtxThreadGetRunning());
} }
return osOK; return osOK;
@ -72,9 +76,9 @@ SVC0_1(DelayUntil, osStatus_t, uint32_t)
osStatus_t osDelay (uint32_t ticks) { osStatus_t osDelay (uint32_t ticks) {
osStatus_t status; osStatus_t status;
EvrRtxThreadDelay(ticks); EvrRtxDelay(ticks);
if (IsIrqMode() || IsIrqMasked()) { if (IsIrqMode() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR); EvrRtxDelayError((int32_t)osErrorISR);
status = osErrorISR; status = osErrorISR;
} else { } else {
status = __svcDelay(ticks); status = __svcDelay(ticks);
@ -86,9 +90,9 @@ osStatus_t osDelay (uint32_t ticks) {
osStatus_t osDelayUntil (uint32_t ticks) { osStatus_t osDelayUntil (uint32_t ticks) {
osStatus_t status; osStatus_t status;
EvrRtxThreadDelayUntil(ticks); EvrRtxDelayUntil(ticks);
if (IsIrqMode() || IsIrqMasked()) { if (IsIrqMode() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR); EvrRtxDelayError((int32_t)osErrorISR);
status = osErrorISR; status = osErrorISR;
} else { } else {
status = __svcDelayUntil(ticks); status = __svcDelayUntil(ticks);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -338,17 +338,9 @@ static uint32_t svcRtxEventFlagsGet (osEventFlagsId_t ef_id) {
/// \note API identical to osEventFlagsWait /// \note API identical to osEventFlagsWait
static uint32_t svcRtxEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) { static uint32_t svcRtxEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t options, uint32_t timeout) {
os_event_flags_t *ef = osRtxEventFlagsId(ef_id); os_event_flags_t *ef = osRtxEventFlagsId(ef_id);
os_thread_t *running_thread; os_thread_t *thread;
uint32_t event_flags; uint32_t event_flags;
// Check running thread
running_thread = osRtxThreadGetRunning();
if (running_thread == NULL) {
EvrRtxEventFlagsError(ef, osRtxErrorKernelNotRunning);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osError);
}
// Check parameters // Check parameters
if ((ef == NULL) || (ef->id != osRtxIdEventFlags) || if ((ef == NULL) || (ef->id != osRtxIdEventFlags) ||
((flags & ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U)) != 0U)) { ((flags & ~(((uint32_t)1U << osRtxEventFlagsLimit) - 1U)) != 0U)) {
@ -365,12 +357,13 @@ static uint32_t svcRtxEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, ui
// Check if timeout is specified // Check if timeout is specified
if (timeout != 0U) { if (timeout != 0U) {
EvrRtxEventFlagsWaitPending(ef, flags, options, timeout); EvrRtxEventFlagsWaitPending(ef, flags, options, timeout);
// Store waiting flags and options
running_thread->wait_flags = flags;
running_thread->flags_options = (uint8_t)options;
// Suspend current Thread // Suspend current Thread
if (osRtxThreadWaitEnter(osRtxThreadWaitingEventFlags, timeout)) { if (osRtxThreadWaitEnter(osRtxThreadWaitingEventFlags, timeout)) {
osRtxThreadListPut(osRtxObject(ef), running_thread); thread = osRtxThreadGetRunning();
osRtxThreadListPut(osRtxObject(ef), thread);
// Store waiting flags and options
thread->wait_flags = flags;
thread->flags_options = (uint8_t)options;
} else { } else {
EvrRtxEventFlagsWaitTimeout(ef); EvrRtxEventFlagsWaitTimeout(ef);
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -29,7 +29,7 @@
#ifdef RTE_Compiler_EventRecorder #ifdef RTE_Compiler_EventRecorder
//lint -e923 -e9074 -e9078 -emacro((835,845),EventID) [MISRA Note 13] //lint -e923 -e9074 -e9078 [MISRA Note 13]
/// Event IDs for "RTX Memory Management" /// Event IDs for "RTX Memory Management"
#define EvtRtxMemoryInit EventID(EventLevelOp, EvtRtxMemoryNo, 0x00U) #define EvtRtxMemoryInit EventID(EventLevelOp, EvtRtxMemoryNo, 0x00U)
@ -45,6 +45,7 @@
#define EvtRtxKernelInitialized EventID(EventLevelOp, EvtRtxKernelNo, 0x02U) #define EvtRtxKernelInitialized EventID(EventLevelOp, EvtRtxKernelNo, 0x02U)
#define EvtRtxKernelGetInfo EventID(EventLevelAPI, EvtRtxKernelNo, 0x03U) #define EvtRtxKernelGetInfo EventID(EventLevelAPI, EvtRtxKernelNo, 0x03U)
#define EvtRtxKernelInfoRetrieved EventID(EventLevelOp, EvtRtxKernelNo, 0x04U) #define EvtRtxKernelInfoRetrieved EventID(EventLevelOp, EvtRtxKernelNo, 0x04U)
#define EvtRtxKernelInfoRetrieved_Detail EventID(EventLevelDetail, EvtRtxKernelNo, 0x05U)
#define EvtRtxKernelGetState EventID(EventLevelAPI, EvtRtxKernelNo, 0x06U) #define EvtRtxKernelGetState EventID(EventLevelAPI, EvtRtxKernelNo, 0x06U)
#define EvtRtxKernelStart EventID(EventLevelAPI, EvtRtxKernelNo, 0x07U) #define EvtRtxKernelStart EventID(EventLevelAPI, EvtRtxKernelNo, 0x07U)
#define EvtRtxKernelStarted EventID(EventLevelOp, EvtRtxKernelNo, 0x08U) #define EvtRtxKernelStarted EventID(EventLevelOp, EvtRtxKernelNo, 0x08U)
@ -66,7 +67,6 @@
/// Event IDs for "RTX Thread" /// Event IDs for "RTX Thread"
#define EvtRtxThreadError EventID(EventLevelError, EvtRtxThreadNo, 0x00U) #define EvtRtxThreadError EventID(EventLevelError, EvtRtxThreadNo, 0x00U)
#define EvtRtxThreadNew EventID(EventLevelAPI, EvtRtxThreadNo, 0x01U) #define EvtRtxThreadNew EventID(EventLevelAPI, EvtRtxThreadNo, 0x01U)
#define EvtRtxThreadNew_Detail EventID(EventLevelDetail, EvtRtxThreadNo, 0x02U)
#define EvtRtxThreadCreated_Addr EventID(EventLevelOp, EvtRtxThreadNo, 0x03U) #define EvtRtxThreadCreated_Addr EventID(EventLevelOp, EvtRtxThreadNo, 0x03U)
#define EvtRtxThreadCreated_Name EventID(EventLevelOp, EvtRtxThreadNo, 0x2CU) #define EvtRtxThreadCreated_Name EventID(EventLevelOp, EvtRtxThreadNo, 0x2CU)
#define EvtRtxThreadGetName EventID(EventLevelAPI, EvtRtxThreadNo, 0x04U) #define EvtRtxThreadGetName EventID(EventLevelAPI, EvtRtxThreadNo, 0x04U)
@ -75,6 +75,7 @@
#define EvtRtxThreadGetStackSize EventID(EventLevelAPI, EvtRtxThreadNo, 0x08U) #define EvtRtxThreadGetStackSize EventID(EventLevelAPI, EvtRtxThreadNo, 0x08U)
#define EvtRtxThreadGetStackSpace EventID(EventLevelAPI, EvtRtxThreadNo, 0x09U) #define EvtRtxThreadGetStackSpace EventID(EventLevelAPI, EvtRtxThreadNo, 0x09U)
#define EvtRtxThreadSetPriority EventID(EventLevelAPI, EvtRtxThreadNo, 0x0AU) #define EvtRtxThreadSetPriority EventID(EventLevelAPI, EvtRtxThreadNo, 0x0AU)
#define EvtRtxThreadPriorityUpdated EventID(EventLevelOp, EvtRtxThreadNo, 0x2DU)
#define EvtRtxThreadGetPriority EventID(EventLevelAPI, EvtRtxThreadNo, 0x0BU) #define EvtRtxThreadGetPriority EventID(EventLevelAPI, EvtRtxThreadNo, 0x0BU)
#define EvtRtxThreadYield EventID(EventLevelAPI, EvtRtxThreadNo, 0x0CU) #define EvtRtxThreadYield EventID(EventLevelAPI, EvtRtxThreadNo, 0x0CU)
#define EvtRtxThreadSuspend EventID(EventLevelAPI, EvtRtxThreadNo, 0x0DU) #define EvtRtxThreadSuspend EventID(EventLevelAPI, EvtRtxThreadNo, 0x0DU)
@ -86,34 +87,41 @@
#define EvtRtxThreadJoin EventID(EventLevelAPI, EvtRtxThreadNo, 0x13U) #define EvtRtxThreadJoin EventID(EventLevelAPI, EvtRtxThreadNo, 0x13U)
#define EvtRtxThreadJoinPending EventID(EventLevelOp, EvtRtxThreadNo, 0x14U) #define EvtRtxThreadJoinPending EventID(EventLevelOp, EvtRtxThreadNo, 0x14U)
#define EvtRtxThreadJoined EventID(EventLevelOp, EvtRtxThreadNo, 0x15U) #define EvtRtxThreadJoined EventID(EventLevelOp, EvtRtxThreadNo, 0x15U)
#define EvtRtxThreadBlocked EventID(EventLevelOp, EvtRtxThreadNo, 0x16U) #define EvtRtxThreadBlocked EventID(EventLevelDetail, EvtRtxThreadNo, 0x16U)
#define EvtRtxThreadUnblocked EventID(EventLevelOp, EvtRtxThreadNo, 0x17U) #define EvtRtxThreadUnblocked EventID(EventLevelDetail, EvtRtxThreadNo, 0x17U)
#define EvtRtxThreadPreempted EventID(EventLevelOp, EvtRtxThreadNo, 0x18U) #define EvtRtxThreadPreempted EventID(EventLevelDetail, EvtRtxThreadNo, 0x18U)
#define EvtRtxThreadSwitched EventID(EventLevelOp, EvtRtxThreadNo, 0x19U) #define EvtRtxThreadSwitched EventID(EventLevelOp, EvtRtxThreadNo, 0x19U)
#define EvtRtxThreadExit EventID(EventLevelAPI, EvtRtxThreadNo, 0x1AU) #define EvtRtxThreadExit EventID(EventLevelAPI, EvtRtxThreadNo, 0x1AU)
#define EvtRtxThreadTerminate EventID(EventLevelAPI, EvtRtxThreadNo, 0x1BU) #define EvtRtxThreadTerminate EventID(EventLevelAPI, EvtRtxThreadNo, 0x1BU)
#define EvtRtxThreadDestroyed EventID(EventLevelOp, EvtRtxThreadNo, 0x1CU) #define EvtRtxThreadDestroyed EventID(EventLevelOp, EvtRtxThreadNo, 0x1CU)
#define EvtRtxThreadGetCount EventID(EventLevelAPI, EvtRtxThreadNo, 0x1DU) #define EvtRtxThreadGetCount EventID(EventLevelAPI, EvtRtxThreadNo, 0x1DU)
#define EvtRtxThreadEnumerate EventID(EventLevelAPI, EvtRtxThreadNo, 0x1EU) #define EvtRtxThreadEnumerate EventID(EventLevelAPI, EvtRtxThreadNo, 0x1EU)
#define EvtRtxThreadFlagsSet EventID(EventLevelAPI, EvtRtxThreadNo, 0x1FU)
#define EvtRtxThreadFlagsSetDone EventID(EventLevelOp, EvtRtxThreadNo, 0x20U) /// Event IDs for "RTX Thread Flags"
#define EvtRtxThreadFlagsClear EventID(EventLevelAPI, EvtRtxThreadNo, 0x21U) #define EvtRtxThreadFlagsError EventID(EventLevelError, EvtRtxThreadFlagsNo, 0x00U)
#define EvtRtxThreadFlagsClearDone EventID(EventLevelOp, EvtRtxThreadNo, 0x22U) #define EvtRtxThreadFlagsSet EventID(EventLevelAPI, EvtRtxThreadFlagsNo, 0x01U)
#define EvtRtxThreadFlagsGet EventID(EventLevelAPI, EvtRtxThreadNo, 0x23U) #define EvtRtxThreadFlagsSetDone EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x02U)
#define EvtRtxThreadFlagsWait EventID(EventLevelAPI, EvtRtxThreadNo, 0x24U) #define EvtRtxThreadFlagsClear EventID(EventLevelAPI, EvtRtxThreadFlagsNo, 0x03U)
#define EvtRtxThreadFlagsWaitPending EventID(EventLevelOp, EvtRtxThreadNo, 0x25U) #define EvtRtxThreadFlagsClearDone EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x04U)
#define EvtRtxThreadFlagsWaitTimeout EventID(EventLevelOp, EvtRtxThreadNo, 0x26U) #define EvtRtxThreadFlagsGet EventID(EventLevelAPI, EvtRtxThreadFlagsNo, 0x05U)
#define EvtRtxThreadFlagsWaitCompleted EventID(EventLevelOp, EvtRtxThreadNo, 0x27U) #define EvtRtxThreadFlagsWait EventID(EventLevelAPI, EvtRtxThreadFlagsNo, 0x06U)
#define EvtRtxThreadFlagsWaitNotCompleted EventID(EventLevelOp, EvtRtxThreadNo, 0x28U) #define EvtRtxThreadFlagsWaitPending EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x07U)
#define EvtRtxThreadDelay EventID(EventLevelAPI, EvtRtxThreadNo, 0x29U) #define EvtRtxThreadFlagsWaitTimeout EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x08U)
#define EvtRtxThreadDelayUntil EventID(EventLevelAPI, EvtRtxThreadNo, 0x2AU) #define EvtRtxThreadFlagsWaitCompleted EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x09U)
#define EvtRtxThreadDelayCompleted EventID(EventLevelOp, EvtRtxThreadNo, 0x2BU) #define EvtRtxThreadFlagsWaitNotCompleted EventID(EventLevelOp, EvtRtxThreadFlagsNo, 0x0AU)
/// Event IDs for "RTX Generic Wait"
#define EvtRtxDelayError EventID(EventLevelError, EvtRtxWaitNo, 0x00U)
#define EvtRtxDelay EventID(EventLevelAPI, EvtRtxWaitNo, 0x01U)
#define EvtRtxDelayUntil EventID(EventLevelAPI, EvtRtxWaitNo, 0x02U)
#define EvtRtxDelayStarted EventID(EventLevelOp, EvtRtxWaitNo, 0x03U)
#define EvtRtxDelayUntilStarted EventID(EventLevelOp, EvtRtxWaitNo, 0x04U)
#define EvtRtxDelayCompleted EventID(EventLevelOp, EvtRtxWaitNo, 0x05U)
/// Event IDs for "RTX Timer" /// Event IDs for "RTX Timer"
#define EvtRtxTimerError EventID(EventLevelError, EvtRtxTimerNo, 0x00U) #define EvtRtxTimerError EventID(EventLevelError, EvtRtxTimerNo, 0x00U)
#define EvtRtxTimerCallback EventID(EventLevelOp, EvtRtxTimerNo, 0x01U) #define EvtRtxTimerCallback EventID(EventLevelOp, EvtRtxTimerNo, 0x01U)
#define EvtRtxTimerNew EventID(EventLevelAPI, EvtRtxTimerNo, 0x02U) #define EvtRtxTimerNew EventID(EventLevelAPI, EvtRtxTimerNo, 0x02U)
#define EvtRtxTimerNew_Detail EventID(EventLevelDetail, EvtRtxTimerNo, 0x03U)
#define EvtRtxTimerCreated EventID(EventLevelOp, EvtRtxTimerNo, 0x04U) #define EvtRtxTimerCreated EventID(EventLevelOp, EvtRtxTimerNo, 0x04U)
#define EvtRtxTimerGetName EventID(EventLevelAPI, EvtRtxTimerNo, 0x05U) #define EvtRtxTimerGetName EventID(EventLevelAPI, EvtRtxTimerNo, 0x05U)
#define EvtRtxTimerStart EventID(EventLevelAPI, EvtRtxTimerNo, 0x07U) #define EvtRtxTimerStart EventID(EventLevelAPI, EvtRtxTimerNo, 0x07U)
@ -127,7 +135,6 @@
/// Event IDs for "RTX Event Flags" /// Event IDs for "RTX Event Flags"
#define EvtRtxEventFlagsError EventID(EventLevelError, EvtRtxEventFlagsNo, 0x00U) #define EvtRtxEventFlagsError EventID(EventLevelError, EvtRtxEventFlagsNo, 0x00U)
#define EvtRtxEventFlagsNew EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x01U) #define EvtRtxEventFlagsNew EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x01U)
#define EvtRtxEventFlagsNew_Detail EventID(EventLevelDetail, EvtRtxEventFlagsNo, 0x02U)
#define EvtRtxEventFlagsCreated EventID(EventLevelOp, EvtRtxEventFlagsNo, 0x03U) #define EvtRtxEventFlagsCreated EventID(EventLevelOp, EvtRtxEventFlagsNo, 0x03U)
#define EvtRtxEventFlagsGetName EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x04U) #define EvtRtxEventFlagsGetName EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x04U)
#define EvtRtxEventFlagsSet EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x06U) #define EvtRtxEventFlagsSet EventID(EventLevelAPI, EvtRtxEventFlagsNo, 0x06U)
@ -146,7 +153,6 @@
/// Event IDs for "RTX Mutex" /// Event IDs for "RTX Mutex"
#define EvtRtxMutexError EventID(EventLevelError, EvtRtxMutexNo, 0x00U) #define EvtRtxMutexError EventID(EventLevelError, EvtRtxMutexNo, 0x00U)
#define EvtRtxMutexNew EventID(EventLevelAPI, EvtRtxMutexNo, 0x01U) #define EvtRtxMutexNew EventID(EventLevelAPI, EvtRtxMutexNo, 0x01U)
#define EvtRtxMutexNew_Detail EventID(EventLevelDetail, EvtRtxMutexNo, 0x02U)
#define EvtRtxMutexCreated EventID(EventLevelOp, EvtRtxMutexNo, 0x03U) #define EvtRtxMutexCreated EventID(EventLevelOp, EvtRtxMutexNo, 0x03U)
#define EvtRtxMutexGetName EventID(EventLevelAPI, EvtRtxMutexNo, 0x04U) #define EvtRtxMutexGetName EventID(EventLevelAPI, EvtRtxMutexNo, 0x04U)
#define EvtRtxMutexAcquire EventID(EventLevelAPI, EvtRtxMutexNo, 0x06U) #define EvtRtxMutexAcquire EventID(EventLevelAPI, EvtRtxMutexNo, 0x06U)
@ -163,7 +169,6 @@
/// Event IDs for "RTX Semaphore" /// Event IDs for "RTX Semaphore"
#define EvtRtxSemaphoreError EventID(EventLevelError, EvtRtxSemaphoreNo, 0x00U) #define EvtRtxSemaphoreError EventID(EventLevelError, EvtRtxSemaphoreNo, 0x00U)
#define EvtRtxSemaphoreNew EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x01U) #define EvtRtxSemaphoreNew EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x01U)
#define EvtRtxSemaphoreNew_Detail EventID(EventLevelDetail, EvtRtxSemaphoreNo, 0x02U)
#define EvtRtxSemaphoreCreated EventID(EventLevelOp, EvtRtxSemaphoreNo, 0x03U) #define EvtRtxSemaphoreCreated EventID(EventLevelOp, EvtRtxSemaphoreNo, 0x03U)
#define EvtRtxSemaphoreGetName EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x04U) #define EvtRtxSemaphoreGetName EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x04U)
#define EvtRtxSemaphoreAcquire EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x06U) #define EvtRtxSemaphoreAcquire EventID(EventLevelAPI, EvtRtxSemaphoreNo, 0x06U)
@ -180,7 +185,6 @@
/// Event IDs for "RTX Memory Pool" /// Event IDs for "RTX Memory Pool"
#define EvtRtxMemoryPoolError EventID(EventLevelError, EvtRtxMemoryPoolNo, 0x00U) #define EvtRtxMemoryPoolError EventID(EventLevelError, EvtRtxMemoryPoolNo, 0x00U)
#define EvtRtxMemoryPoolNew EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x01U) #define EvtRtxMemoryPoolNew EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x01U)
#define EvtRtxMemoryPoolNew_Detail EventID(EventLevelDetail, EvtRtxMemoryPoolNo, 0x02U)
#define EvtRtxMemoryPoolCreated EventID(EventLevelOp, EvtRtxMemoryPoolNo, 0x03U) #define EvtRtxMemoryPoolCreated EventID(EventLevelOp, EvtRtxMemoryPoolNo, 0x03U)
#define EvtRtxMemoryPoolGetName EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x04U) #define EvtRtxMemoryPoolGetName EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x04U)
#define EvtRtxMemoryPoolAlloc EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x06U) #define EvtRtxMemoryPoolAlloc EventID(EventLevelAPI, EvtRtxMemoryPoolNo, 0x06U)
@ -201,7 +205,6 @@
/// Event IDs for "RTX Message Queue" /// Event IDs for "RTX Message Queue"
#define EvtRtxMessageQueueError EventID(EventLevelError, EvtRtxMessageQueueNo, 0x00U) #define EvtRtxMessageQueueError EventID(EventLevelError, EvtRtxMessageQueueNo, 0x00U)
#define EvtRtxMessageQueueNew EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x01U) #define EvtRtxMessageQueueNew EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x01U)
#define EvtRtxMessageQueueNew_Detail EventID(EventLevelDetail, EvtRtxMessageQueueNo, 0x02U)
#define EvtRtxMessageQueueCreated EventID(EventLevelOp, EvtRtxMessageQueueNo, 0x03U) #define EvtRtxMessageQueueCreated EventID(EventLevelOp, EvtRtxMessageQueueNo, 0x03U)
#define EvtRtxMessageQueueGetName EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x04U) #define EvtRtxMessageQueueGetName EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x04U)
#define EvtRtxMessageQueuePut EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x06U) #define EvtRtxMessageQueuePut EventID(EventLevelAPI, EvtRtxMessageQueueNo, 0x06U)
@ -348,12 +351,18 @@ __WEAK void EvrRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_INFO_RETRIEVED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_INFO_RETRIEVED_DISABLE))
__WEAK void EvrRtxKernelInfoRetrieved (osVersion_t *version, char *id_buf) { __WEAK void EvrRtxKernelInfoRetrieved (const osVersion_t *version, const char *id_buf, uint32_t id_size) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxKernelInfoRetrieved, (uint32_t)version->api, (uint32_t)version->kernel, (uint32_t)id_buf, 0U); if (version != NULL) {
(void)EventRecord2(EvtRtxKernelInfoRetrieved, version->api, version->kernel);
}
if (id_buf != NULL) {
(void)EventRecordData(EvtRtxKernelInfoRetrieved_Detail, id_buf, id_size);
}
#else #else
(void)version; (void)version;
(void)id_buf; (void)id_buf;
(void)id_size;
#endif #endif
} }
#endif #endif
@ -540,9 +549,6 @@ __WEAK void EvrRtxThreadError (osThreadId_t thread_id, int32_t status) {
__WEAK void EvrRtxThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) { __WEAK void EvrRtxThreadNew (osThreadFunc_t func, void *argument, const osThreadAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxThreadNew, (uint32_t)func, (uint32_t)argument, (uint32_t)attr, 0U); (void)EventRecord4(EvtRtxThreadNew, (uint32_t)func, (uint32_t)argument, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxThreadNew_Detail, attr, sizeof (osThreadAttr_t));
}
#else #else
(void)func; (void)func;
(void)argument; (void)argument;
@ -632,6 +638,17 @@ __WEAK void EvrRtxThreadSetPriority (osThreadId_t thread_id, osPriority_t priori
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_PRIORITY_UPDATED_DISABLE))
__WEAK void EvrRtxThreadPriorityUpdated (osThreadId_t thread_id, osPriority_t priority) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadPriorityUpdated, (uint32_t)thread_id, (uint32_t)priority);
#else
(void)thread_id;
(void)priority;
#endif
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_GET_PRIORITY_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_GET_PRIORITY_DISABLE))
__WEAK void EvrRtxThreadGetPriority (osThreadId_t thread_id, osPriority_t priority) { __WEAK void EvrRtxThreadGetPriority (osThreadId_t thread_id, osPriority_t priority) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
@ -835,7 +852,21 @@ __WEAK void EvrRtxThreadEnumerate (osThreadId_t *thread_array, uint32_t array_it
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DISABLE))
// ==== Thread Flags Events ====
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_ERROR_DISABLE))
__WEAK void EvrRtxThreadFlagsError (osThreadId_t thread_id, int32_t status) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsError, (uint32_t)thread_id, (uint32_t)status);
#else
(void)thread_id;
(void)status;
#endif
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DISABLE))
__WEAK void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) { __WEAK void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsSet, (uint32_t)thread_id, flags); (void)EventRecord2(EvtRtxThreadFlagsSet, (uint32_t)thread_id, flags);
@ -846,7 +877,7 @@ __WEAK void EvrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DONE_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_SET_DONE_DISABLE))
__WEAK void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_flags) { __WEAK void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_flags) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsSetDone, (uint32_t)thread_id, thread_flags); (void)EventRecord2(EvtRtxThreadFlagsSetDone, (uint32_t)thread_id, thread_flags);
@ -857,7 +888,7 @@ __WEAK void EvrRtxThreadFlagsSetDone (osThreadId_t thread_id, uint32_t thread_fl
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DISABLE))
__WEAK void EvrRtxThreadFlagsClear (uint32_t flags) { __WEAK void EvrRtxThreadFlagsClear (uint32_t flags) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsClear, flags, 0U); (void)EventRecord2(EvtRtxThreadFlagsClear, flags, 0U);
@ -867,7 +898,7 @@ __WEAK void EvrRtxThreadFlagsClear (uint32_t flags) {
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DONE_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_CLEAR_DONE_DISABLE))
__WEAK void EvrRtxThreadFlagsClearDone (uint32_t thread_flags) { __WEAK void EvrRtxThreadFlagsClearDone (uint32_t thread_flags) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsClearDone, thread_flags, 0U); (void)EventRecord2(EvtRtxThreadFlagsClearDone, thread_flags, 0U);
@ -877,7 +908,7 @@ __WEAK void EvrRtxThreadFlagsClearDone (uint32_t thread_flags) {
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_GET_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_GET_DISABLE))
__WEAK void EvrRtxThreadFlagsGet (uint32_t thread_flags) { __WEAK void EvrRtxThreadFlagsGet (uint32_t thread_flags) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsGet, thread_flags, 0U); (void)EventRecord2(EvtRtxThreadFlagsGet, thread_flags, 0U);
@ -887,7 +918,7 @@ __WEAK void EvrRtxThreadFlagsGet (uint32_t thread_flags) {
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_DISABLE))
__WEAK void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) { __WEAK void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxThreadFlagsWait, flags, options, timeout, 0U); (void)EventRecord4(EvtRtxThreadFlagsWait, flags, options, timeout, 0U);
@ -899,7 +930,7 @@ __WEAK void EvrRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t ti
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_PENDING_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_PENDING_DISABLE))
__WEAK void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint32_t timeout) { __WEAK void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint32_t timeout) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxThreadFlagsWaitPending, flags, options, timeout, 0U); (void)EventRecord4(EvtRtxThreadFlagsWaitPending, flags, options, timeout, 0U);
@ -911,27 +942,30 @@ __WEAK void EvrRtxThreadFlagsWaitPending (uint32_t flags, uint32_t options, uint
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_TIMEOUT_DISABLE))
__WEAK void EvrRtxThreadFlagsWaitTimeout (void) { __WEAK void EvrRtxThreadFlagsWaitTimeout (osThreadId_t thread_id) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsWaitTimeout, 0U, 0U); (void)EventRecord2(EvtRtxThreadFlagsWaitTimeout, (uint32_t)thread_id, 0U);
#else
(void)thread_id;
#endif #endif
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_COMPLETED_DISABLE))
__WEAK void EvrRtxThreadFlagsWaitCompleted (uint32_t flags, uint32_t options, uint32_t thread_flags) { __WEAK void EvrRtxThreadFlagsWaitCompleted (uint32_t flags, uint32_t options, uint32_t thread_flags, osThreadId_t thread_id) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxThreadFlagsWaitCompleted, flags, options, thread_flags, 0U); (void)EventRecord4(EvtRtxThreadFlagsWaitCompleted, flags, options, thread_flags, (uint32_t)thread_id);
#else #else
(void)flags; (void)flags;
(void)options; (void)options;
(void)thread_flags; (void)thread_flags;
(void)thread_id;
#endif #endif
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THFLAGS != 0) && !defined(EVR_RTX_THREAD_FLAGS_WAIT_NOT_COMPLETED_DISABLE))
__WEAK void EvrRtxThreadFlagsWaitNotCompleted (uint32_t flags, uint32_t options) { __WEAK void EvrRtxThreadFlagsWaitNotCompleted (uint32_t flags, uint32_t options) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadFlagsWaitNotCompleted, flags, options); (void)EventRecord2(EvtRtxThreadFlagsWaitNotCompleted, flags, options);
@ -942,30 +976,66 @@ __WEAK void EvrRtxThreadFlagsWaitNotCompleted (uint32_t flags, uint32_t options)
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_DISABLE))
__WEAK void EvrRtxThreadDelay (uint32_t ticks) { // ==== Generic Wait Events ====
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_ERROR_DISABLE))
__WEAK void EvrRtxDelayError (int32_t status) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadDelay, ticks, 0U); (void)EventRecord2(EvtRtxDelayError, (uint32_t)status, 0U);
#else
(void)status;
#endif
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_DISABLE))
__WEAK void EvrRtxDelay (uint32_t ticks) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxDelay, ticks, 0U);
#else #else
(void)ticks; (void)ticks;
#endif #endif
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_UNTIL_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_UNTIL_DISABLE))
__WEAK void EvrRtxThreadDelayUntil (uint32_t ticks) { __WEAK void EvrRtxDelayUntil (uint32_t ticks) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadDelayUntil, ticks, 0U); (void)EventRecord2(EvtRtxDelayUntil, ticks, 0U);
#else #else
(void)ticks; (void)ticks;
#endif #endif
} }
#endif #endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_THREAD != 0) && !defined(EVR_RTX_THREAD_DELAY_COMPLETED_DISABLE)) #if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_STARTED_DISABLE))
__WEAK void EvrRtxThreadDelayCompleted (void) { __WEAK void EvrRtxDelayStarted (uint32_t ticks) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxThreadDelayCompleted, 0U, 0U); (void)EventRecord2(EvtRtxDelayStarted, ticks, 0U);
#else
(void)ticks;
#endif
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_UNTIL_STARTED_DISABLE))
__WEAK void EvrRtxDelayUntilStarted (uint32_t ticks) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxDelayUntilStarted, ticks, 0U);
#else
(void)ticks;
#endif
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_WAIT != 0) && !defined(EVR_RTX_DELAY_COMPLETED_DISABLE))
__WEAK void EvrRtxDelayCompleted (osThreadId_t thread_id) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxDelayCompleted, (uint32_t)thread_id, 0U);
#else
(void)thread_id;
#endif #endif
} }
#endif #endif
@ -999,9 +1069,6 @@ __WEAK void EvrRtxTimerCallback (osTimerFunc_t func, void *argument) {
__WEAK void EvrRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) { __WEAK void EvrRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const osTimerAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxTimerNew, (uint32_t)func, (uint32_t)type, (uint32_t)argument, (uint32_t)attr); (void)EventRecord4(EvtRtxTimerNew, (uint32_t)func, (uint32_t)type, (uint32_t)argument, (uint32_t)attr);
if (attr != NULL) {
(void)EventRecordData(EvtRtxTimerNew_Detail, attr, sizeof (osTimerAttr_t));
}
#else #else
(void)func; (void)func;
(void)type; (void)type;
@ -1123,9 +1190,6 @@ __WEAK void EvrRtxEventFlagsError (osEventFlagsId_t ef_id, int32_t status) {
__WEAK void EvrRtxEventFlagsNew (const osEventFlagsAttr_t *attr) { __WEAK void EvrRtxEventFlagsNew (const osEventFlagsAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxEventFlagsNew, (uint32_t)attr, 0U); (void)EventRecord2(EvtRtxEventFlagsNew, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxEventFlagsNew_Detail, attr, sizeof (osEventFlagsAttr_t));
}
#else #else
(void)attr; (void)attr;
#endif #endif
@ -1308,9 +1372,6 @@ __WEAK void EvrRtxMutexError (osMutexId_t mutex_id, int32_t status) {
__WEAK void EvrRtxMutexNew (const osMutexAttr_t *attr) { __WEAK void EvrRtxMutexNew (const osMutexAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxMutexNew, (uint32_t)attr, 0U); (void)EventRecord2(EvtRtxMutexNew, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxMutexNew_Detail, attr, sizeof (osMutexAttr_t));
}
#else #else
(void)attr; (void)attr;
#endif #endif
@ -1462,9 +1523,6 @@ __WEAK void EvrRtxSemaphoreError (osSemaphoreId_t semaphore_id, int32_t status)
__WEAK void EvrRtxSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) { __WEAK void EvrRtxSemaphoreNew (uint32_t max_count, uint32_t initial_count, const osSemaphoreAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxSemaphoreNew, max_count, initial_count, (uint32_t)attr, 0U); (void)EventRecord4(EvtRtxSemaphoreNew, max_count, initial_count, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxSemaphoreNew_Detail, attr, sizeof (osSemaphoreAttr_t));
}
#else #else
(void)max_count; (void)max_count;
(void)initial_count; (void)initial_count;
@ -1618,9 +1676,6 @@ __WEAK void EvrRtxMemoryPoolError (osMemoryPoolId_t mp_id, int32_t status) {
__WEAK void EvrRtxMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) { __WEAK void EvrRtxMemoryPoolNew (uint32_t block_count, uint32_t block_size, const osMemoryPoolAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxMemoryPoolNew, block_count, block_size, (uint32_t)attr, 0U); (void)EventRecord4(EvtRtxMemoryPoolNew, block_count, block_size, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxMemoryPoolNew_Detail, attr, sizeof (osMemoryPoolAttr_t));
}
#else #else
(void)block_count; (void)block_count;
(void)block_size; (void)block_size;
@ -1819,9 +1874,6 @@ __WEAK void EvrRtxMessageQueueError (osMessageQueueId_t mq_id, int32_t status) {
__WEAK void EvrRtxMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) { __WEAK void EvrRtxMessageQueueNew (uint32_t msg_count, uint32_t msg_size, const osMessageQueueAttr_t *attr) {
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
(void)EventRecord4(EvtRtxMessageQueueNew, msg_count, msg_size, (uint32_t)attr, 0U); (void)EventRecord4(EvtRtxMessageQueueNew, msg_count, msg_size, (uint32_t)attr, 0U);
if (attr != NULL) {
(void)EventRecordData(EvtRtxMessageQueueNew_Detail, attr, sizeof (osMemoryPoolAttr_t));
}
#else #else
(void)msg_count; (void)msg_count;
(void)msg_size; (void)msg_size;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -80,18 +80,6 @@ static osStatus_t svcRtxKernelInitialize (void) {
return osError; return osError;
} }
if (osRtxConfig.thread_stack_size < (64U + 8U)) {
EvrRtxKernelError(osRtxErrorInvalidThreadStack);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osError;
}
if ((osRtxConfig.isr_queue.data == NULL) || (osRtxConfig.isr_queue.max == 0U)) {
EvrRtxKernelError((int32_t)osError);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osError;
}
#if (DOMAIN_NS == 1) #if (DOMAIN_NS == 1)
// Initialize Secure Process Stack // Initialize Secure Process Stack
if (TZ_InitContextSystem_S() == 0U) { if (TZ_InitContextSystem_S() == 0U) {
@ -131,68 +119,60 @@ static osStatus_t svcRtxKernelInitialize (void) {
// Initialize Memory Pools (Fixed Block Size) // Initialize Memory Pools (Fixed Block Size)
if (osRtxConfig.mpi.stack != NULL) { if (osRtxConfig.mpi.stack != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.stack, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.stack,
osRtxConfig.mpi.stack->max_blocks, osRtxConfig.mpi.stack->max_blocks,
osRtxConfig.mpi.stack->block_size, osRtxConfig.mpi.stack->block_size,
osRtxConfig.mpi.stack->block_base) != 0U) { osRtxConfig.mpi.stack->block_base);
osRtxInfo.mpi.stack = osRtxConfig.mpi.stack; osRtxInfo.mpi.stack = osRtxConfig.mpi.stack;
}
} }
if (osRtxConfig.mpi.thread != NULL) { if (osRtxConfig.mpi.thread != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.thread, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.thread,
osRtxConfig.mpi.thread->max_blocks, osRtxConfig.mpi.thread->max_blocks,
osRtxConfig.mpi.thread->block_size, osRtxConfig.mpi.thread->block_size,
osRtxConfig.mpi.thread->block_base) != 0U) { osRtxConfig.mpi.thread->block_base);
osRtxInfo.mpi.thread = osRtxConfig.mpi.thread; osRtxInfo.mpi.thread = osRtxConfig.mpi.thread;
}
} }
if (osRtxConfig.mpi.timer != NULL) { if (osRtxConfig.mpi.timer != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.timer, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.timer,
osRtxConfig.mpi.timer->max_blocks, osRtxConfig.mpi.timer->max_blocks,
osRtxConfig.mpi.timer->block_size, osRtxConfig.mpi.timer->block_size,
osRtxConfig.mpi.timer->block_base) != 0U) { osRtxConfig.mpi.timer->block_base);
osRtxInfo.mpi.timer = osRtxConfig.mpi.timer; osRtxInfo.mpi.timer = osRtxConfig.mpi.timer;
}
} }
if (osRtxConfig.mpi.event_flags != NULL) { if (osRtxConfig.mpi.event_flags != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.event_flags, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.event_flags,
osRtxConfig.mpi.event_flags->max_blocks, osRtxConfig.mpi.event_flags->max_blocks,
osRtxConfig.mpi.event_flags->block_size, osRtxConfig.mpi.event_flags->block_size,
osRtxConfig.mpi.event_flags->block_base) != 0U) { osRtxConfig.mpi.event_flags->block_base);
osRtxInfo.mpi.event_flags = osRtxConfig.mpi.event_flags; osRtxInfo.mpi.event_flags = osRtxConfig.mpi.event_flags;
}
} }
if (osRtxConfig.mpi.mutex != NULL) { if (osRtxConfig.mpi.mutex != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.mutex, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.mutex,
osRtxConfig.mpi.mutex->max_blocks, osRtxConfig.mpi.mutex->max_blocks,
osRtxConfig.mpi.mutex->block_size, osRtxConfig.mpi.mutex->block_size,
osRtxConfig.mpi.mutex->block_base) != 0U) { osRtxConfig.mpi.mutex->block_base);
osRtxInfo.mpi.mutex = osRtxConfig.mpi.mutex; osRtxInfo.mpi.mutex = osRtxConfig.mpi.mutex;
}
} }
if (osRtxConfig.mpi.semaphore != NULL) { if (osRtxConfig.mpi.semaphore != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.semaphore, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.semaphore,
osRtxConfig.mpi.semaphore->max_blocks, osRtxConfig.mpi.semaphore->max_blocks,
osRtxConfig.mpi.semaphore->block_size, osRtxConfig.mpi.semaphore->block_size,
osRtxConfig.mpi.semaphore->block_base) != 0U) { osRtxConfig.mpi.semaphore->block_base);
osRtxInfo.mpi.semaphore = osRtxConfig.mpi.semaphore; osRtxInfo.mpi.semaphore = osRtxConfig.mpi.semaphore;
}
} }
if (osRtxConfig.mpi.memory_pool != NULL) { if (osRtxConfig.mpi.memory_pool != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.memory_pool, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.memory_pool,
osRtxConfig.mpi.memory_pool->max_blocks, osRtxConfig.mpi.memory_pool->max_blocks,
osRtxConfig.mpi.memory_pool->block_size, osRtxConfig.mpi.memory_pool->block_size,
osRtxConfig.mpi.memory_pool->block_base) != 0U) { osRtxConfig.mpi.memory_pool->block_base);
osRtxInfo.mpi.memory_pool = osRtxConfig.mpi.memory_pool; osRtxInfo.mpi.memory_pool = osRtxConfig.mpi.memory_pool;
}
} }
if (osRtxConfig.mpi.message_queue != NULL) { if (osRtxConfig.mpi.message_queue != NULL) {
if (osRtxMemoryPoolInit(osRtxConfig.mpi.message_queue, (void)osRtxMemoryPoolInit(osRtxConfig.mpi.message_queue,
osRtxConfig.mpi.message_queue->max_blocks, osRtxConfig.mpi.message_queue->max_blocks,
osRtxConfig.mpi.message_queue->block_size, osRtxConfig.mpi.message_queue->block_size,
osRtxConfig.mpi.message_queue->block_base) != 0U) { osRtxConfig.mpi.message_queue->block_base);
osRtxInfo.mpi.message_queue = osRtxConfig.mpi.message_queue; osRtxInfo.mpi.message_queue = osRtxConfig.mpi.message_queue;
}
} }
osRtxInfo.kernel.state = osRtxKernelReady; osRtxInfo.kernel.state = osRtxKernelReady;
@ -221,7 +201,7 @@ static osStatus_t svcRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint3
memcpy(id_buf, osRtxKernelId, size); memcpy(id_buf, osRtxKernelId, size);
} }
EvrRtxKernelInfoRetrieved(version, id_buf); EvrRtxKernelInfoRetrieved(version, id_buf, id_size);
return osOK; return osOK;
} }
@ -268,11 +248,6 @@ static osStatus_t svcRtxKernelStart (void) {
// Switch to Ready Thread with highest Priority // Switch to Ready Thread with highest Priority
thread = osRtxThreadListGet(&osRtxInfo.thread.ready); thread = osRtxThreadListGet(&osRtxInfo.thread.ready);
if (thread == NULL) {
EvrRtxKernelError((int32_t)osError);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osError;
}
osRtxThreadSwitch(thread); osRtxThreadSwitch(thread);
if ((osRtxConfig.flags & osRtxConfigPrivilegedMode) != 0U) { if ((osRtxConfig.flags & osRtxConfigPrivilegedMode) != 0U) {
@ -413,6 +388,7 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) {
os_thread_t *thread; os_thread_t *thread;
os_timer_t *timer; os_timer_t *timer;
uint32_t delay; uint32_t delay;
uint32_t ticks;
if (osRtxInfo.kernel.state != osRtxKernelSuspended) { if (osRtxInfo.kernel.state != osRtxKernelSuspended) {
EvrRtxKernelResumed(); EvrRtxKernelResumed();
@ -420,46 +396,40 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) {
return; return;
} }
osRtxInfo.kernel.tick += sleep_ticks;
// Process Thread Delay list // Process Thread Delay list
thread = osRtxInfo.thread.delay_list; thread = osRtxInfo.thread.delay_list;
if (thread != NULL) { if (thread != NULL) {
delay = sleep_ticks; delay = sleep_ticks;
if (delay >= thread->delay) { do {
if (delay >= thread->delay) {
delay -= thread->delay; delay -= thread->delay;
osRtxInfo.kernel.tick += thread->delay; thread->delay = 1U;
thread->delay = 1U;
do {
osRtxThreadDelayTick(); osRtxThreadDelayTick();
if (delay == 0U) { thread = osRtxInfo.thread.delay_list;
break; } else {
} thread->delay -= delay;
delay--; delay = 0U;
osRtxInfo.kernel.tick++; }
} while (osRtxInfo.thread.delay_list != NULL); } while ((thread != NULL) && (delay != 0U));
} else {
thread->delay -= delay;
osRtxInfo.kernel.tick += delay;
}
} else {
osRtxInfo.kernel.tick += sleep_ticks;
} }
// Process Active Timer list // Process Active Timer list
timer = osRtxInfo.timer.list; timer = osRtxInfo.timer.list;
if (timer != NULL) { if (timer != NULL) {
if (sleep_ticks >= timer->tick) { ticks = sleep_ticks;
sleep_ticks -= timer->tick; do {
timer->tick = 1U; if (ticks >= timer->tick) {
do { ticks -= timer->tick;
timer->tick = 1U;
osRtxInfo.timer.tick(); osRtxInfo.timer.tick();
if (sleep_ticks == 0U) { timer = osRtxInfo.timer.list;
break; } else {
} timer->tick -= ticks;
sleep_ticks--; ticks = 0U;
} while (osRtxInfo.timer.list != NULL); }
} else { } while ((timer != NULL) && (ticks != 0U));
timer->tick -= sleep_ticks;
}
} }
osRtxInfo.kernel.state = osRtxKernelRunning; osRtxInfo.kernel.state = osRtxKernelRunning;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -52,6 +52,9 @@ __attribute__((section(".bss.os")));
#endif #endif
// ISR FIFO Queue // ISR FIFO Queue
#if (OS_ISR_FIFO_QUEUE < 4)
#error "Invalid ISR FIFO Queue size!"
#endif
static void *os_isr_queue[OS_ISR_FIFO_QUEUE] \ static void *os_isr_queue[OS_ISR_FIFO_QUEUE] \
__attribute__((section(".bss.os"))); __attribute__((section(".bss.os")));
@ -365,6 +368,51 @@ __attribute__((section(".bss.os.msgqueue.mem")));
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0)) #if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
// Initial Thread configuration covered also Thread Flags and Generic Wait
#if defined(OS_EVR_THREAD_FILTER)
#if !defined(OS_EVR_THFLAGS_FILTER)
#define OS_EVR_THFLAGS_FILTER OS_EVR_THREAD_FILTER
#endif
#if !defined(OS_EVR_WAIT_FILTER)
#define OS_EVR_WAIT_FILTER OS_EVR_THREAD_FILTER
#endif
#endif
// Migrate initial filter configuration
#if defined(OS_EVR_MEMORY_FILTER)
#define OS_EVR_MEMORY_LEVEL (((OS_EVR_MEMORY_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMORY_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_KERNEL_FILTER)
#define OS_EVR_KERNEL_LEVEL (((OS_EVR_KERNEL_FILTER & 0x80U) != 0U) ? (OS_EVR_KERNEL_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_THREAD_FILTER)
#define OS_EVR_THREAD_LEVEL (((OS_EVR_THREAD_FILTER & 0x80U) != 0U) ? (OS_EVR_THREAD_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_WAIT_FILTER)
#define OS_EVR_WAIT_LEVEL (((OS_EVR_WAIT_FILTER & 0x80U) != 0U) ? (OS_EVR_WAIT_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_THFLAGS_FILTER)
#define OS_EVR_THFLAGS_LEVEL (((OS_EVR_THFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_THFLAGS_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_EVFLAGS_FILTER)
#define OS_EVR_EVFLAGS_LEVEL (((OS_EVR_EVFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_EVFLAGS_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_TIMER_FILTER)
#define OS_EVR_TIMER_LEVEL (((OS_EVR_TIMER_FILTER & 0x80U) != 0U) ? (OS_EVR_TIMER_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MUTEX_FILTER)
#define OS_EVR_MUTEX_LEVEL (((OS_EVR_MUTEX_FILTER & 0x80U) != 0U) ? (OS_EVR_MUTEX_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_SEMAPHORE_FILTER)
#define OS_EVR_SEMAPHORE_LEVEL (((OS_EVR_SEMAPHORE_FILTER & 0x80U) != 0U) ? (OS_EVR_SEMAPHORE_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MEMPOOL_FILTER)
#define OS_EVR_MEMPOOL_LEVEL (((OS_EVR_MEMPOOL_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMPOOL_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MSGQUEUE_FILTER)
#define OS_EVR_MSGQUEUE_LEVEL (((OS_EVR_MSGQUEUE_FILTER & 0x80U) != 0U) ? (OS_EVR_MSGQUEUE_FILTER & 0x0FU) : 0U)
#endif
#if defined(RTE_Compiler_EventRecorder) #if defined(RTE_Compiler_EventRecorder)
// Event Recorder Initialize // Event Recorder Initialize
@ -372,33 +420,17 @@ __STATIC_INLINE void evr_initialize (void) {
(void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START); (void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START);
#if ((OS_EVR_MEMORY_FILTER & 0x80U) != 0U) (void)EventRecorderEnable(OS_EVR_MEMORY_LEVEL, EvtRtxMemoryNo, EvtRtxMemoryNo);
(void)EventRecorderEnable(OS_EVR_MEMORY_FILTER & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo); (void)EventRecorderEnable(OS_EVR_KERNEL_LEVEL, EvtRtxKernelNo, EvtRtxKernelNo);
#endif (void)EventRecorderEnable(OS_EVR_THREAD_LEVEL, EvtRtxThreadNo, EvtRtxThreadNo);
#if ((OS_EVR_KERNEL_FILTER & 0x80U) != 0U) (void)EventRecorderEnable(OS_EVR_WAIT_LEVEL, EvtRtxWaitNo, EvtRtxWaitNo);
(void)EventRecorderEnable(OS_EVR_KERNEL_FILTER & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo); (void)EventRecorderEnable(OS_EVR_THFLAGS_LEVEL, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
#endif (void)EventRecorderEnable(OS_EVR_EVFLAGS_LEVEL, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
#if ((OS_EVR_THREAD_FILTER & 0x80U) != 0U) (void)EventRecorderEnable(OS_EVR_TIMER_LEVEL, EvtRtxTimerNo, EvtRtxTimerNo);
(void)EventRecorderEnable(OS_EVR_THREAD_FILTER & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo); (void)EventRecorderEnable(OS_EVR_MUTEX_LEVEL, EvtRtxMutexNo, EvtRtxMutexNo);
#endif (void)EventRecorderEnable(OS_EVR_SEMAPHORE_LEVEL, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
#if ((OS_EVR_TIMER_FILTER & 0x80U) != 0U) (void)EventRecorderEnable(OS_EVR_MEMPOOL_LEVEL, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
(void)EventRecorderEnable(OS_EVR_TIMER_FILTER & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo); (void)EventRecorderEnable(OS_EVR_MSGQUEUE_LEVEL, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
#endif
#if ((OS_EVR_EVFLAGS_FILTER & 0x80U) != 0U)
(void)EventRecorderEnable(OS_EVR_EVFLAGS_FILTER & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
#endif
#if ((OS_EVR_MUTEX_FILTER & 0x80U) != 0U)
(void)EventRecorderEnable(OS_EVR_MUTEX_FILTER & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo);
#endif
#if ((OS_EVR_SEMAPHORE_FILTER & 0x80U) != 0U)
(void)EventRecorderEnable(OS_EVR_SEMAPHORE_FILTER & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
#endif
#if ((OS_EVR_MEMPOOL_FILTER & 0x80U) != 0U)
(void)EventRecorderEnable(OS_EVR_MEMPOOL_FILTER & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
#endif
#if ((OS_EVR_MSGQUEUE_FILTER & 0x80U) != 0U)
(void)EventRecorderEnable(OS_EVR_MSGQUEUE_FILTER & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
#endif
} }
#else #else
@ -673,14 +705,14 @@ __WEAK void software_init_hook (void) {
// ======== // ========
// RTOS Kernel Pre-Initialization Hook // RTOS Kernel Pre-Initialization Hook
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
void osRtxKernelPreInit (void); void osRtxKernelPreInit (void);
void osRtxKernelPreInit (void) { void osRtxKernelPreInit (void) {
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
if (osKernelGetState() == osKernelInactive) { if (osKernelGetState() == osKernelInactive) {
evr_initialize(); evr_initialize();
} }
#endif
} }
#endif
// C/C++ Standard Library Multithreading Interface // C/C++ Standard Library Multithreading Interface

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -177,7 +177,7 @@ static void osRtxMessageQueuePostProcess (os_message_t *msg) {
msg->id = osRtxIdInvalid; msg->id = osRtxIdInvalid;
(void)osRtxMemoryPoolFree(&mq->mp_info, msg); (void)osRtxMemoryPoolFree(&mq->mp_info, msg);
// Check if Thread is waiting to send a Message // Check if Thread is waiting to send a Message
if ((mq->thread_list != NULL) && (mq->thread_list->state == osRtxThreadWaitingMessagePut)) { if (mq->thread_list != NULL) {
// Try to allocate memory // Try to allocate memory
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
msg0 = osRtxMemoryPoolAlloc(&mq->mp_info); msg0 = osRtxMemoryPoolAlloc(&mq->mp_info);
@ -494,7 +494,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr
msg->id = osRtxIdInvalid; msg->id = osRtxIdInvalid;
(void)osRtxMemoryPoolFree(&mq->mp_info, msg); (void)osRtxMemoryPoolFree(&mq->mp_info, msg);
// Check if Thread is waiting to send a Message // Check if Thread is waiting to send a Message
if ((mq->thread_list != NULL) && (mq->thread_list->state == osRtxThreadWaitingMessagePut)) { if (mq->thread_list != NULL) {
// Try to allocate memory // Try to allocate memory
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
msg = osRtxMemoryPoolAlloc(&mq->mp_info); msg = osRtxMemoryPoolAlloc(&mq->mp_info);
@ -544,7 +544,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr
} }
/// Get maximum number of messages in a Message Queue. /// Get maximum number of messages in a Message Queue.
/// \note API identical to osMessageGetCapacity /// \note API identical to osMessageQueueGetCapacity
static uint32_t svcRtxMessageQueueGetCapacity (osMessageQueueId_t mq_id) { static uint32_t svcRtxMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
os_message_queue_t *mq = osRtxMessageQueueId(mq_id); os_message_queue_t *mq = osRtxMessageQueueId(mq_id);
@ -561,7 +561,7 @@ static uint32_t svcRtxMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
} }
/// Get maximum message size in a Memory Pool. /// Get maximum message size in a Memory Pool.
/// \note API identical to osMessageGetMsgSize /// \note API identical to osMessageQueueGetMsgSize
static uint32_t svcRtxMessageQueueGetMsgSize (osMessageQueueId_t mq_id) { static uint32_t svcRtxMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
os_message_queue_t *mq = osRtxMessageQueueId(mq_id); os_message_queue_t *mq = osRtxMessageQueueId(mq_id);
@ -578,7 +578,7 @@ static uint32_t svcRtxMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
} }
/// Get number of queued messages in a Message Queue. /// Get number of queued messages in a Message Queue.
/// \note API identical to osMessageGetCount /// \note API identical to osMessageQueueGetCount
static uint32_t svcRtxMessageQueueGetCount (osMessageQueueId_t mq_id) { static uint32_t svcRtxMessageQueueGetCount (osMessageQueueId_t mq_id) {
os_message_queue_t *mq = osRtxMessageQueueId(mq_id); os_message_queue_t *mq = osRtxMessageQueueId(mq_id);
@ -595,7 +595,7 @@ static uint32_t svcRtxMessageQueueGetCount (osMessageQueueId_t mq_id) {
} }
/// Get number of available slots for messages in a Message Queue. /// Get number of available slots for messages in a Message Queue.
/// \note API identical to osMessageGetSpace /// \note API identical to osMessageQueueGetSpace
static uint32_t svcRtxMessageQueueGetSpace (osMessageQueueId_t mq_id) { static uint32_t svcRtxMessageQueueGetSpace (osMessageQueueId_t mq_id) {
os_message_queue_t *mq = osRtxMessageQueueId(mq_id); os_message_queue_t *mq = osRtxMessageQueueId(mq_id);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -172,12 +172,12 @@ static const char *svcRtxMutexGetName (osMutexId_t mutex_id) {
/// \note API identical to osMutexAcquire /// \note API identical to osMutexAcquire
static osStatus_t svcRtxMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) { static osStatus_t svcRtxMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
os_mutex_t *mutex = osRtxMutexId(mutex_id); os_mutex_t *mutex = osRtxMutexId(mutex_id);
os_thread_t *runnig_thread; os_thread_t *thread;
osStatus_t status; osStatus_t status;
// Check running thread // Check running thread
runnig_thread = osRtxThreadGetRunning(); thread = osRtxThreadGetRunning();
if (runnig_thread == NULL) { if (thread == NULL) {
EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning); EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osError; return osError;
@ -193,19 +193,19 @@ static osStatus_t svcRtxMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
// Check if Mutex is not locked // Check if Mutex is not locked
if (mutex->lock == 0U) { if (mutex->lock == 0U) {
// Acquire Mutex // Acquire Mutex
mutex->owner_thread = runnig_thread; mutex->owner_thread = thread;
mutex->owner_next = runnig_thread->mutex_list; mutex->owner_next = thread->mutex_list;
mutex->owner_prev = NULL; mutex->owner_prev = NULL;
if (runnig_thread->mutex_list != NULL) { if (thread->mutex_list != NULL) {
runnig_thread->mutex_list->owner_prev = mutex; thread->mutex_list->owner_prev = mutex;
} }
runnig_thread->mutex_list = mutex; thread->mutex_list = mutex;
mutex->lock = 1U; mutex->lock = 1U;
EvrRtxMutexAcquired(mutex, mutex->lock); EvrRtxMutexAcquired(mutex, mutex->lock);
status = osOK; status = osOK;
} else { } else {
// Check if Mutex is recursive and running Thread is the owner // Check if Mutex is recursive and running Thread is the owner
if (((mutex->attr & osMutexRecursive) != 0U) && (mutex->owner_thread == runnig_thread)) { if (((mutex->attr & osMutexRecursive) != 0U) && (mutex->owner_thread == thread)) {
// Try to increment lock counter // Try to increment lock counter
if (mutex->lock == osRtxMutexLockLimit) { if (mutex->lock == osRtxMutexLockLimit) {
EvrRtxMutexError(mutex, osRtxErrorMutexLockLimit); EvrRtxMutexError(mutex, osRtxErrorMutexLockLimit);
@ -221,15 +221,15 @@ static osStatus_t svcRtxMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
// Check if Priority inheritance protocol is enabled // Check if Priority inheritance protocol is enabled
if ((mutex->attr & osMutexPrioInherit) != 0U) { if ((mutex->attr & osMutexPrioInherit) != 0U) {
// Raise priority of owner Thread if lower than priority of running Thread // Raise priority of owner Thread if lower than priority of running Thread
if (mutex->owner_thread->priority < runnig_thread->priority) { if (mutex->owner_thread->priority < thread->priority) {
mutex->owner_thread->priority = runnig_thread->priority; mutex->owner_thread->priority = thread->priority;
osRtxThreadListSort(mutex->owner_thread); osRtxThreadListSort(mutex->owner_thread);
} }
} }
EvrRtxMutexAcquirePending(mutex, timeout); EvrRtxMutexAcquirePending(mutex, timeout);
// Suspend current Thread // Suspend current Thread
if (osRtxThreadWaitEnter(osRtxThreadWaitingMutex, timeout)) { if (osRtxThreadWaitEnter(osRtxThreadWaitingMutex, timeout)) {
osRtxThreadListPut(osRtxObject(mutex), runnig_thread); osRtxThreadListPut(osRtxObject(mutex), thread);
} else { } else {
EvrRtxMutexAcquireTimeout(mutex); EvrRtxMutexAcquireTimeout(mutex);
} }
@ -250,12 +250,11 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
os_mutex_t *mutex = osRtxMutexId(mutex_id); os_mutex_t *mutex = osRtxMutexId(mutex_id);
const os_mutex_t *mutex0; const os_mutex_t *mutex0;
os_thread_t *thread; os_thread_t *thread;
os_thread_t *runnig_thread;
int8_t priority; int8_t priority;
// Check running thread // Check running thread
runnig_thread = osRtxThreadGetRunning(); thread = osRtxThreadGetRunning();
if (runnig_thread == NULL) { if (thread == NULL) {
EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning); EvrRtxMutexError(mutex, osRtxErrorKernelNotRunning);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osError; return osError;
@ -276,7 +275,7 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
} }
// Check if running Thread is not the owner // Check if running Thread is not the owner
if (mutex->owner_thread != runnig_thread) { if (mutex->owner_thread != thread) {
EvrRtxMutexError(mutex, osRtxErrorMutexNotOwned); EvrRtxMutexError(mutex, osRtxErrorMutexNotOwned);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osErrorResource; return osErrorResource;
@ -296,13 +295,13 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
if (mutex->owner_prev != NULL) { if (mutex->owner_prev != NULL) {
mutex->owner_prev->owner_next = mutex->owner_next; mutex->owner_prev->owner_next = mutex->owner_next;
} else { } else {
runnig_thread->mutex_list = mutex->owner_next; thread->mutex_list = mutex->owner_next;
} }
// Restore running Thread priority // Restore running Thread priority
if ((mutex->attr & osMutexPrioInherit) != 0U) { if ((mutex->attr & osMutexPrioInherit) != 0U) {
priority = runnig_thread->priority_base; priority = thread->priority_base;
mutex0 = runnig_thread->mutex_list; mutex0 = thread->mutex_list;
while (mutex0 != NULL) { while (mutex0 != NULL) {
// Mutexes owned by running Thread // Mutexes owned by running Thread
if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) { if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) {
@ -311,7 +310,7 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
} }
mutex0 = mutex0->owner_next; mutex0 = mutex0->owner_next;
} }
runnig_thread->priority = priority; thread->priority = priority;
} }
// Check if Thread is waiting for a Mutex // Check if Thread is waiting for a Mutex
@ -407,11 +406,9 @@ static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) {
} }
// Unblock waiting threads // Unblock waiting threads
if (mutex->thread_list != NULL) { while (mutex->thread_list != NULL) {
do { thread = osRtxThreadListGet(osRtxObject(mutex));
thread = osRtxThreadListGet(osRtxObject(mutex)); osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
osRtxThreadWaitExit(thread, (uint32_t)osErrorResource, FALSE);
} while (mutex->thread_list != NULL);
} }
osRtxThreadDispatch(NULL); osRtxThreadDispatch(NULL);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -75,9 +75,7 @@ static uint32_t isr_queue_put (os_object_t *object) {
/// Get Object from ISR Queue. /// Get Object from ISR Queue.
/// \return object or NULL. /// \return object or NULL.
static os_object_t *isr_queue_get (void) { static os_object_t *isr_queue_get (void) {
#if (EXCLUSIVE_ACCESS == 0) #if (EXCLUSIVE_ACCESS != 0)
uint32_t primask = __get_PRIMASK();
#else
uint32_t n; uint32_t n;
#endif #endif
uint16_t max; uint16_t max;
@ -97,10 +95,8 @@ static os_object_t *isr_queue_get (void) {
} else { } else {
ret = NULL; ret = NULL;
} }
if (primask == 0U) { __enable_irq();
__enable_irq();
}
#else #else
if (atomic_dec16_nz(&osRtxInfo.isr_queue.cnt) != 0U) { if (atomic_dec16_nz(&osRtxInfo.isr_queue.cnt) != 0U) {
n = atomic_inc16_lim(&osRtxInfo.isr_queue.out, max); n = atomic_inc16_lim(&osRtxInfo.isr_queue.out, max);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -143,11 +143,6 @@ void osRtxThreadListPut (os_object_t *object, os_thread_t *thread) {
os_thread_t *prev, *next; os_thread_t *prev, *next;
int32_t priority; int32_t priority;
if (thread == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return;
}
priority = thread->priority; priority = thread->priority;
prev = osRtxThreadObject(object); prev = osRtxThreadObject(object);
@ -171,13 +166,11 @@ os_thread_t *osRtxThreadListGet (os_object_t *object) {
os_thread_t *thread; os_thread_t *thread;
thread = object->thread_list; thread = object->thread_list;
if (thread != NULL) { object->thread_list = thread->thread_next;
object->thread_list = thread->thread_next; if (thread->thread_next != NULL) {
if (thread->thread_next != NULL) { thread->thread_next->thread_prev = osRtxThreadObject(object);
thread->thread_next->thread_prev = osRtxThreadObject(object);
}
thread->thread_prev = NULL;
} }
thread->thread_prev = NULL;
return thread; return thread;
} }
@ -196,7 +189,7 @@ static void *osRtxThreadListRoot (os_thread_t *thread) {
os_thread_t *thread0; os_thread_t *thread0;
thread0 = thread; thread0 = thread;
while ((thread0 != NULL) && (thread0->id == osRtxIdThread)) { while (thread0->id == osRtxIdThread) {
thread0 = thread0->thread_prev; thread0 = thread0->thread_prev;
} }
return thread0; return thread0;
@ -308,29 +301,25 @@ static void osRtxThreadDelayInsert (os_thread_t *thread, uint32_t delay) {
static void osRtxThreadDelayRemove (os_thread_t *thread) { static void osRtxThreadDelayRemove (os_thread_t *thread) {
if (thread->delay == osWaitForever) { if (thread->delay == osWaitForever) {
if ((thread->delay_prev != NULL) || (osRtxInfo.thread.wait_list == thread)) { if (thread->delay_next != NULL) {
if (thread->delay_next != NULL) { thread->delay_next->delay_prev = thread->delay_prev;
thread->delay_next->delay_prev = thread->delay_prev; }
} if (thread->delay_prev != NULL) {
if (thread->delay_prev != NULL) { thread->delay_prev->delay_next = thread->delay_next;
thread->delay_prev->delay_next = thread->delay_next; thread->delay_prev = NULL;
thread->delay_prev = NULL; } else {
} else { osRtxInfo.thread.wait_list = thread->delay_next;
osRtxInfo.thread.wait_list = thread->delay_next;
}
} }
} else { } else {
if ((thread->delay_prev != NULL) || (osRtxInfo.thread.delay_list == thread)) { if (thread->delay_next != NULL) {
if (thread->delay_next != NULL) { thread->delay_next->delay += thread->delay;
thread->delay_next->delay += thread->delay; thread->delay_next->delay_prev = thread->delay_prev;
thread->delay_next->delay_prev = thread->delay_prev; }
} if (thread->delay_prev != NULL) {
if (thread->delay_prev != NULL) { thread->delay_prev->delay_next = thread->delay_next;
thread->delay_prev->delay_next = thread->delay_next; thread->delay_prev = NULL;
thread->delay_prev = NULL; } else {
} else { osRtxInfo.thread.delay_list = thread->delay_next;
osRtxInfo.thread.delay_list = thread->delay_next;
}
} }
} }
} }
@ -351,10 +340,10 @@ void osRtxThreadDelayTick (void) {
do { do {
switch (thread->state) { switch (thread->state) {
case osRtxThreadWaitingDelay: case osRtxThreadWaitingDelay:
EvrRtxThreadDelayCompleted(); EvrRtxDelayCompleted(thread);
break; break;
case osRtxThreadWaitingThreadFlags: case osRtxThreadWaitingThreadFlags:
EvrRtxThreadFlagsWaitTimeout(); EvrRtxThreadFlagsWaitTimeout(thread);
break; break;
case osRtxThreadWaitingEventFlags: case osRtxThreadWaitingEventFlags:
EvrRtxEventFlagsWaitTimeout((osEventFlagsId_t)osRtxThreadListRoot(thread)); EvrRtxEventFlagsWaitTimeout((osEventFlagsId_t)osRtxThreadListRoot(thread));
@ -449,7 +438,7 @@ void osRtxThreadDispatch (os_thread_t *thread) {
if (thread == NULL) { if (thread == NULL) {
thread_ready = osRtxInfo.thread.ready.thread_list; thread_ready = osRtxInfo.thread.ready.thread_list;
if ((kernel_state == osRtxKernelRunning) && if ((kernel_state == osRtxKernelRunning) &&
(thread_running != NULL) && (thread_ready != NULL) && (thread_ready != NULL) &&
(thread_ready->priority > thread_running->priority)) { (thread_ready->priority > thread_running->priority)) {
// Preempt running Thread // Preempt running Thread
osRtxThreadListRemove(thread_ready); osRtxThreadListRemove(thread_ready);
@ -458,7 +447,6 @@ void osRtxThreadDispatch (os_thread_t *thread) {
} }
} else { } else {
if ((kernel_state == osRtxKernelRunning) && if ((kernel_state == osRtxKernelRunning) &&
(thread_running != NULL) &&
(thread->priority > thread_running->priority)) { (thread->priority > thread_running->priority)) {
// Preempt running Thread // Preempt running Thread
osRtxThreadBlock(thread_running); osRtxThreadBlock(thread_running);
@ -503,19 +491,15 @@ bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout) {
return FALSE; return FALSE;
} }
// Check running thread
thread = osRtxThreadGetRunning();
if (thread == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
// Check if any thread is ready // Check if any thread is ready
if (osRtxInfo.thread.ready.thread_list == NULL) { if (osRtxInfo.thread.ready.thread_list == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE; return FALSE;
} }
// Get running thread
thread = osRtxThreadGetRunning();
EvrRtxThreadBlocked(thread, timeout); EvrRtxThreadBlocked(thread, timeout);
thread->state = state; thread->state = state;
@ -574,7 +558,7 @@ static void osRtxThreadPostProcess (os_thread_t *thread) {
thread_flags = ThreadFlagsCheck(thread, thread->wait_flags, thread->flags_options); thread_flags = ThreadFlagsCheck(thread, thread->wait_flags, thread->flags_options);
if (thread_flags != 0U) { if (thread_flags != 0U) {
osRtxThreadWaitExit(thread, thread_flags, FALSE); osRtxThreadWaitExit(thread, thread_flags, FALSE);
EvrRtxThreadFlagsWaitCompleted(thread->wait_flags, thread->flags_options, thread_flags); EvrRtxThreadFlagsWaitCompleted(thread->wait_flags, thread->flags_options, thread_flags, thread);
} }
} }
} }
@ -953,6 +937,7 @@ static osStatus_t svcRtxThreadSetPriority (osThreadId_t thread_id, osPriority_t
if (thread->priority != (int8_t)priority) { if (thread->priority != (int8_t)priority) {
thread->priority = (int8_t)priority; thread->priority = (int8_t)priority;
thread->priority_base = (int8_t)priority; thread->priority_base = (int8_t)priority;
EvrRtxThreadPriorityUpdated(thread, priority);
osRtxThreadListSort(thread); osRtxThreadListSort(thread);
osRtxThreadDispatch(NULL); osRtxThreadDispatch(NULL);
} }
@ -990,20 +975,19 @@ static osPriority_t svcRtxThreadGetPriority (osThreadId_t thread_id) {
/// Pass control to next thread that is in state READY. /// Pass control to next thread that is in state READY.
/// \note API identical to osThreadYield /// \note API identical to osThreadYield
static osStatus_t svcRtxThreadYield (void) { static osStatus_t svcRtxThreadYield (void) {
uint8_t kernel_state;
os_thread_t *thread_running; os_thread_t *thread_running;
os_thread_t *thread_ready; os_thread_t *thread_ready;
kernel_state = osRtxKernelGetState(); if (osRtxKernelGetState() == osRtxKernelRunning) {
thread_running = osRtxThreadGetRunning(); thread_running = osRtxThreadGetRunning();
thread_ready = osRtxInfo.thread.ready.thread_list; thread_ready = osRtxInfo.thread.ready.thread_list;
if ((kernel_state == osRtxKernelRunning) && if ((thread_ready != NULL) &&
(thread_ready != NULL) && (thread_running != NULL) && (thread_ready->priority == thread_running->priority)) {
(thread_ready->priority == thread_running->priority)) { osRtxThreadListRemove(thread_ready);
osRtxThreadListRemove(thread_ready); osRtxThreadReadyPut(thread_running);
osRtxThreadReadyPut(thread_running); EvrRtxThreadPreempted(thread_running);
EvrRtxThreadPreempted(thread_running); osRtxThreadSwitch(thread_ready);
osRtxThreadSwitch(thread_ready); }
} }
return osOK; return osOK;
@ -1215,13 +1199,6 @@ static osStatus_t svcRtxThreadJoin (osThreadId_t thread_id) {
static void svcRtxThreadExit (void) { static void svcRtxThreadExit (void) {
os_thread_t *thread; os_thread_t *thread;
// Check running thread
thread = osRtxThreadGetRunning();
if (thread == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return;
}
// Check if switch to next Ready Thread is possible // Check if switch to next Ready Thread is possible
if ((osRtxKernelGetState() != osRtxKernelRunning) || if ((osRtxKernelGetState() != osRtxKernelRunning) ||
(osRtxInfo.thread.ready.thread_list == NULL)) { (osRtxInfo.thread.ready.thread_list == NULL)) {
@ -1229,6 +1206,9 @@ static void svcRtxThreadExit (void) {
return; return;
} }
// Get running thread
thread = osRtxThreadGetRunning();
// Release owned Mutexes // Release owned Mutexes
osRtxMutexOwnerRelease(thread->mutex_list); osRtxMutexOwnerRelease(thread->mutex_list);
@ -1427,14 +1407,14 @@ static uint32_t svcRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
// Check parameters // Check parameters
if ((thread == NULL) || (thread->id != osRtxIdThread) || if ((thread == NULL) || (thread->id != osRtxIdThread) ||
((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U)) { ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U)) {
EvrRtxThreadError(thread, (int32_t)osErrorParameter); EvrRtxThreadFlagsError(thread, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorParameter); return ((uint32_t)osErrorParameter);
} }
// Check object state // Check object state
if (thread->state == osRtxThreadTerminated) { if (thread->state == osRtxThreadTerminated) {
EvrRtxThreadError(thread, (int32_t)osErrorResource); EvrRtxThreadFlagsError(thread, (int32_t)osErrorResource);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorResource); return ((uint32_t)osErrorResource);
} }
@ -1452,7 +1432,7 @@ static uint32_t svcRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
thread_flags = thread_flags0; thread_flags = thread_flags0;
} }
osRtxThreadWaitExit(thread, thread_flags0, TRUE); osRtxThreadWaitExit(thread, thread_flags0, TRUE);
EvrRtxThreadFlagsWaitCompleted(thread->wait_flags, thread->flags_options, thread_flags0); EvrRtxThreadFlagsWaitCompleted(thread->wait_flags, thread->flags_options, thread_flags0, thread);
} }
} }
@ -1470,14 +1450,14 @@ static uint32_t svcRtxThreadFlagsClear (uint32_t flags) {
// Check running thread // Check running thread
thread = osRtxThreadGetRunning(); thread = osRtxThreadGetRunning();
if (thread == NULL) { if (thread == NULL) {
EvrRtxThreadError(NULL, osRtxErrorKernelNotRunning); EvrRtxThreadFlagsError(NULL, osRtxErrorKernelNotRunning);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osError); return ((uint32_t)osError);
} }
// Check parameters // Check parameters
if ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U) { if ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U) {
EvrRtxThreadError(thread, (int32_t)osErrorParameter); EvrRtxThreadFlagsError(thread, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorParameter); return ((uint32_t)osErrorParameter);
} }
@ -1517,14 +1497,14 @@ static uint32_t svcRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_
// Check running thread // Check running thread
thread = osRtxThreadGetRunning(); thread = osRtxThreadGetRunning();
if (thread == NULL) { if (thread == NULL) {
EvrRtxThreadError(NULL, osRtxErrorKernelNotRunning); EvrRtxThreadFlagsError(NULL, osRtxErrorKernelNotRunning);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osError); return ((uint32_t)osError);
} }
// Check parameters // Check parameters
if ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U) { if ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U) {
EvrRtxThreadError(thread, (int32_t)osErrorParameter); EvrRtxThreadFlagsError(thread, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorParameter); return ((uint32_t)osErrorParameter);
} }
@ -1532,7 +1512,7 @@ static uint32_t svcRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_
// Check Thread Flags // Check Thread Flags
thread_flags = ThreadFlagsCheck(thread, flags, options); thread_flags = ThreadFlagsCheck(thread, flags, options);
if (thread_flags != 0U) { if (thread_flags != 0U) {
EvrRtxThreadFlagsWaitCompleted(flags, options, thread_flags); EvrRtxThreadFlagsWaitCompleted(flags, options, thread_flags, thread);
} else { } else {
// Check if timeout is specified // Check if timeout is specified
if (timeout != 0U) { if (timeout != 0U) {
@ -1542,7 +1522,7 @@ static uint32_t svcRtxThreadFlagsWait (uint32_t flags, uint32_t options, uint32_
thread->flags_options = (uint8_t)options; thread->flags_options = (uint8_t)options;
// Suspend current Thread // Suspend current Thread
if (!osRtxThreadWaitEnter(osRtxThreadWaitingThreadFlags, timeout)) { if (!osRtxThreadWaitEnter(osRtxThreadWaitingThreadFlags, timeout)) {
EvrRtxThreadFlagsWaitTimeout(); EvrRtxThreadFlagsWaitTimeout(thread);
} }
thread_flags = (uint32_t)osErrorTimeout; thread_flags = (uint32_t)osErrorTimeout;
} else { } else {
@ -1591,14 +1571,14 @@ uint32_t isrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
// Check parameters // Check parameters
if ((thread == NULL) || (thread->id != osRtxIdThread) || if ((thread == NULL) || (thread->id != osRtxIdThread) ||
((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U)) { ((flags & ~(((uint32_t)1U << osRtxThreadFlagsLimit) - 1U)) != 0U)) {
EvrRtxThreadError(thread, (int32_t)osErrorParameter); EvrRtxThreadFlagsError(thread, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorParameter); return ((uint32_t)osErrorParameter);
} }
// Check object state // Check object state
if (thread->state == osRtxThreadTerminated) { if (thread->state == osRtxThreadTerminated) {
EvrRtxThreadError(thread, (int32_t)osErrorResource); EvrRtxThreadFlagsError(thread, (int32_t)osErrorResource);
//lint -e{904} "Return statement before end of function" [MISRA Note 1] //lint -e{904} "Return statement before end of function" [MISRA Note 1]
return ((uint32_t)osErrorResource); return ((uint32_t)osErrorResource);
} }
@ -1623,24 +1603,17 @@ bool_t osRtxThreadStartup (void) {
bool_t ret = TRUE; bool_t ret = TRUE;
// Create Idle Thread // Create Idle Thread
if (osRtxInfo.thread.idle == NULL) { osRtxInfo.thread.idle = osRtxThreadId(
osRtxInfo.thread.idle = osRtxThreadId( svcRtxThreadNew(osRtxIdleThread, NULL, osRtxConfig.idle_thread_attr)
svcRtxThreadNew(osRtxIdleThread, NULL, osRtxConfig.idle_thread_attr) );
);
if (osRtxInfo.thread.idle == NULL) {
ret = FALSE;
}
}
// Create Timer Thread // Create Timer Thread
if (osRtxConfig.timer_mq_mcnt != 0U) { if (osRtxConfig.timer_mq_mcnt != 0U) {
osRtxInfo.timer.thread = osRtxThreadId(
svcRtxThreadNew(osRtxTimerThread, NULL, osRtxConfig.timer_thread_attr)
);
if (osRtxInfo.timer.thread == NULL) { if (osRtxInfo.timer.thread == NULL) {
osRtxInfo.timer.thread = osRtxThreadId( ret = FALSE;
svcRtxThreadNew(osRtxTimerThread, NULL, osRtxConfig.timer_thread_attr)
);
if (osRtxInfo.timer.thread == NULL) {
ret = FALSE;
}
} }
} }
@ -1892,7 +1865,7 @@ uint32_t osThreadFlagsClear (uint32_t flags) {
EvrRtxThreadFlagsClear(flags); EvrRtxThreadFlagsClear(flags);
if (IsIrqMode() || IsIrqMasked()) { if (IsIrqMode() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR); EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR);
thread_flags = (uint32_t)osErrorISR; thread_flags = (uint32_t)osErrorISR;
} else { } else {
thread_flags = __svcThreadFlagsClear(flags); thread_flags = __svcThreadFlagsClear(flags);
@ -1919,7 +1892,7 @@ uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout)
EvrRtxThreadFlagsWait(flags, options, timeout); EvrRtxThreadFlagsWait(flags, options, timeout);
if (IsIrqMode() || IsIrqMasked()) { if (IsIrqMode() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR); EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR);
thread_flags = (uint32_t)osErrorISR; thread_flags = (uint32_t)osErrorISR;
} else { } else {
thread_flags = __svcThreadFlagsWait(flags, options, timeout); thread_flags = __svcThreadFlagsWait(flags, options, timeout);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved. * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* *
* SPDX-License-Identifier: Apache-2.0 * SPDX-License-Identifier: Apache-2.0
* *
@ -119,7 +119,7 @@ static void osRtxTimerTick (void) {
} }
/// Timer Thread /// Timer Thread
__WEAK void osRtxTimerThread (void *argument) { __WEAK __NO_RETURN void osRtxTimerThread (void *argument) {
os_timer_finfo_t finfo; os_timer_finfo_t finfo;
osStatus_t status; osStatus_t status;
(void) argument; (void) argument;
@ -127,11 +127,8 @@ __WEAK void osRtxTimerThread (void *argument) {
osRtxInfo.timer.mq = osRtxMessageQueueId( osRtxInfo.timer.mq = osRtxMessageQueueId(
osMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr) osMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr)
); );
if (osRtxInfo.timer.mq == NULL) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return;
}
osRtxInfo.timer.tick = osRtxTimerTick; osRtxInfo.timer.tick = osRtxTimerTick;
for (;;) { for (;;) {
//lint -e{934} "Taking address of near auto variable" //lint -e{934} "Taking address of near auto variable"
status = osMessageQueueGet(osRtxInfo.timer.mq, &finfo, NULL, osWaitForever); status = osMessageQueueGet(osRtxInfo.timer.mq, &finfo, NULL, osWaitForever);

View File

@ -17,31 +17,31 @@
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Config/RTX_Config.c" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Config/RTX_Config.c"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mbl.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mbl.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mml.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv8mml.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm4f.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm4f.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_ca.S", "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S"
}, },
{ {
@ -73,31 +73,35 @@
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl_common.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl_common.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml_common.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml_common.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm4f.S", "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm4f.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S"
}, },
{ {
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_ca.S", "src_file" : "CMSIS/RTOS2/Source/os_systick.c",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/Source/os_systick.c"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s",
"dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S" "dest_file" : "rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S"
}, },
{ {
@ -114,10 +118,6 @@
"src_folder" : "CMSIS/RTOS2/Include/", "src_folder" : "CMSIS/RTOS2/Include/",
"dest_folder" : "rtos/TARGET_CORTEX/rtx5/Include/" "dest_folder" : "rtos/TARGET_CORTEX/rtx5/Include/"
}, },
{
"src_folder" : "CMSIS/RTOS2/Source/",
"dest_folder" : "rtos/TARGET_CORTEX/rtx5/Source/"
},
{ {
"src_folder" : "CMSIS/RTOS2/RTX/Include1/", "src_folder" : "CMSIS/RTOS2/RTX/Include1/",
"dest_folder" : "rtos/TARGET_CORTEX/rtx4/" "dest_folder" : "rtos/TARGET_CORTEX/rtx4/"