Core(A): Updated __FPU_Enable function (VFP register count detection)

pull/5767/head
Robert Rostohar 2017-10-27 11:04:39 +02:00 committed by adbridge
parent 6852fffd49
commit 80521c8369
6 changed files with 169 additions and 118 deletions


@@ -134,6 +134,7 @@
 /**
   \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
@@ -141,6 +142,7 @@
 /**
   \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
@@ -153,12 +155,13 @@ __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(u
 #endif

 /**
-  \brief   Reverse byte order in signed short value
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
 #ifndef __NO_EMBEDDED_ASM
-__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
+__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
 {
   revsh r0, r0
   bx lr
@@ -351,14 +354,16 @@ __STATIC_INLINE void __set_CPSR(uint32_t cpsr)
 /** \brief  Get Mode
     \return                Processor Mode
  */
-__STATIC_INLINE uint32_t __get_mode(void) {
+__STATIC_INLINE uint32_t __get_mode(void)
+{
   return (__get_CPSR() & 0x1FU);
 }

 /** \brief  Set Mode
     \param [in]    mode  Mode value to set
  */
-__STATIC_INLINE __ASM void __set_mode(uint32_t mode) {
+__STATIC_INLINE __ASM void __set_mode(uint32_t mode)
+{
   MOV  r1, lr
   MSR  CPSR_C, r0
   BX   r1
@@ -443,15 +448,30 @@ __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
  * Include common core functions to access Coprocessor 15 registers
  */

-#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); Rt = tmp; } while(0)
-#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = Rt; } while(0)
+#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); (Rt) = tmp; } while(0)
+#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = (Rt); } while(0)
+#define __get_CP64(cp, op1, Rt, CRm) \
+  do { \
+    uint32_t ltmp, htmp; \
+    __ASM volatile("MRRC p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
+    (Rt) = ((((uint64_t)htmp) << 32U) | ((uint64_t)ltmp)); \
+  } while(0)
+
+#define __set_CP64(cp, op1, Rt, CRm) \
+  do { \
+    const uint64_t tmp = (Rt); \
+    const uint32_t ltmp = (uint32_t)(tmp); \
+    const uint32_t htmp = (uint32_t)(tmp >> 32U); \
+    __ASM volatile("MCRR p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
+  } while(0)

 #include "cmsis_cp15.h"

 /** \brief  Clean and Invalidate the entire data or unified cache
  * \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
  */
-__STATIC_INLINE __ASM void __L1C_CleanInvalidateCache(uint32_t op) {
+__STATIC_INLINE __ASM void __L1C_CleanInvalidateCache(uint32_t op)
+{
         ARM

         PUSH    {R4-R11}
@@ -510,7 +530,8 @@ Finished
     Critical section, called from undef handler, so systick is disabled
  */
-__STATIC_INLINE __ASM void __FPU_Enable(void) {
+__STATIC_INLINE __ASM void __FPU_Enable(void)
+{
         ARM

         //Permit access to VFP/NEON, registers by modifying CPACR
@@ -528,7 +549,7 @@ __STATIC_INLINE __ASM void __FPU_Enable(void) {
         //Initialise VFP/NEON registers to 0
         MOV     R2,#0
-  IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} >= 16
         //Initialise D16 registers to 0
         VMOV    D0, R2,R2
         VMOV    D1, R2,R2
@@ -546,7 +567,7 @@ __STATIC_INLINE __ASM void __FPU_Enable(void) {
         VMOV    D13,R2,R2
         VMOV    D14,R2,R2
         VMOV    D15,R2,R2
-  ENDIF
   IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
         //Initialise D32 registers to 0
         VMOV    D16,R2,R2


@@ -25,6 +25,8 @@
 #ifndef __CMSIS_ARMCLANG_H
 #define __CMSIS_ARMCLANG_H

+#pragma clang system_header   /* treat file as system include file */
+
 #ifndef __ARM_COMPAT_H
 #include <arm_compat.h>    /* Compatibility header for ARM Compiler 5 intrinsics */
 #endif
@@ -148,38 +150,29 @@
 /**
   \brief   Reverse byte order (32 bit)
+  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
-#define __REV            __builtin_bswap32
+#define __REV(value)     __builtin_bswap32(value)

 /**
   \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
-#ifndef __NO_EMBEDDED_ASM
-__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
-{
-  uint32_t result;
-  __ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
-  return result;
-}
-#endif
+#define __REV16(value)   __ROR(__REV(value), 16)

 /**
-  \brief   Reverse byte order in signed short value
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
-#ifndef __NO_EMBEDDED_ASM
-__attribute__((section(".revsh_text"))) __STATIC_INLINE int32_t __REVSH(int32_t value)
-{
-  int32_t result;
-  __ASM volatile("revsh %0, %1" : "=r" (result) : "r" (value));
-  return result;
-}
-#endif
+#define __REVSH(value)   (int16_t)__builtin_bswap16(value)

 /**
   \brief   Rotate Right in unsigned value (32 bit)
@@ -188,11 +181,17 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE int32_t __REVSH(int32_t
   \param [in]    op2  Number of Bits to rotate
   \return             Rotated value
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
 {
+  op2 %= 32U;
+  if (op2 == 0U)
+  {
+    return op1;
+  }
   return (op1 >> op2) | (op1 << (32U - op2));
 }

 /**
   \brief   Breakpoint
   \param [in]    value  is ignored by the processor.
@@ -212,7 +211,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint
   \param [in]  value  Value to count the leading zeros
   \return             number of leading zeros in value
  */
-#define __CLZ             __builtin_clz
+#define __CLZ             (uint8_t)__builtin_clz

 /**
   \brief   LDR Exclusive (8 bit)
@@ -313,7 +312,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint
 /** \brief  Get CPSR Register
     \return               CPSR Register value
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CPSR(void)
+__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
 {
   uint32_t result;
   __ASM volatile("MRS %0, cpsr" : "=r" (result) );
@@ -323,7 +322,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CPSR(void)
 /** \brief  Set CPSR Register
     \param [in]    cpsr  CPSR value to set
  */
-__attribute__((always_inline)) __STATIC_INLINE void __set_CPSR(uint32_t cpsr)
+__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
 {
   __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
 }
@@ -331,7 +330,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
 /** \brief  Get Mode
     \return                Processor Mode
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_mode(void)
+__STATIC_FORCEINLINE uint32_t __get_mode(void)
 {
   return (__get_CPSR() & 0x1FU);
 }
@@ -339,7 +338,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_mode(void)
 /** \brief  Set Mode
     \param [in]    mode  Mode value to set
  */
-__attribute__((always_inline)) __STATIC_INLINE void __set_mode(uint32_t mode)
+__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
 {
   __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
 }
@@ -347,7 +346,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_mode(uint32_t mode)
 /** \brief  Get Stack Pointer
     \return Stack Pointer value
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP()
+__STATIC_FORCEINLINE uint32_t __get_SP()
 {
   uint32_t result;
   __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
@@ -357,7 +356,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP()
 /** \brief  Set Stack Pointer
     \param [in]    stack  Stack Pointer value to set
  */
-__attribute__((always_inline)) __STATIC_INLINE void __set_SP(uint32_t stack)
+__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
 {
   __ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
 }
@@ -365,7 +364,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_SP(uint32_t stack)
 /** \brief  Get USR/SYS Stack Pointer
     \return USR/SYS Stack Pointer value
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP_usr()
+__STATIC_FORCEINLINE uint32_t __get_SP_usr()
 {
   uint32_t cpsr;
   uint32_t result;
@@ -382,7 +381,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP_usr()
 /** \brief  Set USR/SYS Stack Pointer
     \param [in]    topOfProcStack  USR/SYS Stack Pointer value to set
  */
-__attribute__((always_inline)) __STATIC_INLINE void __set_SP_usr(uint32_t topOfProcStack)
+__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
 {
   uint32_t cpsr;
   __ASM volatile(
@@ -397,7 +396,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_SP_usr(uint32_t topOfP
 /** \brief  Get FPEXC
     \return               Floating Point Exception Control register value
  */
-__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPEXC(void)
+__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
 {
 #if (__FPU_PRESENT == 1)
   uint32_t result;
@@ -411,7 +410,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPEXC(void)
 /** \brief  Set FPEXC
     \param [in]    fpexc  Floating Point Exception Control value to set
  */
-__attribute__((always_inline)) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
+__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
 {
 #if (__FPU_PRESENT == 1)
   __ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");
@@ -424,6 +423,8 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
 #define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
 #define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
+#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
+#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

 #include "cmsis_cp15.h"
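Note: the new __get_CP64/__set_CP64 accessors wrap MRRC/MCRR so that cmsis_cp15.h can also read and write 64-bit coprocessor registers. A hedged usage sketch, assuming the ARMv7-A encoding of the 64-bit physical counter CNTPCT (CP15, opc1 = 0, CRm = c14); the register choice is illustrative only, not something this patch adds:

#include <stdint.h>

/* Illustrative only: read the 64-bit physical count register through the new
   accessor.  Assumes CNTPCT is CP15, opc1 = 0, CRm = c14 and that the Generic
   Timer is implemented and accessible at the current privilege level. */
static inline uint64_t read_cntpct(void)
{
  uint64_t cnt;
  __get_CP64(15, 0, cnt, 14);
  return cnt;
}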
@@ -510,7 +511,6 @@ __STATIC_INLINE void __FPU_Enable(void)
     //Initialise VFP/NEON registers to 0
     "        MOV     R2,#0             \n"
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
     //Initialise D16 registers to 0
     "        VMOV    D0, R2,R2         \n"
     "        VMOV    D1, R2,R2         \n"
@@ -528,9 +528,8 @@ __STATIC_INLINE void __FPU_Enable(void)
     "        VMOV    D13,R2,R2         \n"
     "        VMOV    D14,R2,R2         \n"
     "        VMOV    D15,R2,R2         \n"
-#endif
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
+#if __ARM_NEON == 1
     //Initialise D32 registers to 0
     "        VMOV    D16,R2,R2         \n"
     "        VMOV    D17,R2,R2         \n"
@@ -548,8 +547,8 @@ __STATIC_INLINE void __FPU_Enable(void)
     "        VMOV    D29,R2,R2         \n"
     "        VMOV    D30,R2,R2         \n"
     "        VMOV    D31,R2,R2         \n"
-    ".endif                            \n"
 #endif
     //Initialise FPSCR to a known state
     "        VMRS    R2,FPSCR          \n"
     "        LDR     R3,=0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.


@@ -157,7 +157,7 @@ __STATIC_FORCEINLINE void __DMB(void)
 /**
   \brief   Reverse byte order (32 bit)
-  \details Reverses the byte order in integer value.
+  \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
@@ -169,12 +169,13 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
   uint32_t result;

   __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
-  return(result);
+  return result;
 #endif
 }

 /**
   \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
@@ -188,20 +189,20 @@ __attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_
 #endif

 /**
-  \brief   Reverse byte order in signed short value
-  \details Reverses the byte order in a signed short value with sign extension to integer.
+  \brief   Reverse byte order (16 bit)
+  \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
   \param [in]    value  Value to reverse
   \return               Reversed value
  */
-__STATIC_FORCEINLINE int32_t __REVSH(int32_t value)
+__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
 {
 #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
-  return (short)__builtin_bswap16(value);
+  return (int16_t)__builtin_bswap16(value);
 #else
-  int32_t result;
+  int16_t result;

   __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
-  return(result);
+  return result;
 #endif
 }
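Note: narrowing __REVSH to int16_t matches what the REVSH instruction actually returns, and the updated \details examples can be checked with plain C. A hedged self-check using ordinary shifts rather than the CMSIS intrinsics themselves:

#include <assert.h>
#include <stdint.h>

int main(void)
{
  /* __REV16 semantics: swap the bytes within each halfword of a word. */
  uint32_t w = 0x12345678U;
  uint32_t rev16 = ((w & 0x00FF00FFU) << 8) | ((w & 0xFF00FF00U) >> 8);
  assert(rev16 == 0x34127856U);

  /* __REVSH semantics: swap the two bytes of a 16-bit value, keep it signed. */
  int16_t h = (int16_t)0x0080;
  int16_t revsh = (int16_t)(((uint16_t)h << 8) | ((uint16_t)h >> 8));
  assert((uint16_t)revsh == 0x8000U);

  return 0;
}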
@@ -214,9 +215,14 @@ __STATIC_FORCEINLINE int32_t __REVSH(int32_t value)
  */
 __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
 {
+  op2 %= 32U;
+  if (op2 == 0U) {
+    return op1;
+  }
   return (op1 >> op2) | (op1 << (32U - op2));
 }

 /**
   \brief   Breakpoint
   \param [in]    value  is ignored by the processor.
@@ -239,7 +245,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
      (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))    )
   __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
 #else
-  int32_t s = (4 /*sizeof(v)*/ * 8) - 1; /* extra shift needed at end */
+  int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

   result = value;                      /* r will be reversed bits of v; first get LSB of v */
   for (value >>= 1U; value; value >>= 1U)
@@ -250,7 +256,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
   }
   result <<= s;                        /* shift when v's highest bits are zero */
 #endif
-  return(result);
+  return result;
 }

 /**
@@ -496,14 +502,16 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
 /** \brief  Get Mode
     \return                Processor Mode
  */
-__STATIC_FORCEINLINE uint32_t __get_mode(void) {
+__STATIC_FORCEINLINE uint32_t __get_mode(void)
+{
   return (__get_CPSR() & 0x1FU);
 }

 /** \brief  Set Mode
     \param [in]    mode  Mode value to set
  */
-__STATIC_FORCEINLINE void __set_mode(uint32_t mode) {
+__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
+{
   __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
 }
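Note: __get_mode() simply masks the CPSR with 0x1F to expose the M[4:0] processor-mode field. A hedged usage sketch built on that accessor (the mode values are the architectural encodings, e.g. 0x10 = User, 0x13 = Supervisor; the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define CPSR_MODE_USR 0x10U  /* User mode        */
#define CPSR_MODE_SVC 0x13U  /* Supervisor mode  */

/* Illustrative helper: true when executing in Supervisor mode.
   Relies on __get_mode() from the patched header. */
static inline bool in_svc_mode(void)
{
  return (__get_mode() == CPSR_MODE_SVC);
}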
@@ -585,6 +593,8 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
 #define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
 #define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
+#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
+#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

 #include "cmsis_cp15.h"
@@ -621,8 +631,10 @@ __STATIC_INLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
     log2_linesize = (ccsidr & 0x00000007) + 2 + 2;
     log2_num_ways = log2_up(num_ways);
     shift_way = 32 - log2_num_ways;
-    for(int way = num_ways-1; way >= 0; way--) {
-      for(int set = num_sets-1; set >= 0; set--) {
+    for(int way = num_ways-1; way >= 0; way--)
+    {
+      for(int set = num_sets-1; set >= 0; set--)
+      {
         Dummy = (level << 1) | (set << log2_linesize) | (way << shift_way);
         switch (maint)
         {
@@ -648,7 +660,8 @@ __STATIC_INLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
   Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
  */
-__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op) {
+__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op)
+{
   register volatile uint32_t clidr;
   uint32_t cache_type;
   clidr = __get_CLIDR();
@@ -667,7 +680,8 @@ __STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op) {
   Critical section, called from undef handler, so systick is disabled
  */
-__STATIC_INLINE void __FPU_Enable(void) {
+__STATIC_INLINE void __FPU_Enable(void)
+{
   __ASM volatile(
     //Permit access to VFP/NEON, registers by modifying CPACR
     "        MRC     p15,0,R1,c1,c0,2  \n"
@@ -685,7 +699,6 @@ __STATIC_INLINE void __FPU_Enable(void) {
     //Initialise VFP/NEON registers to 0
     "        MOV     R2,#0             \n"
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
     //Initialise D16 registers to 0
     "        VMOV    D0, R2,R2         \n"
     "        VMOV    D1, R2,R2         \n"
@@ -703,9 +716,8 @@ __STATIC_INLINE void __FPU_Enable(void) {
     "        VMOV    D13,R2,R2         \n"
     "        VMOV    D14,R2,R2         \n"
     "        VMOV    D15,R2,R2         \n"
-#endif
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
+#if __ARM_NEON == 1
     //Initialise D32 registers to 0
     "        VMOV    D16,R2,R2         \n"
     "        VMOV    D17,R2,R2         \n"
@@ -724,6 +736,7 @@ __STATIC_INLINE void __FPU_Enable(void) {
     "        VMOV    D30,R2,R2         \n"
     "        VMOV    D31,R2,R2         \n"
 #endif
+
     //Initialise FPSCR to a known state
     "        VMRS    R2,FPSCR          \n"
     "        LDR     R3,=0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.


@@ -123,7 +123,8 @@
 #ifndef __UNALIGNED_UINT16_READ
 #pragma language=save
 #pragma language=extended
-__IAR_FT uint16_t __iar_uint16_read(void const *ptr) {
+__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
+{
   return *(__packed uint16_t*)(ptr);
 }
 #pragma language=restore
@@ -134,7 +135,8 @@
 #ifndef __UNALIGNED_UINT16_WRITE
 #pragma language=save
 #pragma language=extended
-__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) {
+__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
+{
   *(__packed uint16_t*)(ptr) = val;;
 }
 #pragma language=restore
@@ -144,7 +146,8 @@
 #ifndef __UNALIGNED_UINT32_READ
 #pragma language=save
 #pragma language=extended
-__IAR_FT uint32_t __iar_uint32_read(void const *ptr) {
+__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
+{
   return *(__packed uint32_t*)(ptr);
 }
 #pragma language=restore
@@ -154,7 +157,8 @@
 #ifndef __UNALIGNED_UINT32_WRITE
 #pragma language=save
 #pragma language=extended
-__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) {
+__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
+{
   *(__packed uint32_t*)(ptr) = val;;
 }
 #pragma language=restore
@@ -238,19 +242,22 @@
 #define __set_FPEXC(VALUE)    (__arm_wsr("FPEXC", VALUE))

 #define __get_CP(cp, op1, RT, CRn, CRm, op2) \
-  (RT = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))
+  ((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))
 #define __set_CP(cp, op1, RT, CRn, CRm, op2) \
-  (__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, RT))
+  (__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT)))
+#define __get_CP64(cp, op1, RT, CRm) \
+  ((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRm))
+#define __set_CP64(cp, op1, RT, CRm) \
+  (__arm_wsr("p" # cp ":" # op1 ":c" # CRm, (RT)))

 #include "cmsis_cp15.h"

 #define __NOP     __iar_builtin_no_operation

-__IAR_FT uint8_t __CLZ(uint32_t val) {
-  return __iar_builtin_CLZ(val);
-}
+#define __CLZ     __iar_builtin_CLZ

 #define __CLREX   __iar_builtin_CLREX

 #define __DMB     __iar_builtin_DMB
@@ -265,8 +272,9 @@
 #define __REV     __iar_builtin_REV
 #define __REV16   __iar_builtin_REV16

-__IAR_FT int32_t __REVSH(int32_t val) {
-  return __iar_builtin_REVSH((int16_t)val);
+__IAR_FT int16_t __REVSH(int16_t val)
+{
+  return (int16_t) __iar_builtin_REVSH(val);
 }

 #define __ROR     __iar_builtin_ROR
@@ -354,6 +362,10 @@
   #define __get_FPSCR __cmsis_iar_get_FPSR_not_active
 #endif

+#ifdef __INTRINSICS_INCLUDED
+  #error intrinsics.h is already included previously!
+#endif
+
 #include <intrinsics.h>

 #if !__FPU_PRESENT
@@ -376,23 +388,27 @@
   __ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
 }

-__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) {
+__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
+{
   return __LDREX((unsigned long *)ptr);
 }

-__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) {
+__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
+{
   return __STREX(value, (unsigned long *)ptr);
 }

-__IAR_FT uint32_t __RRX(uint32_t value) {
+__IAR_FT uint32_t __RRX(uint32_t value)
+{
   uint32_t result;
   __ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc");
   return(result);
 }

-__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) {
+__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
   return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2));
 }
@@ -419,6 +435,10 @@
     __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
   #define __set_CP(cp, op1, Rt, CRn, CRm, op2) \
     __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
+  #define __get_CP64(cp, op1, Rt, CRm) \
+    __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
+  #define __set_CP64(cp, op1, Rt, CRm) \
+    __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

 #include "cmsis_cp15.h"
@@ -533,7 +553,6 @@ void __FPU_Enable(void)
     //Initialise VFP/NEON registers to 0
     "        MOV     R2,#0             \n"
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
     //Initialise D16 registers to 0
     "        VMOV    D0, R2,R2         \n"
     "        VMOV    D1, R2,R2         \n"
@@ -551,9 +570,8 @@ void __FPU_Enable(void)
     "        VMOV    D13,R2,R2         \n"
     "        VMOV    D14,R2,R2         \n"
     "        VMOV    D15,R2,R2         \n"
-#endif
-#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
+#ifdef __ARM_ADVANCED_SIMD__
     //Initialise D32 registers to 0
     "        VMOV    D16,R2,R2         \n"
     "        VMOV    D17,R2,R2         \n"
@@ -571,8 +589,8 @@ void __FPU_Enable(void)
     "        VMOV    D29,R2,R2         \n"
     "        VMOV    D30,R2,R2         \n"
     "        VMOV    D31,R2,R2         \n"
-    ".endif                            \n"
 #endif
     //Initialise FPSCR to a known state
     "        VMRS    R2,FPSCR          \n"
     "        MOV32   R3,#0x00086060    \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.