mirror of https://github.com/ARMmbed/mbed-os.git

Core(A): Updated __FPU_Enable function (VFP register count detection)

parent 6852fffd49
commit 80521c8369

@@ -134,6 +134,7 @@
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/

@@ -141,6 +142,7 @@
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/

@@ -153,12 +155,13 @@ __attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(u
#endif

/**
\brief Reverse byte order in signed short value
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
{
revsh r0, r0
bx lr

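For reference, a minimal usage sketch of the three byte-reverse intrinsics documented above (plain C; the values mirror the \details examples, and the snippet is illustrative only, not part of the patch):

/* Expected results of the CMSIS byte-reverse intrinsics. */
#include <stdint.h>

static void reverse_examples(void)
{
    uint32_t r32 = __REV(0x12345678U);        /* 0x78563412: all four bytes reversed    */
    uint32_t r16 = __REV16(0x12345678U);      /* 0x34127856: bytes swapped per halfword */
    int16_t  rsh = __REVSH((int16_t)0x0080);  /* (int16_t)0x8000: reversed, sign kept   */
    (void)r32; (void)r16; (void)rsh;
}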
@@ -351,14 +354,16 @@ __STATIC_INLINE void __set_CPSR(uint32_t cpsr)
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_INLINE uint32_t __get_mode(void) {
__STATIC_INLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}

/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_INLINE __ASM void __set_mode(uint32_t mode) {
__STATIC_INLINE __ASM void __set_mode(uint32_t mode)
{
MOV r1, lr
MSR CPSR_C, r0
BX r1

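A short illustration of how __get_mode() is typically used (the mode encodings 0x10 = USR, 0x13 = SVC, 0x1F = SYS come from the ARMv7-A CPSR.M field; the helper below is a sketch, not part of this change):

/* Sketch: privilege check based on the CPSR mode bits returned by __get_mode(). */
#include <stdint.h>

static int in_privileged_mode(void)
{
    return (__get_mode() != 0x10U);  /* every mode except USR (0x10) is privileged */
}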
@@ -443,15 +448,30 @@ __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
* Include common core functions to access Coprocessor 15 registers
*/

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); Rt = tmp; } while(0)
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = Rt; } while(0)
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); (Rt) = tmp; } while(0)
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = (Rt); } while(0)
#define __get_CP64(cp, op1, Rt, CRm) \
do { \
uint32_t ltmp, htmp; \
__ASM volatile("MRRC p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
(Rt) = ((((uint64_t)htmp) << 32U) | ((uint64_t)ltmp)); \
} while(0)

#define __set_CP64(cp, op1, Rt, CRm) \
do { \
const uint64_t tmp = (Rt); \
const uint32_t ltmp = (uint32_t)(tmp); \
const uint32_t htmp = (uint32_t)(tmp >> 32U); \
__ASM volatile("MCRR p" # cp ", " # op1 ", ltmp, htmp, c" # CRm); \
} while(0)

#include "cmsis_cp15.h"

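The accessors in cmsis_cp15.h are built on these __get_CP/__set_CP macros. A hedged sketch of such an accessor (SCTLR sits at CP15, op1=0, CRn=c1, CRm=c0, op2=0; the function name below is illustrative, the real header provides __get_SCTLR):

/* Sketch: reading the System Control Register through the coprocessor macro. */
__STATIC_INLINE uint32_t example_get_SCTLR(void)
{
    uint32_t result;
    __get_CP(15, 0, result, 1, 0, 0);  /* equivalent to MRC p15, 0, <Rt>, c1, c0, 0 */
    return result;
}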
/** \brief Clean and Invalidate the entire data or unified cache
* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
*/
__STATIC_INLINE __ASM void __L1C_CleanInvalidateCache(uint32_t op) {
__STATIC_INLINE __ASM void __L1C_CleanInvalidateCache(uint32_t op)
{
ARM

PUSH {R4-R11}

@@ -510,7 +530,8 @@ Finished

Critical section, called from undef handler, so systick is disabled
*/
__STATIC_INLINE __ASM void __FPU_Enable(void) {
__STATIC_INLINE __ASM void __FPU_Enable(void)
{
ARM

//Permit access to VFP/NEON, registers by modifying CPACR

@@ -528,7 +549,7 @@ __STATIC_INLINE __ASM void __FPU_Enable(void) {

//Initialise VFP/NEON registers to 0
MOV R2,#0
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} >= 16

//Initialise D16 registers to 0
VMOV D0, R2,R2
VMOV D1, R2,R2

@@ -546,7 +567,7 @@ __STATIC_INLINE __ASM void __FPU_Enable(void) {
VMOV D13,R2,R2
VMOV D14,R2,R2
VMOV D15,R2,R2
ENDIF

IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
//Initialise D32 registers to 0
VMOV D16,R2,R2

@@ -25,6 +25,8 @@
#ifndef __CMSIS_ARMCLANG_H
#define __CMSIS_ARMCLANG_H

#pragma clang system_header /* treat file as system include file */

#ifndef __ARM_COMPAT_H
#include <arm_compat.h> /* Compatibility header for ARM Compiler 5 intrinsics */
#endif

@@ -148,38 +150,29 @@

/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV __builtin_bswap32
#define __REV(value) __builtin_bswap32(value)

/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
uint32_t result;
__ASM volatile("rev16 %0, %1" : "=r" (result) : "r" (value));
return result;
}
#endif
#define __REV16(value) __ROR(__REV(value), 16)


/**
\brief Reverse byte order in signed short value
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".revsh_text"))) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
int32_t result;
__ASM volatile("revsh %0, %1" : "=r" (result) : "r" (value));
return result;
}
#endif
#define __REVSH(value) (int16_t)__builtin_bswap16(value)

/**
\brief Rotate Right in unsigned value (32 bit)

@@ -188,31 +181,37 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE int32_t __REVSH(int32_t
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U)
{
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}

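The new __REV16 macro above relies on a simple identity: reversing all four bytes with __REV and then rotating right by 16 bits swaps the bytes within each halfword. A small self-check sketch (illustrative only):

/* __REV16(x) == __ROR(__REV(x), 16): 0x12345678 -> 0x78563412 -> 0x34127856. */
#include <stdint.h>
#include <assert.h>

static void rev16_identity_check(uint32_t x)
{
    uint32_t per_halfword = ((x & 0xFF00FF00U) >> 8) | ((x & 0x00FF00FFU) << 8);
    assert(__ROR(__REV(x), 16) == per_halfword);
}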
/**
\brief Breakpoint
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
#define __BKPT(value) __ASM volatile ("bkpt "#value)

/**
\brief Reverse bit order of value
\param [in] value Value to reverse
\return Reversed value
*/
#define __RBIT __builtin_arm_rbit
#define __RBIT __builtin_arm_rbit

/**
\brief Count leading zeros
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ __builtin_clz
#define __CLZ (uint8_t)__builtin_clz

/**
\brief LDR Exclusive (8 bit)

@@ -313,7 +312,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint
/** \brief Get CPSR Register
\return CPSR Register value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CPSR(void)
__STATIC_FORCEINLINE uint32_t __get_CPSR(void)
{
uint32_t result;
__ASM volatile("MRS %0, cpsr" : "=r" (result) );

@@ -323,7 +322,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_CPSR(void)
/** \brief Set CPSR Register
\param [in] cpsr CPSR value to set
*/
__attribute__((always_inline)) __STATIC_INLINE void __set_CPSR(uint32_t cpsr)
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
}

@@ -331,7 +330,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
/** \brief Get Mode
\return Processor Mode
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_mode(void)
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}

@@ -339,7 +338,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_mode(void)
/** \brief Set Mode
\param [in] mode Mode value to set
*/
__attribute__((always_inline)) __STATIC_INLINE void __set_mode(uint32_t mode)
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}

@@ -347,7 +346,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_mode(uint32_t mode)
/** \brief Get Stack Pointer
\return Stack Pointer value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP()
__STATIC_FORCEINLINE uint32_t __get_SP()
{
uint32_t result;
__ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");

@@ -357,7 +356,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP()
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__attribute__((always_inline)) __STATIC_INLINE void __set_SP(uint32_t stack)
__STATIC_FORCEINLINE void __set_SP(uint32_t stack)
{
__ASM volatile("MOV sp, %0" : : "r" (stack) : "memory");
}

@@ -365,7 +364,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_SP(uint32_t stack)
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP_usr()
__STATIC_FORCEINLINE uint32_t __get_SP_usr()
{
uint32_t cpsr;
uint32_t result;

@@ -382,7 +381,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_SP_usr()
/** \brief Set USR/SYS Stack Pointer
\param [in] topOfProcStack USR/SYS Stack Pointer value to set
*/
__attribute__((always_inline)) __STATIC_INLINE void __set_SP_usr(uint32_t topOfProcStack)
__STATIC_FORCEINLINE void __set_SP_usr(uint32_t topOfProcStack)
{
uint32_t cpsr;
__ASM volatile(

@@ -397,7 +396,7 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_SP_usr(uint32_t topOfP
/** \brief Get FPEXC
\return Floating Point Exception Control register value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPEXC(void)
__STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;

@@ -411,7 +410,7 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __get_FPEXC(void)
/** \brief Set FPEXC
\param [in] fpexc Floating Point Exception Control value to set
*/
__attribute__((always_inline)) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
__STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
{
#if (__FPU_PRESENT == 1)
__ASM volatile ("VMSR fpexc, %0" : : "r" (fpexc) : "memory");

@@ -424,6 +423,8 @@ __attribute__((always_inline)) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

#include "cmsis_cp15.h"

@@ -494,24 +495,23 @@ __STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op)
__STATIC_INLINE void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON, registers by modifying CPACR
//Permit access to VFP/NEON, registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"

//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
" ISB \n"

//Enable VFP/NEON
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"

//Initialise VFP/NEON registers to 0
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
//Initialise D16 registers to 0
//Initialise D16 registers to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"

@@ -528,10 +528,9 @@ __STATIC_INLINE void __FPU_Enable(void)
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#endif

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
//Initialise D32 registers to 0
#if __ARM_NEON == 1
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"

@@ -548,9 +547,9 @@ __STATIC_INLINE void __FPU_Enable(void)
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
".endif \n"
#endif
//Initialise FPSCR to a known state

//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"
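For context on the code above: __FPU_Enable() is normally reached from the undefined-instruction handler the first time a thread executes a VFP/NEON instruction. A hedged sketch of such a caller (handler name and flow are illustrative, not defined by this patch; FPEXC.EN is bit 30, matching the ORR #0x40000000 above):

/* Sketch: lazy FPU enable from an undef-instruction handler. */
static void undef_handler_check_fpu(void)
{
    if ((__get_FPEXC() & 0x40000000U) == 0U)  /* FPU not yet enabled */
    {
        __FPU_Enable();  /* grants CP10/CP11 access, sets FPEXC.EN, and zeroes
                            D0-D15 or D0-D31 depending on the register count
                            detection macros being changed in this commit */
    }
}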

@@ -157,7 +157,7 @@ __STATIC_FORCEINLINE void __DMB(void)

/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in integer value.
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/

@@ -169,12 +169,13 @@ __STATIC_FORCEINLINE uint32_t __REV(uint32_t value)
uint32_t result;

__ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
return result;
#endif
}

/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/

@@ -188,20 +189,20 @@ __attribute__((section(".rev16_text"))) __STATIC_INLINE uint32_t __REV16(uint32_
#endif

/**
\brief Reverse byte order in signed short value
\details Reverses the byte order in a signed short value with sign extension to integer.
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
__STATIC_FORCEINLINE int32_t __REVSH(int32_t value)
__STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
return (short)__builtin_bswap16(value);
return (int16_t)__builtin_bswap16(value);
#else
int32_t result;
int16_t result;

__ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
return(result);
return result;
#endif
}

@@ -214,9 +215,14 @@ __STATIC_FORCEINLINE int32_t __REVSH(int32_t value)
*/
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U) {
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
}


/**
\brief Breakpoint
\param [in] value is ignored by the processor.

@@ -239,7 +245,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) )
__ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
int32_t s = (4 /*sizeof(v)*/ * 8) - 1; /* extra shift needed at end */
int32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */

result = value; /* r will be reversed bits of v; first get LSB of v */
for (value >>= 1U; value; value >>= 1U)

@@ -250,7 +256,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
}
result <<= s; /* shift when v's highest bits are zero */
#endif
return(result);
return result;
}

/**

@@ -496,14 +502,16 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "memory");
/** \brief Get Mode
\return Processor Mode
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void) {
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}

/** \brief Set Mode
\param [in] mode Mode value to set
*/
__STATIC_FORCEINLINE void __set_mode(uint32_t mode) {
__STATIC_FORCEINLINE void __set_mode(uint32_t mode)
{
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}


@@ -585,6 +593,8 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)

#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

#include "cmsis_cp15.h"

@@ -621,8 +631,10 @@ __STATIC_INLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
log2_linesize = (ccsidr & 0x00000007) + 2 + 2;
log2_num_ways = log2_up(num_ways);
shift_way = 32 - log2_num_ways;
for(int way = num_ways-1; way >= 0; way--) {
for(int set = num_sets-1; set >= 0; set--) {
for(int way = num_ways-1; way >= 0; way--)
{
for(int set = num_sets-1; set >= 0; set--)
{
Dummy = (level << 1) | (set << log2_linesize) | (way << shift_way);
switch (maint)
{

@@ -648,7 +660,8 @@ __STATIC_INLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)

Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
*/
__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op) {
__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op)
{
register volatile uint32_t clidr;
uint32_t cache_type;
clidr = __get_CLIDR();

@@ -667,26 +680,26 @@ __STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op) {

Critical section, called from undef handler, so systick is disabled
*/
__STATIC_INLINE void __FPU_Enable(void) {
__STATIC_INLINE void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON, registers by modifying CPACR
//Permit access to VFP/NEON, registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"

//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
" ISB \n"

//Enable VFP/NEON
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"

//Initialise VFP/NEON registers to 0
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
//Initialise D16 registers to 0
//Initialise D16 registers to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"

@@ -703,10 +716,9 @@ __STATIC_INLINE void __FPU_Enable(void) {
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#endif

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
//Initialise D32 registers to 0
#if __ARM_NEON == 1
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"

@@ -724,7 +736,8 @@ __STATIC_INLINE void __FPU_Enable(void) {
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
#endif
//Initialise FPSCR to a known state

//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" LDR R3,=0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"

@@ -123,7 +123,8 @@
#ifndef __UNALIGNED_UINT16_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint16_t __iar_uint16_read(void const *ptr) {
__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
{
return *(__packed uint16_t*)(ptr);
}
#pragma language=restore

@@ -134,7 +135,8 @@
#ifndef __UNALIGNED_UINT16_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val) {
__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
{
*(__packed uint16_t*)(ptr) = val;;
}
#pragma language=restore

@@ -144,7 +146,8 @@
#ifndef __UNALIGNED_UINT32_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint32_t __iar_uint32_read(void const *ptr) {
__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
{
return *(__packed uint32_t*)(ptr);
}
#pragma language=restore

@@ -154,7 +157,8 @@
#ifndef __UNALIGNED_UINT32_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val) {
__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
{
*(__packed uint32_t*)(ptr) = val;;
}
#pragma language=restore
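These helpers back the __UNALIGNED_UINT16/32_READ and _WRITE macros. A small sketch of the kind of access they make safe on IAR (buffer layout and function name are illustrative):

/* Sketch: reading a 32-bit field that starts at an odd (unaligned) offset. */
#include <stdint.h>

static uint32_t read_unaligned_word(const uint8_t *frame)
{
    return __iar_uint32_read(frame + 1);  /* offset 1 is deliberately unaligned */
}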
@@ -238,24 +242,27 @@
#define __set_FPEXC(VALUE) (__arm_wsr("FPEXC", VALUE))

#define __get_CP(cp, op1, RT, CRn, CRm, op2) \
(RT = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))
((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))

#define __set_CP(cp, op1, RT, CRn, CRm, op2) \
(__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, RT))
(__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT)))

#define __get_CP64(cp, op1, RT, CRm) \
((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRm))

#define __set_CP64(cp, op1, RT, CRm) \
(__arm_wsr("p" # cp ":" # op1 ":c" # CRm, (RT)))

#include "cmsis_cp15.h"

#define __NOP __iar_builtin_no_operation
#define __NOP __iar_builtin_no_operation

__IAR_FT uint8_t __CLZ(uint32_t val) {
return __iar_builtin_CLZ(val);
}
#define __CLZ __iar_builtin_CLZ
#define __CLREX __iar_builtin_CLREX

#define __CLREX __iar_builtin_CLREX

#define __DMB __iar_builtin_DMB
#define __DSB __iar_builtin_DSB
#define __ISB __iar_builtin_ISB
#define __DMB __iar_builtin_DMB
#define __DSB __iar_builtin_DSB
#define __ISB __iar_builtin_ISB

#define __LDREXB __iar_builtin_LDREXB
#define __LDREXH __iar_builtin_LDREXH

@@ -265,8 +272,9 @@
#define __REV __iar_builtin_REV
#define __REV16 __iar_builtin_REV16

__IAR_FT int32_t __REVSH(int32_t val) {
return __iar_builtin_REVSH((int16_t)val);
__IAR_FT int16_t __REVSH(int16_t val)
{
return (int16_t) __iar_builtin_REVSH(val);
}

#define __ROR __iar_builtin_ROR

@@ -354,6 +362,10 @@
#define __get_FPSCR __cmsis_iar_get_FPSR_not_active
#endif

#ifdef __INTRINSICS_INCLUDED
#error intrinsics.h is already included previously!
#endif

#include <intrinsics.h>

#if !__FPU_PRESENT

@@ -376,23 +388,27 @@
__ASM volatile("MSR cpsr_c, %0" : : "r" (mode) : "memory");
}

__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr) {
__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
{
return __LDREX((unsigned long *)ptr);
}

__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr) {
__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
{
return __STREX(value, (unsigned long *)ptr);
}


__IAR_FT uint32_t __RRX(uint32_t value) {
__IAR_FT uint32_t __RRX(uint32_t value)
{
uint32_t result;
__ASM("RRX %0, %1" : "=r"(result) : "r" (value) : "cc");
return(result);
}


__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2) {
__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
{
return (op1 >> op2) | (op1 << ((sizeof(op1)*8)-op2));
}

@@ -418,7 +434,11 @@
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) \
__ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) \
__ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
__ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )

#include "cmsis_cp15.h"

@@ -517,24 +537,23 @@ __STATIC_INLINE
void __FPU_Enable(void)
{
__ASM volatile(
//Permit access to VFP/NEON, registers by modifying CPACR
//Permit access to VFP/NEON, registers by modifying CPACR
" MRC p15,0,R1,c1,c0,2 \n"
" ORR R1,R1,#0x00F00000 \n"
" MCR p15,0,R1,c1,c0,2 \n"

//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
//Ensure that subsequent instructions occur in the context of VFP/NEON access permitted
" ISB \n"

//Enable VFP/NEON
//Enable VFP/NEON
" VMRS R1,FPEXC \n"
" ORR R1,R1,#0x40000000 \n"
" VMSR FPEXC,R1 \n"

//Initialise VFP/NEON registers to 0
//Initialise VFP/NEON registers to 0
" MOV R2,#0 \n"

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT >= 16
//Initialise D16 registers to 0
//Initialise D16 registers to 0
" VMOV D0, R2,R2 \n"
" VMOV D1, R2,R2 \n"
" VMOV D2, R2,R2 \n"

@@ -551,10 +570,9 @@ void __FPU_Enable(void)
" VMOV D13,R2,R2 \n"
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#endif

#if TARGET_FEATURE_EXTENSION_REGISTER_COUNT == 32
//Initialise D32 registers to 0
#ifdef __ARM_ADVANCED_SIMD__
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"
" VMOV D18,R2,R2 \n"

@@ -571,12 +589,12 @@ void __FPU_Enable(void)
" VMOV D29,R2,R2 \n"
" VMOV D30,R2,R2 \n"
" VMOV D31,R2,R2 \n"
".endif \n"
#endif
//Initialise FPSCR to a known state

//Initialise FPSCR to a known state
" VMRS R2,FPSCR \n"
" MOV32 R3,#0x00086060 \n" //Mask off all bits that do not have to be preserved. Non-preserved bits can/should be zero.
" AND R2,R2,R3 \n"
" AND R2,R2,R3 \n"
" VMSR FPSCR,R2 \n");
}

@@ -393,11 +393,11 @@ osRtxContextSave:
STMDB R3!, {R2,R12} // Push FPSCR, maintain 8-byte alignment

VSTMDB R3!, {D0-D15} // Save D0-D15
#if __ARM_NEON == 1
#if __ARM_NEON == 1
VSTMDB R3!, {D16-D31} // Save D16-D31
#endif
LDRB R2, [R0, #TCB_SP_FRAME]
#if __ARM_NEON == 1
#if __ARM_NEON == 1
ORR R2, R2, #4 // NEON state
#else
ORR R2, R2, #2 // VFP state

@@ -371,11 +371,11 @@ osRtxContextSave
STMDB R3!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment

VSTMDB R3!, {D0-D15} ; Save D0-D15
#ifdef __ARM_ADVANCED_SIMD__
#ifdef __ARM_ADVANCED_SIMD__
VSTMDB R3!, {D16-D31} ; Save D16-D31
#endif
LDRB R2, [R0, #TCB_SP_FRAME]
#ifdef __ARM_ADVANCED_SIMD__
#ifdef __ARM_ADVANCED_SIMD__
ORR R2, R2, #4 ; NEON state
#else
ORR R2, R2, #2 ; VFP state

@@ -397,7 +397,7 @@ osRtxContextRestore
MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
BEQ osRtxContextRestore1 ; No VFP
ISB ; Only sync if we enabled VFP, otherwise we will context switch before next VFP instruction anyway
#ifdef __ARM_ADVANCED_SIMD__
#ifdef __ARM_ADVANCED_SIMD__
VLDMIA R3!, {D16-D31} ; Restore D16-D31
#endif
VLDMIA R3!, {D0-D15} ; Restore D0-D15