Core(A): Refactored L1 Cache maintenance to be compiler agnostic.

- Added L1 Cache test cases to CoreValidation.
- Adopted FVP Cortex-A configs to simulate cache states.
pull/5628/head
Jonatan Antoni 2017-12-11 10:46:31 +01:00 committed by TomoYamanaka
parent 0ff62f6b9e
commit 2f06202a9b
7 changed files with 297 additions and 387 deletions


@ -37,26 +37,29 @@
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __forceinline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE static __forceinline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __declspec(noreturn)
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
@ -79,8 +82,8 @@
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
@ -378,7 +381,7 @@ __STATIC_INLINE __ASM uint32_t __get_SP(void)
BX lr
}
/** \brief Set Stack Pointer
\param [in] stack Stack Pointer value to set
*/
__STATIC_INLINE __ASM void __set_SP(uint32_t stack)
@ -447,7 +450,7 @@ __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
/*
* Include common core functions to access Coprocessor 15 registers
*/
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); (Rt) = tmp; } while(0)
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) do { register uint32_t tmp __ASM("cp" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2); tmp = (Rt); } while(0)
#define __get_CP64(cp, op1, Rt, CRm) \
@ -467,65 +470,6 @@ __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
#include "cmsis_cp15.h"
/** \brief Clean and Invalidate the entire data or unified cache
* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
*/
__STATIC_INLINE __ASM void __L1C_CleanInvalidateCache(uint32_t op)
{
ARM
PUSH {R4-R11}
MRC p15, 1, R6, c0, c0, 1 // Read CLIDR
ANDS R3, R6, #0x07000000 // Extract coherency level
MOV R3, R3, LSR #23 // Total cache levels << 1
BEQ Finished // If 0, no need to clean
MOV R10, #0 // R10 holds current cache level << 1
Loop1 ADD R2, R10, R10, LSR #1 // R2 holds cache "Set" position
MOV R1, R6, LSR R2 // Bottom 3 bits are the Cache-type for this level
AND R1, R1, #7 // Isolate those lower 3 bits
CMP R1, #2
BLT Skip // No cache or only instruction cache at this level
MCR p15, 2, R10, c0, c0, 0 // Write the Cache Size selection register
ISB // ISB to sync the change to the CacheSizeID reg
MRC p15, 1, R1, c0, c0, 0 // Reads current Cache Size ID register
AND R2, R1, #7 // Extract the line length field
ADD R2, R2, #4 // Add 4 for the line length offset (log2 16 bytes)
LDR R4, =0x3FF
ANDS R4, R4, R1, LSR #3 // R4 is the max number of the way size (right aligned)
CLZ R5, R4 // R5 is the bit position of the way size increment
LDR R7, =0x7FFF
ANDS R7, R7, R1, LSR #13 // R7 is the max number of the index size (right aligned)
Loop2 MOV R9, R4 // R9 working copy of the max way size (right aligned)
Loop3 ORR R11, R10, R9, LSL R5 // Factor in the Way number and cache number into R11
ORR R11, R11, R7, LSL R2 // Factor in the Set number
CMP R0, #0
BNE Dccsw
MCR p15, 0, R11, c7, c6, 2 // DCISW. Invalidate by Set/Way
B cont
Dccsw CMP R0, #1
BNE Dccisw
MCR p15, 0, R11, c7, c10, 2 // DCCSW. Clean by Set/Way
B cont
Dccisw MCR p15, 0, R11, c7, c14, 2 // DCCISW. Clean and Invalidate by Set/Way
cont SUBS R9, R9, #1 // Decrement the Way number
BGE Loop3
SUBS R7, R7, #1 // Decrement the Set number
BGE Loop2
Skip ADD R10, R10, #2 // Increment the cache number
CMP R3, R10
BGT Loop1
Finished
DSB
POP {R4-R11}
BX lr
}
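For reference, each MCR operand the loop above builds packs the cache level, the way index, and the set index into a single set/way word. A minimal C sketch of that encoding (the helper name and parameters are illustrative, not part of this header):

/* Compose a set/way operand as the assembly loop does.
   level is 0-based; log2_linesize = log2(line length in bytes);
   shift_way = 32 - ceil(log2(number of ways)). */
static inline uint32_t setway_word(uint32_t level, uint32_t set,
                                   uint32_t way, uint32_t log2_linesize,
                                   uint32_t shift_way)
{
  return (level << 1U) | (set << log2_linesize) | (way << shift_way);
}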
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled


@ -34,26 +34,29 @@
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static __inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
@ -95,8 +98,8 @@
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
@ -428,66 +431,6 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
#include "cmsis_cp15.h"
/** \brief Clean and Invalidate the entire data or unified cache
Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
*/
__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op)
{
__ASM volatile(
" PUSH {R4-R11} \n"
" MRC p15, 1, R6, c0, c0, 1 \n" // Read CLIDR
" ANDS R3, R6, #0x07000000 \n" // Extract coherency level
" MOV R3, R3, LSR #23 \n" // Total cache levels << 1
" BEQ Finished \n" // If 0, no need to clean
" MOV R10, #0 \n" // R10 holds current cache level << 1
"Loop1: ADD R2, R10, R10, LSR #1 \n" // R2 holds cache "Set" position
" MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
" AND R1, R1, #7 \n" // Isolate those lower 3 bits
" CMP R1, #2 \n"
" BLT Skip \n" // No cache or only instruction cache at this level
" MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
" ISB \n" // ISB to sync the change to the CacheSizeID reg
" MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
" AND R2, R1, #7 \n" // Extract the line length field
" ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
" LDR R4, =0x3FF \n"
" ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number on the way size (right aligned)
" CLZ R5, R4 \n" // R5 is the bit position of the way size increment
" LDR R7, =0x7FFF \n"
" ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
"Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
"Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
" ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
" CMP R0, #0 \n"
" BNE Dccsw \n"
" MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
" B cont \n"
"Dccsw: CMP R0, #1 \n"
" BNE Dccisw \n"
" MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
" B cont \n"
"Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW. Clean and Invalidate by Set/Way
"cont: SUBS R9, R9, #1 \n" // Decrement the Way number
" BGE Loop3 \n"
" SUBS R7, R7, #1 \n" // Decrement the Set number
" BGE Loop2 \n"
"Skip: ADD R10, R10, #2 \n" // Increment the cache number
" CMP R3, R10 \n"
" BGT Loop1 \n"
"Finished: \n"
" DSB \n"
" POP {R4-R11} "
);
}
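The op contract is visible in the branches above and is preserved by the generic C replacement introduced later in this commit; a usage sketch:

__L1C_CleanInvalidateCache(0U);  /* invalidate only (DCISW path) */
__L1C_CleanInvalidateCache(1U);  /* clean only (DCCSW path) */
__L1C_CleanInvalidateCache(2U);  /* clean and invalidate (DCCISW path) */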
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled


@ -1,8 +1,8 @@
/**************************************************************************//**
* @file cmsis_compiler.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.1
* @date 01. December 2017
******************************************************************************/
/*
* Copyright (c) 2009-2017 ARM Limited. All rights reserved.
@ -54,7 +54,7 @@
#elif defined ( __ICCARM__ )
#include "cmsis_iccarm.h"
/*
* TI ARM Compiler
*/
@ -70,9 +70,18 @@
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
@ -110,9 +119,15 @@
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
@ -146,6 +161,9 @@
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
// NO RETURN is automatically detected hence no warning here
#define __NO_RETURN
@ -154,6 +172,10 @@
#warning No compiler specific solution for __USED. __USED is ignored.
#define __USED
#endif
#ifndef __DEPRECATED
#warning No compiler specific solution for __DEPRECATED. __DEPRECATED is ignored.
#define __DEPRECATED
#endif
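A sketch of how the fallback chain is consumed downstream (the function name is hypothetical):

__DEPRECATED
void legacy_cache_flush(void);  /* warns where the toolchain supports deprecation,
                                   compiles silently where __DEPRECATED is empty */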
#ifndef __WEAK
#define __WEAK __weak
#endif


@ -22,6 +22,12 @@
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CMSIS_CP15_H
#define __CMSIS_CP15_H
@ -31,7 +37,6 @@
__STATIC_FORCEINLINE uint32_t __get_ACTLR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c1, c0, 1" : "=r" (result) : : "memory" );
__get_CP(15, 0, result, 1, 0, 1);
return(result);
}
@ -41,7 +46,6 @@ __STATIC_FORCEINLINE uint32_t __get_ACTLR(void)
*/
__STATIC_FORCEINLINE void __set_ACTLR(uint32_t actlr)
{
// __ASM volatile ("MCR p15, 0, %0, c1, c0, 1" : : "r" (actlr) : "memory");
__set_CP(15, 0, actlr, 1, 0, 1);
}
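A usage sketch for this accessor pair; the bit position is core-specific and assumed here purely for illustration (ACTLR.SMP sits at bit 6 on some Cortex-A cores, check the core's TRM):

uint32_t actlr = __get_ACTLR();
__set_ACTLR(actlr | (1U << 6U));  /* assumed bit position, core-specific */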
@ -51,7 +55,6 @@ __STATIC_FORCEINLINE void __set_ACTLR(uint32_t actlr)
__STATIC_FORCEINLINE uint32_t __get_CPACR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c1, c0, 2" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 1, 0, 2);
return result;
}
@ -61,7 +64,6 @@ __STATIC_FORCEINLINE uint32_t __get_CPACR(void)
*/
__STATIC_FORCEINLINE void __set_CPACR(uint32_t cpacr)
{
// __ASM volatile("MCR p15, 0, %0, c1, c0, 2" : : "r"(cpacr) : "memory");
__set_CP(15, 0, cpacr, 1, 0, 2);
}
@ -71,7 +73,6 @@ __STATIC_FORCEINLINE void __set_CPACR(uint32_t cpacr)
__STATIC_FORCEINLINE uint32_t __get_DFSR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c5, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 5, 0, 0);
return result;
}
@ -81,7 +82,6 @@ __STATIC_FORCEINLINE uint32_t __get_DFSR(void)
*/
__STATIC_FORCEINLINE void __set_DFSR(uint32_t dfsr)
{
// __ASM volatile("MCR p15, 0, %0, c5, c0, 0" : : "r"(dfsr) : "memory");
__set_CP(15, 0, dfsr, 5, 0, 0);
}
@ -91,7 +91,6 @@ __STATIC_FORCEINLINE void __set_DFSR(uint32_t dfsr)
__STATIC_FORCEINLINE uint32_t __get_IFSR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c5, c0, 1" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 5, 0, 1);
return result;
}
@ -101,7 +100,6 @@ __STATIC_FORCEINLINE uint32_t __get_IFSR(void)
*/
__STATIC_FORCEINLINE void __set_IFSR(uint32_t ifsr)
{
// __ASM volatile("MCR p15, 0, %0, c5, c0, 1" : : "r"(ifsr) : "memory");
__set_CP(15, 0, ifsr, 5, 0, 1);
}
@ -111,7 +109,6 @@ __STATIC_FORCEINLINE void __set_IFSR(uint32_t ifsr)
__STATIC_FORCEINLINE uint32_t __get_ISR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c12, c1, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 12, 1, 0);
return result;
}
@ -122,7 +119,6 @@ __STATIC_FORCEINLINE uint32_t __get_ISR(void)
__STATIC_FORCEINLINE uint32_t __get_CBAR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 4, %0, c15, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 4, result, 15, 0, 0);
return result;
}
@ -136,7 +132,6 @@ __STATIC_FORCEINLINE uint32_t __get_CBAR(void)
__STATIC_FORCEINLINE uint32_t __get_TTBR0(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c2, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 2, 0, 0);
return result;
}
@ -149,7 +144,6 @@ __STATIC_FORCEINLINE uint32_t __get_TTBR0(void)
*/
__STATIC_FORCEINLINE void __set_TTBR0(uint32_t ttbr0)
{
// __ASM volatile("MCR p15, 0, %0, c2, c0, 0" : : "r"(ttbr0) : "memory");
__set_CP(15, 0, ttbr0, 2, 0, 0);
}
@ -162,7 +156,6 @@ __STATIC_FORCEINLINE void __set_TTBR0(uint32_t ttbr0)
__STATIC_FORCEINLINE uint32_t __get_DACR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c3, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 3, 0, 0);
return result;
}
@ -175,7 +168,6 @@ __STATIC_FORCEINLINE uint32_t __get_DACR(void)
*/
__STATIC_FORCEINLINE void __set_DACR(uint32_t dacr)
{
// __ASM volatile("MCR p15, 0, %0, c3, c0, 0" : : "r"(dacr) : "memory");
__set_CP(15, 0, dacr, 3, 0, 0);
}
@ -187,7 +179,6 @@ __STATIC_FORCEINLINE void __set_DACR(uint32_t dacr)
*/
__STATIC_FORCEINLINE void __set_SCTLR(uint32_t sctlr)
{
// __ASM volatile("MCR p15, 0, %0, c1, c0, 0" : : "r"(sctlr) : "memory");
__set_CP(15, 0, sctlr, 1, 0, 0);
}
@ -197,7 +188,6 @@ __STATIC_FORCEINLINE void __set_SCTLR(uint32_t sctlr)
__STATIC_FORCEINLINE uint32_t __get_SCTLR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c1, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 1, 0, 0);
return result;
}
@ -207,7 +197,6 @@ __STATIC_FORCEINLINE uint32_t __get_SCTLR(void)
*/
__STATIC_FORCEINLINE void __set_ACTRL(uint32_t actrl)
{
// __ASM volatile("MCR p15, 0, %0, c1, c0, 1" : : "r"(actrl) : "memory");
__set_CP(15, 0, actrl, 1, 0, 1);
}
@ -217,7 +206,6 @@ __STATIC_FORCEINLINE void __set_ACTRL(uint32_t actrl)
__STATIC_FORCEINLINE uint32_t __get_ACTRL(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c1, c0, 1" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 1, 0, 1);
return result;
}
@ -231,7 +219,6 @@ __STATIC_FORCEINLINE uint32_t __get_ACTRL(void)
__STATIC_FORCEINLINE uint32_t __get_MPIDR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c0, c0, 5" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 0, 0, 5);
return result;
}
@ -245,7 +232,6 @@ __STATIC_FORCEINLINE uint32_t __get_MPIDR(void)
__STATIC_FORCEINLINE uint32_t __get_VBAR(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c12, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 12, 0, 0);
return result;
}
@ -258,7 +244,6 @@ __STATIC_FORCEINLINE uint32_t __get_VBAR(void)
*/
__STATIC_FORCEINLINE void __set_VBAR(uint32_t vbar)
{
// __ASM volatile("MCR p15, 0, %0, c12, c0, 1" : : "r"(vbar) : "memory");
__set_CP(15, 0, vbar, 12, 0, 1);
}
@ -274,7 +259,6 @@ __STATIC_FORCEINLINE void __set_VBAR(uint32_t vbar)
*/
__STATIC_FORCEINLINE void __set_CNTFRQ(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c14, c0, 0" : : "r"(value) : "memory");
__set_CP(15, 0, value, 14, 0, 0);
}
@ -287,7 +271,6 @@ __STATIC_FORCEINLINE void __set_CNTFRQ(uint32_t value)
__STATIC_FORCEINLINE uint32_t __get_CNTFRQ(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c14, c0, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 14, 0, 0);
return result;
}
@ -300,7 +283,6 @@ __STATIC_FORCEINLINE uint32_t __get_CNTFRQ(void)
*/
__STATIC_FORCEINLINE void __set_CNTP_TVAL(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c14, c2, 0" : : "r"(value) : "memory");
__set_CP(15, 0, value, 14, 2, 0);
}
@ -313,11 +295,47 @@ __STATIC_FORCEINLINE void __set_CNTP_TVAL(uint32_t value)
__STATIC_FORCEINLINE uint32_t __get_CNTP_TVAL(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c14, c2, 0" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 14, 2, 0);
return result;
}
/** \brief Get CNTPCT
This function returns the value of the 64-bit PL1 Physical Count Register (CNTPCT).
\return CNTPCT Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTPCT(void)
{
uint64_t result;
__get_CP64(15, 0, result, 14);
return result;
}
/** \brief Set CNTP_CVAL
This function assigns the given value to the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\param [in] value CNTP_CVAL Register value to set
*/
__STATIC_FORCEINLINE void __set_CNTP_CVAL(uint64_t value)
{
__set_CP64(15, 2, value, 14);
}
/** \brief Get CNTP_CVAL
This function returns the value of the 64-bit PL1 Physical Timer CompareValue Register (CNTP_CVAL).
\return CNTP_CVAL Register value
*/
__STATIC_FORCEINLINE uint64_t __get_CNTP_CVAL(void)
{
uint64_t result;
__get_CP64(15, 2, result, 14);
return result;
}
/** \brief Set CNTP_CTL
This function assigns the given value to PL1 Physical Timer Control Register (CNTP_CTL).
@ -326,7 +344,6 @@ __STATIC_FORCEINLINE uint32_t __get_CNTP_TVAL(void)
*/
__STATIC_FORCEINLINE void __set_CNTP_CTL(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c14, c2, 1" : : "r"(value) : "memory");
__set_CP(15, 0, value, 14, 2, 1);
}
@ -336,7 +353,6 @@ __STATIC_FORCEINLINE void __set_CNTP_CTL(uint32_t value)
__STATIC_FORCEINLINE uint32_t __get_CNTP_CTL(void)
{
uint32_t result;
// __ASM volatile("MRC p15, 0, %0, c14, c2, 1" : "=r"(result) : : "memory");
__get_CP(15, 0, result, 14, 2, 1);
return result;
}
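With the new 64-bit accessors, arming the PL1 physical timer takes a few lines; a minimal sketch, assuming PL1 access and a firmware-programmed CNTFRQ (the helper name is illustrative):

static void timer_arm_1ms(void)
{
  uint32_t freq = __get_CNTFRQ();         /* counter ticks per second */
  uint64_t now  = __get_CNTPCT();         /* current physical count */
  __set_CNTP_CVAL(now + (freq / 1000U));  /* fire one millisecond from now */
  __set_CNTP_CTL(1U);                     /* ENABLE = 1, IMASK = 0 */
}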
@ -349,7 +365,6 @@ __STATIC_FORCEINLINE uint32_t __get_CNTP_CTL(void)
*/
__STATIC_FORCEINLINE void __set_TLBIALL(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c8, c7, 0" : : "r"(value) : "memory");
__set_CP(15, 0, value, 8, 7, 0);
}
@ -359,7 +374,6 @@ __STATIC_FORCEINLINE void __set_TLBIALL(uint32_t value)
*/
__STATIC_FORCEINLINE void __set_BPIALL(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c5, 6" : : "r"(value) : "memory");
__set_CP(15, 0, value, 7, 5, 6);
}
@ -369,7 +383,6 @@ __STATIC_FORCEINLINE void __set_BPIALL(uint32_t value)
*/
__STATIC_FORCEINLINE void __set_ICIALLU(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c5, 0" : : "r"(value) : "memory");
__set_CP(15, 0, value, 7, 5, 0);
}
@ -379,7 +392,6 @@ __STATIC_FORCEINLINE void __set_ICIALLU(uint32_t value)
*/
__STATIC_FORCEINLINE void __set_DCCMVAC(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c10, 1" : : "r"(value) : "memory");
__set_CP(15, 0, value, 7, 10, 1);
}
@ -389,7 +401,6 @@ __STATIC_FORCEINLINE void __set_DCCMVAC(uint32_t value)
*/
__STATIC_FORCEINLINE void __set_DCIMVAC(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c6, 1" : : "r"(value) : "memory");
__set_CP(15, 0, value, 7, 6, 1);
}
@ -399,7 +410,6 @@ __STATIC_FORCEINLINE void __set_DCIMVAC(uint32_t value)
*/
__STATIC_FORCEINLINE void __set_DCCIMVAC(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c14, 1" : : "r"(value) : "memory");
__set_CP(15, 0, value, 7, 14, 1);
}
@ -434,4 +444,28 @@ __STATIC_FORCEINLINE uint32_t __get_CLIDR(void)
return result;
}
/** \brief Set DCISW
*/
__STATIC_FORCEINLINE void __set_DCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c6, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 6, 2);
}
/** \brief Set DCCSW
*/
__STATIC_FORCEINLINE void __set_DCCSW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c10, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 10, 2);
}
/** \brief Set DCCISW
*/
__STATIC_FORCEINLINE void __set_DCCISW(uint32_t value)
{
// __ASM volatile("MCR p15, 0, %0, c7, c14, 2" : : "r"(value) : "memory")
__set_CP(15, 0, value, 7, 14, 2);
}
#endif


@ -43,17 +43,20 @@
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE __attribute__((always_inline))
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
@ -264,7 +267,7 @@ __STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value)
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ (uint8_t)__builtin_clz
/**
\brief LDR Exclusive (8 bit)
@ -598,84 +601,6 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
#include "cmsis_cp15.h"
__STATIC_FORCEINLINE int32_t log2_up(uint32_t n)
{
int32_t log = -1;
uint32_t t = n;
while(t)
{
log++; t >>=1;
}
/* if n not power of 2 -> round up*/
if ( n & (n - 1) ) log++;
return log;
}
__STATIC_INLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
{
register volatile uint32_t Dummy;
register volatile uint32_t ccsidr;
uint32_t num_sets;
uint32_t num_ways;
uint32_t shift_way;
uint32_t log2_linesize;
uint32_t log2_num_ways;
Dummy = level << 1;
/* set csselr, select ccsidr register */
__set_CCSIDR(Dummy);
/* get current ccsidr register */
ccsidr = __get_CCSIDR();
num_sets = ((ccsidr & 0x0FFFE000) >> 13) + 1;
num_ways = ((ccsidr & 0x00001FF8) >> 3) + 1;
log2_linesize = (ccsidr & 0x00000007) + 2 + 2;
log2_num_ways = log2_up(num_ways);
shift_way = 32 - log2_num_ways;
for(int way = num_ways-1; way >= 0; way--)
{
for(int set = num_sets-1; set >= 0; set--)
{
Dummy = (level << 1) | (set << log2_linesize) | (way << shift_way);
switch (maint)
{
case 0:
__ASM volatile("MCR p15, 0, %0, c7, c6, 2" : : "r"(Dummy) : "memory"); // DCISW. Invalidate by Set/Way
break;
case 1:
__ASM volatile("MCR p15, 0, %0, c7, c10, 2" : : "r"(Dummy) : "memory"); // DCCSW. Clean by Set/Way
break;
default:
__ASM volatile("MCR p15, 0, %0, c7, c14, 2" : : "r"(Dummy) : "memory"); // DCCISW. Clean and Invalidate by Set/Way
break;
}
}
}
__DMB();
}
/** \brief Clean and Invalidate the entire data or unified cache
Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
*/
__STATIC_INLINE void __L1C_CleanInvalidateCache(uint32_t op)
{
register volatile uint32_t clidr;
uint32_t cache_type;
clidr = __get_CLIDR();
for(uint32_t i = 0; i<7; i++)
{
cache_type = (clidr >> i*3) & 0x7UL;
if ((cache_type >= 2) && (cache_type <= 4))
{
__L1C_MaintainDCacheSetWay(i, op);
}
}
}
/** \brief Enable Floating Point Unit
Critical section, called from undef handler, so systick is disabled


@ -1,8 +1,8 @@
/**************************************************************************//**
* @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR compiler) header file
* @version V5.0.4
* @date 01. December 2017
******************************************************************************/
//------------------------------------------------------------------------------
@ -74,7 +74,11 @@
#endif
#ifndef __NO_RETURN
#if __ICCARM_V8
#define __NO_RETURN __attribute__((__noreturn__))
#else
#define __NO_RETURN _Pragma("object_attribute=__noreturn")
#endif
#endif
#ifndef __PACKED
@ -105,19 +109,23 @@
#endif
#ifndef __RESTRICT
#define __RESTRICT restrict
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE _Pragma("inline=forced")
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
#endif
#ifndef __DEPRECATED
#define __DEPRECATED __attribute__((deprecated))
#endif
#ifndef __UNALIGNED_UINT16_READ
@ -216,12 +224,12 @@
#include "iccarm_builtin.h"
#define __enable_irq __iar_builtin_enable_interrupt
#define __disable_irq __iar_builtin_disable_interrupt
#define __enable_fault_irq __iar_builtin_enable_fiq
#define __disable_fault_irq __iar_builtin_disable_fiq
#define __arm_rsr __iar_builtin_rsr
#define __arm_wsr __iar_builtin_wsr
#if __FPU_PRESENT
#define __get_FPSCR() (__arm_rsr("FPSCR"))
@ -238,8 +246,8 @@
#define __set_mode(VALUE) (__arm_wsr("CPSR_c", (VALUE)))
#define __get_FPEXC() (__arm_rsr("FPEXC"))
#define __set_FPEXC(VALUE) (__arm_wsr("FPEXC", VALUE))
#define __get_CP(cp, op1, RT, CRn, CRm, op2) \
((RT) = __arm_rsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2))
@ -247,11 +255,11 @@
#define __set_CP(cp, op1, RT, CRn, CRm, op2) \
(__arm_wsr("p" # cp ":" # op1 ":c" # CRn ":c" # CRm ":" # op2, (RT)))
#define __get_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) \
__ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"
@ -365,7 +373,7 @@
#ifdef __INTRINSICS_INCLUDED
#error intrinsics.h is already included previously!
#endif
#include <intrinsics.h>
#if !__FPU_PRESENT
@ -375,13 +383,13 @@
#pragma diag_suppress=Pe940
#pragma diag_suppress=Pe177
#define __enable_irq __enable_interrupt
#define __disable_irq __disable_interrupt
#define __enable_fault_irq __enable_fiq
#define __disable_fault_irq __disable_fiq
#define __NOP __no_operation
#define __get_xPSR __get_PSR
__IAR_FT void __set_mode(uint32_t mode)
{
@ -475,64 +483,6 @@ __IAR_FT void __set_SP_usr(uint32_t topOfProcStack)
#define __get_mode() (__get_CPSR() & 0x1FU)
__STATIC_INLINE
void __L1C_CleanInvalidateCache(uint32_t op)
{
__ASM volatile(
" PUSH {R4-R11} \n"
" MRC p15, 1, R6, c0, c0, 1 \n" // Read CLIDR
" ANDS R3, R6, #0x07000000 \n" // Extract coherency level
" MOV R3, R3, LSR #23 \n" // Total cache levels << 1
" BEQ Finished \n" // If 0, no need to clean
" MOV R10, #0 \n" // R10 holds current cache level << 1
"Loop1: ADD R2, R10, R10, LSR #1 \n" // R2 holds cache "Set" position
" MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
" AND R1, R1, #7 \n" // Isolate those lower 3 bits
" CMP R1, #2 \n"
" BLT Skip \n" // No cache or only instruction cache at this level
" MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
" ISB \n" // ISB to sync the change to the CacheSizeID reg
" MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
" AND R2, R1, #7 \n" // Extract the line length field
" ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
" MOVW R4, #0x3FF \n"
" ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number on the way size (right aligned)
" CLZ R5, R4 \n" // R5 is the bit position of the way size increment
" MOVW R7, #0x7FFF \n"
" ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
"Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
"Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
" ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
" CMP R0, #0 \n"
" BNE Dccsw \n"
" MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
" B cont \n"
"Dccsw: CMP R0, #1 \n"
" BNE Dccisw \n"
" MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
" B cont \n"
"Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW. Clean and Invalidate by Set/Way
"cont: SUBS R9, R9, #1 \n" // Decrement the Way number
" BGE Loop3 \n"
" SUBS R7, R7, #1 \n" // Decrement the Set number
" BGE Loop2 \n"
"Skip: ADD R10, R10, #2 \n" // Increment the cache number
" CMP R3, R10 \n"
" BGT Loop1 \n"
"Finished: \n"
" DSB \n"
" POP {R4-R11} "
);
}
__STATIC_INLINE
void __FPU_Enable(void)
{


@ -507,9 +507,9 @@ typedef union
uint32_t CP9:2; /*!< \brief bit:18..19 Access rights for coprocessor 9 */
uint32_t CP10:2; /*!< \brief bit:20..21 Access rights for coprocessor 10 */
uint32_t CP11:2; /*!< \brief bit:22..23 Access rights for coprocessor 11 */
uint32_t CP12:2; /*!< \brief bit:24..25 Access rights for coprocessor 12 */
uint32_t CP13:2; /*!< \brief bit:26..27 Access rights for coprocessor 13 */
uint32_t TRCDIS:1; /*!< \brief bit: 28 Disable CP14 access to trace registers */
RESERVED(0:1, uint32_t)
uint32_t D32DIS:1; /*!< \brief bit: 30 Disable use of registers D16-D31 of the VFP register file */
uint32_t ASEDIS:1; /*!< \brief bit: 31 Disable Advanced SIMD Functionality */
@ -541,7 +541,7 @@ typedef union
uint32_t FS0:4; /*!< \brief bit: 0.. 3 Fault Status bits bit 0-3 */
uint32_t Domain:4; /*!< \brief bit: 4.. 7 Fault on which domain */
RESERVED(0:1, uint32_t)
uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
uint32_t FS1:1; /*!< \brief bit: 10 Fault Status bits bit 4 */
uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
@ -552,7 +552,7 @@ typedef union
{
uint32_t STATUS:5; /*!< \brief bit: 0.. 5 Fault Status bits */
RESERVED(0:3, uint32_t)
uint32_t LPAE:1; /*!< \brief bit: 9 Large Physical Address Extension */
RESERVED(1:1, uint32_t)
uint32_t WnR:1; /*!< \brief bit: 11 Write not Read bit */
uint32_t ExT:1; /*!< \brief bit: 12 External abort type */
@ -848,35 +848,35 @@ typedef struct
/** \brief Enable Caches by setting I and C bits in SCTLR register.
*/
__STATIC_FORCEINLINE void L1C_EnableCaches(void) {
__set_SCTLR( __get_SCTLR() | SCTLR_I_Msk | SCTLR_C_Msk);
__ISB();
}
/** \brief Disable Caches by clearing I and C bits in SCTLR register.
*/
__STATIC_FORCEINLINE void L1C_DisableCaches(void) {
__set_SCTLR( __get_SCTLR() & (~SCTLR_I_Msk) & (~SCTLR_C_Msk));
__ISB();
}
/** \brief Enable Branch Prediction by setting Z bit in SCTLR register.
*/
__STATIC_FORCEINLINE void L1C_EnableBTAC(void) {
__set_SCTLR( __get_SCTLR() | SCTLR_Z_Msk);
__ISB();
}
/** \brief Disable Branch Prediction by clearing Z bit in SCTLR register.
*/
__STATIC_FORCEINLINE void L1C_DisableBTAC(void) {
__set_SCTLR( __get_SCTLR() & (~SCTLR_Z_Msk));
__ISB();
}
/** \brief Invalidate entire branch predictor array
*/
__STATIC_FORCEINLINE void L1C_InvalidateBTAC(void) {
__set_BPIALL(0);
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new state
@ -884,7 +884,7 @@ __STATIC_INLINE void L1C_InvalidateBTAC(void) {
/** \brief Invalidate the whole instruction cache
*/
__STATIC_FORCEINLINE void L1C_InvalidateICacheAll(void) {
__set_ICIALLU(0);
__DSB(); //ensure completion of the invalidation
__ISB(); //ensure instruction fetch path sees new I cache state
@ -893,7 +893,7 @@ __STATIC_INLINE void L1C_InvalidateICacheAll(void) {
/** \brief Clean data cache line by address.
* \param [in] va Pointer to data to clear the cache for.
*/
__STATIC_FORCEINLINE void L1C_CleanDCacheMVA(void *va) {
__set_DCCMVAC((uint32_t)va);
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
@ -901,7 +901,7 @@ __STATIC_INLINE void L1C_CleanDCacheMVA(void *va) {
/** \brief Invalidate data cache line by address.
* \param [in] va Pointer to data to invalidate the cache for.
*/
__STATIC_FORCEINLINE void L1C_InvalidateDCacheMVA(void *va) {
__set_DCIMVAC((uint32_t)va);
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
@ -909,38 +909,130 @@ __STATIC_INLINE void L1C_InvalidateDCacheMVA(void *va) {
/** \brief Clean and Invalidate data cache by address.
* \param [in] va Pointer to data to invalidate the cache for.
*/
__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheMVA(void *va) {
__set_DCCIMVAC((uint32_t)va);
__DMB(); //ensure the ordering of data cache maintenance operations and their effects
}
/** \brief Calculate log2 rounded up
* - log(0) => 0
* - log(1) => 0
* - log(2) => 1
* - log(3) => 2
* - log(4) => 2
* - log(5) => 3
* : :
* - log(16) => 4
* - log(32) => 5
* : :
* \param [in] n input value parameter
* \return log2(n)
*/
__STATIC_FORCEINLINE uint8_t __log2_up(uint32_t n)
{
if (n < 2U) {
return 0U;
}
uint8_t log = 0U;
uint32_t t = n;
while(t > 1U)
{
log++;
t >>= 1U;
}
if ((n & (n - 1U)) != 0U) { log++; }  /* round up for any non-power-of-two, not only odd n */
return log;
}
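A few spot checks against the table above, as a hypothetical host-side test (not part of the header):

#include <assert.h>
static void log2_up_selftest(void)
{
  assert(__log2_up(0U)  == 0U);
  assert(__log2_up(1U)  == 0U);
  assert(__log2_up(3U)  == 2U);
  assert(__log2_up(4U)  == 2U);
  assert(__log2_up(5U)  == 3U);
  assert(__log2_up(32U) == 5U);
}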
/** \brief Apply cache maintenance to given cache level.
* \param [in] level cache level to be maintained
* \param [in] maint 0 - invalidate, 1 - clean, otherwise - invalidate and clean
*/
__STATIC_FORCEINLINE void __L1C_MaintainDCacheSetWay(uint32_t level, uint32_t maint)
{
register volatile uint32_t Dummy;
register volatile uint32_t ccsidr;
uint32_t num_sets;
uint32_t num_ways;
uint32_t shift_way;
uint32_t log2_linesize;
int32_t log2_num_ways;
Dummy = level << 1U;
/* set csselr, select ccsidr register */
__set_CCSIDR(Dummy);
/* get current ccsidr register */
ccsidr = __get_CCSIDR();
num_sets = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U;
num_ways = ((ccsidr & 0x00001FF8U) >> 3U) + 1U;
log2_linesize = (ccsidr & 0x00000007U) + 2U + 2U;
log2_num_ways = __log2_up(num_ways);
if ((log2_num_ways < 0) || (log2_num_ways > 32)) {
return; // FATAL ERROR
}
shift_way = 32U - (uint32_t)log2_num_ways;
for(int32_t way = num_ways-1; way >= 0; way--)
{
for(int32_t set = num_sets-1; set >= 0; set--)
{
Dummy = (level << 1U) | (((uint32_t)set) << log2_linesize) | (((uint32_t)way) << shift_way);
switch (maint)
{
case 0U: __set_DCISW(Dummy); break;
case 1U: __set_DCCSW(Dummy); break;
default: __set_DCCISW(Dummy); break;
}
}
}
__DMB();
}
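As a worked example of the decoding above, take a hypothetical 32 KB, 4-way L1 data cache with 64-byte lines:

uint32_t ccsidr = (127U << 13U) | (3U << 3U) | 2U;       /* = 0x000FE01AU */
uint32_t sets   = ((ccsidr & 0x0FFFE000U) >> 13U) + 1U;  /* 128 sets */
uint32_t ways   = ((ccsidr & 0x00001FF8U) >>  3U) + 1U;  /* 4 ways */
/* log2_linesize = 2 + 4 = 6, shift_way = 32 - __log2_up(4) = 30,
   so the level-0 set/way word is (set << 6) | (way << 30). */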
/** \brief Clean and Invalidate the entire data or unified cache
* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
*/
__STATIC_FORCEINLINE void L1C_CleanInvalidateCache(uint32_t op) {
register volatile uint32_t clidr;
uint32_t cache_type;
clidr = __get_CLIDR();
for(uint32_t i = 0U; i<7U; i++)
{
cache_type = (clidr >> i*3U) & 0x7UL;
if ((cache_type >= 2U) && (cache_type <= 4U))
{
__L1C_MaintainDCacheSetWay(i, op);
}
}
}
/** \brief Clean and Invalidate the entire data or unified cache
* Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency
* \param [in] op 0 - invalidate, 1 - clean, otherwise - invalidate and clean
* \deprecated Use generic L1C_CleanInvalidateCache instead.
*/
__DEPRECATED
__STATIC_FORCEINLINE void __L1C_CleanInvalidateCache(uint32_t op) {
L1C_CleanInvalidateCache(op);
}
/** \brief Invalidate the whole data cache.
*/
__STATIC_FORCEINLINE void L1C_InvalidateDCacheAll(void) {
L1C_CleanInvalidateCache(0);
}
/** \brief Clean the whole data cache.
*/
__STATIC_FORCEINLINE void L1C_CleanDCacheAll(void) {
L1C_CleanInvalidateCache(1);
}
/** \brief Clean and invalidate the whole data cache.
*/
__STATIC_FORCEINLINE void L1C_CleanInvalidateDCacheAll(void) {
L1C_CleanInvalidateCache(2);
}
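A typical sequence built from these wrappers when sharing memory with a non-coherent DMA engine, as a sketch that assumes interrupts stay masked for the duration:

L1C_CleanDCacheAll();        /* write every dirty line back to memory */
/* ... run the DMA transfer here ... */
L1C_InvalidateDCacheAll();   /* drop stale lines before reading the results */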
/* ########################## L2 Cache functions ################################# */
#if (__L2C_PRESENT == 1U) || defined(DOXYGEN)
/** \brief Cache Sync operation by writing CACHE_SYNC register.