diff --git a/cmsis/TARGET_CORTEX_A/core_ca9.h b/cmsis/TARGET_CORTEX_A/core_ca9.h
new file mode 100644
index 0000000000..c7c402d750
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_ca9.h
@@ -0,0 +1,276 @@
+/**************************************************************************//**
+ * @file core_ca9.h
+ * @brief CMSIS Cortex-A9 Core Peripheral Access Layer Header File
+ * @version
+ * @date 25 March 2013
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2012 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#if defined ( __ICCARM__ )
+ #pragma system_include /* treat file as system include file for MISRA check */
+#endif
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef __CORE_CA9_H_GENERIC
+#define __CORE_CA9_H_GENERIC
+
+
+/** \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
+ CMSIS violates the following MISRA-C:2004 rules:
+
+ \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'.
+
+ \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers.
+
+ \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code.
+ */
+
+
+/*******************************************************************************
+ * CMSIS definitions
+ ******************************************************************************/
+/** \ingroup Cortex_A9
+ @{
+ */
+
+/* CMSIS CA9 definitions */
+#define __CA9_CMSIS_VERSION_MAIN (0x03) /*!< [31:16] CMSIS HAL main version */
+#define __CA9_CMSIS_VERSION_SUB (0x10) /*!< [15:0] CMSIS HAL sub version */
+#define __CA9_CMSIS_VERSION ((__CA9_CMSIS_VERSION_MAIN << 16) | \
+ __CA9_CMSIS_VERSION_SUB ) /*!< CMSIS HAL version number */
+
+#define __CORTEX_A (0x09) /*!< Cortex-A Core */
+
+
+#if defined ( __CC_ARM )
+ #define __ASM __asm /*!< asm keyword for ARM Compiler */
+ #define __INLINE __inline /*!< inline keyword for ARM Compiler */
+ #define __STATIC_INLINE static __inline
+ #define __STATIC_ASM static __asm
+
+#elif defined ( __ICCARM__ )
+ #define __ASM __asm /*!< asm keyword for IAR Compiler */
+ #define __INLINE inline /*!< inline keyword for IAR Compiler. Only available in High optimization mode! */
+ #define __STATIC_INLINE static inline
+ #define __STATIC_ASM static __asm
+
+#include <stdint.h>
+inline uint32_t __get_PSR(void) {
+  __ASM("mrs r0, cpsr");   /* CPSR is read into R0, which is also the AAPCS return register */
+}
+
+#elif defined ( __TMS470__ )
+ #define __ASM __asm /*!< asm keyword for TI CCS Compiler */
+ #define __STATIC_INLINE static inline
+ #define __STATIC_ASM static __asm
+
+#elif defined ( __GNUC__ )
+ #define __ASM __asm /*!< asm keyword for GNU Compiler */
+ #define __INLINE inline /*!< inline keyword for GNU Compiler */
+ #define __STATIC_INLINE static inline
+ #define __STATIC_ASM static __asm
+
+#elif defined ( __TASKING__ )
+ #define __ASM __asm /*!< asm keyword for TASKING Compiler */
+ #define __INLINE inline /*!< inline keyword for TASKING Compiler */
+ #define __STATIC_INLINE static inline
+ #define __STATIC_ASM static __asm
+
+#endif
+
+/** __FPU_USED indicates whether an FPU is used or not. For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions.
+*/
+#if defined ( __CC_ARM )
+ #if defined __TARGET_FPU_VFP
+ #if (__FPU_PRESENT == 1)
+ #define __FPU_USED 1
+ #else
+ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #define __FPU_USED 0
+ #endif
+ #else
+ #define __FPU_USED 0
+ #endif
+
+#elif defined ( __ICCARM__ )
+ #if defined __ARMVFP__
+ #if (__FPU_PRESENT == 1)
+ #define __FPU_USED 1
+ #else
+ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #define __FPU_USED 0
+ #endif
+ #else
+ #define __FPU_USED 0
+ #endif
+
+#elif defined ( __TMS470__ )
+ #if defined __TI_VFP_SUPPORT__
+ #if (__FPU_PRESENT == 1)
+ #define __FPU_USED 1
+ #else
+ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #define __FPU_USED 0
+ #endif
+ #else
+ #define __FPU_USED 0
+ #endif
+
+#elif defined ( __GNUC__ )
+ #if defined (__VFP_FP__) && !defined(__SOFTFP__)
+ #if (__FPU_PRESENT == 1)
+ #define __FPU_USED 1
+ #else
+ #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #define __FPU_USED 0
+ #endif
+ #else
+ #define __FPU_USED 0
+ #endif
+
+#elif defined ( __TASKING__ )
+ #if defined __FPU_VFP__
+ #if (__FPU_PRESENT == 1)
+ #define __FPU_USED 1
+ #else
+ #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
+ #define __FPU_USED 0
+ #endif
+ #else
+ #define __FPU_USED 0
+ #endif
+#endif
+
+#include <stdint.h>                      /*!< standard types definitions                      */
+#include "core_caInstr.h" /*!< Core Instruction Access */
+#include "core_caFunc.h" /*!< Core Function Access */
+#include "core_cm4_simd.h" /*!< Compiler specific SIMD Intrinsics */
+
+#endif /* __CORE_CA9_H_GENERIC */
+
+#ifndef __CMSIS_GENERIC
+
+#ifndef __CORE_CA9_H_DEPENDANT
+#define __CORE_CA9_H_DEPENDANT
+
+/* check device defines and use defaults */
+#if defined __CHECK_DEVICE_DEFINES
+ #ifndef __CA9_REV
+ #define __CA9_REV 0x0000
+ #warning "__CA9_REV not defined in device header file; using default!"
+ #endif
+
+ #ifndef __FPU_PRESENT
+ #define __FPU_PRESENT 1
+ #warning "__FPU_PRESENT not defined in device header file; using default!"
+ #endif
+
+ #ifndef __Vendor_SysTickConfig
+ #define __Vendor_SysTickConfig 1
+ #endif
+
+ #if __Vendor_SysTickConfig == 0
+ #error "__Vendor_SysTickConfig set to 0, but vendor systick timer must be supplied for Cortex-A9"
+ #endif
+#endif
+
+/* IO definitions (access restrictions to peripheral registers) */
+/**
+ \defgroup CMSIS_glob_defs CMSIS Global Defines
+
+ IO Type Qualifiers are used
+ \li to specify the access to peripheral variables.
+ \li for automatic generation of peripheral register debug information.
+*/
+#ifdef __cplusplus
+ #define __I volatile /*!< Defines 'read only' permissions */
+#else
+ #define __I volatile const /*!< Defines 'read only' permissions */
+#endif
+#define __O volatile /*!< Defines 'write only' permissions */
+#define __IO volatile /*!< Defines 'read / write' permissions */
+
+/*@} end of group Cortex_A9 */
+
+
+/*******************************************************************************
+ * Register Abstraction
+ ******************************************************************************/
+/** \defgroup CMSIS_core_register Defines and Type Definitions
+ \brief Type definitions and defines for Cortex-A processor based devices.
+*/
+
+/** \ingroup CMSIS_core_register
+ \defgroup CMSIS_CORE Status and Control Registers
+ \brief Core Register type definitions.
+ @{
+ */
+
+/** \brief Union type to access the Application Program Status Register (APSR).
+ */
+typedef union
+{
+ struct
+ {
+ uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */
+ uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */
+    uint32_t _reserved1:7;               /*!< bit: 20..26  Reserved                           */
+ uint32_t Q:1; /*!< bit: 27 Saturation condition flag */
+ uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
+ uint32_t C:1; /*!< bit: 29 Carry condition code flag */
+ uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
+ uint32_t N:1; /*!< bit: 31 Negative condition code flag */
+ } b; /*!< Structure used for bit access */
+ uint32_t w; /*!< Type used for word access */
+} APSR_Type;
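+
+/* Illustrative usage sketch (not part of the CMSIS API itself): the union lets
+   the same status word be read either as individual flags or as a whole word,
+   e.g. with the compiler-specific accessors from core_caFunc.h:
+
+       APSR_Type apsr;
+       apsr.w = __get_APSR();     // word access
+       if (apsr.b.N) {            // bit access: negative condition code flag
+           // ...
+       }
+*/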
+
+
+/*@} end of group CMSIS_CORE */
+
+
+
+#endif /* __CORE_CA9_H_DEPENDANT */
+
+#endif /* __CMSIS_GENERIC */
+
+#ifdef __cplusplus
+}
+
+
+#endif
diff --git a/cmsis/TARGET_CORTEX_A/core_caFunc.h b/cmsis/TARGET_CORTEX_A/core_caFunc.h
new file mode 100644
index 0000000000..6e473214b6
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_caFunc.h
@@ -0,0 +1,1444 @@
+/**************************************************************************//**
+ * @file core_caFunc.h
+ * @brief CMSIS Cortex-A Core Function Access Header File
+ * @version V3.10
+ * @date 30 Oct 2013
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#ifndef __CORE_CAFUNC_H__
+#define __CORE_CAFUNC_H__
+
+
+/* ########################### Core Function Access ########################### */
+/** \ingroup CMSIS_Core_FunctionInterface
+ \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
+ @{
+ */
+
+#if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
+/* ARM armcc specific functions */
+
+#if (__ARMCC_VERSION < 400677)
+ #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
+#endif
+
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_MON 0x16
+#define MODE_ABT 0x17
+#define MODE_HYP 0x1A
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+/** \brief Get APSR Register
+
+ This function returns the content of the APSR Register.
+
+ \return APSR Register value
+ */
+__STATIC_INLINE uint32_t __get_APSR(void)
+{
+ register uint32_t __regAPSR __ASM("apsr");
+ return(__regAPSR);
+}
+
+
+/** \brief Get CPSR Register
+
+ This function returns the content of the CPSR Register.
+
+ \return CPSR Register value
+ */
+__STATIC_INLINE uint32_t __get_CPSR(void)
+{
+ register uint32_t __regCPSR __ASM("cpsr");
+ return(__regCPSR);
+}
+
+/** \brief Set Stack Pointer
+
+ This function assigns the given value to the current stack pointer.
+
+ \param [in] topOfStack Stack Pointer value to set
+ */
+register uint32_t __regSP __ASM("sp");
+__STATIC_INLINE void __set_SP(uint32_t topOfStack)
+{
+ __regSP = topOfStack;
+}
+
+
+/** \brief Get link register
+
+ This function returns the value of the link register
+
+ \return Value of link register
+ */
+register uint32_t __reglr __ASM("lr");
+__STATIC_INLINE uint32_t __get_LR(void)
+{
+ return(__reglr);
+}
+
+/** \brief Set link register
+
+ This function sets the value of the link register
+
+ \param [in] lr LR value to set
+ */
+__STATIC_INLINE void __set_LR(uint32_t lr)
+{
+ __reglr = lr;
+}
+
+/** \brief Set Process Stack Pointer
+
+ This function assigns the given value to the USR/SYS Stack Pointer (PSP).
+
+ \param [in] topOfProcStack USR/SYS Stack Pointer value to set
+ */
+__STATIC_ASM void __set_PSP(uint32_t topOfProcStack)
+{
+ ARM
+ PRESERVE8
+
+ BIC R0, R0, #7 ;ensure stack is 8-byte aligned
+ MRS R1, CPSR
+ CPS #MODE_SYS ;no effect in USR mode
+ MOV SP, R0
+ MSR CPSR_c, R1 ;no effect in USR mode
+ ISB
+ BX LR
+
+}
+
+/** \brief Set User Mode
+
+ This function changes the processor state to User Mode
+ */
+__STATIC_ASM void __set_CPS_USR(void)
+{
+ ARM
+
+ CPS #MODE_USR
+ BX LR
+}
+
+
+/** \brief Enable FIQ
+
+ This function enables FIQ interrupts by clearing the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+#define __enable_fault_irq __enable_fiq
+
+
+/** \brief Disable FIQ
+
+ This function disables FIQ interrupts by setting the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+#define __disable_fault_irq __disable_fiq
+
+
+/** \brief Get FPSCR
+
+ This function returns the current value of the Floating Point Status/Control register.
+
+ \return Floating Point Status/Control register value
+ */
+__STATIC_INLINE uint32_t __get_FPSCR(void)
+{
+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
+ register uint32_t __regfpscr __ASM("fpscr");
+ return(__regfpscr);
+#else
+ return(0);
+#endif
+}
+
+
+/** \brief Set FPSCR
+
+ This function assigns the given value to the Floating Point Status/Control register.
+
+ \param [in] fpscr Floating Point Status/Control value to set
+ */
+__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
+ register uint32_t __regfpscr __ASM("fpscr");
+ __regfpscr = (fpscr);
+#endif
+}
+
+/** \brief Get FPEXC
+
+ This function returns the current value of the Floating Point Exception Control register.
+
+ \return Floating Point Exception Control register value
+ */
+__STATIC_INLINE uint32_t __get_FPEXC(void)
+{
+#if (__FPU_PRESENT == 1)
+ register uint32_t __regfpexc __ASM("fpexc");
+ return(__regfpexc);
+#else
+ return(0);
+#endif
+}
+
+
+/** \brief Set FPEXC
+
+ This function assigns the given value to the Floating Point Exception Control register.
+
+    \param [in]    fpexc  Floating Point Exception Control value to set
+ */
+__STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
+{
+#if (__FPU_PRESENT == 1)
+ register uint32_t __regfpexc __ASM("fpexc");
+ __regfpexc = (fpexc);
+#endif
+}
+
+/** \brief Get CPACR
+
+ This function returns the current value of the Coprocessor Access Control register.
+
+ \return Coprocessor Access Control register value
+ */
+__STATIC_INLINE uint32_t __get_CPACR(void)
+{
+ register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
+ return __regCPACR;
+}
+
+/** \brief Set CPACR
+
+ This function assigns the given value to the Coprocessor Access Control register.
+
+    \param [in]    cpacr  Coprocessor Access Control value to set
+ */
+__STATIC_INLINE void __set_CPACR(uint32_t cpacr)
+{
+ register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
+ __regCPACR = cpacr;
+ __ISB();
+}
+
+/** \brief Get CBAR
+
+ This function returns the value of the Configuration Base Address register.
+
+ \return Configuration Base Address register value
+ */
+__STATIC_INLINE uint32_t __get_CBAR() {
+ register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
+ return(__regCBAR);
+}
+
+/** \brief Get TTBR0
+
+ This function returns the value of the Translation Table Base Register 0.
+
+ \return Translation Table Base Register 0 value
+ */
+__STATIC_INLINE uint32_t __get_TTBR0() {
+ register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
+ return(__regTTBR0);
+}
+
+/** \brief Set TTBR0
+
+ This function assigns the given value to the Translation Table Base Register 0.
+
+ \param [in] ttbr0 Translation Table Base Register 0 value to set
+ */
+__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
+ register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
+ __regTTBR0 = ttbr0;
+ __ISB();
+}
+
+/** \brief Get DACR
+
+ This function returns the value of the Domain Access Control Register.
+
+ \return Domain Access Control Register value
+ */
+__STATIC_INLINE uint32_t __get_DACR() {
+ register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
+ return(__regDACR);
+}
+
+/** \brief Set DACR
+
+ This function assigns the given value to the Domain Access Control Register.
+
+ \param [in] dacr Domain Access Control Register value to set
+ */
+__STATIC_INLINE void __set_DACR(uint32_t dacr) {
+ register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
+ __regDACR = dacr;
+ __ISB();
+}
+
+/******************************** Cache and BTAC enable ****************************************************/
+
+/** \brief Set SCTLR
+
+ This function assigns the given value to the System Control Register.
+
+ \param [in] sctlr System Control Register value to set
+ */
+__STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
+{
+ register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
+ __regSCTLR = sctlr;
+}
+
+/** \brief Get SCTLR
+
+ This function returns the value of the System Control Register.
+
+ \return System Control Register value
+ */
+__STATIC_INLINE uint32_t __get_SCTLR() {
+ register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
+ return(__regSCTLR);
+}
+
+/** \brief Enable Caches
+
+ Enable Caches
+ */
+__STATIC_INLINE void __enable_caches(void) {
+ // Set I bit 12 to enable I Cache
+ // Set C bit 2 to enable D Cache
+ __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
+}
+
+/** \brief Disable Caches
+
+ Disable Caches
+ */
+__STATIC_INLINE void __disable_caches(void) {
+ // Clear I bit 12 to disable I Cache
+ // Clear C bit 2 to disable D Cache
+ __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
+ __ISB();
+}
+
+/** \brief Enable BTAC
+
+ Enable BTAC
+ */
+__STATIC_INLINE void __enable_btac(void) {
+ // Set Z bit 11 to enable branch prediction
+ __set_SCTLR( __get_SCTLR() | (1 << 11));
+ __ISB();
+}
+
+/** \brief Disable BTAC
+
+ Disable BTAC
+ */
+__STATIC_INLINE void __disable_btac(void) {
+ // Clear Z bit 11 to disable branch prediction
+ __set_SCTLR( __get_SCTLR() & ~(1 << 11));
+}
+
+
+/** \brief Enable MMU
+
+ Enable MMU
+ */
+__STATIC_INLINE void __enable_mmu(void) {
+ // Set M bit 0 to enable the MMU
+ // Set AFE bit to enable simplified access permissions model
+ // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
+ __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
+ __ISB();
+}
+
+/** \brief Disable MMU
+
+ Disable MMU
+ */
+__STATIC_INLINE void __disable_mmu(void) {
+ // Clear M bit 0 to disable the MMU
+ __set_SCTLR( __get_SCTLR() & ~1);
+ __ISB();
+}
+
+/******************************** TLB maintenance operations ************************************************/
+/** \brief Invalidate the whole tlb
+
+ TLBIALL. Invalidate the whole tlb
+ */
+
+__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
+ register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
+ __TLBIALL = 0;
+ __DSB();
+ __ISB();
+}
+
+/******************************** BTB maintenance operations ************************************************/
+/** \brief Invalidate entire branch predictor array
+
+ BPIALL. Branch Predictor Invalidate All.
+ */
+
+__STATIC_INLINE void __v7_inv_btac(void) {
+ register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
+ __BPIALL = 0;
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new state
+}
+
+
+/******************************** L1 cache operations ******************************************************/
+
+/** \brief Invalidate the whole I$
+
+ ICIALLU. Instruction Cache Invalidate All to PoU
+ */
+__STATIC_INLINE void __v7_inv_icache_all(void) {
+ register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
+ __ICIALLU = 0;
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief Clean D$ by MVA
+
+ DCCMVAC. Data cache clean by MVA to PoC
+ */
+__STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
+ register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
+ __DCCMVAC = (uint32_t)va;
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Invalidate D$ by MVA
+
+ DCIMVAC. Data cache invalidate by MVA to PoC
+ */
+__STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
+ register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
+ __DCIMVAC = (uint32_t)va;
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate D$ by MVA
+
+ DCCIMVAC. Data cache clean and invalidate by MVA to PoC
+ */
+__STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
+ register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
+ __DCCIMVAC = (uint32_t)va;
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate the entire data or unified cache
+
+ Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
+ */
+#pragma push
+#pragma arm
+__STATIC_ASM void __v7_all_cache(uint32_t op) {
+ ARM
+
+ PUSH {R4-R11}
+
+ MRC p15, 1, R6, c0, c0, 1 // Read CLIDR
+ ANDS R3, R6, #0x07000000 // Extract coherency level
+ MOV R3, R3, LSR #23 // Total cache levels << 1
+ BEQ Finished // If 0, no need to clean
+
+ MOV R10, #0 // R10 holds current cache level << 1
+Loop1 ADD R2, R10, R10, LSR #1 // R2 holds cache "Set" position
+ MOV R1, R6, LSR R2 // Bottom 3 bits are the Cache-type for this level
+ AND R1, R1, #7 // Isolate those lower 3 bits
+ CMP R1, #2
+ BLT Skip // No cache or only instruction cache at this level
+
+ MCR p15, 2, R10, c0, c0, 0 // Write the Cache Size selection register
+ ISB // ISB to sync the change to the CacheSizeID reg
+ MRC p15, 1, R1, c0, c0, 0 // Reads current Cache Size ID register
+ AND R2, R1, #7 // Extract the line length field
+ ADD R2, R2, #4 // Add 4 for the line length offset (log2 16 bytes)
+ LDR R4, =0x3FF
+ ANDS R4, R4, R1, LSR #3 // R4 is the max number on the way size (right aligned)
+ CLZ R5, R4 // R5 is the bit position of the way size increment
+ LDR R7, =0x7FFF
+ ANDS R7, R7, R1, LSR #13 // R7 is the max number of the index size (right aligned)
+
+Loop2 MOV R9, R4 // R9 working copy of the max way size (right aligned)
+
+Loop3 ORR R11, R10, R9, LSL R5 // Factor in the Way number and cache number into R11
+ ORR R11, R11, R7, LSL R2 // Factor in the Set number
+ CMP R0, #0
+ BNE Dccsw
+ MCR p15, 0, R11, c7, c6, 2 // DCISW. Invalidate by Set/Way
+ B cont
+Dccsw CMP R0, #1
+ BNE Dccisw
+ MCR p15, 0, R11, c7, c10, 2 // DCCSW. Clean by Set/Way
+ B cont
+Dccisw MCR p15, 0, R11, c7, c14, 2 // DCCISW. Clean and Invalidate by Set/Way
+cont SUBS R9, R9, #1 // Decrement the Way number
+ BGE Loop3
+ SUBS R7, R7, #1 // Decrement the Set number
+ BGE Loop2
+Skip ADD R10, R10, #2 // Increment the cache number
+ CMP R3, R10
+ BGT Loop1
+
+Finished
+ DSB
+ POP {R4-R11}
+ BX lr
+
+}
+#pragma pop
+
+
+/** \brief Invalidate the whole D$
+
+ DCISW. Invalidate by Set/Way
+ */
+
+__STATIC_INLINE void __v7_inv_dcache_all(void) {
+ __v7_all_cache(0);
+}
+
+/** \brief Clean the whole D$
+
+ DCCSW. Clean by Set/Way
+ */
+
+__STATIC_INLINE void __v7_clean_dcache_all(void) {
+ __v7_all_cache(1);
+}
+
+/** \brief Clean and invalidate the whole D$
+
+ DCCISW. Clean and Invalidate by Set/Way
+ */
+
+__STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
+ __v7_all_cache(2);
+}
+
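+/* Hedged usage sketch (not mandated by this header): a bare-metal bring-up
+   typically invalidates stale state before turning the MMU, caches and branch
+   predictor on, roughly:
+
+       __ca9u_inv_tlb_all();      // no stale translations
+       __v7_inv_icache_all();     // no stale instructions
+       __v7_inv_dcache_all();     // no stale data (wraps __v7_all_cache(0))
+       __set_TTBR0(ttb_base);     // ttb_base: device-specific translation table address
+       __set_DACR(dacr_value);    // dacr_value: device-specific domain access settings
+       __enable_mmu();
+       __enable_caches();
+       __enable_btac();
+
+   ttb_base and dacr_value are placeholders; they depend entirely on the target's
+   memory map. */
+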
+#include "core_ca_mmu.h"
+
+#elif (defined (__ICCARM__)) /*---------------- ICC Compiler ---------------------*/
+
+#define __inline inline
+
+inline static uint32_t __disable_irq_iar() {
+  int irq_dis = __get_CPSR() & 0x80;     // CPSR.I is bit 7
+ __disable_irq();
+ return irq_dis;
+}
+
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_MON 0x16
+#define MODE_ABT 0x17
+#define MODE_HYP 0x1A
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+/** \brief Set Process Stack Pointer
+
+ This function assigns the given value to the USR/SYS Stack Pointer (PSP).
+
+ \param [in] topOfProcStack USR/SYS Stack Pointer value to set
+ */
+// from rt_CMSIS.c
+__arm static inline void __set_PSP(uint32_t topOfProcStack) {
+__asm(
+ " ARM\n"
+// " PRESERVE8\n"
+
+ " BIC R0, R0, #7 ;ensure stack is 8-byte aligned \n"
+ " MRS R1, CPSR \n"
+ " CPS #0x1F ;no effect in USR mode \n" // MODE_SYS
+ " MOV SP, R0 \n"
+ " MSR CPSR_c, R1 ;no effect in USR mode \n"
+ " ISB \n"
+ " BX LR \n");
+}
+
+/** \brief Set User Mode
+
+ This function changes the processor state to User Mode
+ */
+// from rt_CMSIS.c
+__arm static inline void __set_CPS_USR(void) {
+__asm(
+ " ARM \n"
+
+ " CPS #0x10 \n" // MODE_USR
+ " BX LR\n");
+}
+
+/** \brief Set TTBR0
+
+ This function assigns the given value to the Translation Table Base Register 0.
+
+ \param [in] ttbr0 Translation Table Base Register 0 value to set
+ */
+// from mmu_Renesas_RZ_A1.c
+__STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
+ __MCR(15, 0, ttbr0, 2, 0, 0); // reg to cp15
+ __ISB();
+}
+
+/** \brief Set DACR
+
+ This function assigns the given value to the Domain Access Control Register.
+
+ \param [in] dacr Domain Access Control Register value to set
+ */
+// from mmu_Renesas_RZ_A1.c
+__STATIC_INLINE void __set_DACR(uint32_t dacr) {
+ __MCR(15, 0, dacr, 3, 0, 0); // reg to cp15
+ __ISB();
+}
+
+
+/******************************** Cache and BTAC enable ****************************************************/
+/** \brief Set SCTLR
+
+ This function assigns the given value to the System Control Register.
+
+ \param [in] sctlr System Control Register value to set
+ */
+// from __enable_mmu()
+__STATIC_INLINE void __set_SCTLR(uint32_t sctlr) {
+ __MCR(15, 0, sctlr, 1, 0, 0); // reg to cp15
+}
+
+/** \brief Get SCTLR
+
+ This function returns the value of the System Control Register.
+
+ \return System Control Register value
+ */
+// from __enable_mmu()
+__STATIC_INLINE uint32_t __get_SCTLR() {
+ uint32_t __regSCTLR = __MRC(15, 0, 1, 0, 0);
+ return __regSCTLR;
+}
+
+/** \brief Enable Caches
+
+ Enable Caches
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_caches(void) {
+ __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
+}
+
+/** \brief Enable BTAC
+
+ Enable BTAC
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_btac(void) {
+ __set_SCTLR( __get_SCTLR() | (1 << 11));
+ __ISB();
+}
+
+/** \brief Enable MMU
+
+ Enable MMU
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __enable_mmu(void) {
+ // Set M bit 0 to enable the MMU
+ // Set AFE bit to enable simplified access permissions model
+ // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
+ __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
+ __ISB();
+}
+
+/******************************** TLB maintenance operations ************************************************/
+/** \brief Invalidate the whole tlb
+
+ TLBIALL. Invalidate the whole tlb
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __ca9u_inv_tlb_all(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 8, 7, 0); // reg to cp15
+ __MCR(15, 0, val, 8, 6, 0); // reg to cp15
+ __MCR(15, 0, val, 8, 5, 0); // reg to cp15
+ __DSB();
+ __ISB();
+}
+
+/******************************** BTB maintenance operations ************************************************/
+/** \brief Invalidate entire branch predictor array
+
+ BPIALL. Branch Predictor Invalidate All.
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_btac(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 7, 5, 6); // reg to cp15
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new state
+}
+
+
+/******************************** L1 cache operations ******************************************************/
+
+/** \brief Invalidate the whole I$
+
+ ICIALLU. Instruction Cache Invalidate All to PoU
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_icache_all(void) {
+ uint32_t val = 0;
+ __MCR(15, 0, val, 7, 5, 0); // reg to cp15
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new I cache state
+}
+
+// from __v7_inv_dcache_all()
+__arm static inline void __v7_all_cache(uint32_t op) {
+__asm(
+ " ARM \n"
+
+ " PUSH {R4-R11} \n"
+
+ " MRC p15, 1, R6, c0, c0, 1\n" // Read CLIDR
+ " ANDS R3, R6, #0x07000000\n" // Extract coherency level
+ " MOV R3, R3, LSR #23\n" // Total cache levels << 1
+ " BEQ Finished\n" // If 0, no need to clean
+
+ " MOV R10, #0\n" // R10 holds current cache level << 1
+ "Loop1: ADD R2, R10, R10, LSR #1\n" // R2 holds cache "Set" position
+ " MOV R1, R6, LSR R2 \n" // Bottom 3 bits are the Cache-type for this level
+ " AND R1, R1, #7 \n" // Isolate those lower 3 bits
+ " CMP R1, #2 \n"
+ " BLT Skip \n" // No cache or only instruction cache at this level
+
+ " MCR p15, 2, R10, c0, c0, 0 \n" // Write the Cache Size selection register
+ " ISB \n" // ISB to sync the change to the CacheSizeID reg
+ " MRC p15, 1, R1, c0, c0, 0 \n" // Reads current Cache Size ID register
+ " AND R2, R1, #7 \n" // Extract the line length field
+ " ADD R2, R2, #4 \n" // Add 4 for the line length offset (log2 16 bytes)
+ " movw R4, #0x3FF \n"
+ " ANDS R4, R4, R1, LSR #3 \n" // R4 is the max number on the way size (right aligned)
+ " CLZ R5, R4 \n" // R5 is the bit position of the way size increment
+ " movw R7, #0x7FFF \n"
+ " ANDS R7, R7, R1, LSR #13 \n" // R7 is the max number of the index size (right aligned)
+
+ "Loop2: MOV R9, R4 \n" // R9 working copy of the max way size (right aligned)
+
+ "Loop3: ORR R11, R10, R9, LSL R5 \n" // Factor in the Way number and cache number into R11
+ " ORR R11, R11, R7, LSL R2 \n" // Factor in the Set number
+ " CMP R0, #0 \n"
+ " BNE Dccsw \n"
+ " MCR p15, 0, R11, c7, c6, 2 \n" // DCISW. Invalidate by Set/Way
+ " B cont \n"
+ "Dccsw: CMP R0, #1 \n"
+ " BNE Dccisw \n"
+ " MCR p15, 0, R11, c7, c10, 2 \n" // DCCSW. Clean by Set/Way
+ " B cont \n"
+ "Dccisw: MCR p15, 0, R11, c7, c14, 2 \n" // DCCISW, Clean and Invalidate by Set/Way
+ "cont: SUBS R9, R9, #1 \n" // Decrement the Way number
+ " BGE Loop3 \n"
+ " SUBS R7, R7, #1 \n" // Decrement the Set number
+ " BGE Loop2 \n"
+ "Skip: ADD R10, R10, #2 \n" // increment the cache number
+ " CMP R3, R10 \n"
+ " BGT Loop1 \n"
+
+ "Finished: \n"
+ " DSB \n"
+ " POP {R4-R11} \n"
+ " BX lr \n" );
+}
+
+/** \brief Invalidate the whole D$
+
+ DCISW. Invalidate by Set/Way
+ */
+// from system_Renesas_RZ_A1.c
+__STATIC_INLINE void __v7_inv_dcache_all(void) {
+ __v7_all_cache(0);
+}
+/** \brief Clean the whole D$
+
+ DCCSW. Clean by Set/Way
+ */
+
+__STATIC_INLINE void __v7_clean_dcache_all(void) {
+ __v7_all_cache(1);
+}
+
+/** \brief Clean and invalidate the whole D$
+
+ DCCISW. Clean and Invalidate by Set/Way
+ */
+
+__STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
+ __v7_all_cache(2);
+}
+/** \brief Clean and Invalidate D$ by MVA
+
+ DCCIMVAC. Data cache clean and invalidate by MVA to PoC
+ */
+__STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
+ __MCR(15, 0, (uint32_t)va, 7, 14, 1);
+ __DMB();
+}
+
+#include "core_ca_mmu.h"
+
+#elif (defined (__GNUC__)) /*------------------ GNU Compiler ---------------------*/
+/* GNU gcc specific functions */
+
+#define MODE_USR 0x10
+#define MODE_FIQ 0x11
+#define MODE_IRQ 0x12
+#define MODE_SVC 0x13
+#define MODE_MON 0x16
+#define MODE_ABT 0x17
+#define MODE_HYP 0x1A
+#define MODE_UND 0x1B
+#define MODE_SYS 0x1F
+
+
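+/** \brief  Enable IRQ Interrupts
+
+    This function enables IRQ interrupts by clearing the I-bit in the CPSR.
+    Can only be executed in Privileged modes.
+ */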
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
+{
+ __ASM volatile ("cpsie i");
+}
+
+/** \brief Disable IRQ Interrupts
+
+ This function disables IRQ interrupts by setting the I-bit in the CPSR.
+ Can only be executed in Privileged modes.
+
+    \return               CPSR I-bit state before IRQs were disabled (non-zero if they were already masked)
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __disable_irq(void)
+{
+ uint32_t result;
+
+ __ASM volatile ("mrs %0, cpsr" : "=r" (result));
+ __ASM volatile ("cpsid i");
+ return(result & 0x80);
+}
+
+
+/** \brief Get APSR Register
+
+ This function returns the content of the APSR Register.
+
+ \return APSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
+{
+#if 1
+ register uint32_t __regAPSR;
+ __ASM volatile ("mrs %0, apsr" : "=r" (__regAPSR) );
+#else
+ register uint32_t __regAPSR __ASM("apsr");
+#endif
+ return(__regAPSR);
+}
+
+
+/** \brief Get CPSR Register
+
+ This function returns the content of the CPSR Register.
+
+ \return CPSR Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPSR(void)
+{
+#if 1
+ register uint32_t __regCPSR;
+ __ASM volatile ("mrs %0, cpsr" : "=r" (__regCPSR));
+#else
+ register uint32_t __regCPSR __ASM("cpsr");
+#endif
+ return(__regCPSR);
+}
+
+#if 0
+/** \brief Set Stack Pointer
+
+ This function assigns the given value to the current stack pointer.
+
+ \param [in] topOfStack Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SP(uint32_t topOfStack)
+{
+ register uint32_t __regSP __ASM("sp");
+ __regSP = topOfStack;
+}
+#endif
+
+/** \brief Get link register
+
+ This function returns the value of the link register
+
+ \return Value of link register
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_LR(void)
+{
+ register uint32_t __reglr __ASM("lr");
+ return(__reglr);
+}
+
+#if 0
+/** \brief Set link register
+
+ This function sets the value of the link register
+
+ \param [in] lr LR value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_LR(uint32_t lr)
+{
+ register uint32_t __reglr __ASM("lr");
+ __reglr = lr;
+}
+#endif
+
+/** \brief Set Process Stack Pointer
+
+ This function assigns the given value to the USR/SYS Stack Pointer (PSP).
+
+ \param [in] topOfProcStack USR/SYS Stack Pointer value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
+{
+ __asm__ volatile (
+ ".ARM;"
+ ".eabi_attribute Tag_ABI_align8_preserved,1;"
+
+ "BIC R0, R0, #7;" /* ;ensure stack is 8-byte aligned */
+ "MRS R1, CPSR;"
+ "CPS %0;" /* ;no effect in USR mode */
+ "MOV SP, R0;"
+ "MSR CPSR_c, R1;" /* ;no effect in USR mode */
+ "ISB;"
+ //"BX LR;"
+ :
+ : "i"(MODE_SYS)
+ : "r0", "r1");
+ return;
+}
+
+/** \brief Set User Mode
+
+ This function changes the processor state to User Mode
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPS_USR(void)
+{
+ __asm__ volatile (
+ ".ARM;"
+
+ "CPS %0;"
+ //"BX LR;"
+ :
+ : "i"(MODE_USR)
+ : );
+ return;
+}
+
+
+/** \brief Enable FIQ
+
+ This function enables FIQ interrupts by clearing the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+#define __enable_fault_irq() __asm__ volatile ("cpsie f")
+
+
+/** \brief Disable FIQ
+
+ This function disables FIQ interrupts by setting the F-bit in the CPSR.
+ Can only be executed in Privileged modes.
+ */
+#define __disable_fault_irq() __asm__ volatile ("cpsid f")
+
+
+/** \brief Get FPSCR
+
+ This function returns the current value of the Floating Point Status/Control register.
+
+ \return Floating Point Status/Control register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
+{
+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
+#if 1
+ uint32_t result;
+
+ __ASM volatile ("vmrs %0, fpscr" : "=r" (result) );
+ return (result);
+#else
+ register uint32_t __regfpscr __ASM("fpscr");
+ return(__regfpscr);
+#endif
+#else
+ return(0);
+#endif
+}
+
+
+/** \brief Set FPSCR
+
+ This function assigns the given value to the Floating Point Status/Control register.
+
+ \param [in] fpscr Floating Point Status/Control value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
+{
+#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
+#if 1
+ __ASM volatile ("vmsr fpscr, %0" : : "r" (fpscr) );
+#else
+ register uint32_t __regfpscr __ASM("fpscr");
+ __regfpscr = (fpscr);
+#endif
+#endif
+}
+
+/** \brief Get FPEXC
+
+ This function returns the current value of the Floating Point Exception Control register.
+
+ \return Floating Point Exception Control register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPEXC(void)
+{
+#if (__FPU_PRESENT == 1)
+#if 1
+ uint32_t result;
+
+ __ASM volatile ("vmrs %0, fpexc" : "=r" (result));
+ return (result);
+#else
+ register uint32_t __regfpexc __ASM("fpexc");
+ return(__regfpexc);
+#endif
+#else
+ return(0);
+#endif
+}
+
+
+/** \brief Set FPEXC
+
+ This function assigns the given value to the Floating Point Exception Control register.
+
+    \param [in]    fpexc  Floating Point Exception Control value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPEXC(uint32_t fpexc)
+{
+#if (__FPU_PRESENT == 1)
+#if 1
+ __ASM volatile ("vmsr fpexc, %0" : : "r" (fpexc));
+#else
+ register uint32_t __regfpexc __ASM("fpexc");
+ __regfpexc = (fpexc);
+#endif
+#endif
+}
+
+/** \brief Get CPACR
+
+ This function returns the current value of the Coprocessor Access Control register.
+
+ \return Coprocessor Access Control register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CPACR(void)
+{
+#if 1
+ register uint32_t __regCPACR;
+ __ASM volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (__regCPACR));
+#else
+ register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
+#endif
+ return __regCPACR;
+}
+
+/** \brief Set CPACR
+
+ This function assigns the given value to the Coprocessor Access Control register.
+
+    \param [in]    cpacr  Coprocessor Access Control value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CPACR(uint32_t cpacr)
+{
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c1, c0, 2" : : "r" (cpacr));
+#else
+ register uint32_t __regCPACR __ASM("cp15:0:c1:c0:2");
+ __regCPACR = cpacr;
+#endif
+ __ISB();
+}
+
+/** \brief Get CBAR
+
+ This function returns the value of the Configuration Base Address register.
+
+ \return Configuration Base Address register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CBAR() {
+#if 1
+ register uint32_t __regCBAR;
+ __ASM volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (__regCBAR));
+#else
+ register uint32_t __regCBAR __ASM("cp15:4:c15:c0:0");
+#endif
+ return(__regCBAR);
+}
+
+/** \brief Get TTBR0
+
+ This function returns the value of the Translation Table Base Register 0.
+
+ \return Translation Table Base Register 0 value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_TTBR0() {
+#if 1
+ register uint32_t __regTTBR0;
+ __ASM volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (__regTTBR0));
+#else
+ register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
+#endif
+ return(__regTTBR0);
+}
+
+/** \brief Set TTBR0
+
+ This function assigns the given value to the Translation Table Base Register 0.
+
+ \param [in] ttbr0 Translation Table Base Register 0 value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_TTBR0(uint32_t ttbr0) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c2, c0, 0" : : "r" (ttbr0));
+#else
+ register uint32_t __regTTBR0 __ASM("cp15:0:c2:c0:0");
+ __regTTBR0 = ttbr0;
+#endif
+ __ISB();
+}
+
+/** \brief Get DACR
+
+ This function returns the value of the Domain Access Control Register.
+
+ \return Domain Access Control Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_DACR() {
+#if 1
+ register uint32_t __regDACR;
+ __ASM volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (__regDACR));
+#else
+ register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
+#endif
+ return(__regDACR);
+}
+
+/** \brief Set DACR
+
+ This function assigns the given value to the Domain Access Control Register.
+
+ \param [in] dacr Domain Access Control Register value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_DACR(uint32_t dacr) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c3, c0, 0" : : "r" (dacr));
+#else
+ register uint32_t __regDACR __ASM("cp15:0:c3:c0:0");
+ __regDACR = dacr;
+#endif
+ __ISB();
+}
+
+/******************************** Cache and BTAC enable ****************************************************/
+
+/** \brief Set SCTLR
+
+ This function assigns the given value to the System Control Register.
+
+ \param [in] sctlr System Control Register value to set
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_SCTLR(uint32_t sctlr)
+{
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
+#else
+ register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
+ __regSCTLR = sctlr;
+#endif
+}
+
+/** \brief Get SCTLR
+
+ This function returns the value of the System Control Register.
+
+ \return System Control Register value
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_SCTLR() {
+#if 1
+ register uint32_t __regSCTLR;
+ __ASM volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (__regSCTLR));
+#else
+ register uint32_t __regSCTLR __ASM("cp15:0:c1:c0:0");
+#endif
+ return(__regSCTLR);
+}
+
+/** \brief Enable Caches
+
+ Enable Caches
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_caches(void) {
+ // Set I bit 12 to enable I Cache
+ // Set C bit 2 to enable D Cache
+ __set_SCTLR( __get_SCTLR() | (1 << 12) | (1 << 2));
+}
+
+/** \brief Disable Caches
+
+ Disable Caches
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_caches(void) {
+ // Clear I bit 12 to disable I Cache
+ // Clear C bit 2 to disable D Cache
+ __set_SCTLR( __get_SCTLR() & ~(1 << 12) & ~(1 << 2));
+ __ISB();
+}
+
+/** \brief Enable BTAC
+
+ Enable BTAC
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_btac(void) {
+ // Set Z bit 11 to enable branch prediction
+ __set_SCTLR( __get_SCTLR() | (1 << 11));
+ __ISB();
+}
+
+/** \brief Disable BTAC
+
+ Disable BTAC
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_btac(void) {
+ // Clear Z bit 11 to disable branch prediction
+ __set_SCTLR( __get_SCTLR() & ~(1 << 11));
+}
+
+
+/** \brief Enable MMU
+
+ Enable MMU
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_mmu(void) {
+ // Set M bit 0 to enable the MMU
+ // Set AFE bit to enable simplified access permissions model
+ // Clear TRE bit to disable TEX remap and A bit to disable strict alignment fault checking
+ __set_SCTLR( (__get_SCTLR() & ~(1 << 28) & ~(1 << 1)) | 1 | (1 << 29));
+ __ISB();
+}
+
+/** \brief Disable MMU
+
+ Disable MMU
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_mmu(void) {
+ // Clear M bit 0 to disable the MMU
+ __set_SCTLR( __get_SCTLR() & ~1);
+ __ISB();
+}
+
+/******************************** TLB maintenance operations ************************************************/
+/** \brief Invalidate the whole tlb
+
+ TLBIALL. Invalidate the whole tlb
+ */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __ca9u_inv_tlb_all(void) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0));
+#else
+ register uint32_t __TLBIALL __ASM("cp15:0:c8:c7:0");
+ __TLBIALL = 0;
+#endif
+ __DSB();
+ __ISB();
+}
+
+/******************************** BTB maintenance operations ************************************************/
+/** \brief Invalidate entire branch predictor array
+
+ BPIALL. Branch Predictor Invalidate All.
+ */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_btac(void) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));
+#else
+ register uint32_t __BPIALL __ASM("cp15:0:c7:c5:6");
+ __BPIALL = 0;
+#endif
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new state
+}
+
+
+/******************************** L1 cache operations ******************************************************/
+
+/** \brief Invalidate the whole I$
+
+ ICIALLU. Instruction Cache Invalidate All to PoU
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_icache_all(void) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
+#else
+ register uint32_t __ICIALLU __ASM("cp15:0:c7:c5:0");
+ __ICIALLU = 0;
+#endif
+ __DSB(); //ensure completion of the invalidation
+ __ISB(); //ensure instruction fetch path sees new I cache state
+}
+
+/** \brief Clean D$ by MVA
+
+ DCCMVAC. Data cache clean by MVA to PoC
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_mva(void *va) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r" ((uint32_t)va));
+#else
+ register uint32_t __DCCMVAC __ASM("cp15:0:c7:c10:1");
+ __DCCMVAC = (uint32_t)va;
+#endif
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Invalidate D$ by MVA
+
+ DCIMVAC. Data cache invalidate by MVA to PoC
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_mva(void *va) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" ((uint32_t)va));
+#else
+ register uint32_t __DCIMVAC __ASM("cp15:0:c7:c6:1");
+ __DCIMVAC = (uint32_t)va;
+#endif
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate D$ by MVA
+
+ DCCIMVAC. Data cache clean and invalidate by MVA to PoC
+ */
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_mva(void *va) {
+#if 1
+ __ASM volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" ((uint32_t)va));
+#else
+ register uint32_t __DCCIMVAC __ASM("cp15:0:c7:c14:1");
+ __DCCIMVAC = (uint32_t)va;
+#endif
+ __DMB(); //ensure the ordering of data cache maintenance operations and their effects
+}
+
+/** \brief Clean and Invalidate the entire data or unified cache
+
+ Generic mechanism for cleaning/invalidating the entire data or unified cache to the point of coherency.
+ */
+extern void __v7_all_cache(uint32_t op);
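+/* Note: unlike the armcc branch above, the GNU branch does not define this routine
+   in the header; the port is expected to provide it elsewhere (typically in startup
+   assembly), with op = 0 invalidate, 1 clean, 2 clean-and-invalidate, as used by the
+   wrappers below. */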
+
+
+/** \brief Invalidate the whole D$
+
+ DCISW. Invalidate by Set/Way
+ */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_inv_dcache_all(void) {
+ __v7_all_cache(0);
+}
+
+/** \brief Clean the whole D$
+
+ DCCSW. Clean by Set/Way
+ */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_dcache_all(void) {
+ __v7_all_cache(1);
+}
+
+/** \brief Clean and invalidate the whole D$
+
+ DCCISW. Clean and Invalidate by Set/Way
+ */
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE void __v7_clean_inv_dcache_all(void) {
+ __v7_all_cache(2);
+}
+
+#include "core_ca_mmu.h"
+
+#elif (defined (__TASKING__)) /*--------------- TASKING Compiler -----------------*/
+
+#error TASKING Compiler support not implemented for Cortex-A
+
+#endif
+
+/*@} end of CMSIS_Core_RegAccFunctions */
+
+
+#endif /* __CORE_CAFUNC_H__ */
diff --git a/cmsis/TARGET_CORTEX_A/core_caInstr.h b/cmsis/TARGET_CORTEX_A/core_caInstr.h
new file mode 100644
index 0000000000..b1d3435765
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_caInstr.h
@@ -0,0 +1,45 @@
+/**************************************************************************//**
+ * @file core_caInstr.h
+ * @brief    CMSIS Cortex-A Core Instruction Access Header File
+ * @version
+ * @date 04. December 2012
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2012 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+#ifndef __CORE_CAINSTR_H__
+#define __CORE_CAINSTR_H__
+
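+/* core_cmInstr.h is the CMSIS instruction-access header shared with the Cortex-M
+   ports and gated on __CORTEX_M. Temporarily defining it as 0x3 (Cortex-M3) selects
+   the ARMv7-level intrinsics (__NOP, __DSB, __ISB, ...), which are also valid on
+   Cortex-A; the macro is undefined again straight after the include. */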
+#define __CORTEX_M 0x3
+#include "core_cmInstr.h"
+#undef __CORTEX_M
+
+#endif
+
diff --git a/cmsis/TARGET_CORTEX_A/core_ca_mmu.h b/cmsis/TARGET_CORTEX_A/core_ca_mmu.h
new file mode 100644
index 0000000000..189b073dbc
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_ca_mmu.h
@@ -0,0 +1,847 @@
+/**************************************************************************//**
+ * @file     core_ca_mmu.h
+ * @brief    MMU Startup File for A9_MP Device Series
+ * @version  V1.01
+ * @date     10 Sept 2014
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2012-2014 ARM LIMITED
+
+  All rights reserved.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions are met:
+  - Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+  - Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+  - Neither the name of ARM nor the names of its contributors may be used
+    to endorse or promote products derived from this software without
+    specific prior written permission.
+  *
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+  ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+  ---------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef _MMU_FUNC_H
+#define _MMU_FUNC_H
+
+#define SECTION_DESCRIPTOR (0x2)
+#define SECTION_MASK (0xFFFFFFFC)
+
+#define SECTION_TEXCB_MASK (0xFFFF8FF3)
+#define SECTION_B_SHIFT (2)
+#define SECTION_C_SHIFT (3)
+#define SECTION_TEX0_SHIFT (12)
+#define SECTION_TEX1_SHIFT (13)
+#define SECTION_TEX2_SHIFT (14)
+
+#define SECTION_XN_MASK (0xFFFFFFEF)
+#define SECTION_XN_SHIFT (4)
+
+#define SECTION_DOMAIN_MASK (0xFFFFFE1F)
+#define SECTION_DOMAIN_SHIFT (5)
+
+#define SECTION_P_MASK (0xFFFFFDFF)
+#define SECTION_P_SHIFT (9)
+
+#define SECTION_AP_MASK (0xFFFF73FF)
+#define SECTION_AP_SHIFT (10)
+#define SECTION_AP2_SHIFT (15)
+
+#define SECTION_S_MASK (0xFFFEFFFF)
+#define SECTION_S_SHIFT (16)
+
+#define SECTION_NG_MASK (0xFFFDFFFF)
+#define SECTION_NG_SHIFT (17)
+
+#define SECTION_NS_MASK (0xFFF7FFFF)
+#define SECTION_NS_SHIFT (19)
+
+
+#define PAGE_L1_DESCRIPTOR (0x1)
+#define PAGE_L1_MASK (0xFFFFFFFC)
+
+#define PAGE_L2_4K_DESC (0x2)
+#define PAGE_L2_4K_MASK (0xFFFFFFFD)
+
+#define PAGE_L2_64K_DESC (0x1)
+#define PAGE_L2_64K_MASK (0xFFFFFFFC)
+
+#define PAGE_4K_TEXCB_MASK (0xFFFFFE33)
+#define PAGE_4K_B_SHIFT (2)
+#define PAGE_4K_C_SHIFT (3)
+#define PAGE_4K_TEX0_SHIFT (6)
+#define PAGE_4K_TEX1_SHIFT (7)
+#define PAGE_4K_TEX2_SHIFT (8)
+
+#define PAGE_64K_TEXCB_MASK (0xFFFF8FF3)
+#define PAGE_64K_B_SHIFT (2)
+#define PAGE_64K_C_SHIFT (3)
+#define PAGE_64K_TEX0_SHIFT (12)
+#define PAGE_64K_TEX1_SHIFT (13)
+#define PAGE_64K_TEX2_SHIFT (14)
+
+#define PAGE_TEXCB_MASK (0xFFFF8FF3)
+#define PAGE_B_SHIFT (2)
+#define PAGE_C_SHIFT (3)
+#define PAGE_TEX_SHIFT (12)
+
+#define PAGE_XN_4K_MASK (0xFFFFFFFE)
+#define PAGE_XN_4K_SHIFT (0)
+#define PAGE_XN_64K_MASK (0xFFFF7FFF)
+#define PAGE_XN_64K_SHIFT (15)
+
+
+#define PAGE_DOMAIN_MASK (0xFFFFFE1F)
+#define PAGE_DOMAIN_SHIFT (5)
+
+#define PAGE_P_MASK (0xFFFFFDFF)
+#define PAGE_P_SHIFT (9)
+
+#define PAGE_AP_MASK (0xFFFFFDCF)
+#define PAGE_AP_SHIFT (4)
+#define PAGE_AP2_SHIFT (9)
+
+#define PAGE_S_MASK (0xFFFFFBFF)
+#define PAGE_S_SHIFT (10)
+
+#define PAGE_NG_MASK (0xFFFFF7FF)
+#define PAGE_NG_SHIFT (11)
+
+#define PAGE_NS_MASK (0xFFFFFFF7)
+#define PAGE_NS_SHIFT (3)
+
+#define OFFSET_1M (0x00100000)
+#define OFFSET_64K (0x00010000)
+#define OFFSET_4K (0x00001000)
+
+#define DESCRIPTOR_FAULT (0x00000000)
+
+/* ########################### MMU Function Access ########################### */
+/** \ingroup MMU_FunctionInterface
+ \defgroup MMU_Functions MMU Functions Interface
+ @{
+ */
+
+/* Attributes enumerations */
+
+/* Region size attributes */
+typedef enum
+{
+ SECTION,
+ PAGE_4k,
+ PAGE_64k,
+} mmu_region_size_Type;
+
+/* Region type attributes */
+typedef enum
+{
+ NORMAL,
+ DEVICE,
+ SHARED_DEVICE,
+ NON_SHARED_DEVICE,
+ STRONGLY_ORDERED
+} mmu_memory_Type;
+
+/* Region cacheability attributes */
+typedef enum
+{
+ NON_CACHEABLE,
+ WB_WA,
+ WT,
+ WB_NO_WA,
+} mmu_cacheability_Type;
+
+/* Region parity check attributes */
+typedef enum
+{
+ ECC_DISABLED,
+ ECC_ENABLED,
+} mmu_ecc_check_Type;
+
+/* Region execution attributes */
+typedef enum
+{
+ EXECUTE,
+ NON_EXECUTE,
+} mmu_execute_Type;
+
+/* Region global attributes */
+typedef enum
+{
+ GLOBAL,
+ NON_GLOBAL,
+} mmu_global_Type;
+
+/* Region shareability attributes */
+typedef enum
+{
+ NON_SHARED,
+ SHARED,
+} mmu_shared_Type;
+
+/* Region security attributes */
+typedef enum
+{
+ SECURE,
+ NON_SECURE,
+} mmu_secure_Type;
+
+/* Region access attributes */
+typedef enum
+{
+ NO_ACCESS,
+ RW,
+ READ,
+} mmu_access_Type;
+
+/* Memory Region definition */
+typedef struct RegionStruct {
+ mmu_region_size_Type rg_t;
+ mmu_memory_Type mem_t;
+ uint8_t domain;
+ mmu_cacheability_Type inner_norm_t;
+ mmu_cacheability_Type outer_norm_t;
+ mmu_ecc_check_Type e_t;
+ mmu_execute_Type xn_t;
+ mmu_global_Type g_t;
+ mmu_secure_Type sec_t;
+ mmu_access_Type priv_t;
+ mmu_access_Type user_t;
+ mmu_shared_Type sh_t;
+
+} mmu_region_attributes_Type;
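+
+/* Illustrative sketch (the values below are an assumption, not a prescribed
+   configuration): describing a normal, write-back write-allocate, shareable,
+   privileged-RW section. The helpers that turn this structure into actual
+   descriptors are not shown in this excerpt.
+
+       mmu_region_attributes_Type region;
+       region.rg_t         = SECTION;
+       region.mem_t        = NORMAL;
+       region.domain       = 0;
+       region.inner_norm_t = WB_WA;
+       region.outer_norm_t = WB_WA;
+       region.e_t          = ECC_DISABLED;
+       region.xn_t         = EXECUTE;
+       region.g_t          = GLOBAL;
+       region.sec_t        = SECURE;
+       region.priv_t       = RW;
+       region.user_t       = NO_ACCESS;
+       region.sh_t         = SHARED;
+*/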
+
+/** \brief Set section execution-never attribute
+
+ The function sets section execution-never attribute
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] xn Section execution-never attribute : EXECUTE , NON_EXECUTE.
+
+ \return 0
+ */
+__STATIC_INLINE int __xn_section(uint32_t *descriptor_l1, mmu_execute_Type xn)
+{
+ *descriptor_l1 &= SECTION_XN_MASK;
+ *descriptor_l1 |= ((xn & 0x1) << SECTION_XN_SHIFT);
+ return 0;
+}
+
+/** \brief Set section domain
+
+ The function sets section domain
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] domain Section domain
+
+ \return 0
+ */
+__STATIC_INLINE int __domain_section(uint32_t *descriptor_l1, uint8_t domain)
+{
+ *descriptor_l1 &= SECTION_DOMAIN_MASK;
+ *descriptor_l1 |= ((domain & 0xF) << SECTION_DOMAIN_SHIFT);
+ return 0;
+}
+
+/** \brief Set section parity check
+
+ The function sets section parity check
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
+
+ \return 0
+ */
+__STATIC_INLINE int __p_section(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
+{
+ *descriptor_l1 &= SECTION_P_MASK;
+ *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
+ return 0;
+}
+
+/** \brief Set section access privileges
+
+ The function sets section access privileges
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] user User Level Access: NO_ACCESS, RW, READ
+ \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
+ \param [in] afe Access flag enable
+
+ \return 0
+ */
+__STATIC_INLINE int __ap_section(uint32_t *descriptor_l1, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
+{
+ uint32_t ap = 0;
+
+ if (afe == 0) { //full access
+ if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
+ else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
+ else if ((priv == RW) && (user == READ)) { ap = 0x2; }
+ else if ((priv == RW) && (user == RW)) { ap = 0x3; }
+ else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+ else if ((priv == READ) && (user == READ)) { ap = 0x7; }
+ }
+
+ else { //Simplified access
+ if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
+ else if ((priv == RW) && (user == RW)) { ap = 0x3; }
+ else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+ else if ((priv == READ) && (user == READ)) { ap = 0x7; }
+ }
+
+ *descriptor_l1 &= SECTION_AP_MASK;
+ *descriptor_l1 |= (ap & 0x3) << SECTION_AP_SHIFT;
+ *descriptor_l1 |= ((ap & 0x4)>>2) << SECTION_AP2_SHIFT;
+
+ return 0;
+}
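+
+/* Illustrative note (not part of the original header): with AFE == 0 the
+   (priv, user) pairs above follow the ARMv7-A short-descriptor AP[2:0] encoding:
+     0b000  no access / no access        0b011  RW / RW
+     0b001  RW / no access               0b101  RO / no access
+     0b010  RW / RO                      0b110, 0b111  RO / RO
+   AP[1:0] is written at SECTION_AP_SHIFT and AP[2] at SECTION_AP2_SHIFT. */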
+
+/** \brief Set section shareability
+
+ The function sets section shareability
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] s_bit Section shareability: NON_SHARED, SHARED
+
+ \return 0
+ */
+__STATIC_INLINE int __shared_section(uint32_t *descriptor_l1, mmu_shared_Type s_bit)
+{
+ *descriptor_l1 &= SECTION_S_MASK;
+ *descriptor_l1 |= ((s_bit & 0x1) << SECTION_S_SHIFT);
+ return 0;
+}
+
+/** \brief Set section Global attribute
+
+ The function sets section Global attribute
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] g_bit Section attribute: GLOBAL, NON_GLOBAL
+
+ \return 0
+ */
+__STATIC_INLINE int __global_section(uint32_t *descriptor_l1, mmu_global_Type g_bit)
+{
+ *descriptor_l1 &= SECTION_NG_MASK;
+ *descriptor_l1 |= ((g_bit & 0x1) << SECTION_NG_SHIFT);
+ return 0;
+}
+
+/** \brief Set section Security attribute
+
+  The function sets section Security attribute
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] s_bit Section Security attribute: SECURE, NON_SECURE
+
+ \return 0
+ */
+__STATIC_INLINE int __secure_section(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
+{
+ *descriptor_l1 &= SECTION_NS_MASK;
+ *descriptor_l1 |= ((s_bit & 0x1) << SECTION_NS_SHIFT);
+ return 0;
+}
+
+/* Page 4k or 64k */
+/** \brief Set 4k/64k page execution-never attribute
+
+ The function sets 4k/64k page execution-never attribute
+
+ \param [out] descriptor_l2 L2 descriptor.
+ \param [in] xn Page execution-never attribute : EXECUTE , NON_EXECUTE.
+ \param [in] page Page size: PAGE_4k, PAGE_64k,
+
+ \return 0
+ */
+__STATIC_INLINE int __xn_page(uint32_t *descriptor_l2, mmu_execute_Type xn, mmu_region_size_Type page)
+{
+ if (page == PAGE_4k)
+ {
+ *descriptor_l2 &= PAGE_XN_4K_MASK;
+ *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_4K_SHIFT);
+ }
+ else
+ {
+ *descriptor_l2 &= PAGE_XN_64K_MASK;
+ *descriptor_l2 |= ((xn & 0x1) << PAGE_XN_64K_SHIFT);
+ }
+ return 0;
+}
+
+/** \brief Set 4k/64k page domain
+
+ The function sets 4k/64k page domain
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] domain Page domain
+
+ \return 0
+ */
+__STATIC_INLINE int __domain_page(uint32_t *descriptor_l1, uint8_t domain)
+{
+ *descriptor_l1 &= PAGE_DOMAIN_MASK;
+ *descriptor_l1 |= ((domain & 0xf) << PAGE_DOMAIN_SHIFT);
+ return 0;
+}
+
+/** \brief Set 4k/64k page parity check
+
+ The function sets 4k/64k page parity check
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] p_bit Parity check: ECC_DISABLED, ECC_ENABLED
+
+ \return 0
+ */
+__STATIC_INLINE int __p_page(uint32_t *descriptor_l1, mmu_ecc_check_Type p_bit)
+{
+ *descriptor_l1 &= SECTION_P_MASK;
+ *descriptor_l1 |= ((p_bit & 0x1) << SECTION_P_SHIFT);
+ return 0;
+}
+
+/** \brief Set 4k/64k page access privileges
+
+ The function sets 4k/64k page access privileges
+
+ \param [out] descriptor_l2 L2 descriptor.
+ \param [in] user User Level Access: NO_ACCESS, RW, READ
+ \param [in] priv Privilege Level Access: NO_ACCESS, RW, READ
+ \param [in] afe Access flag enable
+
+ \return 0
+ */
+__STATIC_INLINE int __ap_page(uint32_t *descriptor_l2, mmu_access_Type user, mmu_access_Type priv, uint32_t afe)
+{
+ uint32_t ap = 0;
+
+ if (afe == 0) { //full access
+ if ((priv == NO_ACCESS) && (user == NO_ACCESS)) { ap = 0x0; }
+ else if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
+ else if ((priv == RW) && (user == READ)) { ap = 0x2; }
+ else if ((priv == RW) && (user == RW)) { ap = 0x3; }
+ else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+ else if ((priv == READ) && (user == READ)) { ap = 0x6; }
+ }
+
+ else { //Simplified access
+ if ((priv == RW) && (user == NO_ACCESS)) { ap = 0x1; }
+ else if ((priv == RW) && (user == RW)) { ap = 0x3; }
+ else if ((priv == READ) && (user == NO_ACCESS)) { ap = 0x5; }
+ else if ((priv == READ) && (user == READ)) { ap = 0x7; }
+ }
+
+ *descriptor_l2 &= PAGE_AP_MASK;
+ *descriptor_l2 |= (ap & 0x3) << PAGE_AP_SHIFT;
+ *descriptor_l2 |= ((ap & 0x4)>>2) << PAGE_AP2_SHIFT;
+
+ return 0;
+}
+
+/** \brief Set 4k/64k page shareability
+
+ The function sets 4k/64k page shareability
+
+ \param [out] descriptor_l2 L2 descriptor.
+ \param [in] s_bit 4k/64k page shareability: NON_SHARED, SHARED
+
+ \return 0
+ */
+__STATIC_INLINE int __shared_page(uint32_t *descriptor_l2, mmu_shared_Type s_bit)
+{
+ *descriptor_l2 &= PAGE_S_MASK;
+ *descriptor_l2 |= ((s_bit & 0x1) << PAGE_S_SHIFT);
+ return 0;
+}
+
+/** \brief Set 4k/64k page Global attribute
+
+ The function sets 4k/64k page Global attribute
+
+ \param [out] descriptor_l2 L2 descriptor.
+ \param [in] g_bit 4k/64k page attribute: GLOBAL, NON_GLOBAL
+
+ \return 0
+ */
+__STATIC_INLINE int __global_page(uint32_t *descriptor_l2, mmu_global_Type g_bit)
+{
+ *descriptor_l2 &= PAGE_NG_MASK;
+ *descriptor_l2 |= ((g_bit & 0x1) << PAGE_NG_SHIFT);
+ return 0;
+}
+
+/** \brief Set 4k/64k page Security attribute
+
+  The function sets 4k/64k page Security attribute
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] s_bit 4k/64k page Security attribute: SECURE, NON_SECURE
+
+ \return 0
+ */
+__STATIC_INLINE int __secure_page(uint32_t *descriptor_l1, mmu_secure_Type s_bit)
+{
+ *descriptor_l1 &= PAGE_NS_MASK;
+ *descriptor_l1 |= ((s_bit & 0x1) << PAGE_NS_SHIFT);
+ return 0;
+}
+
+
+/** \brief Set Section memory attributes
+
+ The function sets section memory attributes
+
+ \param [out] descriptor_l1 L1 descriptor.
+ \param [in] mem Section memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+ \param [in] outer Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+ \param [in] inner Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA,
+
+ \return 0
+ */
+__STATIC_INLINE int __memory_section(uint32_t *descriptor_l1, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner)
+{
+ *descriptor_l1 &= SECTION_TEXCB_MASK;
+
+ if (STRONGLY_ORDERED == mem)
+ {
+ return 0;
+ }
+ else if (SHARED_DEVICE == mem)
+ {
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+ }
+ else if (NON_SHARED_DEVICE == mem)
+ {
+ *descriptor_l1 |= (1 << SECTION_TEX1_SHIFT);
+ }
+ else if (NORMAL == mem)
+ {
+ *descriptor_l1 |= 1 << SECTION_TEX2_SHIFT;
+ switch(inner)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT);
+ break;
+ case WT:
+ *descriptor_l1 |= 1 << SECTION_C_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l1 |= (1 << SECTION_B_SHIFT) | (1 << SECTION_C_SHIFT);
+ break;
+ }
+ switch(outer)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT);
+ break;
+ case WT:
+ *descriptor_l1 |= 1 << SECTION_TEX1_SHIFT;
+ break;
+ case WB_NO_WA:
+          *descriptor_l1 |= (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_TEX1_SHIFT);
+ break;
+ }
+ }
+
+ return 0;
+}
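+
+/* Worked example (illustrative only): for NORMAL memory with inner and outer
+   WB_WA the code above yields TEX = 0b101, C = 0, B = 1, i.e. the descriptor is
+   ORed with (1 << SECTION_TEX2_SHIFT) | (1 << SECTION_TEX0_SHIFT) | (1 << SECTION_B_SHIFT);
+   STRONGLY_ORDERED memory leaves TEX, C and B all zero. */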
+
+/** \brief Set 4k/64k page memory attributes
+
+ The function sets 4k/64k page memory attributes
+
+ \param [out] descriptor_l2 L2 descriptor.
+ \param [in] mem 4k/64k page memory type: NORMAL, DEVICE, SHARED_DEVICE, NON_SHARED_DEVICE, STRONGLY_ORDERED
+  \param [in]              outer  Outer cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
+  \param [in]              inner  Inner cacheability: NON_CACHEABLE, WB_WA, WT, WB_NO_WA
+  \param [in]               page  Page size: PAGE_4k, PAGE_64k
+
+ \return 0
+ */
+__STATIC_INLINE int __memory_page(uint32_t *descriptor_l2, mmu_memory_Type mem, mmu_cacheability_Type outer, mmu_cacheability_Type inner, mmu_region_size_Type page)
+{
+ *descriptor_l2 &= PAGE_4K_TEXCB_MASK;
+
+ if (page == PAGE_64k)
+ {
+ //same as section
+ __memory_section(descriptor_l2, mem, outer, inner);
+ }
+ else
+ {
+ if (STRONGLY_ORDERED == mem)
+ {
+ return 0;
+ }
+ else if (SHARED_DEVICE == mem)
+ {
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+ }
+ else if (NON_SHARED_DEVICE == mem)
+ {
+ *descriptor_l2 |= (1 << PAGE_4K_TEX1_SHIFT);
+ }
+ else if (NORMAL == mem)
+ {
+ *descriptor_l2 |= 1 << PAGE_4K_TEX2_SHIFT;
+ switch(inner)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT);
+ break;
+ case WT:
+ *descriptor_l2 |= 1 << PAGE_4K_C_SHIFT;
+ break;
+ case WB_NO_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_B_SHIFT) | (1 << PAGE_4K_C_SHIFT);
+ break;
+ }
+ switch(outer)
+ {
+ case NON_CACHEABLE:
+ break;
+ case WB_WA:
+ *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT);
+ break;
+ case WT:
+ *descriptor_l2 |= 1 << PAGE_4K_TEX1_SHIFT;
+ break;
+ case WB_NO_WA:
+            *descriptor_l2 |= (1 << PAGE_4K_TEX0_SHIFT) | (1 << PAGE_4K_TEX1_SHIFT);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/** \brief Create a L1 section descriptor
+
+ The function creates a section descriptor.
+
+ Assumptions:
+ - 16MB super sections not supported
+ - TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor
+ - Functions always return 0
+
+ \param [out] descriptor L1 descriptor
+ \param [in] reg Section attributes
+
+ \return 0
+ */
+__STATIC_INLINE int __get_section_descriptor(uint32_t *descriptor, mmu_region_attributes_Type reg)
+{
+ *descriptor = 0;
+
+ __memory_section(descriptor, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t);
+ __xn_section(descriptor,reg.xn_t);
+ __domain_section(descriptor, reg.domain);
+ __p_section(descriptor, reg.e_t);
+ __ap_section(descriptor, reg.priv_t, reg.user_t, 1);
+ __shared_section(descriptor,reg.sh_t);
+ __global_section(descriptor,reg.g_t);
+ __secure_section(descriptor,reg.sec_t);
+ *descriptor &= SECTION_MASK;
+ *descriptor |= SECTION_DESCRIPTOR;
+
+ return 0;
+
+}
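+
+/* Usage sketch (illustrative only; the attribute values and the variable names
+   below are assumptions, not part of this header):
+
+     mmu_region_attributes_Type region;
+     uint32_t section_descriptor;
+
+     region.rg_t         = SECTION;
+     region.domain       = 0x0;
+     region.e_t          = ECC_DISABLED;
+     region.g_t          = GLOBAL;
+     region.inner_norm_t = WB_WA;
+     region.outer_norm_t = WB_WA;
+     region.mem_t        = NORMAL;
+     region.sec_t        = SECURE;
+     region.xn_t         = EXECUTE;
+     region.priv_t       = RW;
+     region.user_t       = RW;
+     region.sh_t         = NON_SHARED;
+
+     __get_section_descriptor(&section_descriptor, region);
+     // section_descriptor can then be passed to __TTSection() below
+*/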
+
+
+/** \brief Create a L1 and L2 4k/64k page descriptor
+
+ The function creates a 4k/64k page descriptor.
+ Assumptions:
+ - TEX remap disabled, so memory type and attributes are described directly by bits in the descriptor
+ - Functions always return 0
+
+ \param [out] descriptor L1 descriptor
+ \param [out] descriptor2 L2 descriptor
+ \param [in] reg 4k/64k page attributes
+
+ \return 0
+ */
+__STATIC_INLINE int __get_page_descriptor(uint32_t *descriptor, uint32_t *descriptor2, mmu_region_attributes_Type reg)
+{
+ *descriptor = 0;
+ *descriptor2 = 0;
+
+ switch (reg.rg_t)
+ {
+ case PAGE_4k:
+ __memory_page(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_4k);
+ __xn_page(descriptor2, reg.xn_t, PAGE_4k);
+ __domain_page(descriptor, reg.domain);
+ __p_page(descriptor, reg.e_t);
+ __ap_page(descriptor2, reg.priv_t, reg.user_t, 1);
+ __shared_page(descriptor2,reg.sh_t);
+ __global_page(descriptor2,reg.g_t);
+ __secure_page(descriptor,reg.sec_t);
+ *descriptor &= PAGE_L1_MASK;
+ *descriptor |= PAGE_L1_DESCRIPTOR;
+ *descriptor2 &= PAGE_L2_4K_MASK;
+ *descriptor2 |= PAGE_L2_4K_DESC;
+ break;
+
+ case PAGE_64k:
+ __memory_page(descriptor2, reg.mem_t, reg.outer_norm_t, reg.inner_norm_t, PAGE_64k);
+ __xn_page(descriptor2, reg.xn_t, PAGE_64k);
+ __domain_page(descriptor, reg.domain);
+ __p_page(descriptor, reg.e_t);
+ __ap_page(descriptor2, reg.priv_t, reg.user_t, 1);
+ __shared_page(descriptor2,reg.sh_t);
+ __global_page(descriptor2,reg.g_t);
+ __secure_page(descriptor,reg.sec_t);
+ *descriptor &= PAGE_L1_MASK;
+ *descriptor |= PAGE_L1_DESCRIPTOR;
+ *descriptor2 &= PAGE_L2_64K_MASK;
+ *descriptor2 |= PAGE_L2_64K_DESC;
+ break;
+
+ case SECTION:
+      //error: use __get_section_descriptor() for SECTION regions
+ break;
+
+ }
+
+ return 0;
+
+}
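+
+/* Usage sketch (illustrative only, continuing the sketch after
+   __get_section_descriptor(); the variable names are assumptions): for a 4k or
+   64k page the same attribute structure is reused, but with rg_t = PAGE_4k (or
+   PAGE_64k), and two descriptors are produced -- one for the L1 entry and one
+   for the L2 entry:
+
+     uint32_t page_l1_descriptor, page_l2_descriptor;
+     region.rg_t = PAGE_4k;
+     __get_page_descriptor(&page_l1_descriptor, &page_l2_descriptor, region);
+     // both values can then be passed to __TTPage_4k() below
+*/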
+
+/** \brief Create a 1MB Section
+
+ \param [in] ttb Translation table base address
+ \param [in] base_address Section base address
+ \param [in] count Number of sections to create
+ \param [in] descriptor_l1 L1 descriptor (region attributes)
+
+ */
+__STATIC_INLINE void __TTSection(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1)
+{
+ uint32_t offset;
+ uint32_t entry;
+ uint32_t i;
+
+ offset = base_address >> 20;
+ entry = (base_address & 0xFFF00000) | descriptor_l1;
+
+ //4 bytes aligned
+ ttb = ttb + offset;
+
+ for (i = 0; i < count; i++ )
+ {
+ //4 bytes aligned
+ *ttb++ = entry;
+ entry += OFFSET_1M;
+ }
+}
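+
+/* Usage sketch (illustrative only; ttb_l1 and ram_descriptor are assumed names):
+   map 128MB of RAM at 0x80000000 with 1MB sections through a 16kB-aligned,
+   4096-entry L1 table:
+
+     extern uint32_t ttb_l1[4096];
+     __TTSection(ttb_l1, 0x80000000, 128, ram_descriptor);
+*/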
+
+/** \brief Create a 4k page entry
+
+ \param [in] ttb L1 table base address
+ \param [in] base_address 4k base address
+ \param [in] count Number of 4k pages to create
+ \param [in] descriptor_l1 L1 descriptor (region attributes)
+ \param [in] ttb_l2 L2 table base address
+ \param [in] descriptor_l2 L2 descriptor (region attributes)
+
+ */
+__STATIC_INLINE void __TTPage_4k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
+{
+
+ uint32_t offset, offset2;
+ uint32_t entry, entry2;
+ uint32_t i;
+
+
+ offset = base_address >> 20;
+  entry = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
+
+ //4 bytes aligned
+ ttb += offset;
+ //create l1_entry
+ *ttb = entry;
+
+ offset2 = (base_address & 0xff000) >> 12;
+ ttb_l2 += offset2;
+ entry2 = (base_address & 0xFFFFF000) | descriptor_l2;
+ for (i = 0; i < count; i++ )
+ {
+ //4 bytes aligned
+ *ttb_l2++ = entry2;
+ entry2 += OFFSET_4K;
+ }
+}
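+
+/* Usage sketch (illustrative only; all names are assumptions): map 64 x 4k
+   pages (256kB) starting at 0x80100000 through a 1kB-aligned, 256-entry L2
+   table. Note that one call covers at most the 1MB region of a single L1 entry:
+
+     extern uint32_t ttb_l1[4096];
+     extern uint32_t ttb_l2[256];
+     __TTPage_4k(ttb_l1, 0x80100000, 64, page_l1_descriptor, ttb_l2, page_l2_descriptor);
+*/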
+
+/** \brief Create a 64k page entry
+
+ \param [in] ttb L1 table base address
+ \param [in] base_address 64k base address
+ \param [in] count Number of 64k pages to create
+ \param [in] descriptor_l1 L1 descriptor (region attributes)
+ \param [in] ttb_l2 L2 table base address
+ \param [in] descriptor_l2 L2 descriptor (region attributes)
+
+ */
+__STATIC_INLINE void __TTPage_64k(uint32_t *ttb, uint32_t base_address, uint32_t count, uint32_t descriptor_l1, uint32_t *ttb_l2, uint32_t descriptor_l2 )
+{
+ uint32_t offset, offset2;
+ uint32_t entry, entry2;
+ uint32_t i,j;
+
+
+ offset = base_address >> 20;
+  entry = ((uint32_t)ttb_l2 & 0xFFFFFC00) | descriptor_l1;
+
+ //4 bytes aligned
+ ttb += offset;
+ //create l1_entry
+ *ttb = entry;
+
+ offset2 = (base_address & 0xff000) >> 12;
+ ttb_l2 += offset2;
+ entry2 = (base_address & 0xFFFF0000) | descriptor_l2;
+ for (i = 0; i < count; i++ )
+ {
+ //create 16 entries
+    for (j = 0; j < 16; j++)
+    {
+      //4 bytes aligned
+      *ttb_l2++ = entry2;
+    }
+ entry2 += OFFSET_64K;
+ }
+}
+
+/*@} end of MMU_Functions */
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/cmsis/TARGET_CORTEX_A/core_cm4_simd.h b/cmsis/TARGET_CORTEX_A/core_cm4_simd.h
new file mode 100644
index 0000000000..83db95b5f1
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_cm4_simd.h
@@ -0,0 +1,673 @@
+/**************************************************************************//**
+ * @file core_cm4_simd.h
+ * @brief CMSIS Cortex-M4 SIMD Header File
+ * @version V3.20
+ * @date 25. February 2013
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2013 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+#ifndef __CORE_CM4_SIMD_H
+#define __CORE_CM4_SIMD_H
+
+
+/*******************************************************************************
+ * Hardware Abstraction Layer
+ ******************************************************************************/
+
+
+/* ################### Compiler specific Intrinsics ########################### */
+/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
+ Access to dedicated SIMD instructions
+ @{
+*/
+
+#if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
+/* ARM armcc specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#define __SADD8 __sadd8
+#define __QADD8 __qadd8
+#define __SHADD8 __shadd8
+#define __UADD8 __uadd8
+#define __UQADD8 __uqadd8
+#define __UHADD8 __uhadd8
+#define __SSUB8 __ssub8
+#define __QSUB8 __qsub8
+#define __SHSUB8 __shsub8
+#define __USUB8 __usub8
+#define __UQSUB8 __uqsub8
+#define __UHSUB8 __uhsub8
+#define __SADD16 __sadd16
+#define __QADD16 __qadd16
+#define __SHADD16 __shadd16
+#define __UADD16 __uadd16
+#define __UQADD16 __uqadd16
+#define __UHADD16 __uhadd16
+#define __SSUB16 __ssub16
+#define __QSUB16 __qsub16
+#define __SHSUB16 __shsub16
+#define __USUB16 __usub16
+#define __UQSUB16 __uqsub16
+#define __UHSUB16 __uhsub16
+#define __SASX __sasx
+#define __QASX __qasx
+#define __SHASX __shasx
+#define __UASX __uasx
+#define __UQASX __uqasx
+#define __UHASX __uhasx
+#define __SSAX __ssax
+#define __QSAX __qsax
+#define __SHSAX __shsax
+#define __USAX __usax
+#define __UQSAX __uqsax
+#define __UHSAX __uhsax
+#define __USAD8 __usad8
+#define __USADA8 __usada8
+#define __SSAT16 __ssat16
+#define __USAT16 __usat16
+#define __UXTB16 __uxtb16
+#define __UXTAB16 __uxtab16
+#define __SXTB16 __sxtb16
+#define __SXTAB16 __sxtab16
+#define __SMUAD __smuad
+#define __SMUADX __smuadx
+#define __SMLAD __smlad
+#define __SMLADX __smladx
+#define __SMLALD __smlald
+#define __SMLALDX __smlaldx
+#define __SMUSD __smusd
+#define __SMUSDX __smusdx
+#define __SMLSD __smlsd
+#define __SMLSDX __smlsdx
+#define __SMLSLD __smlsld
+#define __SMLSLDX __smlsldx
+#define __SEL __sel
+#define __QADD __qadd
+#define __QSUB __qsub
+
+#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
+ ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
+
+#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
+ ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
+
+#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
+ ((int64_t)(ARG3) << 32) ) >> 32))
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
+/* IAR iccarm specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#include <cmsis_iar.h>
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
+/* TI CCS specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+#include <cmsis_ccs.h>
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
+/* GNU gcc specific functions */
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#define __SSAT16(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+#define __USAT16(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
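+
+/* Worked example (illustrative only): with op1 = 0x00030002 (halfwords 3 and 2)
+   and op2 = 0x00050004 (halfwords 5 and 4), __SMUAD returns 2*4 + 3*5 = 23,
+   i.e. both signed 16x16 products are computed and summed. */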
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#define __SMLALD(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+ __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+ (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+#define __SMLALDX(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+ __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+ (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
+{
+ uint32_t result;
+
+ __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+#define __SMLSLD(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+ __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+ (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+#define __SMLSLDX(ARG1,ARG2,ARG3) \
+({ \
+  uint32_t __ARG1 = (ARG1), __ARG2 = (ARG2), __ARG3_H = (uint32_t)((uint64_t)(ARG3) >> 32), __ARG3_L = (uint32_t)((uint64_t)(ARG3) & 0xFFFFFFFFUL); \
+ __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (__ARG3_L), "=r" (__ARG3_H) : "r" (__ARG1), "r" (__ARG2), "0" (__ARG3_L), "1" (__ARG3_H) ); \
+ (uint64_t)(((uint64_t)__ARG3_H << 32) | __ARG3_L); \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
+{
+ uint32_t result;
+
+ __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
+ return(result);
+}
+
+#define __PKHBT(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
+
+#define __PKHTB(ARG1,ARG2,ARG3) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
+ if (ARG3 == 0) \
+ __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
+ else \
+ __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
+ __RES; \
+ })
+
+__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
+{
+ int32_t result;
+
+ __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
+ return(result);
+}
+
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+
+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
+/* TASKING carm specific functions */
+
+
+/*------ CM4 SIMD Intrinsics -----------------------------------------------------*/
+/* not yet supported */
+/*-- End CM4 SIMD Intrinsics -----------------------------------------------------*/
+
+
+#endif
+
+/*@} end of group CMSIS_SIMD_intrinsics */
+
+
+#endif /* __CORE_CM4_SIMD_H */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/cmsis/TARGET_CORTEX_A/core_cmInstr.h b/cmsis/TARGET_CORTEX_A/core_cmInstr.h
new file mode 100644
index 0000000000..fca425c51d
--- /dev/null
+++ b/cmsis/TARGET_CORTEX_A/core_cmInstr.h
@@ -0,0 +1,916 @@
+/**************************************************************************//**
+ * @file core_cmInstr.h
+ * @brief CMSIS Cortex-M Core Instruction Access Header File
+ * @version V4.10
+ * @date 18. March 2015
+ *
+ * @note
+ *
+ ******************************************************************************/
+/* Copyright (c) 2009 - 2014 ARM LIMITED
+
+ All rights reserved.
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ - Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ - Neither the name of ARM nor the names of its contributors may be used
+ to endorse or promote products derived from this software without
+ specific prior written permission.
+ *
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ ---------------------------------------------------------------------------*/
+
+
+#ifndef __CORE_CMINSTR_H
+#define __CORE_CMINSTR_H
+
+
+/* ########################## Core Instruction Access ######################### */
+/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
+ Access to dedicated instructions
+ @{
+*/
+
+#if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
+/* ARM armcc specific functions */
+
+#if (__ARMCC_VERSION < 400677)
+ #error "Please use ARM Compiler Toolchain V4.0.677 or later!"
+#endif
+
+
+/** \brief No Operation
+
+ No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+#define __NOP __nop
+
+
+/** \brief Wait For Interrupt
+
+ Wait For Interrupt is a hint instruction that suspends execution
+ until one of a number of events occurs.
+ */
+#define __WFI __wfi
+
+
+/** \brief Wait For Event
+
+ Wait For Event is a hint instruction that permits the processor to enter
+ a low-power state until one of a number of events occurs.
+ */
+#define __WFE __wfe
+
+
+/** \brief Send Event
+
+ Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+#define __SEV __sev
+
+
+/** \brief Instruction Synchronization Barrier
+
+ Instruction Synchronization Barrier flushes the pipeline in the processor,
+ so that all instructions following the ISB are fetched from cache or
+ memory, after the instruction has been completed.
+ */
+#define __ISB() do {\
+ __schedule_barrier();\
+ __isb(0xF);\
+ __schedule_barrier();\
+ } while (0)
+
+/** \brief Data Synchronization Barrier
+
+ This function acts as a special kind of Data Memory Barrier.
+ It completes when all explicit memory accesses before this instruction complete.
+ */
+#define __DSB() do {\
+ __schedule_barrier();\
+ __dsb(0xF);\
+ __schedule_barrier();\
+ } while (0)
+
+/** \brief Data Memory Barrier
+
+ This function ensures the apparent order of the explicit memory operations before
+ and after the instruction, without ensuring their completion.
+ */
+#define __DMB() do {\
+ __schedule_barrier();\
+ __dmb(0xF);\
+ __schedule_barrier();\
+ } while (0)
+
+/** \brief Reverse byte order (32 bit)
+
+    This function reverses the byte order in an integer value.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+#define __REV __rev
+
+
+/** \brief Reverse byte order (16 bit)
+
+ This function reverses the byte order in two unsigned short values.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+#ifndef __NO_EMBEDDED_ASM
+__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
+{
+ rev16 r0, r0
+ bx lr
+}
+#endif
+
+/** \brief Reverse byte order in signed short value
+
+ This function reverses the byte order in a signed short value with sign extension to integer.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+#ifndef __NO_EMBEDDED_ASM
+__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int32_t __REVSH(int32_t value)
+{
+ revsh r0, r0
+ bx lr
+}
+#endif
+
+
+/** \brief Rotate Right in unsigned value (32 bit)
+
+    This function rotates an unsigned 32-bit value right by a specified number of bits.
+
+    \param [in]    value  Value to rotate
+    \param [in]    shift  Number of bits to rotate
+ \return Rotated value
+ */
+#define __ROR __ror
+
+
+/** \brief Breakpoint
+
+ This function causes the processor to enter Debug state.
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+
+ \param [in] value is ignored by the processor.
+ If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __breakpoint(value)
+
+
+/** \brief Reverse bit order of value
+
+ This function reverses the bit order of the given value.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
+ #define __RBIT __rbit
+#else
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+ uint32_t result;
+ int32_t s = 4 /*sizeof(v)*/ * 8 - 1; // extra shift needed at end
+
+ result = value; // r will be reversed bits of v; first get LSB of v
+ for (value >>= 1; value; value >>= 1)
+ {
+ result <<= 1;
+ result |= value & 1;
+ s--;
+ }
+ result <<= s; // shift when v's highest bits are zero
+ return(result);
+}
+#endif
+
+
+/** \brief Count leading zeros
+
+ This function counts the number of leading zeros of a data value.
+
+ \param [in] value Value to count the leading zeros
+ \return number of leading zeros in value
+ */
+#define __CLZ __clz
+
+
+#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
+
+/** \brief LDR Exclusive (8 bit)
+
+    This function executes an exclusive LDR instruction for an 8 bit value.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint8_t at (*ptr)
+ */
+#define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
+
+
+/** \brief LDR Exclusive (16 bit)
+
+    This function executes an exclusive LDR instruction for 16 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint16_t at (*ptr)
+ */
+#define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
+
+
+/** \brief LDR Exclusive (32 bit)
+
+    This function executes an exclusive LDR instruction for 32 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint32_t at (*ptr)
+ */
+#define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
+
+
+/** \brief STR Exclusive (8 bit)
+
+    This function executes an exclusive STR instruction for 8 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+#define __STREXB(value, ptr) __strex(value, ptr)
+
+
+/** \brief STR Exclusive (16 bit)
+
+    This function executes an exclusive STR instruction for 16 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+#define __STREXH(value, ptr) __strex(value, ptr)
+
+
+/** \brief STR Exclusive (32 bit)
+
+    This function executes an exclusive STR instruction for 32 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+#define __STREXW(value, ptr) __strex(value, ptr)
+
+
+/** \brief Remove the exclusive lock
+
+ This function removes the exclusive lock which is created by LDREX.
+
+ */
+#define __CLREX __clrex
+
+
+/** \brief Signed Saturate
+
+ This function saturates a signed value.
+
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (1..32)
+ \return Saturated value
+ */
+#define __SSAT __ssat
+
+
+/** \brief Unsigned Saturate
+
+ This function saturates an unsigned value.
+
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (0..31)
+ \return Saturated value
+ */
+#define __USAT __usat
+
+
+/** \brief Rotate Right with Extend (32 bit)
+
+ This function moves each bit of a bitstring right by one bit.
+ The carry input is shifted in at the left end of the bitstring.
+
+ \param [in] value Value to rotate
+ \return Rotated value
+ */
+#ifndef __NO_EMBEDDED_ASM
+__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)
+{
+ rrx r0, r0
+ bx lr
+}
+#endif
+
+
+/** \brief LDRT Unprivileged (8 bit)
+
+    This function executes an Unprivileged LDRT instruction for an 8 bit value.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint8_t at (*ptr)
+ */
+#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr))
+
+
+/** \brief LDRT Unprivileged (16 bit)
+
+    This function executes an Unprivileged LDRT instruction for 16 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint16_t at (*ptr)
+ */
+#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr))
+
+
+/** \brief LDRT Unprivileged (32 bit)
+
+    This function executes an Unprivileged LDRT instruction for 32 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint32_t at (*ptr)
+ */
+#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr))
+
+
+/** \brief STRT Unprivileged (8 bit)
+
+    This function executes an Unprivileged STRT instruction for 8 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ */
+#define __STRBT(value, ptr) __strt(value, ptr)
+
+
+/** \brief STRT Unprivileged (16 bit)
+
+    This function executes an Unprivileged STRT instruction for 16 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ */
+#define __STRHT(value, ptr) __strt(value, ptr)
+
+
+/** \brief STRT Unprivileged (32 bit)
+
+    This function executes an Unprivileged STRT instruction for 32 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ */
+#define __STRT(value, ptr) __strt(value, ptr)
+
+#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */
+
+
+#elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
+/* GNU gcc specific functions */
+
+/* Define macros for porting to both thumb1 and thumb2.
+ * For thumb1, use low registers (r0-r7), specified by constraint "l".
+ * Otherwise, use general registers, specified by constraint "r". */
+#if defined (__thumb__) && !defined (__thumb2__)
+#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
+#define __CMSIS_GCC_USE_REG(r) "l" (r)
+#else
+#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
+#define __CMSIS_GCC_USE_REG(r) "r" (r)
+#endif
+
+/** \brief No Operation
+
+ No Operation does nothing. This instruction can be used for code alignment purposes.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
+{
+ __ASM volatile ("nop");
+}
+
+
+/** \brief Wait For Interrupt
+
+ Wait For Interrupt is a hint instruction that suspends execution
+ until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
+{
+ __ASM volatile ("wfi");
+}
+
+
+/** \brief Wait For Event
+
+ Wait For Event is a hint instruction that permits the processor to enter
+ a low-power state until one of a number of events occurs.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
+{
+ __ASM volatile ("wfe");
+}
+
+
+/** \brief Send Event
+
+ Send Event is a hint instruction. It causes an event to be signaled to the CPU.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
+{
+ __ASM volatile ("sev");
+}
+
+
+/** \brief Instruction Synchronization Barrier
+
+ Instruction Synchronization Barrier flushes the pipeline in the processor,
+ so that all instructions following the ISB are fetched from cache or
+ memory, after the instruction has been completed.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
+{
+ __ASM volatile ("isb 0xF":::"memory");
+}
+
+
+/** \brief Data Synchronization Barrier
+
+ This function acts as a special kind of Data Memory Barrier.
+ It completes when all explicit memory accesses before this instruction complete.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
+{
+ __ASM volatile ("dsb 0xF":::"memory");
+}
+
+
+/** \brief Data Memory Barrier
+
+ This function ensures the apparent order of the explicit memory operations before
+ and after the instruction, without ensuring their completion.
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
+{
+ __ASM volatile ("dmb 0xF":::"memory");
+}
+
+
+/** \brief Reverse byte order (32 bit)
+
+    This function reverses the byte order in an integer value.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+ return __builtin_bswap32(value);
+#else
+ uint32_t result;
+
+ __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
+
+
+/** \brief Reverse byte order (16 bit)
+
+ This function reverses the byte order in two unsigned short values.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/** \brief Reverse byte order in signed short value
+
+ This function reverses the byte order in a signed short value with sign extension to integer.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
+{
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ return (short)__builtin_bswap16(value);
+#else
+ uint32_t result;
+
+ __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+#endif
+}
+
+
+/** \brief Rotate Right in unsigned value (32 bit)
+
+    This function rotates an unsigned 32-bit value right by a specified number of bits.
+
+    \param [in]      op1  Value to rotate
+    \param [in]      op2  Number of bits to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
+{
+ return (op1 >> op2) | (op1 << (32 - op2));
+}
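+
+/* Worked example (illustrative only): __ROR(0x12345678, 8) rotates the value
+   right by 8 bits and returns 0x78123456. */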
+
+
+/** \brief Breakpoint
+
+ This function causes the processor to enter Debug state.
+ Debug tools can use this to investigate system state when the instruction at a particular address is reached.
+
+ \param [in] value is ignored by the processor.
+ If required, a debugger can use it to store additional information about the breakpoint.
+ */
+#define __BKPT(value) __ASM volatile ("bkpt "#value)
+
+
+/** \brief Reverse bit order of value
+
+ This function reverses the bit order of the given value.
+
+ \param [in] value Value to reverse
+ \return Reversed value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
+{
+ uint32_t result;
+
+#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
+ __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
+#else
+ int32_t s = 4 /*sizeof(v)*/ * 8 - 1; // extra shift needed at end
+
+ result = value; // r will be reversed bits of v; first get LSB of v
+ for (value >>= 1; value; value >>= 1)
+ {
+ result <<= 1;
+ result |= value & 1;
+ s--;
+ }
+ result <<= s; // shift when v's highest bits are zero
+#endif
+ return(result);
+}
+
+
+/** \brief Count leading zeros
+
+ This function counts the number of leading zeros of a data value.
+
+ \param [in] value Value to count the leading zeros
+ \return number of leading zeros in value
+ */
+#define __CLZ __builtin_clz
+
+
+#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
+
+/** \brief LDR Exclusive (8 bit)
+
+    This function executes an exclusive LDR instruction for an 8 bit value.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint8_t at (*ptr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+ /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
+     accepted by the assembler, so the following less efficient pattern has to be used.
+ */
+ __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* Add explicit type cast here */
+}
+
+
+/** \brief LDR Exclusive (16 bit)
+
+    This function executes an exclusive LDR instruction for 16 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint16_t at (*ptr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+ /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
+     accepted by the assembler, so the following less efficient pattern has to be used.
+ */
+ __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* Add explicit type cast here */
+}
+
+
+/** \brief LDR Exclusive (32 bit)
+
+    This function executes an exclusive LDR instruction for 32 bit values.
+
+ \param [in] ptr Pointer to data
+ \return value of type uint32_t at (*ptr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/** \brief STR Exclusive (8 bit)
+
+    This function executes an exclusive STR instruction for 8 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/** \brief STR Exclusive (16 bit)
+
+    This function executes an exclusive STR instruction for 16 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
+ return(result);
+}
+
+
+/** \brief STR Exclusive (32 bit)
+
+    This function executes an exclusive STR instruction for 32 bit values.
+
+ \param [in] value Value to store
+ \param [in] ptr Pointer to location
+ \return 0 Function succeeded
+ \return 1 Function failed
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
+ return(result);
+}
+
+
+/** \brief Remove the exclusive lock
+
+ This function removes the exclusive lock which is created by LDREX.
+
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
+{
+ __ASM volatile ("clrex" ::: "memory");
+}
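+
+/* Usage sketch (illustrative only; 'counter' is an assumed shared variable):
+   the exclusive load/store pair is typically used in a retry loop to build an
+   atomic read-modify-write, e.g. an atomic increment:
+
+     volatile uint32_t counter;
+     uint32_t old;
+     do {
+       old = __LDREXW(&counter);                      // load and mark exclusive
+     } while (__STREXW(old + 1U, &counter) != 0U);    // 0 = store succeeded, else retry
+*/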
+
+
+/** \brief Signed Saturate
+
+ This function saturates a signed value.
+
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (1..32); must be a compile-time constant
+ \return Saturated value
+ */
+#define __SSAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
+
+/** \brief Unsigned Saturate
+
+ This function saturates a signed value to an unsigned range.
+
+ \param [in] value Value to be saturated
+ \param [in] sat Bit position to saturate to (0..31); must be a compile-time constant
+ \return Saturated value
+ */
+#define __USAT(ARG1,ARG2) \
+({ \
+ uint32_t __RES, __ARG1 = (ARG1); \
+ __ASM ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
+ __RES; \
+ })
+
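+
+/* Usage sketch (illustrative): clamping a 32-bit intermediate result to a
+   16-bit range before packing it into a sample buffer. The function and
+   variable names are hypothetical; the second argument of __SSAT/__USAT
+   must be a compile-time constant. */
+__STATIC_INLINE int16_t saturate_sample_s16(int32_t mixed)
+{
+  return ((int16_t)__SSAT(mixed, 16));   /* clamp to [-32768, 32767] */
+}
+
+__STATIC_INLINE uint16_t saturate_sample_u16(int32_t mixed)
+{
+  return ((uint16_t)__USAT(mixed, 16));  /* clamp to [0, 65535] */
+}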
+
+/** \brief Rotate Right with Extend (32 bit)
+
+ This function shifts the value right by one bit and shifts the current
+ carry flag into the most significant bit.
+
+ \param [in] value Value to rotate
+ \return Rotated value
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
+{
+ uint32_t result;
+
+ __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
+ return(result);
+}
+
+
+/** \brief LDRT Unprivileged (8 bit)
+
+ This function executes an unprivileged LDRBT instruction for an 8-bit value.
+
+ \param [in] addr Pointer to data
+ \return value of type uint8_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+ /* Prior to GCC 4.8 the "Q" constraint expands to [rx, #0], which the
+ assembler does not accept for this instruction, so the less efficient
+ register-indirect form below is used instead.
+ */
+ __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint8_t) result); /* explicit cast: narrow the 32-bit result register to uint8_t */
+}
+
+
+/** \brief LDRT Unprivileged (16 bit)
+
+ This function executes an unprivileged LDRHT instruction for a 16-bit value.
+
+ \param [in] addr Pointer to data
+ \return value of type uint16_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
+{
+ uint32_t result;
+
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
+ __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
+#else
+ /* Prior to GCC 4.8 the "Q" constraint expands to [rx, #0], which the
+ assembler does not accept for this instruction, so the less efficient
+ register-indirect form below is used instead.
+ */
+ __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
+#endif
+ return ((uint16_t) result); /* explicit cast: narrow the 32-bit result register to uint16_t */
+}
+
+
+/** \brief LDRT Unprivileged (32 bit)
+
+ This function executes an unprivileged LDRT instruction for a 32-bit value.
+
+ \param [in] addr Pointer to data
+ \return value of type uint32_t at (*addr)
+ */
+__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
+{
+ uint32_t result;
+
+ __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
+ return(result);
+}
+
+
+/** \brief STRT Unprivileged (8 bit)
+
+ This function executes an unprivileged STRBT instruction for an 8-bit value.
+
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
+{
+ __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/** \brief STRT Unprivileged (16 bit)
+
+ This function executes an unprivileged STRHT instruction for a 16-bit value.
+
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
+{
+ __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
+}
+
+
+/** \brief STRT Unprivileged (32 bit)
+
+ This function executes an unprivileged STRT instruction for a 32-bit value.
+
+ \param [in] value Value to store
+ \param [in] addr Pointer to location
+ */
+__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
+{
+ __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
+}
+
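+
+/* Usage sketch (illustrative): a privileged handler accessing a word in a
+   buffer supplied by an unprivileged caller. __LDRT/__STRT perform the
+   access with unprivileged permissions, so it faults if the caller had no
+   right to the location instead of silently succeeding. The function name
+   is hypothetical. */
+__STATIC_INLINE void copy_user_word(volatile uint32_t *dst, volatile uint32_t *src)
+{
+  __STRT(__LDRT(src), dst);          /* both accesses are made as unprivileged */
+}
+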
+#endif /* (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300) */
+
+
+#elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
+/* IAR iccarm specific functions */
+#include <cmsis_iar.h>
+
+
+#elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
+/* TI CCS specific functions */
+#include <cmsis_ccs.h>
+
+
+#elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
+/* TASKING carm specific functions */
+/*
+ * The CMSIS functions have been implemented as intrinsics in the compiler.
+ * Please use "carm -?i" to get an up-to-date list of all intrinsics,
+ * including the CMSIS ones.
+ */
+
+
+#elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
+/* Cosmic specific functions */
+#include <cmsis_csm.h>
+
+#endif
+
+/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
+
+#endif /* __CORE_CMINSTR_H */