Merge pull request #14900 from 0xc0170/feature_CMSIS_5_13b9f72f2

Update CMSIS to 5.8.0
Jaeden Amero 2021-07-30 09:43:11 +01:00 committed by GitHub
commit 744814f0a0
77 changed files with 5727 additions and 4879 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -17,7 +17,7 @@
*
* ----------------------------------------------------------------------
*
* $Date: 18. June 2018
* $Date: 12. June 2020
* $Revision: V2.1.3
*
* Project: CMSIS-RTOS2 API
@ -86,7 +86,7 @@ typedef enum {
osKernelLocked = 3, ///< Locked.
osKernelSuspended = 4, ///< Suspended.
osKernelError = -1, ///< Error.
osKernelReserved = 0x7FFFFFFFU ///< Prevents enum down-size compiler optimization.
osKernelReserved = 0x7FFFFFFF ///< Prevents enum down-size compiler optimization.
} osKernelState_t;
/// Thread state.
@ -723,7 +723,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *
/// \return maximum number of messages.
uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id);
/// Get maximum message size in a Memory Pool.
/// Get maximum message size in a Message Queue.
/// \param[in] mq_id message queue ID obtained by \ref osMessageQueueNew.
/// \return maximum message size in bytes.
uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id);
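
The comment fix above corrects a copy-paste error: osMessageQueueGetMsgSize reports on a Message Queue, not a Memory Pool. A minimal sketch of how the two getters pair up, assuming mq_id is a valid handle returned by osMessageQueueNew (queue_buffer_bytes is an illustrative name, not part of the API):

    #include "cmsis_os2.h"

    /* Total payload footprint of an existing queue's buffer. */
    uint32_t queue_buffer_bytes (osMessageQueueId_t mq_id) {
      uint32_t capacity = osMessageQueueGetCapacity(mq_id); /* max messages  */
      uint32_t msg_size = osMessageQueueGetMsgSize(mq_id);  /* bytes/message */
      return capacity * msg_size;
    }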

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file os_tick.h
* @brief CMSIS OS Tick header file
* @version V1.0.1
* @date 24. November 2017
* @version V1.0.2
* @date 19. March 2021
******************************************************************************/
/*
* Copyright (c) 2017-2017 ARM Limited. All rights reserved.
* Copyright (c) 2017-2021 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,6 +27,11 @@
#include <stdint.h>
#ifdef __cplusplus
extern "C"
{
#endif
/// IRQ Handler.
#ifndef IRQHANDLER_T
#define IRQHANDLER_T
@ -68,4 +73,8 @@ uint32_t OS_Tick_GetCount (void);
/// \return OS Tick overflow status (1 - overflow, 0 - no overflow).
uint32_t OS_Tick_GetOverflow (void);
#ifdef __cplusplus
}
#endif
#endif /* OS_TICK_H */
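
Besides the date and version bump, V1.0.2 wraps the declarations in extern "C" guards so the OS tick API can be used from C++ translation units. A usage sketch of the two query functions visible in this hunk; the re-read on overflow is an illustrative policy, and sample_tick_count is not part of the API:

    #include "os_tick.h"

    /* Read the tick counter; if a tick overflow is pending, sample again
     * so the returned count is consistent with the overflow flag. */
    uint32_t sample_tick_count (void) {
      uint32_t count = OS_Tick_GetCount();
      if (OS_Tick_GetOverflow() != 0U) {
        count = OS_Tick_GetCount();
      }
      return count;
    }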

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -17,7 +17,7 @@
*
* -----------------------------------------------------------------------------
*
* $Revision: V5.1.0
* $Revision: V5.1.1
*
* Project: CMSIS-RTOS RTX
* Title: RTX Configuration
@ -40,7 +40,7 @@ __WEAK uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
(void)object_id;
switch (code) {
case osRtxErrorStackUnderflow:
case osRtxErrorStackOverflow:
// Stack overflow detected for thread (thread_id=object_id)
break;
case osRtxErrorISRQueueOverflow:
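
osRtxErrorStackOverflow replaces osRtxErrorStackUnderflow here rather than joining it: both names share the value 1U (see the rtx_os.h hunk below), so keeping both case labels would be a duplicate-case compile error. Because osRtxErrorNotify is declared __WEAK, an application migrates by overriding it along these lines (a sketch; the endless loop mirrors the default handler and is not mandatory):

    #include "rtx_os.h"

    /* Application override of the weak RTX error callback. */
    uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
      (void)object_id;
      switch (code) {
        case osRtxErrorStackOverflow:
          /* stack overflow detected for thread (osThreadId_t)object_id */
          break;
        default:
          break;
      }
      for (;;) {}   /* errors reported here are not recoverable */
    //return 0U;
    }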

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -17,7 +17,7 @@
*
* -----------------------------------------------------------------------------
*
* $Revision: V5.5.1
* $Revision: V5.5.2
*
* Project: CMSIS-RTOS RTX
* Title: RTX Configuration definitions
@ -69,7 +69,7 @@
// </e>
// <o>ISR FIFO Queue
// <o>ISR FIFO Queue
// <4=> 4 entries <8=> 8 entries <12=> 12 entries <16=> 16 entries
// <24=> 24 entries <32=> 32 entries <48=> 48 entries <64=> 64 entries
// <96=> 96 entries <128=> 128 entries <196=> 196 entries <256=> 256 entries
@ -143,10 +143,10 @@
#endif
// <q>Stack overrun checking
// <i> Enables stack overrun check at thread switch.
// <i> Enables stack overrun check at thread switch (requires RTX source variant).
// <i> Enabling this option increases slightly the execution time of a thread switch.
#ifndef OS_STACK_CHECK
#define OS_STACK_CHECK 1
#define OS_STACK_CHECK 0
#endif
// <q>Stack usage watermark
@ -156,8 +156,8 @@
#define OS_STACK_WATERMARK 0
#endif
// <o>Processor mode for Thread execution
// <0=> Unprivileged mode
// <o>Processor mode for Thread execution
// <0=> Unprivileged mode
// <1=> Privileged mode
// <i> Default: Privileged mode
#ifndef OS_PRIVILEGE_MODE
@ -367,125 +367,125 @@
// <i> Recording levels for RTX components.
// <i> Only applicable if events for the respective component are generated.
// <h>Memory Management
// <e.7>Memory Management
// <i> Recording level for Memory Management events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MEMORY_LEVEL
#define OS_EVR_MEMORY_LEVEL 0x01U
// </e>
#ifndef OS_EVR_MEMORY_LEVEL
#define OS_EVR_MEMORY_LEVEL 0x81U
#endif
// <h>Kernel
// <e.7>Kernel
// <i> Recording level for Kernel events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_KERNEL_LEVEL
#define OS_EVR_KERNEL_LEVEL 0x01U
// </e>
#ifndef OS_EVR_KERNEL_LEVEL
#define OS_EVR_KERNEL_LEVEL 0x81U
#endif
// <h>Thread
// <e.7>Thread
// <i> Recording level for Thread events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_THREAD_LEVEL
#define OS_EVR_THREAD_LEVEL 0x05U
// </e>
#ifndef OS_EVR_THREAD_LEVEL
#define OS_EVR_THREAD_LEVEL 0x85U
#endif
// <h>Generic Wait
// <e.7>Generic Wait
// <i> Recording level for Generic Wait events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_WAIT_LEVEL
#define OS_EVR_WAIT_LEVEL 0x01U
// </e>
#ifndef OS_EVR_WAIT_LEVEL
#define OS_EVR_WAIT_LEVEL 0x81U
#endif
// <h>Thread Flags
// <e.7>Thread Flags
// <i> Recording level for Thread Flags events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_THFLAGS_LEVEL
#define OS_EVR_THFLAGS_LEVEL 0x01U
// </e>
#ifndef OS_EVR_THFLAGS_LEVEL
#define OS_EVR_THFLAGS_LEVEL 0x81U
#endif
// <h>Event Flags
// <e.7>Event Flags
// <i> Recording level for Event Flags events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_EVFLAGS_LEVEL
#define OS_EVR_EVFLAGS_LEVEL 0x01U
// </e>
#ifndef OS_EVR_EVFLAGS_LEVEL
#define OS_EVR_EVFLAGS_LEVEL 0x81U
#endif
// <h>Timer
// <e.7>Timer
// <i> Recording level for Timer events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_TIMER_LEVEL
#define OS_EVR_TIMER_LEVEL 0x01U
// </e>
#ifndef OS_EVR_TIMER_LEVEL
#define OS_EVR_TIMER_LEVEL 0x81U
#endif
// <h>Mutex
// <e.7>Mutex
// <i> Recording level for Mutex events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MUTEX_LEVEL
#define OS_EVR_MUTEX_LEVEL 0x01U
// </e>
#ifndef OS_EVR_MUTEX_LEVEL
#define OS_EVR_MUTEX_LEVEL 0x81U
#endif
// <h>Semaphore
// <e.7>Semaphore
// <i> Recording level for Semaphore events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_SEMAPHORE_LEVEL
#define OS_EVR_SEMAPHORE_LEVEL 0x01U
// </e>
#ifndef OS_EVR_SEMAPHORE_LEVEL
#define OS_EVR_SEMAPHORE_LEVEL 0x81U
#endif
// <h>Memory Pool
// <e.7>Memory Pool
// <i> Recording level for Memory Pool events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MEMPOOL_LEVEL
#define OS_EVR_MEMPOOL_LEVEL 0x01U
// </e>
#ifndef OS_EVR_MEMPOOL_LEVEL
#define OS_EVR_MEMPOOL_LEVEL 0x81U
#endif
// <h>Message Queue
// <e.7>Message Queue
// <i> Recording level for Message Queue events.
// <o.0>Error events
// <o.1>API function call events
// <o.2>Operation events
// <o.3>Detailed operation events
// </h>
#ifndef OS_EVR_MSGQUEUE_LEVEL
#define OS_EVR_MSGQUEUE_LEVEL 0x01U
// </e>
#ifndef OS_EVR_MSGQUEUE_LEVEL
#define OS_EVR_MSGQUEUE_LEVEL 0x81U
#endif
// </h>
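
The pattern repeated above for every component is the same: the configuration-wizard group changes from a plain heading (<h>) to an enable control bound to bit 7 (<e.7>), and each default moves from 0x0n to 0x8n, i.e. the old event selection plus the new "recording enabled" bit (0x81 = errors only; Thread's 0x85 also keeps operation events). Any level can still be overridden ahead of this header, for example (a sketch, value chosen for illustration):

    /* Verbose thread tracing: bit 0 error, bit 1 API call, bit 2 operation,
     * bit 3 detailed operation, bit 7 component recording enabled. */
    #define OS_EVR_THREAD_LEVEL  0x8FU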

View File

@ -0,0 +1,46 @@
/*
* Copyright (c) 2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: RTX derived definitions
*
* -----------------------------------------------------------------------------
*/
#ifndef RTX_DEF_H_
#define RTX_DEF_H_
#ifdef _RTE_
#include "RTE_Components.h"
#endif
#include "RTX_Config.h"
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#define RTX_OBJ_MEM_USAGE
#endif
#if (defined(OS_STACK_CHECK) && (OS_STACK_CHECK != 0))
#define RTX_STACK_CHECK
#endif
#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS 1
#endif
#endif // RTX_DEF_H_
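
This new header centralizes derived build-time switches: numeric RTX_Config.h options become simple presence macros (RTX_OBJ_MEM_USAGE, RTX_STACK_CHECK) that C sources and the reworked exception handlers below can test uniformly, and DOMAIN_NS is derived from the RTE component selection for the non-secure ARMv8-M variant. Consuming code only needs a presence check, e.g.:

    #include "rtx_def.h"

    #ifdef RTX_STACK_CHECK
      /* built with OS_STACK_CHECK != 0: stack checking at thread switch */
    #endif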

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -26,8 +26,6 @@
#ifndef RTX_EVR_H_
#define RTX_EVR_H_
#include "cmsis_os2.h" // CMSIS RTOS API
#include "RTX_Config.h" // RTX Configuration
#include "rtx_os.h" // RTX OS definitions
// Initial Thread configuration covered also Thread Flags and Generic Wait
@ -393,6 +391,17 @@ extern void EvrRtxKernelGetSysTimerFreq (uint32_t freq);
#define EvrRtxKernelGetSysTimerFreq(freq)
#endif
/**
\brief Event on RTOS kernel system error (Error)
\param[in] code error code.
\param[in] object_id object that caused the error.
*/
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_ERROR_NOTIFY_DISABLE))
extern void EvrRtxKernelErrorNotify (uint32_t code, void *object_id);
#else
#define EvrRtxKernelErrorNotify(code, object_id)
#endif
// ==== Thread Events ====
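
The new event hooks into the usual rtx_evr.h machinery: a call site is compiled out if event recording is disabled globally (EVR_RTX_DISABLE), per component (OS_EVR_KERNEL == 0), or individually. For instance, a build that wants kernel events but not this one can define the per-event switch before the header is processed (a sketch of the existing mechanism, not a new API):

    /* Suppress only the kernel-error event; keep other kernel events. */
    #define EVR_RTX_KERNEL_ERROR_NOTIFY_DISABLE
    #include "rtx_evr.h"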

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -29,6 +29,7 @@
#include <stdint.h>
#include <stddef.h>
#include "cmsis_os2.h"
#include "rtx_def.h"
#ifdef __cplusplus
extern "C"
@ -38,8 +39,8 @@ extern "C"
/// Kernel Information
#define osRtxVersionAPI 20010003 ///< API version (2.1.3)
#define osRtxVersionKernel 50050002 ///< Kernel version (5.5.2)
#define osRtxKernelId "RTX V5.5.2" ///< Kernel identification string
#define osRtxVersionKernel 50050003 ///< Kernel version (5.5.3)
#define osRtxKernelId "RTX V5.5.3" ///< Kernel identification string
// ==== Common definitions ====
@ -110,7 +111,7 @@ typedef struct osRtxThread_s {
struct osRtxThread_s *delay_next; ///< Link pointer to next Thread in Delay list
struct osRtxThread_s *delay_prev; ///< Link pointer to previous Thread in Delay list
struct osRtxThread_s *thread_join; ///< Thread waiting to Join
uint32_t delay; ///< Delay Time
uint32_t delay; ///< Delay Time/Round Robin Time Tick
int8_t priority; ///< Thread Priority
int8_t priority_base; ///< Base Priority
uint8_t stack_frame; ///< Stack Frame (EXC_RETURN[7..0])
@ -296,9 +297,9 @@ typedef struct {
osRtxThread_t *delay_list; ///< Delay List
osRtxThread_t *wait_list; ///< Wait List (no Timeout)
osRtxThread_t *terminate_list; ///< Terminate Thread List
uint32_t reserved;
struct { ///< Thread Round Robin Info
osRtxThread_t *thread; ///< Round Robin Thread
uint32_t tick; ///< Round Robin Time Tick
uint32_t timeout; ///< Round Robin Timeout
} robin;
} thread;
@ -392,7 +393,8 @@ extern osRtxObjectMemUsage_t osRtxMessageQueueMemUsage;
// ==== OS External Functions ====
// OS Error Codes
#define osRtxErrorStackUnderflow 1U ///< Stack overflow, i.e. stack pointer below its lower memory limit for descending stacks.
#define osRtxErrorStackUnderflow 1U ///< \deprecated Superseded by \ref osRtxErrorStackOverflow.
#define osRtxErrorStackOverflow 1U ///< Stack overflow, i.e. stack pointer below its lower memory limit for descending stacks.
#define osRtxErrorISRQueueOverflow 2U ///< ISR Queue overflow detected when inserting object.
#define osRtxErrorTimerQueueOverflow 3U ///< User Timer Callback Queue overflow detected for timer.
#define osRtxErrorClibSpace 4U ///< Standard C/C++ library libspace not available: increase \c OS_THREAD_LIBSPACE_NUM.
@ -400,6 +402,7 @@ extern osRtxObjectMemUsage_t osRtxMessageQueueMemUsage;
/// OS Error Callback function
extern uint32_t osRtxErrorNotify (uint32_t code, void *object_id);
extern uint32_t osRtxKernelErrorNotify (uint32_t code, void *object_id);
/// OS Idle Thread
extern void osRtxIdleThread (void *argument);
@ -453,10 +456,12 @@ typedef struct {
osRtxMpInfo_t *message_queue; ///< Message Queue Control Blocks
} mpi;
uint32_t thread_stack_size; ///< Default Thread Stack Size
const
const
osThreadAttr_t *idle_thread_attr; ///< Idle Thread Attributes
const
osThreadAttr_t *timer_thread_attr; ///< Timer Thread Attributes
void (*timer_thread)(void *); ///< Timer Thread Function
int32_t (*timer_setup)(void); ///< Timer Setup Function
const
osMessageQueueAttr_t *timer_mq_attr; ///< Timer Message Queue Attributes
uint32_t timer_mq_mcnt; ///< Timer Message Queue maximum Messages

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,7 +18,7 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-A Exception handlers
; * Title: ARMv7-A Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
@ -367,16 +367,16 @@ osRtxContextSave
STMDB R1!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
VSTMDB R1!, {D0-D15} ; Save D0-D15
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VSTMDB R1!, {D16-D31} ; Save D16-D31
ENDIF
ENDIF
LDRB R2, [LR, #TCB_SP_FRAME] ; Load osRtxInfo.thread.run.curr frame info
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
ORR R2, R2, #4 ; NEON state
ELSE
ELSE
ORR R2, R2, #2 ; VFP state
ENDIF
ENDIF
STRB R2, [LR, #TCB_SP_FRAME] ; Store VFP/NEON state
osRtxContextSave1
@ -428,9 +428,9 @@ osRtxContextRestore
MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
BEQ osRtxContextRestore1 ; No VFP
ISB ; Sync if VFP was enabled
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
VLDMIA LR!, {D16-D31} ; Restore D16-D31
ENDIF
ENDIF
VLDMIA LR!, {D0-D15} ; Restore D0-D15
LDR R2, [LR]
VMSR FPSCR, R2 ; Restore FPSCR

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,15 +18,22 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M0 Exception handlers
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
THUMB
@ -44,9 +51,10 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -57,7 +65,7 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -68,18 +76,42 @@ SVC_Number
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0
BEQ SVC_ContextSwitch ; Branch if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
IF RTX_STACK_CHECK != 0
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
ENDIF
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
@ -87,17 +119,6 @@ SVC_ContextSave
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
@ -110,7 +131,7 @@ SVC_ContextRestore
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
MOVS R0,#~0xFFFFFFFD
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
@ -151,7 +172,7 @@ PendSV_Handler PROC
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -165,7 +186,7 @@ SysTick_Handler PROC
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
ALIGN
ENDP
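
The stack-check path added to this handler (and repeated in the later variants) is easier to follow in C. A sketch of the logic, with the prototype of the internal osRtxThreadStackCheck assumed for illustration (the handlers import it by symbol; a nonzero return means the stack is intact):

    #include "rtx_os.h"

    extern uint32_t osRtxThreadStackCheck (const osRtxThread_t *thread);

    /* What SVC_ContextSave does when RTX_STACK_CHECK is enabled. */
    static void context_save_sketch (osRtxThread_t *curr) {
      if (osRtxThreadStackCheck(curr) == 0U) {   /* zero: overrun detected */
        (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, curr);
        osRtxInfo.thread.run.curr = osRtxInfo.thread.run.next;
        /* R4..R11 are not saved; control falls through to restoring next */
      } else {
        /* reload curr's SP and store R4..R11 (plus S16..S31 with FP) */
      }
    }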

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,15 +18,22 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M0 Exception handlers
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
THUMB
@ -44,9 +51,10 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -57,7 +65,7 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -68,18 +76,42 @@ SVC_Number
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0
BEQ SVC_ContextSwitch ; Branch if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
IF RTX_STACK_CHECK != 0
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
ENDIF
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
@ -87,17 +119,6 @@ SVC_ContextSave
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
@ -110,7 +131,7 @@ SVC_ContextRestore
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
MOVS R0,#~0xFFFFFFFD
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
@ -151,7 +172,7 @@ PendSV_Handler PROC
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -165,7 +186,7 @@ SysTick_Handler PROC
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
ALIGN
ENDP

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,14 +18,18 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8M Baseline Exception handlers
; * Title: ARMv8-M Baseline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
#ifndef DOMAIN_NS
DOMAIN_NS EQU 0
DOMAIN_NS EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
@ -34,6 +38,9 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
THUMB
@ -51,13 +58,14 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF DOMAIN_NS = 1
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
IF DOMAIN_NS != 0
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
ENDIF
ENDIF
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -68,102 +76,137 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3} ; Load function parameters from stack
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R7 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STMIA R2!,{R0-R1} ; Store function return values
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
IF DOMAIN_NS = 1
IF DOMAIN_NS != 0
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R1,R2,R3,R7} ; Save registers
MOV R7,LR ; Get EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
MOV LR,R7 ; Set EXC_RETURN
POP {R1,R2,R3,R7} ; Restore registers
ENDIF
ENDIF
SVC_ContextSave1
SVC_ContextSave_NS
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
IF DOMAIN_NS != 0
MOV R3,LR ; Get EXC_RETURN
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextSaveSP ; Branch if secure
ENDIF
IF RTX_STACK_CHECK != 0
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R3,LR ; Get EXC_RETURN
MOV R0,R1 ; osRtxInfo.thread.run.curr
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
STRB R3,[R0] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
IF DOMAIN_NS != 0
MOV R0,R1 ; osRtxInfo.thread.run.curr
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
LDRB R3,[R0] ; Load stack frame information
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestore ; Branch if secure
ENDIF
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSave2
ELSE
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SUBS R0,R0,#32 ; Adjust address
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R0,LR ; Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
STRB R0,[R1] ; Store stack frame information
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
ENDIF
SVC_ContextRestore
IF DOMAIN_NS = 1
IF DOMAIN_NS != 0
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context
CBZ R0,SVC_ContextRestore_NS ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
ENDIF
ENDIF
SVC_ContextRestore1
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
LDRB R0,[R1] ; Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 ; R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 ; Set EXC_RETURN
IF DOMAIN_NS = 1
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL SVC_ContextRestore2 ; Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
MSR PSP,R0 ; Set PSP
BX LR ; Exit from handler
ELSE
SVC_ContextRestore_NS
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R0 ; Set PSPLIM
ENDIF
SVC_ContextRestore2
MOV R0,R2 ; osRtxInfo.thread.run.next
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
LDRB R3,[R0] ; Load stack frame information
MOVS R0,#0xFF
MVNS R0,R0 ; R0=0xFFFFFF00
ORRS R3,R3,R0
MOV LR,R3 ; Set EXC_RETURN
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
IF DOMAIN_NS != 0
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestoreSP ; Branch if secure
ENDIF
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
ADDS R0,R0,#16 ; Adjust address
SVC_ContextRestoreSP
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
@ -202,7 +245,7 @@ PendSV_Handler PROC
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B Sys_Context
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -216,117 +259,7 @@ SysTick_Handler PROC
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B Sys_Context
ALIGN
ENDP
Sys_Context PROC
EXPORT Sys_Context
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF DOMAIN_NS = 1
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
ENDIF
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDM R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ Sys_ContextExit ; Branch when threads are the same
Sys_ContextSave
IF DOMAIN_NS = 1
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,R7} ; Save registers
MOV R7,LR ; Get EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
MOV LR,R7 ; Set EXC_RETURN
POP {R1,R2,R3,R7} ; Restore registers
Sys_ContextSave1
MOV R0,LR ; Get EXC_RETURN
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL Sys_ContextSave2 ; Branch if non-secure
MRS R0,PSP ; Get PSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
B Sys_ContextSave3
ENDIF
Sys_ContextSave2
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Adjust address
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
Sys_ContextSave3
MOV R0,LR ; Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
STRB R0,[R1] ; Store stack frame information
Sys_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
Sys_ContextRestore
IF DOMAIN_NS = 1
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
ENDIF
Sys_ContextRestore1
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS ; Adjust offset
LDRB R0,[R1] ; Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 ; R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 ; Set EXC_RETURN
IF DOMAIN_NS = 1
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL Sys_ContextRestore2 ; Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
MSR PSP,R0 ; Set PSP
BX LR ; Exit from handler
ELSE
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R0 ; Set PSPLIM
ENDIF
Sys_ContextRestore2
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
Sys_ContextExit
BX LR ; Exit from handler
B SVC_Context ; Branch to context handling
ALIGN
ENDP

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,14 +18,30 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M3 Exception handlers
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
IF ({FPU}="FPv4-SP")
FPU_USED EQU 1
ELSE
FPU_USED EQU 0
ENDIF
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
@ -44,9 +60,10 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -55,7 +72,8 @@ SVC_Handler PROC
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CBNZ R1,SVC_User ; Branch if not SVC 0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
@ -64,35 +82,94 @@ SVC_Handler PROC
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
SVC_ContextSave
STMDB R12!,{R4-R11} ; Save R4..R11
STR R12,[R1,#TCB_SP_OFS] ; Store SP
SVC_ContextSwitch
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
IF FPU_USED != 0
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
ELSE
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
ENDIF
SVC_ContextSave
IF RTX_STACK_CHECK != 0
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
IF FPU_USED != 0
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
ENDIF
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF FPU_USED != 0
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
ELSE
B SVC_ContextRestore ; Branch to context restore handling
ENDIF
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
ENDIF
STM R12,{R4-R11} ; Save R4..R11
ELSE
STMDB R12!,{R4-R11} ; Save R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16.S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
ENDIF
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
ELSE
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
ENDIF
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
SVC_Exit
BX LR ; Exit from handler
@ -122,8 +199,8 @@ PendSV_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -136,8 +213,8 @@ SysTick_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
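
One subtlety in this handler on FPU parts is the SVC_FP_LazyState path: when the running thread was deleted while an extended stack frame with lazy FP state was still pending, FPCCR.LSPACT must be cleared so the core never tries to spill S0..S15 onto a stack that no longer exists. The equivalent operation in C, using the register address and bit position given in the listing above:

    #include <stdint.h>

    #define FPCCR  (*(volatile uint32_t *)0xE000EF34UL)  /* FP Context Control */

    /* Cancel pending lazy FP state preservation (FPCCR.LSPACT = 0). */
    static inline void fp_lazy_state_cancel (void) {
      FPCCR &= ~1UL;
    }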

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,22 +18,25 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8M Mainline Exception handlers
; * Title: ARMv8-M Mainline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
#ifndef DOMAIN_NS
DOMAIN_NS EQU 0
#endif
#ifdef __ARM_FP
__FPU_USED EQU 1
#else
__FPU_USED EQU 0
DOMAIN_NS EQU 0
#endif
IF ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16")
FPU_USED EQU 1
ELSE
FPU_USED EQU 0
ENDIF
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SM_OFS EQU 48 ; TCB.stack_mem offset
@ -41,6 +44,11 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
THUMB
@ -58,13 +66,14 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF DOMAIN_NS = 1
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
IF DOMAIN_NS != 0
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
ENDIF
ENDIF
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -73,7 +82,7 @@ SVC_Handler PROC
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -83,86 +92,130 @@ SVC_Handler PROC
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
IF __FPU_USED = 1
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
TST LR,#0x10 ; Check if extended stack frame
BNE SVC_ContextSwitch
LDR R1,=0xE000EF34 ; FPCCR Address
LDR R0,[R1] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state)
STR R0,[R1] ; Store FPCCR
B SVC_ContextSwitch
ELSE
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
ENDIF
SVC_ContextSave
IF DOMAIN_NS = 1
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN
ENDIF
SVC_ContextSave1
MRS R0,PSP ; Get PSP
STMDB R0!,{R4-R11} ; Save R4..R11
IF __FPU_USED = 1
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} ; Save VFP S16.S31
ENDIF
SVC_ContextSave2
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information
SVC_ContextSwitch
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
IF FPU_USED != 0
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
ELSE
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
ENDIF
SVC_ContextSave
IF DOMAIN_NS != 0
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R1,R2,R12,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R12,LR} ; Restore registers and EXC_RETURN
ENDIF
SVC_ContextSave_NS
IF DOMAIN_NS != 0
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextSaveSP ; Branch if secure
ENDIF
IF RTX_STACK_CHECK != 0
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
ENDIF
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
IF FPU_USED != 0
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
ENDIF
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF FPU_USED != 0
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
ELSE
B SVC_ContextRestore ; Branch to context restore handling
ENDIF
SVC_ContextSaveRegs
LDRB LR,[R1,#TCB_SF_OFS] ; Load stack frame information
IF DOMAIN_NS != 0
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextRestore ; Branch if secure
ENDIF
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
ENDIF
STM R12,{R4-R11} ; Save R4..R11
ELSE
STMDB R12!,{R4-R11} ; Save R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16.S31
ENDIF
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
SVC_ContextRestore
IF DOMAIN_NS = 1
IF DOMAIN_NS != 0
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context
CBZ R0,SVC_ContextRestore_NS; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
ENDIF
ENDIF
SVC_ContextRestore1
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
MSR PSPLIM,R0 ; Set PSPLIM
SVC_ContextRestore_NS
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
LDR R1,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R1 ; Set PSPLIM
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
IF DOMAIN_NS = 1
IF DOMAIN_NS != 0
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextRestore2 ; Branch if secure
ENDIF
BNE SVC_ContextRestoreSP ; Branch if secure
ENDIF
IF __FPU_USED = 1
TST LR,#0x10 ; Check if extended stack frame
IT EQ
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
ENDIF
ENDIF
LDMIA R0!,{R4-R11} ; Restore R4..R11
SVC_ContextRestore2
SVC_ContextRestoreSP
MSR PSP,R0 ; Set PSP
SVC_Exit
@ -194,7 +247,8 @@ PendSV_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
B Sys_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -207,100 +261,8 @@ SysTick_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
B Sys_Context
ALIGN
ENDP
Sys_Context PROC
EXPORT Sys_Context
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF DOMAIN_NS = 1
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
ENDIF
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
Sys_ContextSave
IF DOMAIN_NS = 1
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN
Sys_ContextSave1
TST LR,#0x40 ; Check domain of interrupted thread
IT NE
MRSNE R0,PSP ; Get PSP
BNE Sys_ContextSave3 ; Branch if secure
ENDIF
Sys_ContextSave2
MRS R0,PSP ; Get PSP
STMDB R0!,{R4-R11} ; Save R4..R11
IF __FPU_USED = 1
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} ; Save VFP S16.S31
ENDIF
Sys_ContextSave3
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information
Sys_ContextSwitch
STR R2,[R3] ; osRtxInfo.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
Sys_ContextRestore
IF DOMAIN_NS = 1
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
ENDIF
Sys_ContextRestore1
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
MSR PSPLIM,R0 ; Set PSPLIM
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
IF DOMAIN_NS = 1
TST LR,#0x40 ; Check domain of interrupted thread
BNE Sys_ContextRestore2 ; Branch if secure
ENDIF
IF __FPU_USED = 1
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
ENDIF
LDMIA R0!,{R4-R11} ; Restore R4..R11
Sys_ContextRestore2
MSR PSP,R0 ; Set PSP
Sys_ContextExit
BX LR ; Exit from handler
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,16 +18,31 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M4F Exception handlers
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0
#endif
IF ({FPU}="FPv4-SP") || ({FPU}="VFPv4_SP_D16") || ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16")
FPU_USED EQU 1
ELSE
FPU_USED EQU 0
ENDIF
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
THUMB
@ -45,9 +60,10 @@ SVC_Handler PROC
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
IF :DEF:MPU_LOAD
IMPORT osRtxMpuLoad
ENDIF
IF RTX_STACK_CHECK != 0
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
ENDIF
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -56,7 +72,8 @@ SVC_Handler PROC
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CBNZ R1,SVC_User ; Branch if not SVC 0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
@ -65,54 +82,91 @@ SVC_Handler PROC
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
TST LR,#0x10 ; Check if extended stack frame
BNE SVC_ContextSwitch
#ifdef __FPU_PRESENT
LDR R1,=0xE000EF34 ; FPCCR Address
LDR R0,[R1] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state)
STR R0,[R1] ; Store FPCCR
B SVC_ContextSwitch
#endif
SVC_ContextSave
STMDB R12!,{R4-R11} ; Save R4..R11
#ifdef __FPU_PRESENT
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16.S31
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
SVC_ContextSwitch
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF :DEF:MPU_LOAD
PUSH {R2,R3} ; Save registers
MOV R0,R2 ; osRtxMpuLoad parameter
BL osRtxMpuLoad ; Load MPU for next thread
POP {R2,R3} ; Restore registers
ENDIF
IF FPU_USED != 0
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
ELSE
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
ENDIF
SVC_ContextSave
IF RTX_STACK_CHECK != 0
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
IF FPU_USED != 0
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
ENDIF
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
IF FPU_USED != 0
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
ELSE
B SVC_ContextRestore ; Branch to context restore handling
ENDIF
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
IF FPU_USED != 0
LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
ENDIF
STM R12,{R4-R11} ; Save R4..R11
ELSE
STMDB R12!,{R4-R11} ; Save R4..R11
IF FPU_USED != 0
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16.S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
ENDIF
STR R12,[R1,#TCB_SP_OFS] ; Store SP
ENDIF
SVC_ContextRestore
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
#ifdef __FPU_PRESENT
TST LR,#0x10 ; Check if extended stack frame
IT EQ
IF FPU_USED != 0
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#endif
ELSE
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
ENDIF
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
@ -145,8 +199,8 @@ PendSV_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP
@ -159,8 +213,8 @@ SysTick_Handler PROC
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
ALIGN
ENDP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex-A Exception handlers
* Title: ARMv7-A Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -64,48 +64,48 @@ IRQ_PendSV:
.cantunwind
Undef_Handler:
SRSFD SP!, #MODE_UND
PUSH {R0-R4, R12} // Save APCS corruptible registers to UND mode stack
srsfd sp!, #MODE_UND
push {r0-r4, r12} // Save APCS corruptible registers to UND mode stack
MRS R0, SPSR
TST R0, #CPSR_BIT_T // Check mode
MOVEQ R1, #4 // R1 = 4 ARM mode
MOVNE R1, #2 // R1 = 2 Thumb mode
SUB R0, LR, R1
LDREQ R0, [R0] // ARM mode - R0 points to offending instruction
BEQ Undef_Cont
mrs r0, spsr
tst r0, #CPSR_BIT_T // Check mode
moveq r1, #4 // R1 = 4 ARM mode
movne r1, #2 // R1 = 2 Thumb mode
sub r0, lr, r1
ldreq r0, [r0] // ARM mode - R0 points to offending instruction
beq Undef_Cont
// Thumb instruction
// Determine if it is a 32-bit Thumb instruction
LDRH R0, [R0]
MOV R2, #0x1C
CMP R2, R0, LSR #11
BHS Undef_Cont // 16-bit Thumb instruction
ldrh r0, [r0]
mov r2, #0x1C
cmp r2, r0, lsr #11
bhs Undef_Cont // 16-bit Thumb instruction
// 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
LDRH R2, [LR]
ORR R0, R2, R0, LSL #16
ldrh r2, [lr]
orr r0, r2, r0, lsl #16
Undef_Cont:
MOV R2, LR // Set LR to third argument
mov r2, lr // Set LR to third argument
AND R12, SP, #4 // Ensure stack is 8-byte aligned
SUB SP, SP, R12 // Adjust stack
PUSH {R12, LR} // Store stack adjustment and dummy LR
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
// R0 =Offending instruction, R1 =2(Thumb) or =4(ARM)
BL CUndefHandler
bl CUndefHandler
POP {R12, LR} // Get stack adjustment & discard dummy LR
ADD SP, SP, R12 // Unadjust stack
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
LDR LR, [SP, #24] // Restore stacked LR and possibly adjust for retry
SUB LR, LR, R0
LDR R0, [SP, #28] // Restore stacked SPSR
MSR SPSR_cxsf, R0
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stacked APCS registers
ADD SP, SP, #8 // Adjust SP for already-restored banked registers
MOVS PC, LR
ldr lr, [sp, #24] // Restore stacked LR and possibly adjust for retry
sub lr, lr, r0
ldr r0, [sp, #28] // Restore stacked SPSR
msr spsr_cxsf, r0
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
add sp, sp, #8 // Adjust SP for already-restored banked registers
movs pc, lr
.fnend
.size Undef_Handler, .-Undef_Handler
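Undef_Handler's 16- vs 32-bit Thumb test relies on the encoding rule that a halfword whose top five bits are 0b11101, 0b11110 or 0b11111 opens a 32-bit instruction. A C sketch of the reconstruction, assuming a pointer to the offending instruction:

    #include <stdint.h>

    static uint32_t reconstruct_thumb_opcode(const uint16_t *pc) {
        uint16_t hw = pc[0];                  /* LDRH R0,[R0] */
        if ((hw >> 11) <= 0x1Cu) {            /* CMP R2,R0,LSR #11 ; BHS */
            return hw;                        /* 16-bit instruction */
        }
        return ((uint32_t)hw << 16) | pc[1];  /* ORR R0,R2,R0,LSL #16 */
    }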
@ -117,26 +117,26 @@ Undef_Cont:
.cantunwind
PAbt_Handler:
SUB LR, LR, #4 // Pre-adjust LR
SRSFD SP!, #MODE_ABT // Save LR and SPSR to ABT mode stack
PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 1 // IFSR
MRC p15, 0, R1, c6, c0, 2 // IFAR
sub lr, lr, #4 // Pre-adjust LR
srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
mrc p15, 0, r0, c5, c0, 1 // IFSR
mrc p15, 0, r1, c6, c0, 2 // IFAR
MOV R2, LR // Set LR to third argument
mov r2, lr // Set LR to third argument
AND R12, SP, #4 // Ensure stack is 8-byte aligned
SUB SP, SP, R12 // Adjust stack
PUSH {R12, LR} // Store stack adjustment and dummy LR
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
BL CPAbtHandler
bl CPAbtHandler
POP {R12, LR} // Get stack adjustment & discard dummy LR
ADD SP, SP, R12 // Unadjust stack
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stacked APCS registers
RFEFD SP! // Return from exception
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
rfefd sp! // Return from exception
.fnend
.size PAbt_Handler, .-PAbt_Handler
@ -147,26 +147,26 @@ PAbt_Handler:
.fnstart
.cantunwind
DAbt_Handler:
SUB LR, LR, #8 // Pre-adjust LR
SRSFD SP!, #MODE_ABT // Save LR and SPSR to ABT mode stack
PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
MRC p15, 0, R0, c5, c0, 0 // DFSR
MRC p15, 0, R1, c6, c0, 0 // DFAR
sub lr, lr, #8 // Pre-adjust LR
srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
mrc p15, 0, r0, c5, c0, 0 // DFSR
mrc p15, 0, r1, c6, c0, 0 // DFAR
MOV R2, LR // Set LR to third argument
mov r2, lr // Set LR to third argument
AND R12, SP, #4 // Ensure stack is 8-byte aligned
SUB SP, SP, R12 // Adjust stack
PUSH {R12, LR} // Store stack adjustment and dummy LR
and r12, sp, #4 // Ensure stack is 8-byte aligned
sub sp, sp, r12 // Adjust stack
push {r12, lr} // Store stack adjustment and dummy LR
BL CDAbtHandler
bl CDAbtHandler
POP {R12, LR} // Get stack adjustment & discard dummy LR
ADD SP, SP, R12 // Unadjust stack
pop {r12, lr} // Get stack adjustment & discard dummy LR
add sp, sp, r12 // Unadjust stack
CLREX // Clear exclusive monitor
POP {R0-R4, R12} // Restore stacked APCS registers
RFEFD SP! // Return from exception
clrex // Clear exclusive monitor
pop {r0-r4, r12} // Restore stacked APCS registers
rfefd sp! // Return from exception
.fnend
.size DAbt_Handler, .-DAbt_Handler
@ -178,49 +178,49 @@ DAbt_Handler:
.cantunwind
IRQ_Handler:
SUB LR, LR, #4 // Pre-adjust LR
SRSFD SP!, #MODE_SVC // Save LR_irq and SPSR_irq on to the SVC stack
CPS #MODE_SVC // Change to SVC mode
PUSH {R0-R3, R12, LR} // Save APCS corruptible registers
sub lr, lr, #4 // Pre-adjust LR
srsfd sp!, #MODE_SVC // Save LR_irq and SPSR_irq on to the SVC stack
cps #MODE_SVC // Change to SVC mode
push {r0-r3, r12, lr} // Save APCS corruptible registers
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 // Increment IRQ nesting level
STR R1, [R0]
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
add r1, r1, #1 // Increment IRQ nesting level
str r1, [r0]
MOV R3, SP // Move SP into R3
AND R3, R3, #4 // Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R3 // Adjust stack
PUSH {R3, R4} // Store stack adjustment(R3) and user data(R4)
mov r3, sp // Move SP into R3
and r3, r3, #4 // Get stack adjustment to ensure 8-byte alignment
sub sp, sp, r3 // Adjust stack
push {r3, r4} // Store stack adjustment(R3) and user data(R4)
BLX IRQ_GetActiveIRQ // Retrieve interrupt ID into R0
MOV R4, R0 // Move interrupt ID to R4
blx IRQ_GetActiveIRQ // Retrieve interrupt ID into R0
mov r4, r0 // Move interrupt ID to R4
BLX IRQ_GetHandler // Retrieve interrupt handler address for current ID
CMP R0, #0 // Check if handler address is 0
BEQ IRQ_End // If 0, end interrupt and return
blx IRQ_GetHandler // Retrieve interrupt handler address for current ID
cmp r0, #0 // Check if handler address is 0
beq IRQ_End // If 0, end interrupt and return
CPSIE i // Re-enable interrupts
BLX R0 // Call IRQ handler
CPSID i // Disable interrupts
cpsie i // Re-enable interrupts
blx r0 // Call IRQ handler
cpsid i // Disable interrupts
IRQ_End:
MOV R0, R4 // Move interrupt ID to R0
BLX IRQ_EndOfInterrupt // Signal end of interrupt
mov r0, r4 // Move interrupt ID to R0
blx IRQ_EndOfInterrupt // Signal end of interrupt
POP {R3, R4} // Restore stack adjustment(R3) and user data(R4)
ADD SP, SP, R3 // Unadjust stack
pop {r3, r4} // Restore stack adjustment(R3) and user data(R4)
add sp, sp, r3 // Unadjust stack
BL osRtxContextSwitch // Continue in context switcher
bl osRtxContextSwitch // Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUBS R1, R1, #1 // Decrement IRQ nesting level
STR R1, [R0]
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
subs r1, r1, #1 // Decrement IRQ nesting level
str r1, [r0]
CLREX // Clear exclusive monitor for interrupted code
POP {R0-R3, R12, LR} // Restore stacked APCS registers
RFEFD SP! // Return from IRQ handler
clrex // Clear exclusive monitor for interrupted code
pop {r0-r3, r12, lr} // Restore stacked APCS registers
rfefd sp! // Return from IRQ handler
.fnend
.size IRQ_Handler, .-IRQ_Handler
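The AND/SUB/ADD trio around each C call keeps SP 8-byte aligned, as the AAPCS requires at public interfaces; since SP is already word aligned, only bit 2 can be off. Sketched in C:

    #include <stdint.h>

    /* Returns the adjustment (0 or 4) that IRQ_Handler subtracts from SP
       before calling C and adds back afterwards. */
    static inline uintptr_t align8_adjust(uintptr_t sp) {
        return sp & 4u;
    }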
@ -232,80 +232,80 @@ IRQ_End:
.cantunwind
SVC_Handler:
SRSFD SP!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack
PUSH {R12, LR}
srsfd sp!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack
push {r12, lr}
MRS R12, SPSR // Load SPSR
TST R12, #CPSR_BIT_T // Thumb bit set?
LDRHNE R12, [LR,#-2] // Thumb: load halfword
BICNE R12, R12, #0xFF00 // extract SVC number
LDREQ R12, [LR,#-4] // ARM: load word
BICEQ R12, R12, #0xFF000000 // extract SVC number
CMP R12, #0 // Compare SVC number
BNE SVC_User // Branch if User SVC
mrs r12, spsr // Load SPSR
tst r12, #CPSR_BIT_T // Thumb bit set?
ldrhne r12, [lr,#-2] // Thumb: load halfword
bicne r12, r12, #0xFF00 // extract SVC number
ldreq r12, [lr,#-4] // ARM: load word
biceq r12, r12, #0xFF000000 // extract SVC number
cmp r12, #0 // Compare SVC number
bne SVC_User // Branch if User SVC
PUSH {R0-R3}
push {r0-r3}
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
ADD R1, R1, #1 // Increment IRQ nesting level
STR R1, [R0]
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
add r1, r1, #1 // Increment IRQ nesting level
str r1, [r0]
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] // Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING // Check osKernelRunning
BLT SVC_FuncCall // Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
BLX IRQ_Disable // Disable OS Tick interrupt
ldr r0, =osRtxInfo
ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state
cmp r1, #K_STATE_RUNNING // Check osKernelRunning
blt SVC_FuncCall // Continue if kernel is not running
ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
blx IRQ_Disable // Disable OS Tick interrupt
SVC_FuncCall:
POP {R0-R3}
pop {r0-r3}
LDR R12, [SP] // Reload R12 from stack
ldr r12, [sp] // Reload R12 from stack
CPSIE i // Re-enable interrupts
BLX R12 // Branch to SVC function
CPSID i // Disable interrupts
cpsie i // Re-enable interrupts
blx r12 // Branch to SVC function
cpsid i // Disable interrupts
SUB SP, SP, #4
STM SP, {SP}^ // Store SP_usr onto stack
POP {R12} // Pop SP_usr into R12
SUB R12, R12, #16 // Adjust pointer to SP_usr
LDMDB R12, {R2,R3} // Load return values from SVC function
PUSH {R0-R3} // Push return values to stack
sub sp, sp, #4
stm sp, {sp}^ // Store SP_usr onto stack
pop {r12} // Pop SP_usr into R12
sub r12, r12, #16 // Adjust pointer to SP_usr
ldmdb r12, {r2,r3} // Load return values from SVC function
push {r0-r3} // Push return values to stack
LDR R0, =osRtxInfo
LDR R1, [R0, #I_K_STATE_OFS] // Load RTX5 kernel state
CMP R1, #K_STATE_RUNNING // Check osKernelRunning
BLT SVC_ContextCheck // Continue if kernel is not running
LDR R0, [R0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
BLX IRQ_Enable // Enable OS Tick interrupt
ldr r0, =osRtxInfo
ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state
cmp r1, #K_STATE_RUNNING // Check osKernelRunning
blt SVC_ContextCheck // Continue if kernel is not running
ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn
blx IRQ_Enable // Enable OS Tick interrupt
SVC_ContextCheck:
BL osRtxContextSwitch // Continue in context switcher
bl osRtxContextSwitch // Continue in context switcher
LDR R0, =IRQ_NestLevel
LDR R1, [R0]
SUB R1, R1, #1 // Decrement IRQ nesting level
STR R1, [R0]
ldr r0, =IRQ_NestLevel
ldr r1, [r0]
sub r1, r1, #1 // Decrement IRQ nesting level
str r1, [r0]
CLREX // Clear exclusive monitor
POP {R0-R3, R12, LR} // Restore stacked APCS registers
RFEFD SP! // Return from exception
clrex // Clear exclusive monitor
pop {r0-r3, r12, lr} // Restore stacked APCS registers
rfefd sp! // Return from exception
SVC_User:
PUSH {R4, R5}
LDR R5,=osRtxUserSVC // Load address of SVC table
LDR R4,[R5] // Load SVC maximum number
CMP R12,R4 // Check SVC number range
BHI SVC_Done // Branch if out of range
push {r4, r5}
ldr r5,=osRtxUserSVC // Load address of SVC table
ldr r4,[r5] // Load SVC maximum number
cmp r12,r4 // Check SVC number range
bhi SVC_Done // Branch if out of range
LDR R12,[R5,R12,LSL #2] // Load SVC Function Address
BLX R12 // Call SVC Function
ldr r12,[r5,r12,lsl #2] // Load SVC Function Address
blx r12 // Call SVC Function
SVC_Done:
CLREX // Clear exclusive monitor
POP {R4, R5, R12, LR}
RFEFD SP! // Return from exception
clrex // Clear exclusive monitor
pop {r4, r5, r12, lr}
rfefd sp! // Return from exception
.fnend
.size SVC_Handler, .-SVC_Handler
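SVC_Handler digs the SVC immediate out of the instruction stream: the low 24 bits of the word at LR-4 in ARM state, or the low 8 bits of the halfword at LR-2 in Thumb state. A hedged C equivalent (names ours):

    #include <stdint.h>

    static uint32_t svc_number(uint32_t lr, int thumb_state) {
        if (thumb_state) {  /* CPSR T bit set */
            return *(const uint16_t *)(uintptr_t)(lr - 2u) & 0x00FFu;      /* BICNE #0xFF00 */
        }
        return *(const uint32_t *)(uintptr_t)(lr - 4u) & 0x00FFFFFFu;      /* BICEQ #0xFF000000 */
    }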
@ -317,146 +317,146 @@ SVC_Done:
.cantunwind
osRtxContextSwitch:
PUSH {LR}
push {lr}
// Check interrupt nesting level
LDR R0, =IRQ_NestLevel
LDR R1, [R0] // Load IRQ nest level
CMP R1, #1
BNE osRtxContextExit // Nesting interrupts, exit context switcher
ldr r0, =IRQ_NestLevel
ldr r1, [r0] // Load IRQ nest level
cmp r1, #1
bne osRtxContextExit // Nesting interrupts, exit context switcher
LDR R12, =osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R12, {R0, R1} // Load osRtxInfo.thread.run: curr & next
LDR R2, =IRQ_PendSV // Load address of IRQ_PendSV flag
LDRB R3, [R2] // Load PendSV flag
ldr r12, =osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
ldm r12, {r0, r1} // Load osRtxInfo.thread.run: curr & next
ldr r2, =IRQ_PendSV // Load address of IRQ_PendSV flag
ldrb r3, [r2] // Load PendSV flag
CMP R0, R1 // Check if context switch is required
BNE osRtxContextCheck // Not equal, check if context save required
CMP R3, #1 // Compare IRQ_PendSV value
BNE osRtxContextExit // No post processing (and no context switch requested)
cmp r0, r1 // Check if context switch is required
bne osRtxContextCheck // Not equal, check if context save required
cmp r3, #1 // Compare IRQ_PendSV value
bne osRtxContextExit // No post processing (and no context switch requested)
osRtxContextCheck:
STR R1, [R12] // Store run.next as run.curr
str r1, [r12] // Store run.next as run.curr
// R0 = curr, R1 = next, R2 = &IRQ_PendSV, R3 = IRQ_PendSV, R12 = &osRtxInfo.thread.run
PUSH {R1-R3, R12}
push {r1-r3, r12}
CMP R0, #0 // Is osRtxInfo.thread.run.curr == 0
BEQ osRtxPostProcess // Current deleted, skip context save
cmp r0, #0 // Is osRtxInfo.thread.run.curr == 0
beq osRtxPostProcess // Current deleted, skip context save
osRtxContextSave:
MOV LR, R0 // Move &osRtxInfo.thread.run.curr to LR
MOV R0, SP // Move SP_svc into R0
ADD R0, R0, #20 // Adjust SP_svc to R0 of the basic frame
SUB SP, SP, #4
STM SP, {SP}^ // Save SP_usr to current stack
POP {R1} // Pop SP_usr into R1
mov lr, r0 // Move &osRtxInfo.thread.run.curr to LR
mov r0, sp // Move SP_svc into R0
add r0, r0, #20 // Adjust SP_svc to R0 of the basic frame
sub sp, sp, #4
stm sp, {sp}^ // Save SP_usr to current stack
pop {r1} // Pop SP_usr into R1
SUB R1, R1, #64 // Adjust SP_usr to R4 of the basic frame
STMIA R1!, {R4-R11} // Save R4-R11 to user stack
LDMIA R0!, {R4-R8} // Load stacked R0-R3,R12 into R4-R8
STMIA R1!, {R4-R8} // Store them to user stack
STM R1, {LR}^ // Store LR_usr directly
ADD R1, R1, #4 // Adjust user sp to PC
LDMIB R0!, {R5-R6} // Load current PC, CPSR
STMIA R1!, {R5-R6} // Store user PC and CPSR
sub r1, r1, #64 // Adjust SP_usr to R4 of the basic frame
stmia r1!, {r4-r11} // Save R4-R11 to user stack
ldmia r0!, {r4-r8} // Load stacked R0-R3,R12 into R4-R8
stmia r1!, {r4-r8} // Store them to user stack
stm r1, {lr}^ // Store LR_usr directly
add r1, r1, #4 // Adjust user sp to PC
ldmib r0!, {r5-r6} // Load current PC, CPSR
stmia r1!, {r5-r6} // Store user PC and CPSR
SUB R1, R1, #64 // Adjust SP_usr to stacked R4
sub r1, r1, #64 // Adjust SP_usr to stacked R4
// Check if VFP state needs to be saved
MRC p15, 0, R2, c1, c0, 2 // VFP/NEON access enabled? (CPACR)
AND R2, R2, #0x00F00000
CMP R2, #0x00F00000
BNE osRtxContextSave1 // Continue, no VFP
mrc p15, 0, r2, c1, c0, 2 // VFP/NEON access enabled? (CPACR)
and r2, r2, #0x00F00000
cmp r2, #0x00F00000
bne osRtxContextSaveSP // Continue, no VFP
VMRS R2, FPSCR
STMDB R1!, {R2,R12} // Push FPSCR, maintain 8-byte alignment
vmrs r2, fpscr
stmdb r1!, {r2,r12} // Push FPSCR, maintain 8-byte alignment
VSTMDB R1!, {D0-D15} // Save D0-D15
#if __ARM_NEON == 1
VSTMDB R1!, {D16-D31} // Save D16-D31
#endif
vstmdb r1!, {d0-d15} // Save D0-D15
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
vstmdb r1!, {d16-d31} // Save D16-D31
#endif
LDRB R2, [LR, #TCB_SP_FRAME] // Load osRtxInfo.thread.run.curr frame info
#if __ARM_NEON == 1
ORR R2, R2, #4 // NEON state
#else
ORR R2, R2, #2 // VFP state
#endif
STRB R2, [LR, #TCB_SP_FRAME] // Store VFP/NEON state
ldrb r2, [lr, #TCB_SP_FRAME] // Load osRtxInfo.thread.run.curr frame info
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
orr r2, r2, #4 // NEON state
#else
orr r2, r2, #2 // VFP state
#endif
strb r2, [lr, #TCB_SP_FRAME] // Store VFP/NEON state
osRtxContextSave1:
STR R1, [LR, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr
osRtxContextSaveSP:
str r1, [lr, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr
osRtxPostProcess:
// RTX IRQ post processing check
POP {R8-R11} // Pop R8 = run.next, R9 = &IRQ_PendSV, R10 = IRQ_PendSV, R11 = &osRtxInfo.thread.run
CMP R10, #1 // Compare PendSV value
BNE osRtxContextRestore // Skip post processing if not pending
pop {r8-r11} // Pop R8 = run.next, R9 = &IRQ_PendSV, R10 = IRQ_PendSV, R11 = &osRtxInfo.thread.run
cmp r10, #1 // Compare PendSV value
bne osRtxContextRestore // Skip post processing if not pending
MOV R4, SP // Move SP_svc into R4
AND R4, R4, #4 // Get stack adjustment to ensure 8-byte alignment
SUB SP, SP, R4 // Adjust stack
mov r4, sp // Move SP_svc into R4
and r4, r4, #4 // Get stack adjustment to ensure 8-byte alignment
sub sp, sp, r4 // Adjust stack
// Disable OS Tick
LDR R5, =osRtxInfo // Load address of osRtxInfo
LDR R5, [R5, #I_TICK_IRQN_OFS] // Load OS Tick irqn
MOV R0, R5 // Set it as function parameter
BLX IRQ_Disable // Disable OS Tick interrupt
MOV R6, #0 // Set PendSV clear value
B osRtxPendCheck
ldr r5, =osRtxInfo // Load address of osRtxInfo
ldr r5, [r5, #I_TICK_IRQN_OFS] // Load OS Tick irqn
mov r0, r5 // Set it as function parameter
blx IRQ_Disable // Disable OS Tick interrupt
mov r6, #0 // Set PendSV clear value
b osRtxPendCheck
osRtxPendExec:
STRB R6, [R9] // Clear PendSV flag
CPSIE i // Re-enable interrupts
BLX osRtxPendSV_Handler // Post process pending objects
CPSID i // Disable interrupts
strb r6, [r9] // Clear PendSV flag
cpsie i // Re-enable interrupts
blx osRtxPendSV_Handler // Post process pending objects
cpsid i // Disable interrupts
osRtxPendCheck:
LDR R8, [R11, #4] // Load osRtxInfo.thread.run.next
STR R8, [R11] // Store run.next as run.curr
LDRB R0, [R9] // Load PendSV flag
CMP R0, #1 // Compare PendSV value
BEQ osRtxPendExec // Branch to PendExec if PendSV is set
ldr r8, [r11, #4] // Load osRtxInfo.thread.run.next
str r8, [r11] // Store run.next as run.curr
ldrb r0, [r9] // Load PendSV flag
cmp r0, #1 // Compare PendSV value
beq osRtxPendExec // Branch to PendExec if PendSV is set
// Re-enable OS Tick
MOV R0, R5 // Restore irqn as function parameter
BLX IRQ_Enable // Enable OS Tick interrupt
mov r0, r5 // Restore irqn as function parameter
blx IRQ_Enable // Enable OS Tick interrupt
ADD SP, SP, R4 // Restore stack adjustment
add sp, sp, r4 // Restore stack adjustment
osRtxContextRestore:
LDR LR, [R8, #TCB_SP_OFS] // Load next osRtxThread_t.sp
LDRB R2, [R8, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame
ldr lr, [r8, #TCB_SP_OFS] // Load next osRtxThread_t.sp
ldrb r2, [r8, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame
ANDS R2, R2, #0x6 // Check stack frame for VFP context
MRC p15, 0, R2, c1, c0, 2 // Read CPACR
ANDEQ R2, R2, #0xFF0FFFFF // VFP/NEON state not stacked, disable VFP/NEON
ORRNE R2, R2, #0x00F00000 // VFP/NEON state is stacked, enable VFP/NEON
MCR p15, 0, R2, c1, c0, 2 // Write CPACR
BEQ osRtxContextRestore1 // No VFP
ISB // Sync if VFP was enabled
#if __ARM_NEON == 1
VLDMIA LR!, {D16-D31} // Restore D16-D31
#endif
VLDMIA LR!, {D0-D15} // Restore D0-D15
LDR R2, [LR]
VMSR FPSCR, R2 // Restore FPSCR
ADD LR, LR, #8 // Adjust sp pointer to R4
ands r2, r2, #0x6 // Check stack frame for VFP context
mrc p15, 0, r2, c1, c0, 2 // Read CPACR
andeq r2, r2, #0xFF0FFFFF // VFP/NEON state not stacked, disable VFP/NEON
orrne r2, r2, #0x00F00000 // VFP/NEON state is stacked, enable VFP/NEON
mcr p15, 0, r2, c1, c0, 2 // Write CPACR
beq osRtxContextRestoreRegs // No VFP
isb // Sync if VFP was enabled
#if defined(__ARM_NEON) && (__ARM_NEON == 1)
vldmia lr!, {d16-d31} // Restore D16-D31
#endif
vldmia lr!, {d0-d15} // Restore D0-D15
ldr r2, [lr]
vmsr fpscr, r2 // Restore FPSCR
add lr, lr, #8 // Adjust sp pointer to R4
osRtxContextRestore1:
LDMIA LR!, {R4-R11} // Restore R4-R11
ADD R12, LR, #32 // Adjust sp and save it into R12
PUSH {R12} // Push sp onto stack
LDM SP, {SP}^ // Restore SP_usr directly
ADD SP, SP, #4 // Adjust SP_svc
LDMIA LR!, {R0-R3, R12} // Load user registers R0-R3,R12
STMIB SP!, {R0-R3, R12} // Store them to SP_svc
LDM LR, {LR}^ // Restore LR_usr directly
LDMIB LR!, {R0-R1} // Load user registers PC,CPSR
ADD SP, SP, #4
STMIB SP!, {R0-R1} // Store them to SP_svc
SUB SP, SP, #32 // Adjust SP_svc to stacked LR
osRtxContextRestoreRegs:
ldmia lr!, {r4-r11} // Restore R4-R11
add r12, lr, #32 // Adjust sp and save it into R12
push {r12} // Push sp onto stack
ldm sp, {sp}^ // Restore SP_usr directly
add sp, sp, #4 // Adjust SP_svc
ldmia lr!, {r0-r3, r12} // Load user registers R0-R3,R12
stmib sp!, {r0-r3, r12} // Store them to SP_svc
ldm lr, {lr}^ // Restore LR_usr directly
ldmib lr!, {r0-r1} // Load user registers PC,CPSR
add sp, sp, #4
stmib sp!, {r0-r1} // Store them to SP_svc
sub sp, sp, #32 // Adjust SP_svc to stacked LR
osRtxContextExit:
POP {PC} // Return
pop {pc} // Return
.fnend
.size osRtxContextSwitch, .-osRtxContextSwitch
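osRtxContextSave decides whether VFP/NEON registers belong to the thread context by reading CPACR: the CP10/CP11 access fields sit in bits 20-23 and read 0xF when FP access is fully enabled. In C:

    #include <stdint.h>

    static int vfp_neon_enabled(uint32_t cpacr) {      /* MRC p15,0,Rx,c1,c0,2 */
        return (cpacr & 0x00F00000u) == 0x00F00000u;   /* CP10/CP11 full access */
    }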


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex-M0 Exception handlers
* Title: ARMv6-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,9 +26,16 @@
.syntax unified
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
@ -38,6 +45,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -47,89 +55,109 @@ irqRtxLib:
.cantunwind
SVC_Handler:
MOV R0,LR
LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP // Branch if return stack is MSP
MRS R0,PSP // Get PSP
mov r0,lr
lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2
bcc SVC_MSP // Branch if return stack is MSP
mrs r0,psp // Get PSP
SVC_Number:
LDR R1,[R0,#24] // Load saved PC from stack
SUBS R1,R1,#2 // Point to SVC instruction
LDRB R1,[R1] // Load SVC number
CMP R1,#0
BNE SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
subs r1,r1,#2 // Point to SVC instruction
ldrb r1,[r1] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDMIA R0,{R0-R3} // Load function parameters from stack
BLX R7 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STMIA R2!,{R0-R1} // Store function return values
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
ldmia r0,{r0-r3} // Load function parameters from stack
blx r7 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
stmia r2!,{r0-r1} // Store function return values
mov lr,r3 // Set EXC_RETURN
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
BEQ SVC_Exit // Branch when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
beq SVC_Exit // Branch when threads are the same
CMP R1,#0
BEQ SVC_ContextSwitch // Branch if running thread is deleted
subs r3,r3,#8 // Adjust address
str r2,[r3] // osRtxInfo.thread.run: curr = next
cmp r1,#0
beq SVC_ContextRestore // Branch if running thread is deleted
SVC_ContextSave:
MRS R0,PSP // Get PSP
SUBS R0,R0,#32 // Calculate SP
STR R0,[R1,#TCB_SP_OFS] // Store SP
STMIA R0!,{R4-R7} // Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} // Save R8..R11
mrs r0,psp // Get PSP
subs r0,r0,#32 // Calculate SP: space for R4..R11
str r0,[r1,#TCB_SP_OFS] // Store SP
SVC_ContextSwitch:
SUBS R3,R3,#8 // Adjust address
STR R2,[R3] // osRtxInfo.thread.run: curr = next
#ifdef RTX_STACK_CHECK
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cmp r0,#0
bne SVC_ContextSaveRegs // Branch when stack check is ok
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
b SVC_ContextRestore // Branch to context restore handling
SVC_ContextSaveRegs:
ldr r0,[r1,#TCB_SP_OFS] // Load SP
#endif // RTX_STACK_CHECK
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11
SVC_ContextRestore:
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ADDS R0,R0,#16 // Adjust address
LDMIA R0!,{R4-R7} // Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 // Set PSP
SUBS R0,R0,#32 // Adjust address
LDMIA R0!,{R4-R7} // Restore R4..R7
ldr r0,[r2,#TCB_SP_OFS] // Load SP
adds r0,r0,#16 // Adjust address
ldmia r0!,{r4-r7} // Restore R8..R11
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
msr psp,r0 // Set PSP
subs r0,r0,#32 // Adjust address
ldmia r0!,{r4-r7} // Restore R4..R7
MOVS R0,#2 // Binary complement of 0xFFFFFFFD
MVNS R0,R0 // Set EXC_RETURN value
BX R0 // Exit from handler
movs r0,#2 // Binary complement of 0xFFFFFFFD
mvns r0,r0 // Set EXC_RETURN value
bx r0 // Exit from handler
SVC_MSP:
MRS R0,MSP // Get MSP
B SVC_Number
mrs r0,msp // Get MSP
b SVC_Number
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LSLS R1,R1,#2
LDR R3,[R2,R1] // Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STR R0,[R2] // Store function return value
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
lsls r1,r1,#2
ldr r3,[r2,r1] // Load address of SVC function
mov r12,r3
ldmia r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
mov lr,r3 // Set EXC_RETURN
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
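The new RTX_STACK_CHECK path calls osRtxThreadStackCheck before touching the outgoing thread's stack; on failure it reports osRtxErrorStackOverflow and abandons the context unsaved. A sketch of that flow in C, where both prototypes are assumptions inferred from how the assembly calls them:

    #include <stdint.h>

    extern uint32_t osRtxThreadStackCheck (const void *thread);             /* assumed signature */
    extern uint32_t osRtxKernelErrorNotify(uint32_t code, void *object_id); /* assumed signature */
    #define osRtxErrorStackOverflow 1u  /* matches the .equ above */

    static void save_or_abandon(void *curr) {
        if (osRtxThreadStackCheck(curr) != 0u) {
            /* stack intact: save R4..R11 to the thread stack as usual */
        } else {
            /* overrun: report it, then restore the next thread without saving */
            (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, curr);
        }
    }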
@ -142,11 +170,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -159,11 +187,11 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex-M0 Exception handlers
* Title: ARMv6-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,9 +26,16 @@
.syntax unified
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
@ -38,6 +45,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -47,89 +55,109 @@ irqRtxLib:
.cantunwind
SVC_Handler:
MOV R0,LR
LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP // Branch if return stack is MSP
MRS R0,PSP // Get PSP
mov r0,lr
lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2
bcc SVC_MSP // Branch if return stack is MSP
mrs r0,psp // Get PSP
SVC_Number:
LDR R1,[R0,#24] // Load saved PC from stack
SUBS R1,R1,#2 // Point to SVC instruction
LDRB R1,[R1] // Load SVC number
CMP R1,#0
BNE SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
subs r1,r1,#2 // Point to SVC instruction
ldrb r1,[r1] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDMIA R0,{R0-R3} // Load function parameters from stack
BLX R7 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STMIA R2!,{R0-R1} // Store function return values
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
ldmia r0,{r0-r3} // Load function parameters from stack
blx r7 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
stmia r2!,{r0-r1} // Store function return values
mov lr,r3 // Set EXC_RETURN
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
BEQ SVC_Exit // Branch when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
beq SVC_Exit // Branch when threads are the same
CMP R1,#0
BEQ SVC_ContextSwitch // Branch if running thread is deleted
subs r3,r3,#8 // Adjust address
str r2,[r3] // osRtxInfo.thread.run: curr = next
cmp r1,#0
beq SVC_ContextRestore // Branch if running thread is deleted
SVC_ContextSave:
MRS R0,PSP // Get PSP
SUBS R0,R0,#32 // Calculate SP
STR R0,[R1,#TCB_SP_OFS] // Store SP
STMIA R0!,{R4-R7} // Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} // Save R8..R11
mrs r0,psp // Get PSP
subs r0,r0,#32 // Calculate SP: space for R4..R11
str r0,[r1,#TCB_SP_OFS] // Store SP
SVC_ContextSwitch:
SUBS R3,R3,#8 // Adjust address
STR R2,[R3] // osRtxInfo.thread.run: curr = next
#ifdef RTX_STACK_CHECK
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cmp r0,#0
bne SVC_ContextSaveRegs // Branch when stack check is ok
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
b SVC_ContextRestore // Branch to context restore handling
SVC_ContextSaveRegs:
ldr r0,[r1,#TCB_SP_OFS] // Load SP
#endif // RTX_STACK_CHECK
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11
SVC_ContextRestore:
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ADDS R0,R0,#16 // Adjust address
LDMIA R0!,{R4-R7} // Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 // Set PSP
SUBS R0,R0,#32 // Adjust address
LDMIA R0!,{R4-R7} // Restore R4..R7
ldr r0,[r2,#TCB_SP_OFS] // Load SP
adds r0,r0,#16 // Adjust address
ldmia r0!,{r4-r7} // Restore R8..R11
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
msr psp,r0 // Set PSP
subs r0,r0,#32 // Adjust address
ldmia r0!,{r4-r7} // Restore R4..R7
MOVS R0,#2 // Binary complement of 0xFFFFFFFD
MVNS R0,R0 // Set EXC_RETURN value
BX R0 // Exit from handler
movs r0,#2 // Binary complement of 0xFFFFFFFD
mvns r0,r0 // Set EXC_RETURN value
bx r0 // Exit from handler
SVC_MSP:
MRS R0,MSP // Get MSP
B SVC_Number
mrs r0,msp // Get MSP
b SVC_Number
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LSLS R1,R1,#2
LDR R3,[R2,R1] // Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STR R0,[R2] // Store function return value
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
lsls r1,r1,#2
ldr r3,[r2,r1] // Load address of SVC function
mov r12,r3
ldmia r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
mov lr,r3 // Set EXC_RETURN
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
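A side note on the Thumb-1 idiom used at handler exit: 0xFFFFFFFD cannot be encoded as a MOV immediate, so it is built as the ones' complement of 2. In C:

    #include <stdint.h>

    static inline uint32_t exc_return_thread_psp(void) {
        uint32_t v = 2u;   /* MOVS R0,#2 */
        return ~v;         /* MVNS R0,R0 -> 0xFFFFFFFD */
    }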
@ -142,11 +170,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -159,11 +187,11 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020 Arm Limited. All rights reserved.
* Copyright (c) 2016-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv8M Baseline Exception handlers
* Title: ARMv8-M Baseline Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,11 +26,10 @@
.syntax unified
#ifdef _RTE_
#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS 1
#endif
#endif
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
#ifndef DOMAIN_NS
#define DOMAIN_NS 0
@ -42,6 +41,8 @@
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ TCB_TZM_OFS, 64 // TCB.tz_memory offset
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
@ -51,6 +52,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -60,129 +62,171 @@ irqRtxLib:
.cantunwind
SVC_Handler:
MOV R0,LR
LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2
BCC SVC_MSP // Branch if return stack is MSP
MRS R0,PSP // Get PSP
mov r0,lr
lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2
bcc SVC_MSP // Branch if return stack is MSP
mrs r0,psp // Get PSP
SVC_Number:
LDR R1,[R0,#24] // Load saved PC from stack
SUBS R1,R1,#2 // Point to SVC instruction
LDRB R1,[R1] // Load SVC number
CMP R1,#0
BNE SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
subs r1,r1,#2 // Point to SVC instruction
ldrb r1,[r1] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDM R0,{R0-R3} // Load function parameters from stack
BLX R7 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STMIA R2!,{R0-R1} // Store function return values
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
ldmia r0,{r0-r3} // Load function parameters from stack
blx r7 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
stmia r2!,{r0-r1} // Store function return values
mov lr,r3 // Set EXC_RETURN
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
BEQ SVC_Exit // Branch when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
beq SVC_Exit // Branch when threads are the same
CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted
subs r3,r3,#8 // Adjust address
str r2,[r3] // osRtxInfo.thread.run: curr = next
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
SVC_ContextSave:
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 // Branch if there is no secure context
PUSH {R1,R2,R3,R7} // Save registers
MOV R7,LR // Get EXC_RETURN
BL TZ_StoreContext_S // Store secure context
MOV LR,R7 // Set EXC_RETURN
POP {R1,R2,R3,R7} // Restore registers
#endif
#if (DOMAIN_NS != 0)
ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextSave_NS // Branch if there is no secure context
push {r1,r2,r3,r7} // Save registers
mov r7,lr // Get EXC_RETURN
bl TZ_StoreContext_S // Store secure context
mov lr,r7 // Set EXC_RETURN
pop {r1,r2,r3,r7} // Restore registers
#endif
SVC_ContextSave1:
MRS R0,PSP // Get PSP
SUBS R0,R0,#32 // Calculate SP
STR R0,[R1,#TCB_SP_OFS] // Store SP
STMIA R0!,{R4-R7} // Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} // Save R8..R11
SVC_ContextSave_NS:
mrs r0,psp // Get PSP
#if (DOMAIN_NS != 0)
mov r3,lr // Get EXC_RETURN
lsls r3,r3,#25 // Check domain of interrupted thread
bmi SVC_ContextSaveSP // Branch if secure
#endif
SVC_ContextSave2:
MOV R0,LR // Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS // Adjust address
STRB R0,[R1] // Store stack frame information
#ifdef RTX_STACK_CHECK
subs r0,r0,#32 // Calculate SP: space for R4..R11
SVC_ContextSwitch:
SUBS R3,R3,#8 // Adjust address
STR R2,[R3] // osRtxInfo.thread.run: curr = next
SVC_ContextSaveSP:
str r0,[r1,#TCB_SP_OFS] // Store SP
mov r3,lr // Get EXC_RETURN
mov r0,r1 // osRtxInfo.thread.run.curr
adds r0,r0,#TCB_SF_OFS // Adjust address
strb r3,[r0] // Store stack frame information
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cmp r0,#0
bne SVC_ContextSaveRegs // Branch when stack check is ok
movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
b SVC_ContextRestore // Branch to context restore handling
SVC_ContextSaveRegs:
#if (DOMAIN_NS != 0)
mov r0,r1 // osRtxInfo.thread.run.curr
adds r0,r0,#TCB_SF_OFS // Adjust address
ldrb r3,[r0] // Load stack frame information
lsls r3,r3,#25 // Check domain of interrupted thread
bmi SVC_ContextRestore // Branch if secure
#endif
ldr r0,[r1,#TCB_SP_OFS] // Load SP
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11
#else
subs r0,r0,#32 // Calculate SP: space for R4..R11
stmia r0!,{r4-r7} // Save R4..R7
mov r4,r8
mov r5,r9
mov r6,r10
mov r7,r11
stmia r0!,{r4-r7} // Save R8..R11
subs r0,r0,#32 // Adjust address
SVC_ContextSaveSP:
str r0,[r1,#TCB_SP_OFS] // Store SP
mov r0,lr // Get EXC_RETURN
adds r1,r1,#TCB_SF_OFS // Adjust address
strb r0,[r1] // Store stack frame information
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context
PUSH {R2,R3} // Save registers
BL TZ_LoadContext_S // Load secure context
POP {R2,R3} // Restore registers
#endif
#if (DOMAIN_NS != 0)
ldr r0,[r2,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextRestore_NS // Branch if there is no secure context
push {r2,r3} // Save registers
bl TZ_LoadContext_S // Load secure context
pop {r2,r3} // Restore registers
#endif
SVC_ContextRestore1:
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS // Adjust address
LDRB R0,[R1] // Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 // R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 // Set EXC_RETURN
SVC_ContextRestore_NS:
ldr r0,[r2,#TCB_SM_OFS] // Load stack memory base
msr psplim,r0 // Set PSPLIM
mov r0,r2 // osRtxInfo.thread.run.next
adds r0,r0,#TCB_SF_OFS // Adjust address
ldrb r3,[r0] // Load stack frame information
movs r0,#0xFF
mvns r0,r0 // R0=0xFFFFFF00
orrs r3,r3,r0
mov lr,r3 // Set EXC_RETURN
ldr r0,[r2,#TCB_SP_OFS] // Load SP
#if (DOMAIN_NS != 0)
lsls r3,r3,#25 // Check domain of interrupted thread
bmi SVC_ContextRestoreSP // Branch if secure
#endif
#if (DOMAIN_NS == 1)
LSLS R0,R0,#25 // Check domain of interrupted thread
BPL SVC_ContextRestore2 // Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] // Load SP
MSR PSP,R0 // Set PSP
BX LR // Exit from handler
#else
LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base
MSR PSPLIM,R0 // Set PSPLIM
#endif
adds r0,r0,#16 // Adjust address
ldmia r0!,{r4-r7} // Restore R8..R11
mov r8,r4
mov r9,r5
mov r10,r6
mov r11,r7
subs r0,r0,#32 // Adjust address
ldmia r0!,{r4-r7} // Restore R4..R7
adds r0,r0,#16 // Adjust address
SVC_ContextRestore2:
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ADDS R0,R0,#16 // Adjust address
LDMIA R0!,{R4-R7} // Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 // Set PSP
SUBS R0,R0,#32 // Adjust address
LDMIA R0!,{R4-R7} // Restore R4..R7
SVC_ContextRestoreSP:
msr psp,r0 // Set PSP
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_MSP:
MRS R0,MSP // Get MSP
B SVC_Number
mrs r0,msp // Get MSP
b SVC_Number
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LSLS R1,R1,#2
LDR R3,[R2,R1] // Load address of SVC function
MOV R12,R3
LDMIA R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R2,R3} // Restore SP and EXC_RETURN
STR R0,[R2] // Store function return value
MOV LR,R3 // Set EXC_RETURN
push {r0,lr} // Save SP and EXC_RETURN
lsls r1,r1,#2
ldr r3,[r2,r1] // Load address of SVC function
mov r12,r3
ldmia r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r2,r3} // Restore SP and EXC_RETURN
str r0,[r2] // Store function return value
mov lr,r3 // Set EXC_RETURN
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
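When DOMAIN_NS is non-zero, the handler brackets the switch with the CMSIS TrustZone context calls, skipping either side when the thread owns no secure memory slot (the CBZ guards). A sketch, with the typedef assumed to follow tz_context.h:

    #include <stdint.h>

    typedef uint32_t TZ_MemoryId_t;                      /* per tz_context.h */
    extern uint32_t TZ_StoreContext_S(TZ_MemoryId_t id);
    extern uint32_t TZ_LoadContext_S (TZ_MemoryId_t id);

    static void tz_context_switch(TZ_MemoryId_t curr_id, TZ_MemoryId_t next_id) {
        if (curr_id != 0u) { (void)TZ_StoreContext_S(curr_id); }  /* CBZ guard */
        if (next_id != 0u) { (void)TZ_LoadContext_S (next_id); }
    }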
@ -195,11 +239,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B Sys_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -212,113 +256,14 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,R1} // Restore EXC_RETURN
MOV LR,R1 // Set EXC_RETURN
B Sys_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,r1} // Restore EXC_RETURN
mov lr,r1 // Set EXC_RETURN
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
.thumb_func
.type Sys_Context, %function
.global Sys_Context
.fnstart
.cantunwind
Sys_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
BEQ Sys_ContextExit // Branch when threads are the same
Sys_ContextSave:
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 // Branch if there is no secure context
PUSH {R1,R2,R3,R7} // Save registers
MOV R7,LR // Get EXC_RETURN
BL TZ_StoreContext_S // Store secure context
MOV LR,R7 // Set EXC_RETURN
POP {R1,R2,R3,R7} // Restore registers
Sys_ContextSave1:
MOV R0,LR // Get EXC_RETURN
LSLS R0,R0,#25 // Check domain of interrupted thread
BPL Sys_ContextSave2 // Branch if non-secure
MRS R0,PSP // Get PSP
STR R0,[R1,#TCB_SP_OFS] // Store SP
B Sys_ContextSave3
#endif
Sys_ContextSave2:
MRS R0,PSP // Get PSP
SUBS R0,R0,#32 // Adjust address
STR R0,[R1,#TCB_SP_OFS] // Store SP
STMIA R0!,{R4-R7} // Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} // Save R8..R11
Sys_ContextSave3:
MOV R0,LR // Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS // Adjust address
STRB R0,[R1] // Store stack frame information
Sys_ContextSwitch:
SUBS R3,R3,#8 // Adjust address
STR R2,[R3] // osRtxInfo.run: curr = next
Sys_ContextRestore:
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context
PUSH {R2,R3} // Save registers
BL TZ_LoadContext_S // Load secure context
POP {R2,R3} // Restore registers
#endif
Sys_ContextRestore1:
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS // Adjust offset
LDRB R0,[R1] // Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 // R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 // Set EXC_RETURN
#if (DOMAIN_NS == 1)
LSLS R0,R0,#25 // Check domain of interrupted thread
BPL Sys_ContextRestore2 // Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] // Load SP
MSR PSP,R0 // Set PSP
BX LR // Exit from handler
#else
LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base
MSR PSPLIM,R0 // Set PSPLIM
#endif
Sys_ContextRestore2:
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ADDS R0,R0,#16 // Adjust address
LDMIA R0!,{R4-R7} // Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 // Set PSP
SUBS R0,R0,#32 // Adjust address
LDMIA R0!,{R4-R7} // Restore R4..R7
Sys_ContextExit:
BX LR // Exit from handler
.fnend
.size Sys_Context, .-Sys_Context
.size SysTick_Handler, .-SysTick_Handler
.end
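SVC_ContextRestore_NS rebuilds EXC_RETURN from the single stack-frame byte kept in the TCB, since all upper bits of a valid EXC_RETURN are ones. In C:

    #include <stdint.h>

    static inline uint32_t exc_return_from_frame(uint8_t stack_frame) {
        return 0xFFFFFF00u | stack_frame;   /* MOVS/MVNS/ORRS sequence above */
    }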


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex-M3 Exception handlers
* Title: ARMv7-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,8 +26,24 @@
.syntax unified
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
#if (defined(__ARM_FP) && (__ARM_FP > 0))
.equ FPU_USED, 1
#else
.equ FPU_USED, 0
#endif
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ FPCCR, 0xE000EF34 // FPCCR Address
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
@ -38,6 +54,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -47,60 +64,128 @@ irqRtxLib:
.cantunwind
SVC_Handler:
TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP // Get MSP if return stack is MSP
MRSNE R0,PSP // Get PSP if return stack is PSP
tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2
ite eq
mrseq r0,msp // Get MSP if return stack is MSP
mrsne r0,psp // Get PSP if return stack is PSP
LDR R1,[R0,#24] // Load saved PC from stack
LDRB R1,[R1,#-2] // Load SVC number
CBNZ R1,SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
ldrb r1,[r1,#-2] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} // Load function parameters and address from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STM R12,{R0-R1} // Store function return values
push {r0,lr} // Save SP and EXC_RETURN
ldm r0,{r0-r3,r12} // Load function parameters and address from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
stm r12,{r0-r1} // Store function return values
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
IT EQ
BXEQ LR // Exit when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
it eq
bxeq lr // Exit when threads are the same
CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
cbnz r1,SVC_ContextSave // Branch if running thread is not deleted
SVC_FP_LazyState:
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
bne SVC_ContextRestore // Branch if not extended stack frame
ldr r3,=FPCCR // FPCCR Address
ldr r0,[r3] // Load FPCCR
bic r0,r0,#1 // Clear LSPACT (Lazy state preservation)
str r0,[r3] // Store FPCCR
b SVC_ContextRestore // Branch to context restore handling
.else
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
.endif
SVC_ContextSave:
STMDB R12!,{R4-R11} // Save R4..R11
STR R12,[R1,#TCB_SP_OFS] // Store SP
#ifdef RTX_STACK_CHECK
sub r12,r12,#32 // Calculate SP: space for R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
subeq r12,r12,#64 // Additional space for S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP
SVC_ContextSwitch:
STR R2,[R3] // osRtxInfo.thread.run: curr = next
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
.if (FPU_USED != 0)
mov r4,r1 // Save osRtxInfo.thread.run.curr
.endif
mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information
b SVC_FP_LazyState // Branch to FP lazy state handling
.else
b SVC_ContextRestore // Branch to context restore handling
.endif
SVC_ContextSaveRegs:
ldr r12,[r1,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrb lr, [r1,#TCB_SF_OFS] // Load stack frame information
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmiaeq r12!,{s16-s31} // Save VFP S16..S31
.endif
stm r12,{r4-r11} // Save R4..R11
#else
stmdb r12!,{r4-r11} // Save R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmdbeq r12!,{s16-s31} // Save VFP S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
LDR R0,[R2,#TCB_SP_OFS] // Load SP
LDMIA R0!,{R4-R11} // Restore R4..R11
MSR PSP,R0 // Set PSP
MVN LR,#~0xFFFFFFFD // Set EXC_RETURN value
ldr r0,[r2,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information
orn lr,r1,#0xFF // Set EXC_RETURN
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31
.else
mvn lr,#~0xFFFFFFFD // Set EXC_RETURN value
.endif
ldmia r0!,{r4-r11} // Restore R4..R11
msr psp,r0 // Set PSP
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] // Load address of SVC function
LDM R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STR R0,[R12] // Store function return value
push {r0,lr} // Save SP and EXC_RETURN
ldr r12,[r2,r1,lsl #2] // Load address of SVC function
ldm r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
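SVC_FP_LazyState covers the case where a thread dies while lazy FP state preservation is still pending: clearing LSPACT (FPCCR bit 0) stops the core from later writing FP state onto the dead thread's stack. A sketch using the FPCCR address from the .equ above:

    #include <stdint.h>

    #define FPCCR (*(volatile uint32_t *)0xE000EF34u)

    static void clear_lazy_fp_state(void) {
        FPCCR &= ~1u;   /* BIC R0,R0,#1 : clear LSPACT */
    }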
@ -113,11 +198,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,LR} // Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -130,11 +215,11 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,LR} // Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2020 Arm Limited. All rights reserved.
* Copyright (c) 2016-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: ARMv8M Mainline Exception handlers
* Title: ARMv8-M Mainline Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,11 +26,10 @@
.syntax unified
#ifdef _RTE_
#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS 1
#endif
#endif
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
#ifndef DOMAIN_NS
#define DOMAIN_NS 0
@ -54,6 +53,10 @@
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ TCB_TZM_OFS, 64 // TCB.tz_memory offset
.equ FPCCR, 0xE000EF34 // FPCCR Address
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
@ -63,6 +66,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -72,115 +76,165 @@ irqRtxLib:
.cantunwind
SVC_Handler:
TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP // Get MSP if return stack is MSP
MRSNE R0,PSP // Get PSP if return stack is PSP
tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2
ite eq
mrseq r0,msp // Get MSP if return stack is MSP
mrsne r0,psp // Get PSP if return stack is PSP
LDR R1,[R0,#24] // Load saved PC from stack
LDRB R1,[R1,#-2] // Load SVC number
CMP R1,#0
BNE SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
ldrb r1,[r1,#-2] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} // Load function parameters and address from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STM R12,{R0-R1} // Store function return values
push {r0,lr} // Save SP and EXC_RETURN
ldm r0,{r0-r3,r12} // Load function parameters and address from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
stm r12,{r0-r1} // Store function return values
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
IT EQ
BXEQ LR // Exit when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
it eq
bxeq lr // Exit when threads are the same
.if (FPU_USED == 1) || (MVE_USED == 1)
CBNZ R1,SVC_ContextSave // Branch if running thread is not deleted
TST LR,#0x10 // Check if extended stack frame
BNE SVC_ContextSwitch
LDR R1,=0xE000EF34 // FPCCR Address
LDR R0,[R1] // Load FPCCR
BIC R0,R0,#1 // Clear LSPACT (Lazy state)
STR R0,[R1] // Store FPCCR
B SVC_ContextSwitch
.else
CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted
.endif
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0) || (MVE_USED != 0)
cbnz r1,SVC_ContextSave // Branch if running thread is not deleted
SVC_FP_LazyState:
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
bne SVC_ContextRestore // Branch if not extended stack frame
ldr r3,=FPCCR // FPCCR Address
ldr r0,[r3] // Load FPCCR
bic r0,r0,#1 // Clear LSPACT (Lazy state preservation)
str r0,[r3] // Store FPCCR
b SVC_ContextRestore // Branch to context restore handling
.else
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
.endif
SVC_ContextSave:
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 // Branch if there is no secure context
PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN
BL TZ_StoreContext_S // Store secure context
POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN
#endif
#if (DOMAIN_NS != 0)
ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextSave_NS // Branch if there is no secure context
push {r1,r2,r12,lr} // Save registers and EXC_RETURN
bl TZ_StoreContext_S // Store secure context
pop {r1,r2,r12,lr} // Restore registers and EXC_RETURN
#endif
SVC_ContextSave1:
MRS R0,PSP // Get PSP
STMDB R0!,{R4-R11} // Save R4..R11
.if (FPU_USED == 1) || (MVE_USED == 1)
TST LR,#0x10 // Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} // Save VFP S16..S31
.endif
SVC_ContextSave_NS:
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
bne SVC_ContextSaveSP // Branch if secure
#endif
SVC_ContextSave2:
STR R0,[R1,#TCB_SP_OFS] // Store SP
STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information
#ifdef RTX_STACK_CHECK
sub r12,r12,#32 // Calculate SP: space for R4..R11
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
subeq r12,r12,#64 // Additional space for S16..S31
.endif
SVC_ContextSwitch:
STR R2,[R3] // osRtxInfo.thread.run: curr = next
SVC_ContextSaveSP:
str r12,[r1,#TCB_SP_OFS] // Store SP
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
.if (FPU_USED != 0) || (MVE_USED != 0)
mov r4,r1 // Save osRtxInfo.thread.run.curr
.endif
mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0) || (MVE_USED != 0)
ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information
b SVC_FP_LazyState // Branch to FP lazy state handling
.else
b SVC_ContextRestore // Branch to context restore handling
.endif
SVC_ContextSaveRegs:
ldrb lr,[r1,#TCB_SF_OFS] // Load stack frame information
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
bne SVC_ContextRestore // Branch if secure
#endif
ldr r12,[r1,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmiaeq r12!,{s16-s31} // Save VFP S16..S31
.endif
stm r12,{r4-r11} // Save R4..R11
#else
stmdb r12!,{r4-r11} // Save R4..R11
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmdbeq r12!,{s16-s31} // Save VFP S16..S31
.endif
SVC_ContextSaveSP:
str r12,[r1,#TCB_SP_OFS] // Store SP
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context
PUSH {R2,R3} // Save registers
BL TZ_LoadContext_S // Load secure context
POP {R2,R3} // Restore registers
#endif
#if (DOMAIN_NS != 0)
ldr r0,[r2,#TCB_TZM_OFS] // Load TrustZone memory identifier
cbz r0,SVC_ContextRestore_NS // Branch if there is no secure context
push {r2,r3} // Save registers
bl TZ_LoadContext_S // Load secure context
pop {r2,r3} // Restore registers
#endif
SVC_ContextRestore1:
LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information
MSR PSPLIM,R0 // Set PSPLIM
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN
SVC_ContextRestore_NS:
ldr r0,[r2,#TCB_SP_OFS] // Load SP
ldr r1,[r2,#TCB_SM_OFS] // Load stack memory base
msr psplim,r1 // Set PSPLIM
ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information
orn lr,r1,#0xFF // Set EXC_RETURN
#if (DOMAIN_NS != 0)
tst lr,#0x40 // Check domain of interrupted thread
bne SVC_ContextRestoreSP // Branch if secure
#endif
#if (DOMAIN_NS == 1)
TST LR,#0x40 // Check domain of interrupted thread
BNE SVC_ContextRestore2 // Branch if secure
#endif
.if (FPU_USED != 0) || (MVE_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31
.endif
ldmia r0!,{r4-r11} // Restore R4..R11
.if (FPU_USED == 1) || (MVE_USED == 1)
TST LR,#0x10 // Check if extended stack frame
IT EQ
VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31
.endif
LDMIA R0!,{R4-R11} // Restore R4..R11
SVC_ContextRestore2:
MSR PSP,R0 // Set PSP
SVC_ContextRestoreSP:
msr psp,r0 // Set PSP
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] // Load address of SVC function
LDM R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STR R0,[R12] // Store function return value
push {r0,lr} // Save SP and EXC_RETURN
ldr r12,[r2,r1,lsl #2] // Load address of SVC function
ldm r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
@ -193,10 +247,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,LR} // Restore EXC_RETURN
B Sys_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -209,95 +264,14 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,LR} // Restore EXC_RETURN
B Sys_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
.thumb_func
.type Sys_Context, %function
.global Sys_Context
.fnstart
.cantunwind
Sys_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
IT EQ
BXEQ LR // Exit when threads are the same
Sys_ContextSave:
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 // Branch if there is no secure context
PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN
BL TZ_StoreContext_S // Store secure context
POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN
Sys_ContextSave1:
TST LR,#0x40 // Check domain of interrupted thread
IT NE
MRSNE R0,PSP // Get PSP
BNE Sys_ContextSave3 // Branch if secure
#endif
Sys_ContextSave2:
MRS R0,PSP // Get PSP
STMDB R0!,{R4-R11} // Save R4..R11
.if (FPU_USED == 1) || (MVE_USED == 1)
TST LR,#0x10 // Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} // Save VFP S16..S31
.endif
Sys_ContextSave3:
STR R0,[R1,#TCB_SP_OFS] // Store SP
STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information
Sys_ContextSwitch:
STR R2,[R3] // osRtxInfo.run: curr = next
Sys_ContextRestore:
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context
PUSH {R2,R3} // Save registers
BL TZ_LoadContext_S // Load secure context
POP {R2,R3} // Restore registers
#endif
Sys_ContextRestore1:
LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information
MSR PSPLIM,R0 // Set PSPLIM
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN
#if (DOMAIN_NS == 1)
TST LR,#0x40 // Check domain of interrupted thread
BNE Sys_ContextRestore2 // Branch if secure
#endif
.if (FPU_USED == 1) || (MVE_USED == 1)
TST LR,#0x10 // Check if extended stack frame
IT EQ
VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31
.endif
LDMIA R0!,{R4-R11} // Restore R4..R11
Sys_ContextRestore2:
MSR PSP,R0 // Set PSP
Sys_ContextExit:
BX LR // Exit from handler
.fnend
.size Sys_Context, .-Sys_Context
.size SysTick_Handler, .-SysTick_Handler
.end
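
The RTX_STACK_CHECK path introduced above calls osRtxThreadStackCheck on each context save and, on failure, reports osRtxErrorStackOverflow through osRtxKernelErrorNotify before forcing a switch to the next ready thread. On the application side this arrives via the overridable osRtxErrorNotify hook; a minimal sketch of an override, assuming the standard RTX5 prototype from rtx_os.h:

#include "rtx_os.h"

// Minimal sketch (ours): trap stack overflows reported by the kernel.
uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
  if (code == osRtxErrorStackOverflow) {
    // object_id identifies the offending thread; its stack is already
    // corrupt, so halting is safer than letting it keep running.
    (void)object_id;
    for (;;) {}
  }
  return 0U;
}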

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -18,7 +18,7 @@
* -----------------------------------------------------------------------------
*
* Project: CMSIS-RTOS RTX
* Title: Cortex-M4F Exception handlers
* Title: ARMv7-M Exception handlers
*
* -----------------------------------------------------------------------------
*/
@ -26,10 +26,25 @@
.syntax unified
// Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
#define RTX_CONFIG_H_
#undef _RTE_
#include "rtx_def.h"
#if (defined(__ARM_FP) && (__ARM_FP > 0))
.equ FPU_USED, 1
#else
.equ FPU_USED, 0
#endif
.equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset
.equ TCB_SP_OFS, 56 // TCB.SP offset
.equ TCB_SF_OFS, 34 // TCB.stack_frame offset
.equ FPCCR, 0xE000EF34 // FPCCR Address
.equ osRtxErrorStackOverflow, 1 // Stack overflow
.section ".rodata"
.global irqRtxLib // Non weak library reference
irqRtxLib:
@ -39,6 +54,7 @@ irqRtxLib:
.thumb
.section ".text"
.align 2
.eabi_attribute Tag_ABI_align_preserved, 1
.thumb_func
@ -48,83 +64,128 @@ irqRtxLib:
.cantunwind
SVC_Handler:
TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2
ITE EQ
MRSEQ R0,MSP // Get MSP if return stack is MSP
MRSNE R0,PSP // Get PSP if return stack is PSP
tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2
ite eq
mrseq r0,msp // Get MSP if return stack is MSP
mrsne r0,psp // Get PSP if return stack is PSP
LDR R1,[R0,#24] // Load saved PC from stack
LDRB R1,[R1,#-2] // Load SVC number
CBNZ R1,SVC_User // Branch if not SVC 0
ldr r1,[r0,#24] // Load saved PC from stack
ldrb r1,[r1,#-2] // Load SVC number
cmp r1,#0 // Check SVC number
bne SVC_User // Branch if not SVC 0
PUSH {R0,LR} // Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} // Load function parameters and address from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STM R12,{R0-R1} // Store function return values
push {r0,lr} // Save SP and EXC_RETURN
ldm r0,{r0-r3,r12} // Load function parameters and address from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
stm r12,{r0-r1} // Store function return values
SVC_Context:
LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run
LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next
CMP R1,R2 // Check if thread switch is required
IT EQ
BXEQ LR // Exit when threads are the same
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next
cmp r1,r2 // Check if thread switch is required
it eq
bxeq lr // Exit when threads are the same
CBNZ R1,SVC_ContextSave // Branch if running thread is not deleted
TST LR,#0x10 // Check if extended stack frame
BNE SVC_ContextSwitch
#ifdef __FPU_PRESENT
LDR R1,=0xE000EF34 // FPCCR Address
LDR R0,[R1] // Load FPCCR
BIC R0,R0,#1 // Clear LSPACT (Lazy state)
STR R0,[R1] // Store FPCCR
B SVC_ContextSwitch
#endif
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
cbnz r1,SVC_ContextSave // Branch if running thread is not deleted
SVC_FP_LazyState:
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
bne SVC_ContextRestore // Branch if not extended stack frame
ldr r3,=FPCCR // FPCCR Address
ldr r0,[r3] // Load FPCCR
bic r0,r0,#1 // Clear LSPACT (Lazy state preservation)
str r0,[r3] // Store FPCCR
b SVC_ContextRestore // Branch to context restore handling
.else
cbz r1,SVC_ContextRestore // Branch if running thread is deleted
.endif
SVC_ContextSave:
STMDB R12!,{R4-R11} // Save R4..R11
#ifdef __FPU_PRESENT
TST LR,#0x10 // Check if extended stack frame
IT EQ
VSTMDBEQ R12!,{S16-S31} // Save VFP S16..S31
#endif
#ifdef RTX_STACK_CHECK
sub r12,r12,#32 // Calculate SP: space for R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
subeq r12,r12,#64 // Additional space for S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP
STR R12,[R1,#TCB_SP_OFS] // Store SP
STRB LR, [R1,#TCB_SF_OFS] // Store stack frame information
push {r1,r2} // Save osRtxInfo.thread.run: curr & next
mov r0,r1 // Parameter: osRtxInfo.thread.run.curr
bl osRtxThreadStackCheck // Check if thread stack is overrun
pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next
cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok
SVC_ContextSwitch:
STR R2,[R3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
mov r4,r1 // Save osRtxInfo.thread.run.curr
.endif
mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id
bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify
ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run
ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next
str r2,[r3] // osRtxInfo.thread.run: curr = next
.if (FPU_USED != 0)
ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information
b SVC_FP_LazyState // Branch to FP lazy state handling
.else
b SVC_ContextRestore // Branch to context restore handling
.endif
SVC_ContextSaveRegs:
ldr r12,[r1,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrb lr, [r1,#TCB_SF_OFS] // Load stack frame information
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmiaeq r12!,{s16-s31} // Save VFP S16..S31
.endif
stm r12,{r4-r11} // Save R4..R11
#else
stmdb r12!,{r4-r11} // Save R4..R11
.if (FPU_USED != 0)
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vstmdbeq r12!,{s16-s31} // Save VFP S16..S31
strb lr, [r1,#TCB_SF_OFS] // Store stack frame information
.endif
str r12,[r1,#TCB_SP_OFS] // Store SP
#endif // RTX_STACK_CHECK
SVC_ContextRestore:
LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information
LDR R0,[R2,#TCB_SP_OFS] // Load SP
ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN
#ifdef __FPU_PRESENT
TST LR,#0x10 // Check if extended stack frame
IT EQ
VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31
#endif
LDMIA R0!,{R4-R11} // Restore R4..R11
MSR PSP,R0 // Set PSP
ldr r0,[r2,#TCB_SP_OFS] // Load SP
.if (FPU_USED != 0)
ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information
orn lr,r1,#0xFF // Set EXC_RETURN
tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4
it eq // If extended stack frame
vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31
.else
mvn lr,#~0xFFFFFFFD // Set EXC_RETURN value
.endif
ldmia r0!,{r4-r11} // Restore R4..R11
msr psp,r0 // Set PSP
SVC_Exit:
BX LR // Exit from handler
bx lr // Exit from handler
SVC_User:
LDR R2,=osRtxUserSVC // Load address of SVC table
LDR R3,[R2] // Load SVC maximum number
CMP R1,R3 // Check SVC number range
BHI SVC_Exit // Branch if out of range
ldr r2,=osRtxUserSVC // Load address of SVC table
ldr r3,[r2] // Load SVC maximum number
cmp r1,r3 // Check SVC number range
bhi SVC_Exit // Branch if out of range
PUSH {R0,LR} // Save SP and EXC_RETURN
LDR R12,[R2,R1,LSL #2] // Load address of SVC function
LDM R0,{R0-R3} // Load function parameters from stack
BLX R12 // Call service function
POP {R12,LR} // Restore SP and EXC_RETURN
STR R0,[R12] // Store function return value
push {r0,lr} // Save SP and EXC_RETURN
ldr r12,[r2,r1,lsl #2] // Load address of SVC function
ldm r0,{r0-r3} // Load function parameters from stack
blx r12 // Call service function
pop {r12,lr} // Restore SP and EXC_RETURN
str r0,[r12] // Store function return value
BX LR // Return from handler
bx lr // Return from handler
.fnend
.size SVC_Handler, .-SVC_Handler
@ -137,11 +198,11 @@ SVC_User:
.cantunwind
PendSV_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxPendSV_Handler // Call osRtxPendSV_Handler
POP {R0,LR} // Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxPendSV_Handler // Call osRtxPendSV_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size PendSV_Handler, .-PendSV_Handler
@ -154,11 +215,11 @@ PendSV_Handler:
.cantunwind
SysTick_Handler:
PUSH {R0,LR} // Save EXC_RETURN
BL osRtxTick_Handler // Call osRtxTick_Handler
POP {R0,LR} // Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
push {r0,lr} // Save EXC_RETURN
bl osRtxTick_Handler // Call osRtxTick_Handler
pop {r0,lr} // Restore EXC_RETURN
mrs r12,psp // Save PSP to R12
b SVC_Context // Branch to context handling
.fnend
.size SysTick_Handler, .-SysTick_Handler
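
All of these handlers steer on three EXC_RETURN bits: bit 2 selects the return stack (tst lr,#0x04), bit 4 distinguishes the basic from the extended (FP) stack frame (tst lr,#0x10), and on ARMv8-M bit 6 marks a secure-state thread (tst lr,#0x40). The same tests as C predicates, with names that are ours rather than CMSIS symbols:

#include <stdint.h>
#include <stdbool.h>

// Illustrative predicates for the EXC_RETURN bits tested above.
static inline bool returns_to_psp (uint32_t exc_return) {
  return (exc_return & 0x04U) != 0U;  // bit 2: return stack is PSP
}
static inline bool basic_stack_frame (uint32_t exc_return) {
  return (exc_return & 0x10U) != 0U;  // bit 4 set: no FP state stacked
}
static inline bool secure_thread (uint32_t exc_return) {
  return (exc_return & 0x40U) != 0U;  // bit 6 (ARMv8-M): secure state
}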

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,12 +18,13 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-A Exception handlers
; * Title: ARMv7-A Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_ca.s
NAME irq_armv7a.s
MODE_FIQ EQU 0x11
MODE_IRQ EQU 0x12
@ -352,16 +353,16 @@ osRtxContextSave
STMDB R1!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment
VSTMDB R1!, {D0-D15} ; Save D0-D15
#ifdef __ARM_ADVANCED_SIMD__
VSTMDB R1!, {D16-D31} ; Save D16-D31
#endif
LDRB R2, [LR, #TCB_SP_FRAME] ; Load osRtxInfo.thread.run.curr frame info
#ifdef __ARM_ADVANCED_SIMD__
ORR R2, R2, #4 ; NEON state
#else
ORR R2, R2, #2 ; VFP state
#endif
STRB R2, [LR, #TCB_SP_FRAME] ; Store VFP/NEON state
osRtxContextSave1
@ -413,9 +414,9 @@ osRtxContextRestore
MCR p15, 0, R2, c1, c0, 2 ; Write CPACR
BEQ osRtxContextRestore1 ; No VFP
ISB ; Sync if VFP was enabled
#ifdef __ARM_ADVANCED_SIMD__
VLDMIA LR!, {D16-D31} ; Restore D16-D31
#endif
VLDMIA LR!, {D0-D15} ; Restore D0-D15
LDR R2, [LR]
VMSR FPSCR, R2 ; Restore FPSCR
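
In the Cortex-A context save above, the TCB frame-info byte records which FP banks were pushed: the handler ORs in 2 for a VFP (D0-D15) save or 4 for a NEON (D0-D31) save. A sketch of that encoding in C, with constant names of our own:

#include <stdint.h>

// Illustrative frame-info flags matching the ORR instructions above.
#define FRAME_VFP_D16  0x02U   // D0-D15 saved  (ORR R2,R2,#2)
#define FRAME_NEON_D32 0x04U   // D0-D31 saved  (ORR R2,R2,#4)

static inline int frame_has_neon (uint8_t frame_info) {
  return (frame_info & FRAME_NEON_D32) != 0U;
}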

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,18 +18,23 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M0 Exception handlers
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_cm0.s
NAME irq_armv6m.s
#include "rtx_def.h"
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
@ -47,6 +52,10 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -57,7 +66,7 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -68,18 +77,42 @@ SVC_Number
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0
BEQ SVC_ContextSwitch ; Branch if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
#ifdef RTX_STACK_CHECK
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
#endif
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
@ -87,10 +120,6 @@ SVC_ContextSave
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
@ -103,7 +132,7 @@ SVC_ContextRestore
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
MOVS R0,#~0xFFFFFFFD
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
@ -141,7 +170,7 @@ PendSV_Handler
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -152,7 +181,7 @@ SysTick_Handler
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
END
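
The MOVS/MVNS pair above replaces the old #~0xFFFFFFFD pseudo-immediate: ARMv6-M Thumb cannot encode 0xFFFFFFFD directly, but its bitwise complement 2 fits an 8-bit immediate, so two instructions rebuild the EXC_RETURN value. The same arithmetic in C:

#include <stdint.h>

// ~0xFFFFFFFD == 0x00000002 fits a Thumb 8-bit immediate, so
// MOVS R0,#2 followed by MVNS R0,R0 leaves 0xFFFFFFFD in R0.
uint32_t exc_return = ~(uint32_t)2U;   // 0xFFFFFFFD: thread mode, PSP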

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,18 +18,23 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M0 Exception handlers
; * Title: ARMv6-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_cm0.s
NAME irq_armv6m.s
#include "rtx_def.h"
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
@ -47,6 +52,10 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -57,7 +66,7 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -68,18 +77,42 @@ SVC_Number
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CMP R1,#0
BEQ SVC_ContextSwitch ; Branch if running thread is deleted
BEQ SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STR R0,[R1,#TCB_SP_OFS] ; Store SP
#ifdef RTX_STACK_CHECK
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
#endif
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
@ -87,10 +120,6 @@ SVC_ContextSave
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
@ -103,7 +132,7 @@ SVC_ContextRestore
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
MOVS R0,#~0xFFFFFFFD
MOVS R0,#2 ; Binary complement of 0xFFFFFFFD
MVNS R0,R0 ; Set EXC_RETURN value
BX R0 ; Exit from handler
@ -141,7 +170,7 @@ PendSV_Handler
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -152,7 +181,7 @@ SysTick_Handler
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B SVC_Context
B SVC_Context ; Branch to context handling
END

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,12 +18,17 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8M Baseline Exception handlers
; * Title: ARMv8-M Baseline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_armv8mbl.s
#include "rtx_def.h"
#ifndef DOMAIN_NS
#define DOMAIN_NS 0
#endif
@ -34,6 +39,9 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
@ -51,10 +59,14 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#if (DOMAIN_NS == 1)
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#if (DOMAIN_NS != 0)
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
MOV R0,LR
LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2
@ -65,95 +77,136 @@ SVC_Number
LDR R1,[R0,#24] ; Load saved PC from stack
SUBS R1,R1,#2 ; Point to SVC instruction
LDRB R1,[R1] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3} ; Load function parameters from stack
LDMIA R0,{R0-R3} ; Load function parameters from stack
BLX R7 ; Call service function
POP {R2,R3} ; Restore SP and EXC_RETURN
STMIA R2!,{R0-R1} ; Store function return values
MOV LR,R3 ; Set EXC_RETURN
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ SVC_Exit ; Branch when threads are the same
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
SVC_ContextSave
#if (DOMAIN_NS == 1)
#if (DOMAIN_NS != 0)
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R1,R2,R3,R7} ; Save registers
MOV R7,LR ; Get EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
MOV LR,R7 ; Set EXC_RETURN
POP {R1,R2,R3,R7} ; Restore registers
#endif
SVC_ContextSave1
SVC_ContextSave_NS
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Calculate SP
#if (DOMAIN_NS != 0)
MOV R3,LR ; Get EXC_RETURN
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextSaveSP ; Branch if secure
#endif
#ifdef RTX_STACK_CHECK
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R3,LR ; Get EXC_RETURN
MOV R0,R1 ; osRtxInfo.thread.run.curr
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
STRB R3,[R0] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CMP R0,#0
BNE SVC_ContextSaveRegs ; Branch when stack check is ok
MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
B SVC_ContextRestore ; Branch to context restore handling
SVC_ContextSaveRegs
#if (DOMAIN_NS != 0)
MOV R0,R1 ; osRtxInfo.thread.run.curr
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
LDRB R3,[R0] ; Load stack frame information
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestore ; Branch if secure
#endif
LDR R0,[R1,#TCB_SP_OFS] ; Load SP
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SVC_ContextSave2
#else
SUBS R0,R0,#32 ; Calculate SP: space for R4..R11
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
SUBS R0,R0,#32 ; Adjust address
SVC_ContextSaveSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
MOV R0,LR ; Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
STRB R0,[R1] ; Store stack frame information
SVC_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#endif
SVC_ContextRestore
#if (DOMAIN_NS == 1)
#if (DOMAIN_NS != 0)
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context
CBZ R0,SVC_ContextRestore_NS ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
#endif
SVC_ContextRestore1
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
LDRB R0,[R1] ; Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 ; R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 ; Set EXC_RETURN
#if (DOMAIN_NS == 1)
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL SVC_ContextRestore2 ; Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
MSR PSP,R0 ; Set PSP
BX LR ; Exit from handler
#else
SVC_ContextRestore_NS
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R0 ; Set PSPLIM
#endif
SVC_ContextRestore2
MOV R0,R2 ; osRtxInfo.thread.run.next
ADDS R0,R0,#TCB_SF_OFS ; Adjust address
LDRB R3,[R0] ; Load stack frame information
MOVS R0,#0xFF
MVNS R0,R0 ; R0=0xFFFFFF00
ORRS R3,R3,R0
MOV LR,R3 ; Set EXC_RETURN
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
#if (DOMAIN_NS != 0)
LSLS R3,R3,#25 ; Check domain of interrupted thread
BMI SVC_ContextRestoreSP ; Branch if secure
#endif
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
ADDS R0,R0,#16 ; Adjust address
SVC_ContextRestoreSP
MSR PSP,R0 ; Set PSP
SVC_Exit
BX LR ; Exit from handler
@ -189,7 +242,7 @@ PendSV_Handler
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B Sys_Context
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -200,104 +253,7 @@ SysTick_Handler
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,R1} ; Restore EXC_RETURN
MOV LR,R1 ; Set EXC_RETURN
B Sys_Context
B SVC_Context ; Branch to context handling
Sys_Context
EXPORT Sys_Context
IMPORT osRtxInfo
#if (DOMAIN_NS == 1)
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDM R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
BEQ Sys_ContextExit ; Branch when threads are the same
Sys_ContextSave
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,R7} ; Save registers
MOV R7,LR ; Get EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
MOV LR,R7 ; Set EXC_RETURN
POP {R1,R2,R3,R7} ; Restore registers
Sys_ContextSave1
MOV R0,LR ; Get EXC_RETURN
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL Sys_ContextSave2 ; Branch if non-secure
MRS R0,PSP ; Get PSP
STR R0,[R1,#TCB_SP_OFS] ; Store SP
B Sys_ContextSave3
#endif
Sys_ContextSave2
MRS R0,PSP ; Get PSP
SUBS R0,R0,#32 ; Adjust address
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STMIA R0!,{R4-R7} ; Save R4..R7
MOV R4,R8
MOV R5,R9
MOV R6,R10
MOV R7,R11
STMIA R0!,{R4-R7} ; Save R8..R11
Sys_ContextSave3
MOV R0,LR ; Get EXC_RETURN
ADDS R1,R1,#TCB_SF_OFS ; Adjust address
STRB R0,[R1] ; Store stack frame information
Sys_ContextSwitch
SUBS R3,R3,#8 ; Adjust address
STR R2,[R3] ; osRtxInfo.run: curr = next
Sys_ContextRestore
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
#endif
Sys_ContextRestore1
MOV R1,R2
ADDS R1,R1,#TCB_SF_OFS ; Adjust offset
LDRB R0,[R1] ; Load stack frame information
MOVS R1,#0xFF
MVNS R1,R1 ; R1=0xFFFFFF00
ORRS R0,R1
MOV LR,R0 ; Set EXC_RETURN
#if (DOMAIN_NS == 1)
LSLS R0,R0,#25 ; Check domain of interrupted thread
BPL Sys_ContextRestore2 ; Branch if non-secure
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
MSR PSP,R0 ; Set PSP
BX LR ; Exit from handler
#else
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R0 ; Set PSPLIM
#endif
Sys_ContextRestore2
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ADDS R0,R0,#16 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R8..R11
MOV R8,R4
MOV R9,R5
MOV R10,R6
MOV R11,R7
MSR PSP,R0 ; Set PSP
SUBS R0,R0,#32 ; Adjust address
LDMIA R0!,{R4-R7} ; Restore R4..R7
Sys_ContextExit
BX LR ; Exit from handler
END
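
On ARMv8-M, whenever the outgoing or incoming thread owns a secure context, the handlers call the CMSIS secure-context API with the TZ_MemoryId_t stored in the TCB's tz_memory slot (TCB_TZM_OFS). A hedged sketch of the pairing, using the tz_context.h prototypes these files import:

#include "tz_context.h"

// Sketch only: save the outgoing thread's secure state, then load the
// incoming one's; an id of 0 means the thread has no secure context.
static void switch_secure_context (TZ_MemoryId_t curr, TZ_MemoryId_t next) {
  if (curr != 0U) { (void)TZ_StoreContext_S(curr); }
  if (next != 0U) { (void)TZ_LoadContext_S(next); }
}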

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,17 +18,31 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M3 Exception handlers
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_cm3.s
NAME irq_armv7m.s
#include "rtx_def.h"
#ifdef __ARMVFP__
FPU_USED EQU 1
#else
FPU_USED EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
@ -47,6 +61,10 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -55,7 +73,8 @@ SVC_Handler
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CBNZ R1,SVC_User ; Branch if not SVC 0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
@ -64,28 +83,94 @@ SVC_Handler
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if (FPU_USED != 0)
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
#else
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
#endif
SVC_ContextSave
STMDB R12!,{R4-R11} ; Save R4..R11
#ifdef RTX_STACK_CHECK
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
SVC_ContextSwitch
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
#if (FPU_USED != 0)
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
#endif
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if (FPU_USED != 0)
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
#else
B SVC_ContextRestore ; Branch to context restore handling
#endif
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STM R12,{R4-R11} ; Save R4..R11
#else
STMDB R12!,{R4-R11} ; Save R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
#endif
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#else
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
SVC_Exit
BX LR ; Exit from handler
@ -112,8 +197,8 @@ PendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -123,8 +208,8 @@ SysTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
END
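
SVC_User above bounds-checks the request against entry 0 of osRtxUserSVC and then indexes the table by SVC number (LDR R12,[R2,R1,LSL #2]). A plausible table layout matching that lookup; treat the shape as an assumption, not a documented API:

#include <stdint.h>

// Hypothetical user SVC functions (names ours).
extern uint32_t user_svc1 (uint32_t a, uint32_t b, uint32_t c, uint32_t d);
extern uint32_t user_svc2 (uint32_t a, uint32_t b, uint32_t c, uint32_t d);

// Entry 0 holds the highest valid SVC number; entry n handles SVC n.
void * const osRtxUserSVC[3] = {
  (void *)2,
  (void *)user_svc1,
  (void *)user_svc2
};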

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,12 +18,17 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: ARMv8M Mainline Exception handlers
; * Title: ARMv8-M Mainline Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_armv8mml.s
#include "rtx_def.h"
#ifndef DOMAIN_NS
#define DOMAIN_NS 0
#endif
@ -46,6 +51,11 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
@ -63,10 +73,14 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#if (DOMAIN_NS == 1)
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
#if (DOMAIN_NS != 0)
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -75,7 +89,7 @@ SVC_Handler
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CMP R1,#0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
@ -85,79 +99,131 @@ SVC_Handler
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
#if ((FPU_USED == 1) || (MVE_USED == 1))
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
TST LR,#0x10 ; Check if extended stack frame
BNE SVC_ContextSwitch
LDR R1,=0xE000EF34 ; FPCCR Address
LDR R0,[R1] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state)
STR R0,[R1] ; Store FPCCR
B SVC_ContextSwitch
#else
CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted
#endif
SVC_ContextSave
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN
#endif
SVC_ContextSave1
MRS R0,PSP ; Get PSP
STMDB R0!,{R4-R11} ; Save R4..R11
#if ((FPU_USED == 1) || (MVE_USED == 1))
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} ; Save VFP S16..S31
#endif
SVC_ContextSave2
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information
SVC_ContextSwitch
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if ((FPU_USED != 0) || (MVE_USED != 0))
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
#else
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
#endif
SVC_ContextSave
#if (DOMAIN_NS != 0)
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context
PUSH {R1,R2,R12,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R12,LR} ; Restore registers and EXC_RETURN
#endif
SVC_ContextSave_NS
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextSaveSP ; Branch if secure
#endif
#ifdef RTX_STACK_CHECK
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
#endif
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
#if ((FPU_USED != 0) || (MVE_USED != 0))
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
#endif
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if ((FPU_USED != 0) || (MVE_USED != 0))
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
#else
B SVC_ContextRestore ; Branch to context restore handling
#endif
SVC_ContextSaveRegs
LDRB LR,[R1,#TCB_SF_OFS] ; Load stack frame information
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextRestore ; Branch if secure
#endif
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STM R12,{R4-R11} ; Save R4..R11
#else
STMDB R12!,{R4-R11} ; Save R4..R11
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
SVC_ContextSaveSP
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
SVC_ContextRestore
#if (DOMAIN_NS == 1)
#if (DOMAIN_NS != 0)
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context
CBZ R0,SVC_ContextRestore_NS; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
#endif
SVC_ContextRestore1
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
MSR PSPLIM,R0 ; Set PSPLIM
SVC_ContextRestore_NS
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
LDR R1,[R2,#TCB_SM_OFS] ; Load stack memory base
MSR PSPLIM,R1 ; Set PSPLIM
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
#if (DOMAIN_NS == 1)
#if (DOMAIN_NS != 0)
TST LR,#0x40 ; Check domain of interrupted thread
BNE SVC_ContextRestore2 ; Branch if secure
#endif
BNE SVC_ContextRestoreSP ; Branch if secure
#endif
#if ((FPU_USED == 1) || (MVE_USED == 1))
TST LR,#0x10 ; Check if extended stack frame
IT EQ
#if ((FPU_USED != 0) || (MVE_USED != 0))
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
SVC_ContextRestore2
SVC_ContextRestoreSP
MSR PSP,R0 ; Set PSP
SVC_Exit
@ -186,7 +252,8 @@ PendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
B Sys_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -196,87 +263,8 @@ SysTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
B Sys_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
Sys_Context
EXPORT Sys_Context
IMPORT osRtxInfo
#if (DOMAIN_NS == 1)
IMPORT TZ_LoadContext_S
IMPORT TZ_StoreContext_S
#endif
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
Sys_ContextSave
#if (DOMAIN_NS == 1)
LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context
PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN
BL TZ_StoreContext_S ; Store secure context
POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN
Sys_ContextSave1
TST LR,#0x40 ; Check domain of interrupted thread
IT NE
MRSNE R0,PSP ; Get PSP
BNE Sys_ContextSave3 ; Branch if secure
#endif
Sys_ContextSave2
MRS R0,PSP ; Get PSP
STMDB R0!,{R4-R11} ; Save R4..R11
#if ((FPU_USED == 1) || (MVE_USED == 1))
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R0!,{S16-S31} ; Save VFP S16..S31
#endif
Sys_ContextSave3
STR R0,[R1,#TCB_SP_OFS] ; Store SP
STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information
Sys_ContextSwitch
STR R2,[R3] ; osRtxInfo.run: curr = next
Sys_ContextRestore
#if (DOMAIN_NS == 1)
LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier
CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context
PUSH {R2,R3} ; Save registers
BL TZ_LoadContext_S ; Load secure context
POP {R2,R3} ; Restore registers
#endif
Sys_ContextRestore1
LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
MSR PSPLIM,R0 ; Set PSPLIM
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
#if (DOMAIN_NS == 1)
TST LR,#0x40 ; Check domain of interrupted thread
BNE Sys_ContextRestore2 ; Branch if secure
#endif
#if ((FPU_USED == 1) || (MVE_USED == 1))
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
Sys_ContextRestore2
MSR PSP,R0 ; Set PSP
Sys_ContextExit
BX LR ; Exit from handler
END
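
SVC_FP_LazyState handles a corner case: if the running thread was deleted while a lazy floating-point save was still pending, the handler clears FPCCR.LSPACT so the hardware does not later try to complete that save into a dead thread's stack. The register-level operation, sketched in C:

#include <stdint.h>

#define FPCCR (*(volatile uint32_t *)0xE000EF34UL)  // FP Context Control

// Clear LSPACT (bit 0): abandon the pending lazy FP state preservation.
static inline void abandon_lazy_fp_state (void) {
  FPCCR &= ~1UL;
}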

View File

@ -1,5 +1,5 @@
;/*
; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
; *
; * SPDX-License-Identifier: Apache-2.0
; *
@ -18,19 +18,32 @@
; * -----------------------------------------------------------------------------
; *
; * Project: CMSIS-RTOS RTX
; * Title: Cortex-M4F Exception handlers
; * Title: ARMv7-M Exception handlers
; *
; * -----------------------------------------------------------------------------
; */
NAME irq_cm4f.s
NAME irq_armv7m.s
#include "rtx_def.h"
#ifdef __ARMVFP__
FPU_USED EQU 1
#else
FPU_USED EQU 0
#endif
I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset
TCB_SP_OFS EQU 56 ; TCB.SP offset
TCB_SF_OFS EQU 34 ; TCB.stack_frame offset
FPCCR EQU 0xE000EF34 ; FPCCR Address
osRtxErrorStackOverflow\
EQU 1 ; Stack overflow
PRESERVE8
SECTION .rodata:DATA:NOROOT(2)
@ -48,6 +61,10 @@ SVC_Handler
EXPORT SVC_Handler
IMPORT osRtxUserSVC
IMPORT osRtxInfo
#ifdef RTX_STACK_CHECK
IMPORT osRtxThreadStackCheck
IMPORT osRtxKernelErrorNotify
#endif
TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2
ITE EQ
@ -56,7 +73,8 @@ SVC_Handler
LDR R1,[R0,#24] ; Load saved PC from stack
LDRB R1,[R1,#-2] ; Load SVC number
CBNZ R1,SVC_User ; Branch if not SVC 0
CMP R1,#0 ; Check SVC number
BNE SVC_User ; Branch if not SVC 0
PUSH {R0,LR} ; Save SP and EXC_RETURN
LDM R0,{R0-R3,R12} ; Load function parameters and address from stack
@ -65,47 +83,91 @@ SVC_Handler
STM R12,{R0-R1} ; Store function return values
SVC_Context
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next
CMP R1,R2 ; Check if thread switch is required
IT EQ
BXEQ LR ; Exit when threads are the same
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
TST LR,#0x10 ; Check if extended stack frame
BNE SVC_ContextSwitch
#ifdef __FPU_PRESENT
LDR R1,=0xE000EF34 ; FPCCR Address
LDR R0,[R1] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state)
STR R0,[R1] ; Store FPCCR
B SVC_ContextSwitch
#endif
SVC_ContextSave
STMDB R12!,{R4-R11} ; Save R4..R11
#ifdef __FPU_PRESENT
TST LR,#0x10 ; Check if extended stack frame
IT EQ
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
SVC_ContextSwitch
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
SVC_ContextRestore
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
#if (FPU_USED != 0)
CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted
SVC_FP_LazyState
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
BNE SVC_ContextRestore ; Branch if not extended stack frame
LDR R3,=FPCCR ; FPCCR Address
LDR R0,[R3] ; Load FPCCR
BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation)
STR R0,[R3] ; Store FPCCR
B SVC_ContextRestore ; Branch to context restore handling
#else
CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted
#endif
#ifdef __FPU_PRESENT
TST LR,#0x10 ; Check if extended stack frame
IT EQ
SVC_ContextSave
#ifdef RTX_STACK_CHECK
SUB R12,R12,#32 ; Calculate SP: space for R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
SUBEQ R12,R12,#64 ; Additional space for S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
BL osRtxThreadStackCheck ; Check if thread stack is overrun
POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
#if (FPU_USED != 0)
MOV R4,R1 ; Save osRtxInfo.thread.run.curr
#endif
MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
STR R2,[R3] ; osRtxInfo.thread.run: curr = next
#if (FPU_USED != 0)
LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
B SVC_FP_LazyState ; Branch to FP lazy state handling
#else
B SVC_ContextRestore ; Branch to context restore handling
#endif
SVC_ContextSaveRegs
LDR R12,[R1,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
#endif
STM R12,{R4-R11} ; Save R4..R11
#else
STMDB R12!,{R4-R11} ; Save R4..R11
#if (FPU_USED != 0)
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
#endif
STR R12,[R1,#TCB_SP_OFS] ; Store SP
#endif
SVC_ContextRestore
LDR R0,[R2,#TCB_SP_OFS] ; Load SP
#if (FPU_USED != 0)
LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
ORN LR,R1,#0xFF ; Set EXC_RETURN
TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
IT EQ ; If extended stack frame
VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
#endif
#else
MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
#endif
LDMIA R0!,{R4-R11} ; Restore R4..R11
MSR PSP,R0 ; Set PSP
@ -135,8 +197,8 @@ PendSV_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
SysTick_Handler
@ -146,8 +208,8 @@ SysTick_Handler
PUSH {R0,LR} ; Save EXC_RETURN
BL osRtxTick_Handler ; Call osRtxTick_Handler
POP {R0,LR} ; Restore EXC_RETURN
MRS R12,PSP
B SVC_Context
MRS R12,PSP ; Save PSP to R12
B SVC_Context ; Branch to context handling
END
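
The restore paths above now build EXC_RETURN with ORN LR,R1,#0xFF rather than ORR LR,R1,#0xFFFFFF00; both produce R1 | 0xFFFFFF00, the ORN form simply encoding the complemented immediate directly. The identity spelled out in C:

#include <stdint.h>

// orn lr,r1,#0xFF computes r1 | ~0xFF, i.e. r1 | 0xFFFFFF00: the stored
// stack-frame byte widened back into a full EXC_RETURN value.
static inline uint32_t make_exc_return (uint8_t frame_info) {
  return (uint32_t)frame_info | ~(uint32_t)0xFFU;
}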

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,9 @@
#define RTX_CORE_C_H_
//lint -emacro((923,9078),SCB) "cast from unsigned long to pointer" [MISRA Note 9]
#ifndef RTE_COMPONENTS_H
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
#if ((!defined(__ARM_ARCH_6M__)) && \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,9 @@
#define RTX_CORE_CA_H_
#ifndef RTX_CORE_C_H_
#ifndef RTE_COMPONENTS_H
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
#endif
@ -158,9 +160,9 @@ __STATIC_INLINE bool_t IsPrivileged (void) {
return (__get_mode() != CPSR_MODE_USER);
}
/// Check if in IRQ Mode
/// \return true=IRQ, false=thread
__STATIC_INLINE bool_t IsIrqMode (void) {
/// Check if in Exception
/// \return true=exception, false=thread
__STATIC_INLINE bool_t IsException (void) {
return ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM));
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,9 @@
#define RTX_CORE_CM_H_
#ifndef RTX_CORE_C_H_
#ifndef RTE_COMPONENTS_H
#include "RTE_Components.h"
#endif
#include CMSIS_device_header
#endif
@ -35,15 +37,11 @@
typedef bool bool_t;
#ifndef FALSE
#define FALSE (0)
#define FALSE ((bool_t)0)
#endif
#ifndef TRUE
#define TRUE (1)
#endif
#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
#define DOMAIN_NS 1
#define TRUE ((bool_t)1)
#endif
#ifndef DOMAIN_NS
@ -120,9 +118,9 @@ __STATIC_INLINE bool_t IsPrivileged (void) {
return ((__get_CONTROL() & 1U) == 0U);
}
/// Check if in IRQ Mode
/// \return true=IRQ, false=thread
__STATIC_INLINE bool_t IsIrqMode (void) {
/// Check if in Exception
/// \return true=exception, false=thread
__STATIC_INLINE bool_t IsException (void) {
return (__get_IPSR() != 0U);
}
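
The rename from IsIrqMode to IsException matches what the test measures: on Cortex-M the IPSR reads zero only in Thread mode and otherwise holds the active exception number, so any non-zero value means some handler (SysTick, PendSV, SVC or an interrupt) is executing. A standalone sketch of the same idea, assuming the CMSIS-Core __get_IPSR() intrinsic:

#include <stdint.h>

uint32_t __get_IPSR (void);              /* normally an inline CMSIS-Core intrinsic */

static inline int in_handler_context (void) {
  return (__get_IPSR() != 0U);           /* 0 = Thread mode, else exception number */
}
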
@ -209,58 +207,58 @@ __STATIC_INLINE void SetPendSV (void) {
(defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \
(defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \
(defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0)))
#define __SVC_INDIRECT(n) __svc_indirect(n)
#define SVC_INDIRECT(n) __svc_indirect(n)
#elif ((defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0)) || \
(defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)))
#define __SVC_INDIRECT(n) __svc_indirect_r7(n)
#define SVC_INDIRECT(n) __svc_indirect_r7(n)
#endif
#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
__STATIC_INLINE t __svc##f (void) { \
svc##f(svcRtx##f); \
}
#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (t(*)()); \
SVC_INDIRECT(0) t svc##f (t(*)()); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
__STATIC_INLINE t __svc##f (void) { \
return svc##f(svcRtx##f); \
}
#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
__STATIC_INLINE t __svc##f (t1 a1) { \
svc##f(svcRtx##f,a1); \
}
#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
__STATIC_INLINE t __svc##f (t1 a1) { \
return svc##f(svcRtx##f,a1); \
}
#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \
SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
return svc##f(svcRtx##f,a1,a2); \
}
#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \
SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
return svc##f(svcRtx##f,a1,a2,a3); \
}
#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
return svc##f(svcRtx##f,a1,a2,a3,a4); \
}
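
For orientation, hand-expanding one of these generators shows the shape of the result. Using made-up names Demo/svcRtxDemo (not taken from the sources), SVC0_1(Demo, osStatus_t, uint32_t) under the Armv7-M branch produces roughly:

/* SVC_INDIRECT and __STATIC_INLINE come from the surrounding header. */
SVC_INDIRECT(0) osStatus_t svcDemo (osStatus_t (*)(uint32_t), uint32_t);

__attribute__((always_inline))
__STATIC_INLINE osStatus_t __svcDemo (uint32_t a1) {
  /* Traps via SVC #0; the SVC handler dispatches to svcRtxDemo(a1) in handler mode. */
  return svcDemo(svcRtxDemo, a1);
}
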
@ -285,60 +283,60 @@ __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
#endif
#define STRINGIFY(a) #a
#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi
#define SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi
#define SVC0_0N(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
__STATIC_INLINE t __svc##f (void) { \
SVC_ArgF(svcRtx##f); \
svc##f(); \
}
#define SVC0_0(f,t) \
__SVC_INDIRECT(0) t svc##f (); \
SVC_INDIRECT(0) t svc##f (); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (void) { \
__STATIC_INLINE t __svc##f (void) { \
SVC_ArgF(svcRtx##f); \
return svc##f(); \
}
#define SVC0_1N(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
__STATIC_INLINE t __svc##f (t1 a1) { \
SVC_ArgF(svcRtx##f); \
svc##f(a1); \
}
#define SVC0_1(f,t,t1) \
__SVC_INDIRECT(0) t svc##f (t1 a1); \
SVC_INDIRECT(0) t svc##f (t1 a1); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1) { \
__STATIC_INLINE t __svc##f (t1 a1) { \
SVC_ArgF(svcRtx##f); \
return svc##f(a1); \
}
#define SVC0_2(f,t,t1,t2) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \
SVC_ArgF(svcRtx##f); \
return svc##f(a1,a2); \
}
#define SVC0_3(f,t,t1,t2,t3) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \
SVC_ArgF(svcRtx##f); \
return svc##f(a1,a2,a3); \
}
#define SVC0_4(f,t,t1,t2,t3,t4) \
__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \
__attribute__((always_inline)) \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \
SVC_ArgF(svcRtx##f); \
return svc##f(a1,a2,a3,a4); \
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -31,21 +31,29 @@
/// Wait for Timeout (Time Delay).
/// \note API identical to osDelay
static osStatus_t svcRtxDelay (uint32_t ticks) {
osStatus_t status;
if (ticks != 0U) {
if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
EvrRtxDelayStarted(ticks);
} else {
EvrRtxDelayCompleted(osRtxThreadGetRunning());
}
if (ticks == 0U) {
EvrRtxDelayError((int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return osErrorParameter;
}
return osOK;
if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
EvrRtxDelayStarted(ticks);
status = osOK;
} else {
EvrRtxDelayError((int32_t)osError);
status = osError;
}
return status;
}
/// Wait until specified time.
/// \note API identical to osDelayUntil
static osStatus_t svcRtxDelayUntil (uint32_t ticks) {
osStatus_t status;
ticks -= osRtxInfo.kernel.tick;
if ((ticks == 0U) || (ticks > 0x7FFFFFFFU)) {
@ -56,11 +64,13 @@ static osStatus_t svcRtxDelayUntil (uint32_t ticks) {
if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) {
EvrRtxDelayUntilStarted(ticks);
status = osOK;
} else {
EvrRtxDelayCompleted(osRtxThreadGetRunning());
EvrRtxDelayError((int32_t)osError);
status = osError;
}
return osOK;
return status;
}
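
The reworked delay calls now fail with osError when the wait cannot be entered instead of silently returning osOK. Typical use from application code (a sketch; work() is an assumed application function):

#include "cmsis_os2.h"

extern void work (void);

/* Drift-free periodic thread: osDelayUntil targets an absolute tick,
   so the execution time of work() does not accumulate as drift. */
void periodic_thread (void *argument) {
  uint32_t next = osKernelGetTickCount();
  (void)argument;
  for (;;) {
    next += 100U;                /* period: 100 kernel ticks */
    work();
    (void)osDelayUntil(next);
  }
}
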
// Service Calls definitions
@ -77,7 +87,7 @@ osStatus_t osDelay (uint32_t ticks) {
osStatus_t status;
EvrRtxDelay(ticks);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxDelayError((int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -91,7 +101,7 @@ osStatus_t osDelayUntil (uint32_t ticks) {
osStatus_t status;
EvrRtxDelayUntil(ticks);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxDelayError((int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxEventFlagsMemUsage \
__attribute__((section(".data.os.evflags.obj"))) =
{ 0U, 0U, 0U };
@ -200,7 +200,7 @@ static osEventFlagsId_t svcRtxEventFlagsNew (const osEventFlagsAttr_t *attr) {
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
ef = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_event_flags_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (ef != NULL) {
uint32_t used;
osRtxEventFlagsMemUsage.cnt_alloc++;
@ -313,7 +313,7 @@ static uint32_t svcRtxEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
event_flags = EventFlagsClear(ef, flags);
EvrRtxEventFlagsClearDone(ef, event_flags);
return event_flags;
}
@ -409,7 +409,7 @@ static osStatus_t svcRtxEventFlagsDelete (osEventFlagsId_t ef_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, ef);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxEventFlagsMemUsage.cnt_free++;
#endif
}
@ -494,7 +494,7 @@ osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
osEventFlagsId_t ef_id;
EvrRtxEventFlagsNew(attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxEventFlagsError(NULL, (int32_t)osErrorISR);
ef_id = NULL;
} else {
@ -507,7 +507,7 @@ osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) {
const char *osEventFlagsGetName (osEventFlagsId_t ef_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxEventFlagsGetName(ef_id, NULL);
name = NULL;
} else {
@ -521,7 +521,7 @@ uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) {
uint32_t event_flags;
EvrRtxEventFlagsSet(ef_id, flags);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
event_flags = isrRtxEventFlagsSet(ef_id, flags);
} else {
event_flags = __svcEventFlagsSet(ef_id, flags);
@ -534,7 +534,7 @@ uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
uint32_t event_flags;
EvrRtxEventFlagsClear(ef_id, flags);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
event_flags = svcRtxEventFlagsClear(ef_id, flags);
} else {
event_flags = __svcEventFlagsClear(ef_id, flags);
@ -546,7 +546,7 @@ uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) {
uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) {
uint32_t event_flags;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
event_flags = svcRtxEventFlagsGet(ef_id);
} else {
event_flags = __svcEventFlagsGet(ef_id);
@ -559,7 +559,7 @@ uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t opti
uint32_t event_flags;
EvrRtxEventFlagsWait(ef_id, flags, options, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
event_flags = isrRtxEventFlagsWait(ef_id, flags, options, timeout);
} else {
event_flags = __svcEventFlagsWait(ef_id, flags, options, timeout);
@ -572,7 +572,7 @@ osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) {
osStatus_t status;
EvrRtxEventFlagsDelete(ef_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxEventFlagsError(ef_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -63,6 +63,7 @@
#define EvtRtxKernelGetTickFreq EventID(EventLevelAPI, EvtRtxKernelNo, 0x14U)
#define EvtRtxKernelGetSysTimerCount EventID(EventLevelAPI, EvtRtxKernelNo, 0x15U)
#define EvtRtxKernelGetSysTimerFreq EventID(EventLevelAPI, EvtRtxKernelNo, 0x16U)
#define EvtRtxKernelErrorNotify EventID(EventLevelError, EvtRtxKernelNo, 0x19U)
/// Event IDs for "RTX Thread"
#define EvtRtxThreadError EventID(EventLevelError, EvtRtxThreadNo, 0x00U)
@ -531,6 +532,17 @@ __WEAK void EvrRtxKernelGetSysTimerFreq (uint32_t freq) {
}
#endif
#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_ERROR_NOTIFY_DISABLE))
__WEAK void EvrRtxKernelErrorNotify (uint32_t code, void *object_id) {
#if defined(RTE_Compiler_EventRecorder)
(void)EventRecord2(EvtRtxKernelErrorNotify, code, (uint32_t)object_id);
#else
(void)code;
(void)object_id;
#endif
}
#endif
// ==== Thread Events ====

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -62,6 +62,31 @@ static void KernelUnblock (void) {
OS_Tick_Enable();
}
// Get Kernel sleep time
static uint32_t GetKernelSleepTime (void) {
const os_thread_t *thread;
const os_timer_t *timer;
uint32_t delay;
delay = osWaitForever;
// Check Thread Delay list
thread = osRtxInfo.thread.delay_list;
if (thread != NULL) {
delay = thread->delay;
}
// Check Active Timer list
timer = osRtxInfo.timer.list;
if (timer != NULL) {
if (timer->tick < delay) {
delay = timer->tick;
}
}
return delay;
}
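
GetKernelSleepTime factors out the next-wakeup calculation used by osKernelSuspend. The usual tickless-idle pattern built on that pair (a sketch; lp_sleep() is an assumed platform call that sleeps and returns the ticks actually spent asleep):

#include "cmsis_os2.h"

extern uint32_t lp_sleep (uint32_t max_ticks);

void idle_low_power (void) {
  /* Ticks until the next thread delay or timer expiry (osWaitForever if none). */
  uint32_t ticks = osKernelSuspend();
  uint32_t slept = 0U;
  if (ticks > 0U) {
    slept = lp_sleep(ticks);
  }
  osKernelResume(slept);         /* replays the kernel ticks missed while asleep */
}
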
// ==== Service Calls ====
@ -90,7 +115,6 @@ static osStatus_t svcRtxKernelInitialize (void) {
#endif
// Initialize osRtxInfo
memset(&osRtxInfo.kernel, 0, sizeof(osRtxInfo) - offsetof(osRtxInfo_t, kernel));
osRtxInfo.isr_queue.data = osRtxConfig.isr_queue.data;
osRtxInfo.isr_queue.max = osRtxConfig.isr_queue.max;
@ -198,7 +222,7 @@ static osStatus_t svcRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint3
} else {
size = id_size;
}
memcpy(id_buf, osRtxKernelId, size);
(void)memcpy(id_buf, osRtxKernelId, size);
}
EvrRtxKernelInfoRetrieved(version, id_buf, id_size);
@ -287,7 +311,7 @@ static int32_t svcRtxKernelLock (void) {
}
return lock;
}
/// Unlock the RTOS Kernel scheduler.
/// \note API identical to osKernelUnlock
static int32_t svcRtxKernelUnlock (void) {
@ -347,9 +371,7 @@ static int32_t svcRtxKernelRestoreLock (int32_t lock) {
/// Suspend the RTOS Kernel scheduler.
/// \note API identical to osKernelSuspend
static uint32_t svcRtxKernelSuspend (void) {
const os_thread_t *thread;
const os_timer_t *timer;
uint32_t delay;
uint32_t delay;
if (osRtxInfo.kernel.state != osRtxKernelRunning) {
EvrRtxKernelError(osRtxErrorKernelNotRunning);
@ -359,24 +381,10 @@ static uint32_t svcRtxKernelSuspend (void) {
KernelBlock();
delay = osWaitForever;
// Check Thread Delay list
thread = osRtxInfo.thread.delay_list;
if (thread != NULL) {
delay = thread->delay;
}
// Check Active Timer list
timer = osRtxInfo.timer.list;
if (timer != NULL) {
if (timer->tick < delay) {
delay = timer->tick;
}
}
osRtxInfo.kernel.state = osRtxKernelSuspended;
delay = GetKernelSleepTime();
EvrRtxKernelSuspended(delay);
return delay;
@ -388,7 +396,7 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) {
os_thread_t *thread;
os_timer_t *timer;
uint32_t delay;
uint32_t ticks;
uint32_t ticks, kernel_tick;
if (osRtxInfo.kernel.state != osRtxKernelSuspended) {
EvrRtxKernelResumed();
@ -396,40 +404,38 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) {
return;
}
osRtxInfo.kernel.tick += sleep_ticks;
// Process Thread Delay list
thread = osRtxInfo.thread.delay_list;
if (thread != NULL) {
delay = sleep_ticks;
do {
if (delay >= thread->delay) {
delay -= thread->delay;
thread->delay = 1U;
osRtxThreadDelayTick();
thread = osRtxInfo.thread.delay_list;
} else {
thread->delay -= delay;
delay = 0U;
}
} while ((thread != NULL) && (delay != 0U));
delay = GetKernelSleepTime();
if (sleep_ticks >= delay) {
ticks = delay - 1U;
} else {
ticks = sleep_ticks;
}
// Process Active Timer list
// Update Thread Delay sleep ticks
thread = osRtxInfo.thread.delay_list;
if (thread != NULL) {
thread->delay -= ticks;
}
// Update Timer sleep ticks
timer = osRtxInfo.timer.list;
if (timer != NULL) {
ticks = sleep_ticks;
do {
if (ticks >= timer->tick) {
ticks -= timer->tick;
timer->tick = 1U;
osRtxInfo.timer.tick();
timer = osRtxInfo.timer.list;
} else {
timer->tick -= ticks;
ticks = 0U;
}
} while ((timer != NULL) && (ticks != 0U));
timer->tick -= ticks;
}
kernel_tick = osRtxInfo.kernel.tick + sleep_ticks;
osRtxInfo.kernel.tick += ticks;
while (osRtxInfo.kernel.tick != kernel_tick) {
osRtxInfo.kernel.tick++;
// Process Thread Delays
osRtxThreadDelayTick();
// Process Timers
if (osRtxInfo.timer.tick != NULL) {
osRtxInfo.timer.tick();
}
}
osRtxInfo.kernel.state = osRtxKernelRunning;
@ -507,6 +513,13 @@ SVC0_0 (KernelGetSysTimerFreq, uint32_t)
__WEAK void osRtxKernelPreInit (void) {
}
/// RTOS Kernel Error Notification Handler
/// \note API identical to osRtxErrorNotify
uint32_t osRtxKernelErrorNotify (uint32_t code, void *object_id) {
EvrRtxKernelErrorNotify(code, object_id);
return osRtxErrorNotify(code, object_id);
}
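
osRtxKernelErrorNotify wraps the user-overridable hook so an Event Recorder entry is always emitted before the application sees the error. Overriding still works as before; a minimal sketch that treats stack overflow as fatal:

#include "rtx_os.h"

/* Application override of the weak RTX error hook. */
uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
  if (code == osRtxErrorStackOverflow) {
    /* object_id is the osThreadId_t of the offending thread. */
    for (;;) {}                  /* state is no longer trustworthy: park here */
  }
  return 0U;
}
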
// ==== Public API ====
@ -516,7 +529,7 @@ osStatus_t osKernelInitialize (void) {
osRtxKernelPreInit();
EvrRtxKernelInitialize();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -530,7 +543,7 @@ osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size
osStatus_t status;
EvrRtxKernelGetInfo(version, id_buf, id_size);
if (IsIrqMode() || IsIrqMasked() || IsPrivileged()) {
if (IsException() || IsIrqMasked() || IsPrivileged()) {
status = svcRtxKernelGetInfo(version, id_buf, id_size);
} else {
status = __svcKernelGetInfo(version, id_buf, id_size);
@ -542,7 +555,7 @@ osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size
osKernelState_t osKernelGetState (void) {
osKernelState_t state;
if (IsIrqMode() || IsIrqMasked() || IsPrivileged()) {
if (IsException() || IsIrqMasked() || IsPrivileged()) {
state = svcRtxKernelGetState();
} else {
state = __svcKernelGetState();
@ -555,7 +568,7 @@ osStatus_t osKernelStart (void) {
osStatus_t status;
EvrRtxKernelStart();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -569,7 +582,7 @@ int32_t osKernelLock (void) {
int32_t lock;
EvrRtxKernelLock();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
lock = (int32_t)osErrorISR;
} else {
@ -577,13 +590,13 @@ int32_t osKernelLock (void) {
}
return lock;
}
/// Unlock the RTOS Kernel scheduler.
int32_t osKernelUnlock (void) {
int32_t lock;
EvrRtxKernelUnlock();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
lock = (int32_t)osErrorISR;
} else {
@ -597,7 +610,7 @@ int32_t osKernelRestoreLock (int32_t lock) {
int32_t lock_new;
EvrRtxKernelRestoreLock(lock);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
lock_new = (int32_t)osErrorISR;
} else {
@ -611,7 +624,7 @@ uint32_t osKernelSuspend (void) {
uint32_t ticks;
EvrRtxKernelSuspend();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
ticks = 0U;
} else {
@ -624,7 +637,7 @@ uint32_t osKernelSuspend (void) {
void osKernelResume (uint32_t sleep_ticks) {
EvrRtxKernelResume(sleep_ticks);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxKernelError((int32_t)osErrorISR);
} else {
__svcKernelResume(sleep_ticks);
@ -635,7 +648,7 @@ void osKernelResume (uint32_t sleep_ticks) {
uint32_t osKernelGetTickCount (void) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
count = svcRtxKernelGetTickCount();
} else {
count = __svcKernelGetTickCount();
@ -647,7 +660,7 @@ uint32_t osKernelGetTickCount (void) {
uint32_t osKernelGetTickFreq (void) {
uint32_t freq;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
freq = svcRtxKernelGetTickFreq();
} else {
freq = __svcKernelGetTickFreq();
@ -659,7 +672,7 @@ uint32_t osKernelGetTickFreq (void) {
uint32_t osKernelGetSysTimerCount (void) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
count = svcRtxKernelGetSysTimerCount();
} else {
count = __svcKernelGetSysTimerCount();
@ -671,7 +684,7 @@ uint32_t osKernelGetSysTimerCount (void) {
uint32_t osKernelGetSysTimerFreq (void) {
uint32_t freq;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
freq = svcRtxKernelGetSysTimerFreq();
} else {
freq = __svcKernelGetSysTimerFreq();

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -24,7 +24,6 @@
*/
#include "cmsis_compiler.h"
#include "RTX_Config.h"
#include "rtx_os.h"
#ifdef RTE_Compiler_EventRecorder
@ -87,7 +86,7 @@ __attribute__((section(".bss.os.thread.cb")));
// Thread Default Stack
#if (OS_THREAD_DEF_STACK_NUM != 0)
static uint64_t os_thread_def_stack[OS_THREAD_DEF_STACK_NUM*(OS_STACK_SIZE/8)] \
static uint64_t os_thread_def_stack[(OS_THREAD_DEF_STACK_NUM*OS_STACK_SIZE)/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
@ -105,32 +104,21 @@ __attribute__((section(".data.os.thread.mpi"))) =
// Memory Pool for Thread Stack
#if (OS_THREAD_USER_STACK_SIZE != 0)
static uint64_t os_thread_stack[2 + OS_THREAD_NUM + (OS_THREAD_USER_STACK_SIZE/8)] \
static uint64_t os_thread_stack[(16 + (8*OS_THREAD_NUM) + OS_THREAD_USER_STACK_SIZE)/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
#endif // (OS_THREAD_OBJ_MEM != 0)
// Stack overrun checking
#if (OS_STACK_CHECK == 0)
// Override library function
extern void osRtxThreadStackCheck (void);
void osRtxThreadStackCheck (void) {}
#endif
// Idle Thread Control Block
static osRtxThread_t os_idle_thread_cb \
__attribute__((section(".bss.os.thread.cb")));
// Idle Thread Stack
#if defined (__CC_ARM)
static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8];
#else
static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
__attribute__((section(".bss.os.thread.idle.stack")));
// Idle Thread Attributes
static const osThreadAttr_t os_idle_thread_attr = {
#if defined(OS_IDLE_THREAD_NAME)
@ -184,13 +172,9 @@ __attribute__((section(".data.os.timer.mpi"))) =
static osRtxThread_t os_timer_thread_cb \
__attribute__((section(".bss.os.thread.cb")));
#if defined (__CC_ARM)
static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8];
#else
// Timer Thread Stack
static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8] \
__attribute__((section(".bss.os.thread.stack")));
#endif
__attribute__((section(".bss.os.thread.timer.stack")));
// Timer Thread Attributes
static const osThreadAttr_t os_timer_thread_attr = {
@ -232,10 +216,8 @@ static const osMessageQueueAttr_t os_timer_mq_attr = {
(uint32_t)sizeof(os_timer_mq_data)
};
#else
extern void osRtxTimerThread (void *argument);
void osRtxTimerThread (void *argument) { (void)argument; }
extern int32_t osRtxTimerSetup (void);
extern void osRtxTimerThread (void *argument);
#endif // ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0))
@ -326,7 +308,7 @@ __attribute__((section(".data.os.mempool.mpi"))) =
#if ((OS_MEMPOOL_DATA_SIZE % 8) != 0)
#error "Invalid Data Memory size for Memory Pools!"
#endif
static uint64_t os_mp_data[2 + OS_MEMPOOL_NUM + (OS_MEMPOOL_DATA_SIZE/8)] \
static uint64_t os_mp_data[(16 + (8*OS_MEMPOOL_NUM) + OS_MEMPOOL_DATA_SIZE)/8] \
__attribute__((section(".bss.os.mempool.mem")));
#endif
@ -356,7 +338,7 @@ __attribute__((section(".data.os.msgqueue.mpi"))) =
#if ((OS_MSGQUEUE_DATA_SIZE % 8) != 0)
#error "Invalid Data Memory size for Message Queues!"
#endif
static uint64_t os_mq_data[2 + OS_MSGQUEUE_NUM + (OS_MSGQUEUE_DATA_SIZE/8)] \
static uint64_t os_mq_data[(16 + ((8+12)*OS_MSGQUEUE_NUM) + OS_MSGQUEUE_DATA_SIZE + 7)/8] \
__attribute__((section(".bss.os.msgqueue.mem")));
#endif
@ -368,69 +350,57 @@ __attribute__((section(".bss.os.msgqueue.mem")));
#if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0))
// Initial Thread configuration also covers Thread Flags and Generic Wait
#if defined(OS_EVR_THREAD_FILTER)
#if !defined(OS_EVR_THFLAGS_FILTER)
#define OS_EVR_THFLAGS_FILTER OS_EVR_THREAD_FILTER
#endif
#if !defined(OS_EVR_WAIT_FILTER)
#define OS_EVR_WAIT_FILTER OS_EVR_THREAD_FILTER
#endif
#endif
// Migrate initial filter configuration
#if defined(OS_EVR_MEMORY_FILTER)
#define OS_EVR_MEMORY_LEVEL (((OS_EVR_MEMORY_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMORY_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_KERNEL_FILTER)
#define OS_EVR_KERNEL_LEVEL (((OS_EVR_KERNEL_FILTER & 0x80U) != 0U) ? (OS_EVR_KERNEL_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_THREAD_FILTER)
#define OS_EVR_THREAD_LEVEL (((OS_EVR_THREAD_FILTER & 0x80U) != 0U) ? (OS_EVR_THREAD_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_WAIT_FILTER)
#define OS_EVR_WAIT_LEVEL (((OS_EVR_WAIT_FILTER & 0x80U) != 0U) ? (OS_EVR_WAIT_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_THFLAGS_FILTER)
#define OS_EVR_THFLAGS_LEVEL (((OS_EVR_THFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_THFLAGS_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_EVFLAGS_FILTER)
#define OS_EVR_EVFLAGS_LEVEL (((OS_EVR_EVFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_EVFLAGS_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_TIMER_FILTER)
#define OS_EVR_TIMER_LEVEL (((OS_EVR_TIMER_FILTER & 0x80U) != 0U) ? (OS_EVR_TIMER_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MUTEX_FILTER)
#define OS_EVR_MUTEX_LEVEL (((OS_EVR_MUTEX_FILTER & 0x80U) != 0U) ? (OS_EVR_MUTEX_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_SEMAPHORE_FILTER)
#define OS_EVR_SEMAPHORE_LEVEL (((OS_EVR_SEMAPHORE_FILTER & 0x80U) != 0U) ? (OS_EVR_SEMAPHORE_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MEMPOOL_FILTER)
#define OS_EVR_MEMPOOL_LEVEL (((OS_EVR_MEMPOOL_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMPOOL_FILTER & 0x0FU) : 0U)
#endif
#if defined(OS_EVR_MSGQUEUE_FILTER)
#define OS_EVR_MSGQUEUE_LEVEL (((OS_EVR_MSGQUEUE_FILTER & 0x80U) != 0U) ? (OS_EVR_MSGQUEUE_FILTER & 0x0FU) : 0U)
#endif
#if defined(RTE_Compiler_EventRecorder)
#ifdef RTE_Compiler_EventRecorder
// Event Recorder Initialize
__STATIC_INLINE void evr_initialize (void) {
(void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START);
(void)EventRecorderEnable(OS_EVR_MEMORY_LEVEL, EvtRtxMemoryNo, EvtRtxMemoryNo);
(void)EventRecorderEnable(OS_EVR_KERNEL_LEVEL, EvtRtxKernelNo, EvtRtxKernelNo);
(void)EventRecorderEnable(OS_EVR_THREAD_LEVEL, EvtRtxThreadNo, EvtRtxThreadNo);
(void)EventRecorderEnable(OS_EVR_WAIT_LEVEL, EvtRtxWaitNo, EvtRtxWaitNo);
(void)EventRecorderEnable(OS_EVR_THFLAGS_LEVEL, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
(void)EventRecorderEnable(OS_EVR_EVFLAGS_LEVEL, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
(void)EventRecorderEnable(OS_EVR_TIMER_LEVEL, EvtRtxTimerNo, EvtRtxTimerNo);
(void)EventRecorderEnable(OS_EVR_MUTEX_LEVEL, EvtRtxMutexNo, EvtRtxMutexNo);
(void)EventRecorderEnable(OS_EVR_SEMAPHORE_LEVEL, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
(void)EventRecorderEnable(OS_EVR_MEMPOOL_LEVEL, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
(void)EventRecorderEnable(OS_EVR_MSGQUEUE_LEVEL, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
#if ((OS_EVR_MEMORY_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo);
(void)EventRecorderDisable(~OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo);
#endif
#if ((OS_EVR_KERNEL_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo);
(void)EventRecorderDisable(~OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo);
#endif
#if ((OS_EVR_THREAD_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo);
(void)EventRecorderDisable(~OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo);
#endif
#if ((OS_EVR_WAIT_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo);
(void)EventRecorderDisable(~OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo);
#endif
#if ((OS_EVR_THFLAGS_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
(void)EventRecorderDisable(~OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo);
#endif
#if ((OS_EVR_EVFLAGS_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
(void)EventRecorderDisable(~OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo);
#endif
#if ((OS_EVR_TIMER_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo);
(void)EventRecorderDisable(~OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo);
#endif
#if ((OS_EVR_MUTEX_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo);
(void)EventRecorderDisable(~OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo);
#endif
#if ((OS_EVR_SEMAPHORE_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
(void)EventRecorderDisable(~OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo);
#endif
#if ((OS_EVR_MEMPOOL_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
(void)EventRecorderDisable(~OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo);
#endif
#if ((OS_EVR_MSGQUEUE_LEVEL & 0x80U) != 0U)
(void)EventRecorderEnable( OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
(void)EventRecorderDisable(~OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo);
#endif
}
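
The rewritten initializer only touches a component's recording when bit 7 of its OS_EVR_<component>_FILTER value is set, and it now explicitly disables the complemented level bits instead of leaving them as EventRecorderInitialize set them. In RTX_Config.h terms (value illustrative; bits 0..3 select Error/API/Op/Detail, bit 7 marks the filter as valid):

#define OS_EVR_THREAD_FILTER   0x83U   /* valid (0x80) + Error (0x01) + API (0x02) */
/* Components whose filter lacks the 0x80 flag keep the global OS_EVR_LEVEL setting. */
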
#else
@ -539,9 +509,13 @@ __attribute__((section(".rodata"))) =
&os_idle_thread_attr,
#if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0))
&os_timer_thread_attr,
osRtxTimerThread,
osRtxTimerSetup,
&os_timer_mq_attr,
(uint32_t)OS_TIMER_CB_QUEUE
#else
NULL,
NULL,
NULL,
NULL,
0U
@ -553,9 +527,9 @@ __attribute__((section(".rodata"))) =
//lint -esym(526,irqRtxLib) "Defined by Exception handlers"
//lint -esym(714,irqRtxLibRef) "Non weak reference"
//lint -esym(765,irqRtxLibRef) "Global scope"
extern uint8_t irqRtxLib;
extern const uint8_t *irqRtxLibRef;
const uint8_t *irqRtxLibRef = &irqRtxLib;
extern const uint8_t irqRtxLib;
extern const uint8_t * const irqRtxLibRef;
const uint8_t * const irqRtxLibRef = &irqRtxLib;
// Default User SVC Table
//lint -esym(714,osRtxUserSVC) "Referenced by Exception handlers"
@ -570,35 +544,43 @@ __WEAK void * const osRtxUserSVC[1] = { (void *)0 };
#if defined(__CC_ARM) || \
(defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050))
static uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base"))); //lint -esym(728,__os_thread_cb_start__)
static uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit"))); //lint -esym(728,__os_thread_cb_end__)
static uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base"))); //lint -esym(728,__os_timer_cb_start__)
static uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit"))); //lint -esym(728,__os_timer_cb_end__)
static uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base"))); //lint -esym(728,__os_evflags_cb_start__)
static uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit"))); //lint -esym(728,__os_evflags_cb_end__)
static uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base"))); //lint -esym(728,__os_mutex_cb_start__)
static uint32_t __os_mutex_cb_end__ __attribute__((weakref(".bss.os.mutex.cb$$Limit"))); //lint -esym(728,__os_mutex_cb_end__)
static uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base"))); //lint -esym(728,__os_semaphore_cb_start__)
static uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit"))); //lint -esym(728,__os_semaphore_cb_end__)
static uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base"))); //lint -esym(728,__os_mempool_cb_start__)
static uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit"))); //lint -esym(728,__os_mempool_cb_end__)
static uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base"))); //lint -esym(728,__os_msgqueue_cb_start__)
static uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit"))); //lint -esym(728,__os_msgqueue_cb_end__)
// Initialized through linker
//lint -esym(728, __os_thread_cb_start__, __os_thread_cb_end__)
//lint -esym(728, __os_timer_cb_start__, __os_timer_cb_end__)
//lint -esym(728, __os_evflags_cb_start__, __os_evflags_cb_end__)
//lint -esym(728, __os_mutex_cb_start__, __os_mutex_cb_end__)
//lint -esym(728, __os_semaphore_cb_start__, __os_semaphore_cb_end__)
//lint -esym(728, __os_mempool_cb_start__, __os_mempool_cb_end__)
//lint -esym(728, __os_msgqueue_cb_start__, __os_msgqueue_cb_end__)
static const uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base")));
static const uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit")));
static const uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base")));
static const uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit")));
static const uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base")));
static const uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit")));
static const uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base")));
static const uint32_t __os_mutex_cb_end__ __attribute__((weakref(".bss.os.mutex.cb$$Limit")));
static const uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base")));
static const uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit")));
static const uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base")));
static const uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit")));
static const uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base")));
static const uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit")));
#else
extern uint32_t __os_thread_cb_start__ __attribute__((weak));
extern uint32_t __os_thread_cb_end__ __attribute__((weak));
extern uint32_t __os_timer_cb_start__ __attribute__((weak));
extern uint32_t __os_timer_cb_end__ __attribute__((weak));
extern uint32_t __os_evflags_cb_start__ __attribute__((weak));
extern uint32_t __os_evflags_cb_end__ __attribute__((weak));
extern uint32_t __os_mutex_cb_start__ __attribute__((weak));
extern uint32_t __os_mutex_cb_end__ __attribute__((weak));
extern uint32_t __os_semaphore_cb_start__ __attribute__((weak));
extern uint32_t __os_semaphore_cb_end__ __attribute__((weak));
extern uint32_t __os_mempool_cb_start__ __attribute__((weak));
extern uint32_t __os_mempool_cb_end__ __attribute__((weak));
extern uint32_t __os_msgqueue_cb_start__ __attribute__((weak));
extern uint32_t __os_msgqueue_cb_end__ __attribute__((weak));
extern const uint32_t __os_thread_cb_start__ __attribute__((weak));
extern const uint32_t __os_thread_cb_end__ __attribute__((weak));
extern const uint32_t __os_timer_cb_start__ __attribute__((weak));
extern const uint32_t __os_timer_cb_end__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_start__ __attribute__((weak));
extern const uint32_t __os_evflags_cb_end__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_start__ __attribute__((weak));
extern const uint32_t __os_mutex_cb_end__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_start__ __attribute__((weak));
extern const uint32_t __os_semaphore_cb_end__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_start__ __attribute__((weak));
extern const uint32_t __os_mempool_cb_end__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_start__ __attribute__((weak));
extern const uint32_t __os_msgqueue_cb_end__ __attribute__((weak));
#endif
//lint -e{9067} "extern array declared without size"
@ -649,6 +631,14 @@ __WEAK void software_init_hook (void) {
(void)osKernelInitialize();
}
#elif defined(__ICCARM__)
extern void $Super$$__iar_data_init3 (void);
void $Sub$$__iar_data_init3 (void) {
$Super$$__iar_data_init3();
(void)osKernelInitialize();
}
#endif
@ -717,7 +707,7 @@ void *__user_perthread_libspace (void) {
}
}
if (n == (uint32_t)OS_THREAD_LIBSPACE_NUM) {
(void)osRtxErrorNotify(osRtxErrorClibSpace, id);
(void)osRtxKernelErrorNotify(osRtxErrorClibSpace, id);
}
} else {
n = OS_THREAD_LIBSPACE_NUM;
@ -746,7 +736,7 @@ __WEAK int _mutex_initialize(mutex *m) {
result = 1;
} else {
result = 0;
(void)osRtxErrorNotify(osRtxErrorClibMutex, m);
(void)osRtxKernelErrorNotify(osRtxErrorClibMutex, m);
}
return result;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,6 +27,7 @@
#define RTX_LIB_H_
#include <string.h>
#include "rtx_def.h" // RTX Configuration definitions
#include "rtx_core_c.h" // Cortex core definitions
#if ((defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)) || \
(defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \
@ -35,7 +36,6 @@
#endif
#include "os_tick.h" // CMSIS OS Tick API
#include "cmsis_os2.h" // CMSIS RTOS API
#include "RTX_Config.h" // RTX Configuration
#include "rtx_os.h" // RTX OS definitions
#include "rtx_evr.h" // RTX Event Recorder definitions
@ -189,11 +189,14 @@ extern void osRtxThreadSwitch (os_thread_t *thread);
extern void osRtxThreadDispatch (os_thread_t *thread);
extern void osRtxThreadWaitExit (os_thread_t *thread, uint32_t ret_val, bool_t dispatch);
extern bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout);
extern void osRtxThreadStackCheck (void);
#ifdef RTX_STACK_CHECK
extern bool_t osRtxThreadStackCheck (const os_thread_t *thread);
#endif
extern bool_t osRtxThreadStartup (void);
// Timer Library functions
extern void osRtxTimerThread (void *argument);
extern int32_t osRtxTimerSetup (void);
extern void osRtxTimerThread (void *argument);
// Mutex Library functions
extern void osRtxMutexOwnerRelease (os_mutex_t *mutex_list);
@ -209,6 +212,9 @@ extern uint32_t osRtxMemoryPoolInit (os_mp_info_t *mp_info, uint32_t block_co
extern void *osRtxMemoryPoolAlloc (os_mp_info_t *mp_info);
extern osStatus_t osRtxMemoryPoolFree (os_mp_info_t *mp_info, void *block);
// Message Queue Library functions
extern int32_t osRtxMessageQueueTimerSetup (void);
// System Library functions
extern void osRtxTick_Handler (void);
extern void osRtxPendSV_Handler (void);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxMemoryPoolMemUsage \
__attribute__((section(".data.os.mempool.obj"))) =
{ 0U, 0U, 0U };
@ -191,20 +191,16 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc
const char *name;
// Check parameters
if ((block_count == 0U) || (block_size == 0U)) {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
b_count = block_count;
b_size = (block_size + 3U) & ~3UL;
if ((__CLZ(b_count) + __CLZ(b_size)) < 32U) {
if ((block_count == 0U) || (block_size == 0U) ||
((__CLZ(block_count) + __CLZ(block_size)) < 32U)) {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
size = b_count * b_size;
b_count = block_count;
b_size = (block_size + 3U) & ~3UL;
size = b_count * b_size;
// Process attributes
if (attr != NULL) {
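
The merged parameter check uses a leading-zero-count argument: since a < 2^(32-CLZ(a)) and b < 2^(32-CLZ(b)), the product a*b is below 2^(64-CLZ(a)-CLZ(b)), so CLZ(a)+CLZ(b) >= 32 guarantees it fits in 32 bits. The test is conservative: it can reject some safe products, but never accepts an unsafe one. A standalone sketch with the GCC/Clang builtin in place of the CMSIS __CLZ intrinsic:

#include <stdint.h>

/* Both operands must be non-zero here (__builtin_clz(0) is undefined);
   the caller above already rejected zero counts and sizes. */
static int mul32_may_overflow (uint32_t a, uint32_t b) {
  return (__builtin_clz(a) + __builtin_clz(b)) < 32;
}
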
@ -229,7 +225,7 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc
}
}
if (mp_mem != NULL) {
//lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7]
//lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7]
if ((((uint32_t)mp_mem & 3U) != 0U) || (mp_size < size)) {
EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidDataMemory);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
@ -257,7 +253,7 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mp = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_memory_pool_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (mp != NULL) {
uint32_t used;
osRtxMemoryPoolMemUsage.cnt_alloc++;
@ -283,13 +279,13 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mp);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxMemoryPoolMemUsage.cnt_free++;
#endif
}
mp = NULL;
} else {
memset(mp_mem, 0, size);
(void)memset(mp_mem, 0, size);
}
flags |= osRtxFlagSystemMemory;
}
@ -508,7 +504,7 @@ static osStatus_t svcRtxMemoryPoolDelete (osMemoryPoolId_t mp_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mp);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxMemoryPoolMemUsage.cnt_free++;
#endif
}
@ -594,7 +590,7 @@ osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, con
osMemoryPoolId_t mp_id;
EvrRtxMemoryPoolNew(block_count, block_size, attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMemoryPoolError(NULL, (int32_t)osErrorISR);
mp_id = NULL;
} else {
@ -607,7 +603,7 @@ osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, con
const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMemoryPoolGetName(mp_id, NULL);
name = NULL;
} else {
@ -621,7 +617,7 @@ void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) {
void *memory;
EvrRtxMemoryPoolAlloc(mp_id, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
memory = isrRtxMemoryPoolAlloc(mp_id, timeout);
} else {
memory = __svcMemoryPoolAlloc(mp_id, timeout);
@ -634,7 +630,7 @@ osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
osStatus_t status;
EvrRtxMemoryPoolFree(mp_id, block);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
status = isrRtxMemoryPoolFree(mp_id, block);
} else {
status = __svcMemoryPoolFree(mp_id, block);
@ -646,7 +642,7 @@ osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) {
uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
uint32_t capacity;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
capacity = svcRtxMemoryPoolGetCapacity(mp_id);
} else {
capacity = __svcMemoryPoolGetCapacity(mp_id);
@ -658,7 +654,7 @@ uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) {
uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
uint32_t block_size;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
block_size = svcRtxMemoryPoolGetBlockSize(mp_id);
} else {
block_size = __svcMemoryPoolGetBlockSize(mp_id);
@ -670,7 +666,7 @@ uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) {
uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
count = svcRtxMemoryPoolGetCount(mp_id);
} else {
count = __svcMemoryPoolGetCount(mp_id);
@ -682,7 +678,7 @@ uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) {
uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) {
uint32_t space;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
space = svcRtxMemoryPoolGetSpace(mp_id);
} else {
space = __svcMemoryPoolGetSpace(mp_id);
@ -695,7 +691,7 @@ osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) {
osStatus_t status;
EvrRtxMemoryPoolDelete(mp_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMemoryPoolError(mp_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxMessageQueueMemUsage \
__attribute__((section(".data.os.msgqueue.obj"))) =
{ 0U, 0U, 0U };
@ -189,7 +189,7 @@ static void osRtxMessageQueuePostProcess (os_message_t *msg) {
reg = osRtxThreadRegPtr(thread);
//lint -e{923} "cast from unsigned int to pointer"
ptr_src = (const void *)reg[2];
memcpy(&msg0[1], ptr_src, mq->msg_size);
(void)memcpy(&msg0[1], ptr_src, mq->msg_size);
// Store Message into Queue
msg0->id = osRtxIdMessage;
msg0->flags = 0U;
@ -214,7 +214,7 @@ static void osRtxMessageQueuePostProcess (os_message_t *msg) {
reg = osRtxThreadRegPtr(thread);
//lint -e{923} "cast from unsigned int to pointer"
ptr_dst = (void *)reg[2];
memcpy(ptr_dst, &msg[1], mq->msg_size);
(void)memcpy(ptr_dst, &msg[1], mq->msg_size);
if (reg[3] != 0U) {
//lint -e{923} -e{9078} "cast from unsigned int to pointer"
*((uint8_t *)reg[3]) = msg->priority;
@ -245,19 +245,15 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms
const char *name;
// Check parameters
if ((msg_count == 0U) || (msg_size == 0U)) {
EvrRtxMessageQueueError(NULL, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
block_size = ((msg_size + 3U) & ~3UL) + sizeof(os_message_t);
if ((__CLZ(msg_count) + __CLZ(block_size)) < 32U) {
if ((msg_count == 0U) || (msg_size == 0U) ||
((__CLZ(msg_count) + __CLZ(msg_size)) < 32U)) {
EvrRtxMessageQueueError(NULL, (int32_t)osErrorParameter);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
size = msg_count * block_size;
block_size = ((msg_size + 3U) & ~3UL) + sizeof(os_message_t);
size = msg_count * block_size;
// Process attributes
if (attr != NULL) {
@ -282,7 +278,7 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms
}
}
if (mq_mem != NULL) {
//lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7]
//lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7]
if ((((uint32_t)mq_mem & 3U) != 0U) || (mq_size < size)) {
EvrRtxMessageQueueError(NULL, osRtxErrorInvalidDataMemory);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
@ -310,7 +306,7 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mq = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_message_queue_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (mq != NULL) {
uint32_t used;
osRtxMessageQueueMemUsage.cnt_alloc++;
@ -336,13 +332,13 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mq);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxMessageQueueMemUsage.cnt_free++;
#endif
}
mq = NULL;
} else {
memset(mq_mem, 0, size);
(void)memset(mq_mem, 0, size);
}
flags |= osRtxFlagSystemMemory;
}
@ -414,7 +410,7 @@ static osStatus_t svcRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *m
reg = osRtxThreadRegPtr(thread);
//lint -e{923} "cast from unsigned int to pointer"
ptr = (void *)reg[2];
memcpy(ptr, msg_ptr, mq->msg_size);
(void)memcpy(ptr, msg_ptr, mq->msg_size);
if (reg[3] != 0U) {
//lint -e{923} -e{9078} "cast from unsigned int to pointer"
*((uint8_t *)reg[3]) = msg_prio;
@ -427,7 +423,7 @@ static osStatus_t svcRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *m
msg = osRtxMemoryPoolAlloc(&mq->mp_info);
if (msg != NULL) {
// Copy Message
memcpy(&msg[1], msg_ptr, mq->msg_size);
(void)memcpy(&msg[1], msg_ptr, mq->msg_size);
// Put Message into Queue
msg->id = osRtxIdMessage;
msg->flags = 0U;
@ -485,7 +481,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr
if (msg != NULL) {
MessageQueueRemove(mq, msg);
// Copy Message
memcpy(msg_ptr, &msg[1], mq->msg_size);
(void)memcpy(msg_ptr, &msg[1], mq->msg_size);
if (msg_prio != NULL) {
*msg_prio = msg->priority;
}
@ -506,7 +502,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr
reg = osRtxThreadRegPtr(thread);
//lint -e{923} "cast from unsigned int to pointer"
ptr = (const void *)reg[2];
memcpy(&msg[1], ptr, mq->msg_size);
(void)memcpy(&msg[1], ptr, mq->msg_size);
// Store Message into Queue
msg->id = osRtxIdMessage;
msg->flags = 0U;
@ -655,7 +651,7 @@ static osStatus_t svcRtxMessageQueueReset (osMessageQueueId_t mq_id) {
reg = osRtxThreadRegPtr(thread);
//lint -e{923} "cast from unsigned int to pointer"
ptr = (const void *)reg[2];
memcpy(&msg[1], ptr, mq->msg_size);
(void)memcpy(&msg[1], ptr, mq->msg_size);
// Store Message into Queue
msg->id = osRtxIdMessage;
msg->flags = 0U;
@ -709,7 +705,7 @@ static osStatus_t svcRtxMessageQueueDelete (osMessageQueueId_t mq_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mq);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxMessageQueueMemUsage.cnt_free++;
#endif
}
@ -756,7 +752,7 @@ osStatus_t isrRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr,
msg = osRtxMemoryPoolAlloc(&mq->mp_info);
if (msg != NULL) {
// Copy Message
memcpy(&msg[1], msg_ptr, mq->msg_size);
(void)memcpy(&msg[1], msg_ptr, mq->msg_size);
msg->id = osRtxIdMessage;
msg->flags = 0U;
msg->priority = msg_prio;
@ -816,6 +812,23 @@ osStatus_t isrRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8
}
// ==== Library functions ====
/// Create a Message Queue for the Timer Thread.
int32_t osRtxMessageQueueTimerSetup (void) {
int32_t ret = -1;
osRtxInfo.timer.mq = osRtxMessageQueueId(
svcRtxMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr)
);
if (osRtxInfo.timer.mq != NULL) {
ret = 0;
}
return ret;
}
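
osRtxMessageQueueTimerSetup moves creation of the timer thread's queue into this module; the consumer is osRtxTimerThread in rtx_timer.c. A simplified sketch of that loop, with the os_timer_finfo_t field names (func, arg) assumed rather than quoted:

/* Simplified consumer loop; the real implementation is osRtxTimerThread. */
void timer_thread_sketch (void *argument) {
  os_timer_finfo_t finfo;        /* assumed: { void (*func)(void *); void *arg; } */
  (void)argument;
  for (;;) {
    /* Block until the tick handler posts an expired-timer record. */
    if (osMessageQueueGet(osRtxInfo.timer.mq, &finfo, NULL, osWaitForever) == osOK) {
      (finfo.func)(finfo.arg);   /* run the user callback in thread context */
    }
  }
}
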
// ==== Public API ====
/// Create and Initialize a Message Queue object.
@ -823,7 +836,7 @@ osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, con
osMessageQueueId_t mq_id;
EvrRtxMessageQueueNew(msg_count, msg_size, attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMessageQueueError(NULL, (int32_t)osErrorISR);
mq_id = NULL;
} else {
@ -836,7 +849,7 @@ osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, con
const char *osMessageQueueGetName (osMessageQueueId_t mq_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMessageQueueGetName(mq_id, NULL);
name = NULL;
} else {
@ -850,7 +863,7 @@ osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uin
osStatus_t status;
EvrRtxMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
status = isrRtxMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout);
} else {
status = __svcMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout);
@ -863,7 +876,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *
osStatus_t status;
EvrRtxMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
status = isrRtxMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout);
} else {
status = __svcMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout);
@ -875,7 +888,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *
uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
uint32_t capacity;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
capacity = svcRtxMessageQueueGetCapacity(mq_id);
} else {
capacity = __svcMessageQueueGetCapacity(mq_id);
@ -887,7 +900,7 @@ uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) {
uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
uint32_t msg_size;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
msg_size = svcRtxMessageQueueGetMsgSize(mq_id);
} else {
msg_size = __svcMessageQueueGetMsgSize(mq_id);
@ -899,7 +912,7 @@ uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) {
uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
count = svcRtxMessageQueueGetCount(mq_id);
} else {
count = __svcMessageQueueGetCount(mq_id);
@ -911,7 +924,7 @@ uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) {
uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) {
uint32_t space;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
space = svcRtxMessageQueueGetSpace(mq_id);
} else {
space = __svcMessageQueueGetSpace(mq_id);
@ -924,7 +937,7 @@ osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) {
osStatus_t status;
EvrRtxMessageQueueReset(mq_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMessageQueueError(mq_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -938,7 +951,7 @@ osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) {
osStatus_t status;
EvrRtxMessageQueueDelete(mq_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMessageQueueError(mq_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxMutexMemUsage \
__attribute__((section(".data.os.mutex.obj"))) =
{ 0U, 0U, 0U };
@ -78,7 +78,7 @@ void osRtxMutexOwnerRelease (os_mutex_t *mutex_list) {
void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_wakeup) {
const os_mutex_t *mutex0;
os_thread_t *thread;
os_thread_t *thread0;
const os_thread_t *thread0;
int8_t priority;
// Restore owner Thread priority
@ -88,15 +88,17 @@ void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_
mutex0 = thread->mutex_list;
// Check Mutexes owned by Thread
do {
// Check Threads waiting for Mutex
thread0 = mutex0->thread_list;
if (thread0 == thread_wakeup) {
// Skip thread that is waken-up
thread0 = thread0->thread_next;
}
if ((thread0 != NULL) && (thread0->priority > priority)) {
// Higher priority Thread is waiting for Mutex
priority = thread0->priority;
if ((mutex0->attr & osMutexPrioInherit) != 0U) {
// Check Threads waiting for Mutex
thread0 = mutex0->thread_list;
if (thread0 == thread_wakeup) {
// Skip thread that is waken-up
thread0 = thread0->thread_next;
}
if ((thread0 != NULL) && (thread0->priority > priority)) {
// Higher priority Thread is waiting for Mutex
priority = thread0->priority;
}
}
mutex0 = mutex0->owner_next;
} while (mutex0 != NULL);
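
The fix above makes the owner-priority restore consider only mutexes that were actually created with priority inheritance. Opting a mutex into that logic uses the standard CMSIS-RTOS2 attributes:

#include "cmsis_os2.h"

static const osMutexAttr_t res_mutex_attr = {
  .name      = "res_mutex",
  .attr_bits = osMutexPrioInherit,   /* participate in the inheritance logic above */
  .cb_mem    = NULL,                 /* let RTX allocate the control block */
  .cb_size   = 0U
};

/* Created once at init: osMutexId_t res_mutex = osMutexNew(&res_mutex_attr); */
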
@ -153,7 +155,7 @@ static osMutexId_t svcRtxMutexNew (const osMutexAttr_t *attr) {
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
mutex = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_mutex_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (mutex != NULL) {
uint32_t used;
osRtxMutexMemUsage.cnt_alloc++;
@ -336,19 +338,19 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) {
}
// Restore running Thread priority
if ((mutex->attr & osMutexPrioInherit) != 0U) {
priority = thread->priority_base;
mutex0 = thread->mutex_list;
// Check mutexes owned by running Thread
while (mutex0 != NULL) {
if ((mutex0->attr & osMutexPrioInherit) != 0U) {
if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) {
// Higher priority Thread is waiting for Mutex
priority = mutex0->thread_list->priority;
}
mutex0 = mutex0->owner_next;
}
thread->priority = priority;
// Check if Thread is waiting for a Mutex
if (mutex->thread_list != NULL) {
@ -428,21 +430,21 @@ static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) {
}
// Restore owner Thread priority
if ((mutex->attr & osMutexPrioInherit) != 0U) {
priority = thread->priority_base;
mutex0 = thread->mutex_list;
// Check Mutexes owned by Thread
while (mutex0 != NULL) {
if ((mutex0->attr & osMutexPrioInherit) != 0U) {
if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) {
// Higher priority Thread is waiting for Mutex
priority = mutex0->thread_list->priority;
}
mutex0 = mutex0->owner_next;
}
if (thread->priority != priority) {
thread->priority = priority;
osRtxThreadListSort(thread);
}
// Unblock waiting threads
@ -464,7 +466,7 @@ static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, mutex);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxMutexMemUsage.cnt_free++;
#endif
}
@ -492,7 +494,7 @@ osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
osMutexId_t mutex_id;
EvrRtxMutexNew(attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexError(NULL, (int32_t)osErrorISR);
mutex_id = NULL;
} else {
@ -505,7 +507,7 @@ osMutexId_t osMutexNew (const osMutexAttr_t *attr) {
const char *osMutexGetName (osMutexId_t mutex_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexGetName(mutex_id, NULL);
name = NULL;
} else {
@ -519,7 +521,7 @@ osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) {
osStatus_t status;
EvrRtxMutexAcquire(mutex_id, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -533,7 +535,7 @@ osStatus_t osMutexRelease (osMutexId_t mutex_id) {
osStatus_t status;
EvrRtxMutexRelease(mutex_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -546,7 +548,7 @@ osStatus_t osMutexRelease (osMutexId_t mutex_id) {
osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) {
osThreadId_t thread;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexGetOwner(mutex_id, NULL);
thread = NULL;
} else {
@ -560,7 +562,7 @@ osStatus_t osMutexDelete (osMutexId_t mutex_id) {
osStatus_t status;
EvrRtxMutexDelete(mutex_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxMutexError(mutex_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2018 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxSemaphoreMemUsage \
__attribute__((section(".data.os.semaphore.obj"))) =
{ 0U, 0U, 0U };
@ -172,7 +172,7 @@ static osSemaphoreId_t svcRtxSemaphoreNew (uint32_t max_count, uint32_t initial_
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
semaphore = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_semaphore_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (semaphore != NULL) {
uint32_t used;
osRtxSemaphoreMemUsage.cnt_alloc++;
@ -346,7 +346,7 @@ static osStatus_t svcRtxSemaphoreDelete (osSemaphoreId_t semaphore_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, semaphore);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxSemaphoreMemUsage.cnt_free++;
#endif
}
@ -432,7 +432,7 @@ osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, cons
osSemaphoreId_t semaphore_id;
EvrRtxSemaphoreNew(max_count, initial_count, attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxSemaphoreError(NULL, (int32_t)osErrorISR);
semaphore_id = NULL;
} else {
@ -445,7 +445,7 @@ osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, cons
const char *osSemaphoreGetName (osSemaphoreId_t semaphore_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxSemaphoreGetName(semaphore_id, NULL);
name = NULL;
} else {
@ -459,7 +459,7 @@ osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) {
osStatus_t status;
EvrRtxSemaphoreAcquire(semaphore_id, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
status = isrRtxSemaphoreAcquire(semaphore_id, timeout);
} else {
status = __svcSemaphoreAcquire(semaphore_id, timeout);
@ -472,7 +472,7 @@ osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
osStatus_t status;
EvrRtxSemaphoreRelease(semaphore_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
status = isrRtxSemaphoreRelease(semaphore_id);
} else {
status = __svcSemaphoreRelease(semaphore_id);
@ -484,7 +484,7 @@ osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) {
uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
count = svcRtxSemaphoreGetCount(semaphore_id);
} else {
count = __svcSemaphoreGetCount(semaphore_id);
@ -497,7 +497,7 @@ osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) {
osStatus_t status;
EvrRtxSemaphoreDelete(semaphore_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxSemaphoreError(semaphore_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
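
Note how the acquire/release wrappers above route to the isr* variants when called from an exception. A minimal sketch of the resulting ISR-to-thread signalling pattern (interrupt handler name is illustrative):

#include "cmsis_os2.h"

static osSemaphoreId_t sem;

void app_init (void) {
  sem = osSemaphoreNew(1U, 0U, NULL);  // binary semaphore, initially taken
}

void DEVICE_IRQHandler (void) {        // illustrative ISR name
  (void)osSemaphoreRelease(sem);       // ISR-safe: isrRtxSemaphoreRelease path
}

static void worker (void *argument) {
  (void)argument;
  for (;;) {
    if (osSemaphoreAcquire(sem, osWaitForever) == osOK) {
      // handle the event signalled from the interrupt
    }
  }
}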

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -122,38 +122,40 @@ void osRtxTick_Handler (void) {
OS_Tick_AcknowledgeIRQ();
osRtxInfo.kernel.tick++;
// Process Timers
if (osRtxInfo.timer.tick != NULL) {
osRtxInfo.timer.tick();
}
// Process Thread Delays
osRtxThreadDelayTick();
osRtxThreadDispatch(NULL);
// Process Timers
if (osRtxInfo.timer.tick != NULL) {
osRtxInfo.timer.tick();
}
// Check Round Robin timeout
if (osRtxInfo.thread.robin.timeout != 0U) {
if (osRtxInfo.thread.robin.thread != osRtxInfo.thread.run.next) {
// Reset Round Robin
osRtxInfo.thread.robin.thread = osRtxInfo.thread.run.next;
osRtxInfo.thread.robin.tick = osRtxInfo.thread.robin.timeout;
} else {
if (osRtxInfo.thread.robin.tick != 0U) {
osRtxInfo.thread.robin.tick--;
thread = osRtxInfo.thread.run.next;
if (thread != osRtxInfo.thread.robin.thread) {
osRtxInfo.thread.robin.thread = thread;
if (thread->delay == 0U) {
// Reset Round Robin
thread->delay = osRtxInfo.thread.robin.timeout;
}
if (osRtxInfo.thread.robin.tick == 0U) {
// Round Robin Timeout
if (osRtxKernelGetState() == osRtxKernelRunning) {
thread = osRtxInfo.thread.ready.thread_list;
if ((thread != NULL) && (thread->priority == osRtxInfo.thread.robin.thread->priority)) {
osRtxThreadListRemove(thread);
osRtxThreadReadyPut(osRtxInfo.thread.robin.thread);
EvrRtxThreadPreempted(osRtxInfo.thread.robin.thread);
osRtxThreadSwitch(thread);
osRtxInfo.thread.robin.thread = thread;
osRtxInfo.thread.robin.tick = osRtxInfo.thread.robin.timeout;
}
}
if (thread->delay != 0U) {
thread->delay--;
}
if (thread->delay == 0U) {
// Round Robin Timeout
if (osRtxKernelGetState() == osRtxKernelRunning) {
thread = osRtxInfo.thread.ready.thread_list;
if ((thread != NULL) && (thread->priority == osRtxInfo.thread.robin.thread->priority)) {
osRtxThreadListRemove(thread);
osRtxThreadReadyPut(osRtxInfo.thread.robin.thread);
EvrRtxThreadPreempted(osRtxInfo.thread.robin.thread);
osRtxThreadSwitch(thread);
osRtxInfo.thread.robin.thread = thread;
thread->delay = osRtxInfo.thread.robin.timeout;
}
}
}
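
The time slice consumed through thread->delay here originates from the kernel configuration; a typical RTX_Config.h fragment looks like this (values illustrative, sketch only):

// RTX_Config.h fragment (values illustrative)
#define OS_ROBIN_ENABLE   1  // enable round-robin switching of equal-priority threads
#define OS_ROBIN_TIMEOUT  5  // time slice in kernel ticks; loaded into
                             // thread->delay by the reworked handler above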
@ -208,6 +210,6 @@ void osRtxPostProcess (os_object_t *object) {
osRtxInfo.kernel.pendSV = 1U;
}
} else {
(void)osRtxErrorNotify(osRtxErrorISRQueueOverflow, object);
(void)osRtxKernelErrorNotify(osRtxErrorISRQueueOverflow, object);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxThreadMemUsage \
__attribute__((section(".data.os.thread.obj"))) =
{ 0U, 0U, 0U };
@ -313,6 +313,7 @@ static void osRtxThreadDelayRemove (os_thread_t *thread) {
osRtxInfo.thread.delay_list = thread->delay_next;
}
}
thread->delay = 0U;
}
/// Process Thread Delay Tick (executed each System Tick).
@ -420,7 +421,6 @@ void osRtxThreadSwitch (os_thread_t *thread) {
thread->state = osRtxThreadRunning;
osRtxInfo.thread.run.next = thread;
osRtxThreadStackCheck();
EvrRtxThreadSwitched(thread);
}
@ -509,22 +509,25 @@ bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout) {
return TRUE;
}
#ifdef RTX_STACK_CHECK
/// Check current running Thread Stack.
/// \param[in] thread running thread.
/// \return true - success, false - failure.
//lint -esym(714,osRtxThreadStackCheck) "Referenced by Exception handlers"
//lint -esym(759,osRtxThreadStackCheck) "Prototype in header"
//lint -esym(765,osRtxThreadStackCheck) "Global scope (can be overridden)"
__WEAK void osRtxThreadStackCheck (void) {
os_thread_t *thread;
//lint -esym(765,osRtxThreadStackCheck) "Global scope"
bool_t osRtxThreadStackCheck (const os_thread_t *thread) {
thread = osRtxThreadGetRunning();
if (thread != NULL) {
//lint -e{923} "cast from pointer to unsigned int"
//lint -e{9079} -e{9087} "cast between pointers to different object types"
if ((thread->sp <= (uint32_t)thread->stack_mem) ||
(*((uint32_t *)thread->stack_mem) != osRtxStackMagicWord)) {
(void)osRtxErrorNotify(osRtxErrorStackUnderflow, thread);
}
//lint -e{923} "cast from pointer to unsigned int"
//lint -e{9079} -e{9087} "cast between pointers to different object types"
if ((thread->sp <= (uint32_t)thread->stack_mem) ||
(*((uint32_t *)thread->stack_mem) != osRtxStackMagicWord)) {
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return FALSE;
}
return TRUE;
}
#endif
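
When this check fails, RTX reports osRtxErrorStackOverflow through the kernel error path; applications typically override the weak callback from RTX_Config.c along these lines (a sketch; the halting policy is illustrative):

#include "rtx_os.h"

// Sketch of an application override of the weak RTX error callback.
uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
  if (code == osRtxErrorStackOverflow) {
    // object_id is the os_thread_t* whose stack check failed above.
    for (;;) { /* halt: a corrupted stack is not recoverable */ }
  }
  return 0U;
}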
#ifdef RTX_TF_M_EXTENSION
/// Get TrustZone Module Identifier of running Thread.
@ -617,7 +620,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
}
}
if (stack_mem != NULL) {
//lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7]
//lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7]
if ((((uint32_t)stack_mem & 7U) != 0U) || (stack_size == 0U)) {
EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
@ -646,10 +649,12 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
}
// Check stack size
if ((stack_size != 0U) && (((stack_size & 7U) != 0U) || (stack_size < (64U + 8U)))) {
EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
if (stack_size != 0U) {
if (((stack_size & 7U) != 0U) || (stack_size < (64U + 8U)) || (stack_size > 0x7FFFFFFFU)) {
EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack);
//lint -e{904} "Return statement before end of function" [MISRA Note 1]
return NULL;
}
}
// Allocate object memory if not provided
@ -661,7 +666,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
thread = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_thread_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (thread != NULL) {
uint32_t used;
osRtxThreadMemUsage.cnt_alloc++;
@ -701,7 +706,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, thread);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxThreadMemUsage.cnt_free++;
#endif
}
@ -729,7 +734,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, thread);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxThreadMemUsage.cnt_free++;
#endif
}
@ -803,7 +808,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
} else {
EvrRtxThreadError(NULL, (int32_t)osErrorNoMemory);
}
if (thread != NULL) {
osRtxThreadDispatch(thread);
}
@ -1042,8 +1047,6 @@ static osStatus_t svcRtxThreadSuspend (osThreadId_t thread_id) {
// Update Thread State and put it into Delay list
thread->state = osRtxThreadBlocked;
thread->thread_prev = NULL;
thread->thread_next = NULL;
osRtxThreadDelayInsert(thread, osWaitForever);
}
@ -1079,6 +1082,19 @@ static osStatus_t svcRtxThreadResume (osThreadId_t thread_id) {
return osOK;
}
/// Wakeup a thread waiting to join.
/// \param[in] thread thread object.
static void osRtxThreadJoinWakeup (os_thread_t *thread) {
if (thread->thread_join != NULL) {
osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE);
EvrRtxThreadJoined(thread->thread_join);
}
if (thread->state == osRtxThreadWaitingJoin) {
thread->thread_next->thread_join = NULL;
}
}
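
osRtxThreadJoinWakeup() serves the osThreadJoin() API; a minimal sketch of the joinable-thread pattern it enables (thread bodies illustrative):

#include "cmsis_os2.h"

static void worker (void *argument) {
  (void)argument;
  // ... finite amount of work ...
  osThreadExit();                     // wakes the joiner via the helper above
}

static void parent (void *argument) {
  osThreadAttr_t attr = { 0 };
  (void)argument;
  attr.attr_bits = osThreadJoinable;  // thread storage is kept until joined
  osThreadId_t tid = osThreadNew(worker, NULL, &attr);
  (void)osThreadJoin(tid);            // blocks until worker terminates
}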
/// Free Thread resources.
/// \param[in] thread thread object.
static void osRtxThreadFree (os_thread_t *thread) {
@ -1110,12 +1126,31 @@ static void osRtxThreadFree (os_thread_t *thread) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, thread);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxThreadMemUsage.cnt_free++;
#endif
}
}
/// Destroy a Thread.
/// \param[in] thread thread object.
static void osRtxThreadDestroy (os_thread_t *thread) {
if ((thread->attr & osThreadJoinable) == 0U) {
osRtxThreadFree(thread);
} else {
// Update Thread State and put it into Terminate Thread list
thread->state = osRtxThreadTerminated;
thread->thread_prev = NULL;
thread->thread_next = osRtxInfo.thread.terminate_list;
if (osRtxInfo.thread.terminate_list != NULL) {
osRtxInfo.thread.terminate_list->thread_prev = thread;
}
osRtxInfo.thread.terminate_list = thread;
}
EvrRtxThreadDestroyed(thread);
}
/// Detach a thread (thread storage can be reclaimed when thread terminates).
/// \note API identical to osThreadDetach
static osStatus_t svcRtxThreadDetach (osThreadId_t thread_id) {
@ -1151,6 +1186,7 @@ static osStatus_t svcRtxThreadDetach (osThreadId_t thread_id) {
/// \note API identical to osThreadJoin
static osStatus_t svcRtxThreadJoin (osThreadId_t thread_id) {
os_thread_t *thread = osRtxThreadId(thread_id);
os_thread_t *thread_running;
osStatus_t status;
// Check parameters
@ -1182,7 +1218,9 @@ static osStatus_t svcRtxThreadJoin (osThreadId_t thread_id) {
} else {
// Suspend current Thread
if (osRtxThreadWaitEnter(osRtxThreadWaitingJoin, osWaitForever)) {
thread->thread_join = osRtxThreadGetRunning();
thread_running = osRtxThreadGetRunning();
thread_running->thread_next = thread;
thread->thread_join = thread_running;
thread->attr &= ~osThreadJoinable;
EvrRtxThreadJoinPending(thread);
} else {
@ -1213,30 +1251,26 @@ static void svcRtxThreadExit (void) {
osRtxMutexOwnerRelease(thread->mutex_list);
// Wakeup Thread waiting to Join
if (thread->thread_join != NULL) {
osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE);
EvrRtxThreadJoined(thread->thread_join);
}
osRtxThreadJoinWakeup(thread);
// Switch to next Ready Thread
thread->sp = __get_PSP();
osRtxThreadSwitch(osRtxThreadListGet(&osRtxInfo.thread.ready));
// Update Stack Pointer
thread->sp = __get_PSP();
#ifdef RTX_STACK_CHECK
// Check Stack usage
if (!osRtxThreadStackCheck(thread)) {
osRtxThreadSetRunning(osRtxInfo.thread.run.next);
(void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread);
}
#endif
// Mark running thread as deleted
osRtxThreadSetRunning(NULL);
if ((thread->attr & osThreadJoinable) == 0U) {
osRtxThreadFree(thread);
} else {
// Update Thread State and put it into Terminate Thread list
thread->state = osRtxThreadTerminated;
thread->thread_prev = NULL;
thread->thread_next = osRtxInfo.thread.terminate_list;
if (osRtxInfo.thread.terminate_list != NULL) {
osRtxInfo.thread.terminate_list->thread_prev = thread;
}
osRtxInfo.thread.terminate_list = thread;
}
EvrRtxThreadDestroyed(thread);
// Destroy Thread
osRtxThreadDestroy(thread);
}
/// Terminate execution of a thread.
@ -1285,34 +1319,28 @@ static osStatus_t svcRtxThreadTerminate (osThreadId_t thread_id) {
osRtxMutexOwnerRelease(thread->mutex_list);
// Wakeup Thread waiting to Join
if (thread->thread_join != NULL) {
osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE);
EvrRtxThreadJoined(thread->thread_join);
}
osRtxThreadJoinWakeup(thread);
// Switch to next Ready Thread when terminating running Thread
if (thread->state == osRtxThreadRunning) {
thread->sp = __get_PSP();
osRtxThreadSwitch(osRtxThreadListGet(&osRtxInfo.thread.ready));
// Update Stack Pointer
thread->sp = __get_PSP();
#ifdef RTX_STACK_CHECK
// Check Stack usage
if (!osRtxThreadStackCheck(thread)) {
osRtxThreadSetRunning(osRtxInfo.thread.run.next);
(void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread);
}
#endif
// Mark running thread as deleted
osRtxThreadSetRunning(NULL);
} else {
osRtxThreadDispatch(NULL);
}
if ((thread->attr & osThreadJoinable) == 0U) {
osRtxThreadFree(thread);
} else {
// Update Thread State and put it into Terminate Thread list
thread->state = osRtxThreadTerminated;
thread->thread_prev = NULL;
thread->thread_next = osRtxInfo.thread.terminate_list;
if (osRtxInfo.thread.terminate_list != NULL) {
osRtxInfo.thread.terminate_list->thread_prev = thread;
}
osRtxInfo.thread.terminate_list = thread;
}
EvrRtxThreadDestroyed(thread);
// Destroy Thread
osRtxThreadDestroy(thread);
}
return status;
@ -1539,8 +1567,8 @@ SVC0_3 (ThreadNew, osThreadId_t, osThreadFunc_t, void *, const osTh
SVC0_1 (ThreadGetName, const char *, osThreadId_t)
SVC0_0 (ThreadGetId, osThreadId_t)
SVC0_1 (ThreadGetState, osThreadState_t, osThreadId_t)
SVC0_1 (ThreadGetStackSize, uint32_t, osThreadId_t)
SVC0_1 (ThreadGetStackSpace, uint32_t, osThreadId_t)
SVC0_2 (ThreadSetPriority, osStatus_t, osThreadId_t, osPriority_t)
SVC0_1 (ThreadGetPriority, osPriority_t, osThreadId_t)
SVC0_0 (ThreadYield, osStatus_t)
@ -1600,7 +1628,7 @@ uint32_t isrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
/// Thread startup (Idle and Timer Thread).
/// \return true - success, false - failure.
bool_t osRtxThreadStartup (void) {
bool_t ret = TRUE;
bool_t ret = FALSE;
// Create Idle Thread
osRtxInfo.thread.idle = osRtxThreadId(
@ -1608,13 +1636,17 @@ bool_t osRtxThreadStartup (void) {
);
// Create Timer Thread
if (osRtxConfig.timer_mq_mcnt != 0U) {
osRtxInfo.timer.thread = osRtxThreadId(
svcRtxThreadNew(osRtxTimerThread, NULL, osRtxConfig.timer_thread_attr)
);
if (osRtxInfo.timer.thread == NULL) {
ret = FALSE;
if (osRtxConfig.timer_setup != NULL) {
if (osRtxConfig.timer_setup() == 0) {
osRtxInfo.timer.thread = osRtxThreadId(
svcRtxThreadNew(osRtxConfig.timer_thread, osRtxInfo.timer.mq, osRtxConfig.timer_thread_attr)
);
if (osRtxInfo.timer.thread != NULL) {
ret = TRUE;
}
}
} else {
ret = TRUE;
}
return ret;
@ -1628,7 +1660,7 @@ osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAtt
osThreadId_t thread_id;
EvrRtxThreadNew(func, argument, attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR);
thread_id = NULL;
} else {
@ -1641,7 +1673,7 @@ osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAtt
const char *osThreadGetName (osThreadId_t thread_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetName(thread_id, NULL);
name = NULL;
} else {
@ -1654,7 +1686,7 @@ const char *osThreadGetName (osThreadId_t thread_id) {
osThreadId_t osThreadGetId (void) {
osThreadId_t thread_id;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
thread_id = svcRtxThreadGetId();
} else {
thread_id = __svcThreadGetId();
@ -1666,7 +1698,7 @@ osThreadId_t osThreadGetId (void) {
osThreadState_t osThreadGetState (osThreadId_t thread_id) {
osThreadState_t state;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetState(thread_id, osThreadError);
state = osThreadError;
} else {
@ -1679,7 +1711,7 @@ osThreadState_t osThreadGetState (osThreadId_t thread_id) {
uint32_t osThreadGetStackSize (osThreadId_t thread_id) {
uint32_t stack_size;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetStackSize(thread_id, 0U);
stack_size = 0U;
} else {
@ -1692,7 +1724,7 @@ uint32_t osThreadGetStackSize (osThreadId_t thread_id) {
uint32_t osThreadGetStackSpace (osThreadId_t thread_id) {
uint32_t stack_space;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetStackSpace(thread_id, 0U);
stack_space = 0U;
} else {
@ -1706,7 +1738,7 @@ osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
osStatus_t status;
EvrRtxThreadSetPriority(thread_id, priority);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1719,7 +1751,7 @@ osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) {
osPriority_t osThreadGetPriority (osThreadId_t thread_id) {
osPriority_t priority;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetPriority(thread_id, osPriorityError);
priority = osPriorityError;
} else {
@ -1733,7 +1765,7 @@ osStatus_t osThreadYield (void) {
osStatus_t status;
EvrRtxThreadYield();
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(NULL, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1747,7 +1779,7 @@ osStatus_t osThreadSuspend (osThreadId_t thread_id) {
osStatus_t status;
EvrRtxThreadSuspend(thread_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1761,7 +1793,7 @@ osStatus_t osThreadResume (osThreadId_t thread_id) {
osStatus_t status;
EvrRtxThreadResume(thread_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1775,7 +1807,7 @@ osStatus_t osThreadDetach (osThreadId_t thread_id) {
osStatus_t status;
EvrRtxThreadDetach(thread_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1789,7 +1821,7 @@ osStatus_t osThreadJoin (osThreadId_t thread_id) {
osStatus_t status;
EvrRtxThreadJoin(thread_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1811,7 +1843,7 @@ osStatus_t osThreadTerminate (osThreadId_t thread_id) {
osStatus_t status;
EvrRtxThreadTerminate(thread_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadError(thread_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -1824,7 +1856,7 @@ osStatus_t osThreadTerminate (osThreadId_t thread_id) {
uint32_t osThreadGetCount (void) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadGetCount(0U);
count = 0U;
} else {
@ -1837,7 +1869,7 @@ uint32_t osThreadGetCount (void) {
uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) {
uint32_t count;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadEnumerate(thread_array, array_items, 0U);
count = 0U;
} else {
@ -1851,7 +1883,7 @@ uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) {
uint32_t thread_flags;
EvrRtxThreadFlagsSet(thread_id, flags);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
thread_flags = isrRtxThreadFlagsSet(thread_id, flags);
} else {
thread_flags = __svcThreadFlagsSet(thread_id, flags);
@ -1864,7 +1896,7 @@ uint32_t osThreadFlagsClear (uint32_t flags) {
uint32_t thread_flags;
EvrRtxThreadFlagsClear(flags);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR);
thread_flags = (uint32_t)osErrorISR;
} else {
@ -1877,7 +1909,7 @@ uint32_t osThreadFlagsClear (uint32_t flags) {
uint32_t osThreadFlagsGet (void) {
uint32_t thread_flags;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadFlagsGet(0U);
thread_flags = 0U;
} else {
@ -1891,7 +1923,7 @@ uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout)
uint32_t thread_flags;
EvrRtxThreadFlagsWait(flags, options, timeout);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR);
thread_flags = (uint32_t)osErrorISR;
} else {
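
A minimal sketch of the thread-flags pattern these wrappers expose, including the ISR-safe set path (handler and flag names illustrative):

#include "cmsis_os2.h"

#define EVT_RX 0x01U                        // illustrative flag

static osThreadId_t worker_id;

void UARTX_IRQHandler (void) {              // illustrative ISR name
  (void)osThreadFlagsSet(worker_id, EVT_RX);  // routed via isrRtxThreadFlagsSet
}

static void worker (void *argument) {
  (void)argument;
  for (;;) {
    uint32_t flags = osThreadFlagsWait(EVT_RX, osFlagsWaitAny, osWaitForever);
    if ((flags & osFlagsError) == 0U) {     // top bit set marks an error code
      // handle EVT_RX
    }
  }
}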

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2019 Arm Limited. All rights reserved.
* Copyright (c) 2013-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,7 +27,7 @@
// OS Runtime Object Memory Usage
#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)))
#ifdef RTX_OBJ_MEM_USAGE
osRtxObjectMemUsage_t osRtxTimerMemUsage \
__attribute__((section(".data.os.timer.obj"))) =
{ 0U, 0U, 0U };
@ -93,8 +93,9 @@ static void TimerUnlink (const os_timer_t *timer) {
/// Timer Tick (called each SysTick).
static void osRtxTimerTick (void) {
os_timer_t *timer;
osStatus_t status;
os_thread_t *thread_running;
os_timer_t *timer;
osStatus_t status;
timer = osRtxInfo.timer.list;
if (timer == NULL) {
@ -102,12 +103,21 @@ static void osRtxTimerTick (void) {
return;
}
thread_running = osRtxThreadGetRunning();
timer->tick--;
while ((timer != NULL) && (timer->tick == 0U)) {
TimerUnlink(timer);
status = osMessageQueuePut(osRtxInfo.timer.mq, &timer->finfo, 0U, 0U);
if (status != osOK) {
(void)osRtxErrorNotify(osRtxErrorTimerQueueOverflow, timer);
const os_thread_t *thread = osRtxThreadGetRunning();
osRtxThreadSetRunning(osRtxInfo.thread.run.next);
(void)osRtxKernelErrorNotify(osRtxErrorTimerQueueOverflow, timer);
if (osRtxThreadGetRunning() == NULL) {
if (thread_running == thread) {
thread_running = NULL;
}
}
}
if (timer->type == osRtxTimerPeriodic) {
TimerInsert(timer, timer->load);
@ -116,22 +126,37 @@ static void osRtxTimerTick (void) {
}
timer = osRtxInfo.timer.list;
}
osRtxThreadSetRunning(thread_running);
}
/// Setup Timer Thread objects.
//lint -esym(714,osRtxTimerSetup) "Referenced from library configuration"
//lint -esym(759,osRtxTimerSetup) "Prototype in header"
//lint -esym(765,osRtxTimerSetup) "Global scope"
int32_t osRtxTimerSetup (void) {
int32_t ret = -1;
if (osRtxMessageQueueTimerSetup() == 0) {
osRtxInfo.timer.tick = osRtxTimerTick;
ret = 0;
}
return ret;
}
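
For orientation, the user-facing API that feeds this machinery; the callback runs in the timer thread served by the message queue that osRtxTimerTick() fills (names illustrative):

#include "cmsis_os2.h"

static void blink_cb (void *argument) {  // executes in the RTX timer thread
  (void)argument;
}

void start_blinking (void) {
  osTimerId_t tim = osTimerNew(blink_cb, osTimerPeriodic, NULL, NULL);
  (void)osTimerStart(tim, 100U);         // period in kernel ticks
}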
/// Timer Thread
__WEAK __NO_RETURN void osRtxTimerThread (void *argument) {
os_timer_finfo_t finfo;
osStatus_t status;
(void) argument;
osRtxInfo.timer.mq = osRtxMessageQueueId(
osMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr)
);
osRtxInfo.timer.tick = osRtxTimerTick;
//lint -esym(714,osRtxTimerThread) "Referenced from library configuration"
//lint -esym(759,osRtxTimerThread) "Prototype in header"
//lint -esym(765,osRtxTimerThread) "Global scope"
__NO_RETURN void osRtxTimerThread (void *argument) {
os_timer_finfo_t finfo;
osStatus_t status;
osMessageQueueId_t mq = (osMessageQueueId_t)argument;
for (;;) {
//lint -e{934} "Taking address of near auto variable"
status = osMessageQueueGet(osRtxInfo.timer.mq, &finfo, NULL, osWaitForever);
status = osMessageQueueGet(mq, &finfo, NULL, osWaitForever);
if (status == osOK) {
EvrRtxTimerCallback(finfo.func, finfo.arg);
(finfo.func)(finfo.arg);
@ -188,7 +213,7 @@ static osTimerId_t svcRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void
//lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5]
timer = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_timer_t), 1U);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
if (timer != NULL) {
uint32_t used;
osRtxTimerMemUsage.cnt_alloc++;
@ -353,7 +378,7 @@ static osStatus_t svcRtxTimerDelete (osTimerId_t timer_id) {
} else {
(void)osRtxMemoryFree(osRtxInfo.mem.common, timer);
}
#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
#ifdef RTX_OBJ_MEM_USAGE
osRtxTimerMemUsage.cnt_free++;
#endif
}
@ -381,7 +406,7 @@ osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument,
osTimerId_t timer_id;
EvrRtxTimerNew(func, type, argument, attr);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerError(NULL, (int32_t)osErrorISR);
timer_id = NULL;
} else {
@ -394,7 +419,7 @@ osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument,
const char *osTimerGetName (osTimerId_t timer_id) {
const char *name;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerGetName(timer_id, NULL);
name = NULL;
} else {
@ -408,7 +433,7 @@ osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) {
osStatus_t status;
EvrRtxTimerStart(timer_id, ticks);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -422,7 +447,7 @@ osStatus_t osTimerStop (osTimerId_t timer_id) {
osStatus_t status;
EvrRtxTimerStop(timer_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {
@ -435,7 +460,7 @@ osStatus_t osTimerStop (osTimerId_t timer_id) {
uint32_t osTimerIsRunning (osTimerId_t timer_id) {
uint32_t is_running;
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerIsRunning(timer_id, 0U);
is_running = 0U;
} else {
@ -449,7 +474,7 @@ osStatus_t osTimerDelete (osTimerId_t timer_id) {
osStatus_t status;
EvrRtxTimerDelete(timer_id);
if (IsIrqMode() || IsIrqMasked()) {
if (IsException() || IsIrqMasked()) {
EvrRtxTimerError(timer_id, (int32_t)osErrorISR);
status = osErrorISR;
} else {

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file os_systick.c
* @brief CMSIS OS Tick SysTick implementation
* @version V1.0.2
* @date 6. March 2020
* @version V1.0.3
* @date 19. March 2021
******************************************************************************/
/*
* Copyright (c) 2017-2020 ARM Limited. All rights reserved.
* Copyright (c) 2017-2021 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -34,7 +34,7 @@
#define SYSTICK_IRQ_PRIORITY 0xFFU
#endif
static uint8_t PendST;
static uint8_t PendST __attribute__((section(".bss.os")));
// Setup OS Tick.
__WEAK int32_t OS_Tick_Setup (uint32_t freq, IRQHandler_t handler) {
@ -127,7 +127,7 @@ __WEAK uint32_t OS_Tick_GetCount (void) {
// Get OS Tick overflow status.
__WEAK uint32_t OS_Tick_GetOverflow (void) {
return ((SysTick->CTRL >> 16) & 1U);
return ((SCB->ICSR & SCB_ICSR_PENDSTSET_Msk) >> SCB_ICSR_PENDSTSET_Pos);
}
#endif // SysTick
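
One common consumer of these two functions is a high-resolution timestamp; a sketch of the usual pattern ('kernel_ticks' is an assumed external tick counter, not part of this API):

#include "os_tick.h"

extern volatile uint32_t kernel_ticks;  // assumption: maintained by the kernel

uint32_t timestamp (void) {
  uint32_t ticks = kernel_ticks;
  uint32_t count = OS_Tick_GetCount();
  if (OS_Tick_GetOverflow() != 0U) {
    // A tick interrupt is pending (PENDSTSET above): re-sample the counter
    // and account for the not-yet-processed tick.
    count = OS_Tick_GetCount();
    ticks++;
  }
  return (ticks * OS_Tick_GetInterval()) + count;
}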

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file cmsis_armcc.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.0.4
* @date 30. July 2019
* @version V1.0.5
* @date 05. May 2021
******************************************************************************/
/*
* Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -296,6 +296,34 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(in
/* ########################### Core Function Access ########################### */
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __enable_irq(); */
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __disable_irq(void); */
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/**
\brief Get FPSCR (Floating Point Status/Control)
\return Floating Point Status/Control register value

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file cmsis_armclang.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.2.0
* @date 05. August 2019
* @version V1.2.1
* @date 05. May 2021
******************************************************************************/
/*
* Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -27,10 +27,6 @@
#pragma clang system_header /* treat file as system include file */
#ifndef __ARM_COMPAT_H
#include <arm_compat.h> /* Compatibility header for Arm Compiler 5 intrinsics */
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
@ -372,6 +368,50 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
/* ########################### Core Function Access ########################### */
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#ifndef __ARM_COMPAT_H
__STATIC_FORCEINLINE void __enable_irq(void)
{
__ASM volatile ("cpsie i" : : : "memory");
}
#endif
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#ifndef __ARM_COMPAT_H
__STATIC_FORCEINLINE void __disable_irq(void)
{
__ASM volatile ("cpsid i" : : : "memory");
}
#endif
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
__ASM volatile ("cpsie f" : : : "memory");
}
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
__ASM volatile ("cpsid f" : : : "memory");
}
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
@ -401,7 +441,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void)
*/
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}
/** \brief Get Mode
@ -409,7 +449,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
@ -423,7 +463,7 @@ __STATIC_FORCEINLINE void __set_mode(uint32_t mode)
/** \brief Get Stack Pointer
\return Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP()
__STATIC_FORCEINLINE uint32_t __get_SP(void)
{
uint32_t result;
__ASM volatile("MOV %0, sp" : "=r" (result) : : "memory");
@ -441,7 +481,7 @@ __STATIC_FORCEINLINE void __set_SP(uint32_t stack)
/** \brief Get USR/SYS Stack Pointer
\return USR/SYS Stack Pointer value
*/
__STATIC_FORCEINLINE uint32_t __get_SP_usr()
__STATIC_FORCEINLINE uint32_t __get_SP_usr(void)
{
uint32_t cpsr;
uint32_t result;
@ -546,7 +586,7 @@ __STATIC_INLINE void __FPU_Enable(void)
" VMOV D14,R2,R2 \n"
" VMOV D15,R2,R2 \n"
#if __ARM_NEON == 1
#if (defined(__ARM_NEON) && (__ARM_NEON == 1))
//Initialise D32 registers to 0
" VMOV D16,R2,R2 \n"
" VMOV D17,R2,R2 \n"

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file cmsis_gcc.h
* @brief CMSIS compiler specific macros, functions, instructions
* @version V1.3.0
* @date 17. December 2019
* @version V1.3.1
* @date 05. May 2021
******************************************************************************/
/*
* Copyright (c) 2009-2019 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -37,7 +37,6 @@
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
@ -57,7 +56,7 @@
#define __NO_RETURN __attribute__((__noreturn__))
#endif
#ifndef CMSIS_DEPRECATED
#define CMSIS_DEPRECATED __attribute__((deprecated))
#endif
#ifndef __USED
#define __USED __attribute__((used))
@ -433,10 +432,11 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value)
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
op2 %= 32U;
if (op2 == 0U) {
if (op2 == 0U)
{
return op1;
}
return (op1 >> op2) | (op1 << (32U - op2));
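
A quick sanity check of the rotate-right semantics implemented above (values arbitrary):

#include <assert.h>
#include <stdint.h>

void ror_selftest (void) {
  assert(__ROR(0x12345678U, 8U)  == 0x78123456U);  // low byte wraps to the top
  assert(__ROR(0xDEADBEEFU, 0U)  == 0xDEADBEEFU);  // op2 %= 32U handles 0
  assert(__ROR(0xDEADBEEFU, 32U) == 0xDEADBEEFU);  // and whole multiples of 32
}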
@ -448,7 +448,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __ASM volatile ("bkpt "#value)
/**
\brief Reverse bit order of value
@ -669,16 +669,36 @@ __STATIC_FORCEINLINE void __enable_irq(void)
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __disable_irq(void)
{
__ASM volatile ("cpsid i" : : : "memory");
}
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __enable_fault_irq(void)
{
__ASM volatile ("cpsie f" : : : "memory");
}
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
__STATIC_FORCEINLINE void __disable_fault_irq(void)
{
__ASM volatile ("cpsid f" : : : "memory");
}
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
@ -702,8 +722,8 @@ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
@ -736,7 +756,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void)
*/
__STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr)
{
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
}
/** \brief Get Mode
@ -744,7 +764,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory");
*/
__STATIC_FORCEINLINE uint32_t __get_mode(void)
{
return (__get_CPSR() & 0x1FU);
}
/** \brief Set Mode
@ -810,7 +830,7 @@ __STATIC_FORCEINLINE uint32_t __get_FPEXC(void)
{
#if (__FPU_PRESENT == 1)
uint32_t result;
__ASM volatile("VMRS %0, fpexc" : "=r" (result) );
__ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory");
return(result);
#else
return(0);
@ -833,8 +853,8 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc)
#define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" )
#define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" )
#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" )
#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" )
#include "cmsis_cp15.h"

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file irq_ctrl_gic.c
* @brief Interrupt controller handling implementation for GIC
* @version V1.1.0
* @date 03. March 2020
* @version V1.1.1
* @date 29. March 2021
******************************************************************************/
/*
* Copyright (c) 2017-2020 ARM Limited. All rights reserved.
* Copyright (c) 2017-2021 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -184,7 +184,7 @@ __WEAK int32_t IRQ_SetMode (IRQn_ID_t irqn, uint32_t mode) {
if (val == IRQ_MODE_CPU_ALL) {
cpu = 0xFFU;
} else {
cpu = val >> IRQ_MODE_CPU_Pos;
cpu = (uint8_t)(val >> IRQ_MODE_CPU_Pos);
}
// Apply configuration if no mode error

View File

@ -1,11 +1,11 @@
/******************************************************************************
* @file cachel1_armv7.h
* @brief CMSIS Level 1 Cache API for Armv7-M and later
* @version V1.0.0
* @date 03. March 2020
* @version V1.0.1
* @date 19. April 2021
******************************************************************************/
/*
* Copyright (c) 2020 Arm Limited. All rights reserved.
* Copyright (c) 2020-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -48,7 +48,7 @@
#ifndef __SCB_ICACHE_LINE_SIZE
#define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
#endif
#endif
/**
\brief Enable I-Cache
@ -112,7 +112,7 @@ __STATIC_FORCEINLINE void SCB_InvalidateICache (void)
\param[in] addr address
\param[in] isize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize)
__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (volatile void *addr, int32_t isize)
{
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
if ( isize > 0 ) {
@ -325,13 +325,13 @@ __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void)
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize)
__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (volatile void *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
@ -355,13 +355,13 @@ __STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsiz
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)
__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (volatile void *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
@ -385,13 +385,13 @@ __STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize
\param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)
__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (volatile void *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
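
With the signatures above now taking volatile void *, a typical DMA buffer sequence no longer needs pointer casts. A sketch, assuming a 32-byte D-cache line and an illustrative DMA driver:

// Sketch: DMA-coherent buffer handling using the by-address operations above.
static uint8_t buf[64] __ALIGNED(32);   // align to the 32-byte D-cache line

void dma_tx_start (void) {
  SCB_CleanDCache_by_Addr(buf, sizeof(buf));       // push CPU writes to memory
  // ... start DMA reading from buf (driver call illustrative) ...
}

void dma_rx_complete (void) {
  SCB_InvalidateDCache_by_Addr(buf, sizeof(buf));  // drop stale cache lines
  // ... CPU may now read the DMA-written data ...
}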

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file cmsis_armcc.h
* @brief CMSIS compiler ARMCC (Arm Compiler 5) header file
* @version V5.2.1
* @date 26. March 2020
* @version V5.3.2
* @date 27. May 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -63,9 +63,9 @@
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE static __forceinline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __declspec(noreturn)
#endif
@ -131,279 +131,6 @@
#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
#endif
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
@{
*/
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __enable_irq(); */
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __disable_irq(); */
/**
\brief Get Control Register
\details Returns the content of the Control Register.
\return Control Register value
*/
__STATIC_INLINE uint32_t __get_CONTROL(void)
{
register uint32_t __regControl __ASM("control");
return(__regControl);
}
/**
\brief Set Control Register
\details Writes the given value to the Control Register.
\param [in] control Control Register value to set
*/
__STATIC_INLINE void __set_CONTROL(uint32_t control)
{
register uint32_t __regControl __ASM("control");
__regControl = control;
}
/**
\brief Get IPSR Register
\details Returns the content of the IPSR Register.
\return IPSR Register value
*/
__STATIC_INLINE uint32_t __get_IPSR(void)
{
register uint32_t __regIPSR __ASM("ipsr");
return(__regIPSR);
}
/**
\brief Get APSR Register
\details Returns the content of the APSR Register.
\return APSR Register value
*/
__STATIC_INLINE uint32_t __get_APSR(void)
{
register uint32_t __regAPSR __ASM("apsr");
return(__regAPSR);
}
/**
\brief Get xPSR Register
\details Returns the content of the xPSR Register.
\return xPSR Register value
*/
__STATIC_INLINE uint32_t __get_xPSR(void)
{
register uint32_t __regXPSR __ASM("xpsr");
return(__regXPSR);
}
/**
\brief Get Process Stack Pointer
\details Returns the current value of the Process Stack Pointer (PSP).
\return PSP Register value
*/
__STATIC_INLINE uint32_t __get_PSP(void)
{
register uint32_t __regProcessStackPointer __ASM("psp");
return(__regProcessStackPointer);
}
/**
\brief Set Process Stack Pointer
\details Assigns the given value to the Process Stack Pointer (PSP).
\param [in] topOfProcStack Process Stack Pointer value to set
*/
__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
register uint32_t __regProcessStackPointer __ASM("psp");
__regProcessStackPointer = topOfProcStack;
}
/**
\brief Get Main Stack Pointer
\details Returns the current value of the Main Stack Pointer (MSP).
\return MSP Register value
*/
__STATIC_INLINE uint32_t __get_MSP(void)
{
register uint32_t __regMainStackPointer __ASM("msp");
return(__regMainStackPointer);
}
/**
\brief Set Main Stack Pointer
\details Assigns the given value to the Main Stack Pointer (MSP).
\param [in] topOfMainStack Main Stack Pointer value to set
*/
__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
register uint32_t __regMainStackPointer __ASM("msp");
__regMainStackPointer = topOfMainStack;
}
/**
\brief Get Priority Mask
\details Returns the current state of the priority mask bit from the Priority Mask Register.
\return Priority Mask value
*/
__STATIC_INLINE uint32_t __get_PRIMASK(void)
{
register uint32_t __regPriMask __ASM("primask");
return(__regPriMask);
}
/**
\brief Set Priority Mask
\details Assigns the given value to the Priority Mask Register.
\param [in] priMask Priority Mask
*/
__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
register uint32_t __regPriMask __ASM("primask");
__regPriMask = (priMask);
}
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/**
\brief Get Base Priority
\details Returns the current value of the Base Priority register.
\return Base Priority register value
*/
__STATIC_INLINE uint32_t __get_BASEPRI(void)
{
register uint32_t __regBasePri __ASM("basepri");
return(__regBasePri);
}
/**
\brief Set Base Priority
\details Assigns the given value to the Base Priority register.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
{
register uint32_t __regBasePri __ASM("basepri");
__regBasePri = (basePri & 0xFFU);
}
/**
\brief Set Base Priority with condition
\details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
or the new value increases the BASEPRI priority level.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
register uint32_t __regBasePriMax __ASM("basepri_max");
__regBasePriMax = (basePri & 0xFFU);
}
/**
\brief Get Fault Mask
\details Returns the current value of the Fault Mask register.
\return Fault Mask register value
*/
__STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
register uint32_t __regFaultMask __ASM("faultmask");
return(__regFaultMask);
}
/**
\brief Set Fault Mask
\details Assigns the given value to the Fault Mask register.
\param [in] faultMask Fault Mask value to set
*/
__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
register uint32_t __regFaultMask __ASM("faultmask");
__regFaultMask = (faultMask & (uint32_t)1U);
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#else
return(0U);
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#else
(void)fpscr;
#endif
}
/*@} end of CMSIS_Core_RegAccFunctions */
/* ########################## Core Instruction Access ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
Access to dedicated instructions
@ -461,7 +188,7 @@ __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
*/
#define __DMB() __dmb(0xF)
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
@ -799,6 +526,280 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint
/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
@{
*/
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing special-purpose register PRIMASK.
Can only be executed in Privileged modes.
*/
/* intrinsic void __enable_irq(); */
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting special-purpose register PRIMASK.
Can only be executed in Privileged modes.
*/
/* intrinsic void __disable_irq(); */
/**
\brief Get Control Register
\details Returns the content of the Control Register.
\return Control Register value
*/
__STATIC_INLINE uint32_t __get_CONTROL(void)
{
register uint32_t __regControl __ASM("control");
return(__regControl);
}
/**
\brief Set Control Register
\details Writes the given value to the Control Register.
\param [in] control Control Register value to set
*/
__STATIC_INLINE void __set_CONTROL(uint32_t control)
{
register uint32_t __regControl __ASM("control");
__regControl = control;
__ISB();
}
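
The __ISB() added after the CONTROL write matters for sequences like the classic drop-to-unprivileged idiom (a sketch; function name illustrative):

// Sketch: switch the current thread to unprivileged execution. The barrier
// inside __set_CONTROL() above guarantees the new privilege level takes
// effect before the next instruction.
void drop_privileges (void) {
  __set_CONTROL(__get_CONTROL() | 1U);  // CONTROL.nPRIV = 1
}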
/**
\brief Get IPSR Register
\details Returns the content of the IPSR Register.
\return IPSR Register value
*/
__STATIC_INLINE uint32_t __get_IPSR(void)
{
register uint32_t __regIPSR __ASM("ipsr");
return(__regIPSR);
}
/**
\brief Get APSR Register
\details Returns the content of the APSR Register.
\return APSR Register value
*/
__STATIC_INLINE uint32_t __get_APSR(void)
{
register uint32_t __regAPSR __ASM("apsr");
return(__regAPSR);
}
/**
\brief Get xPSR Register
\details Returns the content of the xPSR Register.
\return xPSR Register value
*/
__STATIC_INLINE uint32_t __get_xPSR(void)
{
register uint32_t __regXPSR __ASM("xpsr");
return(__regXPSR);
}
/**
\brief Get Process Stack Pointer
\details Returns the current value of the Process Stack Pointer (PSP).
\return PSP Register value
*/
__STATIC_INLINE uint32_t __get_PSP(void)
{
register uint32_t __regProcessStackPointer __ASM("psp");
return(__regProcessStackPointer);
}
/**
\brief Set Process Stack Pointer
\details Assigns the given value to the Process Stack Pointer (PSP).
\param [in] topOfProcStack Process Stack Pointer value to set
*/
__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
register uint32_t __regProcessStackPointer __ASM("psp");
__regProcessStackPointer = topOfProcStack;
}
/**
\brief Get Main Stack Pointer
\details Returns the current value of the Main Stack Pointer (MSP).
\return MSP Register value
*/
__STATIC_INLINE uint32_t __get_MSP(void)
{
register uint32_t __regMainStackPointer __ASM("msp");
return(__regMainStackPointer);
}
/**
\brief Set Main Stack Pointer
\details Assigns the given value to the Main Stack Pointer (MSP).
\param [in] topOfMainStack Main Stack Pointer value to set
*/
__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
register uint32_t __regMainStackPointer __ASM("msp");
__regMainStackPointer = topOfMainStack;
}
/**
\brief Get Priority Mask
\details Returns the current state of the priority mask bit from the Priority Mask Register.
\return Priority Mask value
*/
__STATIC_INLINE uint32_t __get_PRIMASK(void)
{
register uint32_t __regPriMask __ASM("primask");
return(__regPriMask);
}
/**
\brief Set Priority Mask
\details Assigns the given value to the Priority Mask Register.
\param [in] priMask Priority Mask
*/
__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
register uint32_t __regPriMask __ASM("primask");
__regPriMask = (priMask);
}
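A common pattern built on these two accessors is a nestable critical section that restores the caller's interrupt state instead of unconditionally re-enabling; a minimal sketch:
uint32_t primask = __get_PRIMASK();   /* remember current state      */
__disable_irq();                      /* PRIMASK = 1, mask all IRQs  */
/* ... touch data shared with interrupt handlers ...               */
__set_PRIMASK(primask);               /* restore, possibly still set */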
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing special-purpose register FAULTMASK.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting special-purpose register FAULTMASK.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/**
\brief Get Base Priority
\details Returns the current value of the Base Priority register.
\return Base Priority register value
*/
__STATIC_INLINE uint32_t __get_BASEPRI(void)
{
register uint32_t __regBasePri __ASM("basepri");
return(__regBasePri);
}
/**
\brief Set Base Priority
\details Assigns the given value to the Base Priority register.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
{
register uint32_t __regBasePri __ASM("basepri");
__regBasePri = (basePri & 0xFFU);
}
/**
\brief Set Base Priority with condition
\details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
or the new value increases the BASEPRI priority level.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
register uint32_t __regBasePriMax __ASM("basepri_max");
__regBasePriMax = (basePri & 0xFFU);
}
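The _MAX variant only writes BASEPRI when doing so raises the masking level, so nested critical sections cannot accidentally lower it; a sketch (the value 0x40 is illustrative, and higher values mean lower priority):
uint32_t basepri = __get_BASEPRI();
__set_BASEPRI_MAX(0x40U);   /* mask priority 0x40 and below              */
/* ... region that must not be preempted by low-priority IRQs ...       */
__set_BASEPRI(basepri);     /* unconditional restore                     */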
/**
\brief Get Fault Mask
\details Returns the current value of the Fault Mask register.
\return Fault Mask register value
*/
__STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
register uint32_t __regFaultMask __ASM("faultmask");
return(__regFaultMask);
}
/**
\brief Set Fault Mask
\details Assigns the given value to the Fault Mask register.
\param [in] faultMask Fault Mask value to set
*/
__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
register uint32_t __regFaultMask __ASM("faultmask");
__regFaultMask = (faultMask & (uint32_t)1U);
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#else
return(0U);
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#else
(void)fpscr;
#endif
}
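A sketch of using the pair to test and clear the cumulative exception flags (IOC bit 0, DZC bit 1, OFC bit 2, UFC bit 3, IXC bit 4, IDC bit 7); on builds without an FPU the accessors degrade to read-zero/write-ignore as shown above:
uint32_t fpscr = __get_FPSCR();
if (fpscr & 0x1U) {
    /* an invalid operation occurred since the flags were last cleared */
}
__set_FPSCR(fpscr & ~0x9FUL);   /* clear all six cumulative flags */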
/*@} end of CMSIS_Core_RegAccFunctions */
/* ################### Compiler specific Intrinsics ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
Access to dedicated SIMD instructions
@ -878,6 +879,8 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint
#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
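For example, rotating by 8 before the extraction sign-extends bytes 1 and 3 of a packed word in a single step:
uint32_t packed = 0x80017F02UL;
uint32_t ext13  = __SXTB16_RORn(packed, 8U);   /* yields 0xFF80007FUL */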
/*@} end of group CMSIS_SIMD_intrinsics */

File diff suppressed because it is too large.

File diff suppressed because it is too large.

View File

@ -1,14 +1,14 @@
/**************************************************************************//**
* @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
* @version V5.2.0
* @date 28. January 2020
* @version V5.3.0
* @date 14. April 2021
******************************************************************************/
//------------------------------------------------------------------------------
//
// Copyright (c) 2017-2019 IAR Systems
// Copyright (c) 2017-2019 Arm Limited. All rights reserved.
// Copyright (c) 2017-2021 IAR Systems
// Copyright (c) 2017-2021 Arm Limited. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
@ -238,6 +238,7 @@ __packed struct __iar_u32 { uint32_t v; };
#endif
#endif
#undef __WEAK /* undo the definition from DLib_Defaults.h */
#ifndef __WEAK
#if __ICCARM_V8
#define __WEAK __attribute__((weak))
@ -266,6 +267,24 @@ __packed struct __iar_u32 { uint32_t v; };
#define __VECTOR_TABLE_ATTRIBUTE @".intvec"
#endif
#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
#ifndef __STACK_SEAL
#define __STACK_SEAL STACKSEAL$$Base
#endif
#ifndef __TZ_STACK_SEAL_SIZE
#define __TZ_STACK_SEAL_SIZE 8U
#endif
#ifndef __TZ_STACK_SEAL_VALUE
#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL
#endif
__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) {
*((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE;
}
#endif
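A start-up sketch under the defaults above: the linker is assumed to export the STACKSEAL$$Base symbol behind __STACK_SEAL and to reserve __TZ_STACK_SEAL_SIZE bytes for it.
extern uint32_t __STACK_SEAL;          /* linker-provided seal location */
__TZ_set_STACKSEAL_S(&__STACK_SEAL);   /* writes the 64-bit seal value  */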
#ifndef __ICCARM_INTRINSICS_VERSION__
#define __ICCARM_INTRINSICS_VERSION__ 0
#endif
@ -336,7 +355,13 @@ __packed struct __iar_u32 { uint32_t v; };
#define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE)))
#define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE)))
#define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE)))
__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control)
{
__arm_wsr("CONTROL", control);
__iar_builtin_ISB();
}
#define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE)))
#define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE)))
@ -358,7 +383,13 @@ __packed struct __iar_u32 { uint32_t v; };
#endif
#define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS"))
#define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE)))
__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control)
{
__arm_wsr("CONTROL_NS", control);
__iar_builtin_ISB();
}
#define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS"))
#define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE)))
#define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS"))
@ -680,6 +711,7 @@ __packed struct __iar_u32 { uint32_t v; };
__IAR_FT void __TZ_set_CONTROL_NS(uint32_t value)
{
__asm volatile("MSR CONTROL_NS,%0" :: "r" (value));
__iar_builtin_ISB();
}
__IAR_FT uint32_t __TZ_get_PSP_NS(void)
@ -965,4 +997,6 @@ __packed struct __iar_u32 { uint32_t v; };
#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3))
#endif /* __CMSIS_ICCARM_H__ */

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_armv81mml.h
* @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File
* @version V1.3.1
* @date 27. March 2020
* @version V1.4.1
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2018-2020 Arm Limited. All rights reserved.
* Copyright (c) 2018-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -210,14 +210,14 @@
#define __FPU_PRESENT 0U
#warning "__FPU_PRESENT not defined in device header file; using default!"
#endif
#if __FPU_PRESENT != 0U
#ifndef __FPU_DP
#define __FPU_DP 0U
#warning "__FPU_DP not defined in device header file; using default!"
#endif
#endif
#ifndef __MPU_PRESENT
#define __MPU_PRESENT 0U
#warning "__MPU_PRESENT not defined in device header file; using default!"
@ -232,7 +232,7 @@
#define __DCACHE_PRESENT 0U
#warning "__DCACHE_PRESENT not defined in device header file; using default!"
#endif
#ifndef __PMU_PRESENT
#define __PMU_PRESENT 0U
#warning "__PMU_PRESENT not defined in device header file; using default!"
@ -261,7 +261,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -766,22 +766,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
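A sketch of decoding these bits in a fault handler (the reporting is a placeholder; CFSR bits are write-one-to-clear):
void MemManage_Handler(void)
{
    uint32_t cfsr = SCB->CFSR;
    if (cfsr & SCB_CFSR_MMARVALID_Msk) {
        uint32_t fault_addr = SCB->MMFAR;   /* faulting address is valid */
        (void)fault_addr;
    }
    if (cfsr & SCB_CFSR_DACCVIOL_Msk) {
        /* data access violation */
    }
    SCB->CFSR = cfsr & SCB_CFSR_MEMFAULTSR_Msk;   /* clear MMFSR bits */
}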
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -1508,12 +1508,12 @@ typedef struct
/** \brief PMU Event Counter Registers (0-30) Definitions */
#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */
#define PMU_EVCNTR_CNT_Msk (16UL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */
#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */
/** \brief PMU Event Type and Filter Registers (0-30) Definitions */
#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */
#define PMU_EVTYPER_EVENTTOCNT_Msk (16UL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */
#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */
/** \brief PMU Count Enable Set Register Definitions */
@ -2221,10 +2221,10 @@ typedef struct
/** \brief PMU Type Register Definitions */
#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */
#define PMU_TYPE_NUM_CNTS_Msk (8UL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */
#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */
#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */
#define PMU_TYPE_SIZE_CNTS_Msk (6UL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */
#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */
#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */
#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */
@ -2235,6 +2235,32 @@ typedef struct
#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */
#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */
/** \brief PMU Authentication Status Register Definitions */
#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */
#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */
#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */
#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */
#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */
#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */
#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */
#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */
#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */
#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */
#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */
#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */
#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */
/*@} end of group CMSIS_PMU */
#endif
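As an illustrative read of one of the new fields (whether software may access PMU->AUTHSTATUS is device-dependent, so treat this as a sketch only):
uint32_t snid = (PMU->AUTHSTATUS & PMU_AUTHSTATUS_SNID_Msk)
                >> PMU_AUTHSTATUS_SNID_Pos;   /* secure non-invasive debug */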

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_armv8mml.h
* @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File
* @version V5.2.0
* @date 27. March 2020
* @version V5.2.2
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -254,7 +254,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -545,6 +545,7 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
__OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */
} SCB_Type;
/* SCB CPUID Register Definitions */
@ -745,22 +746,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -2939,7 +2940,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void)
@{
*/
/**
\brief Set Debug Authentication Control Register
\details Writes to the Debug Authentication Control register.
@ -3006,7 +3007,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void)
@{
*/
/**
\brief Get Debug Authentication Status Register
\details Reads the Debug Authentication Status register.

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_cm3.h
* @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File
* @version V5.1.1
* @date 27. March 2020
* @version V5.1.2
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -146,7 +146,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -565,19 +565,19 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_cm33.h
* @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File
* @version V5.2.0
* @date 27. March 2020
* @version V5.2.2
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -254,7 +254,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -545,6 +545,7 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
__OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */
} SCB_Type;
/* SCB CPUID Register Definitions */
@ -745,22 +746,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -3007,7 +3008,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void)
@{
*/
/**
\brief Set Debug Authentication Control Register
\details Writes to the Debug Authentication Control register.
@ -3074,7 +3075,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void)
@{
*/
/**
\brief Get Debug Authentication Status Register
\details Reads the Debug Authentication Status register.

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_cm35p.h
* @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File
* @version V1.1.0
* @date 27. March 2020
* @version V1.1.2
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2018-2020 Arm Limited. All rights reserved.
* Copyright (c) 2018-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -249,12 +249,12 @@
#define __DSP_PRESENT 0U
#warning "__DSP_PRESENT not defined in device header file; using default!"
#endif
#ifndef __VTOR_PRESENT
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -545,6 +545,7 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
__OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */
} SCB_Type;
/* SCB CPUID Register Definitions */
@ -745,22 +746,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -3007,7 +3008,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void)
@{
*/
/**
\brief Set Debug Authentication Control Register
\details Writes to the Debug Authentication Control register.
@ -3074,7 +3075,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void)
@{
*/
/**
\brief Get Debug Authentication Status Register
\details Reads the Debug Authentication Status register.

View File

@ -1,8 +1,8 @@
/**************************************************************************//**
* @file core_cm4.h
* @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File
* @version V5.1.1
* @date 27. March 2020
* @version V5.1.2
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
@ -198,7 +198,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -623,22 +623,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_cm55.h
* @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File
* @version V1.0.0
* @date 27. March 2020
* @version V1.2.1
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2018-2020 Arm Limited. All rights reserved.
* Copyright (c) 2018-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -210,7 +210,7 @@
#define __FPU_PRESENT 0U
#warning "__FPU_PRESENT not defined in device header file; using default!"
#endif
#if __FPU_PRESENT != 0U
#ifndef __FPU_DP
#define __FPU_DP 0U
@ -232,12 +232,12 @@
#define __DCACHE_PRESENT 0U
#warning "__DCACHE_PRESENT not defined in device header file; using default!"
#endif
#ifndef __VTOR_PRESENT
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __PMU_PRESENT
#define __PMU_PRESENT 0U
#warning "__PMU_PRESENT not defined in device header file; using default!"
@ -766,22 +766,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -1349,6 +1349,40 @@ typedef struct
/*@}*/ /* end of group CMSIS_DWT */
/**
\ingroup CMSIS_core_register
\defgroup PwrModCtl_Type Power Mode Control Registers
\brief Type definitions for the Power Mode Control Registers (PWRMODCTL)
@{
*/
/**
\brief Structure type to access the Power Mode Control Registers (PWRMODCTL).
*/
typedef struct
{
__IOM uint32_t CPDLPSTATE;
__IOM uint32_t DPDLPSTATE;
} PwrModCtl_Type;
/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */
#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE CLPSTATE Position */
#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE CLPSTATE Mask */
#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE ELPSTATE Position */
#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE ELPSTATE Mask */
#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE RLPSTATE Position */
#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE RLPSTATE Mask */
/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */
#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE DLPSTATE Position */
#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk 3UL /*!< PWRMODCTL DPDLPSTATE DLPSTATE Mask */
/*@}*/ /* end of group CMSIS_PWRMODCTL */
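A sketch of programming the new block via the PWRMODCTL pointer added further down in this file; the meaning of the 2-bit LPSTATE encodings is implementation-defined, and 0 is used here purely as an illustrative "ON" value:
PWRMODCTL->CPDLPSTATE = (0UL << PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos)
                      | (0UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos)
                      | (0UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos);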
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_TPI Trace Port Interface (TPI)
@ -1508,12 +1542,12 @@ typedef struct
/** \brief PMU Event Counter Registers (0-30) Definitions */
#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */
#define PMU_EVCNTR_CNT_Msk (16UL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */
#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */
/** \brief PMU Event Type and Filter Registers (0-30) Definitions */
#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */
#define PMU_EVTYPER_EVENTTOCNT_Msk (16UL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */
#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */
/** \brief PMU Count Enable Set Register Definitions */
@ -2221,10 +2255,10 @@ typedef struct
/** \brief PMU Type Register Definitions */
#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */
#define PMU_TYPE_NUM_CNTS_Msk (8UL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */
#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */
#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */
#define PMU_TYPE_SIZE_CNTS_Msk (6UL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */
#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */
#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */
#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */
@ -2235,6 +2269,33 @@ typedef struct
#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */
#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */
/** \brief PMU Authentication Status Register Definitions */
#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */
#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */
#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */
#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */
#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */
#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */
#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */
#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */
#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */
#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */
#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */
#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */
#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */
#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */
/*@} end of group CMSIS_PMU */
#endif
@ -3066,6 +3127,7 @@ typedef struct
#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
#define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */
#define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */
#define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */
#define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */
#define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */
#define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */
@ -3081,6 +3143,7 @@ typedef struct
#define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */
#define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */
#define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */
#define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */
#define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */
#define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */
#define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_cm7.h
* @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File
* @version V5.1.2
* @date 27. March 2020
* @version V5.1.6
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -213,7 +213,7 @@
#define __VTOR_PRESENT 1U
#warning "__VTOR_PRESENT not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 3U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
@ -501,7 +501,8 @@ typedef struct
__OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */
__OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */
__OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */
uint32_t RESERVED7[6U];
__OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */
uint32_t RESERVED7[5U];
__IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */
__IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */
__IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */
@ -676,22 +677,22 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */
@ -875,21 +876,24 @@ typedef struct
#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */
#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */
#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */
#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */
#define SCB_CACR_ECCEN_Pos 1U /*!< \deprecated SCB CACR: ECCEN Position */
#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< \deprecated SCB CACR: ECCEN Mask */
#define SCB_CACR_ECCDIS_Pos 1U /*!< SCB CACR: ECCDIS Position */
#define SCB_CACR_ECCDIS_Msk (1UL << SCB_CACR_ECCDIS_Pos) /*!< SCB CACR: ECCDIS Mask */
#define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */
#define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */
/* AHBS Control Register Definitions */
#define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */
#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */
#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */
#define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */
#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */
#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */
#define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/
#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */
#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */
/* Auxiliary Bus Fault Status Register Definitions */
#define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/

View File

@ -1,11 +1,11 @@
/**************************************************************************//**
* @file core_sc300.h
* @brief CMSIS SC300 Core Peripheral Access Layer Header File
* @version V5.0.9
* @date 27. March 2020
* @version V5.0.10
* @date 04. June 2021
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
* Copyright (c) 2009-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -562,19 +562,19 @@ typedef struct
#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */
/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */
#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */
#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */
#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */
#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */
#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */
#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */
#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */
#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */
#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */
/* BusFault Status Register (part of SCB Configurable Fault Status Register) */

View File

@ -1,8 +1,8 @@
/******************************************************************************
* @file mpu_armv7.h
* @brief CMSIS MPU API for Armv7-M MPU
* @version V5.1.1
* @date 10. February 2020
* @version V5.1.2
* @date 25. May 2020
******************************************************************************/
/*
* Copyright (c) 2017-2020 Arm Limited. All rights reserved.
@ -223,7 +223,7 @@ __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
/** Configure an MPU region.
* \param rbar Value for RBAR register.
* \param rsar Value for RSAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
{
@ -234,7 +234,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
/** Configure the given MPU region.
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rsar Value for RSAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
{
@ -243,7 +243,7 @@ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t r
MPU->RASR = rasr;
}
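A configuration sketch using the RBAR/RASR helper macros from this header (the base address and attributes are illustrative):
ARM_MPU_Disable();
ARM_MPU_SetRegion(
    ARM_MPU_RBAR(0UL, 0x20000000UL),    /* region 0              */
    ARM_MPU_RASR(0UL,                   /* execution allowed     */
                 ARM_MPU_AP_FULL,       /* privileged & user R/W */
                 0UL, 0UL, 1UL, 1UL,    /* TEX, S, C, B          */
                 0x00UL,                /* all subregions active */
                 ARM_MPU_REGION_SIZE_32KB));
ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);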
/** Memcopy with strictly ordered memory access, e.g. for register targets.
/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load().
* \param dst Destination data is copied to.
* \param src Source data is copied from.
* \param len Amount of data words to be copied.

View File

@ -1,11 +1,11 @@
/******************************************************************************
* @file mpu_armv8.h
* @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU
* @version V5.1.2
* @date 10. February 2020
* @version V5.1.3
* @date 03. February 2021
******************************************************************************/
/*
* Copyright (c) 2017-2020 Arm Limited. All rights reserved.
* Copyright (c) 2017-2021 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -281,7 +281,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t
}
#endif
/** Memcopy with strictly ordered memory access, e.g. for register targets.
/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_LoadEx().
* \param dst Destination data is copied to.
* \param src Source data is copied from.
* \param len Amount of data words to be copied.

View File

@ -1,8 +1,8 @@
/******************************************************************************
* @file pmu_armv8.h
* @brief CMSIS PMU API for Armv8.1-M PMU
* @version V1.0.0
* @date 24. March 2020
* @version V1.0.1
* @date 15. April 2020
******************************************************************************/
/*
* Copyright (c) 2020 Arm Limited. All rights reserved.
@ -274,7 +274,7 @@ __STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void)
*/
__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num)
{
return PMU->EVCNTR[num];
return PMU_EVCNTR_CNT_Msk & PMU->EVCNTR[num];
}
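A usage sketch for the corrected read, assuming the counter-control API from this header; event number 0x11 (CPU cycles) and counter 0 are illustrative:
ARM_PMU_Enable();
ARM_PMU_Set_EVTYPER(0U, 0x11UL);              /* choose the event       */
ARM_PMU_EVCNTR_ALL_Reset();
ARM_PMU_CNTR_Enable(PMU_CNTENSET_CNT0_ENABLE_Msk);
/* ... code under measurement ...                                       */
uint32_t events = ARM_PMU_Get_EVCNTR(0U);     /* now masked to 16 bits  */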
/**

View File

@ -5,7 +5,7 @@
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2016-2020 Arm Limited. All rights reserved.
* Copyright (c) 2016-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
@ -22,8 +22,6 @@
* limitations under the License.
*/
#if !FEATURE_TFM
#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
#include "RTE_Components.h"
@ -203,5 +201,3 @@ uint32_t TZ_StoreContext_S (TZ_MemoryId_t id) {
return 1U; // Success
}
#endif
#endif // !FEATURE_TFM

View File

@ -14,6 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined(__ARMCC_VERSION)
#include <arm_compat.h>
#endif
#include "platform/LocalFileSystem.h"
#if DEVICE_LOCALFILESYSTEM

View File

@ -15,6 +15,11 @@
* limitations under the License.
*/
// Workaround for CMSIS 5.8.0: the compatibility header must be included before any CMSIS header
#if defined(__ARMCC_VERSION)
# include <arm_compat.h>
#endif
#include <mstd_mutex>
#include <time.h>
#include "platform/platform.h"
@ -53,7 +58,6 @@ struct DIR_impl {
};
#if defined(__ARMCC_VERSION)
# include <arm_compat.h>
# include <rt_sys.h>
# include <rt_misc.h>
# include <stdint.h>

View File

@ -14,6 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined(__ARMCC_VERSION)
#include <arm_compat.h>
#endif
#include "cmsis.h"
#include "platform/mbed_semihost_api.h"

View File

@ -221,9 +221,12 @@ void ThisThread::sleep_for(uint32_t millisec)
void ThisThread::sleep_for(Clock::duration_u32 rel_time)
{
#if MBED_CONF_RTOS_PRESENT
osStatus_t status = osDelay(rel_time.count());
MBED_ASSERT(status == osOK);
(void) status;
uint32_t delay = rel_time.count();
if (delay != 0) {
osStatus_t status = osDelay(delay);
MBED_ASSERT(status == osOK);
(void) status;
}
#else
thread_sleep_for(rel_time.count());
#endif

View File

@ -37,7 +37,7 @@
#define IRQ_MODE_CHECK(is_func_void) \
/* Secure service can't be called in interrupt context. */ \
if (IsIrqMode()) { \
if (IsException()) { \
MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, \
MBED_ERROR_INVALID_OPERATION), \
"GPIO secure service can't be called in interrupt context\n"); \

View File

@ -81,7 +81,7 @@ void pin_function(PinName pin, int function)
MBED_ASSERT(pin != NC);
/* Secure service can't be called in interrupt context. */
if (IsIrqMode()) {
if (IsException()) {
MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL,
MBED_ERROR_INVALID_OPERATION),
"Pin secure service can't be called in interrupt context\n");
@ -127,7 +127,7 @@ void pin_mode(PinName pin, PinMode mode)
MBED_ASSERT(pin != NC);
/* Secure service can't be called in interrupt context. */
if (IsIrqMode()) {
if (IsException()) {
MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL,
MBED_ERROR_INVALID_OPERATION),
"Pin secure service can't be called in interrupt context\n");

View File

@ -37,7 +37,7 @@
#define IRQ_MODE_CHECK(is_func_void) \
/* Secure service can't be called in interrupt context. */ \
if (IsIrqMode()) { \
if (IsException()) { \
MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, \
MBED_ERROR_INVALID_OPERATION), \
"GPIO secure service can't be called in interrupt context\n"); \

View File

@ -21,6 +21,9 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#if defined(__ARMCC_VERSION)
#include <arm_compat.h>
#endif
#include "cy_device.h"

View File

@ -17,11 +17,11 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv6m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv6m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S"
},
{
@ -29,7 +29,7 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S"
},
{
@ -37,19 +37,19 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm4f.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7a.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm0.S",
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv6m.S",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm0.S",
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv6m.S",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S"
},
{
@ -57,7 +57,7 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm3.S",
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7m.S",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S"
},
{
@ -65,35 +65,35 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm4f.S",
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7m.S",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S",
"src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7a.S",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv6m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv6m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl_common.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml_common.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm4f.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7m.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S"
},
{
@ -101,7 +101,7 @@
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c"
},
{
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s",
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7a.s",
"dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S"
},
{
@ -146,12 +146,9 @@
"commit_sha" : [
"4360b7bbf815c4d812005938c9c27af199803a97",
"fb354752eb69403ad503c8e53da67da6483776d6",
"d3f7abdb7c109517e6a71daed8bae63ad6436afc",
"08ab8cc47d8722bf0c767990cd615cf1c427d006",
"dd21ea0ae0559f148d3ff5b1a1937f9d7e0e1138",
"9549fff786475bdcd6ab1d8ac8db1c8618c19f6f",
"96e0689204d375e23bf69d7787a18ba07182f085",
"7149ffed11c0ef6a16f8808f12b7aca16921a66a"
"00580ce3f5d64b76342b7f26deed55e842056ea0",
"c122158d496d59fbd723bdb17bbc7bd5cc95245e"
]
}