From 73a957997fc2def1c7fa538ce972ae808b6da7d1 Mon Sep 17 00:00:00 2001
From: Jaeden Amero
Date: Thu, 25 May 2017 09:48:04 +0100
Subject: [PATCH] RTX5: uVisor: Switch threads very carefully

uVisor doesn't set the PSP of the target thread. The RTOS sets the PSP
of the target thread from the target thread's TCB. However, if an
interrupt with higher priority than PendSV arrives between the call
into uVisor to switch boxes and the RTOS setting the PSP, the uVisor
vIRQ interrupt handler will attempt to use an invalid PSP (the PSP
from before the box and thread switch). This leads to a crash.

Make box and thread switching atomic by disabling interrupts from
immediately before the box switch until immediately after the new PSP
is set.
---
 .../RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S   | 15 +++++++++++++++
 .../TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S     | 15 +++++++++++++++
 rtos/TARGET_CORTEX/rtx5/RTX/Source/rtx_thread.c    | 15 +++++++++++++--
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S b/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S
index 95c5c156cf..6367f59819 100644
--- a/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S
+++ b/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S
@@ -77,12 +77,27 @@ SVC_ContextSave:
         STR     R12,[R1,#TCB_SP_OFS]       // Store SP

 SVC_ContextSwitch:
+#ifdef FEATURE_UVISOR
+        CPSID   I                          // The call to the thread switch helper and PSP loading must be atomic.
+#endif
+        /* The call to thread_switch_helper can clobber R2 and R3, but we don't
+         * want to clobber R2 or R3. We can't save R2 and R3 to the stack (as
+         * the stack we save them onto is likely to be inaccessible after the
+         * call to thread_switch_helper). So, we just re-obtain the values from
+         * osRtxInfo again. */
+        BL      thread_switch_helper
+        LDR     R3,=osRtxInfo+I_T_RUN_OFS  // Load address of osRtxInfo.run
+        LDM     R3,{R1,R2}                 // Load osRtxInfo.thread.run: curr & next
+        STR     R2,[R3]                    // osRtxInfo.thread.run: curr = next

 SVC_ContextRestore:
         LDR     R0,[R2,#TCB_SP_OFS]        // Load SP
         LDMIA   R0!,{R4-R11}               // Restore R4..R11
         MSR     PSP,R0                     // Set PSP
+#ifdef FEATURE_UVISOR
+        CPSIE   I                          // The PSP has been set. Re-enable interrupts.
+#endif
         MVN     LR,#~0xFFFFFFFD            // Set EXC_RETURN value

 SVC_Exit:

diff --git a/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S b/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S
index cd59935afb..118f9eaa1e 100644
--- a/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S
+++ b/rtos/TARGET_CORTEX/rtx5/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S
@@ -94,6 +94,18 @@ SVC_ContextSave:
         STRB    LR, [R1,#TCB_SF_OFS]       // Store stack frame information

 SVC_ContextSwitch:
+#ifdef FEATURE_UVISOR
+        CPSID   I                          // The call to the thread switch helper and PSP loading must be atomic.
+#endif
+        /* The call to thread_switch_helper can clobber R2 and R3, but we don't
+         * want to clobber R2 or R3. We can't save R2 and R3 to the stack (as
+         * the stack we save them onto is likely to be inaccessible after the
+         * call to thread_switch_helper). So, we just re-obtain the values from
+         * osRtxInfo again. */
+        BL      thread_switch_helper
+        LDR     R3,=osRtxInfo+I_T_RUN_OFS  // Load address of osRtxInfo.run
+        LDM     R3,{R1,R2}                 // Load osRtxInfo.thread.run: curr & next
+        STR     R2,[R3]                    // osRtxInfo.thread.run: curr = next

 SVC_ContextRestore:
@@ -108,6 +120,9 @@ SVC_ContextRestore:
 #endif
         LDMIA   R0!,{R4-R11}               // Restore R4..R11
         MSR     PSP,R0                     // Set PSP
+#ifdef FEATURE_UVISOR
+        CPSIE   I                          // The PSP has been set. Re-enable interrupts.
+#endif

 SVC_Exit:
         BX      LR                         // Exit from handler

diff --git a/rtos/TARGET_CORTEX/rtx5/RTX/Source/rtx_thread.c b/rtos/TARGET_CORTEX/rtx5/RTX/Source/rtx_thread.c
index 2c4c4c3999..47e1c88774 100644
--- a/rtos/TARGET_CORTEX/rtx5/RTX/Source/rtx_thread.c
+++ b/rtos/TARGET_CORTEX/rtx5/RTX/Source/rtx_thread.c
@@ -166,7 +166,7 @@ void osRtxThreadListPut (os_object_t *object, os_thread_t *thread) {

 /// Get a Thread with Highest Priority from specified Object list and remove it.
 /// \param[in]  object  generic object.
-/// \return thread object. 
+/// \return thread object.
 os_thread_t *osRtxThreadListGet (os_object_t *object) {
   os_thread_t *thread;

@@ -426,6 +426,17 @@ void osRtxThreadSwitch (os_thread_t *thread) {
   osRtxInfo.thread.run.next = thread;
   osRtxThreadStackCheck();
   EvrRtxThreadSwitched(thread);
+
+  if (osEventObs && osEventObs->thread_switch) {
+    osEventObs->thread_switch(thread->context);
+  }
+}
+
+/// Notify the OS event observer of an imminent thread switch.
+void thread_switch_helper(void) {
+  if (osEventObs && osEventObs->thread_switch) {
+    osEventObs->thread_switch(osRtxInfo.thread.run.next->context);
+  }
 }

 /// Dispatch specified Thread or Ready Thread with Highest Priority.
@@ -804,7 +815,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const
   } else {
     EvrRtxThreadError(NULL, (int32_t)osErrorNoMemory);
   }
-  
+
   if (thread != NULL) {
     osRtxThreadDispatch(thread);
   }
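
Note for reviewers: the window this patch closes is easier to see in C than in the
SVC handler assembly. The sketch below is illustrative only and is not part of the
patch. It assumes the CMSIS-Core intrinsics __disable_irq(), __enable_irq(), and
__set_PSP(); the tcb_t type and context_restore_sketch() function are hypothetical
stand-ins for the RTX thread control block and the assembly restore path. The real
restore also reloads R4-R11 and the EXC_RETURN value, which has to stay in assembly.

    #include <stdint.h>
    #include "cmsis.h"   /* CMSIS-Core intrinsics on mbed; use your device's core header otherwise */

    typedef struct tcb {                     /* hypothetical stand-in for the RTX TCB */
        uint32_t sp;                         /* saved process stack pointer (TCB_SP_OFS) */
    } tcb_t;

    extern void thread_switch_helper(void);  /* added by this patch; notifies uVisor of the box switch */

    /* Sketch of SVC_ContextSwitch/SVC_ContextRestore with the new atomic window. */
    static void context_restore_sketch(const tcb_t *next)
    {
        __disable_irq();          /* CPSID I: no vIRQ may run between box switch and PSP update */
        thread_switch_helper();   /* uVisor switches to the destination box here */
        __set_PSP(next->sp);      /* MSR PSP,R0: the PSP now belongs to the new thread */
        __enable_irq();           /* CPSIE I: an interrupt taken now sees a consistent box and PSP */
    }

Without the __disable_irq()/__enable_irq() pair, an interrupt with higher priority
than PendSV arriving between the two middle calls would run uVisor's vIRQ handler
against the old PSP, which is exactly the crash described in the commit message.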