From 794128653191df929d2a50f1c32d18ac0bea76e8 Mon Sep 17 00:00:00 2001
From: Martin Kojtal
Date: Fri, 9 Jul 2021 10:50:42 +0100
Subject: [PATCH 01/16] [CMSIS_5]: Updated to 13b9f72f2

---
 cmsis/CMSIS_5/CMSIS/RTOS2/Include/cmsis_os2.h |    8 +-
 cmsis/CMSIS_5/CMSIS/RTOS2/Include/os_tick.h   |   15 +-
 .../CMSIS/RTOS2/RTX/Config/RTX_Config.c       |    6 +-
 .../CMSIS/RTOS2/RTX/Config/RTX_Config.h       |  102 +-
 .../CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h |   34 +
 .../CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_evr.h |   15 +-
 .../CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_os.h  |   19 +-
 .../CMSIS/RTOS2/RTX/Include1/cmsis_os.h       |   17 +-
 .../TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S    |   18 +-
 .../Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S  |   67 +-
 .../Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S |   67 +-
 .../TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S   |  267 +--
 .../Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S  |  123 +-
 .../TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S   |  292 ++-
 .../TARGET_RTOS_M4_M7/irq_cm4f.S              |  140 +-
 .../TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S    |  534 ++---
 .../Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S  |  185 +-
 .../Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S |  185 +-
 .../TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S   |  392 ++--
 .../Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S  |  186 +-
 .../TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S   |  357 ++--
 .../TARGET_RTOS_M4_M7/irq_cm4f.S              |  208 +-
 .../TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S    |   21 +-
 .../Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S  |   57 +-
 .../Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S |   57 +-
 .../TARGET_M23/irq_armv8mbl_common.S          |  238 +--
 .../Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S  |  113 +-
 .../TARGET_M33/irq_armv8mml_common.S          |  256 ++-
 .../TARGET_RTOS_M4_M7/irq_cm4f.S              |  130 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_core_c.h       |    4 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_core_ca.h      |   10 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_core_cm.h      |   80 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_delay.c        |   34 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_evflags.c      |   24 +-
 .../CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evr.c  |   14 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_kernel.c       |  143 +-
 .../CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c  |  231 +-
 .../CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.h  |   14 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_mempool.c      |   44 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c     |   79 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_mutex.c        |   76 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_semaphore.c    |   20 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_system.c       |   54 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_thread.c       |  218 +-
 .../CMSIS/RTOS2/RTX/Source/rtx_timer.c        |   71 +-
 cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c |   10 +-
 .../TARGET_CORTEX_A/Include/cmsis_armcc.h     |   34 +-
 .../TARGET_CORTEX_A/Include/cmsis_armclang.h  |   60 +-
 .../CMSIS/TARGET_CORTEX_A/Include/cmsis_gcc.h |   56 +-
 .../TARGET_CORTEX_A/Source/irq_ctrl_gic.c     |    8 +-
 .../TARGET_CORTEX_M/Include/cachel1_armv7.h   |   28 +-
 .../TARGET_CORTEX_M/Include/cmsis_armcc.h     |  803 +++----
 .../TARGET_CORTEX_M/Include/cmsis_armclang.h  | 1656 ++++++++-------
 .../Include/cmsis_armclang_ltm.h              | 1615 +++++++-------
 .../CMSIS/TARGET_CORTEX_M/Include/cmsis_gcc.h | 1868 +++++++++--------
 .../TARGET_CORTEX_M/Include/cmsis_iccarm.h    |   46 +-
 .../TARGET_CORTEX_M/Include/core_armv81mml.h  |   60 +-
 .../TARGET_CORTEX_M/Include/core_armv8mml.h   |   25 +-
 .../CMSIS/TARGET_CORTEX_M/Include/core_cm3.h  |   18 +-
 .../CMSIS/TARGET_CORTEX_M/Include/core_cm33.h |   25 +-
 .../TARGET_CORTEX_M/Include/core_cm35p.h      |   27 +-
 .../CMSIS/TARGET_CORTEX_M/Include/core_cm4.h  |   18 +-
 .../CMSIS/TARGET_CORTEX_M/Include/core_cm55.h |   95 +-
 .../CMSIS/TARGET_CORTEX_M/Include/core_cm7.h  |   36 +-
 .../TARGET_CORTEX_M/Include/core_sc300.h      |   16 +-
 .../CMSIS/TARGET_CORTEX_M/Include/mpu_armv7.h |   10 +-
 .../CMSIS/TARGET_CORTEX_M/Include/mpu_armv8.h |    8 +-
 .../CMSIS/TARGET_CORTEX_M/Include/pmu_armv8.h |    6 +-
 .../TARGET_CORTEX_M/Source/mbed_tz_context.c  |    9 +-
 tools/importer/cmsis_importer.json            |   34 +-
 70 files changed, 6293 insertions(+), 5503 deletions(-)
 create mode 100644 cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h

diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/Include/cmsis_os2.h b/cmsis/CMSIS_5/CMSIS/RTOS2/Include/cmsis_os2.h
index e0b602c79a0..76612e29291 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/Include/cmsis_os2.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/Include/cmsis_os2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2020 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -17,7 +17,7 @@
 *
 * ----------------------------------------------------------------------
 *
- * $Date:        18. June 2018
+ * $Date:        12. June 2020
 * $Revision:    V2.1.3
 *
 * Project:      CMSIS-RTOS2 API
@@ -86,7 +86,7 @@ typedef enum {
   osKernelLocked        =  3,         ///< Locked.
   osKernelSuspended     =  4,         ///< Suspended.
   osKernelError         = -1,         ///< Error.
-  osKernelReserved      = 0x7FFFFFFFU ///< Prevents enum down-size compiler optimization.
+  osKernelReserved      = 0x7FFFFFFF  ///< Prevents enum down-size compiler optimization.
 } osKernelState_t;
 
 /// Thread state.
@@ -723,7 +723,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t *
 /// \return maximum number of messages.
 uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id);
 
-/// Get maximum message size in a Memory Pool.
+/// Get maximum message size in a Message Queue.
 /// \param[in]     mq_id         message queue ID obtained by \ref osMessageQueueNew.
 /// \return maximum message size in bytes.
 uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id);
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/Include/os_tick.h b/cmsis/CMSIS_5/CMSIS/RTOS2/Include/os_tick.h
index 8f7cdf667b1..3cfd8954759 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/Include/os_tick.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/Include/os_tick.h
@@ -1,11 +1,11 @@
 /**************************************************************************//**
 * @file     os_tick.h
 * @brief    CMSIS OS Tick header file
- * @version  V1.0.1
- * @date     24. November 2017
+ * @version  V1.0.2
+ * @date     19. March 2021
 ******************************************************************************/
 /*
- * Copyright (c) 2017-2017 ARM Limited. All rights reserved.
+ * Copyright (c) 2017-2021 ARM Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -27,6 +27,11 @@
 
 #include <stdint.h>
 
+#ifdef  __cplusplus
+extern "C"
+{
+#endif
+
 /// IRQ Handler.
 #ifndef IRQHANDLER_T
 #define IRQHANDLER_T
@@ -68,4 +73,8 @@ uint32_t OS_Tick_GetCount (void);
 /// \return OS Tick overflow status (1 - overflow, 0 - no overflow).
 uint32_t OS_Tick_GetOverflow (void);
 
+#ifdef  __cplusplus
+}
+#endif
+
 #endif /* OS_TICK_H */
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c
index e4871014aa1..737078aae77 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -17,7 +17,7 @@
 *
 * -----------------------------------------------------------------------------
 *
- * $Revision:   V5.1.0
+ * $Revision:   V5.1.1
 *
 * Project:      CMSIS-RTOS RTX
 * Title:        RTX Configuration
@@ -40,7 +40,7 @@ __WEAK uint32_t osRtxErrorNotify (uint32_t code, void *object_id) {
   (void)object_id;
 
   switch (code) {
-    case osRtxErrorStackUnderflow:
+    case osRtxErrorStackOverflow:
       // Stack overflow detected for thread (thread_id=object_id)
       break;
     case osRtxErrorISRQueueOverflow:
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.h
index 244ed17e34d..4d2f501b4b0 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2020 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -17,7 +17,7 @@
 *
 * -----------------------------------------------------------------------------
 *
- * $Revision:   V5.5.1
+ * $Revision:   V5.5.2
 *
 * Project:      CMSIS-RTOS RTX
 * Title:        RTX Configuration definitions
@@ -69,7 +69,7 @@
 //
 
-// ISR FIFO Queue
+// ISR FIFO Queue
 //   <4=>   4 entries    <8=>   8 entries   <12=>  12 entries   <16=>  16 entries
 //   <24=> 24 entries   <32=>  32 entries   <48=>  48 entries   <64=>  64 entries
 //   <96=> 96 entries  <128=> 128 entries  <196=> 196 entries  <256=> 256 entries
 //   Default: 16 entries
 #ifndef OS_ISR_FIFO_QUEUE
 #define OS_ISR_FIFO_QUEUE           16
 #endif
@@ -143,10 +143,10 @@
 #endif
 
 // Stack overrun checking
-// Enables stack overrun check at thread switch.
+// Enables stack overrun check at thread switch (requires RTX source variant).
 // Enabling this option increases slightly the execution time of a thread switch.
 #ifndef OS_STACK_CHECK
-#define OS_STACK_CHECK              1
+#define OS_STACK_CHECK              0
 #endif
 
 // Stack usage watermark
 // Initialize thread stack with watermark pattern for analyzing stack usage.
 // Enabling this option increases significantly the execution time of thread creation.
 #ifndef OS_STACK_WATERMARK
 #define OS_STACK_WATERMARK          0
 #endif
 
-// Processor mode for Thread execution
-//   <0=> Unprivileged mode
+// Processor mode for Thread execution
+//   <0=> Unprivileged mode
 //   <1=> Privileged mode
 //   Default: Privileged mode
 #ifndef OS_PRIVILEGE_MODE
 #define OS_PRIVILEGE_MODE           1
 #endif
@@ -367,125 +367,125 @@
 // Recording levels for RTX components.
 // Only applicable if events for the respective component are generated.
 
 // Memory Management
 //   Recording level for Memory Management events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_MEMORY_LEVEL
-#define OS_EVR_MEMORY_LEVEL         0x01U
+//
+#ifndef OS_EVR_MEMORY_LEVEL
+#define OS_EVR_MEMORY_LEVEL         0x81U
 #endif
 
 // Kernel
 //   Recording level for Kernel events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_KERNEL_LEVEL
-#define OS_EVR_KERNEL_LEVEL         0x01U
+//
+#ifndef OS_EVR_KERNEL_LEVEL
+#define OS_EVR_KERNEL_LEVEL         0x81U
 #endif
 
 // Thread
 //   Recording level for Thread events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_THREAD_LEVEL
-#define OS_EVR_THREAD_LEVEL         0x05U
+//
+#ifndef OS_EVR_THREAD_LEVEL
+#define OS_EVR_THREAD_LEVEL         0x85U
 #endif
 
 // Generic Wait
 //   Recording level for Generic Wait events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_WAIT_LEVEL
-#define OS_EVR_WAIT_LEVEL           0x01U
+//
+#ifndef OS_EVR_WAIT_LEVEL
+#define OS_EVR_WAIT_LEVEL           0x81U
 #endif
 
 // Thread Flags
 //   Recording level for Thread Flags events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_THFLAGS_LEVEL
-#define OS_EVR_THFLAGS_LEVEL        0x01U
+//
+#ifndef OS_EVR_THFLAGS_LEVEL
+#define OS_EVR_THFLAGS_LEVEL        0x81U
 #endif
 
 // Event Flags
 //   Recording level for Event Flags events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_EVFLAGS_LEVEL
-#define OS_EVR_EVFLAGS_LEVEL        0x01U
+//
+#ifndef OS_EVR_EVFLAGS_LEVEL
+#define OS_EVR_EVFLAGS_LEVEL        0x81U
 #endif
 
 // Timer
 //   Recording level for Timer events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_TIMER_LEVEL
-#define OS_EVR_TIMER_LEVEL          0x01U
+//
+#ifndef OS_EVR_TIMER_LEVEL
+#define OS_EVR_TIMER_LEVEL          0x81U
 #endif
 
 // Mutex
 //   Recording level for Mutex events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_MUTEX_LEVEL
-#define OS_EVR_MUTEX_LEVEL          0x01U
+//
+#ifndef OS_EVR_MUTEX_LEVEL
+#define OS_EVR_MUTEX_LEVEL          0x81U
 #endif
 
 // Semaphore
 //   Recording level for Semaphore events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_SEMAPHORE_LEVEL
-#define OS_EVR_SEMAPHORE_LEVEL      0x01U
+//
+#ifndef OS_EVR_SEMAPHORE_LEVEL
+#define OS_EVR_SEMAPHORE_LEVEL      0x81U
 #endif
 
 // Memory Pool
 //   Recording level for Memory Pool events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_MEMPOOL_LEVEL
-#define OS_EVR_MEMPOOL_LEVEL        0x01U
+//
+#ifndef OS_EVR_MEMPOOL_LEVEL
+#define OS_EVR_MEMPOOL_LEVEL        0x81U
 #endif
 
 // Message Queue
 //   Recording level for Message Queue events.
 //     Error events
 //     API function call events
 //     Operation events
 //     Detailed operation events
-//
-#ifndef OS_EVR_MSGQUEUE_LEVEL
-#define OS_EVR_MSGQUEUE_LEVEL       0x01U
+//
+#ifndef OS_EVR_MSGQUEUE_LEVEL
+#define OS_EVR_MSGQUEUE_LEVEL       0x81U
 #endif
 
 //
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h
new file mode 100644
index 00000000000..a7076a4e46f
--- /dev/null
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2021 Arm Limited. All rights reserved.
+ *
+ * This Software is licensed under an Arm proprietary license.
+ *
+ * -----------------------------------------------------------------------------
+ *
+ * Project:     CMSIS-RTOS RTX
+ * Title:       RTX derived definitions
+ *
+ * -----------------------------------------------------------------------------
+ */
+
+#ifndef RTX_DEF_H_
+#define RTX_DEF_H_
+
+#ifdef  _RTE_
+#include "RTE_Components.h"
+#endif
+#include "RTX_Config.h"
+
+#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))
+ #define RTX_OBJ_MEM_USAGE
+#endif
+
+#if (defined(OS_STACK_CHECK) && (OS_STACK_CHECK != 0))
+ #define RTX_STACK_CHECK
+#endif
+
+#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS
+ #define DOMAIN_NS              1
+#endif
+
+#endif  // RTX_DEF_H_
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_evr.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_evr.h
index 1dec30a79fd..2a6899e8ce2 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_evr.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_evr.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -26,8 +26,6 @@
 #ifndef RTX_EVR_H_
 #define RTX_EVR_H_
 
-#include "cmsis_os2.h"                  // CMSIS RTOS API
-#include "RTX_Config.h"                 // RTX Configuration
 #include "rtx_os.h"                     // RTX OS definitions
 
 // Initial Thread configuration covered also Thread Flags and Generic Wait
@@ -393,6 +391,17 @@ extern void EvrRtxKernelGetSysTimerFreq (uint32_t freq);
 #define EvrRtxKernelGetSysTimerFreq(freq)
 #endif
 
+/**
+  \brief Event on RTOS kernel system error (Error)
+  \param[in]  code          error code.
+  \param[in]  object_id     object that caused the error.
+*/
+#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_ERROR_NOTIFY_DISABLE))
+extern void EvrRtxKernelErrorNotify (uint32_t code, void *object_id);
+#else
+#define EvrRtxKernelErrorNotify(code, object_id)
+#endif
+
 
 // ==== Thread Events ====
 
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_os.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_os.h
index c54e2f230c3..65e4227aa08 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_os.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_os.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2019 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -29,6 +29,7 @@
 #include <stdint.h>
 #include <stddef.h>
 #include "cmsis_os2.h"
+#include "rtx_def.h"
 
 #ifdef  __cplusplus
 extern "C"
@@ -38,8 +39,8 @@ extern "C"
 
 /// Kernel Information
 #define osRtxVersionAPI      20010003   ///< API version (2.1.3)
-#define osRtxVersionKernel   50050002   ///< Kernel version (5.5.2)
-#define osRtxKernelId        "RTX V5.5.2" ///< Kernel identification string
+#define osRtxVersionKernel   50050003   ///< Kernel version (5.5.3)
+#define osRtxKernelId        "RTX V5.5.3" ///< Kernel identification string
 
 // ==== Common definitions ====
 
@@ -110,7 +111,7 @@ typedef struct osRtxThread_s {
   struct osRtxThread_s  *delay_next;    ///< Link pointer to next Thread in Delay list
   struct osRtxThread_s  *delay_prev;    ///< Link pointer to previous Thread in Delay list
   struct osRtxThread_s  *thread_join;   ///< Thread waiting to Join
-  uint32_t               delay;         ///< Delay Time
+  uint32_t               delay;         ///< Delay Time/Round Robin Time Tick
   int8_t                 priority;      ///< Thread Priority
   int8_t                 priority_base; ///< Base Priority
   uint8_t                stack_frame;   ///< Stack Frame (EXC_RETURN[7..0])
@@ -296,9 +297,9 @@ typedef struct {
     osRtxThread_t       *delay_list;    ///< Delay List
     osRtxThread_t       *wait_list;     ///< Wait List (no Timeout)
     osRtxThread_t       *terminate_list;///< Terminate Thread List
+    uint32_t             reserved;
     struct {                            ///< Thread Round Robin Info
       osRtxThread_t     *thread;        ///< Round Robin Thread
-      uint32_t           tick;          ///< Round Robin Time Tick
      uint32_t           timeout;       ///< Round Robin Timeout
    } robin;
  } thread;
@@ -392,7 +393,8 @@ extern osRtxObjectMemUsage_t osRtxMessageQueueMemUsage;
 // ==== OS External Functions ====
 
 // OS Error Codes
-#define osRtxErrorStackUnderflow        1U  ///< Stack overflow, i.e. stack pointer below its lower memory limit for descending stacks.
+#define osRtxErrorStackUnderflow        1U  ///< \deprecated Superseded by \ref osRtxErrorStackOverflow.
+#define osRtxErrorStackOverflow         1U  ///< Stack overflow, i.e. stack pointer below its lower memory limit for descending stacks.
 #define osRtxErrorISRQueueOverflow      2U  ///< ISR Queue overflow detected when inserting object.
 #define osRtxErrorTimerQueueOverflow    3U  ///< User Timer Callback Queue overflow detected for timer.
 #define osRtxErrorClibSpace             4U  ///< Standard C/C++ library libspace not available: increase \c OS_THREAD_LIBSPACE_NUM.
@@ -400,6 +402,7 @@ extern osRtxObjectMemUsage_t osRtxMessageQueueMemUsage;
 
 /// OS Error Callback function
 extern uint32_t osRtxErrorNotify (uint32_t code, void *object_id);
+extern uint32_t osRtxKernelErrorNotify (uint32_t code, void *object_id);
 
 /// OS Idle Thread
 extern void osRtxIdleThread (void *argument);
@@ -453,10 +456,12 @@ typedef struct {
     osRtxMpInfo_t              *message_queue;    ///< Message Queue Control Blocks
   } mpi;
   uint32_t                      thread_stack_size;///< Default Thread Stack Size
-  const
+  const
   osThreadAttr_t               *idle_thread_attr; ///< Idle Thread Attributes
   const
   osThreadAttr_t               *timer_thread_attr;///< Timer Thread Attributes
+  void                (*timer_thread)(void *);    ///< Timer Thread Function
+  int32_t             (*timer_setup)(void);       ///< Timer Setup Function
   const
   osMessageQueueAttr_t         *timer_mq_attr;    ///< Timer Message Queue Attributes
   uint32_t                      timer_mq_mcnt;    ///< Timer Message Queue maximum Messages
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h
index 59a9e3307c7..ac487e143d9 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h
@@ -438,25 +438,26 @@ uint32_t osKernelSysTick (void);
 /// Create a Thread Definition with function, priority, and stack requirements.
 /// \param         name          name of the thread function.
 /// \param         priority      initial priority of the thread function.
+/// \param         instances     number of possible thread instances.
 /// \param         stacksz       stack size (in bytes) requirements for the thread function.
 #if defined (osObjectsExternal)  // object is external
-#define osThreadDef(name, priority, stacksz) \
+#define osThreadDef(name, priority, instances, stacksz) \
 extern const osThreadDef_t os_thread_def_##name
 #else                            // define the object
 #if (osCMSIS < 0x20000U)
-#define osThreadDef(name, priority, stacksz) \
+#define osThreadDef(name, priority, instances, stacksz) \
 const osThreadDef_t os_thread_def_##name = \
-{ (name), (priority), 1, (stacksz) }
+{ (name), (priority), (instances), (stacksz) }
 #else
-#define osThreadDef(name, priority, stacksz) \
-uint64_t os_thread_stack##name[(stacksz)?(((stacksz+7)/8)):1] __attribute__((section(".bss.os.thread.stack"))); \
+#define osThreadDef(name, priority, instances, stacksz) \
+static uint64_t os_thread_stack##name[(stacksz)?(((stacksz+7)/8)):1] __attribute__((section(".bss.os.thread.stack"))); \
 static osRtxThread_t os_thread_cb_##name __attribute__((section(".bss.os.thread.cb"))); \
 const osThreadDef_t os_thread_def_##name = \
 { (name), \
   { NULL, osThreadDetached, \
-    &os_thread_cb_##name,\
-    osRtxThreadCbSize, \
-    (stacksz) ? (&os_thread_stack##name) : NULL, \
+    (instances == 1) ? (&os_thread_cb_##name) : NULL,\
+    (instances == 1) ? osRtxThreadCbSize : 0U, \
+    ((stacksz) && (instances == 1)) ? (&os_thread_stack##name) : NULL, \
     8*((stacksz+7)/8), \
     (priority), 0U, 0U } }
 #endif
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S
index e54c42de3c6..daf5fe8372f 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,7 +18,7 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       Cortex-A Exception handlers
+; * Title:       ARMv7-A Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
@@ -367,16 +367,16 @@ osRtxContextSave
                 STMDB    R1!, {R2,R12}          ; Push FPSCR, maintain 8-byte alignment
 
                 VSTMDB   R1!, {D0-D15}          ; Save D0-D15
-                IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+                IF       {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
                 VSTMDB   R1!, {D16-D31}         ; Save D16-D31
-                ENDIF
+                ENDIF
                 LDRB     R2, [LR, #TCB_SP_FRAME]; Load osRtxInfo.thread.run.curr frame info
-                IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+                IF       {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
                 ORR      R2, R2, #4             ; NEON state
-                ELSE
+                ELSE
                 ORR      R2, R2, #2             ; VFP state
-                ENDIF
+                ENDIF
                 STRB     R2, [LR, #TCB_SP_FRAME]; Store VFP/NEON state
 
 osRtxContextSave1
@@ -428,9 +428,9 @@ osRtxContextRestore
                 MCR      p15, 0, R2, c1, c0, 2  ; Write CPACR
                 BEQ      osRtxContextRestore1   ; No VFP
                 ISB                             ; Sync if VFP was enabled
-                IF {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
+                IF       {TARGET_FEATURE_EXTENSION_REGISTER_COUNT} == 32
                 VLDMIA   LR!, {D16-D31}         ; Restore D16-D31
-                ENDIF
+                ENDIF
                 VLDMIA   LR!, {D0-D15}          ; Restore D0-D15
                 LDR      R2, [LR]
                 VMSR     FPSCR, R2              ; Restore FPSCR
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S
index c5146b3b81f..602a8186ef6 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,15 +18,22 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       Cortex-M0 Exception handlers
+; * Title:       ARMv6-M Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
+
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
 
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
+
                 PRESERVE8
                 THUMB
 
@@ -44,9 +51,10 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
 
                 MOV      R0,LR
                 LSRS     R0,R0,#3               ; Determine return stack from EXC_RETURN bit 2
@@ -57,7 +65,7 @@ SVC_Number
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 SUBS     R1,R1,#2               ; Point to SVC instruction
                 LDRB     R1,[R1]                ; Load SVC number
-                CMP      R1,#0
+                CMP      R1,#0                  ; Check SVC number
                 BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
@@ -68,18 +76,42 @@ SVC_Number
                 MOV      LR,R3                  ; Set EXC_RETURN
 
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDMIA    R3!,{R1,R2}            ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 BEQ      SVC_Exit               ; Branch when threads are the same
 
+                SUBS     R3,R3,#8               ; Adjust address
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
                 CMP      R1,#0
-                BEQ      SVC_ContextSwitch      ; Branch if running thread is deleted
+                BEQ      SVC_ContextRestore     ; Branch if running thread is deleted
 
 SVC_ContextSave
                 MRS      R0,PSP                 ; Get PSP
-                SUBS     R0,R0,#32              ; Calculate SP
+                SUBS     R0,R0,#32              ; Calculate SP: space for R4..R11
                 STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
+
+                IF       RTX_STACK_CHECK != 0
+
+                PUSH     {R1,R2}                ; Save osRtxInfo.thread.run: curr & next
+                MOV      R0,R1                  ; Parameter: osRtxInfo.thread.run.curr
+                BL       osRtxThreadStackCheck  ; Check if thread stack is overrun
+                POP      {R1,R2}                ; Restore osRtxInfo.thread.run: curr & next
+                CMP      R0,#0
+                BNE      SVC_ContextSaveRegs    ; Branch when stack check is ok
+
+                MOVS     R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+                BL       osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+                LDR      R2,[R3,#4]             ; Load osRtxInfo.thread.run: next
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+                B        SVC_ContextRestore     ; Branch to context restore handling
+
+SVC_ContextSaveRegs
+                LDR      R0,[R1,#TCB_SP_OFS]    ; Load SP
+
+                ENDIF
+
                 STMIA    R0!,{R4-R7}            ; Save R4..R7
                 MOV      R4,R8
                 MOV      R5,R9
                 MOV      R6,R10
@@ -87,17 +119,6 @@ SVC_ContextSave
                 MOV      R7,R11
                 STMIA    R0!,{R4-R7}            ; Save R8..R11
 
-SVC_ContextSwitch
-                SUBS     R3,R3,#8               ; Adjust address
-                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
 SVC_ContextRestore
                 LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
                 ADDS     R0,R0,#16              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R8..R11
                 MOV      R8,R4
                 MOV      R9,R5
                 MOV      R10,R6
                 MOV      R11,R7
                 MSR      PSP,R0                 ; Set PSP
@@ -110,7 +131,7 @@ SVC_ContextRestore
                 SUBS     R0,R0,#32              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R4..R7
 
-                MOVS     R0,#~0xFFFFFFFD
+                MOVS     R0,#2                  ; Binary complement of 0xFFFFFFFD
                 MVNS     R0,R0                  ; Set EXC_RETURN value
                 BX       R0                     ; Exit from handler
 
@@ -151,7 +172,7 @@ PendSV_Handler  PROC
                 BL       osRtxPendSV_Handler    ; Call osRtxPendSV_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        SVC_Context
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
@@ -165,7 +186,7 @@ SysTick_Handler PROC
                 BL       osRtxTick_Handler      ; Call osRtxTick_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        SVC_Context
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S
index c5146b3b81f..602a8186ef6 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,15 +18,22 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       Cortex-M0 Exception handlers
+; * Title:       ARMv6-M Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
+
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
 
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
+
                 PRESERVE8
                 THUMB
 
@@ -44,9 +51,10 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
 
                 MOV      R0,LR
                 LSRS     R0,R0,#3               ; Determine return stack from EXC_RETURN bit 2
@@ -57,7 +65,7 @@ SVC_Number
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 SUBS     R1,R1,#2               ; Point to SVC instruction
                 LDRB     R1,[R1]                ; Load SVC number
-                CMP      R1,#0
+                CMP      R1,#0                  ; Check SVC number
                 BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
@@ -68,18 +76,42 @@ SVC_Number
                 MOV      LR,R3                  ; Set EXC_RETURN
 
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDMIA    R3!,{R1,R2}            ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 BEQ      SVC_Exit               ; Branch when threads are the same
 
+                SUBS     R3,R3,#8               ; Adjust address
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
                 CMP      R1,#0
-                BEQ      SVC_ContextSwitch      ; Branch if running thread is deleted
+                BEQ      SVC_ContextRestore     ; Branch if running thread is deleted
 
 SVC_ContextSave
                 MRS      R0,PSP                 ; Get PSP
-                SUBS     R0,R0,#32              ; Calculate SP
+                SUBS     R0,R0,#32              ; Calculate SP: space for R4..R11
                 STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
+
+                IF       RTX_STACK_CHECK != 0
+
+                PUSH     {R1,R2}                ; Save osRtxInfo.thread.run: curr & next
+                MOV      R0,R1                  ; Parameter: osRtxInfo.thread.run.curr
+                BL       osRtxThreadStackCheck  ; Check if thread stack is overrun
+                POP      {R1,R2}                ; Restore osRtxInfo.thread.run: curr & next
+                CMP      R0,#0
+                BNE      SVC_ContextSaveRegs    ; Branch when stack check is ok
+
+                MOVS     R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+                BL       osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+                LDR      R2,[R3,#4]             ; Load osRtxInfo.thread.run: next
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+                B        SVC_ContextRestore     ; Branch to context restore handling
+
+SVC_ContextSaveRegs
+                LDR      R0,[R1,#TCB_SP_OFS]    ; Load SP
+
+                ENDIF
+
                 STMIA    R0!,{R4-R7}            ; Save R4..R7
                 MOV      R4,R8
                 MOV      R5,R9
                 MOV      R6,R10
@@ -87,17 +119,6 @@ SVC_ContextSave
                 MOV      R7,R11
                 STMIA    R0!,{R4-R7}            ; Save R8..R11
 
-SVC_ContextSwitch
-                SUBS     R3,R3,#8               ; Adjust address
-                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
 SVC_ContextRestore
                 LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
                 ADDS     R0,R0,#16              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R8..R11
                 MOV      R8,R4
                 MOV      R9,R5
                 MOV      R10,R6
                 MOV      R11,R7
                 MSR      PSP,R0                 ; Set PSP
@@ -110,7 +131,7 @@ SVC_ContextRestore
                 SUBS     R0,R0,#32              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R4..R7
 
-                MOVS     R0,#~0xFFFFFFFD
+                MOVS     R0,#2                  ; Binary complement of 0xFFFFFFFD
                 MVNS     R0,R0                  ; Set EXC_RETURN value
                 BX       R0                     ; Exit from handler
 
@@ -151,7 +172,7 @@ PendSV_Handler  PROC
                 BL       osRtxPendSV_Handler    ; Call osRtxPendSV_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        SVC_Context
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
@@ -165,7 +186,7 @@ SysTick_Handler PROC
                 BL       osRtxTick_Handler      ; Call osRtxTick_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        SVC_Context
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S
index d8f0fc59ccc..d7bfd999804 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
+; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,15 +18,19 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       ARMv8M Baseline Exception handlers
+; * Title:       ARMv8-M Baseline Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
-#ifndef DOMAIN_NS
-DOMAIN_NS       EQU      0
-#endif
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
+
+                IF       :LNOT::DEF:DOMAIN_NS
+DOMAIN_NS       EQU      0
+                ENDIF
 
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SM_OFS      EQU      48                     ; TCB.stack_mem offset
@@ -34,6 +38,9 @@
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
 TCB_SF_OFS      EQU      34                     ; TCB.stack_frame offset
 TCB_TZM_OFS     EQU      64                     ; TCB.tz_memory offset
 
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
+
                 PRESERVE8
                 THUMB
 
@@ -51,13 +58,14 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
-                IF       DOMAIN_NS = 1
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
+                IF       DOMAIN_NS != 0
                 IMPORT   TZ_LoadContext_S
                 IMPORT   TZ_StoreContext_S
-                ENDIF
+                ENDIF
 
                 MOV      R0,LR
                 LSRS     R0,R0,#3               ; Determine return stack from EXC_RETURN bit 2
@@ -68,102 +76,137 @@ SVC_Number
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 SUBS     R1,R1,#2               ; Point to SVC instruction
                 LDRB     R1,[R1]                ; Load SVC number
-                CMP      R1,#0
+                CMP      R1,#0                  ; Check SVC number
                 BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
-                LDM      R0,{R0-R3}             ; Load function parameters from stack
+                LDMIA    R0,{R0-R3}             ; Load function parameters from stack
                 BLX      R7                     ; Call service function
                 POP      {R2,R3}                ; Restore SP and EXC_RETURN
                 STMIA    R2!,{R0-R1}            ; Store function return values
                 MOV      LR,R3                  ; Set EXC_RETURN
 
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDMIA    R3!,{R1,R2}            ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 BEQ      SVC_Exit               ; Branch when threads are the same
 
-                CBZ      R1,SVC_ContextSwitch   ; Branch if running thread is deleted
+                SUBS     R3,R3,#8               ; Adjust address
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
+                CBZ      R1,SVC_ContextRestore  ; Branch if running thread is deleted
 
 SVC_ContextSave
-                IF       DOMAIN_NS = 1
+                IF       DOMAIN_NS != 0
                 LDR      R0,[R1,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,SVC_ContextSave1    ; Branch if there is no secure context
+                CBZ      R0,SVC_ContextSave_NS  ; Branch if there is no secure context
                 PUSH     {R1,R2,R3,R7}          ; Save registers
                 MOV      R7,LR                  ; Get EXC_RETURN
                 BL       TZ_StoreContext_S      ; Store secure context
                 MOV      LR,R7                  ; Set EXC_RETURN
                 POP      {R1,R2,R3,R7}          ; Restore registers
-                ENDIF
+                ENDIF
 
-SVC_ContextSave1
+SVC_ContextSave_NS
                 MRS      R0,PSP                 ; Get PSP
-                SUBS     R0,R0,#32              ; Calculate SP
+                IF       DOMAIN_NS != 0
+                MOV      R3,LR                  ; Get EXC_RETURN
+                LSLS     R3,R3,#25              ; Check domain of interrupted thread
+                BMI      SVC_ContextSaveSP      ; Branch if secure
+                ENDIF
+
+                IF       RTX_STACK_CHECK != 0
+                SUBS     R0,R0,#32              ; Calculate SP: space for R4..R11
+
+SVC_ContextSaveSP
                 STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
+                MOV      R3,LR                  ; Get EXC_RETURN
+                MOV      R0,R1                  ; osRtxInfo.thread.run.curr
+                ADDS     R0,R0,#TCB_SF_OFS      ; Adjust address
+                STRB     R3,[R0]                ; Store stack frame information
+
+                PUSH     {R1,R2}                ; Save osRtxInfo.thread.run: curr & next
+                MOV      R0,R1                  ; Parameter: osRtxInfo.thread.run.curr
+                BL       osRtxThreadStackCheck  ; Check if thread stack is overrun
+                POP      {R1,R2}                ; Restore osRtxInfo.thread.run: curr & next
+                CMP      R0,#0
+                BNE      SVC_ContextSaveRegs    ; Branch when stack check is ok
+
+                MOVS     R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+                BL       osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+                LDR      R2,[R3,#4]             ; Load osRtxInfo.thread.run: next
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+                B        SVC_ContextRestore     ; Branch to context restore handling
+
+SVC_ContextSaveRegs
+                IF       DOMAIN_NS != 0
+                MOV      R0,R1                  ; osRtxInfo.thread.run.curr
+                ADDS     R0,R0,#TCB_SF_OFS      ; Adjust address
+                LDRB     R3,[R0]                ; Load stack frame information
+                LSLS     R3,R3,#25              ; Check domain of interrupted thread
+                BMI      SVC_ContextRestore     ; Branch if secure
+                ENDIF
+
+                LDR      R0,[R1,#TCB_SP_OFS]    ; Load SP
                 STMIA    R0!,{R4-R7}            ; Save R4..R7
                 MOV      R4,R8
                 MOV      R5,R9
                 MOV      R6,R10
                 MOV      R7,R11
                 STMIA    R0!,{R4-R7}            ; Save R8..R11
-
-SVC_ContextSave2
+                ELSE
+                SUBS     R0,R0,#32              ; Calculate SP: space for R4..R11
+                STMIA    R0!,{R4-R7}            ; Save R4..R7
+                MOV      R4,R8
+                MOV      R5,R9
+                MOV      R6,R10
+                MOV      R7,R11
+                STMIA    R0!,{R4-R7}            ; Save R8..R11
+                SUBS     R0,R0,#32              ; Adjust address
+SVC_ContextSaveSP
+                STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
                 MOV      R0,LR                  ; Get EXC_RETURN
                 ADDS     R1,R1,#TCB_SF_OFS      ; Adjust address
                 STRB     R0,[R1]                ; Store stack frame information
-
-SVC_ContextSwitch
-                SUBS     R3,R3,#8               ; Adjust address
-                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
+                ENDIF
 
 SVC_ContextRestore
-                IF       DOMAIN_NS = 1
+                IF       DOMAIN_NS != 0
                 LDR      R0,[R2,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,SVC_ContextRestore1 ; Branch if there is no secure context
+                CBZ      R0,SVC_ContextRestore_NS ; Branch if there is no secure context
                 PUSH     {R2,R3}                ; Save registers
                 BL       TZ_LoadContext_S       ; Load secure context
                 POP      {R2,R3}                ; Restore registers
-                ENDIF
+                ENDIF
 
-SVC_ContextRestore1
-                MOV      R1,R2
-                ADDS     R1,R1,#TCB_SF_OFS      ; Adjust address
-                LDRB     R0,[R1]                ; Load stack frame information
-                MOVS     R1,#0xFF
-                MVNS     R1,R1                  ; R1=0xFFFFFF00
-                ORRS     R0,R1
-                MOV      LR,R0                  ; Set EXC_RETURN
-
-                IF       DOMAIN_NS = 1
-                LSLS     R0,R0,#25              ; Check domain of interrupted thread
-                BPL      SVC_ContextRestore2    ; Branch if non-secure
-                LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
-                MSR      PSP,R0                 ; Set PSP
-                BX       LR                     ; Exit from handler
-                ELSE
+SVC_ContextRestore_NS
                 LDR      R0,[R2,#TCB_SM_OFS]    ; Load stack memory base
                 MSR      PSPLIM,R0              ; Set PSPLIM
-                ENDIF
-
-SVC_ContextRestore2
+                MOV      R0,R2                  ; osRtxInfo.thread.run.next
+                ADDS     R0,R0,#TCB_SF_OFS      ; Adjust address
+                LDRB     R3,[R0]                ; Load stack frame information
+                MOVS     R0,#0xFF
+                MVNS     R0,R0                  ; R0=0xFFFFFF00
+                ORRS     R3,R3,R0
+                MOV      LR,R3                  ; Set EXC_RETURN
                 LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
+                IF       DOMAIN_NS != 0
+                LSLS     R3,R3,#25              ; Check domain of interrupted thread
+                BMI      SVC_ContextRestoreSP   ; Branch if secure
+                ENDIF
+
                 ADDS     R0,R0,#16              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R8..R11
                 MOV      R8,R4
                 MOV      R9,R5
                 MOV      R10,R6
                 MOV      R11,R7
-                MSR      PSP,R0                 ; Set PSP
                 SUBS     R0,R0,#32              ; Adjust address
                 LDMIA    R0!,{R4-R7}            ; Restore R4..R7
+                ADDS     R0,R0,#16              ; Adjust address
+
+SVC_ContextRestoreSP
+                MSR      PSP,R0                 ; Set PSP
 
 SVC_Exit
                 BX       LR                     ; Exit from handler
@@ -202,7 +245,7 @@ PendSV_Handler  PROC
                 BL       osRtxPendSV_Handler    ; Call osRtxPendSV_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        Sys_Context
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
@@ -216,117 +259,7 @@ SysTick_Handler PROC
                 BL       osRtxTick_Handler      ; Call osRtxTick_Handler
                 POP      {R0,R1}                ; Restore EXC_RETURN
                 MOV      LR,R1                  ; Set EXC_RETURN
-                B        Sys_Context
-
-                ALIGN
-                ENDP
-
-
-Sys_Context     PROC
-                EXPORT   Sys_Context
-                IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
-                IF       DOMAIN_NS = 1
-                IMPORT   TZ_LoadContext_S
-                IMPORT   TZ_StoreContext_S
-                ENDIF
-
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
-                LDM      R3!,{R1,R2}            ; Load osRtxInfo.thread.run: curr & next
-                CMP      R1,R2                  ; Check if thread switch is required
-                BEQ      Sys_ContextExit        ; Branch when threads are the same
-
-Sys_ContextSave
-                IF       DOMAIN_NS = 1
-                LDR      R0,[R1,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,Sys_ContextSave1    ; Branch if there is no secure context
-                PUSH     {R1,R2,R3,R7}          ; Save registers
-                MOV      R7,LR                  ; Get EXC_RETURN
-                BL       TZ_StoreContext_S      ; Store secure context
-                MOV      LR,R7                  ; Set EXC_RETURN
-                POP      {R1,R2,R3,R7}          ; Restore registers
-
-Sys_ContextSave1
-                MOV      R0,LR                  ; Get EXC_RETURN
-                LSLS     R0,R0,#25              ; Check domain of interrupted thread
-                BPL      Sys_ContextSave2       ; Branch if non-secure
-                MRS      R0,PSP                 ; Get PSP
-                STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
-                B        Sys_ContextSave3
-                ENDIF
-
-Sys_ContextSave2
-                MRS      R0,PSP                 ; Get PSP
-                SUBS     R0,R0,#32              ; Adjust address
-                STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
-                STMIA    R0!,{R4-R7}            ; Save R4..R7
-                MOV      R4,R8
-                MOV      R5,R9
-                MOV      R6,R10
-                MOV      R7,R11
-                STMIA    R0!,{R4-R7}            ; Save R8..R11
-
-Sys_ContextSave3
-                MOV      R0,LR                  ; Get EXC_RETURN
-                ADDS     R1,R1,#TCB_SF_OFS      ; Adjust address
-                STRB     R0,[R1]                ; Store stack frame information
-
-Sys_ContextSwitch
-                SUBS     R3,R3,#8               ; Adjust address
-                STR      R2,[R3]                ; osRtxInfo.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
-Sys_ContextRestore
-                IF       DOMAIN_NS = 1
-                LDR      R0,[R2,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,Sys_ContextRestore1 ; Branch if there is no secure context
-                PUSH     {R2,R3}                ; Save registers
-                BL       TZ_LoadContext_S       ; Load secure context
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
-Sys_ContextRestore1
-                MOV      R1,R2
-                ADDS     R1,R1,#TCB_SF_OFS      ; Adjust offset
-                LDRB     R0,[R1]                ; Load stack frame information
-                MOVS     R1,#0xFF
-                MVNS     R1,R1                  ; R1=0xFFFFFF00
-                ORRS     R0,R1
-                MOV      LR,R0                  ; Set EXC_RETURN
-
-                IF       DOMAIN_NS = 1
-                LSLS     R0,R0,#25              ; Check domain of interrupted thread
-                BPL      Sys_ContextRestore2    ; Branch if non-secure
-                LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
-                MSR      PSP,R0                 ; Set PSP
-                BX       LR                     ; Exit from handler
-                ELSE
-                LDR      R0,[R2,#TCB_SM_OFS]    ; Load stack memory base
-                MSR      PSPLIM,R0              ; Set PSPLIM
-                ENDIF
-
-Sys_ContextRestore2
-                LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
-                ADDS     R0,R0,#16              ; Adjust address
-                LDMIA    R0!,{R4-R7}            ; Restore R8..R11
-                MOV      R8,R4
-                MOV      R9,R5
-                MOV      R10,R6
-                MOV      R11,R7
-                MSR      PSP,R0                 ; Set PSP
-                SUBS     R0,R0,#32              ; Adjust address
-                LDMIA    R0!,{R4-R7}            ; Restore R4..R7
-
-Sys_ContextExit
-                BX       LR                     ; Exit from handler
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S
index 0a5ed3458a5..88f545766ea 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,14 +18,30 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       Cortex-M3 Exception handlers
+; * Title:       ARMv7-M Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
+
+                IF       ({FPU}="FPv4-SP")
+FPU_USED        EQU      1
+                ELSE
+FPU_USED        EQU      0
+                ENDIF
+
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
+TCB_SF_OFS      EQU      34                     ; TCB.stack_frame offset
+
+FPCCR           EQU      0xE000EF34             ; FPCCR Address
+
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
 
                 PRESERVE8
                 THUMB
 
@@ -44,9 +60,10 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
 
                 TST      LR,#0x04               ; Determine return stack from EXC_RETURN bit 2
                 ITE      EQ
@@ -55,7 +72,8 @@
 
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 LDRB     R1,[R1,#-2]            ; Load SVC number
-                CBNZ     R1,SVC_User            ; Branch if not SVC 0
+                CMP      R1,#0                  ; Check SVC number
+                BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
                 LDM      R0,{R0-R3,R12}         ; Load function parameters and address from stack
@@ -64,35 +82,94 @@
                 STM      R12,{R0-R1}            ; Store function return values
 
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDM      R3,{R1,R2}             ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 IT       EQ
                 BXEQ     LR                     ; Exit when threads are the same
 
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
+                IF       FPU_USED != 0
                 CBNZ     R1,SVC_ContextSave     ; Branch if running thread is not deleted
+SVC_FP_LazyState
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                BNE      SVC_ContextRestore     ; Branch if not extended stack frame
+                LDR      R3,=FPCCR              ; FPCCR Address
+                LDR      R0,[R3]                ; Load FPCCR
+                BIC      R0,R0,#1               ; Clear LSPACT (Lazy state preservation)
+                STR      R0,[R3]                ; Store FPCCR
+                B        SVC_ContextRestore     ; Branch to context restore handling
+                ELSE
+                CBZ      R1,SVC_ContextRestore  ; Branch if running thread is deleted
+                ENDIF
 
 SVC_ContextSave
-                STMDB    R12!,{R4-R11}          ; Save R4..R11
+                IF       RTX_STACK_CHECK != 0
+                SUB      R12,R12,#32            ; Calculate SP: space for R4..R11
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                SUBEQ    R12,R12,#64            ; Additional space for S16..S31
+                STRB     LR, [R1,#TCB_SF_OFS]   ; Store stack frame information
+                ENDIF
                 STR      R12,[R1,#TCB_SP_OFS]   ; Store SP
 
-SVC_ContextSwitch
+                PUSH     {R1,R2}                ; Save osRtxInfo.thread.run: curr & next
+                MOV      R0,R1                  ; Parameter: osRtxInfo.thread.run.curr
+                BL       osRtxThreadStackCheck  ; Check if thread stack is overrun
+                POP      {R1,R2}                ; Restore osRtxInfo.thread.run: curr & next
+                CBNZ     R0,SVC_ContextSaveRegs ; Branch when stack check is ok
+
+                IF       FPU_USED != 0
+                MOV      R4,R1                  ; Save osRtxInfo.thread.run.curr
+                ENDIF
+                MOV      R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+                BL       osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+                LDR      R2,[R3,#4]             ; Load osRtxInfo.thread.run: next
                 STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
+                IF       FPU_USED != 0
+                LDRB     LR,[R4,#TCB_SF_OFS]    ; Load stack frame information
+                B        SVC_FP_LazyState       ; Branch to FP lazy state handling
+                ELSE
+                B        SVC_ContextRestore     ; Branch to context restore handling
+                ENDIF
+
+SVC_ContextSaveRegs
+                LDR      R12,[R1,#TCB_SP_OFS]   ; Load SP
+                IF       FPU_USED != 0
+                LDRB     LR, [R1,#TCB_SF_OFS]   ; Load stack frame information
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                VSTMIAEQ R12!,{S16-S31}         ; Save VFP S16..S31
+                ENDIF
+                STM      R12,{R4-R11}           ; Save R4..R11
+                ELSE
+                STMDB    R12!,{R4-R11}          ; Save R4..R11
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                VSTMDBEQ R12!,{S16-S31}         ; Save VFP S16.S31
+                STRB     LR, [R1,#TCB_SF_OFS]   ; Store stack frame information
+                ENDIF
+                STR      R12,[R1,#TCB_SP_OFS]   ; Store SP
+                ENDIF
 
 SVC_ContextRestore
                 LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
+                IF       FPU_USED != 0
+                LDRB     R1,[R2,#TCB_SF_OFS]    ; Load stack frame information
+                ORN      LR,R1,#0xFF            ; Set EXC_RETURN
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                VLDMIAEQ R0!,{S16-S31}          ; Restore VFP S16..S31
+                ELSE
+                MVN      LR,#~0xFFFFFFFD        ; Set EXC_RETURN value
+                ENDIF
                 LDMIA    R0!,{R4-R11}           ; Restore R4..R11
                 MSR      PSP,R0                 ; Set PSP
 
-                MVN      LR,#~0xFFFFFFFD        ; Set EXC_RETURN value
-
 SVC_Exit
                 BX       LR                     ; Exit from handler
 
@@ -122,8 +199,8 @@ PendSV_Handler  PROC
                 PUSH     {R0,LR}                ; Save EXC_RETURN
                 BL       osRtxPendSV_Handler    ; Call osRtxPendSV_Handler
                 POP      {R0,LR}                ; Restore EXC_RETURN
-                MRS      R12,PSP
-                B        SVC_Context
+                MRS      R12,PSP                ; Save PSP to R12
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
@@ -136,8 +213,8 @@ SysTick_Handler PROC
                 PUSH     {R0,LR}                ; Save EXC_RETURN
                 BL       osRtxTick_Handler      ; Call osRtxTick_Handler
                 POP      {R0,LR}                ; Restore EXC_RETURN
-                MRS      R12,PSP
-                B        SVC_Context
+                MRS      R12,PSP                ; Save PSP to R12
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S
index 3ea3a75097c..984dd2d1969 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2016-2020 Arm Limited. All rights reserved.
+; * Copyright (c) 2016-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,22 +18,25 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       ARMv8M Mainline Exception handlers
+; * Title:       ARMv8-M Mainline Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
-#ifndef DOMAIN_NS
-DOMAIN_NS       EQU      0
-#endif
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
 
-#ifdef __ARM_FP
-__FPU_USED      EQU      1
-#else
-__FPU_USED      EQU      0
-#endif
+                IF       :LNOT::DEF:DOMAIN_NS
+DOMAIN_NS       EQU      0
+                ENDIF
 
+                IF       ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16")
+FPU_USED        EQU      1
+                ELSE
+FPU_USED        EQU      0
+                ENDIF
 
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SM_OFS      EQU      48                     ; TCB.stack_mem offset
@@ -41,6 +44,11 @@
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
 TCB_SF_OFS      EQU      34                     ; TCB.stack_frame offset
 TCB_TZM_OFS     EQU      64                     ; TCB.tz_memory offset
 
+FPCCR           EQU      0xE000EF34             ; FPCCR Address
+
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
+
                 PRESERVE8
                 THUMB
 
@@ -58,13 +66,14 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
-                IF       DOMAIN_NS = 1
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
+                IF       DOMAIN_NS != 0
                 IMPORT   TZ_LoadContext_S
                 IMPORT   TZ_StoreContext_S
-                ENDIF
+                ENDIF
 
                 TST      LR,#0x04               ; Determine return stack from EXC_RETURN bit 2
                 ITE      EQ
@@ -73,7 +82,7 @@
 
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 LDRB     R1,[R1,#-2]            ; Load SVC number
-                CMP      R1,#0
+                CMP      R1,#0                  ; Check SVC number
                 BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
@@ -83,86 +92,130 @@
                 STM      R12,{R0-R1}            ; Store function return values
 
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDM      R3,{R1,R2}             ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 IT       EQ
                 BXEQ     LR                     ; Exit when threads are the same
 
-                IF       __FPU_USED = 1
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
+                IF       FPU_USED != 0
                 CBNZ     R1,SVC_ContextSave     ; Branch if running thread is not deleted
-                TST      LR,#0x10               ; Check if extended stack frame
-                BNE      SVC_ContextSwitch
-                LDR      R1,=0xE000EF34         ; FPCCR Address
-                LDR      R0,[R1]                ; Load FPCCR
-                BIC      R0,R0,#1               ; Clear LSPACT (Lazy state)
-                STR      R0,[R1]                ; Store FPCCR
-                B        SVC_ContextSwitch
-                ELSE
-                CBZ      R1,SVC_ContextSwitch   ; Branch if running thread is deleted
-                ENDIF
+SVC_FP_LazyState
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                BNE      SVC_ContextRestore     ; Branch if not extended stack frame
+                LDR      R3,=FPCCR              ; FPCCR Address
+                LDR      R0,[R3]                ; Load FPCCR
+                BIC      R0,R0,#1               ; Clear LSPACT (Lazy state preservation)
+                STR      R0,[R3]                ; Store FPCCR
+                B        SVC_ContextRestore     ; Branch to context restore handling
+                ELSE
+                CBZ      R1,SVC_ContextRestore  ; Branch if running thread is deleted
+                ENDIF
 
 SVC_ContextSave
-                IF       DOMAIN_NS = 1
+                IF       DOMAIN_NS != 0
                 LDR      R0,[R1,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,SVC_ContextSave1    ; Branch if there is no secure context
-                PUSH     {R1,R2,R3,LR}          ; Save registers and EXC_RETURN
+                CBZ      R0,SVC_ContextSave_NS  ; Branch if there is no secure context
+                PUSH     {R1,R2,R12,LR}         ; Save registers and EXC_RETURN
                 BL       TZ_StoreContext_S      ; Store secure context
-                POP      {R1,R2,R3,LR}          ; Restore registers and EXC_RETURN
-                ENDIF
-
-SVC_ContextSave1
-                MRS      R0,PSP                 ; Get PSP
-                STMDB    R0!,{R4-R11}           ; Save R4..R11
-                IF       __FPU_USED = 1
-                TST      LR,#0x10               ; Check if extended stack frame
-                IT       EQ
-                VSTMDBEQ R0!,{S16-S31}          ; Save VFP S16.S31
-                ENDIF
-
-SVC_ContextSave2
-                STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
-                STRB     LR,[R1,#TCB_SF_OFS]    ; Store stack frame information
+                POP      {R1,R2,R12,LR}         ; Restore registers and EXC_RETURN
+                ENDIF
 
-SVC_ContextSwitch
+SVC_ContextSave_NS
+                IF       DOMAIN_NS != 0
+                TST      LR,#0x40               ; Check domain of interrupted thread
+                BNE      SVC_ContextSaveSP      ; Branch if secure
+                ENDIF
+
+                IF       RTX_STACK_CHECK != 0
+                SUB      R12,R12,#32            ; Calculate SP: space for R4..R11
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                SUBEQ    R12,R12,#64            ; Additional space for S16..S31
+                ENDIF
+
+SVC_ContextSaveSP
+                STR      R12,[R1,#TCB_SP_OFS]   ; Store SP
+                STRB     LR, [R1,#TCB_SF_OFS]   ; Store stack frame information
+
+                PUSH     {R1,R2}                ; Save osRtxInfo.thread.run: curr & next
+                MOV      R0,R1                  ; Parameter: osRtxInfo.thread.run.curr
+                BL       osRtxThreadStackCheck  ; Check if thread stack is overrun
+                POP      {R1,R2}                ; Restore osRtxInfo.thread.run: curr & next
+                CBNZ     R0,SVC_ContextSaveRegs ; Branch when stack check is ok
+
+                IF       FPU_USED != 0
+                MOV      R4,R1                  ; Save osRtxInfo.thread.run.curr
+                ENDIF
+                MOV      R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+                BL       osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+                LDR      R2,[R3,#4]             ; Load osRtxInfo.thread.run: next
                 STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
+                IF       FPU_USED != 0
+                LDRB     LR,[R4,#TCB_SF_OFS]    ; Load stack frame information
+                B        SVC_FP_LazyState       ; Branch to FP lazy state handling
+                ELSE
+                B        SVC_ContextRestore     ; Branch to context restore handling
+                ENDIF
+
+SVC_ContextSaveRegs
+                LDRB     LR,[R1,#TCB_SF_OFS]    ; Load stack frame information
+                IF       DOMAIN_NS != 0
+                TST      LR,#0x40               ; Check domain of interrupted thread
+                BNE      SVC_ContextRestore     ; Branch if secure
+                ENDIF
+                LDR      R12,[R1,#TCB_SP_OFS]   ; Load SP
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                VSTMIAEQ R12!,{S16-S31}         ; Save VFP S16..S31
+                ENDIF
+                STM      R12,{R4-R11}           ; Save R4..R11
+                ELSE
+                STMDB    R12!,{R4-R11}          ; Save R4..R11
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                VSTMDBEQ R12!,{S16-S31}         ; Save VFP S16.S31
+                ENDIF
+SVC_ContextSaveSP
+                STR      R12,[R1,#TCB_SP_OFS]   ; Store SP
+                STRB     LR, [R1,#TCB_SF_OFS]   ; Store stack frame information
+                ENDIF
 
 SVC_ContextRestore
-                IF       DOMAIN_NS = 1
+                IF       DOMAIN_NS != 0
                 LDR      R0,[R2,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,SVC_ContextRestore1 ; Branch if there is no secure context
+                CBZ      R0,SVC_ContextRestore_NS; Branch if there is no secure context
                 PUSH     {R2,R3}                ; Save registers
                 BL       TZ_LoadContext_S       ; Load secure context
                 POP      {R2,R3}                ; Restore registers
-                ENDIF
+                ENDIF
 
-SVC_ContextRestore1
-                LDR      R0,[R2,#TCB_SM_OFS]    ; Load stack memory base
-                LDRB     R1,[R2,#TCB_SF_OFS]    ; Load stack frame information
-                MSR      PSPLIM,R0              ; Set PSPLIM
+SVC_ContextRestore_NS
                 LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
-                ORR      LR,R1,#0xFFFFFF00      ; Set EXC_RETURN
+                LDR      R1,[R2,#TCB_SM_OFS]    ; Load stack memory base
+                MSR      PSPLIM,R1              ; Set PSPLIM
+                LDRB     R1,[R2,#TCB_SF_OFS]    ; Load stack frame information
+                ORN      LR,R1,#0xFF            ; Set EXC_RETURN
 
-                IF       DOMAIN_NS = 1
+                IF       DOMAIN_NS != 0
                 TST      LR,#0x40               ; Check domain of interrupted thread
-                BNE      SVC_ContextRestore2    ; Branch if secure
-                ENDIF
+                BNE      SVC_ContextRestoreSP   ; Branch if secure
+                ENDIF
 
-                IF       __FPU_USED = 1
-                TST      LR,#0x10               ; Check if extended stack frame
-                IT       EQ
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
                 VLDMIAEQ R0!,{S16-S31}          ; Restore VFP S16..S31
-                ENDIF
+                ENDIF
                 LDMIA    R0!,{R4-R11}           ; Restore R4..R11
 
-SVC_ContextRestore2
+SVC_ContextRestoreSP
                 MSR      PSP,R0                 ; Set PSP
 
 SVC_Exit
@@ -194,7 +247,8 @@ PendSV_Handler  PROC
                 PUSH     {R0,LR}                ; Save EXC_RETURN
                 BL       osRtxPendSV_Handler    ; Call osRtxPendSV_Handler
                 POP      {R0,LR}                ; Restore EXC_RETURN
-                B        Sys_Context
+                MRS      R12,PSP                ; Save PSP to R12
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
@@ -207,100 +261,8 @@ SysTick_Handler PROC
                 PUSH     {R0,LR}                ; Save EXC_RETURN
                 BL       osRtxTick_Handler      ; Call osRtxTick_Handler
                 POP      {R0,LR}                ; Restore EXC_RETURN
-                B        Sys_Context
-
-                ALIGN
-                ENDP
-
-
-Sys_Context     PROC
-                EXPORT   Sys_Context
-                IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
-                IF       DOMAIN_NS = 1
-                IMPORT   TZ_LoadContext_S
-                IMPORT   TZ_StoreContext_S
-                ENDIF
-
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
-                LDM      R3,{R1,R2}             ; Load osRtxInfo.thread.run: curr & next
-                CMP      R1,R2                  ; Check if thread switch is required
-                IT       EQ
-                BXEQ     LR                     ; Exit when threads are the same
-
-Sys_ContextSave
-                IF       DOMAIN_NS = 1
-                LDR      R0,[R1,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,Sys_ContextSave1    ; Branch if there is no secure context
-                PUSH     {R1,R2,R3,LR}          ; Save registers and EXC_RETURN
-                BL       TZ_StoreContext_S      ; Store secure context
-                POP      {R1,R2,R3,LR}          ; Restore registers and EXC_RETURN
-
-Sys_ContextSave1
-                TST      LR,#0x40               ; Check domain of interrupted thread
-                IT       NE
-                MRSNE    R0,PSP                 ; Get PSP
-                BNE      Sys_ContextSave3       ; Branch if secure
-                ENDIF
-
-Sys_ContextSave2
-                MRS      R0,PSP                 ; Get PSP
-                STMDB    R0!,{R4-R11}           ; Save R4..R11
-                IF       __FPU_USED = 1
-                TST      LR,#0x10               ; Check if extended stack frame
-                IT       EQ
-                VSTMDBEQ R0!,{S16-S31}          ; Save VFP S16.S31
-                ENDIF
-
-Sys_ContextSave3
-                STR      R0,[R1,#TCB_SP_OFS]    ; Store SP
-                STRB     LR,[R1,#TCB_SF_OFS]    ; Store stack frame information
-
-Sys_ContextSwitch
-                STR      R2,[R3]                ; osRtxInfo.run: curr = next
-
-                IF       :DEF:MPU_LOAD
-                PUSH     {R2,R3}                ; Save registers
-                MOV      R0,R2                  ; osRtxMpuLoad parameter
-                BL       osRtxMpuLoad           ; Load MPU for next thread
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
-Sys_ContextRestore
-                IF       DOMAIN_NS = 1
-                LDR      R0,[R2,#TCB_TZM_OFS]   ; Load TrustZone memory identifier
-                CBZ      R0,Sys_ContextRestore1 ; Branch if there is no secure context
-                PUSH     {R2,R3}                ; Save registers
-                BL       TZ_LoadContext_S       ; Load secure context
-                POP      {R2,R3}                ; Restore registers
-                ENDIF
-
-Sys_ContextRestore1
-                LDR      R0,[R2,#TCB_SM_OFS]    ; Load stack memory base
-                LDRB     R1,[R2,#TCB_SF_OFS]    ; Load stack frame information
-                MSR      PSPLIM,R0              ; Set PSPLIM
-                LDR      R0,[R2,#TCB_SP_OFS]    ; Load SP
-                ORR      LR,R1,#0xFFFFFF00      ; Set EXC_RETURN
-
-                IF       DOMAIN_NS = 1
-                TST      LR,#0x40               ; Check domain of interrupted thread
-                BNE      Sys_ContextRestore2    ; Branch if secure
-                ENDIF
-
-                IF       __FPU_USED = 1
-                TST      LR,#0x10               ; Check if extended stack frame
-                IT       EQ
-                VLDMIAEQ R0!,{S16-S31}          ; Restore VFP S16..S31
-                ENDIF
-                LDMIA    R0!,{R4-R11}           ; Restore R4..R11
-
-Sys_ContextRestore2
-                MSR      PSP,R0                 ; Set PSP
-
-Sys_ContextExit
-                BX       LR                     ; Exit from handler
+                MRS      R12,PSP                ; Save PSP to R12
+                B        SVC_Context            ; Branch to context handling
 
                 ALIGN
                 ENDP
diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S
index 803c7c378c0..88f545766ea 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S
@@ -1,5 +1,5 @@
 ;/*
-; * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+; * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 ; *
 ; * SPDX-License-Identifier: Apache-2.0
 ; *
@@ -18,16 +18,31 @@
 ; * -----------------------------------------------------------------------------
 ; *
 ; * Project:     CMSIS-RTOS RTX
-; * Title:       Cortex-M4F Exception handlers
+; * Title:       ARMv7-M Exception handlers
 ; *
 ; * -----------------------------------------------------------------------------
 ; */
 
 
+                IF       :LNOT::DEF:RTX_STACK_CHECK
+RTX_STACK_CHECK EQU      0
+                ENDIF
+
+                IF       ({FPU}="FPv4-SP")
+FPU_USED        EQU      1
+                ELSE
+FPU_USED        EQU      0
+                ENDIF
+
 I_T_RUN_OFS     EQU      20                     ; osRtxInfo.thread.run offset
 TCB_SP_OFS      EQU      56                     ; TCB.SP offset
 TCB_SF_OFS      EQU      34                     ; TCB.stack_frame offset
 
+FPCCR           EQU      0xE000EF34             ; FPCCR Address
+
+osRtxErrorStackOverflow\
+                EQU      1                      ; Stack overflow
+
                 PRESERVE8
                 THUMB
 
@@ -45,9 +60,10 @@ SVC_Handler     PROC
                 EXPORT   SVC_Handler
                 IMPORT   osRtxUserSVC
                 IMPORT   osRtxInfo
-                IF       :DEF:MPU_LOAD
-                IMPORT   osRtxMpuLoad
-                ENDIF
+                IF       RTX_STACK_CHECK != 0
+                IMPORT   osRtxThreadStackCheck
+                IMPORT   osRtxKernelErrorNotify
+                ENDIF
 
                 TST      LR,#0x04               ; Determine return stack from EXC_RETURN bit 2
                 ITE      EQ
@@ -56,7 +72,8 @@ SVC_Handler     PROC
                 LDR      R1,[R0,#24]            ; Load saved PC from stack
                 LDRB     R1,[R1,#-2]            ; Load SVC number
-                CBNZ     R1,SVC_User            ; Branch if not SVC 0
+                CMP      R1,#0                  ; Check SVC number
+                BNE      SVC_User               ; Branch if not SVC 0
 
                 PUSH     {R0,LR}                ; Save SP and EXC_RETURN
                 LDM      R0,{R0-R3,R12}         ; Load function parameters and address from stack
                 BLX      R12                    ; Call service function
                 POP      {R12,LR}               ; Restore SP and EXC_RETURN
                 STM      R12,{R0-R1}            ; Store function return values
 
@@ -65,54 +82,91 @@
 SVC_Context
-                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run
+                LDR      R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run
                 LDM      R3,{R1,R2}             ; Load osRtxInfo.thread.run: curr & next
                 CMP      R1,R2                  ; Check if thread switch is required
                 IT       EQ
                 BXEQ     LR                     ; Exit when threads are the same
 
+                STR      R2,[R3]                ; osRtxInfo.thread.run: curr = next
+
+                IF       FPU_USED != 0
                 CBNZ     R1,SVC_ContextSave     ; Branch if running thread is not deleted
-                TST      LR,#0x10               ; Check if extended stack frame
-                BNE      SVC_ContextSwitch
-#ifdef __FPU_PRESENT
-                LDR      R1,=0xE000EF34         ; FPCCR Address
-                LDR      R0,[R1]                ; Load FPCCR
-                BIC      R0,R0,#1               ; Clear LSPACT (Lazy state)
-                STR      R0,[R1]                ; Store FPCCR
-                B        SVC_ContextSwitch
-#endif
+SVC_FP_LazyState
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                BNE      SVC_ContextRestore     ; Branch if not extended stack frame
+                LDR      R3,=FPCCR              ; FPCCR Address
+                LDR      R0,[R3]                ; Load FPCCR
+                BIC      R0,R0,#1               ; Clear LSPACT (Lazy state preservation)
+                STR      R0,[R3]                ; Store FPCCR
+                B        SVC_ContextRestore     ; Branch to context restore handling
+                ELSE
+                CBZ      R1,SVC_ContextRestore  ; Branch if running thread is deleted
+                ENDIF
 
 SVC_ContextSave
-                STMDB    R12!,{R4-R11}          ; Save R4..R11
-#ifdef __FPU_PRESENT
-                TST      LR,#0x10               ; Check if extended stack frame
-                IT       EQ
-                VSTMDBEQ R12!,{S16-S31}         ; Save VFP S16.S31
-#endif
-
-                STR      R12,[R1,#TCB_SP_OFS]   ; Store SP
+                IF       RTX_STACK_CHECK != 0
+                SUB      R12,R12,#32            ; Calculate SP: space for R4..R11
+                IF       FPU_USED != 0
+                TST      LR,#0x10               ; Determine stack frame from EXC_RETURN bit 4
+                IT       EQ                     ; If extended stack frame
+                SUBEQ    R12,R12,#64            ; Additional space for S16..S31
+                STRB     LR, 
[R1,#TCB_SF_OFS] ; Store stack frame information
+ ENDIF
+ STR R12,[R1,#TCB_SP_OFS] ; Store SP

-SVC_ContextSwitch
+ PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next
+ MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr
+ BL osRtxThreadStackCheck ; Check if thread stack is overrun
+ POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next
+ CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok
+
+ IF FPU_USED != 0
+ MOV R4,R1 ; Save osRtxInfo.thread.run.curr
+ ENDIF
+ MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id
+ BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify
+ LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run
+ LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next
 STR R2,[R3] ; osRtxInfo.thread.run: curr = next
-
- IF :DEF:MPU_LOAD
- PUSH {R2,R3} ; Save registers
- MOV R0,R2 ; osRtxMpuLoad parameter
- BL osRtxMpuLoad ; Load MPU for next thread
- POP {R2,R3} ; Restore registers
- ENDIF
+ IF FPU_USED != 0
+ LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information
+ B SVC_FP_LazyState ; Branch to FP lazy state handling
+ ELSE
+ B SVC_ContextRestore ; Branch to context restore handling
+ ENDIF
+
+SVC_ContextSaveRegs
+ LDR R12,[R1,#TCB_SP_OFS] ; Load SP
+ IF FPU_USED != 0
+ LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information
+ TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
+ IT EQ ; If extended stack frame
+ VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31
+ ENDIF
+ STM R12,{R4-R11} ; Save R4..R11
+ ELSE
+ STMDB R12!,{R4-R11} ; Save R4..R11
+ IF FPU_USED != 0
+ TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
+ IT EQ ; If extended stack frame
+ VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31
+ STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information
+ ENDIF
+ STR R12,[R1,#TCB_SP_OFS] ; Store SP
+ ENDIF

SVC_ContextRestore
- LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
 LDR R0,[R2,#TCB_SP_OFS] ; Load SP
- ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN
-
-#ifdef __FPU_PRESENT
- TST LR,#0x10 ; Check if extended stack frame
- IT EQ
+ IF FPU_USED != 0
+ LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information
+ ORN LR,R1,#0xFF ; Set EXC_RETURN
+ TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4
+ IT EQ ; If extended stack frame
 VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31
-#endif
+ ELSE
+ MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value
+ ENDIF
 LDMIA R0!,{R4-R11} ; Restore R4..R11
 MSR PSP,R0 ; Set PSP

@@ -145,8 +199,8 @@ PendSV_Handler PROC
 PUSH {R0,LR} ; Save EXC_RETURN
 BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler
 POP {R0,LR} ; Restore EXC_RETURN
- MRS R12,PSP
- B SVC_Context
+ MRS R12,PSP ; Save PSP to R12
+ B SVC_Context ; Branch to context handling

 ALIGN
 ENDP

@@ -159,8 +213,8 @@ SysTick_Handler PROC
 PUSH {R0,LR} ; Save EXC_RETURN
 BL osRtxTick_Handler ; Call osRtxTick_Handler
 POP {R0,LR} ; Restore EXC_RETURN
- MRS R12,PSP
- B SVC_Context
+ MRS R12,PSP ; Save PSP to R12
+ B SVC_Context ; Branch to context handling

 ALIGN
 ENDP

diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S
index 0db8a995570..9d5f44af8c9 100644
--- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S
+++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2018 Arm Limited. All rights reserved.
+ * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 *
@@ -18,7 +18,7 @@
 * -----------------------------------------------------------------------------
 *
 * Project: CMSIS-RTOS RTX
- * Title: Cortex-A Exception handlers
+ * Title: ARMv7-A Exception handlers
 *
 * -----------------------------------------------------------------------------
 */
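Editor's note (between hunks): the RTX_STACK_CHECK sequences added throughout this patch call back into the RTX kernel before a context switch is committed. The C sketch below shows that call order only. osRtxThreadStackCheck, osRtxKernelErrorNotify and osRtxErrorStackOverflow are the symbols the handlers actually import; the stand-in thread type, the osRtxRunNext() accessor and the helper body are assumptions made for this illustration, not the rtx_lib.h implementation.

    #include <stdint.h>

    typedef struct os_thread_s os_thread_t;            /* stand-in for the RTX TCB */

    extern uint32_t     osRtxThreadStackCheck (const os_thread_t *thread); /* nonzero = ok */
    extern uint32_t     osRtxKernelErrorNotify(uint32_t code, void *object_id);
    extern os_thread_t *osRtxRunNext          (void);  /* stand-in: osRtxInfo.thread.run.next */

    #define osRtxErrorStackOverflow 1U

    /* Decide whose context may be saved and which thread is restored next. */
    static os_thread_t *context_save_target(os_thread_t *curr, os_thread_t *next) {
      if (osRtxThreadStackCheck(curr) == 0U) {         /* stack watermark overrun */
        (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, curr);
        next = osRtxRunNext();                         /* overflowed thread is not saved */
      } else {
        /* save R4-R11 (and S16-S31 for an extended FP frame) to curr's stack */
      }
      return next;                                     /* context restored on exception exit */
    }

As in the assembly above, a failed check skips saving the overflowed thread entirely and re-reads run.next, since the error notification may have rescheduled.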
@@ -64,48 +64,48 @@ IRQ_PendSV:
 .cantunwind
Undef_Handler:
- SRSFD SP!, #MODE_UND
- PUSH {R0-R4, R12} // Save APCS corruptible registers to UND mode stack
+ srsfd sp!, #MODE_UND
+ push {r0-r4, r12} // Save APCS corruptible registers to UND mode stack

- MRS R0, SPSR
- TST R0, #CPSR_BIT_T // Check mode
- MOVEQ R1, #4 // R1 = 4 ARM mode
- MOVNE R1, #2 // R1 = 2 Thumb mode
- SUB R0, LR, R1
- LDREQ R0, [R0] // ARM mode - R0 points to offending instruction
- BEQ Undef_Cont
+ mrs r0, spsr
+ tst r0, #CPSR_BIT_T // Check mode
+ moveq r1, #4 // R1 = 4 ARM mode
+ movne r1, #2 // R1 = 2 Thumb mode
+ sub r0, lr, r1
+ ldreq r0, [r0] // ARM mode - R0 points to offending instruction
+ beq Undef_Cont

 // Thumb instruction
 // Determine if it is a 32-bit Thumb instruction
- LDRH R0, [R0]
- MOV R2, #0x1C
- CMP R2, R0, LSR #11
- BHS Undef_Cont // 16-bit Thumb instruction
+ ldrh r0, [r0]
+ mov r2, #0x1C
+ cmp r2, r0, lsr #11
+ bhs Undef_Cont // 16-bit Thumb instruction

 // 32-bit Thumb instruction. Unaligned - reconstruct the offending instruction
- LDRH R2, [LR]
- ORR R0, R2, R0, LSL #16
+ ldrh r2, [lr]
+ orr r0, r2, r0, lsl #16

Undef_Cont:
- MOV R2, LR // Set LR to third argument
+ mov r2, lr // Set LR to third argument

- AND R12, SP, #4 // Ensure stack is 8-byte aligned
- SUB SP, SP, R12 // Adjust stack
- PUSH {R12, LR} // Store stack adjustment and dummy LR
+ and r12, sp, #4 // Ensure stack is 8-byte aligned
+ sub sp, sp, r12 // Adjust stack
+ push {r12, lr} // Store stack adjustment and dummy LR

 // R0 =Offending instruction, R1 =2(Thumb) or =4(ARM)
- BL CUndefHandler
+ bl CUndefHandler

- POP {R12, LR} // Get stack adjustment & discard dummy LR
- ADD SP, SP, R12 // Unadjust stack
+ pop {r12, lr} // Get stack adjustment & discard dummy LR
+ add sp, sp, r12 // Unadjust stack

- LDR LR, [SP, #24] // Restore stacked LR and possibly adjust for retry
- SUB LR, LR, R0
- LDR R0, [SP, #28] // Restore stacked SPSR
- MSR SPSR_cxsf, R0
- CLREX // Clear exclusive monitor
- POP {R0-R4, R12} // Restore stacked APCS registers
- ADD SP, SP, #8 // Adjust SP for already-restored banked registers
- MOVS PC, LR
+ ldr lr, [sp, #24] // Restore stacked LR and possibly adjust for retry
+ sub lr, lr, r0
+ ldr r0, [sp, #28] // Restore stacked SPSR
+ msr spsr_cxsf, r0
+ clrex // Clear exclusive monitor
+ pop {r0-r4, r12} // Restore stacked APCS registers
+ add sp, sp, #8 // Adjust SP for already-restored banked registers
+ movs pc, lr

 .fnend
 .size Undef_Handler, .-Undef_Handler

@@ -117,26 +117,26 @@ Undef_Cont:
 .cantunwind
PAbt_Handler:
- SUB LR, LR, #4 // Pre-adjust LR
- SRSFD SP!, #MODE_ABT // Save LR and SPRS to ABT mode stack
- PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
- MRC p15, 0, R0, c5, c0, 1 // IFSR
- MRC p15, 0, R1, c6, c0, 2 // IFAR
+ sub lr, lr, #4 // Pre-adjust LR
+ srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
+ push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
+ mrc p15, 0, r0, c5, c0, 1 // IFSR
+ mrc p15, 0, r1, c6, c0, 2 // IFAR

- MOV R2, LR // Set LR to third argument
+ mov r2, lr // Set LR to third argument

- AND R12, SP, #4 // Ensure stack is 8-byte aligned
- SUB SP, SP, R12 // Adjust stack
- PUSH {R12, LR} // Store stack adjustment and dummy LR
+ and r12, sp, #4 // Ensure stack is 8-byte aligned
+ sub sp, sp, r12 // Adjust stack
+ push {r12, lr} // Store stack adjustment and dummy LR

- BL CPAbtHandler
+ bl CPAbtHandler

- POP {R12, LR} // Get stack adjustment & discard dummy LR
- ADD SP, SP, R12 // Unadjust stack
+ pop {r12, lr} // Get stack adjustment & discard dummy LR
+ add sp, sp, r12 // Unadjust stack

- CLREX // Clear exclusive monitor
- POP {R0-R4, R12} // Restore stack APCS registers
- RFEFD SP! // Return from exception
+ clrex // Clear exclusive monitor
+ pop {r0-r4, r12} // Restore stacked APCS registers
+ rfefd sp! // Return from exception

 .fnend
 .size PAbt_Handler, .-PAbt_Handler

@@ -147,26 +147,26 @@ PAbt_Handler:
 .fnstart
 .cantunwind
DAbt_Handler:
- SUB LR, LR, #8 // Pre-adjust LR
- SRSFD SP!, #MODE_ABT // Save LR and SPRS to ABT mode stack
- PUSH {R0-R4, R12} // Save APCS corruptible registers to ABT mode stack
- MRC p15, 0, R0, c5, c0, 0 // DFSR
- MRC p15, 0, R1, c6, c0, 0 // DFAR
+ sub lr, lr, #8 // Pre-adjust LR
+ srsfd sp!, #MODE_ABT // Save LR and SPSR to ABT mode stack
+ push {r0-r4, r12} // Save APCS corruptible registers to ABT mode stack
+ mrc p15, 0, r0, c5, c0, 0 // DFSR
+ mrc p15, 0, r1, c6, c0, 0 // DFAR

- MOV R2, LR // Set LR to third argument
+ mov r2, lr // Set LR to third argument

- AND R12, SP, #4 // Ensure stack is 8-byte aligned
- SUB SP, SP, R12 // Adjust stack
- PUSH {R12, LR} // Store stack adjustment and dummy LR
+ and r12, sp, #4 // Ensure stack is 8-byte aligned
+ sub sp, sp, r12 // Adjust stack
+ push {r12, lr} // Store stack adjustment and dummy LR

- BL CDAbtHandler
+ bl CDAbtHandler

- POP {R12, LR} // Get stack adjustment & discard dummy LR
- ADD SP, SP, R12 // Unadjust stack
+ pop {r12, lr} // Get stack adjustment & discard dummy LR
+ add sp, sp, r12 // Unadjust stack

- CLREX // Clear exclusive monitor
- POP {R0-R4, R12} // Restore stacked APCS registers
- RFEFD SP! // Return from exception
+ clrex // Clear exclusive monitor
+ pop {r0-r4, r12} // Restore stacked APCS registers
+ rfefd sp!
// Return from exception .fnend .size DAbt_Handler, .-DAbt_Handler @@ -178,49 +178,49 @@ DAbt_Handler: .cantunwind IRQ_Handler: - SUB LR, LR, #4 // Pre-adjust LR - SRSFD SP!, #MODE_SVC // Save LR_irq and SPSR_irq on to the SVC stack - CPS #MODE_SVC // Change to SVC mode - PUSH {R0-R3, R12, LR} // Save APCS corruptible registers + sub lr, lr, #4 // Pre-adjust LR + srsfd sp!, #MODE_SVC // Save LR_irq and SPSR_irq on to the SVC stack + cps #MODE_SVC // Change to SVC mode + push {r0-r3, r12, lr} // Save APCS corruptible registers - LDR R0, =IRQ_NestLevel - LDR R1, [R0] - ADD R1, R1, #1 // Increment IRQ nesting level - STR R1, [R0] + ldr r0, =IRQ_NestLevel + ldr r1, [r0] + add r1, r1, #1 // Increment IRQ nesting level + str r1, [r0] - MOV R3, SP // Move SP into R3 - AND R3, R3, #4 // Get stack adjustment to ensure 8-byte alignment - SUB SP, SP, R3 // Adjust stack - PUSH {R3, R4} // Store stack adjustment(R3) and user data(R4) + mov r3, sp // Move SP into R3 + and r3, r3, #4 // Get stack adjustment to ensure 8-byte alignment + sub sp, sp, r3 // Adjust stack + push {r3, r4} // Store stack adjustment(R3) and user data(R4) - BLX IRQ_GetActiveIRQ // Retrieve interrupt ID into R0 - MOV R4, R0 // Move interrupt ID to R4 + blx IRQ_GetActiveIRQ // Retrieve interrupt ID into R0 + mov r4, r0 // Move interrupt ID to R4 - BLX IRQ_GetHandler // Retrieve interrupt handler address for current ID - CMP R0, #0 // Check if handler address is 0 - BEQ IRQ_End // If 0, end interrupt and return + blx IRQ_GetHandler // Retrieve interrupt handler address for current ID + cmp r0, #0 // Check if handler address is 0 + beq IRQ_End // If 0, end interrupt and return - CPSIE i // Re-enable interrupts - BLX R0 // Call IRQ handler - CPSID i // Disable interrupts + cpsie i // Re-enable interrupts + blx r0 // Call IRQ handler + cpsid i // Disable interrupts IRQ_End: - MOV R0, R4 // Move interrupt ID to R0 - BLX IRQ_EndOfInterrupt // Signal end of interrupt + mov r0, r4 // Move interrupt ID to R0 + blx IRQ_EndOfInterrupt // Signal end of interrupt - POP {R3, R4} // Restore stack adjustment(R3) and user data(R4) - ADD SP, SP, R3 // Unadjust stack + pop {r3, r4} // Restore stack adjustment(R3) and user data(R4) + add sp, sp, r3 // Unadjust stack - BL osRtxContextSwitch // Continue in context switcher + bl osRtxContextSwitch // Continue in context switcher - LDR R0, =IRQ_NestLevel - LDR R1, [R0] - SUBS R1, R1, #1 // Decrement IRQ nesting level - STR R1, [R0] + ldr r0, =IRQ_NestLevel + ldr r1, [r0] + subs r1, r1, #1 // Decrement IRQ nesting level + str r1, [r0] - CLREX // Clear exclusive monitor for interrupted code - POP {R0-R3, R12, LR} // Restore stacked APCS registers - RFEFD SP! // Return from IRQ handler + clrex // Clear exclusive monitor for interrupted code + pop {r0-r3, r12, lr} // Restore stacked APCS registers + rfefd sp! // Return from IRQ handler .fnend .size IRQ_Handler, .-IRQ_Handler @@ -232,80 +232,80 @@ IRQ_End: .cantunwind SVC_Handler: - SRSFD SP!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack - PUSH {R12, LR} - - MRS R12, SPSR // Load SPSR - TST R12, #CPSR_BIT_T // Thumb bit set? 
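+ // Editor's note (illustration only, not from upstream): the SVC number is
+ // encoded in the SVC instruction itself, so the handler reads it back
+ // through the return address. Roughly, in C terms with lr holding the
+ // preferred return address just past the SVC:
+ //   num = thumb ? (*(uint16_t *)(lr - 2) & 0xFFu)         // T32: 8-bit imm
+ //               : (*(uint32_t *)(lr - 4) & 0x00FFFFFFu);  // A32: 24-bit imm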
- LDRHNE R12, [LR,#-2] // Thumb: load halfword - BICNE R12, R12, #0xFF00 // extract SVC number - LDREQ R12, [LR,#-4] // ARM: load word - BICEQ R12, R12, #0xFF000000 // extract SVC number - CMP R12, #0 // Compare SVC number - BNE SVC_User // Branch if User SVC - - PUSH {R0-R3} - - LDR R0, =IRQ_NestLevel - LDR R1, [R0] - ADD R1, R1, #1 // Increment IRQ nesting level - STR R1, [R0] - - LDR R0, =osRtxInfo - LDR R1, [R0, #I_K_STATE_OFS] // Load RTX5 kernel state - CMP R1, #K_STATE_RUNNING // Check osKernelRunning - BLT SVC_FuncCall // Continue if kernel is not running - LDR R0, [R0, #I_TICK_IRQN_OFS] // Load OS Tick irqn - BLX IRQ_Disable // Disable OS Tick interrupt + srsfd sp!, #MODE_SVC // Store SPSR_svc and LR_svc onto SVC stack + push {r12, lr} + + mrs r12, spsr // Load SPSR + tst r12, #CPSR_BIT_T // Thumb bit set? + ldrhne r12, [lr,#-2] // Thumb: load halfword + bicne r12, r12, #0xFF00 // extract SVC number + ldreq r12, [lr,#-4] // ARM: load word + biceq r12, r12, #0xFF000000 // extract SVC number + cmp r12, #0 // Compare SVC number + bne SVC_User // Branch if User SVC + + push {r0-r3} + + ldr r0, =IRQ_NestLevel + ldr r1, [r0] + add r1, r1, #1 // Increment IRQ nesting level + str r1, [r0] + + ldr r0, =osRtxInfo + ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state + cmp r1, #K_STATE_RUNNING // Check osKernelRunning + blt SVC_FuncCall // Continue if kernel is not running + ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn + blx IRQ_Disable // Disable OS Tick interrupt SVC_FuncCall: - POP {R0-R3} + pop {r0-r3} - LDR R12, [SP] // Reload R12 from stack + ldr r12, [sp] // Reload R12 from stack - CPSIE i // Re-enable interrupts - BLX R12 // Branch to SVC function - CPSID i // Disable interrupts + cpsie i // Re-enable interrupts + blx r12 // Branch to SVC function + cpsid i // Disable interrupts - SUB SP, SP, #4 - STM SP, {SP}^ // Store SP_usr onto stack - POP {R12} // Pop SP_usr into R12 - SUB R12, R12, #16 // Adjust pointer to SP_usr - LDMDB R12, {R2,R3} // Load return values from SVC function - PUSH {R0-R3} // Push return values to stack + sub sp, sp, #4 + stm sp, {sp}^ // Store SP_usr onto stack + pop {r12} // Pop SP_usr into R12 + sub r12, r12, #16 // Adjust pointer to SP_usr + ldmdb r12, {r2,r3} // Load return values from SVC function + push {r0-r3} // Push return values to stack - LDR R0, =osRtxInfo - LDR R1, [R0, #I_K_STATE_OFS] // Load RTX5 kernel state - CMP R1, #K_STATE_RUNNING // Check osKernelRunning - BLT SVC_ContextCheck // Continue if kernel is not running - LDR R0, [R0, #I_TICK_IRQN_OFS] // Load OS Tick irqn - BLX IRQ_Enable // Enable OS Tick interrupt + ldr r0, =osRtxInfo + ldr r1, [r0, #I_K_STATE_OFS] // Load RTX5 kernel state + cmp r1, #K_STATE_RUNNING // Check osKernelRunning + blt SVC_ContextCheck // Continue if kernel is not running + ldr r0, [r0, #I_TICK_IRQN_OFS] // Load OS Tick irqn + blx IRQ_Enable // Enable OS Tick interrupt SVC_ContextCheck: - BL osRtxContextSwitch // Continue in context switcher + bl osRtxContextSwitch // Continue in context switcher - LDR R0, =IRQ_NestLevel - LDR R1, [R0] - SUB R1, R1, #1 // Decrement IRQ nesting level - STR R1, [R0] + ldr r0, =IRQ_NestLevel + ldr r1, [r0] + sub r1, r1, #1 // Decrement IRQ nesting level + str r1, [r0] - CLREX // Clear exclusive monitor - POP {R0-R3, R12, LR} // Restore stacked APCS registers - RFEFD SP! // Return from exception + clrex // Clear exclusive monitor + pop {r0-r3, r12, lr} // Restore stacked APCS registers + rfefd sp! 
// Return from exception SVC_User: - PUSH {R4, R5} - LDR R5,=osRtxUserSVC // Load address of SVC table - LDR R4,[R5] // Load SVC maximum number - CMP R12,R4 // Check SVC number range - BHI SVC_Done // Branch if out of range + push {r4, r5} + ldr r5,=osRtxUserSVC // Load address of SVC table + ldr r4,[r5] // Load SVC maximum number + cmp r12,r4 // Check SVC number range + bhi SVC_Done // Branch if out of range - LDR R12,[R5,R12,LSL #2] // Load SVC Function Address - BLX R12 // Call SVC Function + ldr r12,[r5,r12,lsl #2] // Load SVC Function Address + blx r12 // Call SVC Function SVC_Done: - CLREX // Clear exclusive monitor - POP {R4, R5, R12, LR} - RFEFD SP! // Return from exception + clrex // Clear exclusive monitor + pop {r4, r5, r12, lr} + rfefd sp! // Return from exception .fnend .size SVC_Handler, .-SVC_Handler @@ -317,146 +317,146 @@ SVC_Done: .cantunwind osRtxContextSwitch: - PUSH {LR} + push {lr} // Check interrupt nesting level - LDR R0, =IRQ_NestLevel - LDR R1, [R0] // Load IRQ nest level - CMP R1, #1 - BNE osRtxContextExit // Nesting interrupts, exit context switcher + ldr r0, =IRQ_NestLevel + ldr r1, [r0] // Load IRQ nest level + cmp r1, #1 + bne osRtxContextExit // Nesting interrupts, exit context switcher - LDR R12, =osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R12, {R0, R1} // Load osRtxInfo.thread.run: curr & next - LDR R2, =IRQ_PendSV // Load address of IRQ_PendSV flag - LDRB R3, [R2] // Load PendSV flag + ldr r12, =osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run + ldm r12, {r0, r1} // Load osRtxInfo.thread.run: curr & next + ldr r2, =IRQ_PendSV // Load address of IRQ_PendSV flag + ldrb r3, [r2] // Load PendSV flag - CMP R0, R1 // Check if context switch is required - BNE osRtxContextCheck // Not equal, check if context save required - CMP R3, #1 // Compare IRQ_PendSV value - BNE osRtxContextExit // No post processing (and no context switch requested) + cmp r0, r1 // Check if context switch is required + bne osRtxContextCheck // Not equal, check if context save required + cmp r3, #1 // Compare IRQ_PendSV value + bne osRtxContextExit // No post processing (and no context switch requested) osRtxContextCheck: - STR R1, [R12] // Store run.next as run.curr + str r1, [r12] // Store run.next as run.curr // R0 = curr, R1 = next, R2 = &IRQ_PendSV, R3 = IRQ_PendSV, R12 = &osRtxInfo.thread.run - PUSH {R1-R3, R12} + push {r1-r3, r12} - CMP R0, #0 // Is osRtxInfo.thread.run.curr == 0 - BEQ osRtxPostProcess // Current deleted, skip context save + cmp r0, #0 // Is osRtxInfo.thread.run.curr == 0 + beq osRtxPostProcess // Current deleted, skip context save osRtxContextSave: - MOV LR, R0 // Move &osRtxInfo.thread.run.curr to LR - MOV R0, SP // Move SP_svc into R0 - ADD R0, R0, #20 // Adjust SP_svc to R0 of the basic frame - SUB SP, SP, #4 - STM SP, {SP}^ // Save SP_usr to current stack - POP {R1} // Pop SP_usr into R1 - - SUB R1, R1, #64 // Adjust SP_usr to R4 of the basic frame - STMIA R1!, {R4-R11} // Save R4-R11 to user stack - LDMIA R0!, {R4-R8} // Load stacked R0-R3,R12 into R4-R8 - STMIA R1!, {R4-R8} // Store them to user stack - STM R1, {LR}^ // Store LR_usr directly - ADD R1, R1, #4 // Adjust user sp to PC - LDMIB R0!, {R5-R6} // Load current PC, CPSR - STMIA R1!, {R5-R6} // Restore user PC and CPSR - - SUB R1, R1, #64 // Adjust SP_usr to stacked R4 + mov lr, r0 // Move &osRtxInfo.thread.run.curr to LR + mov r0, sp // Move SP_svc into R0 + add r0, r0, #20 // Adjust SP_svc to R0 of the basic frame + sub sp, sp, #4 + stm sp, {sp}^ // Save SP_usr to current stack 
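+ // Editor's note (added commentary): SP_usr is a banked register that SVC
+ // mode cannot read with a plain MRS/MOV on ARMv7-A, so the 'stm sp,{sp}^'
+ // above stores the user-mode SP via the '^' user-bank form, and the
+ // 'pop {r1}' below then retrieves it from the current stack.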
+ pop {r1} // Pop SP_usr into R1 + + sub r1, r1, #64 // Adjust SP_usr to R4 of the basic frame + stmia r1!, {r4-r11} // Save R4-R11 to user stack + ldmia r0!, {r4-r8} // Load stacked R0-R3,R12 into R4-R8 + stmia r1!, {r4-r8} // Store them to user stack + stm r1, {lr}^ // Store LR_usr directly + add r1, r1, #4 // Adjust user sp to PC + ldmib r0!, {r5-r6} // Load current PC, CPSR + stmia r1!, {r5-r6} // Restore user PC and CPSR + + sub r1, r1, #64 // Adjust SP_usr to stacked R4 // Check if VFP state need to be saved - MRC p15, 0, R2, c1, c0, 2 // VFP/NEON access enabled? (CPACR) - AND R2, R2, #0x00F00000 - CMP R2, #0x00F00000 - BNE osRtxContextSave1 // Continue, no VFP - - VMRS R2, FPSCR - STMDB R1!, {R2,R12} // Push FPSCR, maintain 8-byte alignment - - VSTMDB R1!, {D0-D15} // Save D0-D15 - #if __ARM_NEON == 1 - VSTMDB R1!, {D16-D31} // Save D16-D31 - #endif - - LDRB R2, [LR, #TCB_SP_FRAME] // Load osRtxInfo.thread.run.curr frame info - #if __ARM_NEON == 1 - ORR R2, R2, #4 // NEON state - #else - ORR R2, R2, #2 // VFP state - #endif - STRB R2, [LR, #TCB_SP_FRAME] // Store VFP/NEON state - -osRtxContextSave1: - STR R1, [LR, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr + mrc p15, 0, r2, c1, c0, 2 // VFP/NEON access enabled? (CPACR) + and r2, r2, #0x00F00000 + cmp r2, #0x00F00000 + bne osRtxContextSaveSP // Continue, no VFP + + vmrs r2, fpscr + stmdb r1!, {r2,r12} // Push FPSCR, maintain 8-byte alignment + + vstmdb r1!, {d0-d15} // Save D0-D15 + #if defined(__ARM_NEON) && (__ARM_NEON == 1) + vstmdb r1!, {d16-d31} // Save D16-D31 + #endif + + ldrb r2, [lr, #TCB_SP_FRAME] // Load osRtxInfo.thread.run.curr frame info + #if defined(__ARM_NEON) && (__ARM_NEON == 1) + orr r2, r2, #4 // NEON state + #else + orr r2, r2, #2 // VFP state + #endif + strb r2, [lr, #TCB_SP_FRAME] // Store VFP/NEON state + +osRtxContextSaveSP: + str r1, [lr, #TCB_SP_OFS] // Store user sp to osRtxInfo.thread.run.curr osRtxPostProcess: // RTX IRQ post processing check - POP {R8-R11} // Pop R8 = run.next, R9 = &IRQ_PendSV, R10 = IRQ_PendSV, R11 = &osRtxInfo.thread.run - CMP R10, #1 // Compare PendSV value - BNE osRtxContextRestore // Skip post processing if not pending + pop {r8-r11} // Pop R8 = run.next, R9 = &IRQ_PendSV, R10 = IRQ_PendSV, R11 = &osRtxInfo.thread.run + cmp r10, #1 // Compare PendSV value + bne osRtxContextRestore // Skip post processing if not pending - MOV R4, SP // Move SP_svc into R4 - AND R4, R4, #4 // Get stack adjustment to ensure 8-byte alignment - SUB SP, SP, R4 // Adjust stack + mov r4, sp // Move SP_svc into R4 + and r4, r4, #4 // Get stack adjustment to ensure 8-byte alignment + sub sp, sp, r4 // Adjust stack // Disable OS Tick - LDR R5, =osRtxInfo // Load address of osRtxInfo - LDR R5, [R5, #I_TICK_IRQN_OFS] // Load OS Tick irqn - MOV R0, R5 // Set it as function parameter - BLX IRQ_Disable // Disable OS Tick interrupt - MOV R6, #0 // Set PendSV clear value - B osRtxPendCheck + ldr r5, =osRtxInfo // Load address of osRtxInfo + ldr r5, [r5, #I_TICK_IRQN_OFS] // Load OS Tick irqn + mov r0, r5 // Set it as function parameter + blx IRQ_Disable // Disable OS Tick interrupt + mov r6, #0 // Set PendSV clear value + b osRtxPendCheck osRtxPendExec: - STRB R6, [R9] // Clear PendSV flag - CPSIE i // Re-enable interrupts - BLX osRtxPendSV_Handler // Post process pending objects - CPSID i // Disable interrupts + strb r6, [r9] // Clear PendSV flag + cpsie i // Re-enable interrupts + blx osRtxPendSV_Handler // Post process pending objects + cpsid i // Disable interrupts osRtxPendCheck: - LDR R8, 
[R11, #4] // Load osRtxInfo.thread.run.next - STR R8, [R11] // Store run.next as run.curr - LDRB R0, [R9] // Load PendSV flag - CMP R0, #1 // Compare PendSV value - BEQ osRtxPendExec // Branch to PendExec if PendSV is set + ldr r8, [r11, #4] // Load osRtxInfo.thread.run.next + str r8, [r11] // Store run.next as run.curr + ldrb r0, [r9] // Load PendSV flag + cmp r0, #1 // Compare PendSV value + beq osRtxPendExec // Branch to PendExec if PendSV is set // Re-enable OS Tick - MOV R0, R5 // Restore irqn as function parameter - BLX IRQ_Enable // Enable OS Tick interrupt + mov r0, r5 // Restore irqn as function parameter + blx IRQ_Enable // Enable OS Tick interrupt - ADD SP, SP, R4 // Restore stack adjustment + add sp, sp, r4 // Restore stack adjustment osRtxContextRestore: - LDR LR, [R8, #TCB_SP_OFS] // Load next osRtxThread_t.sp - LDRB R2, [R8, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame - - ANDS R2, R2, #0x6 // Check stack frame for VFP context - MRC p15, 0, R2, c1, c0, 2 // Read CPACR - ANDEQ R2, R2, #0xFF0FFFFF // VFP/NEON state not stacked, disable VFP/NEON - ORRNE R2, R2, #0x00F00000 // VFP/NEON state is stacked, enable VFP/NEON - MCR p15, 0, R2, c1, c0, 2 // Write CPACR - BEQ osRtxContextRestore1 // No VFP - ISB // Sync if VFP was enabled - #if __ARM_NEON == 1 - VLDMIA LR!, {D16-D31} // Restore D16-D31 - #endif - VLDMIA LR!, {D0-D15} // Restore D0-D15 - LDR R2, [LR] - VMSR FPSCR, R2 // Restore FPSCR - ADD LR, LR, #8 // Adjust sp pointer to R4 - -osRtxContextRestore1: - LDMIA LR!, {R4-R11} // Restore R4-R11 - ADD R12, LR, #32 // Adjust sp and save it into R12 - PUSH {R12} // Push sp onto stack - LDM SP, {SP}^ // Restore SP_usr directly - ADD SP, SP, #4 // Adjust SP_svc - LDMIA LR!, {R0-R3, R12} // Load user registers R0-R3,R12 - STMIB SP!, {R0-R3, R12} // Store them to SP_svc - LDM LR, {LR}^ // Restore LR_usr directly - LDMIB LR!, {R0-R1} // Load user registers PC,CPSR - ADD SP, SP, #4 - STMIB SP!, {R0-R1} // Store them to SP_svc - SUB SP, SP, #32 // Adjust SP_svc to stacked LR + ldr lr, [r8, #TCB_SP_OFS] // Load next osRtxThread_t.sp + ldrb r2, [r8, #TCB_SP_FRAME] // Load next osRtxThread_t.stack_frame + + ands r2, r2, #0x6 // Check stack frame for VFP context + mrc p15, 0, r2, c1, c0, 2 // Read CPACR + andeq r2, r2, #0xFF0FFFFF // VFP/NEON state not stacked, disable VFP/NEON + orrne r2, r2, #0x00F00000 // VFP/NEON state is stacked, enable VFP/NEON + mcr p15, 0, r2, c1, c0, 2 // Write CPACR + beq osRtxContextRestoreRegs // No VFP + isb // Sync if VFP was enabled + #if defined(__ARM_NEON) && (__ARM_NEON == 1) + vldmia lr!, {d16-d31} // Restore D16-D31 + #endif + vldmia lr!, {d0-d15} // Restore D0-D15 + ldr r2, [lr] + vmsr fpscr, r2 // Restore FPSCR + add lr, lr, #8 // Adjust sp pointer to R4 + +osRtxContextRestoreRegs: + ldmia lr!, {r4-r11} // Restore R4-R11 + add r12, lr, #32 // Adjust sp and save it into R12 + push {r12} // Push sp onto stack + ldm sp, {sp}^ // Restore SP_usr directly + add sp, sp, #4 // Adjust SP_svc + ldmia lr!, {r0-r3, r12} // Load user registers R0-R3,R12 + stmib sp!, {r0-r3, r12} // Store them to SP_svc + ldm lr, {lr}^ // Restore LR_usr directly + ldmib lr!, {r0-r1} // Load user registers PC,CPSR + add sp, sp, #4 + stmib sp!, {r0-r1} // Store them to SP_svc + sub sp, sp, #32 // Adjust SP_svc to stacked LR osRtxContextExit: - POP {PC} // Return + pop {pc} // Return .fnend .size osRtxContextSwitch, .-osRtxContextSwitch diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S 
b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S index 4d1359116e6..8cdc84aee65 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: Cortex-M0 Exception handlers + * Title: ARMv6-M Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,9 +26,13 @@ .syntax unified + #include "rtx_def.h" + .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset .equ TCB_SP_OFS, 56 // TCB.SP offset + .equ osRtxErrorStackOverflow, 1 // Stack overflow + .section ".rodata" .global irqRtxLib // Non weak library reference irqRtxLib: @@ -38,6 +42,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -47,89 +52,109 @@ irqRtxLib: .cantunwind SVC_Handler: - MOV R0,LR - LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2 - BCC SVC_MSP // Branch if return stack is MSP - MRS R0,PSP // Get PSP + mov r0,lr + lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2 + bcc SVC_MSP // Branch if return stack is MSP + mrs r0,psp // Get PSP SVC_Number: - LDR R1,[R0,#24] // Load saved PC from stack - SUBS R1,R1,#2 // Point to SVC instruction - LDRB R1,[R1] // Load SVC number - CMP R1,#0 - BNE SVC_User // Branch if not SVC 0 - - PUSH {R0,LR} // Save SP and EXC_RETURN - LDMIA R0,{R0-R3} // Load function parameters from stack - BLX R7 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STMIA R2!,{R0-R1} // Store function return values - MOV LR,R3 // Set EXC_RETURN + ldr r1,[r0,#24] // Load saved PC from stack + subs r1,r1,#2 // Point to SVC instruction + ldrb r1,[r1] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 + + push {r0,lr} // Save SP and EXC_RETURN + ldmia r0,{r0-r3} // Load function parameters from stack + blx r7 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + stmia r2!,{r0-r1} // Store function return values + mov lr,r3 // Set EXC_RETURN SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - BEQ SVC_Exit // Branch when threads are the same + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + beq SVC_Exit // Branch when threads are the same - CMP R1,#0 - BEQ SVC_ContextSwitch // Branch if running thread is deleted + subs r3,r3,#8 // Adjust address + str r2,[r3] // osRtxInfo.thread.run: curr = next + cmp r1,#0 + beq SVC_ContextRestore // Branch if running thread is deleted SVC_ContextSave: - MRS R0,PSP // Get PSP - SUBS R0,R0,#32 // Calculate SP - STR R0,[R1,#TCB_SP_OFS] // Store SP - STMIA R0!,{R4-R7} // Save R4..R7 - MOV R4,R8 - MOV R5,R9 - MOV R6,R10 - MOV R7,R11 - STMIA R0!,{R4-R7} // Save R8..R11 - -SVC_ContextSwitch: - SUBS R3,R3,#8 // Adjust address - STR R2,[R3] // osRtxInfo.thread.run: curr = next + mrs r0,psp // Get PSP + subs r0,r0,#32 // Calculate SP: space for R4..R11 + str r0,[r1,#TCB_SP_OFS] // Store 
SP + + #ifdef RTX_STACK_CHECK + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cmp r0,#0 + bne SVC_ContextSaveRegs // Branch when stack check is ok + + movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + b SVC_ContextRestore // Branch to context restore handling + +SVC_ContextSaveRegs: + ldr r0,[r1,#TCB_SP_OFS] // Load SP + + #endif // RTX_STACK_CHECK + + stmia r0!,{r4-r7} // Save R4..R7 + mov r4,r8 + mov r5,r9 + mov r6,r10 + mov r7,r11 + stmia r0!,{r4-r7} // Save R8..R11 SVC_ContextRestore: - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ADDS R0,R0,#16 // Adjust address - LDMIA R0!,{R4-R7} // Restore R8..R11 - MOV R8,R4 - MOV R9,R5 - MOV R10,R6 - MOV R11,R7 - MSR PSP,R0 // Set PSP - SUBS R0,R0,#32 // Adjust address - LDMIA R0!,{R4-R7} // Restore R4..R7 - - MOVS R0,#2 // Binary complement of 0xFFFFFFFD - MVNS R0,R0 // Set EXC_RETURN value - BX R0 // Exit from handler + ldr r0,[r2,#TCB_SP_OFS] // Load SP + adds r0,r0,#16 // Adjust address + ldmia r0!,{r4-r7} // Restore R8..R11 + mov r8,r4 + mov r9,r5 + mov r10,r6 + mov r11,r7 + msr psp,r0 // Set PSP + subs r0,r0,#32 // Adjust address + ldmia r0!,{r4-r7} // Restore R4..R7 + + movs r0,#2 // Binary complement of 0xFFFFFFFD + mvns r0,r0 // Set EXC_RETURN value + bx r0 // Exit from handler SVC_MSP: - MRS R0,MSP // Get MSP - B SVC_Number + mrs r0,msp // Get MSP + b SVC_Number SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range - - PUSH {R0,LR} // Save SP and EXC_RETURN - LSLS R1,R1,#2 - LDR R3,[R2,R1] // Load address of SVC function - MOV R12,R3 - LDMIA R0,{R0-R3} // Load function parameters from stack - BLX R12 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STR R0,[R2] // Store function return value - MOV LR,R3 // Set EXC_RETURN - - BX LR // Return from handler + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi SVC_Exit // Branch if out of range + + push {r0,lr} // Save SP and EXC_RETURN + lsls r1,r1,#2 + ldr r3,[r2,r1] // Load address of SVC function + mov r12,r3 + ldmia r0,{r0-r3} // Load function parameters from stack + blx r12 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + str r0,[r2] // Store function return value + mov lr,r3 // Set EXC_RETURN + + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -142,11 +167,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call osRtxPendSV_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -159,11 +184,11 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL 
osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend .size SysTick_Handler, .-SysTick_Handler diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S index 4d1359116e6..8cdc84aee65 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: Cortex-M0 Exception handlers + * Title: ARMv6-M Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,9 +26,13 @@ .syntax unified + #include "rtx_def.h" + .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset .equ TCB_SP_OFS, 56 // TCB.SP offset + .equ osRtxErrorStackOverflow, 1 // Stack overflow + .section ".rodata" .global irqRtxLib // Non weak library reference irqRtxLib: @@ -38,6 +42,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -47,89 +52,109 @@ irqRtxLib: .cantunwind SVC_Handler: - MOV R0,LR - LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2 - BCC SVC_MSP // Branch if return stack is MSP - MRS R0,PSP // Get PSP + mov r0,lr + lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2 + bcc SVC_MSP // Branch if return stack is MSP + mrs r0,psp // Get PSP SVC_Number: - LDR R1,[R0,#24] // Load saved PC from stack - SUBS R1,R1,#2 // Point to SVC instruction - LDRB R1,[R1] // Load SVC number - CMP R1,#0 - BNE SVC_User // Branch if not SVC 0 - - PUSH {R0,LR} // Save SP and EXC_RETURN - LDMIA R0,{R0-R3} // Load function parameters from stack - BLX R7 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STMIA R2!,{R0-R1} // Store function return values - MOV LR,R3 // Set EXC_RETURN + ldr r1,[r0,#24] // Load saved PC from stack + subs r1,r1,#2 // Point to SVC instruction + ldrb r1,[r1] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 + + push {r0,lr} // Save SP and EXC_RETURN + ldmia r0,{r0-r3} // Load function parameters from stack + blx r7 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + stmia r2!,{r0-r1} // Store function return values + mov lr,r3 // Set EXC_RETURN SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - BEQ SVC_Exit // Branch when threads are the same + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + beq SVC_Exit // Branch when threads are the same - CMP R1,#0 - BEQ SVC_ContextSwitch // Branch if running thread is deleted + subs r3,r3,#8 // Adjust address + str r2,[r3] // osRtxInfo.thread.run: curr = next + cmp r1,#0 + beq SVC_ContextRestore // Branch if running thread is 
deleted SVC_ContextSave: - MRS R0,PSP // Get PSP - SUBS R0,R0,#32 // Calculate SP - STR R0,[R1,#TCB_SP_OFS] // Store SP - STMIA R0!,{R4-R7} // Save R4..R7 - MOV R4,R8 - MOV R5,R9 - MOV R6,R10 - MOV R7,R11 - STMIA R0!,{R4-R7} // Save R8..R11 - -SVC_ContextSwitch: - SUBS R3,R3,#8 // Adjust address - STR R2,[R3] // osRtxInfo.thread.run: curr = next + mrs r0,psp // Get PSP + subs r0,r0,#32 // Calculate SP: space for R4..R11 + str r0,[r1,#TCB_SP_OFS] // Store SP + + #ifdef RTX_STACK_CHECK + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cmp r0,#0 + bne SVC_ContextSaveRegs // Branch when stack check is ok + + movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + b SVC_ContextRestore // Branch to context restore handling + +SVC_ContextSaveRegs: + ldr r0,[r1,#TCB_SP_OFS] // Load SP + + #endif // RTX_STACK_CHECK + + stmia r0!,{r4-r7} // Save R4..R7 + mov r4,r8 + mov r5,r9 + mov r6,r10 + mov r7,r11 + stmia r0!,{r4-r7} // Save R8..R11 SVC_ContextRestore: - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ADDS R0,R0,#16 // Adjust address - LDMIA R0!,{R4-R7} // Restore R8..R11 - MOV R8,R4 - MOV R9,R5 - MOV R10,R6 - MOV R11,R7 - MSR PSP,R0 // Set PSP - SUBS R0,R0,#32 // Adjust address - LDMIA R0!,{R4-R7} // Restore R4..R7 - - MOVS R0,#2 // Binary complement of 0xFFFFFFFD - MVNS R0,R0 // Set EXC_RETURN value - BX R0 // Exit from handler + ldr r0,[r2,#TCB_SP_OFS] // Load SP + adds r0,r0,#16 // Adjust address + ldmia r0!,{r4-r7} // Restore R8..R11 + mov r8,r4 + mov r9,r5 + mov r10,r6 + mov r11,r7 + msr psp,r0 // Set PSP + subs r0,r0,#32 // Adjust address + ldmia r0!,{r4-r7} // Restore R4..R7 + + movs r0,#2 // Binary complement of 0xFFFFFFFD + mvns r0,r0 // Set EXC_RETURN value + bx r0 // Exit from handler SVC_MSP: - MRS R0,MSP // Get MSP - B SVC_Number + mrs r0,msp // Get MSP + b SVC_Number SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range - - PUSH {R0,LR} // Save SP and EXC_RETURN - LSLS R1,R1,#2 - LDR R3,[R2,R1] // Load address of SVC function - MOV R12,R3 - LDMIA R0,{R0-R3} // Load function parameters from stack - BLX R12 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STR R0,[R2] // Store function return value - MOV LR,R3 // Set EXC_RETURN - - BX LR // Return from handler + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi SVC_Exit // Branch if out of range + + push {r0,lr} // Save SP and EXC_RETURN + lsls r1,r1,#2 + ldr r3,[r2,r1] // Load address of SVC function + mov r12,r3 + ldmia r0,{r0-r3} // Load function parameters from stack + blx r12 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + str r0,[r2] // Store function return value + mov lr,r3 // Set EXC_RETURN + + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -142,11 +167,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call 
osRtxPendSV_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -159,11 +184,11 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend .size SysTick_Handler, .-SysTick_Handler diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S index 90540257d44..4a6a33ca191 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Arm Limited. All rights reserved. + * Copyright (c) 2016-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: ARMv8M Baseline Exception handlers + * Title: ARMv8-M Baseline Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,11 +26,7 @@ .syntax unified - #ifdef _RTE_ - #ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS - #define DOMAIN_NS 1 - #endif - #endif + #include "rtx_def.h" #ifndef DOMAIN_NS #define DOMAIN_NS 0 @@ -42,6 +38,8 @@ .equ TCB_SF_OFS, 34 // TCB.stack_frame offset .equ TCB_TZM_OFS, 64 // TCB.tz_memory offset + .equ osRtxErrorStackOverflow, 1 // Stack overflow + .section ".rodata" .global irqRtxLib // Non weak library reference irqRtxLib: @@ -51,6 +49,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -60,129 +59,171 @@ irqRtxLib: .cantunwind SVC_Handler: - MOV R0,LR - LSRS R0,R0,#3 // Determine return stack from EXC_RETURN bit 2 - BCC SVC_MSP // Branch if return stack is MSP - MRS R0,PSP // Get PSP + mov r0,lr + lsrs r0,r0,#3 // Determine return stack from EXC_RETURN bit 2 + bcc SVC_MSP // Branch if return stack is MSP + mrs r0,psp // Get PSP SVC_Number: - LDR R1,[R0,#24] // Load saved PC from stack - SUBS R1,R1,#2 // Point to SVC instruction - LDRB R1,[R1] // Load SVC number - CMP R1,#0 - BNE SVC_User // Branch if not SVC 0 - - PUSH {R0,LR} // Save SP and EXC_RETURN - LDM R0,{R0-R3} // Load function parameters from stack - BLX R7 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STMIA R2!,{R0-R1} // Store function return values - MOV LR,R3 // Set EXC_RETURN + ldr r1,[r0,#24] // Load saved PC from stack + subs r1,r1,#2 // Point to SVC instruction + ldrb r1,[r1] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 + + push {r0,lr} // Save SP and EXC_RETURN + ldmia r0,{r0-r3} // Load function parameters from stack + blx r7 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + stmia r2!,{r0-r1} // Store function return values + mov lr,r3 // Set EXC_RETURN SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of 
osRtxInfo.run - LDMIA R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - BEQ SVC_Exit // Branch when threads are the same + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldmia r3!,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + beq SVC_Exit // Branch when threads are the same - CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted + subs r3,r3,#8 // Adjust address + str r2,[r3] // osRtxInfo.thread.run: curr = next + cbz r1,SVC_ContextRestore // Branch if running thread is deleted SVC_ContextSave: - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,SVC_ContextSave1 // Branch if there is no secure context - PUSH {R1,R2,R3,R7} // Save registers - MOV R7,LR // Get EXC_RETURN - BL TZ_StoreContext_S // Store secure context - MOV LR,R7 // Set EXC_RETURN - POP {R1,R2,R3,R7} // Restore registers - #endif - -SVC_ContextSave1: - MRS R0,PSP // Get PSP - SUBS R0,R0,#32 // Calculate SP - STR R0,[R1,#TCB_SP_OFS] // Store SP - STMIA R0!,{R4-R7} // Save R4..R7 - MOV R4,R8 - MOV R5,R9 - MOV R6,R10 - MOV R7,R11 - STMIA R0!,{R4-R7} // Save R8..R11 - -SVC_ContextSave2: - MOV R0,LR // Get EXC_RETURN - ADDS R1,R1,#TCB_SF_OFS // Adjust address - STRB R0,[R1] // Store stack frame information - -SVC_ContextSwitch: - SUBS R3,R3,#8 // Adjust address - STR R2,[R3] // osRtxInfo.thread.run: curr = next + #if (DOMAIN_NS != 0) + ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier + cbz r0,SVC_ContextSave_NS // Branch if there is no secure context + push {r1,r2,r3,r7} // Save registers + mov r7,lr // Get EXC_RETURN + bl TZ_StoreContext_S // Store secure context + mov lr,r7 // Set EXC_RETURN + pop {r1,r2,r3,r7} // Restore registers + #endif + +SVC_ContextSave_NS: + mrs r0,psp // Get PSP + #if (DOMAIN_NS != 0) + mov r3,lr // Get EXC_RETURN + lsls r3,r3,#25 // Check domain of interrupted thread + bmi SVC_ContextSaveSP // Branch if secure + #endif + + #ifdef RTX_STACK_CHECK + subs r0,r0,#32 // Calculate SP: space for R4..R11 + +SVC_ContextSaveSP: + str r0,[r1,#TCB_SP_OFS] // Store SP + mov r3,lr // Get EXC_RETURN + mov r0,r1 // osRtxInfo.thread.run.curr + adds r0,r0,#TCB_SF_OFS // Adjust address + strb r3,[r0] // Store stack frame information + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cmp r0,#0 + bne SVC_ContextSaveRegs // Branch when stack check is ok + + movs r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + b SVC_ContextRestore // Branch to context restore handling + +SVC_ContextSaveRegs: + #if (DOMAIN_NS != 0) + mov r0,r1 // osRtxInfo.thread.run.curr + adds r0,r0,#TCB_SF_OFS // Adjust address + ldrb r3,[r0] // Load stack frame information + lsls r3,r3,#25 // Check domain of interrupted thread + bmi SVC_ContextRestore // Branch if secure + #endif + ldr r0,[r1,#TCB_SP_OFS] // Load SP + stmia r0!,{r4-r7} // Save R4..R7 + mov r4,r8 + mov r5,r9 + mov r6,r10 + mov r7,r11 + stmia r0!,{r4-r7} // Save R8..R11 + #else + subs r0,r0,#32 // Calculate SP: space for R4..R11 + stmia r0!,{r4-r7} // Save R4..R7 + 
mov r4,r8 + mov r5,r9 + mov r6,r10 + mov r7,r11 + stmia r0!,{r4-r7} // Save R8..R11 + subs r0,r0,#32 // Adjust address +SVC_ContextSaveSP: + str r0,[r1,#TCB_SP_OFS] // Store SP + mov r0,lr // Get EXC_RETURN + adds r1,r1,#TCB_SF_OFS // Adjust address + strb r0,[r1] // Store stack frame information + #endif // RTX_STACK_CHECK SVC_ContextRestore: - #if (DOMAIN_NS == 1) - LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context - PUSH {R2,R3} // Save registers - BL TZ_LoadContext_S // Load secure context - POP {R2,R3} // Restore registers - #endif - -SVC_ContextRestore1: - MOV R1,R2 - ADDS R1,R1,#TCB_SF_OFS // Adjust address - LDRB R0,[R1] // Load stack frame information - MOVS R1,#0xFF - MVNS R1,R1 // R1=0xFFFFFF00 - ORRS R0,R1 - MOV LR,R0 // Set EXC_RETURN - - #if (DOMAIN_NS == 1) - LSLS R0,R0,#25 // Check domain of interrupted thread - BPL SVC_ContextRestore2 // Branch if non-secure - LDR R0,[R2,#TCB_SP_OFS] // Load SP - MSR PSP,R0 // Set PSP - BX LR // Exit from handler - #else - LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base - MSR PSPLIM,R0 // Set PSPLIM - #endif - -SVC_ContextRestore2: - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ADDS R0,R0,#16 // Adjust address - LDMIA R0!,{R4-R7} // Restore R8..R11 - MOV R8,R4 - MOV R9,R5 - MOV R10,R6 - MOV R11,R7 - MSR PSP,R0 // Set PSP - SUBS R0,R0,#32 // Adjust address - LDMIA R0!,{R4-R7} // Restore R4..R7 + #if (DOMAIN_NS != 0) + ldr r0,[r2,#TCB_TZM_OFS] // Load TrustZone memory identifier + cbz r0,SVC_ContextRestore_NS // Branch if there is no secure context + push {r2,r3} // Save registers + bl TZ_LoadContext_S // Load secure context + pop {r2,r3} // Restore registers + #endif + +SVC_ContextRestore_NS: + ldr r0,[r2,#TCB_SM_OFS] // Load stack memory base + msr psplim,r0 // Set PSPLIM + mov r0,r2 // osRtxInfo.thread.run.next + adds r0,r0,#TCB_SF_OFS // Adjust address + ldrb r3,[r0] // Load stack frame information + movs r0,#0xFF + mvns r0,r0 // R0=0xFFFFFF00 + orrs r3,r3,r0 + mov lr,r3 // Set EXC_RETURN + ldr r0,[r2,#TCB_SP_OFS] // Load SP + #if (DOMAIN_NS != 0) + lsls r3,r3,#25 // Check domain of interrupted thread + bmi SVC_ContextRestoreSP // Branch if secure + #endif + + adds r0,r0,#16 // Adjust address + ldmia r0!,{r4-r7} // Restore R8..R11 + mov r8,r4 + mov r9,r5 + mov r10,r6 + mov r11,r7 + subs r0,r0,#32 // Adjust address + ldmia r0!,{r4-r7} // Restore R4..R7 + adds r0,r0,#16 // Adjust address + +SVC_ContextRestoreSP: + msr psp,r0 // Set PSP SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_MSP: - MRS R0,MSP // Get MSP - B SVC_Number + mrs r0,msp // Get MSP + b SVC_Number SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range - - PUSH {R0,LR} // Save SP and EXC_RETURN - LSLS R1,R1,#2 - LDR R3,[R2,R1] // Load address of SVC function - MOV R12,R3 - LDMIA R0,{R0-R3} // Load function parameters from stack - BLX R12 // Call service function - POP {R2,R3} // Restore SP and EXC_RETURN - STR R0,[R2] // Store function return value - MOV LR,R3 // Set EXC_RETURN - - BX LR // Return from handler + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi SVC_Exit // Branch if out of range + + push {r0,lr} // Save SP and EXC_RETURN + lsls r1,r1,#2 + ldr r3,[r2,r1] // Load address of SVC function + mov r12,r3 + ldmia r0,{r0-r3} // Load function parameters from 
stack + blx r12 // Call service function + pop {r2,r3} // Restore SP and EXC_RETURN + str r0,[r2] // Store function return value + mov lr,r3 // Set EXC_RETURN + + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -195,11 +236,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call osRtxPendSV_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B Sys_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -212,113 +253,14 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,R1} // Restore EXC_RETURN - MOV LR,R1 // Set EXC_RETURN - B Sys_Context - - .fnend - .size SysTick_Handler, .-SysTick_Handler - - - .thumb_func - .type Sys_Context, %function - .global Sys_Context - .fnstart - .cantunwind -Sys_Context: - - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R3!,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - BEQ Sys_ContextExit // Branch when threads are the same - -Sys_ContextSave: - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,Sys_ContextSave1 // Branch if there is no secure context - PUSH {R1,R2,R3,R7} // Save registers - MOV R7,LR // Get EXC_RETURN - BL TZ_StoreContext_S // Store secure context - MOV LR,R7 // Set EXC_RETURN - POP {R1,R2,R3,R7} // Restore registers - -Sys_ContextSave1: - MOV R0,LR // Get EXC_RETURN - LSLS R0,R0,#25 // Check domain of interrupted thread - BPL Sys_ContextSave2 // Branch if non-secure - MRS R0,PSP // Get PSP - STR R0,[R1,#TCB_SP_OFS] // Store SP - B Sys_ContextSave3 - #endif - -Sys_ContextSave2: - MRS R0,PSP // Get PSP - SUBS R0,R0,#32 // Adjust address - STR R0,[R1,#TCB_SP_OFS] // Store SP - STMIA R0!,{R4-R7} // Save R4..R7 - MOV R4,R8 - MOV R5,R9 - MOV R6,R10 - MOV R7,R11 - STMIA R0!,{R4-R7} // Save R8..R11 - -Sys_ContextSave3: - MOV R0,LR // Get EXC_RETURN - ADDS R1,R1,#TCB_SF_OFS // Adjust address - STRB R0,[R1] // Store stack frame information - -Sys_ContextSwitch: - SUBS R3,R3,#8 // Adjust address - STR R2,[R3] // osRtxInfo.run: curr = next - -Sys_ContextRestore: - #if (DOMAIN_NS == 1) - LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context - PUSH {R2,R3} // Save registers - BL TZ_LoadContext_S // Load secure context - POP {R2,R3} // Restore registers - #endif - -Sys_ContextRestore1: - MOV R1,R2 - ADDS R1,R1,#TCB_SF_OFS // Adjust offset - LDRB R0,[R1] // Load stack frame information - MOVS R1,#0xFF - MVNS R1,R1 // R1=0xFFFFFF00 - ORRS R0,R1 - MOV LR,R0 // Set EXC_RETURN - - #if (DOMAIN_NS == 1) - LSLS R0,R0,#25 // Check domain of interrupted thread - BPL Sys_ContextRestore2 // Branch if non-secure - LDR R0,[R2,#TCB_SP_OFS] // Load SP - MSR PSP,R0 // Set PSP - BX LR // Exit from handler - #else - LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base - MSR PSPLIM,R0 // Set PSPLIM - #endif - -Sys_ContextRestore2: - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ADDS R0,R0,#16 // Adjust address - LDMIA R0!,{R4-R7} // Restore R8..R11 - MOV R8,R4 - MOV R9,R5 - MOV R10,R6 - MOV R11,R7 - MSR PSP,R0 // Set PSP - SUBS R0,R0,#32 // Adjust address - LDMIA R0!,{R4-R7} // Restore R4..R7 - 
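+ // Editor's note (added commentary, not from upstream): the dedicated
+ // Sys_Context routine above is deleted by this patch. PendSV_Handler and
+ // SysTick_Handler now branch directly to SVC_Context with EXC_RETURN
+ // restored in LR, so each port keeps a single context save/restore path.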
-Sys_ContextExit: - BX LR // Exit from handler + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,r1} // Restore EXC_RETURN + mov lr,r1 // Set EXC_RETURN + b SVC_Context // Branch to context handling .fnend - .size Sys_Context, .-Sys_Context + .size SysTick_Handler, .-SysTick_Handler .end diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S index 069bc901458..ae2a87b5982 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: Cortex-M3 Exception handlers + * Title: ARMv7-M Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,8 +26,21 @@ .syntax unified + #include "rtx_def.h" + + #if (defined(__ARM_FP) && (__ARM_FP > 0)) + .equ FPU_USED, 1 + #else + .equ FPU_USED, 0 + #endif + .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset .equ TCB_SP_OFS, 56 // TCB.SP offset + .equ TCB_SF_OFS, 34 // TCB.stack_frame offset + + .equ FPCCR, 0xE000EF34 // FPCCR Address + + .equ osRtxErrorStackOverflow, 1 // Stack overflow .section ".rodata" .global irqRtxLib // Non weak library reference @@ -38,6 +51,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -47,60 +61,128 @@ irqRtxLib: .cantunwind SVC_Handler: - TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2 - ITE EQ - MRSEQ R0,MSP // Get MSP if return stack is MSP - MRSNE R0,PSP // Get PSP if return stack is PSP + tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2 + ite eq + mrseq r0,msp // Get MSP if return stack is MSP + mrsne r0,psp // Get PSP if return stack is PSP - LDR R1,[R0,#24] // Load saved PC from stack - LDRB R1,[R1,#-2] // Load SVC number - CBNZ R1,SVC_User // Branch if not SVC 0 + ldr r1,[r0,#24] // Load saved PC from stack + ldrb r1,[r1,#-2] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 - PUSH {R0,LR} // Save SP and EXC_RETURN - LDM R0,{R0-R3,R12} // Load function parameters and address from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STM R12,{R0-R1} // Store function return values + push {r0,lr} // Save SP and EXC_RETURN + ldm r0,{r0-r3,r12} // Load function parameters and address from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + stm r12,{r0-r1} // Store function return values SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - IT EQ - BXEQ LR // Exit when threads are the same - - CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + it eq + bxeq lr // Exit when threads are the same + + str r2,[r3] // osRtxInfo.thread.run: curr = next + + .if (FPU_USED != 0) + cbnz r1,SVC_ContextSave // Branch if 
running thread is not deleted +SVC_FP_LazyState: + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + bne SVC_ContextRestore // Branch if not extended stack frame + ldr r3,=FPCCR // FPCCR Address + ldr r0,[r3] // Load FPCCR + bic r0,r0,#1 // Clear LSPACT (Lazy state preservation) + str r0,[r3] // Store FPCCR + b SVC_ContextRestore // Branch to context restore handling + .else + cbz r1,SVC_ContextRestore // Branch if running thread is deleted + .endif SVC_ContextSave: - STMDB R12!,{R4-R11} // Save R4..R11 - STR R12,[R1,#TCB_SP_OFS] // Store SP - -SVC_ContextSwitch: - STR R2,[R3] // osRtxInfo.thread.run: curr = next + #ifdef RTX_STACK_CHECK + sub r12,r12,#32 // Calculate SP: space for R4..R11 + .if (FPU_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + subeq r12,r12,#64 // Additional space for S16..S31 + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + .endif + str r12,[r1,#TCB_SP_OFS] // Store SP + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok + + .if (FPU_USED != 0) + mov r4,r1 // Save osRtxInfo.thread.run.curr + .endif + mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + .if (FPU_USED != 0) + ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information + b SVC_FP_LazyState // Branch to FP lazy state handling + .else + b SVC_ContextRestore // Branch to context restore handling + .endif + +SVC_ContextSaveRegs: + ldr r12,[r1,#TCB_SP_OFS] // Load SP + .if (FPU_USED != 0) + ldrb lr, [r1,#TCB_SF_OFS] // Load stack frame information + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmiaeq r12!,{s16-s31} // Save VFP S16..S31 + .endif + stm r12,{r4-r11} // Save R4..R11 + #else + stmdb r12!,{r4-r11} // Save R4..R11 + .if (FPU_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmdbeq r12!,{s16-s31} // Save VFP S16..S31 + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + .endif + str r12,[r1,#TCB_SP_OFS] // Store SP + #endif // RTX_STACK_CHECK SVC_ContextRestore: - LDR R0,[R2,#TCB_SP_OFS] // Load SP - LDMIA R0!,{R4-R11} // Restore R4..R11 - MSR PSP,R0 // Set PSP - MVN LR,#~0xFFFFFFFD // Set EXC_RETURN value + ldr r0,[r2,#TCB_SP_OFS] // Load SP + .if (FPU_USED != 0) + ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information + orn lr,r1,#0xFF // Set EXC_RETURN + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31 + .else + mvn lr,#~0xFFFFFFFD // Set EXC_RETURN value + .endif + ldmia r0!,{r4-r11} // Restore R4..R11 + msr psp,r0 // Set PSP SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi 
SVC_Exit // Branch if out of range - PUSH {R0,LR} // Save SP and EXC_RETURN - LDR R12,[R2,R1,LSL #2] // Load address of SVC function - LDM R0,{R0-R3} // Load function parameters from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STR R0,[R12] // Store function return value + push {r0,lr} // Save SP and EXC_RETURN + ldr r12,[r2,r1,lsl #2] // Load address of SVC function + ldm r0,{r0-r3} // Load function parameters from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + str r0,[r12] // Store function return value - BX LR // Return from handler + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -113,11 +195,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call osRtxPendSV_Handler - POP {R0,LR} // Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -130,11 +212,11 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,LR} // Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend .size SysTick_Handler, .-SysTick_Handler diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S index 8051ead98e8..0883c6bd510 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020 Arm Limited. All rights reserved. + * Copyright (c) 2016-2021 Arm Limited. All rights reserved. 
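The handlers in this patch all recover the SVC number the same way: the stacked PC sits at offset 24 in the exception frame (after R0-R3, R12 and LR), and the immediate of a 16-bit `svc #n` instruction is the low byte located two bytes before that PC. An equivalent C sketch, with a hypothetical helper shown only to document the frame layout (frame words are 32-bit on the Cortex-M target; `uintptr_t` keeps the sketch host-runnable):

```c
#include <stdint.h>

/* Sketch of "ldr r1,[r0,#24]" / "ldrb r1,[r1,#-2]": the frame holds
 * R0-R3, R12, LR, PC, xPSR, so the stacked PC is word 6 (offset 24).
 */
static uint8_t svc_number_from_frame(const uintptr_t *frame) {
  const uint8_t *ret_pc = (const uint8_t *)frame[6];
  return ret_pc[-2]; /* immediate byte of the SVC instruction */
}

int main(void) {
  static const uint8_t code[] = { 0x05, 0xDF, 0x00, 0xBF }; /* svc #5 ; nop */
  uintptr_t frame[8] = {0};
  frame[6] = (uintptr_t)&code[2];      /* stacked PC = insn after the svc */
  return svc_number_from_frame(frame); /* returns 5 */
}
```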
* * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: ARMv8M Mainline Exception handlers + * Title: ARMv8-M Mainline Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,11 +26,7 @@ .syntax unified - #ifdef _RTE_ - #ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS - #define DOMAIN_NS 1 - #endif - #endif + #include "rtx_def.h" #ifndef DOMAIN_NS #define DOMAIN_NS 0 @@ -54,6 +50,10 @@ .equ TCB_SF_OFS, 34 // TCB.stack_frame offset .equ TCB_TZM_OFS, 64 // TCB.tz_memory offset + .equ FPCCR, 0xE000EF34 // FPCCR Address + + .equ osRtxErrorStackOverflow, 1 // Stack overflow + .section ".rodata" .global irqRtxLib // Non weak library reference irqRtxLib: @@ -63,6 +63,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -72,115 +73,165 @@ irqRtxLib: .cantunwind SVC_Handler: - TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2 - ITE EQ - MRSEQ R0,MSP // Get MSP if return stack is MSP - MRSNE R0,PSP // Get PSP if return stack is PSP + tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2 + ite eq + mrseq r0,msp // Get MSP if return stack is MSP + mrsne r0,psp // Get PSP if return stack is PSP - LDR R1,[R0,#24] // Load saved PC from stack - LDRB R1,[R1,#-2] // Load SVC number - CMP R1,#0 - BNE SVC_User // Branch if not SVC 0 + ldr r1,[r0,#24] // Load saved PC from stack + ldrb r1,[r1,#-2] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 - PUSH {R0,LR} // Save SP and EXC_RETURN - LDM R0,{R0-R3,R12} // Load function parameters and address from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STM R12,{R0-R1} // Store function return values + push {r0,lr} // Save SP and EXC_RETURN + ldm r0,{r0-r3,r12} // Load function parameters and address from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + stm r12,{r0-r1} // Store function return values SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - IT EQ - BXEQ LR // Exit when threads are the same - - .if (FPU_USED == 1) || (MVE_USED == 1) - CBNZ R1,SVC_ContextSave // Branch if running thread is not deleted - TST LR,#0x10 // Check if extended stack frame - BNE SVC_ContextSwitch - LDR R1,=0xE000EF34 // FPCCR Address - LDR R0,[R1] // Load FPCCR - BIC R0,R0,#1 // Clear LSPACT (Lazy state) - STR R0,[R1] // Store FPCCR - B SVC_ContextSwitch - .else - CBZ R1,SVC_ContextSwitch // Branch if running thread is deleted - .endif + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + it eq + bxeq lr // Exit when threads are the same + + str r2,[r3] // osRtxInfo.thread.run: curr = next + + .if (FPU_USED != 0) || (MVE_USED != 0) + cbnz r1,SVC_ContextSave // Branch if running thread is not deleted +SVC_FP_LazyState: + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + bne SVC_ContextRestore // Branch if not extended stack frame + ldr r3,=FPCCR // FPCCR Address + ldr r0,[r3] // Load FPCCR + bic r0,r0,#1 // Clear LSPACT (Lazy state preservation) + str r0,[r3] // Store FPCCR + b SVC_ContextRestore // Branch to context restore handling + .else + cbz 
r1,SVC_ContextRestore // Branch if running thread is deleted + .endif SVC_ContextSave: - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,SVC_ContextSave1 // Branch if there is no secure context - PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN - BL TZ_StoreContext_S // Store secure context - POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN - #endif - -SVC_ContextSave1: - MRS R0,PSP // Get PSP - STMDB R0!,{R4-R11} // Save R4..R11 - .if (FPU_USED == 1) || (MVE_USED == 1) - TST LR,#0x10 // Check if extended stack frame - IT EQ - VSTMDBEQ R0!,{S16-S31} // Save VFP S16.S31 - .endif - -SVC_ContextSave2: - STR R0,[R1,#TCB_SP_OFS] // Store SP - STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information + #if (DOMAIN_NS != 0) + ldr r0,[r1,#TCB_TZM_OFS] // Load TrustZone memory identifier + cbz r0,SVC_ContextSave_NS // Branch if there is no secure context + push {r1,r2,r12,lr} // Save registers and EXC_RETURN + bl TZ_StoreContext_S // Store secure context + pop {r1,r2,r12,lr} // Restore registers and EXC_RETURN + #endif -SVC_ContextSwitch +SVC_ContextSave_NS + #if (DOMAIN_NS != 0) + tst lr,#0x40 // Check domain of interrupted thread + bne SVC_ContextSaveSP // Branch if secure + #endif + + #ifdef RTX_STACK_CHECK + sub r12,r12,#32 // Calculate SP: space for R4..R11 + .if (FPU_USED != 0) || (MVE_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + subeq r12,r12,#64 // Additional space for S16..S31 + .endif + +SVC_ContextSaveSP: + str r12,[r1,#TCB_SP_OFS] // Store SP + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok + + .if (FPU_USED != 0) || (MVE_USED != 0) + mov r4,r1 // Save osRtxInfo.thread.run.curr + .endif + mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + .if (FPU_USED != 0) || (MVE_USED != 0) + ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information + b SVC_FP_LazyState // Branch to FP lazy state handling + .else + b SVC_ContextRestore // Branch to context restore handling + .endif + +SVC_ContextSaveRegs: + ldrb lr,[r1,#TCB_SF_OFS] // Load stack frame information + #if (DOMAIN_NS != 0) + tst lr,#0x40 // Check domain of interrupted thread + bne SVC_ContextRestore // Branch if secure + #endif + ldr r12,[r1,#TCB_SP_OFS] // Load SP + .if (FPU_USED != 0) || (MVE_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmiaeq r12!,{s16-s31} // Save VFP S16..S31 + .endif + stm r12,{r4-r11} // Save R4..R11 + #else + stmdb r12!,{r4-r11} // Save R4..R11 + .if (FPU_USED != 0) || (MVE_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmdbeq r12!,{s16-s31} // Save VFP S16..S31 + .endif +SVC_ContextSaveSP: + str r12,[r1,#TCB_SP_OFS] // Store SP + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + #endif // RTX_STACK_CHECK SVC_ContextRestore: - #if (DOMAIN_NS == 1) - LDR 
R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,SVC_ContextRestore1 // Branch if there is no secure context - PUSH {R2,R3} // Save registers - BL TZ_LoadContext_S // Load secure context - POP {R2,R3} // Restore registers - #endif - -SVC_ContextRestore1: - LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base - LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information - MSR PSPLIM,R0 // Set PSPLIM - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN - - #if (DOMAIN_NS == 1) - TST LR,#0x40 // Check domain of interrupted thread - BNE SVC_ContextRestore2 // Branch if secure - #endif - - .if (FPU_USED == 1) || (MVE_USED == 1) - TST LR,#0x10 // Check if extended stack frame - IT EQ - VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31 - .endif - LDMIA R0!,{R4-R11} // Restore R4..R11 - -SVC_ContextRestore2: - MSR PSP,R0 // Set PSP + #if (DOMAIN_NS != 0) + ldr r0,[r2,#TCB_TZM_OFS] // Load TrustZone memory identifier + cbz r0,SVC_ContextRestore_NS // Branch if there is no secure context + push {r2,r3} // Save registers + bl TZ_LoadContext_S // Load secure context + pop {r2,r3} // Restore registers + #endif + +SVC_ContextRestore_NS: + ldr r0,[r2,#TCB_SP_OFS] // Load SP + ldr r1,[r2,#TCB_SM_OFS] // Load stack memory base + msr psplim,r1 // Set PSPLIM + ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information + orn lr,r1,#0xFF // Set EXC_RETURN + #if (DOMAIN_NS != 0) + tst lr,#0x40 // Check domain of interrupted thread + bne SVC_ContextRestoreSP // Branch if secure + #endif + + .if (FPU_USED != 0) || (MVE_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31 + .endif + ldmia r0!,{r4-r11} // Restore R4..R11 + +SVC_ContextRestoreSP: + msr psp,r0 // Set PSP SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi SVC_Exit // Branch if out of range - PUSH {R0,LR} // Save SP and EXC_RETURN - LDR R12,[R2,R1,LSL #2] // Load address of SVC function - LDM R0,{R0-R3} // Load function parameters from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STR R0,[R12] // Store function return value + push {r0,lr} // Save SP and EXC_RETURN + ldr r12,[r2,r1,lsl #2] // Load address of SVC function + ldm r0,{r0-r3} // Load function parameters from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + str r0,[r12] // Store function return value - BX LR // Return from handler + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -193,10 +244,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call osRtxPendSV_Handler - POP {R0,LR} // Restore EXC_RETURN - B Sys_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -209,95 +261,14 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,LR} // Restore EXC_RETURN - B Sys_Context - - .fnend - 
.size SysTick_Handler, .-SysTick_Handler - - - .thumb_func - .type Sys_Context, %function - .global Sys_Context - .fnstart - .cantunwind -Sys_Context: - - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - IT EQ - BXEQ LR // Exit when threads are the same - -Sys_ContextSave: - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,Sys_ContextSave1 // Branch if there is no secure context - PUSH {R1,R2,R3,LR} // Save registers and EXC_RETURN - BL TZ_StoreContext_S // Store secure context - POP {R1,R2,R3,LR} // Restore registers and EXC_RETURN - -Sys_ContextSave1: - TST LR,#0x40 // Check domain of interrupted thread - IT NE - MRSNE R0,PSP // Get PSP - BNE Sys_ContextSave3 // Branch if secure - #endif - -Sys_ContextSave2: - MRS R0,PSP // Get PSP - STMDB R0!,{R4-R11} // Save R4..R11 - .if (FPU_USED == 1) || (MVE_USED == 1) - TST LR,#0x10 // Check if extended stack frame - IT EQ - VSTMDBEQ R0!,{S16-S31} // Save VFP S16.S31 - .endif - -Sys_ContextSave3: - STR R0,[R1,#TCB_SP_OFS] // Store SP - STRB LR,[R1,#TCB_SF_OFS] // Store stack frame information - -Sys_ContextSwitch: - STR R2,[R3] // osRtxInfo.run: curr = next - -Sys_ContextRestore: - #if (DOMAIN_NS == 1) - LDR R0,[R2,#TCB_TZM_OFS] // Load TrustZone memory identifier - CBZ R0,Sys_ContextRestore1 // Branch if there is no secure context - PUSH {R2,R3} // Save registers - BL TZ_LoadContext_S // Load secure context - POP {R2,R3} // Restore registers - #endif - -Sys_ContextRestore1: - LDR R0,[R2,#TCB_SM_OFS] // Load stack memory base - LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information - MSR PSPLIM,R0 // Set PSPLIM - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN - - #if (DOMAIN_NS == 1) - TST LR,#0x40 // Check domain of interrupted thread - BNE Sys_ContextRestore2 // Branch if secure - #endif - - .if (FPU_USED == 1) || (MVE_USED == 1) - TST LR,#0x10 // Check if extended stack frame - IT EQ - VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31 - .endif - LDMIA R0!,{R4-R11} // Restore R4..R11 - -Sys_ContextRestore2: - MSR PSP,R0 // Set PSP - -Sys_ContextExit: - BX LR // Exit from handler + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend - .size Sys_Context, .-Sys_Context + .size SysTick_Handler, .-SysTick_Handler .end diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S index b5c68ad2fe1..ae2a87b5982 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
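The `SVC_FP_LazyState` paths added above handle a deleted running thread that still has floating-point lazy state pending: if EXC_RETURN bit 4 is clear (extended frame), the handler clears LSPACT in FPCCR at 0xE000EF34 so the core never flushes S0..S15 into a stack that has already been freed. A C rendering of that one step, as a sketch for Cortex-M targets (the function name is illustrative; the address and bit come from the code above):

```c
#include <stdint.h>

#define FPCCR_ADDR   0xE000EF34u /* FPCCR, same address as the .equ above */
#define FPCCR_LSPACT (1u << 0)   /* lazy state preservation active */

/* Sketch of SVC_FP_LazyState: the thread was deleted while FP lazy
 * stacking was still armed, so drop the pending state instead of
 * letting the core write it into memory that no longer belongs to it.
 */
static void drop_lazy_fp_state(uint32_t exc_return) {
  if ((exc_return & 0x10u) == 0u) { /* extended stack frame */
    volatile uint32_t *fpccr = (volatile uint32_t *)FPCCR_ADDR;
    *fpccr &= ~FPCCR_LSPACT;        /* clear LSPACT, as "bic r0,r0,#1" does */
  }
}
```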
* * SPDX-License-Identifier: Apache-2.0 * @@ -18,7 +18,7 @@ * ----------------------------------------------------------------------------- * * Project: CMSIS-RTOS RTX - * Title: Cortex-M4F Exception handlers + * Title: ARMv7-M Exception handlers * * ----------------------------------------------------------------------------- */ @@ -26,10 +26,22 @@ .syntax unified + #include "rtx_def.h" + + #if (defined(__ARM_FP) && (__ARM_FP > 0)) + .equ FPU_USED, 1 + #else + .equ FPU_USED, 0 + #endif + .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset .equ TCB_SP_OFS, 56 // TCB.SP offset .equ TCB_SF_OFS, 34 // TCB.stack_frame offset + .equ FPCCR, 0xE000EF34 // FPCCR Address + + .equ osRtxErrorStackOverflow, 1 // Stack overflow + .section ".rodata" .global irqRtxLib // Non weak library reference irqRtxLib: @@ -39,6 +51,7 @@ irqRtxLib: .thumb .section ".text" .align 2 + .eabi_attribute Tag_ABI_align_preserved, 1 .thumb_func @@ -48,83 +61,128 @@ irqRtxLib: .cantunwind SVC_Handler: - TST LR,#0x04 // Determine return stack from EXC_RETURN bit 2 - ITE EQ - MRSEQ R0,MSP // Get MSP if return stack is MSP - MRSNE R0,PSP // Get PSP if return stack is PSP + tst lr,#0x04 // Determine return stack from EXC_RETURN bit 2 + ite eq + mrseq r0,msp // Get MSP if return stack is MSP + mrsne r0,psp // Get PSP if return stack is PSP - LDR R1,[R0,#24] // Load saved PC from stack - LDRB R1,[R1,#-2] // Load SVC number - CBNZ R1,SVC_User // Branch if not SVC 0 + ldr r1,[r0,#24] // Load saved PC from stack + ldrb r1,[r1,#-2] // Load SVC number + cmp r1,#0 // Check SVC number + bne SVC_User // Branch if not SVC 0 - PUSH {R0,LR} // Save SP and EXC_RETURN - LDM R0,{R0-R3,R12} // Load function parameters and address from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STM R12,{R0-R1} // Store function return values + push {r0,lr} // Save SP and EXC_RETURN + ldm r0,{r0-r3,r12} // Load function parameters and address from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + stm r12,{r0-r1} // Store function return values SVC_Context: - LDR R3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.run - LDM R3,{R1,R2} // Load osRtxInfo.thread.run: curr & next - CMP R1,R2 // Check if thread switch is required - IT EQ - BXEQ LR // Exit when threads are the same - - CBNZ R1,SVC_ContextSave // Branch if running thread is not deleted - TST LR,#0x10 // Check if extended stack frame - BNE SVC_ContextSwitch -#ifdef __FPU_PRESENT - LDR R1,=0xE000EF34 // FPCCR Address - LDR R0,[R1] // Load FPCCR - BIC R0,R0,#1 // Clear LSPACT (Lazy state) - STR R0,[R1] // Store FPCCR - B SVC_ContextSwitch -#endif + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldm r3,{r1,r2} // Load osRtxInfo.thread.run: curr & next + cmp r1,r2 // Check if thread switch is required + it eq + bxeq lr // Exit when threads are the same + + str r2,[r3] // osRtxInfo.thread.run: curr = next + + .if (FPU_USED != 0) + cbnz r1,SVC_ContextSave // Branch if running thread is not deleted +SVC_FP_LazyState: + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + bne SVC_ContextRestore // Branch if not extended stack frame + ldr r3,=FPCCR // FPCCR Address + ldr r0,[r3] // Load FPCCR + bic r0,r0,#1 // Clear LSPACT (Lazy state preservation) + str r0,[r3] // Store FPCCR + b SVC_ContextRestore // Branch to context restore handling + .else + cbz r1,SVC_ContextRestore // Branch if running thread is deleted + .endif SVC_ContextSave: - STMDB R12!,{R4-R11} // Save R4..R11 -#ifdef __FPU_PRESENT - 
TST LR,#0x10 // Check if extended stack frame - IT EQ - VSTMDBEQ R12!,{S16-S31} // Save VFP S16.S31 -#endif - - STR R12,[R1,#TCB_SP_OFS] // Store SP - STRB LR, [R1,#TCB_SF_OFS] // Store stack frame information - -SVC_ContextSwitch: - STR R2,[R3] // osRtxInfo.thread.run: curr = next + #ifdef RTX_STACK_CHECK + sub r12,r12,#32 // Calculate SP: space for R4..R11 + .if (FPU_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + subeq r12,r12,#64 // Additional space for S16..S31 + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + .endif + str r12,[r1,#TCB_SP_OFS] // Store SP + + push {r1,r2} // Save osRtxInfo.thread.run: curr & next + mov r0,r1 // Parameter: osRtxInfo.thread.run.curr + bl osRtxThreadStackCheck // Check if thread stack is overrun + pop {r1,r2} // Restore osRtxInfo.thread.run: curr & next + cbnz r0,SVC_ContextSaveRegs // Branch when stack check is ok + + .if (FPU_USED != 0) + mov r4,r1 // Save osRtxInfo.thread.run.curr + .endif + mov r0,#osRtxErrorStackOverflow // Parameter: r0=code, r1=object_id + bl osRtxKernelErrorNotify // Call osRtxKernelErrorNotify + ldr r3,=osRtxInfo+I_T_RUN_OFS // Load address of osRtxInfo.thread.run + ldr r2,[r3,#4] // Load osRtxInfo.thread.run: next + str r2,[r3] // osRtxInfo.thread.run: curr = next + .if (FPU_USED != 0) + ldrb lr,[r4,#TCB_SF_OFS] // Load stack frame information + b SVC_FP_LazyState // Branch to FP lazy state handling + .else + b SVC_ContextRestore // Branch to context restore handling + .endif + +SVC_ContextSaveRegs: + ldr r12,[r1,#TCB_SP_OFS] // Load SP + .if (FPU_USED != 0) + ldrb lr, [r1,#TCB_SF_OFS] // Load stack frame information + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmiaeq r12!,{s16-s31} // Save VFP S16..S31 + .endif + stm r12,{r4-r11} // Save R4..R11 + #else + stmdb r12!,{r4-r11} // Save R4..R11 + .if (FPU_USED != 0) + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vstmdbeq r12!,{s16-s31} // Save VFP S16..S31 + strb lr, [r1,#TCB_SF_OFS] // Store stack frame information + .endif + str r12,[r1,#TCB_SP_OFS] // Store SP + #endif // RTX_STACK_CHECK SVC_ContextRestore: - LDRB R1,[R2,#TCB_SF_OFS] // Load stack frame information - LDR R0,[R2,#TCB_SP_OFS] // Load SP - ORR LR,R1,#0xFFFFFF00 // Set EXC_RETURN - -#ifdef __FPU_PRESENT - TST LR,#0x10 // Check if extended stack frame - IT EQ - VLDMIAEQ R0!,{S16-S31} // Restore VFP S16..S31 -#endif - LDMIA R0!,{R4-R11} // Restore R4..R11 - MSR PSP,R0 // Set PSP + ldr r0,[r2,#TCB_SP_OFS] // Load SP + .if (FPU_USED != 0) + ldrb r1,[r2,#TCB_SF_OFS] // Load stack frame information + orn lr,r1,#0xFF // Set EXC_RETURN + tst lr,#0x10 // Determine stack frame from EXC_RETURN bit 4 + it eq // If extended stack frame + vldmiaeq r0!,{s16-s31} // Restore VFP S16..S31 + .else + mvn lr,#~0xFFFFFFFD // Set EXC_RETURN value + .endif + ldmia r0!,{r4-r11} // Restore R4..R11 + msr psp,r0 // Set PSP SVC_Exit: - BX LR // Exit from handler + bx lr // Exit from handler SVC_User: - LDR R2,=osRtxUserSVC // Load address of SVC table - LDR R3,[R2] // Load SVC maximum number - CMP R1,R3 // Check SVC number range - BHI SVC_Exit // Branch if out of range + ldr r2,=osRtxUserSVC // Load address of SVC table + ldr r3,[r2] // Load SVC maximum number + cmp r1,r3 // Check SVC number range + bhi SVC_Exit // Branch if out of range - PUSH {R0,LR} // Save SP and EXC_RETURN - LDR R12,[R2,R1,LSL #2] // Load address of SVC function - LDM R0,{R0-R3} // Load 
function parameters from stack - BLX R12 // Call service function - POP {R12,LR} // Restore SP and EXC_RETURN - STR R0,[R12] // Store function return value + push {r0,lr} // Save SP and EXC_RETURN + ldr r12,[r2,r1,lsl #2] // Load address of SVC function + ldm r0,{r0-r3} // Load function parameters from stack + blx r12 // Call service function + pop {r12,lr} // Restore SP and EXC_RETURN + str r0,[r12] // Store function return value - BX LR // Return from handler + bx lr // Return from handler .fnend .size SVC_Handler, .-SVC_Handler @@ -137,11 +195,11 @@ SVC_User: .cantunwind PendSV_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxPendSV_Handler // Call osRtxPendSV_Handler - POP {R0,LR} // Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxPendSV_Handler // Call osRtxPendSV_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend .size PendSV_Handler, .-PendSV_Handler @@ -154,11 +212,11 @@ PendSV_Handler: .cantunwind SysTick_Handler: - PUSH {R0,LR} // Save EXC_RETURN - BL osRtxTick_Handler // Call osRtxTick_Handler - POP {R0,LR} // Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + push {r0,lr} // Save EXC_RETURN + bl osRtxTick_Handler // Call osRtxTick_Handler + pop {r0,lr} // Restore EXC_RETURN + mrs r12,psp // Save PSP to R12 + b SVC_Context // Branch to context handling .fnend .size SysTick_Handler, .-SysTick_Handler diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S index 975b30dcd67..9e27930a0e7 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2013-2018 Arm Limited. All rights reserved. +; * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
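The restore paths above keep only the low byte of EXC_RETURN in `TCB.stack_frame` and rebuild the full value with `orn lr,r1,#0xFF`, a bitwise OR with 0xFFFFFF00; the FPU-less fallback `mvn lr,#~0xFFFFFFFD` produces the constant directly. A host-checkable sketch of the equivalence (the helper name is illustrative):

```c
#include <assert.h>
#include <stdint.h>

/* "strb lr,[r1,#TCB_SF_OFS]" stores the low byte; "orn lr,r1,#0xFF"
 * rebuilds EXC_RETURN by OR-ing that byte with ~0xFF == 0xFFFFFF00.
 */
static uint32_t rebuild_exc_return(uint8_t stack_frame) {
  return 0xFFFFFF00u | (uint32_t)stack_frame;
}

int main(void) {
  /* The FPU-less path "mvn lr,#~0xFFFFFFFD" must agree with rebuilding
   * from a stored 0xFD byte (Thread mode return using PSP, basic frame). */
  assert(rebuild_exc_return(0xFDu) == ~(uint32_t)~0xFFFFFFFDu);
  assert(rebuild_exc_return(0xFDu) == 0xFFFFFFFDu);
  return 0;
}
```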
; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,12 +18,13 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: Cortex-A Exception handlers +; * Title: ARMv7-A Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ - NAME irq_ca.s + NAME irq_armv7a.s + MODE_FIQ EQU 0x11 MODE_IRQ EQU 0x12 @@ -352,16 +353,16 @@ osRtxContextSave STMDB R1!, {R2,R12} ; Push FPSCR, maintain 8-byte alignment VSTMDB R1!, {D0-D15} ; Save D0-D15 - #ifdef __ARM_ADVANCED_SIMD__ + #ifdef __ARM_ADVANCED_SIMD__ VSTMDB R1!, {D16-D31} ; Save D16-D31 - #endif + #endif LDRB R2, [LR, #TCB_SP_FRAME] ; Load osRtxInfo.thread.run.curr frame info - #ifdef __ARM_ADVANCED_SIMD__ + #ifdef __ARM_ADVANCED_SIMD__ ORR R2, R2, #4 ; NEON state - #else + #else ORR R2, R2, #2 ; VFP state - #endif + #endif STRB R2, [LR, #TCB_SP_FRAME] ; Store VFP/NEON state osRtxContextSave1 @@ -413,9 +414,9 @@ osRtxContextRestore MCR p15, 0, R2, c1, c0, 2 ; Write CPACR BEQ osRtxContextRestore1 ; No VFP ISB ; Sync if VFP was enabled - #ifdef __ARM_ADVANCED_SIMD__ + #ifdef __ARM_ADVANCED_SIMD__ VLDMIA LR!, {D16-D31} ; Restore D16-D31 - #endif + #endif VLDMIA LR!, {D0-D15} ; Restore D0-D15 LDR R2, [LR] VMSR FPSCR, R2 ; Restore FPSCR diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S index 3ca51f67012..29b8bb02743 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2013-2018 Arm Limited. All rights reserved. +; * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
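In the Cortex-A file above, the save path tags `TCB.stack_frame` with which floating-point bank was pushed: the `ORR R2,R2,#2` line marks a VFP D0-D15 frame and `ORR R2,R2,#4` a NEON D0-D31 frame (when `__ARM_ADVANCED_SIMD__` is defined), so the restore path knows how many registers to reload. A minimal sketch of that flag encoding; the enum and function names are illustrative, only the values come from the code:

```c
#include <stdint.h>

/* Frame-info flags the Cortex-A handlers OR into TCB.stack_frame. */
enum {
  FRAME_VFP_D0_D15  = 2, /* D0-D15 + FPSCR saved                         */
  FRAME_NEON_D0_D31 = 4  /* D0-D31 + FPSCR saved (__ARM_ADVANCED_SIMD__) */
};

static uint32_t tag_stack_frame(uint32_t frame_info, int have_neon) {
  return frame_info | (have_neon ? FRAME_NEON_D0_D31 : FRAME_VFP_D0_D15);
}
```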
; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,18 +18,23 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: Cortex-M0 Exception handlers +; * Title: ARMv6-M Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ - NAME irq_cm0.s + NAME irq_armv6m.s + #include "rtx_def.h" + I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow + PRESERVE8 SECTION .rodata:DATA:NOROOT(2) @@ -47,6 +52,10 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif MOV R0,LR LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2 @@ -57,7 +66,7 @@ SVC_Number LDR R1,[R0,#24] ; Load saved PC from stack SUBS R1,R1,#2 ; Point to SVC instruction LDRB R1,[R1] ; Load SVC number - CMP R1,#0 + CMP R1,#0 ; Check SVC number BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN @@ -68,18 +77,42 @@ SVC_Number MOV LR,R3 ; Set EXC_RETURN SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required BEQ SVC_Exit ; Branch when threads are the same + SUBS R3,R3,#8 ; Adjust address + STR R2,[R3] ; osRtxInfo.thread.run: curr = next CMP R1,#0 - BEQ SVC_ContextSwitch ; Branch if running thread is deleted + BEQ SVC_ContextRestore ; Branch if running thread is deleted SVC_ContextSave MRS R0,PSP ; Get PSP - SUBS R0,R0,#32 ; Calculate SP + SUBS R0,R0,#32 ; Calculate SP: space for R4..R11 STR R0,[R1,#TCB_SP_OFS] ; Store SP + + #ifdef RTX_STACK_CHECK + + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CMP R0,#0 + BNE SVC_ContextSaveRegs ; Branch when stack check is ok + + MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + B SVC_ContextRestore ; Branch to context restore handling + +SVC_ContextSaveRegs + LDR R0,[R1,#TCB_SP_OFS] ; Load SP + + #endif + STMIA R0!,{R4-R7} ; Save R4..R7 MOV R4,R8 MOV R5,R9 @@ -87,10 +120,6 @@ SVC_ContextSave MOV R7,R11 STMIA R0!,{R4-R7} ; Save R8..R11 -SVC_ContextSwitch - SUBS R3,R3,#8 ; Adjust address - STR R2,[R3] ; osRtxInfo.thread.run: curr = next - SVC_ContextRestore LDR R0,[R2,#TCB_SP_OFS] ; Load SP ADDS R0,R0,#16 ; Adjust address @@ -103,7 +132,7 @@ SVC_ContextRestore SUBS R0,R0,#32 ; Adjust address LDMIA R0!,{R4-R7} ; Restore R4..R7 - MOVS R0,#~0xFFFFFFFD + MOVS R0,#2 ; Binary complement of 0xFFFFFFFD MVNS R0,R0 ; Set EXC_RETURN value BX R0 ; Exit from handler @@ -141,7 +170,7 @@ PendSV_Handler BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B SVC_Context + B SVC_Context ; Branch to context handling SysTick_Handler @@ -152,7 +181,7 @@ SysTick_Handler BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B SVC_Context + B SVC_Context ; 
Branch to context handling END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S index 3ca51f67012..29b8bb02743 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2013-2018 Arm Limited. All rights reserved. +; * Copyright (c) 2013-2021 Arm Limited. All rights reserved. ; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,18 +18,23 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: Cortex-M0 Exception handlers +; * Title: ARMv6-M Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ - NAME irq_cm0.s + NAME irq_armv6m.s + #include "rtx_def.h" + I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow + PRESERVE8 SECTION .rodata:DATA:NOROOT(2) @@ -47,6 +52,10 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif MOV R0,LR LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2 @@ -57,7 +66,7 @@ SVC_Number LDR R1,[R0,#24] ; Load saved PC from stack SUBS R1,R1,#2 ; Point to SVC instruction LDRB R1,[R1] ; Load SVC number - CMP R1,#0 + CMP R1,#0 ; Check SVC number BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN @@ -68,18 +77,42 @@ SVC_Number MOV LR,R3 ; Set EXC_RETURN SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required BEQ SVC_Exit ; Branch when threads are the same + SUBS R3,R3,#8 ; Adjust address + STR R2,[R3] ; osRtxInfo.thread.run: curr = next CMP R1,#0 - BEQ SVC_ContextSwitch ; Branch if running thread is deleted + BEQ SVC_ContextRestore ; Branch if running thread is deleted SVC_ContextSave MRS R0,PSP ; Get PSP - SUBS R0,R0,#32 ; Calculate SP + SUBS R0,R0,#32 ; Calculate SP: space for R4..R11 STR R0,[R1,#TCB_SP_OFS] ; Store SP + + #ifdef RTX_STACK_CHECK + + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CMP R0,#0 + BNE SVC_ContextSaveRegs ; Branch when stack check is ok + + MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + B SVC_ContextRestore ; Branch to context restore handling + +SVC_ContextSaveRegs + LDR R0,[R1,#TCB_SP_OFS] ; Load SP + + #endif + STMIA R0!,{R4-R7} ; Save R4..R7 MOV R4,R8 MOV R5,R9 @@ -87,10 +120,6 @@ SVC_ContextSave MOV R7,R11 STMIA R0!,{R4-R7} ; Save R8..R11 -SVC_ContextSwitch - SUBS R3,R3,#8 ; Adjust address - STR R2,[R3] ; osRtxInfo.thread.run: curr = next - SVC_ContextRestore LDR R0,[R2,#TCB_SP_OFS] ; Load SP ADDS R0,R0,#16 ; Adjust address @@ -103,7 +132,7 @@ SVC_ContextRestore SUBS R0,R0,#32 ; Adjust address LDMIA R0!,{R4-R7} ; Restore R4..R7 
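The `MOVS R0,#2` / `MVNS R0,R0` sequence above (repeated for the Cortex-M0+ copy just below) exists because ARMv6-M has no MOV or MVN with a 32-bit immediate: since ~0xFFFFFFFD equals 2, materializing 2 and inverting it yields the EXC_RETURN value for a Thread-mode return on PSP. The same arithmetic in C:

```c
#include <assert.h>
#include <stdint.h>

int main(void) {
  /* ARMv6-M MOVS takes only 8-bit immediates, so the handlers build
   * EXC_RETURN = 0xFFFFFFFD (Thread mode, PSP) in two steps: */
  uint32_t r0 = 2u; /* MOVS R0,#2 : 2 == ~0xFFFFFFFD */
  r0 = ~r0;         /* MVNS R0,R0                    */
  assert(r0 == 0xFFFFFFFDu);
  return 0;
}
```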
- MOVS R0,#~0xFFFFFFFD + MOVS R0,#2 ; Binary complement of 0xFFFFFFFD MVNS R0,R0 ; Set EXC_RETURN value BX R0 ; Exit from handler @@ -141,7 +170,7 @@ PendSV_Handler BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B SVC_Context + B SVC_Context ; Branch to context handling SysTick_Handler @@ -152,7 +181,7 @@ SysTick_Handler BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B SVC_Context + B SVC_Context ; Branch to context handling END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S index a0e6918fb7f..9f0bf8effa0 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2016-2020 Arm Limited. All rights reserved. +; * Copyright (c) 2016-2021 Arm Limited. All rights reserved. ; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,12 +18,17 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: ARMv8M Baseline Exception handlers +; * Title: ARMv8-M Baseline Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ + NAME irq_armv8mbl.s + + + #include "rtx_def.h" + #ifndef DOMAIN_NS #define DOMAIN_NS 0 #endif @@ -34,6 +39,9 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset TCB_SF_OFS EQU 34 ; TCB.stack_frame offset TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow + PRESERVE8 @@ -51,10 +59,14 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo - #if (DOMAIN_NS == 1) + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif + #if (DOMAIN_NS != 0) IMPORT TZ_LoadContext_S IMPORT TZ_StoreContext_S - #endif + #endif MOV R0,LR LSRS R0,R0,#3 ; Determine return stack from EXC_RETURN bit 2 @@ -65,95 +77,136 @@ SVC_Number LDR R1,[R0,#24] ; Load saved PC from stack SUBS R1,R1,#2 ; Point to SVC instruction LDRB R1,[R1] ; Load SVC number - CMP R1,#0 + CMP R1,#0 ; Check SVC number BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN - LDM R0,{R0-R3} ; Load function parameters from stack + LDMIA R0,{R0-R3} ; Load function parameters from stack BLX R7 ; Call service function POP {R2,R3} ; Restore SP and EXC_RETURN STMIA R2!,{R0-R1} ; Store function return values MOV LR,R3 ; Set EXC_RETURN SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDMIA R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required BEQ SVC_Exit ; Branch when threads are the same - CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted + SUBS R3,R3,#8 ; Adjust address + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted SVC_ContextSave - #if (DOMAIN_NS == 1) + #if (DOMAIN_NS != 0) LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context + CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context PUSH {R1,R2,R3,R7} ; Save registers MOV R7,LR ; Get EXC_RETURN BL TZ_StoreContext_S ; Store secure context MOV LR,R7 ; Set 
EXC_RETURN POP {R1,R2,R3,R7} ; Restore registers - #endif + #endif -SVC_ContextSave1 +SVC_ContextSave_NS MRS R0,PSP ; Get PSP - SUBS R0,R0,#32 ; Calculate SP + #if (DOMAIN_NS != 0) + MOV R3,LR ; Get EXC_RETURN + LSLS R3,R3,#25 ; Check domain of interrupted thread + BMI SVC_ContextSaveSP ; Branch if secure + #endif + + #ifdef RTX_STACK_CHECK + SUBS R0,R0,#32 ; Calculate SP: space for R4..R11 + +SVC_ContextSaveSP STR R0,[R1,#TCB_SP_OFS] ; Store SP + MOV R3,LR ; Get EXC_RETURN + MOV R0,R1 ; osRtxInfo.thread.run.curr + ADDS R0,R0,#TCB_SF_OFS ; Adjust address + STRB R3,[R0] ; Store stack frame information + + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CMP R0,#0 + BNE SVC_ContextSaveRegs ; Branch when stack check is ok + + MOVS R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + B SVC_ContextRestore ; Branch to context restore handling + +SVC_ContextSaveRegs + #if (DOMAIN_NS != 0) + MOV R0,R1 ; osRtxInfo.thread.run.curr + ADDS R0,R0,#TCB_SF_OFS ; Adjust address + LDRB R3,[R0] ; Load stack frame information + LSLS R3,R3,#25 ; Check domain of interrupted thread + BMI SVC_ContextRestore ; Branch if secure + #endif + LDR R0,[R1,#TCB_SP_OFS] ; Load SP STMIA R0!,{R4-R7} ; Save R4..R7 MOV R4,R8 MOV R5,R9 MOV R6,R10 MOV R7,R11 STMIA R0!,{R4-R7} ; Save R8..R11 - -SVC_ContextSave2 + #else + SUBS R0,R0,#32 ; Calculate SP: space for R4..R11 + STMIA R0!,{R4-R7} ; Save R4..R7 + MOV R4,R8 + MOV R5,R9 + MOV R6,R10 + MOV R7,R11 + STMIA R0!,{R4-R7} ; Save R8..R11 + SUBS R0,R0,#32 ; Adjust address +SVC_ContextSaveSP + STR R0,[R1,#TCB_SP_OFS] ; Store SP MOV R0,LR ; Get EXC_RETURN ADDS R1,R1,#TCB_SF_OFS ; Adjust address STRB R0,[R1] ; Store stack frame information - -SVC_ContextSwitch - SUBS R3,R3,#8 ; Adjust address - STR R2,[R3] ; osRtxInfo.thread.run: curr = next + #endif SVC_ContextRestore - #if (DOMAIN_NS == 1) + #if (DOMAIN_NS != 0) LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context + CBZ R0,SVC_ContextRestore_NS ; Branch if there is no secure context PUSH {R2,R3} ; Save registers BL TZ_LoadContext_S ; Load secure context POP {R2,R3} ; Restore registers - #endif + #endif -SVC_ContextRestore1 - MOV R1,R2 - ADDS R1,R1,#TCB_SF_OFS ; Adjust address - LDRB R0,[R1] ; Load stack frame information - MOVS R1,#0xFF - MVNS R1,R1 ; R1=0xFFFFFF00 - ORRS R0,R1 - MOV LR,R0 ; Set EXC_RETURN - - #if (DOMAIN_NS == 1) - LSLS R0,R0,#25 ; Check domain of interrupted thread - BPL SVC_ContextRestore2 ; Branch if non-secure - LDR R0,[R2,#TCB_SP_OFS] ; Load SP - MSR PSP,R0 ; Set PSP - BX LR ; Exit from handler - #else +SVC_ContextRestore_NS LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base MSR PSPLIM,R0 ; Set PSPLIM - #endif - -SVC_ContextRestore2 + MOV R0,R2 ; osRtxInfo.thread.run.next + ADDS R0,R0,#TCB_SF_OFS ; Adjust address + LDRB R3,[R0] ; Load stack frame information + MOVS R0,#0xFF + MVNS R0,R0 ; R0=0xFFFFFF00 + ORRS R3,R3,R0 + MOV LR,R3 ; Set EXC_RETURN LDR R0,[R2,#TCB_SP_OFS] ; Load SP + #if (DOMAIN_NS != 0) + LSLS R3,R3,#25 ; Check domain of interrupted thread + BMI SVC_ContextRestoreSP ; Branch if secure + #endif ADDS R0,R0,#16 ; 
Adjust address LDMIA R0!,{R4-R7} ; Restore R8..R11 MOV R8,R4 MOV R9,R5 MOV R10,R6 MOV R11,R7 - MSR PSP,R0 ; Set PSP SUBS R0,R0,#32 ; Adjust address LDMIA R0!,{R4-R7} ; Restore R4..R7 + ADDS R0,R0,#16 ; Adjust address + +SVC_ContextRestoreSP + MSR PSP,R0 ; Set PSP SVC_Exit BX LR ; Exit from handler @@ -189,7 +242,7 @@ PendSV_Handler BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B Sys_Context + B SVC_Context ; Branch to context handling SysTick_Handler @@ -200,104 +253,7 @@ SysTick_Handler BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,R1} ; Restore EXC_RETURN MOV LR,R1 ; Set EXC_RETURN - B Sys_Context - - + B SVC_Context ; Branch to context handling -Sys_Context - EXPORT Sys_Context - IMPORT osRtxInfo - #if (DOMAIN_NS == 1) - IMPORT TZ_LoadContext_S - IMPORT TZ_StoreContext_S - #endif - - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run - LDM R3!,{R1,R2} ; Load osRtxInfo.thread.run: curr & next - CMP R1,R2 ; Check if thread switch is required - BEQ Sys_ContextExit ; Branch when threads are the same - -Sys_ContextSave - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context - PUSH {R1,R2,R3,R7} ; Save registers - MOV R7,LR ; Get EXC_RETURN - BL TZ_StoreContext_S ; Store secure context - MOV LR,R7 ; Set EXC_RETURN - POP {R1,R2,R3,R7} ; Restore registers - -Sys_ContextSave1 - MOV R0,LR ; Get EXC_RETURN - LSLS R0,R0,#25 ; Check domain of interrupted thread - BPL Sys_ContextSave2 ; Branch if non-secure - MRS R0,PSP ; Get PSP - STR R0,[R1,#TCB_SP_OFS] ; Store SP - B Sys_ContextSave3 - #endif - -Sys_ContextSave2 - MRS R0,PSP ; Get PSP - SUBS R0,R0,#32 ; Adjust address - STR R0,[R1,#TCB_SP_OFS] ; Store SP - STMIA R0!,{R4-R7} ; Save R4..R7 - MOV R4,R8 - MOV R5,R9 - MOV R6,R10 - MOV R7,R11 - STMIA R0!,{R4-R7} ; Save R8..R11 - -Sys_ContextSave3 - MOV R0,LR ; Get EXC_RETURN - ADDS R1,R1,#TCB_SF_OFS ; Adjust address - STRB R0,[R1] ; Store stack frame information - -Sys_ContextSwitch - SUBS R3,R3,#8 ; Adjust address - STR R2,[R3] ; osRtxInfo.run: curr = next - -Sys_ContextRestore - #if (DOMAIN_NS == 1) - LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context - PUSH {R2,R3} ; Save registers - BL TZ_LoadContext_S ; Load secure context - POP {R2,R3} ; Restore registers - #endif - -Sys_ContextRestore1 - MOV R1,R2 - ADDS R1,R1,#TCB_SF_OFS ; Adjust offset - LDRB R0,[R1] ; Load stack frame information - MOVS R1,#0xFF - MVNS R1,R1 ; R1=0xFFFFFF00 - ORRS R0,R1 - MOV LR,R0 ; Set EXC_RETURN - - #if (DOMAIN_NS == 1) - LSLS R0,R0,#25 ; Check domain of interrupted thread - BPL Sys_ContextRestore2 ; Branch if non-secure - LDR R0,[R2,#TCB_SP_OFS] ; Load SP - MSR PSP,R0 ; Set PSP - BX LR ; Exit from handler - #else - LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base - MSR PSPLIM,R0 ; Set PSPLIM - #endif - -Sys_ContextRestore2 - LDR R0,[R2,#TCB_SP_OFS] ; Load SP - ADDS R0,R0,#16 ; Adjust address - LDMIA R0!,{R4-R7} ; Restore R8..R11 - MOV R8,R4 - MOV R9,R5 - MOV R10,R6 - MOV R11,R7 - MSR PSP,R0 ; Set PSP - SUBS R0,R0,#32 ; Adjust address - LDMIA R0!,{R4-R7} ; Restore R4..R7 - -Sys_ContextExit - BX LR ; Exit from handler END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S index 29fe1b727e6..3b6f9cf1d33 100644 --- 
a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2013-2018 Arm Limited. All rights reserved. +; * Copyright (c) 2013-2021 Arm Limited. All rights reserved. ; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,17 +18,31 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: Cortex-M3 Exception handlers +; * Title: ARMv7-M Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ - NAME irq_cm3.s + NAME irq_armv7m.s + #include "rtx_def.h" + +#ifdef __ARMVFP__ +FPU_USED EQU 1 +#else +FPU_USED EQU 0 +#endif + I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset +TCB_SF_OFS EQU 34 ; TCB.stack_frame offset + +FPCCR EQU 0xE000EF34 ; FPCCR Address + +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow PRESERVE8 @@ -47,6 +61,10 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2 ITE EQ @@ -55,7 +73,8 @@ SVC_Handler LDR R1,[R0,#24] ; Load saved PC from stack LDRB R1,[R1,#-2] ; Load SVC number - CBNZ R1,SVC_User ; Branch if not SVC 0 + CMP R1,#0 ; Check SVC number + BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN LDM R0,{R0-R3,R12} ; Load function parameters and address from stack @@ -64,28 +83,94 @@ SVC_Handler STM R12,{R0-R1} ; Store function return values SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required IT EQ BXEQ LR ; Exit when threads are the same - CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + + #if (FPU_USED != 0) + CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted +SVC_FP_LazyState + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + BNE SVC_ContextRestore ; Branch if not extended stack frame + LDR R3,=FPCCR ; FPCCR Address + LDR R0,[R3] ; Load FPCCR + BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation) + STR R0,[R3] ; Store FPCCR + B SVC_ContextRestore ; Branch to context restore handling + #else + CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted + #endif SVC_ContextSave - STMDB R12!,{R4-R11} ; Save R4..R11 + #ifdef RTX_STACK_CHECK + SUB R12,R12,#32 ; Calculate SP: space for R4..R11 + #if (FPU_USED != 0) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + SUBEQ R12,R12,#64 ; Additional space for S16..S31 + STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + #endif STR R12,[R1,#TCB_SP_OFS] ; Store SP -SVC_ContextSwitch + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok + + #if (FPU_USED != 0) + MOV R4,R1 ; Save osRtxInfo.thread.run.curr + #endif + MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load 
address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next STR R2,[R3] ; osRtxInfo.thread.run: curr = next + #if (FPU_USED != 0) + LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information + B SVC_FP_LazyState ; Branch to FP lazy state handling + #else + B SVC_ContextRestore ; Branch to context restore handling + #endif + +SVC_ContextSaveRegs + LDR R12,[R1,#TCB_SP_OFS] ; Load SP + #if (FPU_USED != 0) + LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31 + #endif + STM R12,{R4-R11} ; Save R4..R11 + #else + STMDB R12!,{R4-R11} ; Save R4..R11 + #if (FPU_USED != 0) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31 + STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + #endif + STR R12,[R1,#TCB_SP_OFS] ; Store SP + #endif SVC_ContextRestore LDR R0,[R2,#TCB_SP_OFS] ; Load SP + #if (FPU_USED != 0) + LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information + ORN LR,R1,#0xFF ; Set EXC_RETURN + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31 + #else + MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value + #endif LDMIA R0!,{R4-R11} ; Restore R4..R11 MSR PSP,R0 ; Set PSP - MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value - SVC_Exit BX LR ; Exit from handler @@ -112,8 +197,8 @@ PendSV_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,LR} ; Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling SysTick_Handler @@ -123,8 +208,8 @@ SysTick_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,LR} ; Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S index aa1cbc218fb..00a84225cc5 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2016-2020 Arm Limited. All rights reserved. +; * Copyright (c) 2016-2021 Arm Limited. All rights reserved. 
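The RTX_STACK_CHECK blocks added across these handlers all follow the same shape: after committing `curr = next`, the outgoing thread's stack is verified with `osRtxThreadStackCheck`; on failure the kernel is notified via `osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread)` and the overflowed context is simply not saved. A simplified C sketch of that control flow; the RTX-internal signatures are approximations for illustration, not quotations from the sources:

```c
#include <stdint.h>

/* Approximated RTX-internal signatures, for illustration only. */
typedef struct os_thread_s os_thread_t;
extern uint32_t osRtxThreadStackCheck (const os_thread_t *thread);
extern uint32_t osRtxKernelErrorNotify(uint32_t code, void *object_id);

#define osRtxErrorStackOverflow 1u /* matches the .equ/EQU definitions */

/* Shape of the RTX_STACK_CHECK path: 'curr = next' has already been
 * committed, so on overflow the damaged context is reported and
 * abandoned rather than saved.
 */
static void context_save(os_thread_t *curr) {
  if (osRtxThreadStackCheck(curr) == 0u) { /* stack limit overrun */
    (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, curr);
    return; /* skip the register save; restore 'next' directly */
  }
  /* stack intact: R4..R11 (and S16..S31 for an extended frame) are
   * flushed to curr's stack at this point in the assembly versions */
}
```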
; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,12 +18,17 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: ARMv8M Mainline Exception handlers +; * Title: ARMv8-M Mainline Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ + NAME irq_armv8mml.s + + + #include "rtx_def.h" + #ifndef DOMAIN_NS #define DOMAIN_NS 0 #endif @@ -46,6 +51,11 @@ TCB_SP_OFS EQU 56 ; TCB.SP offset TCB_SF_OFS EQU 34 ; TCB.stack_frame offset TCB_TZM_OFS EQU 64 ; TCB.tz_memory offset +FPCCR EQU 0xE000EF34 ; FPCCR Address + +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow + PRESERVE8 @@ -63,10 +73,14 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo - #if (DOMAIN_NS == 1) + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif + #if (DOMAIN_NS != 0) IMPORT TZ_LoadContext_S IMPORT TZ_StoreContext_S - #endif + #endif TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2 ITE EQ @@ -75,7 +89,7 @@ SVC_Handler LDR R1,[R0,#24] ; Load saved PC from stack LDRB R1,[R1,#-2] ; Load SVC number - CMP R1,#0 + CMP R1,#0 ; Check SVC number BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN @@ -85,79 +99,131 @@ SVC_Handler STM R12,{R0-R1} ; Store function return values SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required IT EQ BXEQ LR ; Exit when threads are the same - #if ((FPU_USED == 1) || (MVE_USED == 1)) + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + + #if ((FPU_USED != 0) || (MVE_USED != 0)) CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted - TST LR,#0x10 ; Check if extended stack frame - BNE SVC_ContextSwitch - LDR R1,=0xE000EF34 ; FPCCR Address - LDR R0,[R1] ; Load FPCCR - BIC R0,R0,#1 ; Clear LSPACT (Lazy state) - STR R0,[R1] ; Store FPCCR - B SVC_ContextSwitch - #else - CBZ R1,SVC_ContextSwitch ; Branch if running thread is deleted - #endif +SVC_FP_LazyState + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + BNE SVC_ContextRestore ; Branch if not extended stack frame + LDR R3,=FPCCR ; FPCCR Address + LDR R0,[R3] ; Load FPCCR + BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation) + STR R0,[R3] ; Store FPCCR + B SVC_ContextRestore ; Branch to context restore handling + #else + CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted + #endif SVC_ContextSave - #if (DOMAIN_NS == 1) + #if (DOMAIN_NS != 0) LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,SVC_ContextSave1 ; Branch if there is no secure context - PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN + CBZ R0,SVC_ContextSave_NS ; Branch if there is no secure context + PUSH {R1,R2,R12,LR} ; Save registers and EXC_RETURN BL TZ_StoreContext_S ; Store secure context - POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN - #endif - -SVC_ContextSave1 - MRS R0,PSP ; Get PSP - STMDB R0!,{R4-R11} ; Save R4..R11 - #if ((FPU_USED == 1) || (MVE_USED == 1)) - TST LR,#0x10 ; Check if extended stack frame - IT EQ - VSTMDBEQ R0!,{S16-S31} ; Save VFP S16.S31 - #endif - -SVC_ContextSave2 - STR R0,[R1,#TCB_SP_OFS] ; Store SP - STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information + POP {R1,R2,R12,LR} ; Restore registers and EXC_RETURN + #endif -SVC_ContextSwitch +SVC_ContextSave_NS + #if (DOMAIN_NS != 0) + 
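; EXC_RETURN bit 6 (S) is set when the thread was interrupted while executing
; Secure code; its register state is already preserved on the Secure side, so
; only SP and the stack frame information need to be stored for such a thread.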
TST LR,#0x40 ; Check domain of interrupted thread + BNE SVC_ContextSaveSP ; Branch if secure + #endif + + #ifdef RTX_STACK_CHECK + SUB R12,R12,#32 ; Calculate SP: space for R4..R11 + #if ((FPU_USED != 0) || (MVE_USED != 0)) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + SUBEQ R12,R12,#64 ; Additional space for S16..S31 + #endif + +SVC_ContextSaveSP + STR R12,[R1,#TCB_SP_OFS] ; Store SP + STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok + + #if ((FPU_USED != 0) || (MVE_USED != 0)) + MOV R4,R1 ; Save osRtxInfo.thread.run.curr + #endif + MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next STR R2,[R3] ; osRtxInfo.thread.run: curr = next + #if ((FPU_USED != 0) || (MVE_USED != 0)) + LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information + B SVC_FP_LazyState ; Branch to FP lazy state handling + #else + B SVC_ContextRestore ; Branch to context restore handling + #endif + +SVC_ContextSaveRegs + LDRB LR,[R1,#TCB_SF_OFS] ; Load stack frame information + #if (DOMAIN_NS != 0) + TST LR,#0x40 ; Check domain of interrupted thread + BNE SVC_ContextRestore ; Branch if secure + #endif + LDR R12,[R1,#TCB_SP_OFS] ; Load SP + #if ((FPU_USED != 0) || (MVE_USED != 0)) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31 + #endif + STM R12,{R4-R11} ; Save R4..R11 + #else + STMDB R12!,{R4-R11} ; Save R4..R11 + #if ((FPU_USED != 0) || (MVE_USED != 0)) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31 + #endif + +SVC_ContextSaveSP + STR R12,[R1,#TCB_SP_OFS] ; Store SP + STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + #endif SVC_ContextRestore - #if (DOMAIN_NS == 1) + #if (DOMAIN_NS != 0) LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,SVC_ContextRestore1 ; Branch if there is no secure context + CBZ R0,SVC_ContextRestore_NS; Branch if there is no secure context PUSH {R2,R3} ; Save registers BL TZ_LoadContext_S ; Load secure context POP {R2,R3} ; Restore registers - #endif + #endif -SVC_ContextRestore1 - LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base - LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information - MSR PSPLIM,R0 ; Set PSPLIM +SVC_ContextRestore_NS LDR R0,[R2,#TCB_SP_OFS] ; Load SP - ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN + LDR R1,[R2,#TCB_SM_OFS] ; Load stack memory base + MSR PSPLIM,R1 ; Set PSPLIM + LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information + ORN LR,R1,#0xFF ; Set EXC_RETURN - #if (DOMAIN_NS == 1) + #if (DOMAIN_NS != 0) TST LR,#0x40 ; Check domain of interrupted thread - BNE SVC_ContextRestore2 ; Branch if secure - #endif + BNE SVC_ContextRestoreSP ; Branch if secure + #endif - #if ((FPU_USED == 1) || (MVE_USED == 1)) - TST LR,#0x10 ; Check if extended stack frame - IT EQ + #if ((FPU_USED != 0) || (MVE_USED != 0)) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame VLDMIAEQ R0!,{S16-S31} ; Restore VFP 
S16..S31 - #endif + #endif LDMIA R0!,{R4-R11} ; Restore R4..R11 -SVC_ContextRestore2 +SVC_ContextRestoreSP MSR PSP,R0 ; Set PSP SVC_Exit @@ -186,7 +252,8 @@ PendSV_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,LR} ; Restore EXC_RETURN - B Sys_Context + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling SysTick_Handler @@ -196,87 +263,8 @@ SysTick_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,LR} ; Restore EXC_RETURN - B Sys_Context - - - -Sys_Context - EXPORT Sys_Context - IMPORT osRtxInfo - #if (DOMAIN_NS == 1) - IMPORT TZ_LoadContext_S - IMPORT TZ_StoreContext_S - #endif - - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run - LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next - CMP R1,R2 ; Check if thread switch is required - IT EQ - BXEQ LR ; Exit when threads are the same + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling -Sys_ContextSave - #if (DOMAIN_NS == 1) - LDR R0,[R1,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,Sys_ContextSave1 ; Branch if there is no secure context - PUSH {R1,R2,R3,LR} ; Save registers and EXC_RETURN - BL TZ_StoreContext_S ; Store secure context - POP {R1,R2,R3,LR} ; Restore registers and EXC_RETURN - -Sys_ContextSave1 - TST LR,#0x40 ; Check domain of interrupted thread - IT NE - MRSNE R0,PSP ; Get PSP - BNE Sys_ContextSave3 ; Branch if secure - #endif - -Sys_ContextSave2 - MRS R0,PSP ; Get PSP - STMDB R0!,{R4-R11} ; Save R4..R11 - #if ((FPU_USED == 1) || (MVE_USED == 1)) - TST LR,#0x10 ; Check if extended stack frame - IT EQ - VSTMDBEQ R0!,{S16-S31} ; Save VFP S16.S31 - #endif - -Sys_ContextSave3 - STR R0,[R1,#TCB_SP_OFS] ; Store SP - STRB LR,[R1,#TCB_SF_OFS] ; Store stack frame information - -Sys_ContextSwitch - STR R2,[R3] ; osRtxInfo.run: curr = next - -Sys_ContextRestore - #if (DOMAIN_NS == 1) - LDR R0,[R2,#TCB_TZM_OFS] ; Load TrustZone memory identifier - CBZ R0,Sys_ContextRestore1 ; Branch if there is no secure context - PUSH {R2,R3} ; Save registers - BL TZ_LoadContext_S ; Load secure context - POP {R2,R3} ; Restore registers - #endif - -Sys_ContextRestore1 - LDR R0,[R2,#TCB_SM_OFS] ; Load stack memory base - LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information - MSR PSPLIM,R0 ; Set PSPLIM - LDR R0,[R2,#TCB_SP_OFS] ; Load SP - ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN - - #if (DOMAIN_NS == 1) - TST LR,#0x40 ; Check domain of interrupted thread - BNE Sys_ContextRestore2 ; Branch if secure - #endif - - #if ((FPU_USED == 1) || (MVE_USED == 1)) - TST LR,#0x10 ; Check if extended stack frame - IT EQ - VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31 - #endif - LDMIA R0!,{R4-R11} ; Restore R4..R11 - -Sys_ContextRestore2 - MSR PSP,R0 ; Set PSP - -Sys_ContextExit - BX LR ; Exit from handler END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S index 4f8f3d4c8f5..3b6f9cf1d33 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S @@ -1,5 +1,5 @@ ;/* -; * Copyright (c) 2013-2018 Arm Limited. All rights reserved. +; * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
; * ; * SPDX-License-Identifier: Apache-2.0 ; * @@ -18,19 +18,32 @@ ; * ----------------------------------------------------------------------------- ; * ; * Project: CMSIS-RTOS RTX -; * Title: Cortex-M4F Exception handlers +; * Title: ARMv7-M Exception handlers ; * ; * ----------------------------------------------------------------------------- ; */ - NAME irq_cm4f.s + NAME irq_armv7m.s + #include "rtx_def.h" + +#ifdef __ARMVFP__ +FPU_USED EQU 1 +#else +FPU_USED EQU 0 +#endif + I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset TCB_SF_OFS EQU 34 ; TCB.stack_frame offset +FPCCR EQU 0xE000EF34 ; FPCCR Address + +osRtxErrorStackOverflow\ + EQU 1 ; Stack overflow + PRESERVE8 SECTION .rodata:DATA:NOROOT(2) @@ -48,6 +61,10 @@ SVC_Handler EXPORT SVC_Handler IMPORT osRtxUserSVC IMPORT osRtxInfo + #ifdef RTX_STACK_CHECK + IMPORT osRtxThreadStackCheck + IMPORT osRtxKernelErrorNotify + #endif TST LR,#0x04 ; Determine return stack from EXC_RETURN bit 2 ITE EQ @@ -56,7 +73,8 @@ SVC_Handler LDR R1,[R0,#24] ; Load saved PC from stack LDRB R1,[R1,#-2] ; Load SVC number - CBNZ R1,SVC_User ; Branch if not SVC 0 + CMP R1,#0 ; Check SVC number + BNE SVC_User ; Branch if not SVC 0 PUSH {R0,LR} ; Save SP and EXC_RETURN LDM R0,{R0-R3,R12} ; Load function parameters and address from stack @@ -65,47 +83,91 @@ SVC_Handler STM R12,{R0-R1} ; Store function return values SVC_Context - LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.run + LDR R3,=osRtxInfo+I_T_RUN_OFS; Load address of osRtxInfo.thread.run LDM R3,{R1,R2} ; Load osRtxInfo.thread.run: curr & next CMP R1,R2 ; Check if thread switch is required IT EQ BXEQ LR ; Exit when threads are the same + STR R2,[R3] ; osRtxInfo.thread.run: curr = next + + #if (FPU_USED != 0) CBNZ R1,SVC_ContextSave ; Branch if running thread is not deleted - TST LR,#0x10 ; Check if extended stack frame - BNE SVC_ContextSwitch -#ifdef __FPU_PRESENT - LDR R1,=0xE000EF34 ; FPCCR Address - LDR R0,[R1] ; Load FPCCR - BIC R0,R0,#1 ; Clear LSPACT (Lazy state) - STR R0,[R1] ; Store FPCCR - B SVC_ContextSwitch -#endif +SVC_FP_LazyState + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + BNE SVC_ContextRestore ; Branch if not extended stack frame + LDR R3,=FPCCR ; FPCCR Address + LDR R0,[R3] ; Load FPCCR + BIC R0,R0,#1 ; Clear LSPACT (Lazy state preservation) + STR R0,[R3] ; Store FPCCR + B SVC_ContextRestore ; Branch to context restore handling + #else + CBZ R1,SVC_ContextRestore ; Branch if running thread is deleted + #endif SVC_ContextSave - STMDB R12!,{R4-R11} ; Save R4..R11 -#ifdef __FPU_PRESENT - TST LR,#0x10 ; Check if extended stack frame - IT EQ - VSTMDBEQ R12!,{S16-S31} ; Save VFP S16.S31 -#endif - - STR R12,[R1,#TCB_SP_OFS] ; Store SP + #ifdef RTX_STACK_CHECK + SUB R12,R12,#32 ; Calculate SP: space for R4..R11 + #if (FPU_USED != 0) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + SUBEQ R12,R12,#64 ; Additional space for S16..S31 STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + #endif + STR R12,[R1,#TCB_SP_OFS] ; Store SP -SVC_ContextSwitch + PUSH {R1,R2} ; Save osRtxInfo.thread.run: curr & next + MOV R0,R1 ; Parameter: osRtxInfo.thread.run.curr + BL osRtxThreadStackCheck ; Check if thread stack is overrun + POP {R1,R2} ; Restore osRtxInfo.thread.run: curr & next + CBNZ R0,SVC_ContextSaveRegs ; Branch when stack check is ok + + #if (FPU_USED != 0) + MOV R4,R1 ; Save osRtxInfo.thread.run.curr + #endif + MOV R0,#osRtxErrorStackOverflow ; Parameter: r0=code, r1=object_id + BL 
osRtxKernelErrorNotify ; Call osRtxKernelErrorNotify + LDR R3,=osRtxInfo+I_T_RUN_OFS ; Load address of osRtxInfo.thread.run + LDR R2,[R3,#4] ; Load osRtxInfo.thread.run: next STR R2,[R3] ; osRtxInfo.thread.run: curr = next + #if (FPU_USED != 0) + LDRB LR,[R4,#TCB_SF_OFS] ; Load stack frame information + B SVC_FP_LazyState ; Branch to FP lazy state handling + #else + B SVC_ContextRestore ; Branch to context restore handling + #endif + +SVC_ContextSaveRegs + LDR R12,[R1,#TCB_SP_OFS] ; Load SP + #if (FPU_USED != 0) + LDRB LR, [R1,#TCB_SF_OFS] ; Load stack frame information + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMIAEQ R12!,{S16-S31} ; Save VFP S16..S31 + #endif + STM R12,{R4-R11} ; Save R4..R11 + #else + STMDB R12!,{R4-R11} ; Save R4..R11 + #if (FPU_USED != 0) + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame + VSTMDBEQ R12!,{S16-S31} ; Save VFP S16..S31 + STRB LR, [R1,#TCB_SF_OFS] ; Store stack frame information + #endif + STR R12,[R1,#TCB_SP_OFS] ; Store SP + #endif SVC_ContextRestore - LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information LDR R0,[R2,#TCB_SP_OFS] ; Load SP - ORR LR,R1,#0xFFFFFF00 ; Set EXC_RETURN - -#ifdef __FPU_PRESENT - TST LR,#0x10 ; Check if extended stack frame - IT EQ + #if (FPU_USED != 0) + LDRB R1,[R2,#TCB_SF_OFS] ; Load stack frame information + ORN LR,R1,#0xFF ; Set EXC_RETURN + TST LR,#0x10 ; Determine stack frame from EXC_RETURN bit 4 + IT EQ ; If extended stack frame VLDMIAEQ R0!,{S16-S31} ; Restore VFP S16..S31 -#endif + #else + MVN LR,#~0xFFFFFFFD ; Set EXC_RETURN value + #endif LDMIA R0!,{R4-R11} ; Restore R4..R11 MSR PSP,R0 ; Set PSP @@ -135,8 +197,8 @@ PendSV_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxPendSV_Handler ; Call osRtxPendSV_Handler POP {R0,LR} ; Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling SysTick_Handler @@ -146,8 +208,8 @@ SysTick_Handler PUSH {R0,LR} ; Save EXC_RETURN BL osRtxTick_Handler ; Call osRtxTick_Handler POP {R0,LR} ; Restore EXC_RETURN - MRS R12,PSP - B SVC_Context + MRS R12,PSP ; Save PSP to R12 + B SVC_Context ; Branch to context handling END diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_c.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_c.h index 445d393c9a5..7192a1d01e3 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_c.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_c.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,9 @@ #define RTX_CORE_C_H_ //lint -emacro((923,9078),SCB) "cast from unsigned long to pointer" [MISRA Note 9] +#ifndef RTE_COMPONENTS_H #include "RTE_Components.h" +#endif #include CMSIS_device_header #if ((!defined(__ARM_ARCH_6M__)) && \ diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h index 9fd36f2c8e3..a599516f2b9 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_ca.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved.
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,9 @@ #define RTX_CORE_CA_H_ #ifndef RTX_CORE_C_H_ +#ifndef RTE_COMPONENTS_H #include "RTE_Components.h" +#endif #include CMSIS_device_header #endif @@ -158,9 +160,9 @@ __STATIC_INLINE bool_t IsPrivileged (void) { return (__get_mode() != CPSR_MODE_USER); } -/// Check if in IRQ Mode -/// \return true=IRQ, false=thread -__STATIC_INLINE bool_t IsIrqMode (void) { +/// Check if in Exception +/// \return true=exception, false=thread +__STATIC_INLINE bool_t IsException (void) { return ((__get_mode() != CPSR_MODE_USER) && (__get_mode() != CPSR_MODE_SYSTEM)); } diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h index 94ee5ba7eca..086b1e00761 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_core_cm.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,9 @@ #define RTX_CORE_CM_H_ #ifndef RTX_CORE_C_H_ +#ifndef RTE_COMPONENTS_H #include "RTE_Components.h" +#endif #include CMSIS_device_header #endif @@ -35,15 +37,11 @@ typedef bool bool_t; #ifndef FALSE -#define FALSE (0) +#define FALSE ((bool_t)0) #endif #ifndef TRUE -#define TRUE (1) -#endif - -#ifdef RTE_CMSIS_RTOS2_RTX5_ARMV8M_NS -#define DOMAIN_NS 1 +#define TRUE ((bool_t)1) #endif #ifndef DOMAIN_NS @@ -120,9 +118,9 @@ __STATIC_INLINE bool_t IsPrivileged (void) { return ((__get_CONTROL() & 1U) == 0U); } -/// Check if in IRQ Mode -/// \return true=IRQ, false=thread -__STATIC_INLINE bool_t IsIrqMode (void) { +/// Check if in Exception +/// \return true=exception, false=thread +__STATIC_INLINE bool_t IsException (void) { return (__get_IPSR() != 0U); } @@ -209,58 +207,58 @@ __STATIC_INLINE void SetPendSV (void) { (defined(__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ != 0)) || \ (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \ (defined(__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ != 0))) -#define __SVC_INDIRECT(n) __svc_indirect(n) +#define SVC_INDIRECT(n) __svc_indirect(n) #elif ((defined(__ARM_ARCH_6M__) && (__ARM_ARCH_6M__ != 0)) || \ (defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0))) -#define __SVC_INDIRECT(n) __svc_indirect_r7(n) +#define SVC_INDIRECT(n) __svc_indirect_r7(n) #endif #define SVC0_0N(f,t) \ -__SVC_INDIRECT(0) t svc##f (t(*)()); \ +SVC_INDIRECT(0) t svc##f (t(*)()); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (void) { \ +__STATIC_INLINE t __svc##f (void) { \ svc##f(svcRtx##f); \ } #define SVC0_0(f,t) \ -__SVC_INDIRECT(0) t svc##f (t(*)()); \ +SVC_INDIRECT(0) t svc##f (t(*)()); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (void) { \ +__STATIC_INLINE t __svc##f (void) { \ return svc##f(svcRtx##f); \ } #define SVC0_1N(f,t,t1) \ -__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \ +SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1) { \ +__STATIC_INLINE t __svc##f (t1 a1) { \ svc##f(svcRtx##f,a1); \ } #define SVC0_1(f,t,t1) \ -__SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \ +SVC_INDIRECT(0) t svc##f (t(*)(t1),t1); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1) { \ +__STATIC_INLINE t __svc##f (t1 a1) { \ return svc##f(svcRtx##f,a1); \ } #define SVC0_2(f,t,t1,t2) \ -__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \ +SVC_INDIRECT(0) t svc##f (t(*)(t1,t2),t1,t2); \ 
__attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \ return svc##f(svcRtx##f,a1,a2); \ } #define SVC0_3(f,t,t1,t2,t3) \ -__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \ +SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3),t1,t2,t3); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \ return svc##f(svcRtx##f,a1,a2,a3); \ } #define SVC0_4(f,t,t1,t2,t3,t4) \ -__SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \ +SVC_INDIRECT(0) t svc##f (t(*)(t1,t2,t3,t4),t1,t2,t3,t4); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \ return svc##f(svcRtx##f,a1,a2,a3,a4); \ } @@ -285,60 +283,60 @@ __STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \ #endif #define STRINGIFY(a) #a -#define __SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi +#define SVC_INDIRECT(n) _Pragma(STRINGIFY(swi_number = n)) __swi #define SVC0_0N(f,t) \ -__SVC_INDIRECT(0) t svc##f (); \ +SVC_INDIRECT(0) t svc##f (); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (void) { \ +__STATIC_INLINE t __svc##f (void) { \ SVC_ArgF(svcRtx##f); \ svc##f(); \ } #define SVC0_0(f,t) \ -__SVC_INDIRECT(0) t svc##f (); \ +SVC_INDIRECT(0) t svc##f (); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (void) { \ +__STATIC_INLINE t __svc##f (void) { \ SVC_ArgF(svcRtx##f); \ return svc##f(); \ } #define SVC0_1N(f,t,t1) \ -__SVC_INDIRECT(0) t svc##f (t1 a1); \ +SVC_INDIRECT(0) t svc##f (t1 a1); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1) { \ +__STATIC_INLINE t __svc##f (t1 a1) { \ SVC_ArgF(svcRtx##f); \ svc##f(a1); \ } #define SVC0_1(f,t,t1) \ -__SVC_INDIRECT(0) t svc##f (t1 a1); \ +SVC_INDIRECT(0) t svc##f (t1 a1); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1) { \ +__STATIC_INLINE t __svc##f (t1 a1) { \ SVC_ArgF(svcRtx##f); \ return svc##f(a1); \ } #define SVC0_2(f,t,t1,t2) \ -__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \ +SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2) { \ SVC_ArgF(svcRtx##f); \ return svc##f(a1,a2); \ } #define SVC0_3(f,t,t1,t2,t3) \ -__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \ +SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3) { \ SVC_ArgF(svcRtx##f); \ return svc##f(a1,a2,a3); \ } #define SVC0_4(f,t,t1,t2,t3,t4) \ -__SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \ +SVC_INDIRECT(0) t svc##f (t1 a1, t2 a2, t3 a3, t4 a4); \ __attribute__((always_inline)) \ -__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \ +__STATIC_INLINE t __svc##f (t1 a1, t2 a2, t3 a3, t4 a4) { \ SVC_ArgF(svcRtx##f); \ return svc##f(a1,a2,a3,a4); \ } diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_delay.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_delay.c index 9d0cf414bf2..f8eacaf572c 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_delay.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_delay.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -31,21 +31,29 @@ /// Wait for Timeout (Time Delay). /// \note API identical to osDelay static osStatus_t svcRtxDelay (uint32_t ticks) { + osStatus_t status; - if (ticks != 0U) { - if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) { - EvrRtxDelayStarted(ticks); - } else { - EvrRtxDelayCompleted(osRtxThreadGetRunning()); - } + if (ticks == 0U) { + EvrRtxDelayError((int32_t)osErrorParameter); + //lint -e{904} "Return statement before end of function" [MISRA Note 1] + return osErrorParameter; } - return osOK; + if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) { + EvrRtxDelayStarted(ticks); + status = osOK; + } else { + EvrRtxDelayError((int32_t)osError); + status = osError; + } + + return status; } /// Wait until specified time. /// \note API identical to osDelayUntil static osStatus_t svcRtxDelayUntil (uint32_t ticks) { + osStatus_t status; ticks -= osRtxInfo.kernel.tick; if ((ticks == 0U) || (ticks > 0x7FFFFFFFU)) { @@ -56,11 +64,13 @@ static osStatus_t svcRtxDelayUntil (uint32_t ticks) { if (osRtxThreadWaitEnter(osRtxThreadWaitingDelay, ticks)) { EvrRtxDelayUntilStarted(ticks); + status = osOK; } else { - EvrRtxDelayCompleted(osRtxThreadGetRunning()); + EvrRtxDelayError((int32_t)osError); + status = osError; } - return osOK; + return status; } // Service Calls definitions @@ -77,7 +87,7 @@ osStatus_t osDelay (uint32_t ticks) { osStatus_t status; EvrRtxDelay(ticks); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxDelayError((int32_t)osErrorISR); status = osErrorISR; } else { @@ -91,7 +101,7 @@ osStatus_t osDelayUntil (uint32_t ticks) { osStatus_t status; EvrRtxDelayUntil(ticks); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxDelayError((int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evflags.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evflags.c index f32aec7b833..f2300e9a015 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evflags.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evflags.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxEventFlagsMemUsage \ __attribute__((section(".data.os.evflags.obj"))) = { 0U, 0U, 0U }; @@ -200,7 +200,7 @@ static osEventFlagsId_t svcRtxEventFlagsNew (const osEventFlagsAttr_t *attr) { //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] ef = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_event_flags_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (ef != NULL) { uint32_t used; osRtxEventFlagsMemUsage.cnt_alloc++; @@ -313,7 +313,7 @@ static uint32_t svcRtxEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) { event_flags = EventFlagsClear(ef, flags); EvrRtxEventFlagsClearDone(ef, event_flags); - + return event_flags; } @@ -409,7 +409,7 @@ static osStatus_t svcRtxEventFlagsDelete (osEventFlagsId_t ef_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, ef); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxEventFlagsMemUsage.cnt_free++; #endif } @@ -494,7 +494,7 @@ osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) { osEventFlagsId_t ef_id; EvrRtxEventFlagsNew(attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxEventFlagsError(NULL, (int32_t)osErrorISR); ef_id = NULL; } else { @@ -507,7 +507,7 @@ osEventFlagsId_t osEventFlagsNew (const osEventFlagsAttr_t *attr) { const char *osEventFlagsGetName (osEventFlagsId_t ef_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxEventFlagsGetName(ef_id, NULL); name = NULL; } else { @@ -521,7 +521,7 @@ uint32_t osEventFlagsSet (osEventFlagsId_t ef_id, uint32_t flags) { uint32_t event_flags; EvrRtxEventFlagsSet(ef_id, flags); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { event_flags = isrRtxEventFlagsSet(ef_id, flags); } else { event_flags = __svcEventFlagsSet(ef_id, flags); @@ -534,7 +534,7 @@ uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) { uint32_t event_flags; EvrRtxEventFlagsClear(ef_id, flags); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { event_flags = svcRtxEventFlagsClear(ef_id, flags); } else { event_flags = __svcEventFlagsClear(ef_id, flags); @@ -546,7 +546,7 @@ uint32_t osEventFlagsClear (osEventFlagsId_t ef_id, uint32_t flags) { uint32_t osEventFlagsGet (osEventFlagsId_t ef_id) { uint32_t event_flags; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { event_flags = svcRtxEventFlagsGet(ef_id); } else { event_flags = __svcEventFlagsGet(ef_id); @@ -559,7 +559,7 @@ uint32_t osEventFlagsWait (osEventFlagsId_t ef_id, uint32_t flags, uint32_t opti uint32_t event_flags; EvrRtxEventFlagsWait(ef_id, flags, options, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { event_flags = isrRtxEventFlagsWait(ef_id, flags, options, timeout); } else { event_flags = __svcEventFlagsWait(ef_id, flags, options, timeout); @@ -572,7 +572,7 @@ osStatus_t osEventFlagsDelete (osEventFlagsId_t ef_id) { osStatus_t status; EvrRtxEventFlagsDelete(ef_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxEventFlagsError(ef_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evr.c 
b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evr.c index 042d2542700..55b55a78e93 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evr.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_evr.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -63,6 +63,7 @@ #define EvtRtxKernelGetTickFreq EventID(EventLevelAPI, EvtRtxKernelNo, 0x14U) #define EvtRtxKernelGetSysTimerCount EventID(EventLevelAPI, EvtRtxKernelNo, 0x15U) #define EvtRtxKernelGetSysTimerFreq EventID(EventLevelAPI, EvtRtxKernelNo, 0x16U) +#define EvtRtxKernelErrorNotify EventID(EventLevelError, EvtRtxKernelNo, 0x19U) /// Event IDs for "RTX Thread" #define EvtRtxThreadError EventID(EventLevelError, EvtRtxThreadNo, 0x00U) @@ -531,6 +532,17 @@ __WEAK void EvrRtxKernelGetSysTimerFreq (uint32_t freq) { } #endif +#if (!defined(EVR_RTX_DISABLE) && (OS_EVR_KERNEL != 0) && !defined(EVR_RTX_KERNEL_ERROR_NOTIFY_DISABLE)) +__WEAK void EvrRtxKernelErrorNotify (uint32_t code, void *object_id) { +#if defined(RTE_Compiler_EventRecorder) + (void)EventRecord2(EvtRtxKernelErrorNotify, code, (uint32_t)object_id); +#else + (void)code; + (void)object_id; +#endif +} +#endif + // ==== Thread Events ==== diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_kernel.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_kernel.c index 303fc5442be..68ce8283b9e 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_kernel.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_kernel.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,6 +62,31 @@ static void KernelUnblock (void) { OS_Tick_Enable(); } +// Get Kernel sleep time +static uint32_t GetKernelSleepTime (void) { + const os_thread_t *thread; + const os_timer_t *timer; + uint32_t delay; + + delay = osWaitForever; + + // Check Thread Delay list + thread = osRtxInfo.thread.delay_list; + if (thread != NULL) { + delay = thread->delay; + } + + // Check Active Timer list + timer = osRtxInfo.timer.list; + if (timer != NULL) { + if (timer->tick < delay) { + delay = timer->tick; + } + } + + return delay; +} + // ==== Service Calls ==== @@ -90,7 +115,6 @@ static osStatus_t svcRtxKernelInitialize (void) { #endif // Initialize osRtxInfo - memset(&osRtxInfo.kernel, 0, sizeof(osRtxInfo) - offsetof(osRtxInfo_t, kernel)); osRtxInfo.isr_queue.data = osRtxConfig.isr_queue.data; osRtxInfo.isr_queue.max = osRtxConfig.isr_queue.max; @@ -198,7 +222,7 @@ static osStatus_t svcRtxKernelGetInfo (osVersion_t *version, char *id_buf, uint3 } else { size = id_size; } - memcpy(id_buf, osRtxKernelId, size); + (void)memcpy(id_buf, osRtxKernelId, size); } EvrRtxKernelInfoRetrieved(version, id_buf, id_size); @@ -287,7 +311,7 @@ static int32_t svcRtxKernelLock (void) { } return lock; } - + /// Unlock the RTOS Kernel scheduler. /// \note API identical to osKernelUnlock static int32_t svcRtxKernelUnlock (void) { @@ -347,9 +371,7 @@ static int32_t svcRtxKernelRestoreLock (int32_t lock) { /// Suspend the RTOS Kernel scheduler. 
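/// \return time in ticks, for how long the system can sleep or power-down.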
/// \note API identical to osKernelSuspend static uint32_t svcRtxKernelSuspend (void) { - const os_thread_t *thread; - const os_timer_t *timer; - uint32_t delay; + uint32_t delay; if (osRtxInfo.kernel.state != osRtxKernelRunning) { EvrRtxKernelError(osRtxErrorKernelNotRunning); @@ -359,24 +381,10 @@ static uint32_t svcRtxKernelSuspend (void) { KernelBlock(); - delay = osWaitForever; - - // Check Thread Delay list - thread = osRtxInfo.thread.delay_list; - if (thread != NULL) { - delay = thread->delay; - } - - // Check Active Timer list - timer = osRtxInfo.timer.list; - if (timer != NULL) { - if (timer->tick < delay) { - delay = timer->tick; - } - } - osRtxInfo.kernel.state = osRtxKernelSuspended; + delay = GetKernelSleepTime(); + EvrRtxKernelSuspended(delay); return delay; @@ -388,7 +396,7 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) { os_thread_t *thread; os_timer_t *timer; uint32_t delay; - uint32_t ticks; + uint32_t ticks, kernel_tick; if (osRtxInfo.kernel.state != osRtxKernelSuspended) { EvrRtxKernelResumed(); @@ -396,40 +404,38 @@ static void svcRtxKernelResume (uint32_t sleep_ticks) { return; } - osRtxInfo.kernel.tick += sleep_ticks; + delay = GetKernelSleepTime(); + if (sleep_ticks >= delay) { + ticks = delay - 1U; + } else { + ticks = sleep_ticks; + } - // Process Thread Delay list + // Update Thread Delay sleep ticks thread = osRtxInfo.thread.delay_list; if (thread != NULL) { - delay = sleep_ticks; - do { - if (delay >= thread->delay) { - delay -= thread->delay; - thread->delay = 1U; - osRtxThreadDelayTick(); - thread = osRtxInfo.thread.delay_list; - } else { - thread->delay -= delay; - delay = 0U; - } - } while ((thread != NULL) && (delay != 0U)); + thread->delay -= ticks; } - // Process Active Timer list + // Update Timer sleep ticks timer = osRtxInfo.timer.list; if (timer != NULL) { - ticks = sleep_ticks; - do { - if (ticks >= timer->tick) { - ticks -= timer->tick; - timer->tick = 1U; - osRtxInfo.timer.tick(); - timer = osRtxInfo.timer.list; - } else { - timer->tick -= ticks; - ticks = 0U; - } - } while ((timer != NULL) && (ticks != 0U)); + timer->tick -= ticks; + } + + kernel_tick = osRtxInfo.kernel.tick + sleep_ticks; + osRtxInfo.kernel.tick += ticks; + + while (osRtxInfo.kernel.tick != kernel_tick) { + osRtxInfo.kernel.tick++; + + // Process Thread Delays + osRtxThreadDelayTick(); + + // Process Timers + if (osRtxInfo.timer.tick != NULL) { + osRtxInfo.timer.tick(); + } } osRtxInfo.kernel.state = osRtxKernelRunning; @@ -507,6 +513,13 @@ SVC0_0 (KernelGetSysTimerFreq, uint32_t) __WEAK void osRtxKernelPreInit (void) { } +/// RTOS Kernel Error Notification Handler +/// \note API identical to osRtxErrorNotify +uint32_t osRtxKernelErrorNotify (uint32_t code, void *object_id) { + EvrRtxKernelErrorNotify(code, object_id); + return osRtxErrorNotify(code, object_id); +} + // ==== Public API ==== @@ -516,7 +529,7 @@ osStatus_t osKernelInitialize (void) { osRtxKernelPreInit(); EvrRtxKernelInitialize(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); status = osErrorISR; } else { @@ -530,7 +543,7 @@ osStatus_t osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size osStatus_t status; EvrRtxKernelGetInfo(version, id_buf, id_size); - if (IsIrqMode() || IsIrqMasked() || IsPrivileged()) { + if (IsException() || IsIrqMasked() || IsPrivileged()) { status = svcRtxKernelGetInfo(version, id_buf, id_size); } else { status = __svcKernelGetInfo(version, id_buf, id_size); @@ -542,7 +555,7 @@ osStatus_t 
osKernelGetInfo (osVersion_t *version, char *id_buf, uint32_t id_size osKernelState_t osKernelGetState (void) { osKernelState_t state; - if (IsIrqMode() || IsIrqMasked() || IsPrivileged()) { + if (IsException() || IsIrqMasked() || IsPrivileged()) { state = svcRtxKernelGetState(); } else { state = __svcKernelGetState(); @@ -555,7 +568,7 @@ osStatus_t osKernelStart (void) { osStatus_t status; EvrRtxKernelStart(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); status = osErrorISR; } else { @@ -569,7 +582,7 @@ int32_t osKernelLock (void) { int32_t lock; EvrRtxKernelLock(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); lock = (int32_t)osErrorISR; } else { @@ -577,13 +590,13 @@ int32_t osKernelLock (void) { } return lock; } - + /// Unlock the RTOS Kernel scheduler. int32_t osKernelUnlock (void) { int32_t lock; EvrRtxKernelUnlock(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); lock = (int32_t)osErrorISR; } else { @@ -597,7 +610,7 @@ int32_t osKernelRestoreLock (int32_t lock) { int32_t lock_new; EvrRtxKernelRestoreLock(lock); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); lock_new = (int32_t)osErrorISR; } else { @@ -611,7 +624,7 @@ uint32_t osKernelSuspend (void) { uint32_t ticks; EvrRtxKernelSuspend(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); ticks = 0U; } else { @@ -624,7 +637,7 @@ uint32_t osKernelSuspend (void) { void osKernelResume (uint32_t sleep_ticks) { EvrRtxKernelResume(sleep_ticks); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxKernelError((int32_t)osErrorISR); } else { __svcKernelResume(sleep_ticks); @@ -635,7 +648,7 @@ void osKernelResume (uint32_t sleep_ticks) { uint32_t osKernelGetTickCount (void) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { count = svcRtxKernelGetTickCount(); } else { count = __svcKernelGetTickCount(); @@ -647,7 +660,7 @@ uint32_t osKernelGetTickCount (void) { uint32_t osKernelGetTickFreq (void) { uint32_t freq; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { freq = svcRtxKernelGetTickFreq(); } else { freq = __svcKernelGetTickFreq(); @@ -659,7 +672,7 @@ uint32_t osKernelGetTickFreq (void) { uint32_t osKernelGetSysTimerCount (void) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { count = svcRtxKernelGetSysTimerCount(); } else { count = __svcKernelGetSysTimerCount(); @@ -671,7 +684,7 @@ uint32_t osKernelGetSysTimerCount (void) { uint32_t osKernelGetSysTimerFreq (void) { uint32_t freq; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { freq = svcRtxKernelGetSysTimerFreq(); } else { freq = __svcKernelGetSysTimerFreq(); diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c index 00cfe6426df..70663168e14 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -24,7 +24,6 @@ */ #include "cmsis_compiler.h" -#include "RTX_Config.h" #include "rtx_os.h" #ifdef RTE_Compiler_EventRecorder @@ -87,7 +86,7 @@ __attribute__((section(".bss.os.thread.cb"))); // Thread Default Stack #if (OS_THREAD_DEF_STACK_NUM != 0) -static uint64_t os_thread_def_stack[OS_THREAD_DEF_STACK_NUM*(OS_STACK_SIZE/8)] \ +static uint64_t os_thread_def_stack[(OS_THREAD_DEF_STACK_NUM*OS_STACK_SIZE)/8] \ __attribute__((section(".bss.os.thread.stack"))); #endif @@ -105,32 +104,21 @@ __attribute__((section(".data.os.thread.mpi"))) = // Memory Pool for Thread Stack #if (OS_THREAD_USER_STACK_SIZE != 0) -static uint64_t os_thread_stack[2 + OS_THREAD_NUM + (OS_THREAD_USER_STACK_SIZE/8)] \ +static uint64_t os_thread_stack[(16 + (8*OS_THREAD_NUM) + OS_THREAD_USER_STACK_SIZE)/8] \ __attribute__((section(".bss.os.thread.stack"))); #endif #endif // (OS_THREAD_OBJ_MEM != 0) -// Stack overrun checking -#if (OS_STACK_CHECK == 0) -// Override library function -extern void osRtxThreadStackCheck (void); - void osRtxThreadStackCheck (void) {} -#endif - - // Idle Thread Control Block static osRtxThread_t os_idle_thread_cb \ __attribute__((section(".bss.os.thread.cb"))); // Idle Thread Stack -#if defined (__CC_ARM) -static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8]; -#else static uint64_t os_idle_thread_stack[OS_IDLE_THREAD_STACK_SIZE/8] \ -__attribute__((section(".bss.os.thread.stack"))); -#endif +__attribute__((section(".bss.os.thread.idle.stack"))); + // Idle Thread Attributes static const osThreadAttr_t os_idle_thread_attr = { #if defined(OS_IDLE_THREAD_NAME) @@ -184,13 +172,9 @@ __attribute__((section(".data.os.timer.mpi"))) = static osRtxThread_t os_timer_thread_cb \ __attribute__((section(".bss.os.thread.cb"))); -#if defined (__CC_ARM) -static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8]; -#else // Timer Thread Stack static uint64_t os_timer_thread_stack[OS_TIMER_THREAD_STACK_SIZE/8] \ -__attribute__((section(".bss.os.thread.stack"))); -#endif +__attribute__((section(".bss.os.thread.timer.stack"))); // Timer Thread Attributes static const osThreadAttr_t os_timer_thread_attr = { @@ -232,10 +216,8 @@ static const osMessageQueueAttr_t os_timer_mq_attr = { (uint32_t)sizeof(os_timer_mq_data) }; -#else - -extern void osRtxTimerThread (void *argument); - void osRtxTimerThread (void *argument) { (void)argument; } +extern int32_t osRtxTimerSetup (void); +extern void osRtxTimerThread (void *argument); #endif // ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)) @@ -326,7 +308,7 @@ __attribute__((section(".data.os.mempool.mpi"))) = #if ((OS_MEMPOOL_DATA_SIZE % 8) != 0) #error "Invalid Data Memory size for Memory Pools!" #endif -static uint64_t os_mp_data[2 + OS_MEMPOOL_NUM + (OS_MEMPOOL_DATA_SIZE/8)] \ +static uint64_t os_mp_data[(16 + (8*OS_MEMPOOL_NUM) + OS_MEMPOOL_DATA_SIZE)/8] \ __attribute__((section(".bss.os.mempool.mem"))); #endif @@ -356,7 +338,7 @@ __attribute__((section(".data.os.msgqueue.mpi"))) = #if ((OS_MSGQUEUE_DATA_SIZE % 8) != 0) #error "Invalid Data Memory size for Message Queues!" 
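// The os_mq_data sizing below is byte-accurate: 16 bytes of memory-pool header,
// (8+12) bytes of per-message overhead (what appear to be the allocator block
// header and the message header; the exact split is an RTX5-internal assumption),
// plus the raw data size, rounded up to uint64_t units. For example, 4 messages
// with OS_MSGQUEUE_DATA_SIZE = 64 give (16 + 20*4 + 64 + 7)/8 = 20 array elements.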
#endif -static uint64_t os_mq_data[2 + OS_MSGQUEUE_NUM + (OS_MSGQUEUE_DATA_SIZE/8)] \ +static uint64_t os_mq_data[(16 + ((8+12)*OS_MSGQUEUE_NUM) + OS_MSGQUEUE_DATA_SIZE + 7)/8] \ __attribute__((section(".bss.os.msgqueue.mem"))); #endif @@ -368,69 +350,57 @@ __attribute__((section(".bss.os.msgqueue.mem"))); #if (defined(OS_EVR_INIT) && (OS_EVR_INIT != 0)) -// Initial Thread configuration covered also Thread Flags and Generic Wait -#if defined(OS_EVR_THREAD_FILTER) -#if !defined(OS_EVR_THFLAGS_FILTER) -#define OS_EVR_THFLAGS_FILTER OS_EVR_THREAD_FILTER -#endif -#if !defined(OS_EVR_WAIT_FILTER) -#define OS_EVR_WAIT_FILTER OS_EVR_THREAD_FILTER -#endif -#endif +#ifdef RTE_Compiler_EventRecorder + +// Event Recorder Initialize +__STATIC_INLINE void evr_initialize (void) { + + (void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START); -// Migrate initial filter configuration -#if defined(OS_EVR_MEMORY_FILTER) -#define OS_EVR_MEMORY_LEVEL (((OS_EVR_MEMORY_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMORY_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_MEMORY_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo); + (void)EventRecorderDisable(~OS_EVR_MEMORY_LEVEL & 0x0FU, EvtRtxMemoryNo, EvtRtxMemoryNo); #endif -#if defined(OS_EVR_KERNEL_FILTER) -#define OS_EVR_KERNEL_LEVEL (((OS_EVR_KERNEL_FILTER & 0x80U) != 0U) ? (OS_EVR_KERNEL_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_KERNEL_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo); + (void)EventRecorderDisable(~OS_EVR_KERNEL_LEVEL & 0x0FU, EvtRtxKernelNo, EvtRtxKernelNo); #endif -#if defined(OS_EVR_THREAD_FILTER) -#define OS_EVR_THREAD_LEVEL (((OS_EVR_THREAD_FILTER & 0x80U) != 0U) ? (OS_EVR_THREAD_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_THREAD_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo); + (void)EventRecorderDisable(~OS_EVR_THREAD_LEVEL & 0x0FU, EvtRtxThreadNo, EvtRtxThreadNo); #endif -#if defined(OS_EVR_WAIT_FILTER) -#define OS_EVR_WAIT_LEVEL (((OS_EVR_WAIT_FILTER & 0x80U) != 0U) ? (OS_EVR_WAIT_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_WAIT_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo); + (void)EventRecorderDisable(~OS_EVR_WAIT_LEVEL & 0x0FU, EvtRtxWaitNo, EvtRtxWaitNo); #endif -#if defined(OS_EVR_THFLAGS_FILTER) -#define OS_EVR_THFLAGS_LEVEL (((OS_EVR_THFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_THFLAGS_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_THFLAGS_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo); + (void)EventRecorderDisable(~OS_EVR_THFLAGS_LEVEL & 0x0FU, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo); #endif -#if defined(OS_EVR_EVFLAGS_FILTER) -#define OS_EVR_EVFLAGS_LEVEL (((OS_EVR_EVFLAGS_FILTER & 0x80U) != 0U) ? (OS_EVR_EVFLAGS_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_EVFLAGS_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo); + (void)EventRecorderDisable(~OS_EVR_EVFLAGS_LEVEL & 0x0FU, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo); #endif -#if defined(OS_EVR_TIMER_FILTER) -#define OS_EVR_TIMER_LEVEL (((OS_EVR_TIMER_FILTER & 0x80U) != 0U) ? 
(OS_EVR_TIMER_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_TIMER_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo); + (void)EventRecorderDisable(~OS_EVR_TIMER_LEVEL & 0x0FU, EvtRtxTimerNo, EvtRtxTimerNo); #endif -#if defined(OS_EVR_MUTEX_FILTER) -#define OS_EVR_MUTEX_LEVEL (((OS_EVR_MUTEX_FILTER & 0x80U) != 0U) ? (OS_EVR_MUTEX_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_MUTEX_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo); + (void)EventRecorderDisable(~OS_EVR_MUTEX_LEVEL & 0x0FU, EvtRtxMutexNo, EvtRtxMutexNo); #endif -#if defined(OS_EVR_SEMAPHORE_FILTER) -#define OS_EVR_SEMAPHORE_LEVEL (((OS_EVR_SEMAPHORE_FILTER & 0x80U) != 0U) ? (OS_EVR_SEMAPHORE_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_SEMAPHORE_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo); + (void)EventRecorderDisable(~OS_EVR_SEMAPHORE_LEVEL & 0x0FU, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo); #endif -#if defined(OS_EVR_MEMPOOL_FILTER) -#define OS_EVR_MEMPOOL_LEVEL (((OS_EVR_MEMPOOL_FILTER & 0x80U) != 0U) ? (OS_EVR_MEMPOOL_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_MEMPOOL_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo); + (void)EventRecorderDisable(~OS_EVR_MEMPOOL_LEVEL & 0x0FU, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo); #endif -#if defined(OS_EVR_MSGQUEUE_FILTER) -#define OS_EVR_MSGQUEUE_LEVEL (((OS_EVR_MSGQUEUE_FILTER & 0x80U) != 0U) ? (OS_EVR_MSGQUEUE_FILTER & 0x0FU) : 0U) +#if ((OS_EVR_MSGQUEUE_LEVEL & 0x80U) != 0U) + (void)EventRecorderEnable( OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo); + (void)EventRecorderDisable(~OS_EVR_MSGQUEUE_LEVEL & 0x0FU, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo); #endif - -#if defined(RTE_Compiler_EventRecorder) - -// Event Recorder Initialize -__STATIC_INLINE void evr_initialize (void) { - - (void)EventRecorderInitialize(OS_EVR_LEVEL, (uint32_t)OS_EVR_START); - - (void)EventRecorderEnable(OS_EVR_MEMORY_LEVEL, EvtRtxMemoryNo, EvtRtxMemoryNo); - (void)EventRecorderEnable(OS_EVR_KERNEL_LEVEL, EvtRtxKernelNo, EvtRtxKernelNo); - (void)EventRecorderEnable(OS_EVR_THREAD_LEVEL, EvtRtxThreadNo, EvtRtxThreadNo); - (void)EventRecorderEnable(OS_EVR_WAIT_LEVEL, EvtRtxWaitNo, EvtRtxWaitNo); - (void)EventRecorderEnable(OS_EVR_THFLAGS_LEVEL, EvtRtxThreadFlagsNo, EvtRtxThreadFlagsNo); - (void)EventRecorderEnable(OS_EVR_EVFLAGS_LEVEL, EvtRtxEventFlagsNo, EvtRtxEventFlagsNo); - (void)EventRecorderEnable(OS_EVR_TIMER_LEVEL, EvtRtxTimerNo, EvtRtxTimerNo); - (void)EventRecorderEnable(OS_EVR_MUTEX_LEVEL, EvtRtxMutexNo, EvtRtxMutexNo); - (void)EventRecorderEnable(OS_EVR_SEMAPHORE_LEVEL, EvtRtxSemaphoreNo, EvtRtxSemaphoreNo); - (void)EventRecorderEnable(OS_EVR_MEMPOOL_LEVEL, EvtRtxMemoryPoolNo, EvtRtxMemoryPoolNo); - (void)EventRecorderEnable(OS_EVR_MSGQUEUE_LEVEL, EvtRtxMessageQueueNo, EvtRtxMessageQueueNo); } #else @@ -539,9 +509,13 @@ __attribute__((section(".rodata"))) = &os_idle_thread_attr, #if ((OS_TIMER_THREAD_STACK_SIZE != 0) && (OS_TIMER_CB_QUEUE != 0)) &os_timer_thread_attr, + osRtxTimerThread, + osRtxTimerSetup, &os_timer_mq_attr, (uint32_t)OS_TIMER_CB_QUEUE #else + NULL, + NULL, NULL, NULL, 0U @@ -553,9 +527,9 @@ __attribute__((section(".rodata"))) = //lint -esym(526,irqRtxLib) "Defined by Exception handlers" //lint -esym(714,irqRtxLibRef) "Non weak reference" //lint -esym(765,irqRtxLibRef) "Global scope" -extern uint8_t irqRtxLib; -extern const uint8_t 
*irqRtxLibRef; - const uint8_t *irqRtxLibRef = &irqRtxLib; +extern const uint8_t irqRtxLib; +extern const uint8_t * const irqRtxLibRef; + const uint8_t * const irqRtxLibRef = &irqRtxLib; // Default User SVC Table //lint -esym(714,osRtxUserSVC) "Referenced by Exception handlers" @@ -570,35 +544,43 @@ __WEAK void * const osRtxUserSVC[1] = { (void *)0 }; #if defined(__CC_ARM) || \ (defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)) -static uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base"))); //lint -esym(728,__os_thread_cb_start__) -static uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit"))); //lint -esym(728,__os_thread_cb_end__) -static uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base"))); //lint -esym(728,__os_timer_cb_start__) -static uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit"))); //lint -esym(728,__os_timer_cb_end__) -static uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base"))); //lint -esym(728,__os_evflags_cb_start__) -static uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit"))); //lint -esym(728,__os_evflags_cb_end__) -static uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base"))); //lint -esym(728,__os_mutex_cb_start__) -static uint32_t __os_mutex_cb_end__ __attribute__((weakref(".bss.os.mutex.cb$$Limit"))); //lint -esym(728,__os_mutex_cb_end__) -static uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base"))); //lint -esym(728,__os_semaphore_cb_start__) -static uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit"))); //lint -esym(728,__os_semaphore_cb_end__) -static uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base"))); //lint -esym(728,__os_mempool_cb_start__) -static uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit"))); //lint -esym(728,__os_mempool_cb_end__) -static uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base"))); //lint -esym(728,__os_msgqueue_cb_start__) -static uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit"))); //lint -esym(728,__os_msgqueue_cb_end__) +// Initialized through linker +//lint -esym(728, __os_thread_cb_start__, __os_thread_cb_end__) +//lint -esym(728, __os_timer_cb_start__, __os_timer_cb_end__) +//lint -esym(728, __os_evflags_cb_start__, __os_evflags_cb_end__) +//lint -esym(728, __os_mutex_cb_start__, __os_mutex_cb_end__) +//lint -esym(728, __os_semaphore_cb_start__, __os_semaphore_cb_end__) +//lint -esym(728, __os_mempool_cb_start__, __os_mempool_cb_end__) +//lint -esym(728, __os_msgqueue_cb_start__, __os_msgqueue_cb_end__) +static const uint32_t __os_thread_cb_start__ __attribute__((weakref(".bss.os.thread.cb$$Base"))); +static const uint32_t __os_thread_cb_end__ __attribute__((weakref(".bss.os.thread.cb$$Limit"))); +static const uint32_t __os_timer_cb_start__ __attribute__((weakref(".bss.os.timer.cb$$Base"))); +static const uint32_t __os_timer_cb_end__ __attribute__((weakref(".bss.os.timer.cb$$Limit"))); +static const uint32_t __os_evflags_cb_start__ __attribute__((weakref(".bss.os.evflags.cb$$Base"))); +static const uint32_t __os_evflags_cb_end__ __attribute__((weakref(".bss.os.evflags.cb$$Limit"))); +static const uint32_t __os_mutex_cb_start__ __attribute__((weakref(".bss.os.mutex.cb$$Base"))); +static const uint32_t __os_mutex_cb_end__ 
__attribute__((weakref(".bss.os.mutex.cb$$Limit"))); +static const uint32_t __os_semaphore_cb_start__ __attribute__((weakref(".bss.os.semaphore.cb$$Base"))); +static const uint32_t __os_semaphore_cb_end__ __attribute__((weakref(".bss.os.semaphore.cb$$Limit"))); +static const uint32_t __os_mempool_cb_start__ __attribute__((weakref(".bss.os.mempool.cb$$Base"))); +static const uint32_t __os_mempool_cb_end__ __attribute__((weakref(".bss.os.mempool.cb$$Limit"))); +static const uint32_t __os_msgqueue_cb_start__ __attribute__((weakref(".bss.os.msgqueue.cb$$Base"))); +static const uint32_t __os_msgqueue_cb_end__ __attribute__((weakref(".bss.os.msgqueue.cb$$Limit"))); #else -extern uint32_t __os_thread_cb_start__ __attribute__((weak)); -extern uint32_t __os_thread_cb_end__ __attribute__((weak)); -extern uint32_t __os_timer_cb_start__ __attribute__((weak)); -extern uint32_t __os_timer_cb_end__ __attribute__((weak)); -extern uint32_t __os_evflags_cb_start__ __attribute__((weak)); -extern uint32_t __os_evflags_cb_end__ __attribute__((weak)); -extern uint32_t __os_mutex_cb_start__ __attribute__((weak)); -extern uint32_t __os_mutex_cb_end__ __attribute__((weak)); -extern uint32_t __os_semaphore_cb_start__ __attribute__((weak)); -extern uint32_t __os_semaphore_cb_end__ __attribute__((weak)); -extern uint32_t __os_mempool_cb_start__ __attribute__((weak)); -extern uint32_t __os_mempool_cb_end__ __attribute__((weak)); -extern uint32_t __os_msgqueue_cb_start__ __attribute__((weak)); -extern uint32_t __os_msgqueue_cb_end__ __attribute__((weak)); +extern const uint32_t __os_thread_cb_start__ __attribute__((weak)); +extern const uint32_t __os_thread_cb_end__ __attribute__((weak)); +extern const uint32_t __os_timer_cb_start__ __attribute__((weak)); +extern const uint32_t __os_timer_cb_end__ __attribute__((weak)); +extern const uint32_t __os_evflags_cb_start__ __attribute__((weak)); +extern const uint32_t __os_evflags_cb_end__ __attribute__((weak)); +extern const uint32_t __os_mutex_cb_start__ __attribute__((weak)); +extern const uint32_t __os_mutex_cb_end__ __attribute__((weak)); +extern const uint32_t __os_semaphore_cb_start__ __attribute__((weak)); +extern const uint32_t __os_semaphore_cb_end__ __attribute__((weak)); +extern const uint32_t __os_mempool_cb_start__ __attribute__((weak)); +extern const uint32_t __os_mempool_cb_end__ __attribute__((weak)); +extern const uint32_t __os_msgqueue_cb_start__ __attribute__((weak)); +extern const uint32_t __os_msgqueue_cb_end__ __attribute__((weak)); #endif //lint -e{9067} "extern array declared without size" @@ -649,6 +631,14 @@ __WEAK void software_init_hook (void) { (void)osKernelInitialize(); } +#elif defined(__ICCARM__) + +extern void $Super$$__iar_data_init3 (void); +void $Sub$$__iar_data_init3 (void) { + $Super$$__iar_data_init3(); + (void)osKernelInitialize(); +} + #endif @@ -717,7 +707,7 @@ void *__user_perthread_libspace (void) { } } if (n == (uint32_t)OS_THREAD_LIBSPACE_NUM) { - (void)osRtxErrorNotify(osRtxErrorClibSpace, id); + (void)osRtxKernelErrorNotify(osRtxErrorClibSpace, id); } } else { n = OS_THREAD_LIBSPACE_NUM; @@ -735,27 +725,24 @@ typedef void *mutex; //lint -e818 "Pointer 'm' could be declared as pointing to const" // Initialize mutex -#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -#endif int _mutex_initialize(mutex *m); -__WEAK int _mutex_initialize(mutex *m) { +int _mutex_initialize(mutex *m) { int result; + *m = osMutexNew(NULL); if (*m != NULL) { result = 1; } else { result = 0; - (void)osRtxErrorNotify(osRtxErrorClibMutex, 
m); + (void)osRtxKernelErrorNotify(osRtxErrorClibMutex, m); } return result; } // Acquire mutex -#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -#endif -__WEAK void _mutex_acquire(mutex *m); +void _mutex_acquire(mutex *m); void _mutex_acquire(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexAcquire(*m, osWaitForever); @@ -763,10 +750,8 @@ void _mutex_acquire(mutex *m) { } // Release mutex -#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -#endif -__WEAK void _mutex_release(mutex *m); +void _mutex_release(mutex *m); void _mutex_release(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexRelease(*m); @@ -774,10 +759,8 @@ void _mutex_release(mutex *m) { } // Free mutex -#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -#endif -__WEAK void _mutex_free(mutex *m); +void _mutex_free(mutex *m); void _mutex_free(mutex *m) { (void)osMutexDelete(*m); } diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.h index 2d23625e49d..242f36f9f62 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,6 +27,7 @@ #define RTX_LIB_H_ #include +#include "rtx_def.h" // RTX Configuration definitions #include "rtx_core_c.h" // Cortex core definitions #if ((defined(__ARM_ARCH_8M_BASE__) && (__ARM_ARCH_8M_BASE__ != 0)) || \ (defined(__ARM_ARCH_8M_MAIN__) && (__ARM_ARCH_8M_MAIN__ != 0)) || \ @@ -35,7 +36,6 @@ #endif #include "os_tick.h" // CMSIS OS Tick API #include "cmsis_os2.h" // CMSIS RTOS API -#include "RTX_Config.h" // RTX Configuration #include "rtx_os.h" // RTX OS definitions #include "rtx_evr.h" // RTX Event Recorder definitions @@ -189,11 +189,14 @@ extern void osRtxThreadSwitch (os_thread_t *thread); extern void osRtxThreadDispatch (os_thread_t *thread); extern void osRtxThreadWaitExit (os_thread_t *thread, uint32_t ret_val, bool_t dispatch); extern bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout); -extern void osRtxThreadStackCheck (void); +#ifdef RTX_STACK_CHECK +extern bool_t osRtxThreadStackCheck (const os_thread_t *thread); +#endif extern bool_t osRtxThreadStartup (void); // Timer Library functions -extern void osRtxTimerThread (void *argument); +extern int32_t osRtxTimerSetup (void); +extern void osRtxTimerThread (void *argument); // Mutex Library functions extern void osRtxMutexOwnerRelease (os_mutex_t *mutex_list); @@ -209,6 +212,9 @@ extern uint32_t osRtxMemoryPoolInit (os_mp_info_t *mp_info, uint32_t block_co extern void *osRtxMemoryPoolAlloc (os_mp_info_t *mp_info); extern osStatus_t osRtxMemoryPoolFree (os_mp_info_t *mp_info, void *block); +// Message Queue Library functions +extern int32_t osRtxMessageQueueTimerSetup (void); + // System Library functions extern void osRtxTick_Handler (void); extern void osRtxPendSV_Handler (void); diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mempool.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mempool.c index f7a18723d27..30c924825a5 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mempool.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mempool.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxMemoryPoolMemUsage \ __attribute__((section(".data.os.mempool.obj"))) = { 0U, 0U, 0U }; @@ -191,20 +191,16 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc const char *name; // Check parameters - if ((block_count == 0U) || (block_size == 0U)) { + if ((block_count == 0U) || (block_size == 0U) || + ((__CLZ(block_count) + __CLZ(block_size)) < 32U)) { EvrRtxMemoryPoolError(NULL, (int32_t)osErrorParameter); //lint -e{904} "Return statement before end of function" [MISRA Note 1] return NULL; } + b_count = block_count; b_size = (block_size + 3U) & ~3UL; - if ((__CLZ(b_count) + __CLZ(b_size)) < 32U) { - EvrRtxMemoryPoolError(NULL, (int32_t)osErrorParameter); - //lint -e{904} "Return statement before end of function" [MISRA Note 1] - return NULL; - } - - size = b_count * b_size; + size = b_count * b_size; // Process attributes if (attr != NULL) { @@ -229,7 +225,7 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc } } if (mp_mem != NULL) { - //lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7] + //lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7] if ((((uint32_t)mp_mem & 3U) != 0U) || (mp_size < size)) { EvrRtxMemoryPoolError(NULL, osRtxErrorInvalidDataMemory); //lint -e{904} "Return statement before end of function" [MISRA Note 1] @@ -257,7 +253,7 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] mp = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_memory_pool_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (mp != NULL) { uint32_t used; osRtxMemoryPoolMemUsage.cnt_alloc++; @@ -283,13 +279,13 @@ static osMemoryPoolId_t svcRtxMemoryPoolNew (uint32_t block_count, uint32_t bloc } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, mp); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxMemoryPoolMemUsage.cnt_free++; #endif } mp = NULL; } else { - memset(mp_mem, 0, size); + (void)memset(mp_mem, 0, size); } flags |= osRtxFlagSystemMemory; } @@ -508,7 +504,7 @@ static osStatus_t svcRtxMemoryPoolDelete (osMemoryPoolId_t mp_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, mp); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxMemoryPoolMemUsage.cnt_free++; #endif } @@ -594,7 +590,7 @@ osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, con osMemoryPoolId_t mp_id; EvrRtxMemoryPoolNew(block_count, block_size, attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMemoryPoolError(NULL, (int32_t)osErrorISR); mp_id = NULL; } else { @@ -607,7 +603,7 @@ osMemoryPoolId_t osMemoryPoolNew (uint32_t block_count, uint32_t block_size, con const char *osMemoryPoolGetName (osMemoryPoolId_t mp_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMemoryPoolGetName(mp_id, NULL); name = NULL; } else { @@ -621,7 +617,7 @@ void *osMemoryPoolAlloc (osMemoryPoolId_t mp_id, uint32_t timeout) { void *memory; EvrRtxMemoryPoolAlloc(mp_id, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { memory = 
isrRtxMemoryPoolAlloc(mp_id, timeout); } else { memory = __svcMemoryPoolAlloc(mp_id, timeout); @@ -634,7 +630,7 @@ osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) { osStatus_t status; EvrRtxMemoryPoolFree(mp_id, block); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { status = isrRtxMemoryPoolFree(mp_id, block); } else { status = __svcMemoryPoolFree(mp_id, block); @@ -646,7 +642,7 @@ osStatus_t osMemoryPoolFree (osMemoryPoolId_t mp_id, void *block) { uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) { uint32_t capacity; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { capacity = svcRtxMemoryPoolGetCapacity(mp_id); } else { capacity = __svcMemoryPoolGetCapacity(mp_id); @@ -658,7 +654,7 @@ uint32_t osMemoryPoolGetCapacity (osMemoryPoolId_t mp_id) { uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) { uint32_t block_size; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { block_size = svcRtxMemoryPoolGetBlockSize(mp_id); } else { block_size = __svcMemoryPoolGetBlockSize(mp_id); @@ -670,7 +666,7 @@ uint32_t osMemoryPoolGetBlockSize (osMemoryPoolId_t mp_id) { uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { count = svcRtxMemoryPoolGetCount(mp_id); } else { count = __svcMemoryPoolGetCount(mp_id); @@ -682,7 +678,7 @@ uint32_t osMemoryPoolGetCount (osMemoryPoolId_t mp_id) { uint32_t osMemoryPoolGetSpace (osMemoryPoolId_t mp_id) { uint32_t space; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { space = svcRtxMemoryPoolGetSpace(mp_id); } else { space = __svcMemoryPoolGetSpace(mp_id); @@ -695,7 +691,7 @@ osStatus_t osMemoryPoolDelete (osMemoryPoolId_t mp_id) { osStatus_t status; EvrRtxMemoryPoolDelete(mp_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMemoryPoolError(mp_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c index 5451998c6f7..c052811e508 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_msgqueue.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxMessageQueueMemUsage \ __attribute__((section(".data.os.msgqueue.obj"))) = { 0U, 0U, 0U }; @@ -189,7 +189,7 @@ static void osRtxMessageQueuePostProcess (os_message_t *msg) { reg = osRtxThreadRegPtr(thread); //lint -e{923} "cast from unsigned int to pointer" ptr_src = (const void *)reg[2]; - memcpy(&msg0[1], ptr_src, mq->msg_size); + (void)memcpy(&msg0[1], ptr_src, mq->msg_size); // Store Message into Queue msg0->id = osRtxIdMessage; msg0->flags = 0U; @@ -214,7 +214,7 @@ static void osRtxMessageQueuePostProcess (os_message_t *msg) { reg = osRtxThreadRegPtr(thread); //lint -e{923} "cast from unsigned int to pointer" ptr_dst = (void *)reg[2]; - memcpy(ptr_dst, &msg[1], mq->msg_size); + (void)memcpy(ptr_dst, &msg[1], mq->msg_size); if (reg[3] != 0U) { //lint -e{923} -e{9078} "cast from unsigned int to pointer" *((uint8_t *)reg[3]) = msg->priority; @@ -245,19 +245,15 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms const char *name; // Check parameters - if ((msg_count == 0U) || (msg_size == 0U)) { - EvrRtxMessageQueueError(NULL, (int32_t)osErrorParameter); - //lint -e{904} "Return statement before end of function" [MISRA Note 1] - return NULL; - } - block_size = ((msg_size + 3U) & ~3UL) + sizeof(os_message_t); - if ((__CLZ(msg_count) + __CLZ(block_size)) < 32U) { + if ((msg_count == 0U) || (msg_size == 0U) || + ((__CLZ(msg_count) + __CLZ(msg_size)) < 32U)) { EvrRtxMessageQueueError(NULL, (int32_t)osErrorParameter); //lint -e{904} "Return statement before end of function" [MISRA Note 1] return NULL; } - size = msg_count * block_size; + block_size = ((msg_size + 3U) & ~3UL) + sizeof(os_message_t); + size = msg_count * block_size; // Process attributes if (attr != NULL) { @@ -282,7 +278,7 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms } } if (mq_mem != NULL) { - //lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7] + //lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7] if ((((uint32_t)mq_mem & 3U) != 0U) || (mq_size < size)) { EvrRtxMessageQueueError(NULL, osRtxErrorInvalidDataMemory); //lint -e{904} "Return statement before end of function" [MISRA Note 1] @@ -310,7 +306,7 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] mq = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_message_queue_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (mq != NULL) { uint32_t used; osRtxMessageQueueMemUsage.cnt_alloc++; @@ -336,13 +332,13 @@ static osMessageQueueId_t svcRtxMessageQueueNew (uint32_t msg_count, uint32_t ms } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, mq); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxMessageQueueMemUsage.cnt_free++; #endif } mq = NULL; } else { - memset(mq_mem, 0, size); + (void)memset(mq_mem, 0, size); } flags |= osRtxFlagSystemMemory; } @@ -414,7 +410,7 @@ static osStatus_t svcRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *m reg = osRtxThreadRegPtr(thread); //lint -e{923} "cast from unsigned int to pointer" ptr = (void *)reg[2]; - memcpy(ptr, msg_ptr, mq->msg_size); + (void)memcpy(ptr, msg_ptr, mq->msg_size); if (reg[3] != 0U) { 
//lint -e{923} -e{9078} "cast from unsigned int to pointer" *((uint8_t *)reg[3]) = msg_prio; @@ -427,7 +423,7 @@ static osStatus_t svcRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *m msg = osRtxMemoryPoolAlloc(&mq->mp_info); if (msg != NULL) { // Copy Message - memcpy(&msg[1], msg_ptr, mq->msg_size); + (void)memcpy(&msg[1], msg_ptr, mq->msg_size); // Put Message into Queue msg->id = osRtxIdMessage; msg->flags = 0U; @@ -485,7 +481,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr if (msg != NULL) { MessageQueueRemove(mq, msg); // Copy Message - memcpy(msg_ptr, &msg[1], mq->msg_size); + (void)memcpy(msg_ptr, &msg[1], mq->msg_size); if (msg_prio != NULL) { *msg_prio = msg->priority; } @@ -506,7 +502,7 @@ static osStatus_t svcRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr reg = osRtxThreadRegPtr(thread); //lint -e{923} "cast from unsigned int to pointer" ptr = (const void *)reg[2]; - memcpy(&msg[1], ptr, mq->msg_size); + (void)memcpy(&msg[1], ptr, mq->msg_size); // Store Message into Queue msg->id = osRtxIdMessage; msg->flags = 0U; @@ -655,7 +651,7 @@ static osStatus_t svcRtxMessageQueueReset (osMessageQueueId_t mq_id) { reg = osRtxThreadRegPtr(thread); //lint -e{923} "cast from unsigned int to pointer" ptr = (const void *)reg[2]; - memcpy(&msg[1], ptr, mq->msg_size); + (void)memcpy(&msg[1], ptr, mq->msg_size); // Store Message into Queue msg->id = osRtxIdMessage; msg->flags = 0U; @@ -709,7 +705,7 @@ static osStatus_t svcRtxMessageQueueDelete (osMessageQueueId_t mq_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, mq); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxMessageQueueMemUsage.cnt_free++; #endif } @@ -756,7 +752,7 @@ osStatus_t isrRtxMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, msg = osRtxMemoryPoolAlloc(&mq->mp_info); if (msg != NULL) { // Copy Message - memcpy(&msg[1], msg_ptr, mq->msg_size); + (void)memcpy(&msg[1], msg_ptr, mq->msg_size); msg->id = osRtxIdMessage; msg->flags = 0U; msg->priority = msg_prio; @@ -816,6 +812,23 @@ osStatus_t isrRtxMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8 } +// ==== Library functions ==== + +/// Create a Message Queue for the Timer Thread. +int32_t osRtxMessageQueueTimerSetup (void) { + int32_t ret = -1; + + osRtxInfo.timer.mq = osRtxMessageQueueId( + svcRtxMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr) + ); + if (osRtxInfo.timer.mq != NULL) { + ret = 0; + } + + return ret; +} + + // ==== Public API ==== /// Create and Initialize a Message Queue object. 
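A note on the parameter check shared by svcRtxMemoryPoolNew and svcRtxMessageQueueNew above: folding (__CLZ(count) + __CLZ(size)) < 32U into the zero test rejects any count/size pair whose product could exceed 32 bits before the multiplication happens, because a < 2^(32-clz(a)) implies a*b < 2^(64-clz(a)-clz(b)). The guard is deliberately conservative: it can reject a few products that would still fit (e.g. 0x10000 * 0x8000 = 2^31), which is the price of avoiding a 64-bit multiply. A host-side sketch, using GCC/Clang __builtin_clz as a stand-in for the CMSIS __CLZ intrinsic (mul32_may_overflow is an illustrative name, not RTX API):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the overflow guard in svcRtxMemoryPoolNew/svcRtxMessageQueueNew.
   Both __builtin_clz and __CLZ are undefined for 0, so the zero checks must
   come first, exactly as they do in the RTX code. */
static int mul32_may_overflow(uint32_t a, uint32_t b) {
  if ((a == 0U) || (b == 0U)) {
    return 0;                              /* product is 0, cannot overflow */
  }
  /* a < 2^(32-clz(a)) and b < 2^(32-clz(b)), so
     a*b < 2^(64-clz(a)-clz(b)) <= 2^32 whenever clz(a)+clz(b) >= 32. */
  return (__builtin_clz(a) + __builtin_clz(b)) < 32;
}

int main(void) {
  printf("%d\n", mul32_may_overflow(0x10000U, 0x10000U)); /* 1: 2^32 overflows   */
  printf("%d\n", mul32_may_overflow(0x10000U, 0x8000U));  /* 1: conservative reject */
  printf("%d\n", mul32_may_overflow(1024U, 64U));         /* 0: safe             */
  return 0;
}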
@@ -823,7 +836,7 @@ osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, con osMessageQueueId_t mq_id; EvrRtxMessageQueueNew(msg_count, msg_size, attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMessageQueueError(NULL, (int32_t)osErrorISR); mq_id = NULL; } else { @@ -836,7 +849,7 @@ osMessageQueueId_t osMessageQueueNew (uint32_t msg_count, uint32_t msg_size, con const char *osMessageQueueGetName (osMessageQueueId_t mq_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMessageQueueGetName(mq_id, NULL); name = NULL; } else { @@ -850,7 +863,7 @@ osStatus_t osMessageQueuePut (osMessageQueueId_t mq_id, const void *msg_ptr, uin osStatus_t status; EvrRtxMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { status = isrRtxMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout); } else { status = __svcMessageQueuePut(mq_id, msg_ptr, msg_prio, timeout); @@ -863,7 +876,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t * osStatus_t status; EvrRtxMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { status = isrRtxMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout); } else { status = __svcMessageQueueGet(mq_id, msg_ptr, msg_prio, timeout); @@ -875,7 +888,7 @@ osStatus_t osMessageQueueGet (osMessageQueueId_t mq_id, void *msg_ptr, uint8_t * uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) { uint32_t capacity; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { capacity = svcRtxMessageQueueGetCapacity(mq_id); } else { capacity = __svcMessageQueueGetCapacity(mq_id); @@ -887,7 +900,7 @@ uint32_t osMessageQueueGetCapacity (osMessageQueueId_t mq_id) { uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) { uint32_t msg_size; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { msg_size = svcRtxMessageQueueGetMsgSize(mq_id); } else { msg_size = __svcMessageQueueGetMsgSize(mq_id); @@ -899,7 +912,7 @@ uint32_t osMessageQueueGetMsgSize (osMessageQueueId_t mq_id) { uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { count = svcRtxMessageQueueGetCount(mq_id); } else { count = __svcMessageQueueGetCount(mq_id); @@ -911,7 +924,7 @@ uint32_t osMessageQueueGetCount (osMessageQueueId_t mq_id) { uint32_t osMessageQueueGetSpace (osMessageQueueId_t mq_id) { uint32_t space; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { space = svcRtxMessageQueueGetSpace(mq_id); } else { space = __svcMessageQueueGetSpace(mq_id); @@ -924,7 +937,7 @@ osStatus_t osMessageQueueReset (osMessageQueueId_t mq_id) { osStatus_t status; EvrRtxMessageQueueReset(mq_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMessageQueueError(mq_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -938,7 +951,7 @@ osStatus_t osMessageQueueDelete (osMessageQueueId_t mq_id) { osStatus_t status; EvrRtxMessageQueueDelete(mq_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMessageQueueError(mq_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mutex.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mutex.c index 
e97def252db..fb889b45d99 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mutex.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_mutex.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxMutexMemUsage \ __attribute__((section(".data.os.mutex.obj"))) = { 0U, 0U, 0U }; @@ -78,7 +78,7 @@ void osRtxMutexOwnerRelease (os_mutex_t *mutex_list) { void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_wakeup) { const os_mutex_t *mutex0; os_thread_t *thread; - os_thread_t *thread0; + const os_thread_t *thread0; int8_t priority; // Restore owner Thread priority @@ -88,15 +88,17 @@ void osRtxMutexOwnerRestore (const os_mutex_t *mutex, const os_thread_t *thread_ mutex0 = thread->mutex_list; // Check Mutexes owned by Thread do { - // Check Threads waiting for Mutex - thread0 = mutex0->thread_list; - if (thread0 == thread_wakeup) { - // Skip thread that is waken-up - thread0 = thread0->thread_next; - } - if ((thread0 != NULL) && (thread0->priority > priority)) { - // Higher priority Thread is waiting for Mutex - priority = thread0->priority; + if ((mutex0->attr & osMutexPrioInherit) != 0U) { + // Check Threads waiting for Mutex + thread0 = mutex0->thread_list; + if (thread0 == thread_wakeup) { + // Skip thread that is waken-up + thread0 = thread0->thread_next; + } + if ((thread0 != NULL) && (thread0->priority > priority)) { + // Higher priority Thread is waiting for Mutex + priority = thread0->priority; + } } mutex0 = mutex0->owner_next; } while (mutex0 != NULL); @@ -153,7 +155,7 @@ static osMutexId_t svcRtxMutexNew (const osMutexAttr_t *attr) { //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] mutex = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_mutex_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (mutex != NULL) { uint32_t used; osRtxMutexMemUsage.cnt_alloc++; @@ -336,19 +338,19 @@ static osStatus_t svcRtxMutexRelease (osMutexId_t mutex_id) { } // Restore running Thread priority - if ((mutex->attr & osMutexPrioInherit) != 0U) { - priority = thread->priority_base; - mutex0 = thread->mutex_list; - // Check mutexes owned by running Thread - while (mutex0 != NULL) { + priority = thread->priority_base; + mutex0 = thread->mutex_list; + // Check mutexes owned by running Thread + while (mutex0 != NULL) { + if ((mutex0->attr & osMutexPrioInherit) != 0U) { if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) { // Higher priority Thread is waiting for Mutex priority = mutex0->thread_list->priority; } - mutex0 = mutex0->owner_next; } - thread->priority = priority; + mutex0 = mutex0->owner_next; } + thread->priority = priority; // Check if Thread is waiting for a Mutex if (mutex->thread_list != NULL) { @@ -428,21 +430,21 @@ static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) { } // Restore owner Thread priority - if ((mutex->attr & osMutexPrioInherit) != 0U) { - priority = thread->priority_base; - mutex0 = thread->mutex_list; - // Check Mutexes owned by Thread - while (mutex0 != NULL) { + priority = thread->priority_base; + mutex0 = thread->mutex_list; + // Check Mutexes owned by Thread + while (mutex0 != NULL) { + if ((mutex0->attr & osMutexPrioInherit) != 
0U) { if ((mutex0->thread_list != NULL) && (mutex0->thread_list->priority > priority)) { // Higher priority Thread is waiting for Mutex priority = mutex0->thread_list->priority; } - mutex0 = mutex0->owner_next; - } - if (thread->priority != priority) { - thread->priority = priority; - osRtxThreadListSort(thread); } + mutex0 = mutex0->owner_next; + } + if (thread->priority != priority) { + thread->priority = priority; + osRtxThreadListSort(thread); } // Unblock waiting threads @@ -464,7 +466,7 @@ static osStatus_t svcRtxMutexDelete (osMutexId_t mutex_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, mutex); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxMutexMemUsage.cnt_free++; #endif } @@ -492,7 +494,7 @@ osMutexId_t osMutexNew (const osMutexAttr_t *attr) { osMutexId_t mutex_id; EvrRtxMutexNew(attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexError(NULL, (int32_t)osErrorISR); mutex_id = NULL; } else { @@ -505,7 +507,7 @@ osMutexId_t osMutexNew (const osMutexAttr_t *attr) { const char *osMutexGetName (osMutexId_t mutex_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexGetName(mutex_id, NULL); name = NULL; } else { @@ -519,7 +521,7 @@ osStatus_t osMutexAcquire (osMutexId_t mutex_id, uint32_t timeout) { osStatus_t status; EvrRtxMutexAcquire(mutex_id, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexError(mutex_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -533,7 +535,7 @@ osStatus_t osMutexRelease (osMutexId_t mutex_id) { osStatus_t status; EvrRtxMutexRelease(mutex_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexError(mutex_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -546,7 +548,7 @@ osStatus_t osMutexRelease (osMutexId_t mutex_id) { osThreadId_t osMutexGetOwner (osMutexId_t mutex_id) { osThreadId_t thread; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexGetOwner(mutex_id, NULL); thread = NULL; } else { @@ -560,7 +562,7 @@ osStatus_t osMutexDelete (osMutexId_t mutex_id) { osStatus_t status; EvrRtxMutexDelete(mutex_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxMutexError(mutex_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_semaphore.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_semaphore.c index ebca85543c0..b96939aca3a 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_semaphore.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_semaphore.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2018 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxSemaphoreMemUsage \ __attribute__((section(".data.os.semaphore.obj"))) = { 0U, 0U, 0U }; @@ -172,7 +172,7 @@ static osSemaphoreId_t svcRtxSemaphoreNew (uint32_t max_count, uint32_t initial_ //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] semaphore = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_semaphore_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (semaphore != NULL) { uint32_t used; osRtxSemaphoreMemUsage.cnt_alloc++; @@ -346,7 +346,7 @@ static osStatus_t svcRtxSemaphoreDelete (osSemaphoreId_t semaphore_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, semaphore); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxSemaphoreMemUsage.cnt_free++; #endif } @@ -432,7 +432,7 @@ osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, cons osSemaphoreId_t semaphore_id; EvrRtxSemaphoreNew(max_count, initial_count, attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxSemaphoreError(NULL, (int32_t)osErrorISR); semaphore_id = NULL; } else { @@ -445,7 +445,7 @@ osSemaphoreId_t osSemaphoreNew (uint32_t max_count, uint32_t initial_count, cons const char *osSemaphoreGetName (osSemaphoreId_t semaphore_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxSemaphoreGetName(semaphore_id, NULL); name = NULL; } else { @@ -459,7 +459,7 @@ osStatus_t osSemaphoreAcquire (osSemaphoreId_t semaphore_id, uint32_t timeout) { osStatus_t status; EvrRtxSemaphoreAcquire(semaphore_id, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { status = isrRtxSemaphoreAcquire(semaphore_id, timeout); } else { status = __svcSemaphoreAcquire(semaphore_id, timeout); @@ -472,7 +472,7 @@ osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) { osStatus_t status; EvrRtxSemaphoreRelease(semaphore_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { status = isrRtxSemaphoreRelease(semaphore_id); } else { status = __svcSemaphoreRelease(semaphore_id); @@ -484,7 +484,7 @@ osStatus_t osSemaphoreRelease (osSemaphoreId_t semaphore_id) { uint32_t osSemaphoreGetCount (osSemaphoreId_t semaphore_id) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { count = svcRtxSemaphoreGetCount(semaphore_id); } else { count = __svcSemaphoreGetCount(semaphore_id); @@ -497,7 +497,7 @@ osStatus_t osSemaphoreDelete (osSemaphoreId_t semaphore_id) { osStatus_t status; EvrRtxSemaphoreDelete(semaphore_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxSemaphoreError(semaphore_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_system.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_system.c index a0583495cbf..bc969c80943 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_system.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_system.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -122,38 +122,40 @@ void osRtxTick_Handler (void) { OS_Tick_AcknowledgeIRQ(); osRtxInfo.kernel.tick++; - // Process Timers - if (osRtxInfo.timer.tick != NULL) { - osRtxInfo.timer.tick(); - } - // Process Thread Delays osRtxThreadDelayTick(); osRtxThreadDispatch(NULL); + // Process Timers + if (osRtxInfo.timer.tick != NULL) { + osRtxInfo.timer.tick(); + } + // Check Round Robin timeout if (osRtxInfo.thread.robin.timeout != 0U) { - if (osRtxInfo.thread.robin.thread != osRtxInfo.thread.run.next) { - // Reset Round Robin - osRtxInfo.thread.robin.thread = osRtxInfo.thread.run.next; - osRtxInfo.thread.robin.tick = osRtxInfo.thread.robin.timeout; - } else { - if (osRtxInfo.thread.robin.tick != 0U) { - osRtxInfo.thread.robin.tick--; + thread = osRtxInfo.thread.run.next; + if (thread != osRtxInfo.thread.robin.thread) { + osRtxInfo.thread.robin.thread = thread; + if (thread->delay == 0U) { + // Reset Round Robin + thread->delay = osRtxInfo.thread.robin.timeout; } - if (osRtxInfo.thread.robin.tick == 0U) { - // Round Robin Timeout - if (osRtxKernelGetState() == osRtxKernelRunning) { - thread = osRtxInfo.thread.ready.thread_list; - if ((thread != NULL) && (thread->priority == osRtxInfo.thread.robin.thread->priority)) { - osRtxThreadListRemove(thread); - osRtxThreadReadyPut(osRtxInfo.thread.robin.thread); - EvrRtxThreadPreempted(osRtxInfo.thread.robin.thread); - osRtxThreadSwitch(thread); - osRtxInfo.thread.robin.thread = thread; - osRtxInfo.thread.robin.tick = osRtxInfo.thread.robin.timeout; - } + } + if (thread->delay != 0U) { + thread->delay--; + } + if (thread->delay == 0U) { + // Round Robin Timeout + if (osRtxKernelGetState() == osRtxKernelRunning) { + thread = osRtxInfo.thread.ready.thread_list; + if ((thread != NULL) && (thread->priority == osRtxInfo.thread.robin.thread->priority)) { + osRtxThreadListRemove(thread); + osRtxThreadReadyPut(osRtxInfo.thread.robin.thread); + EvrRtxThreadPreempted(osRtxInfo.thread.robin.thread); + osRtxThreadSwitch(thread); + osRtxInfo.thread.robin.thread = thread; + thread->delay = osRtxInfo.thread.robin.timeout; } } } @@ -208,6 +210,6 @@ void osRtxPostProcess (os_object_t *object) { osRtxInfo.kernel.pendSV = 1U; } } else { - (void)osRtxErrorNotify(osRtxErrorISRQueueOverflow, object); + (void)osRtxKernelErrorNotify(osRtxErrorISRQueueOverflow, object); } } diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_thread.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_thread.c index 96a1e3196f0..6b572c60626 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_thread.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_thread.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxThreadMemUsage \ __attribute__((section(".data.os.thread.obj"))) = { 0U, 0U, 0U }; @@ -313,6 +313,7 @@ static void osRtxThreadDelayRemove (os_thread_t *thread) { osRtxInfo.thread.delay_list = thread->delay_next; } } + thread->delay = 0U; } /// Process Thread Delay Tick (executed each System Tick). 
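The round-robin rework in osRtxTick_Handler (rtx_system.c above) retires the global robin.tick counter: the remaining time slice now rides in the running thread's own delay field, which is why osRtxThreadDelayRemove above also clears thread->delay, so a stale slice count is never mistaken for a wait timeout. A minimal host-side model of that accounting, under illustrative names (thread_t, rr_t, tick_rr) rather than the RTX types:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified model of the reworked round-robin tick; it only tracks the
   slice counter, not the ready-list rotation that follows on expiry. */
typedef struct { uint32_t delay; } thread_t;

typedef struct {
  thread_t *robin_thread;  /* thread currently being time-sliced */
  uint32_t  timeout;       /* configured slice length in ticks   */
} rr_t;

/* Returns 1 when the running thread's slice has expired. */
static int tick_rr(rr_t *rr, thread_t *running) {
  if (rr->timeout == 0U) {
    return 0;                       /* round robin disabled */
  }
  if (running != rr->robin_thread) {
    rr->robin_thread = running;     /* new thread on the CPU */
    if (running->delay == 0U) {
      running->delay = rr->timeout; /* arm a fresh slice only if none left */
    }
  }
  if (running->delay != 0U) {
    running->delay--;
  }
  return (running->delay == 0U);
}

int main(void) {
  thread_t a = { 0U }, b = { 0U };
  rr_t rr = { NULL, 3U };
  for (int t = 0; t < 3; t++) {
    printf("tick %d on A: expired=%d\n", t, tick_rr(&rr, &a)); /* 0 0 1 */
  }
  printf("tick on B: expired=%d\n", tick_rr(&rr, &b));         /* 0     */
  return 0;
}

Note that the slice is re-armed only when the incoming thread's counter is already zero, so a thread preempted mid-slice resumes with whatever portion it had left, matching the behaviour of the new handler.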
@@ -420,7 +421,6 @@ void osRtxThreadSwitch (os_thread_t *thread) { thread->state = osRtxThreadRunning; osRtxInfo.thread.run.next = thread; - osRtxThreadStackCheck(); EvrRtxThreadSwitched(thread); } @@ -509,22 +509,25 @@ bool_t osRtxThreadWaitEnter (uint8_t state, uint32_t timeout) { return TRUE; } +#ifdef RTX_STACK_CHECK /// Check current running Thread Stack. +/// \param[in] thread running thread. +/// \return true - success, false - failure. +//lint -esym(714,osRtxThreadStackCheck) "Referenced by Exception handlers" //lint -esym(759,osRtxThreadStackCheck) "Prototype in header" -//lint -esym(765,osRtxThreadStackCheck) "Global scope (can be overridden)" -__WEAK void osRtxThreadStackCheck (void) { - os_thread_t *thread; +//lint -esym(765,osRtxThreadStackCheck) "Global scope" +bool_t osRtxThreadStackCheck (const os_thread_t *thread) { - thread = osRtxThreadGetRunning(); - if (thread != NULL) { - //lint -e{923} "cast from pointer to unsigned int" - //lint -e{9079} -e{9087} "cast between pointers to different object types" - if ((thread->sp <= (uint32_t)thread->stack_mem) || - (*((uint32_t *)thread->stack_mem) != osRtxStackMagicWord)) { - (void)osRtxErrorNotify(osRtxErrorStackUnderflow, thread); - } + //lint -e{923} "cast from pointer to unsigned int" + //lint -e{9079} -e{9087} "cast between pointers to different object types" + if ((thread->sp <= (uint32_t)thread->stack_mem) || + (*((uint32_t *)thread->stack_mem) != osRtxStackMagicWord)) { + //lint -e{904} "Return statement before end of function" [MISRA Note 1] + return FALSE; } + return TRUE; } +#endif #ifdef RTX_TF_M_EXTENSION /// Get TrustZone Module Identifier of running Thread. @@ -617,7 +620,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const } } if (stack_mem != NULL) { - //lint -e(923) -e(9078) "cast from pointer to unsigned int" [MISRA Note 7] + //lint -e{923} "cast from pointer to unsigned int" [MISRA Note 7] if ((((uint32_t)stack_mem & 7U) != 0U) || (stack_size == 0U)) { EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack); //lint -e{904} "Return statement before end of function" [MISRA Note 1] @@ -646,10 +649,12 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const } // Check stack size - if ((stack_size != 0U) && (((stack_size & 7U) != 0U) || (stack_size < (64U + 8U)))) { - EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack); - //lint -e{904} "Return statement before end of function" [MISRA Note 1] - return NULL; + if (stack_size != 0U) { + if (((stack_size & 7U) != 0U) || (stack_size < (64U + 8U)) || (stack_size > 0x7FFFFFFFU)) { + EvrRtxThreadError(NULL, osRtxErrorInvalidThreadStack); + //lint -e{904} "Return statement before end of function" [MISRA Note 1] + return NULL; + } } // Allocate object memory if not provided @@ -661,7 +666,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] thread = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_thread_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (thread != NULL) { uint32_t used; osRtxThreadMemUsage.cnt_alloc++; @@ -701,7 +706,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, thread); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxThreadMemUsage.cnt_free++; #endif } @@ -729,7 +734,7 @@ static osThreadId_t 
svcRtxThreadNew (osThreadFunc_t func, void *argument, const } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, thread); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxThreadMemUsage.cnt_free++; #endif } @@ -803,7 +808,7 @@ static osThreadId_t svcRtxThreadNew (osThreadFunc_t func, void *argument, const } else { EvrRtxThreadError(NULL, (int32_t)osErrorNoMemory); } - + if (thread != NULL) { osRtxThreadDispatch(thread); } @@ -1042,8 +1047,6 @@ static osStatus_t svcRtxThreadSuspend (osThreadId_t thread_id) { // Update Thread State and put it into Delay list thread->state = osRtxThreadBlocked; - thread->thread_prev = NULL; - thread->thread_next = NULL; osRtxThreadDelayInsert(thread, osWaitForever); } @@ -1079,6 +1082,19 @@ static osStatus_t svcRtxThreadResume (osThreadId_t thread_id) { return osOK; } +/// Wakeup a thread waiting to join. +/// \param[in] thread thread object. +static void osRtxThreadJoinWakeup (os_thread_t *thread) { + + if (thread->thread_join != NULL) { + osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE); + EvrRtxThreadJoined(thread->thread_join); + } + if (thread->state == osRtxThreadWaitingJoin) { + thread->thread_next->thread_join = NULL; + } +} + /// Free Thread resources. /// \param[in] thread thread object. static void osRtxThreadFree (os_thread_t *thread) { @@ -1110,12 +1126,31 @@ static void osRtxThreadFree (os_thread_t *thread) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, thread); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE osRtxThreadMemUsage.cnt_free++; #endif } } +/// Destroy a Thread. +/// \param[in] thread thread object. +static void osRtxThreadDestroy (os_thread_t *thread) { + + if ((thread->attr & osThreadJoinable) == 0U) { + osRtxThreadFree(thread); + } else { + // Update Thread State and put it into Terminate Thread list + thread->state = osRtxThreadTerminated; + thread->thread_prev = NULL; + thread->thread_next = osRtxInfo.thread.terminate_list; + if (osRtxInfo.thread.terminate_list != NULL) { + osRtxInfo.thread.terminate_list->thread_prev = thread; + } + osRtxInfo.thread.terminate_list = thread; + } + EvrRtxThreadDestroyed(thread); +} + /// Detach a thread (thread storage can be reclaimed when thread terminates). 
/// \note API identical to osThreadDetach static osStatus_t svcRtxThreadDetach (osThreadId_t thread_id) { @@ -1151,6 +1186,7 @@ static osStatus_t svcRtxThreadDetach (osThreadId_t thread_id) { /// \note API identical to osThreadJoin static osStatus_t svcRtxThreadJoin (osThreadId_t thread_id) { os_thread_t *thread = osRtxThreadId(thread_id); + os_thread_t *thread_running; osStatus_t status; // Check parameters @@ -1182,7 +1218,9 @@ static osStatus_t svcRtxThreadJoin (osThreadId_t thread_id) { } else { // Suspend current Thread if (osRtxThreadWaitEnter(osRtxThreadWaitingJoin, osWaitForever)) { - thread->thread_join = osRtxThreadGetRunning(); + thread_running = osRtxThreadGetRunning(); + thread_running->thread_next = thread; + thread->thread_join = thread_running; thread->attr &= ~osThreadJoinable; EvrRtxThreadJoinPending(thread); } else { @@ -1213,30 +1251,26 @@ static void svcRtxThreadExit (void) { osRtxMutexOwnerRelease(thread->mutex_list); // Wakeup Thread waiting to Join - if (thread->thread_join != NULL) { - osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE); - EvrRtxThreadJoined(thread->thread_join); - } + osRtxThreadJoinWakeup(thread); // Switch to next Ready Thread - thread->sp = __get_PSP(); osRtxThreadSwitch(osRtxThreadListGet(&osRtxInfo.thread.ready)); - osRtxThreadSetRunning(NULL); - if ((thread->attr & osThreadJoinable) == 0U) { - osRtxThreadFree(thread); - } else { - // Update Thread State and put it into Terminate Thread list - thread->state = osRtxThreadTerminated; - thread->thread_prev = NULL; - thread->thread_next = osRtxInfo.thread.terminate_list; - if (osRtxInfo.thread.terminate_list != NULL) { - osRtxInfo.thread.terminate_list->thread_prev = thread; - } - osRtxInfo.thread.terminate_list = thread; + // Update Stack Pointer + thread->sp = __get_PSP(); +#ifdef RTX_STACK_CHECK + // Check Stack usage + if (!osRtxThreadStackCheck(thread)) { + osRtxThreadSetRunning(osRtxInfo.thread.run.next); + (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread); } +#endif - EvrRtxThreadDestroyed(thread); + // Mark running thread as deleted + osRtxThreadSetRunning(NULL); + + // Destroy Thread + osRtxThreadDestroy(thread); } /// Terminate execution of a thread. 
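Both svcRtxThreadExit above and svcRtxThreadTerminate in the next hunk now capture PSP after the context switch and, under RTX_STACK_CHECK, pass the dying thread through the reworked osRtxThreadStackCheck, which reports the overrun to the caller instead of invoking the error handler itself. The test is a two-part watermark check: the saved sp must still sit above the stack base, and the first word of the stack area must still hold the magic value planted at creation (0xE25A2EA5, osRtxStackMagicWord in rtx_os.h). A host-side sketch of that predicate (stack_ok is an illustrative name, with the sense inverted so true means healthy):

#include <stdint.h>
#include <stdio.h>

#define STACK_MAGIC 0xE25A2EA5U   /* RTX osRtxStackMagicWord */

/* Mirrors the two conditions osRtxThreadStackCheck tests: sp above the
   stack base, and the base watermark intact. */
static int stack_ok(uintptr_t sp, const uint32_t *stack_mem) {
  return (sp > (uintptr_t)stack_mem) && (stack_mem[0] == STACK_MAGIC);
}

int main(void) {
  uint32_t stack[64];
  stack[0] = STACK_MAGIC;          /* watermark written at thread creation */
  printf("healthy:   %d\n", stack_ok((uintptr_t)&stack[32], stack)); /* 1 */
  stack[0] = 0U;                   /* simulate an overflow trampling the base */
  printf("clobbered: %d\n", stack_ok((uintptr_t)&stack[32], stack)); /* 0 */
  return 0;
}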
@@ -1285,34 +1319,28 @@ static osStatus_t svcRtxThreadTerminate (osThreadId_t thread_id) { osRtxMutexOwnerRelease(thread->mutex_list); // Wakeup Thread waiting to Join - if (thread->thread_join != NULL) { - osRtxThreadWaitExit(thread->thread_join, (uint32_t)osOK, FALSE); - EvrRtxThreadJoined(thread->thread_join); - } + osRtxThreadJoinWakeup(thread); // Switch to next Ready Thread when terminating running Thread if (thread->state == osRtxThreadRunning) { - thread->sp = __get_PSP(); osRtxThreadSwitch(osRtxThreadListGet(&osRtxInfo.thread.ready)); + // Update Stack Pointer + thread->sp = __get_PSP(); +#ifdef RTX_STACK_CHECK + // Check Stack usage + if (!osRtxThreadStackCheck(thread)) { + osRtxThreadSetRunning(osRtxInfo.thread.run.next); + (void)osRtxKernelErrorNotify(osRtxErrorStackOverflow, thread); + } +#endif + // Mark running thread as deleted osRtxThreadSetRunning(NULL); } else { osRtxThreadDispatch(NULL); } - if ((thread->attr & osThreadJoinable) == 0U) { - osRtxThreadFree(thread); - } else { - // Update Thread State and put it into Terminate Thread list - thread->state = osRtxThreadTerminated; - thread->thread_prev = NULL; - thread->thread_next = osRtxInfo.thread.terminate_list; - if (osRtxInfo.thread.terminate_list != NULL) { - osRtxInfo.thread.terminate_list->thread_prev = thread; - } - osRtxInfo.thread.terminate_list = thread; - } - - EvrRtxThreadDestroyed(thread); + // Destroy Thread + osRtxThreadDestroy(thread); } return status; @@ -1539,8 +1567,8 @@ SVC0_3 (ThreadNew, osThreadId_t, osThreadFunc_t, void *, const osTh SVC0_1 (ThreadGetName, const char *, osThreadId_t) SVC0_0 (ThreadGetId, osThreadId_t) SVC0_1 (ThreadGetState, osThreadState_t, osThreadId_t) -SVC0_1 (ThreadGetStackSize, uint32_t, osThreadId_t) -SVC0_1 (ThreadGetStackSpace, uint32_t, osThreadId_t) +SVC0_1 (ThreadGetStackSize, uint32_t, osThreadId_t) +SVC0_1 (ThreadGetStackSpace, uint32_t, osThreadId_t) SVC0_2 (ThreadSetPriority, osStatus_t, osThreadId_t, osPriority_t) SVC0_1 (ThreadGetPriority, osPriority_t, osThreadId_t) SVC0_0 (ThreadYield, osStatus_t) @@ -1600,7 +1628,7 @@ uint32_t isrRtxThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) { /// Thread startup (Idle and Timer Thread). /// \return true - success, false - failure. 
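osRtxThreadStartup below now reaches the timer subsystem only through osRtxConfig.timer_setup and osRtxConfig.timer_thread, so a build configured without soft timers simply leaves those pointers NULL and neither the timer thread, its message queue, nor osRtxTimerThread is ever referenced or linked. A sketch of that configuration-pointer pattern, under illustrative names (cfg_t, startup; this is not the actual RTX config layout):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* The kernel calls the timer subsystem only through config pointers, so a
   NULL timer_setup drops the whole subsystem from the image. */
typedef struct {
  int32_t (*timer_setup)(void);    /* NULL when timers are not configured */
  void    (*timer_thread)(void *);
} cfg_t;

static int32_t demo_timer_setup(void)      { return 0; }    /* would create the MQ */
static void    demo_timer_thread(void *arg) { (void)arg; }  /* would drain the MQ  */

static int startup(const cfg_t *cfg) {
  if (cfg->timer_setup == NULL) {
    return 1;                      /* no timer support configured: still OK */
  }
  if (cfg->timer_setup() != 0) {
    return 0;                      /* setup failed (e.g. MQ allocation)     */
  }
  /* the real code creates a thread running cfg->timer_thread(mq) here */
  cfg->timer_thread(NULL);
  return 1;
}

int main(void) {
  cfg_t with    = { demo_timer_setup, demo_timer_thread };
  cfg_t without = { NULL, NULL };
  printf("%d %d\n", startup(&with), startup(&without));     /* 1 1 */
  return 0;
}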
bool_t osRtxThreadStartup (void) { - bool_t ret = TRUE; + bool_t ret = FALSE; // Create Idle Thread osRtxInfo.thread.idle = osRtxThreadId( @@ -1608,13 +1636,17 @@ bool_t osRtxThreadStartup (void) { ); // Create Timer Thread - if (osRtxConfig.timer_mq_mcnt != 0U) { - osRtxInfo.timer.thread = osRtxThreadId( - svcRtxThreadNew(osRtxTimerThread, NULL, osRtxConfig.timer_thread_attr) - ); - if (osRtxInfo.timer.thread == NULL) { - ret = FALSE; + if (osRtxConfig.timer_setup != NULL) { + if (osRtxConfig.timer_setup() == 0) { + osRtxInfo.timer.thread = osRtxThreadId( + svcRtxThreadNew(osRtxConfig.timer_thread, osRtxInfo.timer.mq, osRtxConfig.timer_thread_attr) + ); + if (osRtxInfo.timer.thread != NULL) { + ret = TRUE; + } } + } else { + ret = TRUE; } return ret; @@ -1628,7 +1660,7 @@ osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAtt osThreadId_t thread_id; EvrRtxThreadNew(func, argument, attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(NULL, (int32_t)osErrorISR); thread_id = NULL; } else { @@ -1641,7 +1673,7 @@ osThreadId_t osThreadNew (osThreadFunc_t func, void *argument, const osThreadAtt const char *osThreadGetName (osThreadId_t thread_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetName(thread_id, NULL); name = NULL; } else { @@ -1654,7 +1686,7 @@ const char *osThreadGetName (osThreadId_t thread_id) { osThreadId_t osThreadGetId (void) { osThreadId_t thread_id; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { thread_id = svcRtxThreadGetId(); } else { thread_id = __svcThreadGetId(); @@ -1666,7 +1698,7 @@ osThreadId_t osThreadGetId (void) { osThreadState_t osThreadGetState (osThreadId_t thread_id) { osThreadState_t state; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetState(thread_id, osThreadError); state = osThreadError; } else { @@ -1679,7 +1711,7 @@ osThreadState_t osThreadGetState (osThreadId_t thread_id) { uint32_t osThreadGetStackSize (osThreadId_t thread_id) { uint32_t stack_size; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetStackSize(thread_id, 0U); stack_size = 0U; } else { @@ -1692,7 +1724,7 @@ uint32_t osThreadGetStackSize (osThreadId_t thread_id) { uint32_t osThreadGetStackSpace (osThreadId_t thread_id) { uint32_t stack_space; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetStackSpace(thread_id, 0U); stack_space = 0U; } else { @@ -1706,7 +1738,7 @@ osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) { osStatus_t status; EvrRtxThreadSetPriority(thread_id, priority); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1719,7 +1751,7 @@ osStatus_t osThreadSetPriority (osThreadId_t thread_id, osPriority_t priority) { osPriority_t osThreadGetPriority (osThreadId_t thread_id) { osPriority_t priority; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetPriority(thread_id, osPriorityError); priority = osPriorityError; } else { @@ -1733,7 +1765,7 @@ osStatus_t osThreadYield (void) { osStatus_t status; EvrRtxThreadYield(); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(NULL, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1747,7 +1779,7 @@ 
osStatus_t osThreadSuspend (osThreadId_t thread_id) { osStatus_t status; EvrRtxThreadSuspend(thread_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1761,7 +1793,7 @@ osStatus_t osThreadResume (osThreadId_t thread_id) { osStatus_t status; EvrRtxThreadResume(thread_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1775,7 +1807,7 @@ osStatus_t osThreadDetach (osThreadId_t thread_id) { osStatus_t status; EvrRtxThreadDetach(thread_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1789,7 +1821,7 @@ osStatus_t osThreadJoin (osThreadId_t thread_id) { osStatus_t status; EvrRtxThreadJoin(thread_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1811,7 +1843,7 @@ osStatus_t osThreadTerminate (osThreadId_t thread_id) { osStatus_t status; EvrRtxThreadTerminate(thread_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadError(thread_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -1824,7 +1856,7 @@ osStatus_t osThreadTerminate (osThreadId_t thread_id) { uint32_t osThreadGetCount (void) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadGetCount(0U); count = 0U; } else { @@ -1837,7 +1869,7 @@ uint32_t osThreadGetCount (void) { uint32_t osThreadEnumerate (osThreadId_t *thread_array, uint32_t array_items) { uint32_t count; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadEnumerate(thread_array, array_items, 0U); count = 0U; } else { @@ -1851,7 +1883,7 @@ uint32_t osThreadFlagsSet (osThreadId_t thread_id, uint32_t flags) { uint32_t thread_flags; EvrRtxThreadFlagsSet(thread_id, flags); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { thread_flags = isrRtxThreadFlagsSet(thread_id, flags); } else { thread_flags = __svcThreadFlagsSet(thread_id, flags); @@ -1864,7 +1896,7 @@ uint32_t osThreadFlagsClear (uint32_t flags) { uint32_t thread_flags; EvrRtxThreadFlagsClear(flags); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR); thread_flags = (uint32_t)osErrorISR; } else { @@ -1877,7 +1909,7 @@ uint32_t osThreadFlagsClear (uint32_t flags) { uint32_t osThreadFlagsGet (void) { uint32_t thread_flags; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadFlagsGet(0U); thread_flags = 0U; } else { @@ -1891,7 +1923,7 @@ uint32_t osThreadFlagsWait (uint32_t flags, uint32_t options, uint32_t timeout) uint32_t thread_flags; EvrRtxThreadFlagsWait(flags, options, timeout); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxThreadFlagsError(NULL, (int32_t)osErrorISR); thread_flags = (uint32_t)osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_timer.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_timer.c index de3c98712ed..8cdca2944c3 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_timer.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_timer.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019 Arm Limited. 
All rights reserved. + * Copyright (c) 2013-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,7 +27,7 @@ // OS Runtime Object Memory Usage -#if ((defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0))) +#ifdef RTX_OBJ_MEM_USAGE osRtxObjectMemUsage_t osRtxTimerMemUsage \ __attribute__((section(".data.os.timer.obj"))) = { 0U, 0U, 0U }; @@ -93,8 +93,9 @@ static void TimerUnlink (const os_timer_t *timer) { /// Timer Tick (called each SysTick). static void osRtxTimerTick (void) { - os_timer_t *timer; - osStatus_t status; + os_thread_t *thread_running; + os_timer_t *timer; + osStatus_t status; timer = osRtxInfo.timer.list; if (timer == NULL) { @@ -102,12 +103,21 @@ static void osRtxTimerTick (void) { return; } + thread_running = osRtxThreadGetRunning(); + timer->tick--; while ((timer != NULL) && (timer->tick == 0U)) { TimerUnlink(timer); status = osMessageQueuePut(osRtxInfo.timer.mq, &timer->finfo, 0U, 0U); if (status != osOK) { - (void)osRtxErrorNotify(osRtxErrorTimerQueueOverflow, timer); + const os_thread_t *thread = osRtxThreadGetRunning(); + osRtxThreadSetRunning(osRtxInfo.thread.run.next); + (void)osRtxKernelErrorNotify(osRtxErrorTimerQueueOverflow, timer); + if (osRtxThreadGetRunning() == NULL) { + if (thread_running == thread) { + thread_running = NULL; + } + } } if (timer->type == osRtxTimerPeriodic) { TimerInsert(timer, timer->load); @@ -116,22 +126,37 @@ static void osRtxTimerTick (void) { } timer = osRtxInfo.timer.list; } + + osRtxThreadSetRunning(thread_running); } -/// Timer Thread -__WEAK __NO_RETURN void osRtxTimerThread (void *argument) { - os_timer_finfo_t finfo; - osStatus_t status; - (void) argument; +/// Setup Timer Thread objects. +//lint -esym(714,osRtxTimerSetup) "Referenced from library configuration" +//lint -esym(759,osRtxTimerSetup) "Prototype in header" +//lint -esym(765,osRtxTimerSetup) "Global scope" +int32_t osRtxTimerSetup (void) { + int32_t ret = -1; - osRtxInfo.timer.mq = osRtxMessageQueueId( - osMessageQueueNew(osRtxConfig.timer_mq_mcnt, sizeof(os_timer_finfo_t), osRtxConfig.timer_mq_attr) - ); - osRtxInfo.timer.tick = osRtxTimerTick; + if (osRtxMessageQueueTimerSetup() == 0) { + osRtxInfo.timer.tick = osRtxTimerTick; + ret = 0; + } + + return ret; +} + +/// Timer Thread +//lint -esym(714,osRtxTimerThread) "Referenced from library configuration" +//lint -esym(759,osRtxTimerThread) "Prototype in header" +//lint -esym(765,osRtxTimerThread) "Global scope" +__NO_RETURN void osRtxTimerThread (void *argument) { + os_timer_finfo_t finfo; + osStatus_t status; + osMessageQueueId_t mq = (osMessageQueueId_t)argument; for (;;) { //lint -e{934} "Taking address of near auto variable" - status = osMessageQueueGet(osRtxInfo.timer.mq, &finfo, NULL, osWaitForever); + status = osMessageQueueGet(mq, &finfo, NULL, osWaitForever); if (status == osOK) { EvrRtxTimerCallback(finfo.func, finfo.arg); (finfo.func)(finfo.arg); @@ -188,7 +213,7 @@ static osTimerId_t svcRtxTimerNew (osTimerFunc_t func, osTimerType_t type, void //lint -e{9079} "conversion from pointer to void to pointer to other type" [MISRA Note 5] timer = osRtxMemoryAlloc(osRtxInfo.mem.common, sizeof(os_timer_t), 1U); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef RTX_OBJ_MEM_USAGE if (timer != NULL) { uint32_t used; osRtxTimerMemUsage.cnt_alloc++; @@ -353,7 +378,7 @@ static osStatus_t svcRtxTimerDelete (osTimerId_t timer_id) { } else { (void)osRtxMemoryFree(osRtxInfo.mem.common, timer); } -#if (defined(OS_OBJ_MEM_USAGE) && (OS_OBJ_MEM_USAGE != 0)) +#ifdef 
RTX_OBJ_MEM_USAGE osRtxTimerMemUsage.cnt_free++; #endif } @@ -381,7 +406,7 @@ osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, osTimerId_t timer_id; EvrRtxTimerNew(func, type, argument, attr); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerError(NULL, (int32_t)osErrorISR); timer_id = NULL; } else { @@ -394,7 +419,7 @@ osTimerId_t osTimerNew (osTimerFunc_t func, osTimerType_t type, void *argument, const char *osTimerGetName (osTimerId_t timer_id) { const char *name; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerGetName(timer_id, NULL); name = NULL; } else { @@ -408,7 +433,7 @@ osStatus_t osTimerStart (osTimerId_t timer_id, uint32_t ticks) { osStatus_t status; EvrRtxTimerStart(timer_id, ticks); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerError(timer_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -422,7 +447,7 @@ osStatus_t osTimerStop (osTimerId_t timer_id) { osStatus_t status; EvrRtxTimerStop(timer_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerError(timer_id, (int32_t)osErrorISR); status = osErrorISR; } else { @@ -435,7 +460,7 @@ osStatus_t osTimerStop (osTimerId_t timer_id) { uint32_t osTimerIsRunning (osTimerId_t timer_id) { uint32_t is_running; - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerIsRunning(timer_id, 0U); is_running = 0U; } else { @@ -449,7 +474,7 @@ osStatus_t osTimerDelete (osTimerId_t timer_id) { osStatus_t status; EvrRtxTimerDelete(timer_id); - if (IsIrqMode() || IsIrqMasked()) { + if (IsException() || IsIrqMasked()) { EvrRtxTimerError(timer_id, (int32_t)osErrorISR); status = osErrorISR; } else { diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c b/cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c index b7c3e3e0b9a..3cce53c521c 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c @@ -1,11 +1,11 @@ /**************************************************************************//** * @file os_systick.c * @brief CMSIS OS Tick SysTick implementation - * @version V1.0.2 - * @date 6. March 2020 + * @version V1.0.3 + * @date 19. March 2021 ******************************************************************************/ /* - * Copyright (c) 2017-2020 ARM Limited. All rights reserved. + * Copyright (c) 2017-2021 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,7 +34,7 @@ #define SYSTICK_IRQ_PRIORITY 0xFFU #endif -static uint8_t PendST; +static uint8_t PendST __attribute__((section(".bss.os"))); // Setup OS Tick. __WEAK int32_t OS_Tick_Setup (uint32_t freq, IRQHandler_t handler) { @@ -127,7 +127,7 @@ __WEAK uint32_t OS_Tick_GetCount (void) { // Get OS Tick overflow status. 
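Two details in the os_systick.c hunks deserve unpacking. PendST is pinned into .bss.os so it sits with the rest of the zero-initialized kernel state. And OS_Tick_GetOverflow, in the hunk just below, stops reading SysTick->CTRL: COUNTFLAG (bit 16) is clear-on-read, so polling it for an overflow consumed the very event being tested, whereas ICSR.PENDSTSET merely reports the pending SysTick exception and can be read without side effects. The new body reduces to a bit extraction; a host-side sketch, assuming the standard ARMv7-M bit position (PENDSTSET = bit 26, matching SCB_ICSR_PENDSTSET_Pos in core_cm*.h):

#include <stdint.h>
#include <stdio.h>

#define ICSR_PENDSTSET_POS 26U
#define ICSR_PENDSTSET_MSK (1UL << ICSR_PENDSTSET_POS)

/* Pure form of the new OS_Tick_GetOverflow: extract the pending-SysTick
   bit from an ICSR value. On target the argument would be SCB->ICSR. */
static uint32_t tick_overflow(uint32_t icsr) {
  return (icsr & ICSR_PENDSTSET_MSK) >> ICSR_PENDSTSET_POS;
}

int main(void) {
  printf("%u\n", (unsigned)tick_overflow(0x04000000UL)); /* 1: SysTick pending */
  printf("%u\n", (unsigned)tick_overflow(0x00000000UL)); /* 0: no overflow     */
  return 0;
}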
__WEAK uint32_t OS_Tick_GetOverflow (void) { - return ((SysTick->CTRL >> 16) & 1U); + return ((SCB->ICSR & SCB_ICSR_PENDSTSET_Msk) >> SCB_ICSR_PENDSTSET_Pos); } #endif // SysTick diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armcc.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armcc.h index cc11ada3539..0d9c37428cf 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armcc.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armcc.h * @brief CMSIS compiler specific macros, functions, instructions - * @version V1.0.4 - * @date 30. July 2019 + * @version V1.0.5 + * @date 05. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2019 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -296,6 +296,34 @@ __attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(in /* ########################### Core Function Access ########################### */ +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(void); */ + +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __enable_fault_irq __enable_fiq + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +#define __disable_fault_irq __disable_fiq + /** \brief Get FPSCR (Floating Point Status/Control) \return Floating Point Status/Control register value diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h index 65a3b913467..e64eba93544 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang.h * @brief CMSIS compiler specific macros, functions, instructions - * @version V1.2.0 - * @date 05. August 2019 + * @version V1.2.1 + * @date 05. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2019 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,10 +27,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -372,6 +368,46 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) /* ########################### Core Function Access ########################### */ +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing the I-bit in the CPSR. + Can only be executed in Privileged modes. 
+ */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting the I-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + /** \brief Get FPSCR \details Returns the current value of the Floating Point Status/Control register. @@ -401,7 +437,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void) */ __STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) { -__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); } /** \brief Get Mode @@ -409,7 +445,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); */ __STATIC_FORCEINLINE uint32_t __get_mode(void) { - return (__get_CPSR() & 0x1FU); + return (__get_CPSR() & 0x1FU); } /** \brief Set Mode @@ -423,7 +459,7 @@ __STATIC_FORCEINLINE void __set_mode(uint32_t mode) /** \brief Get Stack Pointer \return Stack Pointer value */ -__STATIC_FORCEINLINE uint32_t __get_SP() +__STATIC_FORCEINLINE uint32_t __get_SP(void) { uint32_t result; __ASM volatile("MOV %0, sp" : "=r" (result) : : "memory"); @@ -441,7 +477,7 @@ __STATIC_FORCEINLINE void __set_SP(uint32_t stack) /** \brief Get USR/SYS Stack Pointer \return USR/SYS Stack Pointer value */ -__STATIC_FORCEINLINE uint32_t __get_SP_usr() +__STATIC_FORCEINLINE uint32_t __get_SP_usr(void) { uint32_t cpsr; uint32_t result; @@ -546,7 +582,7 @@ __STATIC_INLINE void __FPU_Enable(void) " VMOV D14,R2,R2 \n" " VMOV D15,R2,R2 \n" -#if __ARM_NEON == 1 +#if (defined(__ARM_NEON) && (__ARM_NEON == 1)) //Initialise D32 registers to 0 " VMOV D16,R2,R2 \n" " VMOV D17,R2,R2 \n" diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_gcc.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_gcc.h index 23d61205ddf..5f9a6aa29aa 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_gcc.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_gcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler specific macros, functions, instructions - * @version V1.3.0 - * @date 17. December 2019 + * @version V1.3.1 + * @date 05. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2019 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -37,7 +37,6 @@ #endif /* CMSIS compiler specific defines */ - #ifndef __ASM #define __ASM __asm #endif @@ -57,7 +56,7 @@ #define __NO_RETURN __attribute__((__noreturn__)) #endif #ifndef CMSIS_DEPRECATED - #define CMSIS_DEPRECATED __attribute__((deprecated)) + #define CMSIS_DEPRECATED __attribute__((deprecated)) #endif #ifndef __USED #define __USED __attribute__((used)) @@ -433,10 +432,11 @@ __STATIC_FORCEINLINE int16_t __REVSH(int16_t value) \param [in] op2 Number of Bits to rotate \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { op2 %= 32U; - if (op2 == 0U) { + if (op2 == 0U) + { return op1; } return (op1 >> op2) | (op1 << (32U - op2)); @@ -448,7 +448,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) \param [in] value is ignored by the processor. If required, a debugger can use it to store additional information about the breakpoint. */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** \brief Reverse bit order of value @@ -669,16 +669,36 @@ __STATIC_FORCEINLINE void __enable_irq(void) \details Disables IRQ interrupts by setting the I-bit in the CPSR. Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE void __disable_irq(void) +__STATIC_FORCEINLINE void __disable_irq(void) { __ASM volatile ("cpsid i" : : : "memory"); } +/** + \brief Enable FIQ + \details Enables FIQ interrupts by clearing the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} + +/** + \brief Disable FIQ + \details Disables FIQ interrupts by setting the F-bit in the CPSR. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} + /** \brief Get FPSCR \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value -*/ + \return Floating Point Status/Control register value + */ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ @@ -702,8 +722,8 @@ __STATIC_FORCEINLINE uint32_t __get_FPSCR(void) /** \brief Set FPSCR \details Assigns the given value to the Floating Point Status/Control register. 
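 
            Example (editor's illustrative sketch, not upstream text):
            \code
              uint32_t fpscr = __get_FPSCR();
              __set_FPSCR(fpscr & ~0x9FU);   // clear the cumulative exception flags (IDC, IXC, UFC, OFC, DZC, IOC)
            \endcode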
- \param [in] fpscr Floating Point Status/Control value to set -*/ + \param [in] fpscr Floating Point Status/Control value to set + */ __STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) { #if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ @@ -736,7 +756,7 @@ __STATIC_FORCEINLINE uint32_t __get_CPSR(void) */ __STATIC_FORCEINLINE void __set_CPSR(uint32_t cpsr) { -__ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); + __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); } /** \brief Get Mode @@ -744,7 +764,7 @@ __ASM volatile ("MSR cpsr, %0" : : "r" (cpsr) : "cc", "memory"); */ __STATIC_FORCEINLINE uint32_t __get_mode(void) { - return (__get_CPSR() & 0x1FU); + return (__get_CPSR() & 0x1FU); } /** \brief Set Mode @@ -810,7 +830,7 @@ __STATIC_FORCEINLINE uint32_t __get_FPEXC(void) { #if (__FPU_PRESENT == 1) uint32_t result; - __ASM volatile("VMRS %0, fpexc" : "=r" (result) ); + __ASM volatile("VMRS %0, fpexc" : "=r" (result) : : "memory"); return(result); #else return(0); @@ -833,8 +853,8 @@ __STATIC_FORCEINLINE void __set_FPEXC(uint32_t fpexc) #define __get_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MRC p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : "=r" (Rt) : : "memory" ) #define __set_CP(cp, op1, Rt, CRn, CRm, op2) __ASM volatile("MCR p" # cp ", " # op1 ", %0, c" # CRn ", c" # CRm ", " # op2 : : "r" (Rt) : "memory" ) -#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) -#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) +#define __get_CP64(cp, op1, Rt, CRm) __ASM volatile("MRRC p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : "=r" (Rt) : : "memory" ) +#define __set_CP64(cp, op1, Rt, CRm) __ASM volatile("MCRR p" # cp ", " # op1 ", %Q0, %R0, c" # CRm : : "r" (Rt) : "memory" ) #include "cmsis_cp15.h" diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Source/irq_ctrl_gic.c b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Source/irq_ctrl_gic.c index e60467099e2..15588bffab9 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Source/irq_ctrl_gic.c +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Source/irq_ctrl_gic.c @@ -1,11 +1,11 @@ /**************************************************************************//** * @file irq_ctrl_gic.c * @brief Interrupt controller handling implementation for GIC - * @version V1.1.0 - * @date 03. March 2020 + * @version V1.1.1 + * @date 29. March 2021 ******************************************************************************/ /* - * Copyright (c) 2017-2020 ARM Limited. All rights reserved. + * Copyright (c) 2017-2021 ARM Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -184,7 +184,7 @@ __WEAK int32_t IRQ_SetMode (IRQn_ID_t irqn, uint32_t mode) { if (val == IRQ_MODE_CPU_ALL) { cpu = 0xFFU; } else { - cpu = val >> IRQ_MODE_CPU_Pos; + cpu = (uint8_t)(val >> IRQ_MODE_CPU_Pos); } // Apply configuration if no mode error diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cachel1_armv7.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cachel1_armv7.h index d2c3e2291fd..abebc95f946 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cachel1_armv7.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cachel1_armv7.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file cachel1_armv7.h * @brief CMSIS Level 1 Cache API for Armv7-M and later - * @version V1.0.0 - * @date 03. March 2020 + * @version V1.0.1 + * @date 19. 
April 2021 ******************************************************************************/ /* - * Copyright (c) 2020 Arm Limited. All rights reserved. + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -48,7 +48,7 @@ #ifndef __SCB_ICACHE_LINE_SIZE #define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ -#endif +#endif /** \brief Enable I-Cache @@ -112,7 +112,7 @@ __STATIC_FORCEINLINE void SCB_InvalidateICache (void) \param[in] addr address \param[in] isize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (volatile void *addr, int32_t isize) { #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) if ( isize > 0 ) { @@ -325,13 +325,13 @@ __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) \param[in] addr address \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { @@ -355,13 +355,13 @@ __STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsiz \param[in] addr address \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { @@ -385,13 +385,13 @@ __STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize \param[in] addr address (aligned to 32-byte boundary) \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armcc.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armcc.h index 237ff6ec3ea..a955d471391 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armcc.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armcc.h * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file - * @version V5.2.1 - * @date 26. March 2020 + * @version V5.3.2 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. 
+ * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -63,9 +63,9 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static __inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE static __forceinline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __declspec(noreturn) #endif @@ -129,280 +129,7 @@ #ifndef __VECTOR_TABLE_ATTRIBUTE #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) -#endif - -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ - -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __enable_irq(); */ - - -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -/* intrinsic void __disable_irq(); */ - -/** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value - */ -__STATIC_INLINE uint32_t __get_CONTROL(void) -{ - register uint32_t __regControl __ASM("control"); - return(__regControl); -} - - -/** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set - */ -__STATIC_INLINE void __set_CONTROL(uint32_t control) -{ - register uint32_t __regControl __ASM("control"); - __regControl = control; -} - - -/** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value - */ -__STATIC_INLINE uint32_t __get_IPSR(void) -{ - register uint32_t __regIPSR __ASM("ipsr"); - return(__regIPSR); -} - - -/** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value - */ -__STATIC_INLINE uint32_t __get_APSR(void) -{ - register uint32_t __regAPSR __ASM("apsr"); - return(__regAPSR); -} - - -/** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value - */ -__STATIC_INLINE uint32_t __get_xPSR(void) -{ - register uint32_t __regXPSR __ASM("xpsr"); - return(__regXPSR); -} - - -/** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value - */ -__STATIC_INLINE uint32_t __get_PSP(void) -{ - register uint32_t __regProcessStackPointer __ASM("psp"); - return(__regProcessStackPointer); -} - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) -{ - register uint32_t __regProcessStackPointer __ASM("psp"); - __regProcessStackPointer = topOfProcStack; -} - - -/** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value - */ -__STATIC_INLINE uint32_t __get_MSP(void) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - return(__regMainStackPointer); -} - - -/** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). 
- \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - __regMainStackPointer = topOfMainStack; -} - - -/** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value - */ -__STATIC_INLINE uint32_t __get_PRIMASK(void) -{ - register uint32_t __regPriMask __ASM("primask"); - return(__regPriMask); -} - - -/** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask - */ -__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) -{ - register uint32_t __regPriMask __ASM("primask"); - __regPriMask = (priMask); -} - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) - -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __enable_fault_irq __enable_fiq - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __disable_fault_irq __disable_fiq - - -/** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value - */ -__STATIC_INLINE uint32_t __get_BASEPRI(void) -{ - register uint32_t __regBasePri __ASM("basepri"); - return(__regBasePri); -} - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) -{ - register uint32_t __regBasePri __ASM("basepri"); - __regBasePri = (basePri & 0xFFU); -} - - -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - register uint32_t __regBasePriMax __ASM("basepri_max"); - __regBasePriMax = (basePri & 0xFFU); -} - - -/** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value - */ -__STATIC_INLINE uint32_t __get_FAULTMASK(void) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - return(__regFaultMask); -} - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - __regFaultMask = (faultMask & (uint32_t)1U); -} - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ - - -/** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. 
- \return Floating Point Status/Control register value - */ -__STATIC_INLINE uint32_t __get_FPSCR(void) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - return(__regfpscr); -#else - return(0U); -#endif -} - - -/** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set - */ -__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - __regfpscr = (fpscr); -#else - (void)fpscr; -#endif -} - - -/*@} end of CMSIS_Core_RegAccFunctions */ - +#endif /* ########################## Core Instruction Access ######################### */ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface @@ -461,7 +188,7 @@ __STATIC_INLINE void __set_FPSCR(uint32_t fpscr) */ #define __DMB() __dmb(0xF) - + /** \brief Reverse byte order (32 bit) \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. @@ -616,187 +343,461 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif + + +/** + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. + */ +#define __CLREX __clrex + + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +#define __SSAT __ssat + + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +#define __USAT __usat + + +/** + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. 
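+
+           Example (editor's illustrative sketch, not upstream text):
+           \code
+             uint32_t r = __RRX(0x00000002U);   // r == 0x00000001 when the carry flag was 0; the carry enters bit 31
+           \endcode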
+ \param [in] value Value to rotate + \return Rotated value + */ +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif + + +/** + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) + + +/** + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) + + +/** + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRBT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRHT(value, ptr) __strt(value, ptr) + + +/** + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + */ +#define __STRT(value, ptr) __strt(value, ptr) + +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +/* intrinsic void __enable_irq(); */ + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. 
+ Can only be executed in Privileged modes. + */ +/* intrinsic void __disable_irq(); */ + +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} + + +/** + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set + */ +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; + __ISB(); +} + + +/** + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value + */ +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} + + +/** + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value + */ +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} + + +/** + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value + */ +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXH(value, ptr) __strex(value, ptr) -#else - #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXW(value, ptr) __strex(value, ptr) -#else - #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __CLREX __clrex +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). 
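+
+           Example (editor's illustrative sketch, not upstream text; the
+           application base address is assumed): a bootloader re-initialises
+           MSP from the target image's vector table before chaining to it:
+           \code
+             const uint32_t *vtor = (const uint32_t *)0x08004000U;   // assumed application base address
+             __set_MSP(vtor[0]);   // entry 0 of the vector table holds the initial stack pointer
+           \endcode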
+ \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __SSAT __ssat +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -#define __USAT __usat +__STATIC_INLINE uint32_t __get_PRIMASK(void) +{ + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#ifndef __NO_EMBEDDED_ASM -__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) { - rrx r0, r0 - bx lr + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) +#define __enable_fault_irq __enable_fiq /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) +#define __disable_fault_irq __disable_fiq /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -#define __STRBT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -#define __STRHT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -#define __STRT(value, ptr) __strt(value, ptr) +__STATIC_INLINE uint32_t __get_FAULTMASK(void) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); +} -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + +/** + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set + */ +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) +{ + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ + /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_INLINE uint32_t __get_FPSCR(void) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif } + /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
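+
+           Example (editor's illustrative sketch, not upstream text):
+           \code
+             uint32_t fpscr = __get_FPSCR();
+             fpscr = (fpscr & ~(3U << 22)) | (2U << 22);   // RMode[23:22] = 0b10: round towards minus infinity
+             __set_FPSCR(fpscr);
+           \endcode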
+ \param [in] fpscr Floating Point Status/Control value to set */ -__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif } -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -878,6 +879,8 @@ __attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + #endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ /*@} end of group CMSIS_SIMD_intrinsics */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang.h index 90de9dbf8f8..69114177477 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V5.3.1 - * @date 26. March 2020 + * @version V5.4.3 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,10 +29,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -136,451 +132,438 @@ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ - */ +*/ + +/* Define macros for porting to both thumb1 and thumb2. 
+ * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -/* intrinsic void __enable_irq(); see arm_compat.h */ - +#define __NOP __builtin_arm_nop /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -/* intrinsic void __disable_irq(); see arm_compat.h */ +#define __WFI __builtin_arm_wfi /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#define __WFE __builtin_arm_wfe -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; +#define __SEV __builtin_arm_sev - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __DSB() __builtin_arm_dsb(0xF) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. 
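+
+           Example (editor's illustrative sketch, not upstream text; buffer,
+           index, payload and data_ready are assumed shared variables):
+           \code
+             buffer[index] = payload;   // write the data first
+             __DMB();                   // order the payload write before the flag write
+             data_ready = 1U;           // then publish it to the consumer
+           \endcode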
*/ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} -#endif +#define __DMB() __builtin_arm_dmb(0xF) /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} +#define __REV(value) __builtin_bswap32(value) /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +#define __REV16(value) __ROR(__REV(value), 16) /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} +#define __REVSH(value) (int16_t)__builtin_bswap16(value) /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; +#define __BKPT(value) __ASM volatile ("bkpt "#value) - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
+ \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __LDREXB (uint8_t)__builtin_arm_ldrex /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} +#define __LDREXH (uint16_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; +#define __LDREXW (uint32_t)__builtin_arm_ldrex - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif + +/** + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. 
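+
+           Example (editor's illustrative sketch, not upstream text; counter
+           is an assumed volatile uint8_t): the exclusive pair is typically
+           used in a retry loop, e.g. a lock-free byte increment:
+           \code
+             uint8_t v;
+             do {
+               v = __LDREXB(&counter);   // load with the exclusive monitor armed
+             } while (__STREXB((uint8_t)(v + 1U), &counter) != 0U);   // retry if exclusivity was lost
+           \endcode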
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STREXB (uint32_t)__builtin_arm_strex /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} +#define __STREXH (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif +#define __STREXW (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __SSAT __builtin_arm_ssat /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Unsigned Saturate + \details Saturates an unsigned value. 
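+
+           Example (editor's illustrative sketch, not upstream text):
+           \code
+             uint32_t u = __USAT(300, 8);   // 300 saturates to 255 in the unsigned 8-bit range
+             uint32_t z = __USAT(-5, 8);    // negative input saturates to 0
+           \endcode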
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} +#define __USAT __builtin_arm_usat -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - + uint32_t result; -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } -#endif - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. 
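+
+           Example (editor's illustrative sketch, not upstream text; user_ptr
+           is assumed to be supplied by unprivileged code): a privileged
+           handler can use the *T loads to honour the caller's access rights:
+           \code
+             uint16_t v = __LDRHT(user_ptr);   // performs the access with unprivileged permissions
+           \endcode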
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); return(result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set - */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) -{ - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -593,631 +576,615 @@ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). 
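
/* Sketch of the saturation helpers in use, e.g. clamping a wide DSP
   accumulator into Q15 range. On Armv7-M/Armv8-M Mainline the definitions
   above emit single SSAT/USAT instructions; on Baseline cores the C fallbacks
   produce identical results. */
static int16_t saturate_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16U);   /* 40000 -> 32767, -40000 -> -32768 */
}

static uint32_t clamp_10bit(int32_t v)
{
  return __USAT(v, 10U);              /* -5 -> 0, 2000 -> 1023 */
}
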
- \return PSPLIM Register value + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. 
- - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. 
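
/* Sketch of the load-acquire/store-release pairs above: a one-word mailbox
   between an interrupt producer and a thread consumer. The store-release
   guarantees the payload write is visible before the flag reads as set, and
   the load-acquire keeps the payload read from being reordered ahead of the
   flag check. `payload` and `ready` are hypothetical. */
static volatile uint32_t payload;
static volatile uint32_t ready;

static void producer(uint32_t v)
{
  payload = v;
  __STL(1U, &ready);                /* release: publishes payload first */
}

static uint32_t consumer(void)
{
  while (__LDA(&ready) == 0U) { }   /* acquire: ordered before the payload read */
  return payload;
}
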
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} +#define __LDAEXH (uint16_t)__builtin_arm_ldaex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif +#define __STLEXB (uint32_t)__builtin_arm_stlex -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) -#endif +#define __STLEXH (uint32_t)__builtin_arm_stlex + /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) -#endif +#define __STLEX (uint32_t)__builtin_arm_stlex +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ -/*@} end of CMSIS_Core_RegAccFunctions */ +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ + */ -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} #endif + /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#define __NOP __builtin_arm_nop +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __WFI __builtin_arm_wfi +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief Set Control Register + \details Writes the given value to the Control Register. 
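
/* Sketch of a spinlock built from the acquire/release exclusives defined
   above (Armv8-M only); `lock` is hypothetical. */
static volatile uint32_t lock;

static void lock_acquire(void)
{
  do {
    while (__LDAEX(&lock) != 0U) { }   /* spin until the lock reads free */
  } while (__STLEX(1U, &lock) != 0U);  /* retry if the reservation was lost */
}

static void lock_release(void)
{
  __STL(0U, &lock);                    /* store-release hands the lock back */
}
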
+ \param [in] control Control Register value to set */ -#define __WFE __builtin_arm_wfe +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -#define __SEV __builtin_arm_sev +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -#define __ISB() __builtin_arm_isb(0xF) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#define __DSB() __builtin_arm_dsb(0xF) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -#define __DMB() __builtin_arm_dmb(0xF) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#define __REV(value) __builtin_bswap32(value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. 
+ \return PSP Register value */ -#define __REV16(value) __ROR(__REV(value), 16) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -#define __RBIT __builtin_arm_rbit +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). 
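
/* Note that __set_CONTROL() above now issues the ISB the architecture
   requires after a CONTROL write. A common use, sketched with a hypothetical
   `psp_top`: switching Thread mode to unprivileged execution on the process
   stack. */
static void enter_user_mode(uint32_t psp_top)
{
  __set_PSP(psp_top);                     /* select the thread's stack first */
  __set_CONTROL(__get_CONTROL() | 0x3U);  /* nPRIV = 1, SPSEL = 1 */
}
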
+ \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. 
+ \return Priority Mask value */ -#define __STREXB (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value */ -#define __STREXH (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#define __STREXW (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __SSAT __builtin_arm_ssat +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
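
/* Classic nestable critical section built on the PRIMASK accessors above. */
static uint32_t critical_enter(void)
{
  uint32_t primask = __get_PRIMASK();   /* remember the current mask state */
  __disable_irq();                      /* PRIMASK = 1 */
  return primask;
}

static void critical_exit(uint32_t primask)
{
  __set_PRIMASK(primask);   /* re-enables only if interrupts were enabled before */
}
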
*/ -#define __USAT __builtin_arm_usat +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { uint32_t result; - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. 
+ \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
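
/* Sketch: mask only low-urgency interrupts instead of all of them. The
   threshold 0x40 assumes the device's priority layout; it blocks priority
   values 0x40..0xFF while numerically lower (more urgent) priorities stay
   live. */
static void do_protected_work(void)
{
  uint32_t saved = __get_BASEPRI();
  __set_BASEPRI_MAX(0x40U);   /* raises the masking level, never lowers it */
  /* ... work shielded from interrupts with priority value >= 0x40 ... */
  __set_BASEPRI(saved);
}
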
+ \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1228,152 +1195,219 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - + /** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
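
/* Sketch: give an RTOS thread a hardware-checked stack floor via the PSPLIM
   setter above. On Armv8-M Baseline non-secure builds this is a silent no-op,
   as documented, since the register is RAZ/WI there. `thread_stack` is a
   hypothetical full-descending stack. */
static uint32_t thread_stack[256];

static void init_thread_stack(void)
{
  __set_PSPLIM((uint32_t)&thread_stack[0]);   /* stack-limit fault on overflow past here */
  __set_PSP((uint32_t)&thread_stack[256]);    /* initial stack top (one past the end) */
}
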
+ \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set */ -#define __STLEXB (uint32_t)__builtin_arm_stlex +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set */ -#define __STLEX (uint32_t)__builtin_arm_stlex +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -1452,6 +1486,8 @@ __STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { int32_t result; diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang_ltm.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang_ltm.h index 0e5c7349d3e..1e255d5907f 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang_ltm.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_armclang_ltm.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang_ltm.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V1.3.0 - * @date 26. March 2020 + * @version V1.5.3 + * @date 27. 
May 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,10 +29,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -136,1070 +132,1047 @@ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL Image$$STACKSEAL$$ZI$$Base +#endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + + +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif + +/** + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ +#define __NOP __builtin_arm_nop /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -/* intrinsic void __enable_irq(); see arm_compat.h */ +#define __WFI __builtin_arm_wfi /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -/* intrinsic void __disable_irq(); see arm_compat.h */ +#define __WFE __builtin_arm_wfe /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#define __SEV __builtin_arm_sev -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. 
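
/* Sketch of the new stack-sealing helper, assuming a secure image whose
   scatter file provides the STACKSEAL region that the __STACK_SEAL macro
   above names. The seal value is not a valid return address, so an exception
   return that runs past the top of the secure stack faults instead of
   consuming attacker-controlled data. */
extern uint64_t __STACK_SEAL;   /* Image$$STACKSEAL$$ZI$$Base, from the linker */

static void seal_secure_stack(void)
{
  __TZ_set_STACKSEAL_S((uint32_t *)&__STACK_SEAL);
}
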
- \return non-secure Control Register value + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; +#define __ISB() __builtin_arm_isb(0xF) - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __builtin_arm_dsb(0xF) /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __DMB() __builtin_arm_dmb(0xF) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} -#endif +#define __REV(value) __builtin_bswap32(value) /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} +#define __REV16(value) __ROR(__REV(value), 16) /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +#define __REVSH(value) (int16_t)__builtin_bswap16(value) /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. 
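
/* Quick reference for the byte-reverse helpers defined above. */
static void rev_examples(void)
{
  uint32_t net   = __REV(0x12345678U);        /* 0x78563412: whole-word swap   */
  uint32_t pairs = __REV16(0x12345678U);      /* 0x34127856: swap per halfword */
  int16_t  neg   = __REVSH((int16_t)0x0080);  /* 0x8000 -> -32768, sign kept   */
  (void)net; (void)pairs; (void)neg;          /* values shown for illustration */
}
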
+ \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) -{ - uint32_t result; +#define __BKPT(value) __ASM volatile ("bkpt "#value) - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); -} +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} +#define __LDREXB (uint8_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. 
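/* Usage sketch (editor's addition): a branch-light floor(log2(x)) built on
   __CLZ; the wrapper above defines __CLZ(0) as 32, so the zero case is
   guarded explicitly. */
static inline uint32_t ilog2_floor(uint32_t x)
{
  return (x == 0U) ? 0U : (31U - (uint32_t)__CLZ(x));
}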
- \param [in] topOfProcStack Process Stack Pointer value to set + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __LDREXH (uint16_t)__builtin_arm_ldrex /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} +#define __LDREXW (uint32_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __STREXB (uint32_t)__builtin_arm_strex /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} +#define __STREXH (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif +#define __STREXW (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. 
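/* Usage sketch (editor's addition): the canonical lock-free read-modify-write
   loop built on the exclusive pair above, for cores that implement LDREX/STREX;
   __STREXW returns 0 on success and 1 when the reservation was lost, in which
   case the update is retried. */
static inline uint32_t atomic_add_u32(volatile uint32_t *addr, uint32_t delta)
{
  uint32_t updated;
  do {
    updated = __LDREXW(addr) + delta;
  } while (__STREXW(updated, addr) != 0U);
  return updated;
}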
*/ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; +#define __CLREX __builtin_arm_clrex + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __SSAT __builtin_arm_ssat /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} +#define __USAT __builtin_arm_usat -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - + uint32_t result; -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. 
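/* Usage sketch (editor's addition): clamping a wide mixer accumulator to Q15
   before writeback with the saturation intrinsics above; the saturation width
   must be a compile-time constant so the builtin can emit a single SSAT. */
static inline int16_t clamp_to_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16);     /* saturates to [-32768, 32767] */
}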
- \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } -#endif - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) -/** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ - - -/** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. - */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); - return(result); + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); return(result); } -#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. 
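/* Usage sketch (editor's addition): a privileged service routine reading a
   buffer supplied by unprivileged code; LDRBT performs the load with
   unprivileged permissions, so an out-of-bounds user pointer faults instead
   of being honoured with privileged rights. Names are illustrative. */
static inline uint8_t svc_read_user_byte(volatile uint8_t *user_ptr)
{
  return __LDRBT(user_ptr);
}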
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); -} -#endif - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + uint32_t result; + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. 
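/* Usage sketch (editor's addition, ARMv8-M only): a one-way handshake using
   the acquire/release pair above; the store-release orders the payload write
   before the flag, and the load-acquire orders the flag read before the
   payload, with no explicit DMB. Globals are illustrative. */
static uint32_t shared_payload;
static volatile uint32_t shared_ready;

static inline void publish(uint32_t value)
{
  shared_payload = value;
  __STL(1U, &shared_ready);            /* release: payload visible first */
}

static inline uint32_t consume(void)
{
  while (__LDA(&shared_ready) == 0U) { /* acquire: flag read first */
  }
  return shared_payload;
}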
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif -} +#define __LDAEXB (uint8_t)__builtin_arm_ldaex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif -} -#endif +#define __LDAEXH (uint16_t)__builtin_arm_ldaex /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} +#define __STLEXB (uint32_t)__builtin_arm_stlex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
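/* Usage sketch (editor's addition, ARMv8-M only): a minimal test-and-set lock
   from the exclusive acquire/release forms above; __LDAEX gives acquire
   ordering on entry and the store-release __STL gives release ordering on
   exit. */
static volatile uint32_t lock_word;

static inline void lock_acquire(void)
{
  do {
    while (__LDAEX(&lock_word) != 0U) {
      /* spin while held */
    }
  } while (__STLEX(1U, &lock_word) != 0U);   /* retry if reservation lost */
}

static inline void lock_release(void)
{
  __STL(0U, &lock_word);
}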
- \param [in] MainStackPtrLimit Main Stack Pointer value to set + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif +#define __STLEX (uint32_t)__builtin_arm_stlex #endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} #endif + /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} #endif -/*@} end of CMSIS_Core_RegAccFunctions */ - +/** + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value + */ +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief No Operation - \details No Operation does nothing. 
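/* Usage sketch (editor's addition): the simplest use of the PRIMASK-based
   pair defined above; suitable only for short sections never entered with
   interrupts already masked (a nesting-safe save/restore variant is sketched
   further down next to __get_PRIMASK/__set_PRIMASK). */
static volatile uint32_t event_count;

static inline void count_event(void)
{
  __disable_irq();
  event_count++;                       /* not atomic without the mask */
  __enable_irq();
}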
This instruction can be used for code alignment purposes. + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __NOP __builtin_arm_nop +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI __builtin_arm_wfi + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -#define __WFE __builtin_arm_wfe +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -#define __SEV __builtin_arm_sev +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -#define __ISB() __builtin_arm_isb(0xF) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#define __DSB() __builtin_arm_dsb(0xF) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -#define __DMB() __builtin_arm_dmb(0xF) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). 
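/* Usage sketch (editor's addition): IPSR reads 0 in Thread mode and the
   active exception number in Handler mode, which gives a cheap
   "called from an ISR?" test. */
static inline uint32_t in_interrupt_context(void)
{
  return (__get_IPSR() != 0U) ? 1U : 0U;
}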
+ \return PSP Register value */ -#define __REV(value) __builtin_bswap32(value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value */ -#define __REV16(value) __ROR(__REV(value), 16) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
+ \return MSP Register value */ -#define __RBIT __builtin_arm_rbit +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -#define __STREXB (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value */ -#define __STREXH (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#define __STREXW (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __SSAT __builtin_arm_ssat +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. 
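/* Usage sketch (editor's addition): a nesting-safe critical section that
   snapshots PRIMASK with the accessors above and restores it, rather than
   unconditionally re-enabling interrupts. */
static inline uint32_t critical_enter(void)
{
  uint32_t primask_state = __get_PRIMASK();
  __disable_irq();
  return primask_state;
}

static inline void critical_exit(uint32_t primask_state)
{
  __set_PRIMASK(primask_state);        /* safe even if already masked */
}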
+ Can only be executed in Privileged modes. */ -#define __USAT __builtin_arm_usat +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { uint32_t result; - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. 
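/* Usage sketch (editor's addition): masking only interrupts at or below a
   threshold priority; __set_BASEPRI_MAX never lowers an existing mask, so
   nested sections compose. The encoding assumes 4 implemented priority bits
   in the upper nibble, which is device-specific and illustrative here. */
#define APP_BASEPRI_THRESHOLD   (5U << 4U)

static inline void timing_critical_region(void)
{
  uint32_t previous = __get_BASEPRI();
  __set_BASEPRI_MAX(APP_BASEPRI_THRESHOLD);
  /* ... work that tolerates only higher-priority interrupts ... */
  __set_BASEPRI(previous);
}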
+ \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
+ \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1208,150 +1181,210 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + /** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
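/* Usage sketch (editor's addition): arming the ARMv8-M stack limit before
   running on the process stack, so an overflow faults instead of silently
   corrupting the memory below the stack; on cores without the Main Extension
   the write is RAZ/WI as documented above. The stack array is illustrative. */
static uint32_t thread_stack[256] __attribute__((aligned(8)));

static inline void arm_thread_stack_guard(void)
{
  __set_PSPLIM((uint32_t)&thread_stack[0]);       /* lowest legal PSP */
  __set_PSP((uint32_t)&thread_stack[256]);        /* initial stack top */
}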
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
+ \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -#define __STLEXB (uint32_t)__builtin_arm_stlex +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register.
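/* Usage sketch for the PSPLIM/MSPLIM setters above (illustrative only; the
   thread_stack symbol is hypothetical, not part of this patch): an RTOS port
   can pair the PSP with a stack-pointer limit so a process-stack overflow
   faults immediately on ARMv8-M Mainline, while on Baseline parts without
   the register the MSR degrades to a no-op exactly as documented above. */
static uint64_t thread_stack[128];               /* hypothetical thread stack */
static void start_thread_stack(void)
{
  __set_PSP((uint32_t)&thread_stack[128]);       /* stack grows down from the top */
  __set_PSPLIM((uint32_t)&thread_stack[0]);      /* fault if PSP drops below this */
}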
+ \return Floating Point Status/Control register value */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set */ -#define __STLEX (uint32_t)__builtin_arm_stlex +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -1878,6 +1911,8 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) +#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { int32_t result; diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_gcc.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_gcc.h index a2778f58e8f..67bda4ef3c3 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_gcc.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_gcc.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler GCC header file - * @version V5.3.0 - * @date 26. March 2020 + * @version V5.4.1 + * @date 27. May 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -46,9 +46,9 @@ #ifndef __STATIC_INLINE #define __STATIC_INLINE static inline #endif -#ifndef __STATIC_FORCEINLINE +#ifndef __STATIC_FORCEINLINE #define __STATIC_FORCEINLINE __attribute__((always_inline)) static inline -#endif +#endif #ifndef __NO_RETURN #define __NO_RETURN __attribute__((__noreturn__)) #endif @@ -126,23 +126,23 @@ \details This default implementation initializes all data and additional bss sections relying on .copy.table and .zero.table being specified properly in the used linker script.
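/* Usage sketch for the FPSCR accessor macros defined above for armclang
   (illustrative only): enable flush-to-zero and default-NaN mode. FZ is
   FPSCR bit 24 and DN is bit 25; on a build without an FPU the macros
   above make this a harmless no-op. */
static void enable_ftz_dn(void)
{
  uint32_t fpscr = __get_FPSCR();
  fpscr |= (1U << 24) | (1U << 25);   /* FZ | DN */
  __set_FPSCR(fpscr);
}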
- + */ __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) { extern void _start(void) __NO_RETURN; - + typedef struct { uint32_t const* src; uint32_t* dest; uint32_t wlen; } __copy_table_t; - + typedef struct { uint32_t* dest; uint32_t wlen; } __zero_table_t; - + extern const __copy_table_t __copy_table_start__; extern const __copy_table_t __copy_table_end__; extern const __zero_table_t __zero_table_start__; extern const __zero_table_t __zero_table_end__; @@ -153,16 +153,16 @@ __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) pTable->dest[i] = pTable->src[i]; } } - + for (__zero_table_t const* pTable = &__zero_table_start__; pTable < &__zero_table_end__; ++pTable) { for(uint32_t i=0u; i<pTable->wlen; ++i) { pTable->dest[i] = 0u; } } - + _start(); } - + #define __PROGRAM_START __cmsis_start #endif @@ -182,462 +182,569 @@ __STATIC_FORCEINLINE __NO_RETURN void __cmsis_start(void) #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section(".vectors"))) #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions - @{ - */ +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL __StackSeal +#endif -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif -/** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; } +#endif + + +/* ########################## Core Instruction Access ######################### */ /** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface Access to dedicated instructions @{ */ +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; +#define __NOP() __ASM volatile ("nop") - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +/** + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
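/* Usage sketch for the stack-seal helper above (illustrative; the
   __StackSeal placement and linker symbol are assumptions, not part of
   this patch): secure startup code reserves __TZ_STACK_SEAL_SIZE bytes
   above the stack top and seals them so an unstacking sequence cannot
   walk past the base of the secure stack. */
extern uint32_t __StackSeal;            /* hypothetical linker-provided symbol */
static void seal_secure_stack(void)
{
  __TZ_set_STACKSEAL_S(&__StackSeal);   /* writes __TZ_STACK_SEAL_VALUE */
}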
+ */ +#define __WFI() __ASM volatile ("wfi":::"memory") -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +#define __WFE() __ASM volatile ("wfe":::"memory") /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __SEV() __ASM volatile ("sev") -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +__STATIC_FORCEINLINE void __ISB(void) { - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ASM volatile ("isb 0xF":::"memory"); } -#endif /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +__STATIC_FORCEINLINE void __DSB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); + __ASM volatile ("dsb 0xF":::"memory"); } /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) +__STATIC_FORCEINLINE void __DMB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); + __ASM volatile ("dmb 0xF":::"memory"); } /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. 
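/* Usage sketch for the barriers above (illustrative): after retargeting the
   vector table, drain the store with __DSB() and refetch the pipeline with
   __ISB() before any interrupt may use the new table. SCB comes from the
   core_cm device header, and vector_table_ram is a hypothetical RAM copy. */
extern uint32_t vector_table_ram[];
static void relocate_vectors(void)
{
  SCB->VTOR = (uint32_t)vector_table_ram;
  __DSB();   /* complete the VTOR write */
  __ISB();   /* flush stale prefetched state */
}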
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) { +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else uint32_t result; - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); + __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif } /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) { - uint32_t result; +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} + __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; #endif +} /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. 
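/* Spot checks for the reverse/rotate helpers above (illustrative; the
   results can be verified by hand): */
static void rotate_examples(void)
{
  uint32_t r1 = __REV  (0x12345678U);     /* 0x78563412: full byte swap */
  uint32_t r2 = __REV16(0x12345678U);     /* 0x34127856: swap within each halfword */
  uint32_t r3 = __ROR  (0x12345678U, 8U); /* 0x78123456: rotate right by 8 bits */
  (void)r1; (void)r2; (void)r3;
}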
*/ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ +#endif + return result; } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief LDR Exclusive (8 bit) + \details Executes an exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) { - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - + uint32_t result; -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state.
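/* A common composition of the two intrinsics above (illustrative): count
   trailing zeros as clz(rbit(x)). Since __CLZ(0) is defined to return 32,
   ctz32(0) is 32 as well. */
__STATIC_FORCEINLINE uint8_t ctz32(uint32_t value)
{
  return __CLZ(__RBIT(value));
}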
- \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So we have to use the following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); #endif + return ((uint8_t) result); /* Add explicit type cast here */ +} -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief LDR Exclusive (16 bit) + \details Executes an exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So we have to use the following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ } /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief LDR Exclusive (32 bit) + \details Executes an exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) { - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); } -#endif /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief STR Exclusive (8 bit) + \details Executes an exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief STR Exclusive (16 bit) + \details Executes an exclusive STR instruction for 16 bit values.
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } -#endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief STR Exclusive (32 bit) + \details Executes an exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +__STATIC_FORCEINLINE void __CLREX(void) { - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("clrex" ::: "memory"); } -#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} +#define __SSAT(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Unsigned Saturate + \details Saturates an unsigned value.
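/* The canonical retry loop for the exclusive pair defined above
   (illustrative): atomically add to a shared word. The store fails
   (returns 1) and the loop retries whenever the exclusive monitor is
   lost to another context between the load and the store. */
__STATIC_FORCEINLINE uint32_t atomic_add32(volatile uint32_t *obj, uint32_t add)
{
  uint32_t newval;
  do {
    newval = __LDREXW(obj) + add;       /* load and set the exclusive monitor */
  } while (__STREXW(newval, obj) != 0U);/* retry until the store succeeds */
  return newval;
}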
+ \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} +#define __USAT(ARG1, ARG2) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (8 bit) + \details Executes an Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); -} +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So we have to use the following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); #endif + return ((uint8_t) result); /* Add explicit type cast here */ +} /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (16 bit) + \details Executes an Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So we have to use the following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (32 bit) + \details Executes an Unprivileged LDRT instruction for 32 bit values.
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (8 bit) + \details Executes an Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief STRT Unprivileged (16 bit) + \details Executes an Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief STRT Unprivileged (32 bit) + \details Executes an Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } -#endif +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Signed Saturate + \details Saturates a signed value.
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -646,780 +753,646 @@ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) - /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. 
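/* Spot checks for the saturation fallbacks above (illustrative): with
   sat = 8 the signed range is -128..127 and the unsigned range 0..255. */
static void saturate_examples(void)
{
  int32_t  a = __SSAT( 300, 8U);   /* ->  127 */
  int32_t  b = __SSAT(-300, 8U);   /* -> -128 */
  uint32_t c = __USAT(  -5, 8U);   /* ->    0 */
  uint32_t d = __USAT( 300, 8U);   /* ->  255 */
  (void)a; (void)b; (void)c; (void)d;
}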
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. 
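/* Usage sketch for the ARMv8-M load-acquire/store-release accessors in
   this group (illustrative): a single-writer flag handoff. The
   store-release orders the payload write before the flag; the
   load-acquire orders the flag check before the payload read. */
static uint32_t payload;
static volatile uint32_t payload_ready;

static void publish(uint32_t value)
{
  payload = value;
  __STL(1U, &payload_ready);               /* release: payload visible first */
}

static uint32_t consume(void)
{
  while (__LDA(&payload_ready) == 0U) { }  /* acquire: spin until published */
  return payload;
}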
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) + */ +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +{ + uint32_t result; - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
- \param [in] MainStackPtrLimit Main Stack Pointer value to set + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} + + +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +{ + uint32_t result; + + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_get_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - return __builtin_arm_get_fpscr(); -#else - uint32_t result; + uint32_t result; - __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); - return(result); -#endif -#else - return(0U); -#endif + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); } /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_set_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - __builtin_arm_set_fpscr(fpscr); -#else - __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); -#endif -#else - (void)fpscr; -#endif + uint32_t result; + + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); } +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@} end of CMSIS_Core_RegAccFunctions */ +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -#define __NOP() __ASM volatile ("nop") +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} + /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#define __WFI() __ASM volatile ("wfi":::"memory") +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. 
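/* A common pattern built from __disable_irq() above and the PRIMASK
   accessors defined later in this section (illustrative): a nestable
   critical section that restores the caller's interrupt state instead of
   unconditionally re-enabling interrupts. */
static void update_shared(volatile uint32_t *counter)
{
  uint32_t primask = __get_PRIMASK();  /* remember current mask state */
  __disable_irq();
  *counter += 1U;                      /* safely touch ISR-shared data */
  __set_PRIMASK(primask);              /* restore; may stay masked if nested */
}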
+ \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __WFE() __ASM volatile ("wfe":::"memory") +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -#define __SEV() __ASM volatile ("sev") +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE void __ISB(void) +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) { - __ASM volatile ("isb 0xF":::"memory"); + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); } +#endif /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE void __DSB(void) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - __ASM volatile ("dsb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); } /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -__STATIC_FORCEINLINE void __DMB(void) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) { - __ASM volatile ("dmb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); } /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} + + +/** + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). 
+ \return PSP Register value + */ +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else uint32_t result; - __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) { uint32_t result; - __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). 
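/* Illustrative helper combining __get_CONTROL() with the stack-pointer
   reads in this group: CONTROL.SPSEL (bit 1) selects the PSP in Thread
   mode and reads as zero in Handler mode, so this returns whichever
   stack pointer is currently active. */
__STATIC_FORCEINLINE uint32_t active_sp(void)
{
  return ((__get_CONTROL() & 0x2U) != 0U) ? __get_PSP() : __get_MSP();
}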
+ \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) { uint32_t result; -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ -#endif - return result; + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. 
+ \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); } +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); } /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) { - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); } +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) { - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE void __CLREX(void) +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) { - __ASM volatile ("clrex" ::: "memory"); + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); } - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
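+ \note FAULTMASK raises the execution priority to -1, so all exceptions except NMI are masked while it is set.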
*/ -#define __USAT(ARG1, ARG2) \ - __extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. 
+ \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Unsigned Saturate - \details Saturates an unsigned value. 
- \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1428,186 +1401,235 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) -/** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); -} - /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) - */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) -{ - uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); -} - + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. -/** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
+ \return PSPLIM Register value
 */
-__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr)
+__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void)
{
- __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
+ return 0U;
+#else
+ uint32_t result;
+ __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) );
+ return result;
+#endif
}
+#endif

 /**
- \brief Store-Release (16 bit)
- \details Executes a STLH instruction for 16 bit values.
- \param [in] value Value to store
- \param [in] ptr Pointer to location
+ \brief Set Process Stack Pointer Limit
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
+ mode.
+
+ \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM).
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
 */
-__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr)
+__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit)
{
- __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
+ (void)ProcStackPtrLimit;
+#else
+ __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit));
+#endif
}

+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
 /**
- \brief Store-Release (32 bit)
- \details Executes a STL instruction for 32 bit values.
- \param [in] value Value to store
- \param [in] ptr Pointer to location
+ \brief Set Process Stack Pointer Limit (non-secure)
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence the write is silently ignored.
+
+ \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state.
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set
 */
-__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr)
+__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit)
{
- __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+ // without main extensions, the non-secure PSPLIM is RAZ/WI
+ (void)ProcStackPtrLimit;
+#else
+ __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit));
+#endif
}
+#endif

 /**
- \brief Load-Acquire Exclusive (8 bit)
- \details Executes a LDAB exclusive instruction for 8 bit value.
- \param [in] ptr Pointer to data
- \return value of type uint8_t at (*ptr)
+ \brief Get Main Stack Pointer Limit
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence zero is returned always in non-secure
+ mode.
+
+ \details Returns the current value of the Main Stack Pointer Limit (MSPLIM).
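+ \note On Armv8-M Mainline, a stack-pointer value below the programmed limit raises a stack overflow UsageFault (UFSR.STKOF).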
+ \return MSPLIM Register value
 */
-__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr)
+__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void)
{
- uint32_t result;
-
- __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
- return ((uint8_t) result);
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
+ return 0U;
+#else
+ uint32_t result;
+ __ASM volatile ("MRS %0, msplim" : "=r" (result) );
+ return result;
+#endif
}

+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
 /**
- \brief Load-Acquire Exclusive (16 bit)
- \details Executes a LDAH exclusive instruction for 16 bit values.
- \param [in] ptr Pointer to data
- \return value of type uint16_t at (*ptr)
+ \brief Get Main Stack Pointer Limit (non-secure)
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence zero is returned always.
+
+ \details Returns the current value of the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
+ \return MSPLIM Register value
 */
-__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr)
+__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void)
{
- uint32_t result;
-
- __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
- return ((uint16_t) result);
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
+ return 0U;
+#else
+ uint32_t result;
+ __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) );
+ return result;
+#endif
}
+#endif

 /**
- \brief Load-Acquire Exclusive (32 bit)
- \details Executes a LDA exclusive instruction for 32 bit values.
- \param [in] ptr Pointer to data
- \return value of type uint32_t at (*ptr)
+ \brief Set Main Stack Pointer Limit
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence the write is silently ignored in non-secure
+ mode.
+
+ \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM).
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set
 */
-__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr)
+__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit)
{
- uint32_t result;
-
- __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" );
- return(result);
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
+ (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
+ (void)MainStackPtrLimit;
+#else
+ __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit));
+#endif
}

+#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3))
 /**
- \brief Store-Release Exclusive (8 bit)
- \details Executes a STLB exclusive instruction for 8 bit values.
- \param [in] value Value to store
- \param [in] ptr Pointer to location
- \return 0 Function succeeded
- \return 1 Function failed
+ \brief Set Main Stack Pointer Limit (non-secure)
+ Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure
+ Stack Pointer Limit register hence the write is silently ignored.
+
+ \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state.
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set
 */
-__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
+__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit)
{
- uint32_t result;
-
- __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
- return(result);
+#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)))
+ // without main extensions, the non-secure MSPLIM is RAZ/WI
+ (void)MainStackPtrLimit;
+#else
+ __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit));
+#endif
}
+#endif
+
+#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
+ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */

 /**
- \brief Store-Release Exclusive (16 bit)
- \details Executes a STLH exclusive instruction for 16 bit values.
- \param [in] value Value to store
- \param [in] ptr Pointer to location
- \return 0 Function succeeded
- \return 1 Function failed
+ \brief Get FPSCR
+ \details Returns the current value of the Floating Point Status/Control register.
+ \return Floating Point Status/Control register value
 */
-__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
+__STATIC_FORCEINLINE uint32_t __get_FPSCR(void)
{
- uint32_t result;
+#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
+ (defined (__FPU_USED ) && (__FPU_USED == 1U)) )
+#if __has_builtin(__builtin_arm_get_fpscr)
+// Re-enable using built-in when GCC has been fixed
+// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2)
+ /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */
+ return __builtin_arm_get_fpscr();
+#else
+ uint32_t result;
- __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" );
- return(result);
+ __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
+ return(result);
+#endif
+#else
+ return(0U);
+#endif
}

 /**
- \brief Store-Release Exclusive (32 bit)
- \details Executes a STL exclusive instruction for 32 bit values.
- \param [in] value Value to store
- \param [in] ptr Pointer to location
- \return 0 Function succeeded
- \return 1 Function failed
+ \brief Set FPSCR
+ \details Assigns the given value to the Floating Point Status/Control register.
+ \param [in] fpscr Floating Point Status/Control value to set */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) { - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif } -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -1925,6 +1947,7 @@ __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) } #define __SSAT16(ARG1, ARG2) \ +__extension__ \ ({ \ int32_t __RES, __ARG1 = (ARG1); \ __ASM volatile ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ @@ -1932,6 +1955,7 @@ __STATIC_FORCEINLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3) }) #define __USAT16(ARG1, ARG2) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1); \ __ASM volatile ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ @@ -1965,9 +1989,11 @@ __STATIC_FORCEINLINE uint32_t __SXTB16(uint32_t op1) __STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t op1, uint32_t rotate) { uint32_t result; - - __ASM ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); - + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtb16 %0, %1, ROR %2" : "=r" (result) : "r" (op1), "i" (rotate) ); + } else { + result = __SXTB16(__ROR(op1, rotate)) ; + } return result; } @@ -1979,6 +2005,18 @@ __STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2) return(result); } +__STATIC_FORCEINLINE uint32_t __SXTAB16_RORn(uint32_t op1, uint32_t op2, uint32_t rotate) +{ + uint32_t result; + if (__builtin_constant_p(rotate) && ((rotate == 8U) || (rotate == 16U) || (rotate == 24U))) { + __ASM volatile ("sxtab16 %0, %1, %2, ROR %3" : "=r" (result) : "r" (op1) , "r" (op2) , "i" (rotate)); + } else { + result = __SXTAB16(op1, __ROR(op2, rotate)); + } + return result; +} + + __STATIC_FORCEINLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2) { uint32_t result; @@ -2135,8 +2173,9 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) return(result); } -#if 0 + #define __PKHBT(ARG1,ARG2,ARG3) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \ @@ -2144,6 +2183,7 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) }) #define __PKHTB(ARG1,ARG2,ARG3) \ +__extension__ \ ({ \ uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \ if (ARG3 == 0) \ @@ -2152,13 +2192,7 @@ __STATIC_FORCEINLINE int32_t __QSUB( int32_t op1, int32_t op2) __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) 
); \ __RES; \ }) -#endif - -#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \ - ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) ) -#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \ - ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) ) __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) { diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_iccarm.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_iccarm.h index 7eeffca5c71..65b824b009c 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_iccarm.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/cmsis_iccarm.h @@ -1,14 +1,14 @@ /**************************************************************************//** * @file cmsis_iccarm.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.2.0 - * @date 28. January 2020 + * @version V5.3.0 + * @date 14. April 2021 ******************************************************************************/ //------------------------------------------------------------------------------ // -// Copyright (c) 2017-2019 IAR Systems -// Copyright (c) 2017-2019 Arm Limited. All rights reserved. +// Copyright (c) 2017-2021 IAR Systems +// Copyright (c) 2017-2021 Arm Limited. All rights reserved. // // SPDX-License-Identifier: Apache-2.0 // @@ -238,6 +238,7 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #endif +#undef __WEAK /* undo the definition from DLib_Defaults.h */ #ifndef __WEAK #if __ICCARM_V8 #define __WEAK __attribute__((weak)) @@ -266,6 +267,24 @@ __packed struct __iar_u32 { uint32_t v; }; #define __VECTOR_TABLE_ATTRIBUTE @".intvec" #endif +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL STACKSEAL$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + #ifndef __ICCARM_INTRINSICS_VERSION__ #define __ICCARM_INTRINSICS_VERSION__ 0 #endif @@ -336,7 +355,13 @@ __packed struct __iar_u32 { uint32_t v; }; #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) - #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); +} + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) @@ -358,7 +383,13 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) - #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); +} + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) @@ -680,6 +711,7 @@ __packed struct __iar_u32 { uint32_t v; }; __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) { __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + __iar_builtin_ISB(); } __IAR_FT uint32_t __TZ_get_PSP_NS(void) @@ -965,4 +997,6 @@ __packed struct __iar_u32 { uint32_t v; }; #define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2)) 
+#define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) + #endif /* __CMSIS_ICCARM_H__ */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv81mml.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv81mml.h index 1ad19e215ac..33df4554361 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv81mml.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv81mml.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv81mml.h * @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File - * @version V1.3.1 - * @date 27. March 2020 + * @version V1.4.1 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -210,14 +210,14 @@ #define __FPU_PRESENT 0U #warning "__FPU_PRESENT not defined in device header file; using default!" #endif - + #if __FPU_PRESENT != 0U #ifndef __FPU_DP #define __FPU_DP 0U #warning "__FPU_DP not defined in device header file; using default!" #endif #endif - + #ifndef __MPU_PRESENT #define __MPU_PRESENT 0U #warning "__MPU_PRESENT not defined in device header file; using default!" @@ -232,7 +232,7 @@ #define __DCACHE_PRESENT 0U #warning "__DCACHE_PRESENT not defined in device header file; using default!" #endif - + #ifndef __PMU_PRESENT #define __PMU_PRESENT 0U #warning "__PMU_PRESENT not defined in device header file; using default!" @@ -261,7 +261,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
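For context, the stack-seal support added in the cmsis_iccarm.h hunk above (__TZ_STACK_SEAL_SIZE, __TZ_STACK_SEAL_VALUE, __TZ_set_STACKSEAL_S) is meant to be invoked once during secure start-up, before control ever passes to the non-secure image. A minimal sketch, assuming a linker-provided seal location (the symbol name below is illustrative, not part of this patch):

#include "cmsis_compiler.h"            /* provides __TZ_set_STACKSEAL_S when __ARM_FEATURE_CMSE == 3 */

extern uint64_t __stack_seal_base;     /* assumed 8-byte (__TZ_STACK_SEAL_SIZE) region above the stack top */

void SealSecureStack(void)
{
  /* Writes the 64-bit seal 0xFEF5EDA5FEF5EDA5 at the top of the secure stack,
     so an exception return that tries to unstack past the stack top faults
     instead of consuming attacker-controlled data. */
  __TZ_set_STACKSEAL_S((uint32_t *)&__stack_seal_base);
}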
@@ -766,22 +766,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -1508,12 +1508,12 @@ typedef struct /** \brief PMU Event Counter Registers (0-30) Definitions */ #define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ -#define PMU_EVCNTR_CNT_Msk (16UL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ /** \brief PMU Event Type and Filter Registers (0-30) Definitions */ #define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ -#define PMU_EVTYPER_EVENTTOCNT_Msk (16UL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ /** \brief PMU Count Enable Set Register Definitions */ @@ -2221,10 +2221,10 @@ typedef struct /** \brief PMU Type Register Definitions */ #define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ -#define PMU_TYPE_NUM_CNTS_Msk (8UL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ #define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ -#define PMU_TYPE_SIZE_CNTS_Msk (6UL << PMU_TYPE_SIZE_CNTS_Pos) 
/*!< PMU TYPE: Size of Counters Mask */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ #define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ #define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ @@ -2235,6 +2235,32 @@ typedef struct #define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ #define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + /*@} end of group CMSIS_PMU */ #endif diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv8mml.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv8mml.h index 71f000bcadf..2bd9e76064d 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv8mml.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_armv8mml.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv8mml.h * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File - * @version V5.2.0 - * @date 27. March 2020 + * @version V5.2.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -254,7 +254,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -545,6 +545,7 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ } SCB_Type; /* SCB CPUID Register Definitions */ @@ -745,22 +746,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -2939,7 +2940,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -3006,7 +3007,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. 
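The MMFSR position macros in the hunk above now derive from SCB_CFSR_MEMFAULTSR_Pos instead of the unrelated SCB_SHCSR_MEMFAULTACT_Pos; both expand to 0, so the numeric values are unchanged and only the derivation is corrected. The same fix is repeated in the Cortex-M3/M4/M33/M35P/M55 headers below. A typical consumer of these macros, sketched against a generic CMSIS device header:

void MemManage_Handler(void)
{
  uint32_t cfsr = SCB->CFSR;

  if ((cfsr & SCB_CFSR_MMARVALID_Msk) != 0U) {
    uint32_t fault_address = SCB->MMFAR;  /* only meaningful while MMARVALID is set */
    (void)fault_address;                  /* report or log the faulting address */
  }
  if ((cfsr & SCB_CFSR_MSTKERR_Msk) != 0U) {
    /* the MPU violation happened during exception-entry stacking */
  }

  SCB->CFSR = cfsr;                       /* CFSR bits are write-one-to-clear */
  for (;;) { __NOP(); }                   /* do not return to the faulting context */
}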
diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm3.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm3.h index 24453a88633..74fb87e5c56 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm3.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm3.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm3.h * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File - * @version V5.1.1 - * @date 27. March 2020 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -146,7 +146,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -565,19 +565,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm33.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm33.h index 13359be3ed0..f9cf6ab183a 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm33.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm33.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm33.h * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File - * @version V5.2.0 - * @date 27. March 2020 + * @version V5.2.2 + * @date 04. 
June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -254,7 +254,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -545,6 +545,7 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ } SCB_Type; /* SCB CPUID Register Definitions */ @@ -745,22 +746,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -3007,7 +3008,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -3074,7 +3075,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. 
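The new BPIALL member added to SCB_Type above (offset 0x278) exposes the branch-predictor invalidate-all operation on Mainline cores. A minimal sketch of the usual maintenance sequence after loading new code into executable memory (barrier placement follows the standard Arm pattern; whether the operation is required at all is implementation-specific):

__STATIC_FORCEINLINE void InvalidateBranchPredictor(void)
{
  SCB->BPIALL = 0UL;   /* write-only register; the written value is ignored */
  __DSB();             /* ensure the invalidation has completed */
  __ISB();             /* refetch the instruction stream with clean predictions */
}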
diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm35p.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm35p.h index 6a5f6ad1471..552c29464de 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm35p.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm35p.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm35p.h * @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File - * @version V1.1.0 - * @date 27. March 2020 + * @version V1.1.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -249,12 +249,12 @@ #define __DSP_PRESENT 0U #warning "__DSP_PRESENT not defined in device header file; using default!" #endif - + #ifndef __VTOR_PRESENT #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -545,6 +545,7 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ } SCB_Type; /* SCB CPUID Register Definitions */ @@ -745,22 +746,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR 
(MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -3007,7 +3008,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -3074,7 +3075,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm4.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm4.h index 4e0e8866970..e21cd149256 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm4.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm4.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file core_cm4.h * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File - * @version V5.1.1 - * @date 27. March 2020 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* * Copyright (c) 2009-2020 Arm Limited. All rights reserved. @@ -198,7 +198,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -623,22 +623,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) 
/*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm55.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm55.h index 6efaa3f8429..ecee4e0afb2 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm55.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm55.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm55.h * @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File - * @version V1.0.0 - * @date 27. March 2020 + * @version V1.2.1 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -210,7 +210,7 @@ #define __FPU_PRESENT 0U #warning "__FPU_PRESENT not defined in device header file; using default!" #endif - + #if __FPU_PRESENT != 0U #ifndef __FPU_DP #define __FPU_DP 0U @@ -232,12 +232,12 @@ #define __DCACHE_PRESENT 0U #warning "__DCACHE_PRESENT not defined in device header file; using default!" #endif - + #ifndef __VTOR_PRESENT #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __PMU_PRESENT #define __PMU_PRESENT 0U #warning "__PMU_PRESENT not defined in device header file; using default!" @@ -766,22 +766,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): 
DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -1349,6 +1349,40 @@ typedef struct /*@}*/ /* end of group CMSIS_DWT */ +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). + */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; + __IOM uint32_t DPDLPSTATE; +} PwrModCtl_Type; + + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE CLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk 3UL /*!< PWRMODCTL CPDLPSTATE RLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk 3UL /*!< PWRMODCTL DPDLPSTATE DLPSTATE Mask */ + +/*@}*/ /* end of group CMSIS_PWRMODCTL */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_TPI Trace Port Interface (TPI) @@ -1508,12 +1542,12 @@ typedef struct /** \brief PMU Event Counter Registers (0-30) Definitions */ #define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ -#define PMU_EVCNTR_CNT_Msk (16UL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ /** \brief PMU Event Type and Filter Registers (0-30) Definitions */ #define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ -#define PMU_EVTYPER_EVENTTOCNT_Msk (16UL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ /** \brief PMU Count Enable Set Register Definitions */ @@ -2221,10 +2255,10 @@ typedef struct /** \brief PMU Type Register Definitions */ #define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ -#define PMU_TYPE_NUM_CNTS_Msk (8UL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ #define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ -#define PMU_TYPE_SIZE_CNTS_Msk (6UL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ #define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ #define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle 
Counter Present Mask */ @@ -2235,6 +2269,33 @@ typedef struct #define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ #define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + /*@} end of group CMSIS_PMU */ #endif @@ -3066,6 +3127,7 @@ typedef struct #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ @@ -3081,6 +3143,7 @@ typedef struct #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm7.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm7.h index e1c31c275dc..010506e9fa4 100644 --- 
a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm7.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_cm7.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm7.h * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File - * @version V5.1.2 - * @date 27. March 2020 + * @version V5.1.6 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -213,7 +213,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -501,7 +501,8 @@ typedef struct __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ - uint32_t RESERVED7[6U]; + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ + uint32_t RESERVED7[5U]; __IOM uint32_t ITCMCR; /*!< Offset: 0x290 (R/W) Instruction Tightly-Coupled Memory Control Register */ __IOM uint32_t DTCMCR; /*!< Offset: 0x294 (R/W) Data Tightly-Coupled Memory Control Registers */ __IOM uint32_t AHBPCR; /*!< Offset: 0x298 (R/W) AHBP Control Register */ @@ -676,22 +677,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define 
SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -875,21 +876,24 @@ typedef struct #define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ #define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ -#define SCB_CACR_ECCEN_Pos 1U /*!< SCB CACR: ECCEN Position */ -#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< SCB CACR: ECCEN Mask */ +#define SCB_CACR_ECCEN_Pos 1U /*!< \deprecated SCB CACR: ECCEN Position */ +#define SCB_CACR_ECCEN_Msk (1UL << SCB_CACR_ECCEN_Pos) /*!< \deprecated SCB CACR: ECCEN Mask */ + +#define SCB_CACR_ECCDIS_Pos 1U /*!< SCB CACR: ECCDIS Position */ +#define SCB_CACR_ECCDIS_Msk (1UL << SCB_CACR_ECCDIS_Pos) /*!< SCB CACR: ECCDIS Mask */ #define SCB_CACR_SIWT_Pos 0U /*!< SCB CACR: SIWT Position */ #define SCB_CACR_SIWT_Msk (1UL /*<< SCB_CACR_SIWT_Pos*/) /*!< SCB CACR: SIWT Mask */ /* AHBS Control Register Definitions */ #define SCB_AHBSCR_INITCOUNT_Pos 11U /*!< SCB AHBSCR: INITCOUNT Position */ -#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBPCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ +#define SCB_AHBSCR_INITCOUNT_Msk (0x1FUL << SCB_AHBSCR_INITCOUNT_Pos) /*!< SCB AHBSCR: INITCOUNT Mask */ #define SCB_AHBSCR_TPRI_Pos 2U /*!< SCB AHBSCR: TPRI Position */ -#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBPCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ +#define SCB_AHBSCR_TPRI_Msk (0x1FFUL << SCB_AHBSCR_TPRI_Pos) /*!< SCB AHBSCR: TPRI Mask */ #define SCB_AHBSCR_CTL_Pos 0U /*!< SCB AHBSCR: CTL Position*/ -#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBPCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ +#define SCB_AHBSCR_CTL_Msk (3UL /*<< SCB_AHBSCR_CTL_Pos*/) /*!< SCB AHBSCR: CTL Mask */ /* Auxiliary Bus Fault Status Register Definitions */ #define SCB_ABFSR_AXIMTYPE_Pos 8U /*!< SCB ABFSR: AXIMTYPE Position*/ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_sc300.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_sc300.h index e8914ba601d..d66621031e0 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_sc300.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/core_sc300.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_sc300.h * @brief CMSIS SC300 Core Peripheral Access Layer Header File - * @version V5.0.9 - * @date 27. March 2020 + * @version V5.0.10 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -562,19 +562,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv7.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv7.h index 791a8dae65a..d9eedf81a64 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv7.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv7.h @@ -1,8 +1,8 @@ /****************************************************************************** * @file mpu_armv7.h * @brief CMSIS MPU API for Armv7-M MPU - * @version V5.1.1 - * @date 10. February 2020 + * @version V5.1.2 + * @date 25. May 2020 ******************************************************************************/ /* * Copyright (c) 2017-2020 Arm Limited. All rights reserved. @@ -223,7 +223,7 @@ __STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr) /** Configure an MPU region. * \param rbar Value for RBAR register. -* \param rsar Value for RSAR register. +* \param rasr Value for RASR register. */ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) { @@ -234,7 +234,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr) /** Configure the given MPU region. * \param rnr Region number to be configured. * \param rbar Value for RBAR register. -* \param rsar Value for RSAR register. +* \param rasr Value for RASR register. */ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr) { @@ -243,7 +243,7 @@ __STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t r MPU->RASR = rasr; } -/** Memcopy with strictly ordered memory access, e.g. for register targets. 
+/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_Load(). * \param dst Destination data is copied to. * \param src Source data is copied from. * \param len Amount of data words to be copied. diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv8.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv8.h index ef44ad01df0..3de16efc86a 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv8.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/mpu_armv8.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file mpu_armv8.h * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU - * @version V5.1.2 - * @date 10. February 2020 + * @version V5.1.3 + * @date 03. February 2021 ******************************************************************************/ /* - * Copyright (c) 2017-2020 Arm Limited. All rights reserved. + * Copyright (c) 2017-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -281,7 +281,7 @@ __STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t } #endif -/** Memcopy with strictly ordered memory access, e.g. for register targets. +/** Memcpy with strictly ordered memory access, e.g. used by code in ARM_MPU_LoadEx() * \param dst Destination data is copied to. * \param src Source data is copied from. * \param len Amount of data words to be copied. diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/pmu_armv8.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/pmu_armv8.h index dbd39d20c73..f8f3d8935b8 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/pmu_armv8.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Include/pmu_armv8.h @@ -1,8 +1,8 @@ /****************************************************************************** * @file pmu_armv8.h * @brief CMSIS PMU API for Armv8.1-M PMU - * @version V1.0.0 - * @date 24. March 2020 + * @version V1.0.1 + * @date 15. April 2020 ******************************************************************************/ /* * Copyright (c) 2020 Arm Limited. All rights reserved. @@ -274,7 +274,7 @@ __STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void) */ __STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num) { - return PMU->EVCNTR[num]; + return PMU_EVCNTR_CNT_Msk & PMU->EVCNTR[num]; } /** diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c index 917eeaeeced..e2e82942f81 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c @@ -5,7 +5,7 @@ * @date 10. January 2018 ******************************************************************************/ /* - * Copyright (c) 2016-2020 Arm Limited. All rights reserved. + * Copyright (c) 2016-2018 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,10 +22,6 @@ * limitations under the License. 
*/ -#if !FEATURE_TFM - -#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) - #include "RTE_Components.h" #include CMSIS_device_header #include "tz_context.h" @@ -202,6 +198,3 @@ uint32_t TZ_StoreContext_S (TZ_MemoryId_t id) { return 1U; // Success } -#endif - -#endif // !FEATURE_TFM diff --git a/tools/importer/cmsis_importer.json b/tools/importer/cmsis_importer.json index 3dab3ab9936..cc13bb85e7d 100644 --- a/tools/importer/cmsis_importer.json +++ b/tools/importer/cmsis_importer.json @@ -17,11 +17,11 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Config/RTX_Config.c" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv6m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm0.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv6m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S" }, { @@ -29,7 +29,7 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm3.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S" }, { @@ -37,19 +37,19 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_cm4f.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_ca.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/ARM/irq_armv7a.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_CORTEX_A/irq_ca.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm0.S", + "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv6m.S", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm0.S", + "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv6m.S", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S" }, { @@ -57,7 +57,7 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm3.S", + "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7m.S", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S" }, { @@ -65,35 +65,35 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_cm4f.S", + "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7m.S", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_ca.S", + "src_file" : "CMSIS/RTOS2/RTX/Source/GCC/irq_armv7a.S", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_CORTEX_A/irq_ca.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv6m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0/irq_cm0.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm0.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv6m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M0P/irq_cm0.S" }, { - 
"src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl_common.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mbl.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M23/irq_armv8mbl_common.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm3.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M3/irq_cm3.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml_common.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv8mml.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_M33/irq_armv8mml_common.S" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_cm4f.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7m.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_RTOS_M4_M7/irq_cm4f.S" }, { @@ -101,7 +101,7 @@ "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/Source/os_systick.c" }, { - "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_ca.s", + "src_file" : "CMSIS/RTOS2/RTX/Source/IAR/irq_armv7a.s", "dest_file" : "cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_IAR/TARGET_CORTEX_A/irq_ca.S" }, { From 8ade0d46eca5ee960eb698b0a3405419eb0b715c Mon Sep 17 00:00:00 2001 From: Bartek Szatkowski Date: Tue, 4 Jul 2017 14:14:17 +0100 Subject: [PATCH 02/16] CMSIS/RTX: Patch RTX4 to preserve osThreadDef compatibility mbed OS used older RTX4 version and with osThreadDef accepting only 3 parameters, to preserve compatibility we hardcode the 'instances' parameter to 1. (cherry picked from commit 428acae1b2ac15c3ad523e8d40755a9301220822) (cherry picked from commit 4360b7bbf815c4d812005938c9c27af199803a97) --- .../CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h index ac487e143d9..59a9e3307c7 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include1/cmsis_os.h @@ -438,26 +438,25 @@ uint32_t osKernelSysTick (void); /// Create a Thread Definition with function, priority, and stack requirements. /// \param name name of the thread function. /// \param priority initial priority of the thread function. -/// \param instances number of possible thread instances. /// \param stacksz stack size (in bytes) requirements for the thread function. #if defined (osObjectsExternal) // object is external -#define osThreadDef(name, priority, instances, stacksz) \ +#define osThreadDef(name, priority, stacksz) \ extern const osThreadDef_t os_thread_def_##name #else // define the object #if (osCMSIS < 0x20000U) -#define osThreadDef(name, priority, instances, stacksz) \ +#define osThreadDef(name, priority, stacksz) \ const osThreadDef_t os_thread_def_##name = \ -{ (name), (priority), (instances), (stacksz) } +{ (name), (priority), 1, (stacksz) } #else -#define osThreadDef(name, priority, instances, stacksz) \ -static uint64_t os_thread_stack##name[(stacksz)?(((stacksz+7)/8)):1] __attribute__((section(".bss.os.thread.stack"))); \ +#define osThreadDef(name, priority, stacksz) \ +uint64_t os_thread_stack##name[(stacksz)?(((stacksz+7)/8)):1] __attribute__((section(".bss.os.thread.stack"))); \ static osRtxThread_t os_thread_cb_##name __attribute__((section(".bss.os.thread.cb"))); \ const osThreadDef_t os_thread_def_##name = \ { (name), \ { NULL, osThreadDetached, \ - (instances == 1) ? (&os_thread_cb_##name) : NULL,\ - (instances == 1) ? 
osRtxThreadCbSize : 0U, \ - ((stacksz) && (instances == 1)) ? (&os_thread_stack##name) : NULL, \ + &os_thread_cb_##name,\ + osRtxThreadCbSize, \ + (stacksz) ? (&os_thread_stack##name) : NULL, \ 8*((stacksz+7)/8), \ (priority), 0U, 0U } } #endif From 06b815a9d2fc3ced6adc7cf52199d810a066bad8 Mon Sep 17 00:00:00 2001 From: Deepika Date: Wed, 28 Mar 2018 11:31:18 -0500 Subject: [PATCH 03/16] CMSIS/RTX: Patch to conditionally compile tz_context.c should be compiled only for the secure world; the definitions of the APIs in tz_context.h should be part of the secure binary/bootloader when building mbed-os as non-secure (cherry picked from d0a43b8af0eef4775ec4c3da5994ecceb9ed4558) (cherry picked from commit fb354752eb69403ad503c8e53da67da6483776d6) --- cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c index e2e82942f81..8e9541f75dc 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_M/Source/mbed_tz_context.c @@ -22,6 +22,8 @@ * limitations under the License. */ +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #include "RTE_Components.h" #include CMSIS_device_header #include "tz_context.h" @@ -198,3 +200,4 @@ uint32_t TZ_StoreContext_S (TZ_MemoryId_t id) { return 1U; // Success } +#endif From 4744fdb2f3c7ad2cceb839e68cd8e6f913f9f9ae Mon Sep 17 00:00:00 2001 From: Bartek Szatkowski Date: Wed, 18 Oct 2017 11:29:48 -0500 Subject: [PATCH 04/16] CMSIS/RTX: Allow overwriting mutex ops for ARMC (cherry picked from commit 08ab8cc47d8722bf0c767990cd615cf1c427d006) --- cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c index 70663168e14..5b63a0a5b82 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/rtx_lib.c @@ -725,11 +725,12 @@ typedef void *mutex; //lint -e818 "Pointer 'm' could be declared as pointing to const" // Initialize mutex +#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED +#endif int _mutex_initialize(mutex *m); -int _mutex_initialize(mutex *m) { +__WEAK int _mutex_initialize(mutex *m) { int result; - *m = osMutexNew(NULL); if (*m != NULL) { result = 1; @@ -741,8 +742,10 @@ int _mutex_initialize(mutex *m) { } // Acquire mutex +#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -void _mutex_acquire(mutex *m); +#endif +__WEAK void _mutex_acquire(mutex *m); void _mutex_acquire(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexAcquire(*m, osWaitForever); @@ -750,8 +753,10 @@ void _mutex_acquire(mutex *m) { } // Release mutex +#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -void _mutex_release(mutex *m); +#endif +__WEAK void _mutex_release(mutex *m); void _mutex_release(mutex *m) { if (os_kernel_is_active() != 0U) { (void)osMutexRelease(*m); @@ -759,8 +764,10 @@ void _mutex_release(mutex *m) { } // Free mutex +#if !defined(__ARMCC_VERSION) || __ARMCC_VERSION < 6010050 __USED -void _mutex_free(mutex *m); +#endif +__WEAK void _mutex_free(mutex *m); void _mutex_free(mutex *m) { (void)osMutexDelete(*m); } From b9043fb91c261c67e515d1f12eb21963eed74ee2 Mon Sep 17 00:00:00 2001 From: Martin Kojtal Date: Fri, 9 Jul 2021 11:29:23 +0100 Subject: [PATCH 05/16] cmsis: importer sha
removal I had to resolve conflicts with these SHAs. CMSIS 5.8.0 includes the fixes, making these changes irrelevant. --- tools/importer/cmsis_importer.json | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tools/importer/cmsis_importer.json b/tools/importer/cmsis_importer.json index cc13bb85e7d..3da3d477356 100644 --- a/tools/importer/cmsis_importer.json +++ b/tools/importer/cmsis_importer.json @@ -146,12 +146,7 @@ "commit_sha" : [ "4360b7bbf815c4d812005938c9c27af199803a97", "fb354752eb69403ad503c8e53da67da6483776d6", - "d3f7abdb7c109517e6a71daed8bae63ad6436afc", - "08ab8cc47d8722bf0c767990cd615cf1c427d006", - "dd21ea0ae0559f148d3ff5b1a1937f9d7e0e1138", - "9549fff786475bdcd6ab1d8ac8db1c8618c19f6f", - "96e0689204d375e23bf69d7787a18ba07182f085", - "7149ffed11c0ef6a16f8808f12b7aca16921a66a" + "08ab8cc47d8722bf0c767990cd615cf1c427d006" ] } From 218e93c97ff81a6158519d7d8891c7824131b3ec Mon Sep 17 00:00:00 2001 From: Martin Kojtal Date: Mon, 12 Jul 2021 09:09:47 +0100 Subject: [PATCH 06/16] cmsis: fix license header in rtx_def The upstream fix was merged: https://github.com/ARM-software/CMSIS_5/pull/1238. Included here as the file will be overwritten once the import is updated again. --- cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h index a7076a4e46f..26230e70ae2 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Include/rtx_def.h @@ -1,7 +1,19 @@ /* * Copyright (c) 2021 Arm Limited. All rights reserved. * - * This Software is licensed under an Arm proprietary license. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. * * ----------------------------------------------------------------------------- * From 8bdec2b4db0c087e1ca9310b523c1bb82d67fe2a Mon Sep 17 00:00:00 2001 From: Martin Kojtal Date: Mon, 12 Jul 2021 14:25:21 +0100 Subject: [PATCH 07/16] retarget: move compat header for ARMClang prior to any CMSIS headers Reference: CMSIS 5.8.0 known issues and https://github.com/ARM-software/CMSIS_5/issues/1211 This fixes the error about redefinition of enable/disable IRQ. We need the compat header because of semihosting (not yet provided in CMSIS). --- platform/source/mbed_retarget.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/platform/source/mbed_retarget.cpp b/platform/source/mbed_retarget.cpp index 28201757a20..cf6dd3c0b76 100644 --- a/platform/source/mbed_retarget.cpp +++ b/platform/source/mbed_retarget.cpp @@ -15,6 +15,11 @@ * limitations under the License.
*/ +// Workaround for CMSIS 5.8.0, compat header must be placed before any CMSIS header inclusion +#if defined(__ARMCC_VERSION) +# include <arm_compat.h> +#endif + #include #include #include "platform/platform.h" @@ -53,7 +58,6 @@ struct DIR_impl { }; #if defined(__ARMCC_VERSION) -# include <arm_compat.h> # include # include # include From 38ca4bdce40a61541b6863799874f5b38b821267 Mon Sep 17 00:00:00 2001 From: Martin Kojtal Date: Mon, 12 Jul 2021 16:13:25 +0100 Subject: [PATCH 08/16] semihosting: add compat header where it is required ARMCC provides __semihost via the compat header. As CMSIS 5.8.0 removed this compat header inclusion, we need to include it explicitly to fix the missing-definition error. --- platform/source/LocalFileSystem.cpp | 4 ++++ platform/source/mbed_semihost_api.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/platform/source/LocalFileSystem.cpp b/platform/source/LocalFileSystem.cpp index 3a5a09cfa85..3a0ec41be0e 100644 --- a/platform/source/LocalFileSystem.cpp +++ b/platform/source/LocalFileSystem.cpp @@ -14,6 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +#if defined(__ARMCC_VERSION) +#include <arm_compat.h> +#endif + #include "platform/LocalFileSystem.h" #if DEVICE_LOCALFILESYSTEM diff --git a/platform/source/mbed_semihost_api.c b/platform/source/mbed_semihost_api.c index f88c1fbf51a..2ee681b9386 100644 --- a/platform/source/mbed_semihost_api.c +++ b/platform/source/mbed_semihost_api.c @@ -14,6 +14,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +#if defined(__ARMCC_VERSION) +#include <arm_compat.h> +#endif + #include "cmsis.h" #include "platform/mbed_semihost_api.h" From 00580ce3f5d64b76342b7f26deed55e842056ea0 Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 19 Jul 2021 10:47:21 +0100 Subject: [PATCH 09/16] cmsis: fix rtx_def inclusion in .S files for GCC Arm rtx_def includes two CMSIS headers that pull in C/C++ headers in our case. As I found out, they should only define macros. We can fix it, but that will require some refactoring, as our targets use mbed rtx headers to define the heap using the stdint header, plus some other offenders. The workaround (sketched below) is to exclude the headers we do not need in the irq assembly files.
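In isolation, the guard added to each IRQ assembly file looks like this (a sketch of the pattern repeated in the hunks below; RTX_CONFIG_H_ is assumed to match the include guard used by RTX_Config.h):

    // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files
    #define RTX_CONFIG_H_   // pre-claim the RTX_Config.h include guard so that header is skipped
    #undef _RTE_            // keep rtx_def.h from pulling in RTE_Components.h
    #include "rtx_def.h"    // only the plain macro definitions are now visible to the assembler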
Tracking issue https://github.com/ARMmbed/mbed-os/issues/14962 --- .../CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S | 3 +++ .../CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S | 3 +++ .../RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S | 3 +++ .../CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S | 3 +++ .../RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S | 3 +++ .../RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S | 3 +++ 6 files changed, 18 insertions(+) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S index 8cdc84aee65..74f7d362aaa 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0/irq_cm0.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S index 8cdc84aee65..74f7d362aaa 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M0P/irq_cm0.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" .equ I_T_RUN_OFS, 20 // osRtxInfo.thread.run offset diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S index 4a6a33ca191..deee5b9850c 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M23/irq_armv8mbl.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" #ifndef DOMAIN_NS diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S index ae2a87b5982..c0a47974347 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M3/irq_cm3.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" #if (defined(__ARM_FP) && (__ARM_FP > 0)) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S index 0883c6bd510..fdd99238151 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_M33/irq_armv8mml.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" #ifndef DOMAIN_NS diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S
b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S index ae2a87b5982..c0a47974347 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_GCC/TARGET_RTOS_M4_M7/irq_cm4f.S @@ -26,6 +26,9 @@ .syntax unified + // Mbed OS patch: Exclude RTE_Components.h and RTX_Config.h inclusion in .S files + #define RTX_CONFIG_H_ + #undef _RTE_ #include "rtx_def.h" #if (defined(__ARM_FP) && (__ARM_FP > 0)) From dca1d5c42e4535c9d682afa1cec1b21056d650fa Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 19 Jul 2021 11:01:45 +0100 Subject: [PATCH 10/16] cmsis: fix armcc compat header redefinition of enable/disable irq The fix will be in CMSIS 5.8.1; we cherry-pick it into our imported version. See https://github.com/ARM-software/CMSIS_5/commit/e797cca3f42bef19a2c33b3b929f6010ddf53580 --- cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h index e64eba93544..1b0adb3b5be 100644 --- a/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h +++ b/cmsis/CMSIS_5/CMSIS/TARGET_CORTEX_A/Include/cmsis_armclang.h @@ -373,20 +373,24 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) \details Enables IRQ interrupts by clearing the I-bit in the CPSR. Can only be executed in Privileged modes. */ +#ifndef __ARM_COMPAT_H __STATIC_FORCEINLINE void __enable_irq(void) { __ASM volatile ("cpsie i" : : : "memory"); } +#endif /** \brief Disable IRQ Interrupts \details Disables IRQ interrupts by setting the I-bit in the CPSR. Can only be executed in Privileged modes. */ +#ifndef __ARM_COMPAT_H __STATIC_FORCEINLINE void __disable_irq(void) { __ASM volatile ("cpsid i" : : : "memory"); } +#endif /** \brief Enable FIQ From c0187712b185be85e5f30c504d204232237ed34f Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 19 Jul 2021 14:21:26 +0100 Subject: [PATCH 11/16] musca targets: use IsException instead of removed IsIrqMode --- targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/gpio_api_ns.c | 2 +- targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/pinmap_ns.c | 4 ++-- targets/TARGET_ARM_SSG/TARGET_MUSCA_S1/gpio_api_ns.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/gpio_api_ns.c b/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/gpio_api_ns.c index 42faf1bff0d..776fbd8910e 100644 --- a/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/gpio_api_ns.c +++ b/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/gpio_api_ns.c @@ -37,7 +37,7 @@ #define IRQ_MODE_CHECK(is_func_void) \ /* Secure service can't be called in interrupt context. */ \ - if (IsIrqMode()) { \ + if (IsException()) { \ MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, \ MBED_ERROR_INVALID_OPERATION), \ "GPIO secure service can't be called in interrupt context\n"); \ diff --git a/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/pinmap_ns.c b/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/pinmap_ns.c index 77b396c833a..c77b3712eef 100644 --- a/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/pinmap_ns.c +++ b/targets/TARGET_ARM_SSG/TARGET_MUSCA_B1/pinmap_ns.c @@ -81,7 +81,7 @@ void pin_function(PinName pin, int function) MBED_ASSERT(pin != NC); /* Secure service can't be called in interrupt context.
*/ - if (IsIrqMode()) { + if (IsException()) { MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, MBED_ERROR_INVALID_OPERATION), "Pin secure service can't be called in interrupt context\n"); @@ -127,7 +127,7 @@ void pin_mode(PinName pin, PinMode mode) MBED_ASSERT(pin != NC); /* Secure service can't be called in interrupt context. */ - if (IsIrqMode()) { + if (IsException()) { MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, MBED_ERROR_INVALID_OPERATION), "Pin secure service can't be called in interrupt context\n"); diff --git a/targets/TARGET_ARM_SSG/TARGET_MUSCA_S1/gpio_api_ns.c b/targets/TARGET_ARM_SSG/TARGET_MUSCA_S1/gpio_api_ns.c index aa09bc18dbf..c7a60441333 100644 --- a/targets/TARGET_ARM_SSG/TARGET_MUSCA_S1/gpio_api_ns.c +++ b/targets/TARGET_ARM_SSG/TARGET_MUSCA_S1/gpio_api_ns.c @@ -37,7 +37,7 @@ #define IRQ_MODE_CHECK(is_func_void) \ /* Secure service can't be called in interrupt context. */ \ - if (IsIrqMode()) { \ + if (IsException()) { \ MBED_WARNING(MBED_MAKE_ERROR(MBED_MODULE_HAL, \ MBED_ERROR_INVALID_OPERATION), \ "GPIO secure service can't be called in interrupt context\n"); \ From c122158d496d59fbd723bdb17bbc7bd5cc95245e Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 19 Jul 2021 14:21:26 +0100 Subject: [PATCH 12/16] cmsis: preprocess irq files We use the preprocessor for asm files even for Armcc. If a symbol is defined, it is replaced by the preprocessor, so the assembler would just see 1 or 0 in this case and errors: TARGET_M33\\irq_armv8mml.S", line 31: Error: A1185E: Symbol missing Use the preprocessor guards instead. --- .../RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S | 4 ++-- .../RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S | 4 ++-- .../RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S | 8 ++++---- .../RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S | 4 ++-- .../RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S | 8 ++++---- .../RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S | 4 ++-- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S index 602a8186ef6..a822002f285 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0/irq_cm0.S @@ -24,9 +24,9 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK RTX_STACK_CHECK EQU 0 - ENDIF +#endif I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S index 602a8186ef6..a822002f285 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M0P/irq_cm0.S @@ -24,9 +24,9 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK RTX_STACK_CHECK EQU 0 - ENDIF +#endif I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SP_OFS EQU 56 ; TCB.SP offset diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S index d7bfd999804..6d932be46ba 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M23/irq_armv8mbl.S @@ -24,13 +24,13 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK
RTX_STACK_CHECK EQU 0 - ENDIF +#endif - IF :LNOT::DEF:DOMAIN_NS +#ifndef DOMAIN_NS DOMAIN_NS EQU 0 - ENDIF +#endif I_T_RUN_OFS EQU 20 ; osRtxInfo.thread.run offset TCB_SM_OFS EQU 48 ; TCB.stack_mem offset diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S index 88f545766ea..0c8148f978b 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M3/irq_cm3.S @@ -24,9 +24,9 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK RTX_STACK_CHECK EQU 0 - ENDIF +#endif IF ({FPU}="FPv4-SP") FPU_USED EQU 1 diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S index 984dd2d1969..dd967edca2e 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_M33/irq_armv8mml.S @@ -24,13 +24,13 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK RTX_STACK_CHECK EQU 0 - ENDIF +#endif - IF :LNOT::DEF:DOMAIN_NS +#ifndef DOMAIN_NS DOMAIN_NS EQU 0 - ENDIF +#endif IF ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16") FPU_USED EQU 1 diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S index 88f545766ea..0c8148f978b 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S @@ -24,9 +24,9 @@ ; */ - IF :LNOT::DEF:RTX_STACK_CHECK +#ifndef RTX_STACK_CHECK RTX_STACK_CHECK EQU 0 - ENDIF +#endif IF ({FPU}="FPv4-SP") FPU_USED EQU 1 From 4822ac56a12e96a4533f5461885363fc7051d27c Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 19 Jul 2021 15:50:27 +0100 Subject: [PATCH 13/16] cypress: fix cy_syslib breakpoint undefined CMSIS 5.8.0 requires armcc to include the compat header prior to any other header. See the known issues for the release. --- .../TARGET_PSOC6/mtb-pdl-cat1/drivers/source/cy_syslib.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/targets/TARGET_Cypress/TARGET_PSOC6/mtb-pdl-cat1/drivers/source/cy_syslib.c b/targets/TARGET_Cypress/TARGET_PSOC6/mtb-pdl-cat1/drivers/source/cy_syslib.c index 0842c41cc25..aa36903999b 100644 --- a/targets/TARGET_Cypress/TARGET_PSOC6/mtb-pdl-cat1/drivers/source/cy_syslib.c +++ b/targets/TARGET_Cypress/TARGET_PSOC6/mtb-pdl-cat1/drivers/source/cy_syslib.c @@ -21,6 +21,9 @@ * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ +#if defined(__ARMCC_VERSION) +#include <arm_compat.h> +#endif #include "cy_device.h" From 180eb75f097672ca51b7c985dfe1292736e5644c Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Mon, 26 Jul 2021 15:23:45 +0100 Subject: [PATCH 14/16] cmsis: patch fpu in asm for armcc5 For the FPU, use armasm's {FPU} setting to drive FPU selection. This will be fixed upstream in the next version of CMSIS. Meanwhile, we use our local patch.
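The relaxed guard (a sketch mirroring the hunk below) simply accepts every FPU name armasm may pass for these Cortex-M4/M7 parts:

        IF ({FPU}="FPv4-SP") || ({FPU}="VFPv4_SP_D16") || ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16")
    FPU_USED EQU 1
        ELSE
    FPU_USED EQU 0
        ENDIF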
Taken from --cpu selection for armasm:
--cpu           | {FPU}
Cortex-M4.fp.sp | VFPv4_SP_D16
Cortex-M7.fp.sp | FPv5-SP
Cortex-M7.fp.dp | FPv5_D16
Tracking issue: https://github.com/ARM-software/CMSIS_5/issues/1266 --- .../RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S index 0c8148f978b..4905a35b73b 100644 --- a/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S +++ b/cmsis/CMSIS_5/CMSIS/RTOS2/RTX/Source/TOOLCHAIN_ARM/TARGET_RTOS_M4_M7/irq_cm4f.S @@ -28,7 +28,7 @@ RTX_STACK_CHECK EQU 0 #endif - IF ({FPU}="FPv4-SP") + IF ({FPU}="FPv4-SP") || ({FPU}="VFPv4_SP_D16") || ({FPU}="FPv5-SP") || ({FPU}="FPv5_D16") FPU_USED EQU 1 ELSE FPU_USED EQU 0 From 5e29db6e30bda4afb66f2138a508213458510bc8 Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Tue, 27 Jul 2021 12:49:28 +0100 Subject: [PATCH 15/16] rtos: fix delay with 0 Fixes an osErrorParameter error that triggers our assert. RTX delay ignored 0 previously; it now returns osErrorParameter instead. --- rtos/source/ThisThread.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/rtos/source/ThisThread.cpp b/rtos/source/ThisThread.cpp index b5c00372d75..8b8bf3b5ba4 100644 --- a/rtos/source/ThisThread.cpp +++ b/rtos/source/ThisThread.cpp @@ -221,9 +221,12 @@ void ThisThread::sleep_for(uint32_t millisec) void ThisThread::sleep_for(Clock::duration_u32 rel_time) { #if MBED_CONF_RTOS_PRESENT - osStatus_t status = osDelay(rel_time.count()); - MBED_ASSERT(status == osOK); - (void) status; + uint32_t delay = rel_time.count(); + if (delay != 0) { + osStatus_t status = osDelay(delay); + MBED_ASSERT(status == osOK); + (void) status; + } #else thread_sleep_for(rel_time.count()); #endif From 049d1f145d23ae7f66bfb7caaf457dbf4f8c0ac2 Mon Sep 17 00:00:00 2001 From: Martin Kojtal <--global> Date: Fri, 30 Jul 2021 08:35:04 +0100 Subject: [PATCH 16/16] cmsis importer: add commits for Mbed OS changes See the specific SHAs for details. --- tools/importer/cmsis_importer.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/importer/cmsis_importer.json b/tools/importer/cmsis_importer.json index 3da3d477356..fa81fce7522 100644 --- a/tools/importer/cmsis_importer.json +++ b/tools/importer/cmsis_importer.json @@ -146,7 +146,9 @@ "commit_sha" : [ "4360b7bbf815c4d812005938c9c27af199803a97", "fb354752eb69403ad503c8e53da67da6483776d6", - "08ab8cc47d8722bf0c767990cd615cf1c427d006" + "08ab8cc47d8722bf0c767990cd615cf1c427d006", + "00580ce3f5d64b76342b7f26deed55e842056ea0", + "c122158d496d59fbd723bdb17bbc7bd5cc95245e" ] }