1 /***************************************************************************//**
2  * @file
3  * @brief Core API implemented for CortexM
4  *******************************************************************************
5  * # License
6  * <b>Copyright 2024 Silicon Laboratories Inc. www.silabs.com</b>
7  *******************************************************************************
8  *
9  * SPDX-License-Identifier: Zlib
10  *
11  * The licensor of this software is Silicon Laboratories Inc.
12  *
13  * This software is provided 'as-is', without any express or implied
14  * warranty. In no event will the authors be held liable for any damages
15  * arising from the use of this software.
16  *
17  * Permission is granted to anyone to use this software for any purpose,
18  * including commercial applications, and to alter it and redistribute it
19  * freely, subject to the following restrictions:
20  *
21  * 1. The origin of this software must not be misrepresented; you must not
22  *    claim that you wrote the original software. If you use this software
23  *    in a product, an acknowledgment in the product documentation would be
24  *    appreciated but is not required.
25  * 2. Altered source versions must be plainly marked as such, and must not be
26  *    misrepresented as being the original software.
27  * 3. This notice may not be removed or altered from any source distribution.
28  *
29  ******************************************************************************/
30 
31 #include "sl_core.h"
32 #include "sl_core_config.h"
33 #include "sl_common.h"
34 #include "em_device.h"
35 
36 /**************************************************************************//**
37  * @addtogroup sl_core
38  * @{
39  *****************************************************************************/
40 
41 /*******************************************************************************
42  **************************   STRUCTS   ****************************************
43  ******************************************************************************/
/// A Cycle Counter Instance.
///
/// Tracks DWT CYCCNT readings for one category of interrupts-masked section
/// (atomic or critical) so the worst-case masked duration can be reported.
typedef struct {
  uint32_t start;    /*!< Cycle counter at start of recording. */
  uint32_t cycles;   /*!< Cycles elapsed in last recording. */
  uint32_t max;      /*!< Max recorded cycles since last reset or init. */
} dwt_cycle_counter_handle_t;
50 
51 /*******************************************************************************
52  ***************************   LOCAL VARIABLES   *******************************
53  ******************************************************************************/
54 
55 /** @cond DO_NOT_INCLUDE_WITH_DOXYGEN */
56 
#if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
// cycle counter to record atomic sections
// NOTE(review): declared in the "LOCAL VARIABLES" section but has external
// linkage — consider `static` unless another translation unit references it.
dwt_cycle_counter_handle_t atomic_cycle_counter   = { 0 };
// cycle counter to record critical sections
dwt_cycle_counter_handle_t critical_cycle_counter = { 0 };
#endif
63 
64 /** @endcond */
65 
66 /*******************************************************************************
67  ***************************   LOCAL FUNCTIONS   *******************************
68  ******************************************************************************/
69 
#if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
// Record the current DWT cycle count as the start of a masked section.
static void cycle_counter_start(dwt_cycle_counter_handle_t *handle);
// Compute cycles elapsed since start and update the running maximum.
static void cycle_counter_stop(dwt_cycle_counter_handle_t *handle);
#endif
74 
75 /*******************************************************************************
76  **************************   GLOBAL FUNCTIONS   *******************************
77  ******************************************************************************/
78 
79 /***************************************************************************//**
80  * @brief
81  *   Disable interrupts.
82  ******************************************************************************/
CORE_CriticalDisableIrq(void)83 SL_WEAK void CORE_CriticalDisableIrq(void)
84 {
85   __disable_irq();
86 }
87 
88 /***************************************************************************//**
89  * @brief
90  *   Enable interrupts.
91  * @note
92  *   __ISB() makes sure pending interrupts are executed before returning.
93  *   This can be a problem if the first instruction after changing the BASEPRI
94  *   or PRIMASK assumes that the pending interrupts have already been processed.
95  ******************************************************************************/
CORE_CriticalEnableIrq(void)96 SL_WEAK void CORE_CriticalEnableIrq(void)
97 {
98   __enable_irq();
99   __ISB();
100 }
101 
102 /***************************************************************************//**
103  * @brief
104  *   Enter a CRITICAL section.
105  ******************************************************************************/
CORE_EnterCritical(void)106 SL_WEAK CORE_irqState_t CORE_EnterCritical(void)
107 {
108   CORE_irqState_t irqState = __get_PRIMASK();
109   __disable_irq();
110 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
111   if (irqState == 0U) {
112     cycle_counter_start(&critical_cycle_counter);
113   }
114 #endif
115   return irqState;
116 }
117 
118 /***************************************************************************//**
119  * @brief
120  *   Exit a CRITICAL section.
121  * @note
122  *   __ISB() makes sure pending interrupts are executed before returning.
123  *   This can be a problem if the first instruction after changing the BASEPRI
124  *   or PRIMASK assumes that the pending interrupts have already been processed.
125  ******************************************************************************/
CORE_ExitCritical(CORE_irqState_t irqState)126 SL_WEAK void CORE_ExitCritical(CORE_irqState_t irqState)
127 {
128   if (irqState == 0U) {
129 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
130     cycle_counter_stop(&critical_cycle_counter);
131 #endif
132     __enable_irq();
133     __ISB();
134   }
135 }
136 
137 /***************************************************************************//**
138  * @brief
139  *   Brief interrupt enable/disable sequence to allow handling of
140  *   pending interrupts.
141  ******************************************************************************/
CORE_YieldCritical(void)142 SL_WEAK void CORE_YieldCritical(void)
143 {
144   if ((__get_PRIMASK() & 1U) != 0U) {
145     __enable_irq();
146     __ISB();
147     __disable_irq();
148   }
149 }
150 
151 /***************************************************************************//**
152  * @brief
153  *   Disable interrupts.
154  ******************************************************************************/
CORE_AtomicDisableIrq(void)155 SL_WEAK void CORE_AtomicDisableIrq(void)
156 {
157 #ifndef __CM0PLUS_REV
158   __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8UL - __NVIC_PRIO_BITS));
159 #else
160   __disable_irq();
161 #endif
162 }
163 
164 /***************************************************************************//**
165  * @brief
166  *   Enable interrupts.
167  * @note
168  *   __ISB() makes sure pending interrupts are executed before returning.
169  *   This can be a problem if the first instruction after changing the BASEPRI
170  *   or PRIMASK assumes that the pending interrupts have already been processed.
171  ******************************************************************************/
CORE_AtomicEnableIrq(void)172 SL_WEAK void CORE_AtomicEnableIrq(void)
173 {
174 #ifndef __CM0PLUS_REV
175   __set_BASEPRI(0);
176 #else
177   __enable_irq();
178 #endif
179   __ISB();
180 }
181 
182 /***************************************************************************//**
183  * @brief
184  *   Enter an ATOMIC section.
185  ******************************************************************************/
CORE_EnterAtomic(void)186 SL_WEAK CORE_irqState_t CORE_EnterAtomic(void)
187 {
188 #ifndef __CM0PLUS_REV
189   CORE_irqState_t irqState = __get_BASEPRI();
190   __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS));
191 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
192   if ((irqState & (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS)))
193       != (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
194     cycle_counter_start(&atomic_cycle_counter);
195   }
196 #endif
197   return irqState;
198 #else
199   CORE_irqState_t irqState = __get_PRIMASK();
200   __disable_irq();
201 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
202   if (irqState == 0U) {
203     cycle_counter_start(&critical_cycle_counter);
204   }
205 #endif
206   return irqState;
207 #endif
208 }
209 
210 /***************************************************************************//**
211  * @brief
212  *   Exit an ATOMIC section.
213  * @note
214  *   __ISB() makes sure pending interrupts are executed before returning.
215  *   This can be a problem if the first instruction after changing the BASEPRI
216  *   or PRIMASK assumes that the pending interrupts have already been processed.
217  ******************************************************************************/
CORE_ExitAtomic(CORE_irqState_t irqState)218 SL_WEAK void CORE_ExitAtomic(CORE_irqState_t irqState)
219 {
220 #ifndef __CM0PLUS_REV
221 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
222   if ((irqState & (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS)))
223       != (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
224     cycle_counter_stop(&atomic_cycle_counter);
225   }
226 #endif
227   __set_BASEPRI(irqState);
228   __ISB();
229 #else
230   if (irqState == 0U) {
231 #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
232     cycle_counter_stop(&critical_cycle_counter);
233 #endif
234     __enable_irq();
235     __ISB();
236   }
237 #endif
238 }
239 
240 /***************************************************************************//**
241  * @brief
242  *   Brief interrupt enable/disable sequence to allow handling of
243  *   pending interrupts.
244  ******************************************************************************/
CORE_YieldAtomic(void)245 SL_WEAK void CORE_YieldAtomic(void)
246 {
247 #ifndef __CM0PLUS_REV
248   CORE_irqState_t basepri = __get_BASEPRI();
249   if (basepri >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
250     __set_BASEPRI(0);
251     __ISB();
252     __set_BASEPRI(basepri);
253   }
254 #else
255   if ((__get_PRIMASK() & 1U) != 0U) {
256     __enable_irq();
257     __ISB();
258     __disable_irq();
259   }
260 #endif
261 }
262 
263 /***************************************************************************//**
264  * @brief
265  *   Check whether the current CPU operation mode is handler mode.
266  ******************************************************************************/
CORE_InIrqContext(void)267 SL_WEAK bool CORE_InIrqContext(void)
268 {
269   return (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0U;
270 }
271 
272 /***************************************************************************//**
273  * @brief
274  *   Check if interrupts are disabled.
275  ******************************************************************************/
CORE_IrqIsDisabled(void)276 SL_WEAK bool CORE_IrqIsDisabled(void)
277 {
278 #ifndef __CM0PLUS_REV
279   return ((__get_PRIMASK() & 1U) == 1U)
280          || (__get_BASEPRI() >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL
281                                  << (8U - __NVIC_PRIO_BITS)));
282 #else
283   return (__get_PRIMASK() & 1U == 1U);
284 #endif
285 }
286 
#if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
/***************************************************************************//**
 * @brief
 *   Start a recording.
 *
 * @param[in] handle
 *   Pointer to initialized counter handle.
 *
 * @note SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING must be enabled.
 ******************************************************************************/
static void cycle_counter_start(dwt_cycle_counter_handle_t *handle)
{
  // Snapshot the free-running DWT cycle counter at section entry.
  handle->start = DWT->CYCCNT;
}
#endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
302 
#if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
/***************************************************************************//**
 * @brief
 *   Stop a recording.
 *
 *   Stores the cycles elapsed since cycle_counter_start() and updates the
 *   running maximum when this recording is the longest seen so far.
 *
 * @param[in] handle
 *   Pointer to initialized counter handle.
 *
 * @note SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING must be enabled.
 ******************************************************************************/
static void cycle_counter_stop(dwt_cycle_counter_handle_t *handle)
{
  // Unsigned subtraction stays correct across a single CYCCNT wrap-around.
  uint32_t elapsed = DWT->CYCCNT - handle->start;

  handle->cycles = elapsed;
  if (elapsed > handle->max) {
    handle->max = elapsed;
  }
}
#endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
322 
323 /***************************************************************************//**
324  * @brief
325  *   Returns the max time spent in critical section.
326  ******************************************************************************/
CORE_get_max_time_critical_section(void)327 uint32_t CORE_get_max_time_critical_section(void)
328 {
329   #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
330   return critical_cycle_counter.max;
331   #else
332   return 0U;
333   #endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
334 }
335 
336 /***************************************************************************//**
337  * @brief
338  *   Returns the max time spent in atomic section.
339  ******************************************************************************/
CORE_get_max_time_atomic_section(void)340 uint32_t CORE_get_max_time_atomic_section(void)
341 {
342   #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
343   return atomic_cycle_counter.max;
344   #else
345   return 0U;
346   #endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
347 }
348 
349 /***************************************************************************//**
350  * @brief
351  *   Clears the max time spent in atomic section.
352  ******************************************************************************/
CORE_clear_max_time_critical_section(void)353 void CORE_clear_max_time_critical_section(void)
354 {
355   #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
356   critical_cycle_counter.max = 0;
357   #endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
358 }
359 
360 /***************************************************************************//**
361  * @brief
362  *   Clears the max time spent in atomic section.
363  ******************************************************************************/
CORE_clear_max_time_atomic_section(void)364 void CORE_clear_max_time_atomic_section(void)
365 {
366   #if (SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
367   atomic_cycle_counter.max = 0;
368   #endif //(SL_CORE_DEBUG_INTERRUPTS_MASKED_TIMING == 1)
369 }
370 
371 /***************************************************************************//**
372  * @brief
373  *   Reset chip routine.
374  ******************************************************************************/
CORE_ResetSystem(void)375 void CORE_ResetSystem(void)
376 {
377   // Ensure all outstanding memory accesses including buffered writes are
378   // completed before reset
379   __DSB();
380 
381   // Keep priority group unchanged
382   SCB->AIRCR  = (0x5FAUL << SCB_AIRCR_VECTKEY_Pos)
383                 | (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk)
384                 | SCB_AIRCR_SYSRESETREQ_Msk;
385 
386   // Ensure completion of memory access
387   __DSB();
388 
389   // Wait until reset
390   for (;; ) {
391     __NOP();
392   }
393 }
394 
395 /** @} (end addtogroup sl_core) */
396