1 /***************************************************************************//**
2 * @file
3 * @brief Core interrupt handling API
4 *******************************************************************************
5 * # License
6 * <b>Copyright 2018 Silicon Laboratories Inc. www.silabs.com</b>
7 *******************************************************************************
8 *
9 * SPDX-License-Identifier: Zlib
10 *
11 * The licensor of this software is Silicon Laboratories Inc.
12 *
13 * This software is provided 'as-is', without any express or implied
14 * warranty. In no event will the authors be held liable for any damages
15 * arising from the use of this software.
16 *
17 * Permission is granted to anyone to use this software for any purpose,
18 * including commercial applications, and to alter it and redistribute it
19 * freely, subject to the following restrictions:
20 *
21 * 1. The origin of this software must not be misrepresented; you must not
22 * claim that you wrote the original software. If you use this software
23 * in a product, an acknowledgment in the product documentation would be
24 * appreciated but is not required.
25 * 2. Altered source versions must be plainly marked as such, and must not be
26 * misrepresented as being the original software.
27 * 3. This notice may not be removed or altered from any source distribution.
28 *
29 ******************************************************************************/
30 #include "em_core.h"
31 #include "sl_assert.h"
32
33 /* *INDENT-OFF* */
34 // *****************************************************************************
35 /// @addtogroup core CORE - Core Interrupt
36 ///
37 ///@n @section core_intro Introduction
38 ///
39 /// CORE interrupt API provides a simple and safe means
40 /// to disable and enable interrupts to protect sections of code.
41 ///
42 /// This is often referred to as "critical sections". This module provides
43 /// support for three types of critical sections, each with different interrupt
44 /// blocking capabilities.
45 ///
46 /// @li <b>CRITICAL</b> section: Inside a critical section, all interrupts are
47 /// disabled (except for fault handlers). The PRIMASK register is always used for
48 /// interrupt disable/enable.
49 /// @li <b>ATOMIC</b> section: This type of section is configurable and the default
50 /// method is to use PRIMASK. With BASEPRI configuration, interrupts with priority
51 /// equal to or lower than a given configurable level are disabled. The interrupt
52 /// disable priority level is defined at compile time. The BASEPRI register is not
53 /// available for all architectures.
/// @li <b>NVIC mask</b> section: Disable NVIC (external interrupts) on an
/// individual basis.
56 ///
57 /// em_core also has an API for manipulating RAM-based interrupt vector tables.
58 ///
59 ///@n @section core_conf Compile-time Configuration
60 ///
61 /// The following #defines are used to configure em_core:
62 /// @code{.c}
63 /// // The interrupt priority level used inside ATOMIC sections.
64 /// #define CORE_ATOMIC_BASE_PRIORITY_LEVEL 3
65 ///
66 /// // A method used for interrupt disable/enable within ATOMIC sections.
67 /// #define CORE_ATOMIC_METHOD CORE_ATOMIC_METHOD_PRIMASK
68 /// @endcode
69 ///
70 /// If the default values do not support your needs, they can be overridden
71 /// by supplying -D compiler flags on the compiler command line or by collecting
72 /// all macro redefinitions in a file named @em emlib_config.h and then supplying
73 /// -DEMLIB_USER_CONFIG on a compiler command line.
74 ///
75 /// @note The default emlib configuration for ATOMIC section interrupt disable
76 /// method is using PRIMASK, i.e., ATOMIC sections are implemented as
77 /// CRITICAL sections.
78 ///
79 /// @note Due to architectural limitations Cortex-M0+ devices do not support
80 /// ATOMIC type critical sections using the BASEPRI register. On M0+
81 /// devices ATOMIC section helper macros are available but they are
82 /// implemented as CRITICAL sections using PRIMASK register.
83 ///
84 ///@n @section core_macro_api Macro API
85 ///
86 /// The primary em_core API is the macro API. Macro API will map to correct
87 /// CORE functions according to the selected @ref CORE_ATOMIC_METHOD and similar
88 /// configurations (the full CORE API is of course also available).
89 /// The most useful macros are as follows:
90 ///
91 /// @ref CORE_DECLARE_IRQ_STATE @n @ref CORE_ENTER_ATOMIC() @n
92 /// @ref CORE_EXIT_ATOMIC()@n
93 /// Used together to implement an ATOMIC section.
94 /// @code{.c}
95 /// {
96 /// CORE_DECLARE_IRQ_STATE; // Storage for saving IRQ state prior to
97 /// // atomic section entry.
98 ///
99 /// CORE_ENTER_ATOMIC(); // Enter atomic section.
100 ///
101 /// ...
102 /// ... your code goes here ...
103 /// ...
104 ///
105 /// CORE_EXIT_ATOMIC(); // Exit atomic section, IRQ state is restored.
106 /// }
107 /// @endcode
108 ///
109 /// @n @ref CORE_ATOMIC_SECTION(yourcode)@n
110 /// A concatenation of all three macros above.
111 /// @code{.c}
112 /// {
113 /// CORE_ATOMIC_SECTION(
114 /// ...
115 /// ... your code goes here ...
116 /// ...
117 /// )
118 /// }
119 /// @endcode
120 ///
121 /// @n @ref CORE_DECLARE_IRQ_STATE @n @ref CORE_ENTER_CRITICAL() @n
122 /// @ref CORE_EXIT_CRITICAL() @n @ref CORE_CRITICAL_SECTION(yourcode)@n
123 /// These macros implement CRITICAL sections in a similar fashion as described
124 /// above for ATOMIC sections.
125 ///
126 /// @n @ref CORE_DECLARE_NVIC_STATE @n @ref CORE_ENTER_NVIC() @n
127 /// @ref CORE_EXIT_NVIC() @n @ref CORE_NVIC_SECTION(yourcode)@n
128 /// These macros implement NVIC mask sections in a similar fashion as described
129 /// above for ATOMIC sections. See @ref core_examples for an example.
130 ///
131 /// Refer to @em Macros or <em>Macro Definition Documentation</em> below for a
132 /// full list of macros.
133 ///
134 ///@n @section core_reimplementation API reimplementation
135 ///
136 /// Most of the functions in the API are implemented as weak functions. This means
137 /// that it is easy to reimplement when special needs arise. Shown below is a
138 /// reimplementation of CRITICAL sections suitable if FreeRTOS OS is used:
139 /// @code{.c}
140 /// CORE_irqState_t CORE_EnterCritical(void)
141 /// {
142 /// vPortEnterCritical();
143 /// return 0;
144 /// }
145 ///
146 /// void CORE_ExitCritical(CORE_irqState_t irqState)
147 /// {
148 /// (void)irqState;
149 /// vPortExitCritical();
150 /// }
151 /// @endcode
152 /// Also note that CORE_Enter/ExitCritical() are not implemented as inline
153 /// functions. As a result, reimplementations will be possible even when original
154 /// implementations are inside a linked library.
155 ///
156 /// Some RTOSes must be notified on interrupt handler entry and exit. Macros
157 /// @ref CORE_INTERRUPT_ENTRY() and @ref CORE_INTERRUPT_EXIT() are suitable
158 /// placeholders for inserting such code. Insert these macros in all your
159 /// interrupt handlers and then override the default macro implementations.
160 /// This is an example if uC/OS is used:
161 /// @code{.c}
162 /// // In emlib_config.h:
163 ///
164 /// #define CORE_INTERRUPT_ENTRY() OSIntEnter()
165 /// #define CORE_INTERRUPT_EXIT() OSIntExit()
166 /// @endcode
167 ///
168 ///@n @section core_vector_tables Interrupt vector tables
169 ///
170 /// When using RAM based interrupt vector tables it is the user's responsibility
171 /// to allocate the table space correctly. The tables must be aligned as specified
172 /// in the CPU reference manual.
173 ///
174 /// @ref CORE_InitNvicVectorTable()@n
175 /// Initialize a RAM based vector table by copying table entries from a source
176 /// vector table to a target table. VTOR is set to the address of the target
177 /// vector table.
178 ///
179 /// @n @ref CORE_GetNvicRamTableHandler() @n @ref CORE_SetNvicRamTableHandler()@n
180 /// Use these functions to get or set the interrupt handler for a specific IRQn.
181 /// They both use the interrupt vector table defined by the current
182 /// VTOR register value.
183 ///
184 ///@n @section core_max_timing Maximum Interrupt Disabled Time
185 ///
186 /// The maximum time spent (in cycles) in critical and atomic sections can be
187 /// measured for performance and interrupt latency analysis.
188 /// To enable the timings, use the SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING
189 /// configuration option. When enabled, the functions
190 /// @n @ref CORE_get_max_time_critical_section()
191 /// @n @ref CORE_get_max_time_atomic_section() @n
192 /// can be used to get the max timings since startup.
193 ///
194 ///@n @section core_examples Examples
195 ///
196 /// Implement an NVIC critical section:
197 /// @code{.c}
198 /// {
199 /// CORE_DECLARE_NVIC_ZEROMASK(mask); // A zero initialized NVIC disable mask
200 ///
201 /// // Set mask bits for IRQs to block in the NVIC critical section.
202 /// // In many cases, you can create the disable mask once upon application
203 /// // startup and use the mask globally throughout the application lifetime.
204 /// CORE_NvicMaskSetIRQ(LEUART0_IRQn, &mask);
205 /// CORE_NvicMaskSetIRQ(VCMP_IRQn, &mask);
206 ///
207 /// // Enter NVIC critical section with the disable mask
208 /// CORE_NVIC_SECTION(&mask,
209 /// ...
210 /// ... your code goes here ...
211 /// ...
212 /// )
213 /// }
214 /// @endcode
215 ///
216 ///@n @section core_porting Porting from em_int
217 ///
218 /// Existing code using INT_Enable() and INT_Disable() must be ported to the
/// em_core API. While em_int used a global counter to store the interrupt state,
220 /// em_core uses a local variable. Any usage of INT_Disable(), therefore, needs to
221 /// be replaced with a declaration of the interrupt state variable before entering
222 /// the critical section.
223 ///
224 /// Since the state variable is in local scope, the critical section exit
225 /// needs to occur within the scope of the variable. If multiple nested critical
226 /// sections are used, each needs to have its own state variable in its own scope.
227 ///
228 /// In many cases, completely disabling all interrupts using CRITICAL sections
229 /// might be more heavy-handed than needed. When porting, consider whether other
230 /// types of sections, such as ATOMIC or NVIC mask, can be used to only disable
231 /// a subset of the interrupts.
232 ///
233 /// Replacing em_int calls with em_core function calls:
234 /// @code{.c}
235 /// void func(void)
236 /// {
237 /// // INT_Disable();
238 /// CORE_DECLARE_IRQ_STATE;
239 /// CORE_ENTER_ATOMIC();
240 /// .
241 /// .
242 /// .
243 /// // INT_Enable();
244 /// CORE_EXIT_ATOMIC();
245 /// }
246 /// @endcode
247 /// @{
248 // *****************************************************************************
249 /* *INDENT-ON* */
250
251 /*******************************************************************************
252 ******************************* DEFINES ***********************************
253 ******************************************************************************/
254
#if !defined(CORE_INTERRUPT_ENTRY)
// Some RTOSes must be notified on interrupt entry (and exit).
// Use this macro at the start of all your interrupt handlers.
// Reimplement the macro in emlib_config.h to suit the needs of your RTOS.
/** Placeholder for optional interrupt handler entry code. This might be needed
 *  when working with an RTOS. Expands to nothing by default. */
#define CORE_INTERRUPT_ENTRY()
#endif

#if !defined(CORE_INTERRUPT_EXIT)
/** Placeholder for optional interrupt handler exit code. This might be needed
 *  when working with an RTOS. Expands to nothing by default. */
#define CORE_INTERRUPT_EXIT()
#endif

// Compile time sanity check: CORE_ATOMIC_METHOD must be one of the two
// strategies this module implements (PRIMASK or BASEPRI).
#if (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_PRIMASK) \
  && (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_BASEPRI)
#error "em_core: Undefined ATOMIC IRQ handling strategy."
#endif
275
276 /*******************************************************************************
277 ************************** STRUCTS ****************************************
278 ******************************************************************************/
/** A Cycle Counter Instance. Used to track the maximum time spent (in CPU
 *  cycles) inside critical/atomic sections when interrupt-disabled timing
 *  measurement is enabled. */
typedef struct {
  uint32_t start;    /*!< Cycle counter at start of recording. */
  uint32_t cycles;   /*!< Cycles elapsed in last recording. */
  uint32_t max;      /*!< Max recorded cycles since last reset or init. */
} dwt_cycle_counter_handle_t;
285
286 /*******************************************************************************
287 *************************** LOCAL VARIABLES *******************************
288 ******************************************************************************/
289
/** @cond DO_NOT_INCLUDE_WITH_DOXYGEN */

#if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
// Cycle counter used to record time spent inside ATOMIC sections.
dwt_cycle_counter_handle_t atomic_cycle_counter = { 0 };
// Cycle counter used to record time spent inside CRITICAL sections.
dwt_cycle_counter_handle_t critical_cycle_counter = { 0 };
#endif

/** @endcond */
300
301 /*******************************************************************************
302 *************************** LOCAL FUNCTIONS *******************************
303 ******************************************************************************/
304
#if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
// Start timing an interrupt-disabled interval on the given counter.
static void cycle_counter_start(dwt_cycle_counter_handle_t *handle);
// Stop timing and update the counter's running maximum.
static void cycle_counter_stop(dwt_cycle_counter_handle_t *handle);
#endif
309
310 /*******************************************************************************
311 ************************** GLOBAL FUNCTIONS *******************************
312 ******************************************************************************/
313
314 /***************************************************************************//**
315 * @brief
316 * Disable interrupts.
317 *
318 * Disable all interrupts by setting PRIMASK.
319 * (Fault exception handlers will still be enabled).
320 ******************************************************************************/
CORE_CriticalDisableIrq(void)321 SL_WEAK void CORE_CriticalDisableIrq(void)
322 {
323 __disable_irq();
324 }
325
326 /***************************************************************************//**
327 * @brief
328 * Enable interrupts.
329 *
330 * Enable interrupts by clearing PRIMASK.
331 ******************************************************************************/
CORE_CriticalEnableIrq(void)332 SL_WEAK void CORE_CriticalEnableIrq(void)
333 {
334 __enable_irq();
335 }
336
337 /***************************************************************************//**
338 * @brief
339 * Enter a CRITICAL section.
340 *
341 * When a CRITICAL section is entered, all interrupts (except fault handlers)
342 * are disabled.
343 *
344 * @return
345 * The value of PRIMASK register prior to the CRITICAL section entry.
346 ******************************************************************************/
CORE_EnterCritical(void)347 SL_WEAK CORE_irqState_t CORE_EnterCritical(void)
348 {
349 CORE_irqState_t irqState = __get_PRIMASK();
350 __disable_irq();
351 if (irqState == 0U) {
352 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
353 cycle_counter_start(&critical_cycle_counter);
354 #endif
355 }
356 return irqState;
357 }
358
359 /***************************************************************************//**
360 * @brief
361 * Exit a CRITICAL section.
362 *
363 * @param[in] irqState
364 * The interrupt priority blocking level to restore to PRIMASK when exiting
365 * the CRITICAL section. This value is usually the one returned by a prior
366 * call to @ref CORE_EnterCritical().
367 ******************************************************************************/
CORE_ExitCritical(CORE_irqState_t irqState)368 SL_WEAK void CORE_ExitCritical(CORE_irqState_t irqState)
369 {
370 if (irqState == 0U) {
371 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
372 cycle_counter_stop(&critical_cycle_counter);
373 #endif
374 __enable_irq();
375 }
376 }
377
378 /***************************************************************************//**
379 * @brief
380 * Brief interrupt enable/disable sequence to allow handling of
381 * pending interrupts.
382 *
383 * @note
384 * Usually used within a CRITICAL section.
385 ******************************************************************************/
CORE_YieldCritical(void)386 SL_WEAK void CORE_YieldCritical(void)
387 {
388 if ((__get_PRIMASK() & 1U) != 0U) {
389 __enable_irq();
390 __ISB();
391 __disable_irq();
392 }
393 }
394
395 /***************************************************************************//**
396 * @brief
397 * Disable interrupts.
398 *
399 * Disable interrupts with a priority lower or equal to
400 * @ref CORE_ATOMIC_BASE_PRIORITY_LEVEL. Sets core BASEPRI register
401 * to CORE_ATOMIC_BASE_PRIORITY_LEVEL.
402 *
403 * @note
404 * If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
405 * function is identical to @ref CORE_CriticalDisableIrq().
406 ******************************************************************************/
CORE_AtomicDisableIrq(void)407 SL_WEAK void CORE_AtomicDisableIrq(void)
408 {
409 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
410 __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8UL - __NVIC_PRIO_BITS));
411 #else
412 __disable_irq();
413 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
414 }
415
416 /***************************************************************************//**
417 * @brief
418 * Enable interrupts.
419 *
420 * Enable interrupts by setting core BASEPRI register to 0.
421 *
422 * @note
423 * If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_BASEPRI and PRIMASK
424 * is set (CPU is inside a CRITICAL section), interrupts will still be
425 * disabled after calling this function.
426 *
427 * @note
428 * If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
429 * function is identical to @ref CORE_CriticalEnableIrq().
430 ******************************************************************************/
CORE_AtomicEnableIrq(void)431 SL_WEAK void CORE_AtomicEnableIrq(void)
432 {
433 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
434 __set_BASEPRI(0);
435 #else
436 __enable_irq();
437 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
438 }
439
440 /***************************************************************************//**
441 * @brief
442 * Enter an ATOMIC section.
443 *
444 * When an ATOMIC section is entered, interrupts with priority lower or equal
445 * to @ref CORE_ATOMIC_BASE_PRIORITY_LEVEL are disabled.
446 *
447 * @note
448 * If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
449 * function is identical to @ref CORE_EnterCritical().
450 *
451 * @return
452 * The value of BASEPRI register prior to ATOMIC section entry.
453 ******************************************************************************/
CORE_EnterAtomic(void)454 SL_WEAK CORE_irqState_t CORE_EnterAtomic(void)
455 {
456 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
457 CORE_irqState_t irqState = __get_BASEPRI();
458 __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS));
459 if ((irqState & (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS)))
460 != (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
461 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
462 cycle_counter_start(&atomic_cycle_counter);
463 #endif
464 }
465 return irqState;
466 #else
467 CORE_irqState_t irqState = __get_PRIMASK();
468 __disable_irq();
469 if (irqState == 0U) {
470 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
471 cycle_counter_start(&atomic_cycle_counter);
472 #endif
473 }
474 return irqState;
475 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
476 }
477
478 /***************************************************************************//**
479 * @brief
480 * Exit an ATOMIC section.
481 *
482 * @param[in] irqState
483 * The interrupt priority blocking level to restore to BASEPRI when exiting
484 * the ATOMIC section. This value is usually the one returned by a prior
485 * call to @ref CORE_EnterAtomic().
486 *
487 * @note
488 * If @ref CORE_ATOMIC_METHOD is set to @ref CORE_ATOMIC_METHOD_PRIMASK, this
489 * function is identical to @ref CORE_ExitCritical().
490 ******************************************************************************/
CORE_ExitAtomic(CORE_irqState_t irqState)491 SL_WEAK void CORE_ExitAtomic(CORE_irqState_t irqState)
492 {
493 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
494 if ((irqState & (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS)))
495 != (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
496 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
497 cycle_counter_stop(&atomic_cycle_counter);
498 #endif
499 }
500 __set_BASEPRI(irqState);
501 #else
502 if (irqState == 0U) {
503 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
504 cycle_counter_stop(&atomic_cycle_counter);
505 #endif
506 __enable_irq();
507 }
508 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
509 }
510
511 /***************************************************************************//**
512 * @brief
513 * Brief interrupt enable/disable sequence to allow handling of
514 * pending interrupts.
515 *
516 * @note
517 * Usully used within an ATOMIC section.
518 *
519 * @note
520 * If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
521 * function is identical to @ref CORE_YieldCritical().
522 ******************************************************************************/
CORE_YieldAtomic(void)523 SL_WEAK void CORE_YieldAtomic(void)
524 {
525 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
526 CORE_irqState_t basepri = __get_BASEPRI();
527 if (basepri >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8U - __NVIC_PRIO_BITS))) {
528 __set_BASEPRI(0);
529 __ISB();
530 __set_BASEPRI(basepri);
531 }
532 #else
533 if ((__get_PRIMASK() & 1U) != 0U) {
534 __enable_irq();
535 __ISB();
536 __disable_irq();
537 }
538 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
539 }
540
541 /***************************************************************************//**
542 * @brief
543 * Enter a NVIC mask section.
544 *
545 * When a NVIC mask section is entered, specified NVIC interrupts
546 * are disabled.
547 *
548 * @param[out] nvicState
549 * Return NVIC interrupts enable mask prior to section entry.
550 *
551 * @param[in] disable
552 * A mask specifying which NVIC interrupts to disable within the section.
553 ******************************************************************************/
CORE_EnterNvicMask(CORE_nvicMask_t * nvicState,const CORE_nvicMask_t * disable)554 void CORE_EnterNvicMask(CORE_nvicMask_t *nvicState,
555 const CORE_nvicMask_t *disable)
556 {
557 CORE_CRITICAL_SECTION(
558 *nvicState = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]);
559 *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = *disable;
560 )
561 }
562
563 /***************************************************************************//**
564 * @brief
565 * Disable NVIC interrupts.
566 *
567 * @param[in] disable
568 * A mask specifying which NVIC interrupts to disable.
569 ******************************************************************************/
CORE_NvicDisableMask(const CORE_nvicMask_t * disable)570 void CORE_NvicDisableMask(const CORE_nvicMask_t *disable)
571 {
572 CORE_CRITICAL_SECTION(
573 *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = *disable;
574 )
575 }
576
577 /***************************************************************************//**
578 * @brief
579 * Set current NVIC interrupt enable mask.
580 *
581 * @param[out] enable
582 * A mask specifying which NVIC interrupts are currently enabled.
583 ******************************************************************************/
CORE_NvicEnableMask(const CORE_nvicMask_t * enable)584 void CORE_NvicEnableMask(const CORE_nvicMask_t *enable)
585 {
586 CORE_CRITICAL_SECTION(
587 *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]) = *enable;
588 )
589 }
590
591 /***************************************************************************//**
592 * @brief
593 * Brief NVIC interrupt enable/disable sequence to allow handling of
594 * pending interrupts.
595 *
596 * @param[in] enable
597 * A mask specifying which NVIC interrupts to briefly enable.
598 *
599 * @note
600 * Usually used within an NVIC mask section.
601 ******************************************************************************/
CORE_YieldNvicMask(const CORE_nvicMask_t * enable)602 void CORE_YieldNvicMask(const CORE_nvicMask_t *enable)
603 {
604 CORE_nvicMask_t nvicMask;
605
606 // Get current NVIC enable mask.
607 CORE_CRITICAL_SECTION(
608 nvicMask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
609 )
610
611 // Make a mask with bits set for those interrupts that are currently
612 // disabled but are set in the enable mask.
613 #if (CORE_NVIC_REG_WORDS == 1)
614 nvicMask.a[0] &= enable->a[0];
615 nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
616
617 if (nvicMask.a[0] != 0) {
618 #elif (CORE_NVIC_REG_WORDS == 2)
619 nvicMask.a[0] &= enable->a[0];
620 nvicMask.a[1] &= enable->a[1];
621 nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
622 nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];
623
624 if ((nvicMask.a[0] != 0U) || (nvicMask.a[1] != 0U)) {
625 #elif (CORE_NVIC_REG_WORDS == 3)
626 nvicMask.a[0] &= enable->a[0];
627 nvicMask.a[1] &= enable->a[1];
628 nvicMask.a[2] &= enable->a[2];
629 nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
630 nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];
631 nvicMask.a[2] = ~nvicMask.a[2] & enable->a[2];
632
633 if ((nvicMask.a[0] != 0U) || (nvicMask.a[1] != 0U) || (nvicMask.a[2] != 0U)) {
634 #endif
635
636 // Enable previously disabled interrupts.
637 *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]) = nvicMask;
638
639 // Disable those interrupts again.
640 *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = nvicMask;
641 }
642 }
643
644 /***************************************************************************//**
645 * @brief
646 * Utility function to set an IRQn bit in a NVIC enable/disable mask.
647 *
648 * @param[in] irqN
649 * The IRQn_Type enumerator for the interrupt.
650 *
651 * @param[in,out] mask
652 * The mask to set the interrupt bit in.
653 ******************************************************************************/
654 void CORE_NvicMaskSetIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
655 {
656 EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
657 mask->a[(unsigned)irqN >> 5] |= 1UL << ((unsigned)irqN & 0x1FUL);
658 }
659
660 /***************************************************************************//**
661 * @brief
662 * Utility function to clear an IRQn bit in a NVIC enable/disable mask.
663 *
664 * @param[in] irqN
665 * The IRQn_Type enumerator for the interrupt.
666 *
667 * @param[in,out] mask
668 * The mask to clear the interrupt bit in.
669 ******************************************************************************/
670 void CORE_NvicMaskClearIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
671 {
672 EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
673 mask->a[(unsigned)irqN >> 5] &= ~(1UL << ((unsigned)irqN & 0x1FUL));
674 }
675
676 /***************************************************************************//**
677 * @brief
678 * Check whether the current CPU operation mode is handler mode.
679 *
680 * @return
681 * True if the CPU is in handler mode (currently executing an interrupt handler).
682 * @n False if the CPU is in thread mode.
683 ******************************************************************************/
684 SL_WEAK bool CORE_InIrqContext(void)
685 {
686 return (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0U;
687 }
688
689 /***************************************************************************//**
690 * @brief
691 * Check if a specific interrupt is disabled or blocked.
692 *
693 * @param[in] irqN
694 * The IRQn_Type enumerator for the interrupt to check.
695 *
696 * @return
697 * True if the interrupt is disabled or blocked.
698 ******************************************************************************/
699 SL_WEAK bool CORE_IrqIsBlocked(IRQn_Type irqN)
700 {
701 uint32_t irqPri, activeIrq;
702
703 #if (__CORTEX_M >= 3)
704 uint32_t basepri;
705
706 EFM_ASSERT((irqN >= MemoryManagement_IRQn)
707 && (irqN < (IRQn_Type)EXT_IRQ_COUNT));
708 #else
709 EFM_ASSERT((irqN >= SVCall_IRQn) && ((IRQn_Type)irqN < EXT_IRQ_COUNT));
710 #endif
711
712 if ((__get_PRIMASK() & 1U) != 0U) {
713 return true; // All IRQs are disabled.
714 }
715
716 if (CORE_NvicIRQDisabled(irqN)) {
717 return true; // The IRQ in question is disabled.
718 }
719
720 irqPri = NVIC_GetPriority(irqN);
721 #if (__CORTEX_M >= 3)
722 basepri = __get_BASEPRI();
723 if ((basepri != 0U)
724 && (irqPri >= (basepri >> (8U - __NVIC_PRIO_BITS)))) {
725 return true; // The IRQ in question has too low
726 } // priority vs. BASEPRI.
727 #endif
728
729 // Check if already in an interrupt handler. If so, an interrupt with a
730 // higher priority (lower priority value) can preempt.
731 activeIrq = (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) >> SCB_ICSR_VECTACTIVE_Pos;
732 if (activeIrq != 0U) {
733 if (irqPri >= NVIC_GetPriority((IRQn_Type)(activeIrq - 16U))) {
734 return true; // The IRQ in question has too low
735 } // priority vs. current active IRQ
736 }
737
738 return false;
739 }
740
741 /***************************************************************************//**
742 * @brief
743 * Check if interrupts are disabled.
744 *
745 * @return
746 * True if interrupts are disabled.
747 ******************************************************************************/
748 SL_WEAK bool CORE_IrqIsDisabled(void)
749 {
750 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_PRIMASK)
751 return (__get_PRIMASK() & 1U) == 1U;
752
753 #elif (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
754 return ((__get_PRIMASK() & 1U) == 1U)
755 || (__get_BASEPRI() >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL
756 << (8U - __NVIC_PRIO_BITS)));
757 #endif
758 }
759
760 /***************************************************************************//**
761 * @brief
762 * Get the current NVIC enable mask state.
763 *
764 * @param[out] mask
765 * The current NVIC enable mask.
766 ******************************************************************************/
767 void CORE_GetNvicEnabledMask(CORE_nvicMask_t *mask)
768 {
769 CORE_CRITICAL_SECTION(
770 *mask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
771 )
772 }
773
774 /***************************************************************************//**
775 * @brief
776 * Get NVIC disable state for a given mask.
777 *
778 * @param[in] mask
779 * An NVIC mask to check.
780 *
781 * @return
782 * True if all NVIC interrupt mask bits are clear.
783 ******************************************************************************/
784 bool CORE_GetNvicMaskDisableState(const CORE_nvicMask_t *mask)
785 {
786 CORE_nvicMask_t nvicMask;
787
788 CORE_CRITICAL_SECTION(
789 nvicMask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
790 )
791
792 #if (CORE_NVIC_REG_WORDS == 1)
793 return (mask->a[0] & nvicMask.a[0]) == 0U;
794
795 #elif (CORE_NVIC_REG_WORDS == 2)
796 return ((mask->a[0] & nvicMask.a[0]) == 0U)
797 && ((mask->a[1] & nvicMask.a[1]) == 0U);
798
799 #elif (CORE_NVIC_REG_WORDS == 3)
800 return ((mask->a[0] & nvicMask.a[0]) == 0U)
801 && ((mask->a[1] & nvicMask.a[1]) == 0U)
802 && ((mask->a[2] & nvicMask.a[2]) == 0U);
803 #endif
804 }
805
806 /***************************************************************************//**
807 * @brief
808 * Check if an NVIC interrupt is disabled.
809 *
810 * @param[in] irqN
811 * The IRQn_Type enumerator for the interrupt to check.
812 *
813 * @return
814 * True if the interrupt is disabled.
815 ******************************************************************************/
816 bool CORE_NvicIRQDisabled(IRQn_Type irqN)
817 {
818 CORE_nvicMask_t *mask;
819
820 EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
821 mask = (CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
822 return (mask->a[(unsigned)irqN >> 5U] & (1UL << ((unsigned)irqN & 0x1FUL)))
823 == 0UL;
824 }
825
826 /***************************************************************************//**
827 * @brief
828 * Utility function to get the handler for a specific interrupt.
829 *
830 * @param[in] irqN
831 * The IRQn_Type enumerator for the interrupt.
832 *
833 * @return
834 * The handler address.
835 *
836 * @note
837 * Uses the interrupt vector table defined by the current VTOR register value.
838 ******************************************************************************/
839 void *CORE_GetNvicRamTableHandler(IRQn_Type irqN)
840 {
841 EFM_ASSERT(((int)irqN >= -16) && ((int)irqN < EXT_IRQ_COUNT));
842 return (void*)((uint32_t*)(((uint32_t*)SCB->VTOR)[(int)irqN + 16]));
843 }
844
845 /***************************************************************************//**
846 * @brief
847 * Utility function to set the handler for a specific interrupt.
848 *
849 * @param[in] irqN
850 * The IRQn_Type enumerator for the interrupt.
851 *
852 * @param[in] handler
853 * The handler address.
854 *
855 * @note
856 * Uses the interrupt vector table defined by the current VTOR register value.
857 ******************************************************************************/
858 void CORE_SetNvicRamTableHandler(IRQn_Type irqN, void *handler)
859 {
860 EFM_ASSERT(((int)irqN >= -16) && ((int)irqN < EXT_IRQ_COUNT));
861 ((uint32_t*)SCB->VTOR)[(int)irqN + 16] = (uint32_t)((uint32_t*)handler);
862 }
863
864 /***************************************************************************//**
865 * @brief
866 * Initialize an interrupt vector table by copying table entries from a
867 * source to a target table.
868 *
869 * @note This function will set a new VTOR register value.
870 *
871 * @param[in] sourceTable
872 * The address of the source vector table.
873 *
874 * @param[in] sourceSize
875 * A number of entries in the source vector table.
876 *
877 * @param[in] targetTable
878 * The address of the target (new) vector table.
879 *
880 * @param[in] targetSize
881 * A number of entries in the target vector table.
882 *
883 * @param[in] defaultHandler
 *   An address of the interrupt handler used for target entries for which
 *   there is no corresponding source entry (i.e., the target table is larger
 *   than the source table).
887 *
888 * @param[in] overwriteActive
889 * When true, a target table entry is always overwritten with the
890 * corresponding source entry. If false, a target table entry is only
891 * overwritten if it is zero. This makes it possible for an application
892 * to partly initialize a target table before passing it to this function.
893 *
894 ******************************************************************************/
895 void CORE_InitNvicVectorTable(uint32_t *sourceTable,
896 uint32_t sourceSize,
897 uint32_t *targetTable,
898 uint32_t targetSize,
899 void *defaultHandler,
900 bool overwriteActive)
901 {
902 uint32_t i;
903
904 // ASSERT on non SRAM-based target table.
905 EFM_ASSERT(((uint32_t)targetTable >= SRAM_BASE)
906 && ((uint32_t)targetTable < (SRAM_BASE + SRAM_SIZE)));
907
908 // ASSERT if misaligned with respect to the VTOR register implementation.
909 #if defined(SCB_VTOR_TBLBASE_Msk)
910 EFM_ASSERT(((uint32_t)targetTable & ~(SCB_VTOR_TBLOFF_Msk
911 | SCB_VTOR_TBLBASE_Msk)) == 0U);
912 #else
913 EFM_ASSERT(((uint32_t)targetTable & ~SCB_VTOR_TBLOFF_Msk) == 0U);
914 #endif
915
916 // ASSERT if misaligned with respect to the vector table size.
917 // The vector table address must be aligned at its size rounded up to nearest 2^n.
918 EFM_ASSERT(((uint32_t)targetTable
919 & ((1UL << (32UL - __CLZ((targetSize * 4UL) - 1UL))) - 1UL))
920 == 0UL);
921
922 for (i = 0; i < targetSize; i++) {
923 if (overwriteActive) { // Overwrite target entries.
924 if (i < sourceSize) { // targetSize <= sourceSize
925 targetTable[i] = sourceTable[i];
926 } else { // targetSize > sourceSize
927 targetTable[i] = (uint32_t)((uint32_t*)defaultHandler);
928 }
929 } else { // Overwrite target entries which are 0.
930 if (i < sourceSize) { // targetSize <= sourceSize
931 if (targetTable[i] == 0U) {
932 targetTable[i] = sourceTable[i];
933 }
934 } else { // targetSize > sourceSize
935 if (targetTable[i] == 0U) {
936 targetTable[i] = (uint32_t)((uint32_t*)defaultHandler);
937 }
938 }
939 }
940 }
941 SCB->VTOR = (uint32_t)targetTable;
942 }
943
944 #if (SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1) || defined(DOXYGEN)
945 /***************************************************************************//**
946 * @brief
947 * Start a recording.
948 *
949 * @param[in] handle
950 * Pointer to initialized counter handle.
951 *
952 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
953 ******************************************************************************/
954 static void cycle_counter_start(dwt_cycle_counter_handle_t *handle)
955 {
956 handle->start = DWT->CYCCNT;
957 }
958
959 /***************************************************************************//**
960 * @brief
961 * Stop a recording.
962 *
963 * @param[in] handle
964 * Pointer to initialized counter handle.
965 *
966 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
967 ******************************************************************************/
968 static void cycle_counter_stop(dwt_cycle_counter_handle_t *handle)
969 {
970 handle->cycles = DWT->CYCCNT - handle->start;
971
972 if (handle->cycles > handle->max) {
973 handle->max = handle->cycles;
974 }
975 }
976
977 /***************************************************************************//**
978 * @brief
979 * Returns the max time spent in critical section.
980 *
981 * @return
982 * The max time spent in critical section.
983 *
984 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
985 ******************************************************************************/
986 uint32_t CORE_get_max_time_critical_section(void)
987 {
988 return critical_cycle_counter.max;
989 }
990
991 /***************************************************************************//**
992 * @brief
993 * Returns the max time spent in atomic section.
994 *
995 * @return
996 * The max time spent in atomic section.
997 *
998 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
999 ******************************************************************************/
1000 uint32_t CORE_get_max_time_atomic_section(void)
1001 {
1002 return atomic_cycle_counter.max;
1003 }
1004
1005 /***************************************************************************//**
1006 * @brief
 *   Clears the max time spent in critical section.
1008 *
1009 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
1010 ******************************************************************************/
1011 void CORE_clear_max_time_critical_section(void)
1012 {
1013 critical_cycle_counter.max = 0;
1014 }
1015
1016 /***************************************************************************//**
1017 * @brief
1018 * Clears the max time spent in atomic section.
1019 *
1020 * @note SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING must be enabled.
1021 ******************************************************************************/
1022 void CORE_clear_max_time_atomic_section(void)
1023 {
1024 atomic_cycle_counter.max = 0;
1025 }
1026 #endif //(SL_EMLIB_CORE_ENABLE_INTERRUPT_DISABLED_TIMING == 1)
1027
1028 /** @} (end addtogroup core) */
1029