1 /***************************************************************************//**
2  * @file
3  * @brief Core interrupt handling API
4  *******************************************************************************
5  * # License
6  * <b>Copyright 2018 Silicon Laboratories Inc. www.silabs.com</b>
7  *******************************************************************************
8  *
9  * SPDX-License-Identifier: Zlib
10  *
11  * The licensor of this software is Silicon Laboratories Inc.
12  *
13  * This software is provided 'as-is', without any express or implied
14  * warranty. In no event will the authors be held liable for any damages
15  * arising from the use of this software.
16  *
17  * Permission is granted to anyone to use this software for any purpose,
18  * including commercial applications, and to alter it and redistribute it
19  * freely, subject to the following restrictions:
20  *
21  * 1. The origin of this software must not be misrepresented; you must not
22  *    claim that you wrote the original software. If you use this software
23  *    in a product, an acknowledgment in the product documentation would be
24  *    appreciated but is not required.
25  * 2. Altered source versions must be plainly marked as such, and must not be
26  *    misrepresented as being the original software.
27  * 3. This notice may not be removed or altered from any source distribution.
28  *
29  ******************************************************************************/
30 #include "em_core.h"
31 #include "em_assert.h"
32 
33 /* *INDENT-OFF* */
34 // *****************************************************************************
35 ///  @addtogroup core CORE - Core Interrupt
36 ///  @brief Core interrupt handling API
37 ///
38 ///  @li @ref core_intro
39 ///  @li @ref core_conf
40 ///  @li @ref core_macro_api
41 ///  @li @ref core_reimplementation
42 ///  @li @ref core_vector_tables
43 ///  @li @ref core_examples
44 ///  @li @ref core_porting
45 ///
46 ///@n @section core_intro Introduction
47 ///
48 ///  CORE interrupt API provides a simple and safe means
49 ///  to disable and enable interrupts to protect sections of code.
50 ///
51 ///  This is often referred to as "critical sections". This module provides
52 ///  support for three types of critical sections, each with different interrupt
53 ///  blocking capabilities.
54 ///
55 ///  @li <b>CRITICAL</b> section: Inside a critical section, all interrupts are
56 ///      disabled (except for fault handlers). The PRIMASK register is always used for
57 ///      interrupt disable/enable.
58 ///  @li <b>ATOMIC</b> section: This type of section is configurable and the default
59 ///      method is to use PRIMASK. With BASEPRI configuration, interrupts with priority
60 ///      equal to or lower than a given configurable level are disabled. The interrupt
61 ///      disable priority level is defined at compile time. The BASEPRI register is not
62 ///      available for all architectures.
///  @li <b>NVIC mask</b> section: Disable NVIC (external interrupts) in an
///      individual manner.
65 ///
66 ///  em_core also has an API for manipulating RAM-based interrupt vector tables.
67 ///
68 ///@n @section core_conf Compile-time Configuration
69 ///
70 ///  The following @htmlonly #defines @endhtmlonly are used to configure em_core:
71 ///  @code{.c}
72 ///  // The interrupt priority level used inside ATOMIC sections.
73 ///  #define CORE_ATOMIC_BASE_PRIORITY_LEVEL    3
74 ///
75 ///  // A method used for interrupt disable/enable within ATOMIC sections.
76 ///  #define CORE_ATOMIC_METHOD                 CORE_ATOMIC_METHOD_PRIMASK
77 ///  @endcode
78 ///
79 ///  If the default values do not support your needs, they can be overridden
80 ///  by supplying -D compiler flags on the compiler command line or by collecting
81 ///  all macro redefinitions in a file named @em emlib_config.h and then supplying
82 ///  -DEMLIB_USER_CONFIG on a compiler command line.
83 ///
84 ///  @note The default emlib configuration for ATOMIC section interrupt disable
85 ///        method is using PRIMASK, i.e., ATOMIC sections are implemented as
86 ///        CRITICAL sections.
87 ///
88 ///  @note Due to architectural limitations Cortex-M0+ devices do not support
89 ///        ATOMIC type critical sections using the BASEPRI register. On M0+
90 ///        devices ATOMIC section helper macros are available but they are
91 ///        implemented as CRITICAL sections using PRIMASK register.
92 ///
93 ///@n @section core_macro_api Macro API
94 ///
95 ///  The primary em_core API is the macro API. Macro API will map to correct
96 ///  CORE functions according to the selected @ref CORE_ATOMIC_METHOD and similar
97 ///  configurations (the full CORE API is of course also available).
98 ///  The most useful macros are as follows:
99 ///
100 ///  @ref CORE_DECLARE_IRQ_STATE @n @ref CORE_ENTER_ATOMIC() @n
101 ///  @ref CORE_EXIT_ATOMIC()@n
102 ///  Used together to implement an ATOMIC section.
103 ///  @code{.c}
104 ///  {
105 ///    CORE_DECLARE_IRQ_STATE;           // Storage for saving IRQ state prior to
106 ///                                      // atomic section entry.
107 ///
108 ///    CORE_ENTER_ATOMIC();              // Enter atomic section.
109 ///
110 ///    ...
111 ///    ... your code goes here ...
112 ///    ...
113 ///
114 ///    CORE_EXIT_ATOMIC();               // Exit atomic section, IRQ state is restored.
115 ///  }
116 ///  @endcode
117 ///
118 ///  @n @ref CORE_ATOMIC_SECTION(yourcode)@n
119 ///  A concatenation of all three macros above.
120 ///  @code{.c}
121 ///  {
122 ///    CORE_ATOMIC_SECTION(
123 ///      ...
124 ///      ... your code goes here ...
125 ///      ...
126 ///    )
127 ///  }
128 ///  @endcode
129 ///
130 ///  @n @ref CORE_DECLARE_IRQ_STATE @n @ref CORE_ENTER_CRITICAL() @n
131 ///  @ref CORE_EXIT_CRITICAL() @n @ref CORE_CRITICAL_SECTION(yourcode)@n
132 ///  These macros implement CRITICAL sections in a similar fashion as described
133 ///  above for ATOMIC sections.
134 ///
135 ///  @n @ref CORE_DECLARE_NVIC_STATE @n @ref CORE_ENTER_NVIC() @n
136 ///  @ref CORE_EXIT_NVIC() @n @ref CORE_NVIC_SECTION(yourcode)@n
137 ///  These macros implement NVIC mask sections in a similar fashion as described
138 ///  above for ATOMIC sections. See @ref core_examples for an example.
139 ///
140 ///  Refer to @em Macros or <em>Macro Definition Documentation</em> below for a
141 ///  full list of macros.
142 ///
143 ///@n @section core_reimplementation API reimplementation
144 ///
145 ///  Most of the functions in the API are implemented as weak functions. This means
146 ///  that it is easy to reimplement when special needs arise. Shown below is a
147 ///  reimplementation of CRITICAL sections suitable if FreeRTOS OS is used:
148 ///  @code{.c}
149 ///  CORE_irqState_t CORE_EnterCritical(void)
150 ///  {
151 ///    vPortEnterCritical();
152 ///    return 0;
153 ///  }
154 ///
155 ///  void CORE_ExitCritical(CORE_irqState_t irqState)
156 ///  {
157 ///    (void)irqState;
158 ///    vPortExitCritical();
159 ///  }
160 ///  @endcode
161 ///  Also note that CORE_Enter/ExitCritical() are not implemented as inline
162 ///  functions. As a result, reimplementations will be possible even when original
163 ///  implementations are inside a linked library.
164 ///
165 ///  Some RTOSes must be notified on interrupt handler entry and exit. Macros
166 ///  @ref CORE_INTERRUPT_ENTRY() and @ref CORE_INTERRUPT_EXIT() are suitable
167 ///  placeholders for inserting such code. Insert these macros in all your
168 ///  interrupt handlers and then override the default macro implementations.
169 ///  This is an example if uC/OS is used:
170 ///  @code{.c}
171 ///  // In emlib_config.h:
172 ///
173 ///  #define CORE_INTERRUPT_ENTRY()   OSIntEnter()
174 ///  #define CORE_INTERRUPT_EXIT()    OSIntExit()
175 ///  @endcode
176 ///
177 ///@n @section core_vector_tables Interrupt vector tables
178 ///
179 ///  When using RAM based interrupt vector tables it is the user's responsibility
180 ///  to allocate the table space correctly. The tables must be aligned as specified
181 ///  in the CPU reference manual.
182 ///
183 ///  @ref CORE_InitNvicVectorTable()@n
184 ///  Initialize a RAM based vector table by copying table entries from a source
185 ///  vector table to a target table. VTOR is set to the address of the target
186 ///  vector table.
187 ///
188 ///  @n @ref CORE_GetNvicRamTableHandler() @n @ref CORE_SetNvicRamTableHandler()@n
189 ///  Use these functions to get or set the interrupt handler for a specific IRQn.
190 ///  They both use the interrupt vector table defined by the current
191 ///  VTOR register value.
192 ///
193 ///@n @section core_examples Examples
194 ///
195 ///  Implement an NVIC critical section:
196 ///  @code{.c}
197 ///  {
198 ///    CORE_DECLARE_NVIC_ZEROMASK(mask); // A zero initialized NVIC disable mask
199 ///
200 ///    // Set mask bits for IRQs to block in the NVIC critical section.
201 ///    // In many cases, you can create the disable mask once upon application
202 ///    // startup and use the mask globally throughout the application lifetime.
203 ///    CORE_NvicMaskSetIRQ(LEUART0_IRQn, &mask);
204 ///    CORE_NvicMaskSetIRQ(VCMP_IRQn,    &mask);
205 ///
206 ///    // Enter NVIC critical section with the disable mask
207 ///    CORE_NVIC_SECTION(&mask,
208 ///      ...
209 ///      ... your code goes here ...
210 ///      ...
211 ///    )
212 ///  }
213 ///  @endcode
214 ///
215 ///@n @section core_porting Porting from em_int
216 ///
217 ///  Existing code using INT_Enable() and INT_Disable() must be ported to the
///  em_core API. While em_int used a global counter to store the interrupt state,
219 ///  em_core uses a local variable. Any usage of INT_Disable(), therefore, needs to
220 ///  be replaced with a declaration of the interrupt state variable before entering
221 ///  the critical section.
222 ///
223 ///  Since the state variable is in local scope, the critical section exit
224 ///  needs to occur within the scope of the variable. If multiple nested critical
225 ///  sections are used, each needs to have its own state variable in its own scope.
226 ///
227 ///  In many cases, completely disabling all interrupts using CRITICAL sections
228 ///  might be more heavy-handed than needed. When porting, consider whether other
229 ///  types of sections, such as ATOMIC or NVIC mask, can be used to only disable
230 ///  a subset of the interrupts.
231 ///
232 ///  Replacing em_int calls with em_core function calls:
233 ///  @code{.c}
234 ///  void func(void)
235 ///  {
236 ///    // INT_Disable();
237 ///    CORE_DECLARE_IRQ_STATE;
238 ///    CORE_ENTER_ATOMIC();
239 ///      .
240 ///      .
241 ///      .
242 ///    // INT_Enable();
243 ///    CORE_EXIT_ATOMIC();
244 ///  }
245 ///  @endcode
246 /// @{
247 // *****************************************************************************
248 /* *INDENT-ON* */
249 
250 /*******************************************************************************
251  *******************************   DEFINES   ***********************************
252  ******************************************************************************/
253 
254 #if !defined(CORE_INTERRUPT_ENTRY)
255 // Some RTOSes must be notified on interrupt entry (and exit).
256 // Use this macro at the start of all your interrupt handlers.
257 // Reimplement the macro in emlib_config.h to suit the needs of your RTOS.
258 /** Placeholder for optional interrupt handler entry code. This might be needed
259  *  when working with an RTOS. */
260 #define CORE_INTERRUPT_ENTRY()
261 #endif
262 
263 #if !defined(CORE_INTERRUPT_EXIT)
264 /** Placeholder for optional interrupt handler exit code. This might be needed
265  *  when working with an RTOS. */
266 #define CORE_INTERRUPT_EXIT()
267 #endif
268 
269 // Compile time sanity check.
270 #if (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_PRIMASK) \
271   && (CORE_ATOMIC_METHOD != CORE_ATOMIC_METHOD_BASEPRI)
272 #error "em_core: Undefined ATOMIC IRQ handling strategy."
273 #endif
274 
275 /*******************************************************************************
276  ******************************   FUNCTIONS   **********************************
277  ******************************************************************************/
278 
/***************************************************************************//**
 * @brief
 *   Disable interrupts.
 *
 *   Disable all interrupts by setting PRIMASK.
 *   (Fault exception handlers will still be enabled).
 *
 * @note
 *   Declared SL_WEAK so that an application or RTOS port can override it.
 ******************************************************************************/
SL_WEAK void CORE_CriticalDisableIrq(void)
{
  __disable_irq();  // Sets PRIMASK; only fault exceptions remain enabled.
}
290 
/***************************************************************************//**
 * @brief
 *   Enable interrupts.
 *
 *   Enable interrupts by clearing PRIMASK.
 *
 * @note
 *   Declared SL_WEAK so that an application or RTOS port can override it.
 ******************************************************************************/
SL_WEAK void CORE_CriticalEnableIrq(void)
{
  __enable_irq();  // Clears PRIMASK unconditionally.
}
301 
302 /***************************************************************************//**
303  * @brief
304  *   Enter a CRITICAL section.
305  *
306  *   When a CRITICAL section is entered, all interrupts (except fault handlers)
307  *   are disabled.
308  *
309  * @return
310  *   The value of PRIMASK register prior to the CRITICAL section entry.
311  ******************************************************************************/
CORE_EnterCritical(void)312 SL_WEAK CORE_irqState_t CORE_EnterCritical(void)
313 {
314   CORE_irqState_t irqState = __get_PRIMASK();
315   __disable_irq();
316   return irqState;
317 }
318 
319 /***************************************************************************//**
320  * @brief
321  *   Exit a CRITICAL section.
322  *
323  * @param[in] irqState
324  *   The interrupt priority blocking level to restore to PRIMASK when exiting
325  *   the CRITICAL section. This value is usually the one returned by a prior
326  *   call to @ref CORE_EnterCritical().
327  ******************************************************************************/
CORE_ExitCritical(CORE_irqState_t irqState)328 SL_WEAK void CORE_ExitCritical(CORE_irqState_t irqState)
329 {
330   if (irqState == 0U) {
331     __enable_irq();
332   }
333 }
334 
335 /***************************************************************************//**
336  * @brief
337  *   Brief interrupt enable/disable sequence to allow handling of
338  *   pending interrupts.
339  *
340  * @note
341  *   Usually used within a CRITICAL section.
342  ******************************************************************************/
CORE_YieldCritical(void)343 SL_WEAK void CORE_YieldCritical(void)
344 {
345   if ((__get_PRIMASK() & 1U) != 0U) {
346     __enable_irq();
347     __ISB();
348     __disable_irq();
349   }
350 }
351 
/***************************************************************************//**
 * @brief
 *   Disable interrupts.
 *
 *   Disable interrupts with a priority lower or equal to
 *   @ref CORE_ATOMIC_BASE_PRIORITY_LEVEL. Sets core BASEPRI register
 *   to CORE_ATOMIC_BASE_PRIORITY_LEVEL.
 *
 * @note
 *   If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
 *   function is identical to @ref CORE_CriticalDisableIrq().
 ******************************************************************************/
SL_WEAK void CORE_AtomicDisableIrq(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  // Shift the configured level into the implemented (most significant)
  // priority bits of BASEPRI.
  __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS));
#else
  // PRIMASK method: block everything except fault handlers.
  __disable_irq();
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}
372 
/***************************************************************************//**
 * @brief
 *   Enable interrupts.
 *
 *   Enable interrupts by setting core BASEPRI register to 0.
 *
 * @note
 *   If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_BASEPRI and PRIMASK
 *   is set (CPU is inside a CRITICAL section), interrupts will still be
 *   disabled after calling this function.
 *
 * @note
 *   If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
 *   function is identical to @ref CORE_CriticalEnableIrq().
 ******************************************************************************/
SL_WEAK void CORE_AtomicEnableIrq(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  // BASEPRI value 0 means "no base priority masking".
  __set_BASEPRI(0);
#else
  __enable_irq();
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}
396 
397 /***************************************************************************//**
398  * @brief
399  *   Enter an ATOMIC section.
400  *
401  *   When an ATOMIC section is entered, interrupts with priority lower or equal
402  *   to @ref CORE_ATOMIC_BASE_PRIORITY_LEVEL are disabled.
403  *
404  * @note
405  *   If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
406  *   function is identical to @ref CORE_EnterCritical().
407  *
408  * @return
409  *   The value of BASEPRI register prior to ATOMIC section entry.
410  ******************************************************************************/
CORE_EnterAtomic(void)411 SL_WEAK CORE_irqState_t CORE_EnterAtomic(void)
412 {
413 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
414   CORE_irqState_t irqState = __get_BASEPRI();
415   __set_BASEPRI(CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS));
416   return irqState;
417 #else
418   CORE_irqState_t irqState = __get_PRIMASK();
419   __disable_irq();
420   return irqState;
421 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
422 }
423 
424 /***************************************************************************//**
425  * @brief
426  *   Exit an ATOMIC section.
427  *
428  * @param[in] irqState
429  *   The interrupt priority blocking level to restore to BASEPRI when exiting
430  *   the ATOMIC section. This value is usually the one returned by a prior
431  *   call to @ref CORE_EnterAtomic().
432  *
433  * @note
434  *   If @ref CORE_ATOMIC_METHOD is set to @ref CORE_ATOMIC_METHOD_PRIMASK, this
435  *   function is identical to @ref CORE_ExitCritical().
436  ******************************************************************************/
CORE_ExitAtomic(CORE_irqState_t irqState)437 SL_WEAK void CORE_ExitAtomic(CORE_irqState_t irqState)
438 {
439 #if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
440   __set_BASEPRI(irqState);
441 #else
442   if (irqState == 0U) {
443     __enable_irq();
444   }
445 #endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
446 }
447 
/***************************************************************************//**
 * @brief
 *   Brief interrupt enable/disable sequence to allow handling of
 *   pending interrupts.
 *
 * @note
 *   Usually used within an ATOMIC section.
 *
 * @note
 *   If @ref CORE_ATOMIC_METHOD is @ref CORE_ATOMIC_METHOD_PRIMASK, this
 *   function is identical to @ref CORE_YieldCritical().
 ******************************************************************************/
SL_WEAK void CORE_YieldAtomic(void)
{
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  CORE_irqState_t basepri = __get_BASEPRI();
  // Only yield if BASEPRI is currently masking at (or above) the atomic
  // level, i.e., we are actually inside an ATOMIC section.
  if (basepri >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL << (8 - __NVIC_PRIO_BITS))) {
    __set_BASEPRI(0);
    __ISB();
    __set_BASEPRI(basepri);
  }
#else
  // PRIMASK method: briefly open an interrupt window if currently disabled.
  if ((__get_PRIMASK() & 1U) != 0U) {
    __enable_irq();
    __ISB();
    __disable_irq();
  }
#endif // (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
}
477 
/***************************************************************************//**
 * @brief
 *   Enter a NVIC mask section.
 *
 *   When a NVIC mask section is entered, specified NVIC interrupts
 *   are disabled.
 *
 * @param[out] nvicState
 *   Return NVIC interrupts enable mask prior to section entry.
 *
 * @param[in] disable
 *   A mask specifying which NVIC interrupts to disable within the section.
 ******************************************************************************/
void CORE_EnterNvicMask(CORE_nvicMask_t *nvicState,
                        const CORE_nvicMask_t *disable)
{
  // The read of the current state and the write of the disable mask must be
  // one indivisible step, hence the CRITICAL section. Writing 1-bits to the
  // NVIC ICER registers disables the corresponding interrupts.
  CORE_CRITICAL_SECTION(
    *nvicState = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]);
    *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = *disable;
    )
}
499 
/***************************************************************************//**
 * @brief
 *   Disable NVIC interrupts.
 *
 * @param[in] disable
 *   A mask specifying which NVIC interrupts to disable.
 ******************************************************************************/
void CORE_NvicDisableMask(const CORE_nvicMask_t *disable)
{
  // Writing 1-bits to ICER disables the corresponding interrupts; 0-bits
  // have no effect, so only the masked interrupts are touched.
  CORE_CRITICAL_SECTION(
    *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = *disable;
    )
}
513 
/***************************************************************************//**
 * @brief
 *   Enable NVIC interrupts.
 *
 * @param[in] enable
 *   A mask specifying which NVIC interrupts to enable.
 ******************************************************************************/
void CORE_NvicEnableMask(const CORE_nvicMask_t *enable)
{
  // Writing 1-bits to ISER enables the corresponding interrupts; 0-bits
  // have no effect.
  CORE_CRITICAL_SECTION(
    *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]) = *enable;
    )
}
527 
/***************************************************************************//**
 * @brief
 *   Brief NVIC interrupt enable/disable sequence to allow handling of
 *   pending interrupts.
 *
 * @param[in] enable
 *   A mask specifying which NVIC interrupts to briefly enable.
 *
 * @note
 *   Usually used within an NVIC mask section.
 ******************************************************************************/
void CORE_YieldNvicMask(const CORE_nvicMask_t *enable)
{
  CORE_nvicMask_t nvicMask;

  // Get current NVIC enable mask.
  CORE_CRITICAL_SECTION(
    nvicMask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
    )

  // Make a mask with bits set for those interrupts that are currently
  // disabled but are set in the enable mask.
  // Per word: ~currentlyEnabled & enable. Note that the opening brace of the
  // "if" in each branch below is closed after the #endif.
#if (CORE_NVIC_REG_WORDS == 1)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];

  if (nvicMask.a[0] != 0) {
#elif (CORE_NVIC_REG_WORDS == 2)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[1] &= enable->a[1];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
  nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];

  if ((nvicMask.a[0] != 0U) || (nvicMask.a[1] != 0U)) {
#elif (CORE_NVIC_REG_WORDS == 3)
  nvicMask.a[0] &= enable->a[0];
  nvicMask.a[1] &= enable->a[1];
  nvicMask.a[2] &= enable->a[2];
  nvicMask.a[0] = ~nvicMask.a[0] & enable->a[0];
  nvicMask.a[1] = ~nvicMask.a[1] & enable->a[1];
  nvicMask.a[2] = ~nvicMask.a[2] & enable->a[2];

  if ((nvicMask.a[0] != 0) || (nvicMask.a[1] != 0) || (nvicMask.a[2] != 0)) {
#endif

    // Enable previously disabled interrupts.
    *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]) = nvicMask;

    // Disable those interrupts again.
    *(CORE_nvicMask_t*)((uint32_t)&NVIC->ICER[0]) = nvicMask;
  }
}
580 
581 /***************************************************************************//**
582  * @brief
583  *   Utility function to set an IRQn bit in a NVIC enable/disable mask.
584  *
585  * @param[in] irqN
586  *   The IRQn_Type enumerator for the interrupt.
587  *
588  * @param[in,out] mask
589  *   The mask to set the interrupt bit in.
590  ******************************************************************************/
591 void CORE_NvicMaskSetIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
592 {
593   EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
594   mask->a[(unsigned)irqN >> 5] |= 1UL << ((unsigned)irqN & 0x1FUL);
595 }
596 
597 /***************************************************************************//**
598  * @brief
599  *   Utility function to clear an IRQn bit in a NVIC enable/disable mask.
600  *
601  * @param[in] irqN
602  *   The IRQn_Type enumerator for the interrupt.
603  *
604  * @param[in,out] mask
605  *   The mask to clear the interrupt bit in.
606  ******************************************************************************/
607 void CORE_NvicMaskClearIRQ(IRQn_Type irqN, CORE_nvicMask_t *mask)
608 {
609   EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
610   mask->a[(unsigned)irqN >> 5] &= ~(1UL << ((unsigned)irqN & 0x1FUL));
611 }
612 
/***************************************************************************//**
 * @brief
 *   Check whether the current CPU operation mode is handler mode.
 *
 * @return
 *   True if the CPU is in handler mode (currently executing an interrupt handler).
 *   @n False if the CPU is in thread mode.
 ******************************************************************************/
SL_WEAK bool CORE_InIrqContext(void)
{
  // A non-zero VECTACTIVE field in SCB->ICSR means an exception handler
  // is currently active.
  return (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0U;
}
625 
/***************************************************************************//**
 * @brief
 *   Check if a specific interrupt is disabled or blocked.
 *
 *   An interrupt counts as blocked if PRIMASK is set, if it is disabled in
 *   the NVIC, if BASEPRI masks its priority level (Cortex-M3 and higher), or
 *   if a currently active interrupt has equal or higher priority.
 *
 * @param[in] irqN
 *   The IRQn_Type enumerator for the interrupt to check.
 *
 * @return
 *   True if the interrupt is disabled or blocked.
 ******************************************************************************/
SL_WEAK bool CORE_IrqIsBlocked(IRQn_Type irqN)
{
  uint32_t irqPri, activeIrq;

#if (__CORTEX_M >= 3)
  uint32_t basepri;

  EFM_ASSERT((irqN >= MemoryManagement_IRQn)
             && (irqN < (IRQn_Type)EXT_IRQ_COUNT));
#else
  // M0/M0+ have no BASEPRI and no MemoryManagement fault.
  EFM_ASSERT((irqN >= SVCall_IRQn) && ((IRQn_Type)irqN < EXT_IRQ_COUNT));
#endif

  if ((__get_PRIMASK() & 1U) != 0U) {
    return true;                            // All IRQs are disabled.
  }

  if (CORE_NvicIRQDisabled(irqN)) {
    return true;                            // The IRQ in question is disabled.
  }

  irqPri  = NVIC_GetPriority(irqN);
#if (__CORTEX_M >= 3)
  // Note: lower numeric value means higher priority. BASEPRI holds the
  // priority in its most significant implemented bits, hence the shift.
  basepri = __get_BASEPRI();
  if ((basepri != 0U)
      && (irqPri >= (basepri >> (8 - __NVIC_PRIO_BITS)))) {
    return true;                            // The IRQ in question has too low
  }                                         // priority vs. BASEPRI.
#endif

  // Check if already in an interrupt handler. If so, an interrupt with a
  // higher priority (lower priority value) can preempt.
  // VECTACTIVE is an exception number; subtract 16 to get the IRQn.
  activeIrq = (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) >> SCB_ICSR_VECTACTIVE_Pos;
  if (activeIrq != 0U) {
    if (irqPri >= NVIC_GetPriority((IRQn_Type)(activeIrq - 16U))) {
      return true;                          // The IRQ in question has too low
    }                                       // priority vs. current active IRQ
  }

  return false;
}
677 
/***************************************************************************//**
 * @brief
 *   Check if interrupts are disabled.
 *
 *   With the BASEPRI method, interrupts also count as disabled when BASEPRI
 *   masks at or above the configured atomic priority level.
 *
 * @return
 *   True if interrupts are disabled.
 ******************************************************************************/
SL_WEAK bool CORE_IrqIsDisabled(void)
{
// The compile-time sanity check above guarantees one of these branches exists.
#if (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_PRIMASK)
  return (__get_PRIMASK() & 1U) == 1U;

#elif (CORE_ATOMIC_METHOD == CORE_ATOMIC_METHOD_BASEPRI)
  return ((__get_PRIMASK() & 1U) == 1U)
         || (__get_BASEPRI() >= (CORE_ATOMIC_BASE_PRIORITY_LEVEL
                                 << (8 - __NVIC_PRIO_BITS)));
#endif
}
696 
/***************************************************************************//**
 * @brief
 *   Get the current NVIC enable mask state.
 *
 * @param[out] mask
 *   The current NVIC enable mask.
 ******************************************************************************/
void CORE_GetNvicEnabledMask(CORE_nvicMask_t *mask)
{
  // Read all ISER words as one snapshot; the CRITICAL section prevents the
  // enable state from changing mid-copy.
  CORE_CRITICAL_SECTION(
    *mask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
    )
}
710 
/***************************************************************************//**
 * @brief
 *   Get NVIC disable state for a given mask.
 *
 * @param[in] mask
 *   An NVIC mask to check.
 *
 * @return
 *   True if all the NVIC interrupts selected by @p mask are currently
 *   disabled (no selected bit is set in the NVIC enable registers).
 ******************************************************************************/
bool CORE_GetNvicMaskDisableState(const CORE_nvicMask_t *mask)
{
  CORE_nvicMask_t nvicMask;

  // Take a consistent snapshot of the NVIC enable registers.
  CORE_CRITICAL_SECTION(
    nvicMask = *(CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
    )

#if (CORE_NVIC_REG_WORDS == 1)
  return (mask->a[0] & nvicMask.a[0]) == 0U;

#elif (CORE_NVIC_REG_WORDS == 2)
  return ((mask->a[0] & nvicMask.a[0]) == 0U)
         && ((mask->a[1] & nvicMask.a[1]) == 0U);

#elif (CORE_NVIC_REG_WORDS == 3)
  return ((mask->a[0] & nvicMask.a[0]) == 0U)
         && ((mask->a[1] & nvicMask.a[1]) == 0U)
         && ((mask->a[2] & nvicMask.a[2]) == 0U);
#endif
}
742 
743 /***************************************************************************//**
744  * @brief
745  *   Check if an NVIC interrupt is disabled.
746  *
747  * @param[in] irqN
748  *   The IRQn_Type enumerator for the interrupt to check.
749  *
750  * @return
751  *   True if the interrupt is disabled.
752  ******************************************************************************/
753 bool CORE_NvicIRQDisabled(IRQn_Type irqN)
754 {
755   CORE_nvicMask_t *mask;
756 
757   EFM_ASSERT(((int)irqN >= 0) && ((int)irqN < EXT_IRQ_COUNT));
758   mask = (CORE_nvicMask_t*)((uint32_t)&NVIC->ISER[0]);
759   return (mask->a[(unsigned)irqN >> 5U] & (1UL << ((unsigned)irqN & 0x1FUL)))
760          == 0UL;
761 }
762 
/***************************************************************************//**
 * @brief
 *   Utility function to get the handler for a specific interrupt.
 *
 * @param[in] irqN
 *   The IRQn_Type enumerator for the interrupt. System exceptions (negative
 *   IRQn values down to -16) are also accepted.
 *
 * @return
 *   The handler address.
 *
 * @note
 *   Uses the interrupt vector table defined by the current VTOR register value.
 ******************************************************************************/
void *CORE_GetNvicRamTableHandler(IRQn_Type irqN)
{
  EFM_ASSERT(((int)irqN >= -16) && ((int)irqN < EXT_IRQ_COUNT));
  // The first 16 vector table entries are system exceptions, so IRQn 0
  // lives at table index 16.
  return (void*)((uint32_t*)(((uint32_t*)SCB->VTOR)[(int)irqN + 16]));
}
781 
/***************************************************************************//**
 * @brief
 *   Utility function to set the handler for a specific interrupt.
 *
 * @param[in] irqN
 *   The IRQn_Type enumerator for the interrupt. System exceptions (negative
 *   IRQn values down to -16) are also accepted.
 *
 * @param[in] handler
 *   The handler address.
 *
 * @note
 *   Uses the interrupt vector table defined by the current VTOR register value.
 *   The table must reside in RAM for the write to take effect.
 ******************************************************************************/
void CORE_SetNvicRamTableHandler(IRQn_Type irqN, void *handler)
{
  EFM_ASSERT(((int)irqN >= -16) && ((int)irqN < EXT_IRQ_COUNT));
  // The first 16 vector table entries are system exceptions, so IRQn 0
  // lives at table index 16.
  ((uint32_t*)SCB->VTOR)[(int)irqN + 16] = (uint32_t)((uint32_t*)handler);
}
800 
801 /***************************************************************************//**
802  * @brief
803  *   Initialize an interrupt vector table by copying table entries from a
804  *   source to a target table.
805  *
806  * @note This function will set a new VTOR register value.
807  *
808  * @param[in] sourceTable
809  *   The address of the source vector table.
810  *
811  * @param[in] sourceSize
812  *   A number of entries in the source vector table.
813  *
814  * @param[in] targetTable
815  *   The address of the target (new) vector table.
816  *
817  * @param[in] targetSize
818  *   A number of entries in the target vector table.
819  *
820  * @param[in] defaultHandler
821  *   An address of the interrupt handler used for target entries for which where there
822  *   is no corresponding source entry (i.e., the target table is larger than the source
823  *   table).
824  *
825  * @param[in] overwriteActive
826  *   When true, a target table entry is always overwritten with the
827  *   corresponding source entry. If false, a target table entry is only
828  *   overwritten if it is zero. This makes it possible for an application
829  *   to partly initialize a target table before passing it to this function.
830  *
831  ******************************************************************************/
832 void CORE_InitNvicVectorTable(uint32_t *sourceTable,
833                               uint32_t sourceSize,
834                               uint32_t *targetTable,
835                               uint32_t targetSize,
836                               void *defaultHandler,
837                               bool overwriteActive)
838 {
839   uint32_t i;
840 
841   // ASSERT on non SRAM-based target table.
842   EFM_ASSERT(((uint32_t)targetTable >= SRAM_BASE)
843              && ((uint32_t)targetTable < (SRAM_BASE + SRAM_SIZE)));
844 
845   // ASSERT if misaligned with respect to the VTOR register implementation.
846 #if defined(SCB_VTOR_TBLBASE_Msk)
847   EFM_ASSERT(((uint32_t)targetTable & ~(SCB_VTOR_TBLOFF_Msk
848                                         | SCB_VTOR_TBLBASE_Msk)) == 0U);
849 #else
850   EFM_ASSERT(((uint32_t)targetTable & ~SCB_VTOR_TBLOFF_Msk) == 0U);
851 #endif
852 
853   // ASSERT if misaligned with respect to the vector table size.
854   // The vector table address must be aligned at its size rounded up to nearest 2^n.
855   EFM_ASSERT(((uint32_t)targetTable
856               & ((1UL << (32UL - __CLZ((targetSize * 4UL) - 1UL))) - 1UL))
857              == 0UL);
858 
859   for (i = 0; i < targetSize; i++) {
860     if (overwriteActive) {                      // Overwrite target entries.
861       if (i < sourceSize) {                     //   targetSize <= sourceSize
862         targetTable[i] = sourceTable[i];
863       } else {                                  //   targetSize > sourceSize
864         targetTable[i] = (uint32_t)((uint32_t*)defaultHandler);
865       }
866     } else {                            // Overwrite target entries which are 0.
867       if (i < sourceSize) {                     // targetSize <= sourceSize
868         if (targetTable[i] == 0U) {
869           targetTable[i] = sourceTable[i];
870         }
871       } else {                                  // targetSize > sourceSize
872         if (targetTable[i] == 0U) {
873           targetTable[i] = (uint32_t)((uint32_t*)defaultHandler);
874         }
875       }
876     }
877   }
878   SCB->VTOR = (uint32_t)targetTable;
879 }
880 
881 /** @} (end addtogroup core) */
882