1 /*
2  * FreeRTOS Kernel V11.1.0
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28 
29 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
30  * all the API functions to use the MPU wrappers. That should only be done when
31  * task.h is included from an application file. */
32 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
33 
34 /* Scheduler includes. */
35 #include "FreeRTOS.h"
36 #include "task.h"
37 
38 /* MPU includes. */
39 #include "mpu_wrappers.h"
40 #include "mpu_syscall_numbers.h"
41 
42 /* Portasm includes. */
43 #include "portasm.h"
44 
45 #if ( configENABLE_TRUSTZONE == 1 )
46     /* Secure components includes. */
47     #include "secure_context.h"
48     #include "secure_init.h"
49 #endif /* configENABLE_TRUSTZONE */
50 
51 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
52 
53 /**
54  * The FreeRTOS Cortex-M33 port can be configured to run on the Secure Side only,
55  * i.e. the processor boots as secure and never jumps to the non-secure side.
56  * The TrustZone support in the port must be disabled in order to run FreeRTOS
57  * on the secure side. The following are the valid configuration settings:
58  *
59  * 1. Run FreeRTOS on the Secure Side:
60  *    configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
61  *
62  * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
63  *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
64  *
65  * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
66  *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
67  */
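/**
 * Example: an illustrative FreeRTOSConfig.h fragment (a minimal sketch, not
 * taken from this file) selecting option 2 above, i.e. running FreeRTOS on
 * the Non-Secure Side with Secure Side function call support:
 *
 * @code{c}
 * #define configRUN_FREERTOS_SECURE_ONLY    0
 * #define configENABLE_TRUSTZONE            1
 * @endcode
 */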
68 #if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
69     #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
70 #endif
71 
72 /**
73  * Cortex-M23 does not have a non-secure PSPLIM. PSPLIM should therefore be used
74  * on Cortex-M23 only when FreeRTOS runs on the secure side.
75  */
76 #if ( ( portHAS_ARMV8M_MAIN_EXTENSION == 0 ) && ( configRUN_FREERTOS_SECURE_ONLY == 0 ) )
77     #define portUSE_PSPLIM_REGISTER    0
78 #else
79     #define portUSE_PSPLIM_REGISTER    1
80 #endif
81 /*-----------------------------------------------------------*/
82 
83 /**
84  * @brief Prototype of all Interrupt Service Routines (ISRs).
85  */
86 typedef void ( * portISR_t )( void );
87 /*-----------------------------------------------------------*/
88 
89 /**
90  * @brief Constants required to manipulate the NVIC.
91  */
92 #define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) )
93 #define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) )
94 #define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) )
95 #define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
96 #define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )
97 #define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )
98 #define portNVIC_SYSTICK_CLK_BIT              ( 1UL << 2UL )
99 #define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL )
100 #define portNVIC_PEND_SYSTICK_CLEAR_BIT       ( 1UL << 25UL )
101 #define portNVIC_PEND_SYSTICK_SET_BIT         ( 1UL << 26UL )
102 #define portMIN_INTERRUPT_PRIORITY            ( 255UL )
103 #define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL )
104 #define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL )
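/**
 * The two priority constants above are typically OR-ed into SHPR3 when the
 * scheduler starts so that PendSV and SysTick run at the lowest exception
 * priority. A minimal sketch of that step (illustrative, not the code in this
 * section):
 *
 * @code{c}
 * portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
 * portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
 * @endcode
 */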
105 /*-----------------------------------------------------------*/
106 
107 /**
108  * @brief Constants required to manipulate the SCB.
109  */
110 #define portSCB_VTOR_REG                      ( *( ( portISR_t ** ) 0xe000ed08 ) )
111 #define portSCB_SYS_HANDLER_CTRL_STATE_REG    ( *( ( volatile uint32_t * ) 0xe000ed24 ) )
112 #define portSCB_MEM_FAULT_ENABLE_BIT          ( 1UL << 16UL )
113 /*-----------------------------------------------------------*/
114 
115 /**
116  * @brief Constants used to check the installation of the FreeRTOS interrupt handlers.
117  */
118 #define portVECTOR_INDEX_SVC       ( 11 )
119 #define portVECTOR_INDEX_PENDSV    ( 14 )
120 /*-----------------------------------------------------------*/
121 
122 /**
123  * @brief Constants required to check the validity of an interrupt priority.
124  */
125 #define portNVIC_SHPR2_REG                 ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
126 #define portFIRST_USER_INTERRUPT_NUMBER    ( 16 )
127 #define portNVIC_IP_REGISTERS_OFFSET_16    ( 0xE000E3F0 )
128 #define portAIRCR_REG                      ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
129 #define portTOP_BIT_OF_BYTE                ( ( uint8_t ) 0x80 )
130 #define portMAX_PRIGROUP_BITS              ( ( uint8_t ) 7 )
131 #define portPRIORITY_GROUP_MASK            ( 0x07UL << 8UL )
132 #define portPRIGROUP_SHIFT                 ( 8UL )
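/**
 * These constants support the configASSERT() based check that an interrupt
 * calling a FreeRTOS API function has a priority that is numerically greater
 * than or equal to configMAX_SYSCALL_INTERRUPT_PRIORITY (i.e. a logically
 * equal or lower priority). A simplified sketch of the idea, assuming the
 * pcInterruptPriorityRegisters and ucMaxSysCallPriority variables declared
 * later in this file (the register base address already accounts for the 16
 * system exceptions, so the exception number read from IPSR is used directly
 * as the index):
 *
 * @code{c}
 * uint32_t ulCurrentInterrupt;
 * uint8_t ucCurrentPriority;
 *
 * __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt ) :: "memory" );
 *
 * if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
 * {
 *     ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
 *     configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
 * }
 * @endcode
 */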
133 /*-----------------------------------------------------------*/
134 
135 /**
136  * @brief Constants used during system call enter and exit.
137  */
138 #define portPSR_STACK_PADDING_MASK              ( 1UL << 9UL )
139 #define portEXC_RETURN_STACK_FRAME_TYPE_MASK    ( 1UL << 4UL )
140 /*-----------------------------------------------------------*/
141 
142 /**
143  * @brief Constants required to manipulate the FPU.
144  */
145 #define portCPACR               ( ( volatile uint32_t * ) 0xe000ed88 )              /* Coprocessor Access Control Register. */
146 #define portCPACR_CP10_VALUE    ( 3UL )
147 #define portCPACR_CP11_VALUE    portCPACR_CP10_VALUE
148 #define portCPACR_CP10_POS      ( 20UL )
149 #define portCPACR_CP11_POS      ( 22UL )
150 
151 #define portFPCCR               ( ( volatile uint32_t * ) 0xe000ef34 )              /* Floating Point Context Control Register. */
152 #define portFPCCR_ASPEN_POS     ( 31UL )
153 #define portFPCCR_ASPEN_MASK    ( 1UL << portFPCCR_ASPEN_POS )
154 #define portFPCCR_LSPEN_POS     ( 30UL )
155 #define portFPCCR_LSPEN_MASK    ( 1UL << portFPCCR_LSPEN_POS )
156 /*-----------------------------------------------------------*/
157 
158 /**
159  * @brief Offsets in the stack to the parameters when inside the SVC handler.
160  */
161 #define portOFFSET_TO_LR     ( 5 )
162 #define portOFFSET_TO_PC     ( 6 )
163 #define portOFFSET_TO_PSR    ( 7 )
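/**
 * These offsets index into the basic hardware-stacked exception frame, which
 * is laid out (from the lowest address) as R0, R1, R2, R3, R12, LR, PC and
 * xPSR - i.e. word indices 0 to 7.
 */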
164 /*-----------------------------------------------------------*/
165 
166 /**
167  * @brief Constants required to manipulate the MPU.
168  */
169 #define portMPU_TYPE_REG                        ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
170 #define portMPU_CTRL_REG                        ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
171 #define portMPU_RNR_REG                         ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
172 
173 #define portMPU_RBAR_REG                        ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
174 #define portMPU_RLAR_REG                        ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
175 
176 #define portMPU_RBAR_A1_REG                     ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
177 #define portMPU_RLAR_A1_REG                     ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
178 
179 #define portMPU_RBAR_A2_REG                     ( *( ( volatile uint32_t * ) 0xe000edac ) )
180 #define portMPU_RLAR_A2_REG                     ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
181 
182 #define portMPU_RBAR_A3_REG                     ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
183 #define portMPU_RLAR_A3_REG                     ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
184 
185 #define portMPU_MAIR0_REG                       ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
186 #define portMPU_MAIR1_REG                       ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
187 
188 #define portMPU_RBAR_ADDRESS_MASK               ( 0xffffffe0 ) /* Must be 32-byte aligned. */
189 #define portMPU_RLAR_ADDRESS_MASK               ( 0xffffffe0 ) /* Must be 32-byte aligned. */
190 
191 #define portMPU_RBAR_ACCESS_PERMISSIONS_MASK    ( 3UL << 1UL )
192 
193 #define portMPU_MAIR_ATTR0_POS                  ( 0UL )
194 #define portMPU_MAIR_ATTR0_MASK                 ( 0x000000ff )
195 
196 #define portMPU_MAIR_ATTR1_POS                  ( 8UL )
197 #define portMPU_MAIR_ATTR1_MASK                 ( 0x0000ff00 )
198 
199 #define portMPU_MAIR_ATTR2_POS                  ( 16UL )
200 #define portMPU_MAIR_ATTR2_MASK                 ( 0x00ff0000 )
201 
202 #define portMPU_MAIR_ATTR3_POS                  ( 24UL )
203 #define portMPU_MAIR_ATTR3_MASK                 ( 0xff000000 )
204 
205 #define portMPU_MAIR_ATTR4_POS                  ( 0UL )
206 #define portMPU_MAIR_ATTR4_MASK                 ( 0x000000ff )
207 
208 #define portMPU_MAIR_ATTR5_POS                  ( 8UL )
209 #define portMPU_MAIR_ATTR5_MASK                 ( 0x0000ff00 )
210 
211 #define portMPU_MAIR_ATTR6_POS                  ( 16UL )
212 #define portMPU_MAIR_ATTR6_MASK                 ( 0x00ff0000 )
213 
214 #define portMPU_MAIR_ATTR7_POS                  ( 24UL )
215 #define portMPU_MAIR_ATTR7_MASK                 ( 0xff000000 )
216 
217 #define portMPU_RLAR_ATTR_INDEX0                ( 0UL << 1UL )
218 #define portMPU_RLAR_ATTR_INDEX1                ( 1UL << 1UL )
219 #define portMPU_RLAR_ATTR_INDEX2                ( 2UL << 1UL )
220 #define portMPU_RLAR_ATTR_INDEX3                ( 3UL << 1UL )
221 #define portMPU_RLAR_ATTR_INDEX4                ( 4UL << 1UL )
222 #define portMPU_RLAR_ATTR_INDEX5                ( 5UL << 1UL )
223 #define portMPU_RLAR_ATTR_INDEX6                ( 6UL << 1UL )
224 #define portMPU_RLAR_ATTR_INDEX7                ( 7UL << 1UL )
225 
226 #define portMPU_RLAR_REGION_ENABLE              ( 1UL )
227 
228 /* Enable privileged access to unmapped region. */
229 #define portMPU_PRIV_BACKGROUND_ENABLE_BIT      ( 1UL << 2UL )
230 
231 /* Enable MPU. */
232 #define portMPU_ENABLE_BIT                      ( 1UL << 0UL )
233 
234 /* Expected value of the portMPU_TYPE register. */
235 #define portEXPECTED_MPU_TYPE_VALUE             ( configTOTAL_MPU_REGIONS << 8UL )
236 
237 /* Extract first address of the MPU region as encoded in the
238  * RBAR (Region Base Address Register) value. */
239 #define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
240     ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
241 
242 /* Extract last address of the MPU region as encoded in the
243  * RLAR (Region Limit Address Register) value. */
244 #define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
245     ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
246 
247 /* Does addr lie within the [start, end] address range? */
248 #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
249     ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
250 
251 /* Is the access request satisfied by the available permissions? */
252 #define portIS_AUTHORIZED( accessRequest, permissions ) \
253     ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
254 
255 /* Max value that fits in a uint32_t type. */
256 #define portUINT32_MAX    ( ~( ( uint32_t ) 0 ) )
257 
258 /* Check if adding a and b will result in overflow. */
259 #define portADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( portUINT32_MAX - ( b ) ) )
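/**
 * Example use of the range and overflow helpers above, e.g. when checking
 * whether a buffer lies entirely inside one MPU region (a minimal sketch -
 * pvBuffer, ulBufferLength, ulRegionStart and ulRegionEnd are illustrative
 * names only):
 *
 * @code{c}
 * uint32_t ulBufferStart = ( uint32_t ) pvBuffer;
 * uint32_t ulBufferEnd;
 *
 * if( portADD_UINT32_WILL_OVERFLOW( ulBufferStart, ulBufferLength - 1UL ) == pdFALSE )
 * {
 *     ulBufferEnd = ulBufferStart + ulBufferLength - 1UL;
 *
 *     if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStart, ulRegionStart, ulRegionEnd ) &&
 *         portIS_ADDRESS_WITHIN_RANGE( ulBufferEnd, ulRegionStart, ulRegionEnd ) )
 *     {
 *         // The whole buffer falls within [ ulRegionStart, ulRegionEnd ].
 *     }
 * }
 * @endcode
 */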
260 /*-----------------------------------------------------------*/
261 
262 /**
263  * @brief The maximum 24-bit number.
264  *
265  * It is needed because the SysTick is a 24-bit counter.
266  */
267 #define portMAX_24_BIT_NUMBER       ( 0xffffffUL )
268 
269 /**
270  * @brief A fiddle factor to estimate the number of SysTick counts that would
271  * have occurred while the SysTick counter is stopped during tickless idle
272  * calculations.
273  */
274 #define portMISSED_COUNTS_FACTOR    ( 94UL )
275 /*-----------------------------------------------------------*/
276 
277 /**
278  * @brief Constants required to set up the initial stack.
279  */
280 #define portINITIAL_XPSR    ( 0x01000000 )
281 
282 #if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
283 
284 /**
285  * @brief Initial EXC_RETURN value.
286  *
287  *     FF         FF         FF         FD
288  * 1111 1111  1111 1111  1111 1111  1111 1101
289  *
290  * Bit[6] - 1 --> The exception was taken from the Secure state.
291  * Bit[5] - 1 --> Do not skip stacking of additional state context.
292  * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
293  * Bit[3] - 1 --> Return to the Thread mode.
294  * Bit[2] - 1 --> Restore registers from the process stack.
295  * Bit[1] - 0 --> Reserved, 0.
296  * Bit[0] - 1 --> The exception was taken to the Secure state.
297  */
298     #define portINITIAL_EXC_RETURN    ( 0xfffffffd )
299 #else
300 
301 /**
302  * @brief Initial EXC_RETURN value.
303  *
304  *     FF         FF         FF         BC
305  * 1111 1111  1111 1111  1111 1111  1011 1100
306  *
307  * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
308  * Bit[5] - 1 --> Do not skip stacking of additional state context.
309  * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
310  * Bit[3] - 1 --> Return to the Thread mode.
311  * Bit[2] - 1 --> Restore registers from the process stack.
312  * Bit[1] - 0 --> Reserved, 0.
313  * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
314  */
315     #define portINITIAL_EXC_RETURN    ( 0xffffffbc )
316 #endif /* configRUN_FREERTOS_SECURE_ONLY */
317 
318 /**
319  * @brief CONTROL register privileged bit mask.
320  *
321  * Bit[0] in CONTROL register tells the privilege:
322  *  Bit[0] = 0 ==> The task is privileged.
323  *  Bit[0] = 1 ==> The task is not privileged.
324  */
325 #define portCONTROL_PRIVILEGED_MASK         ( 1UL << 0UL )
326 
327 /**
328  * @brief Initial CONTROL register values.
329  */
330 #define portINITIAL_CONTROL_UNPRIVILEGED    ( 0x3 )
331 #define portINITIAL_CONTROL_PRIVILEGED      ( 0x2 )
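/**
 * In both initial CONTROL values above Bit[1] (SPSEL) is 1, so the task uses
 * the Process Stack Pointer (PSP) in Thread mode; Bit[0] (nPRIV) is then 1
 * for an unprivileged task (0x3) and 0 for a privileged task (0x2).
 */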
332 
333 /**
334  * @brief Let the user override the default SysTick clock rate.  If defined by the
335  * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
336  * configuration register.
337  */
338 #ifndef configSYSTICK_CLOCK_HZ
339     #define configSYSTICK_CLOCK_HZ             ( configCPU_CLOCK_HZ )
340     /* Ensure the SysTick is clocked at the same frequency as the core. */
341     #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( portNVIC_SYSTICK_CLK_BIT )
342 #else
343     /* The SysTick is not clocked at the same frequency as the core, so leave the CLK bit clear. */
344     #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( 0 )
345 #endif
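/**
 * Example: an application that clocks the SysTick from a slower reference
 * clock could add the following to FreeRTOSConfig.h (an illustrative sketch -
 * the 32768UL value is a placeholder for the actual reference clock rate):
 *
 * @code{c}
 * #define configSYSTICK_CLOCK_HZ    ( 32768UL )
 * @endcode
 */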
346 
347 /**
348  * @brief Let the user override the pre-loading of the initial LR with the
349  * address of prvTaskExitError() in case it messes up unwinding of the stack
350  * in the debugger.
351  */
352 #ifdef configTASK_RETURN_ADDRESS
353     #define portTASK_RETURN_ADDRESS    configTASK_RETURN_ADDRESS
354 #else
355     #define portTASK_RETURN_ADDRESS    prvTaskExitError
356 #endif
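/**
 * Example override in FreeRTOSConfig.h (an illustrative sketch -
 * vApplicationTaskExitHandler is a hypothetical application function):
 *
 * @code{c}
 * void vApplicationTaskExitHandler( void );
 * #define configTASK_RETURN_ADDRESS    vApplicationTaskExitHandler
 * @endcode
 */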
357 
358 /**
359  * @brief If portPRELOAD_REGISTERS is 1 then registers are given an initial value
360  * when a task is created. This helps debugging at the cost of code size.
361  */
362 #define portPRELOAD_REGISTERS    1
363 
364 /**
365  * @brief A task is created without a secure context, and must call
366  * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
367  * any secure calls.
368  */
369 #define portNO_SECURE_CONTEXT    0
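/**
 * Example: a task that calls into the secure side must allocate its secure
 * context first (a minimal sketch - tskSECURE_STACK_SIZE_BYTES is an
 * illustrative constant, not one defined by the port):
 *
 * @code{c}
 * #define tskSECURE_STACK_SIZE_BYTES    ( 256U )
 *
 * void vSecureCallingTask( void * pvParameters )
 * {
 *     ( void ) pvParameters;
 *
 *     // Allocate this task's secure side context before any secure calls.
 *     portALLOCATE_SECURE_CONTEXT( tskSECURE_STACK_SIZE_BYTES );
 *
 *     for( ; ; )
 *     {
 *         // ... call Non-Secure Callable (NSC) functions here ...
 *     }
 * }
 * @endcode
 */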
370 /*-----------------------------------------------------------*/
371 
372 /**
373  * @brief Used to catch tasks that attempt to return from their implementing
374  * function.
375  */
376 static void prvTaskExitError( void );
377 
378 #if ( configENABLE_MPU == 1 )
379 
380 /**
381  * @brief Extract MPU region's access permissions from the Region Base Address
382  * Register (RBAR) value.
383  *
384  * @param ulRBARValue RBAR value for the MPU region.
385  *
386  * @return uint32_t Access permissions.
387  */
388     static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
389 #endif /* configENABLE_MPU */
390 
391 #if ( configENABLE_MPU == 1 )
392 
393 /**
394  * @brief Setup the Memory Protection Unit (MPU).
395  */
396     static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
397 #endif /* configENABLE_MPU */
398 
399 #if ( configENABLE_FPU == 1 )
400 
401 /**
402  * @brief Setup the Floating Point Unit (FPU).
403  */
404     static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
405 #endif /* configENABLE_FPU */
406 
407 /**
408  * @brief Setup the timer to generate the tick interrupts.
409  *
410  * The implementation in this file is weak to allow application writers to
411  * change the timer used to generate the tick interrupt.
412  */
413 void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
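/**
 * Because the definition in this file is weak, the application can supply its
 * own vPortSetupTimerInterrupt() to use a different tick source. A minimal
 * sketch, assuming a hypothetical vConfigureMyTickTimer() helper provided by
 * the application:
 *
 * @code{c}
 * void vPortSetupTimerInterrupt( void )
 * {
 *     // Configure an application specific timer to generate the tick
 *     // interrupt at configTICK_RATE_HZ instead of using the SysTick.
 *     vConfigureMyTickTimer( configTICK_RATE_HZ );
 * }
 * @endcode
 */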
414 
415 /**
416  * @brief Checks whether the current execution context is an interrupt.
417  *
418  * @return pdTRUE if the current execution context is an interrupt, pdFALSE
419  * otherwise.
420  */
421 BaseType_t xPortIsInsideInterrupt( void );
422 
423 /**
424  * @brief Yield the processor.
425  */
426 void vPortYield( void ) PRIVILEGED_FUNCTION;
427 
428 /**
429  * @brief Enter critical section.
430  */
431 void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
432 
433 /**
434  * @brief Exit from critical section.
435  */
436 void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
437 
438 /**
439  * @brief SysTick handler.
440  */
441 void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
442 
443 /**
444  * @brief C part of SVC handler.
445  */
446 portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
447 
448 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
449 
450 /**
451  * @brief Sets up the system call stack so that upon returning from
452  * SVC, the system call stack is used.
453  *
454  * @param pulTaskStack The current SP when the SVC was raised.
455  * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
456  * @param ucSystemCallNumber The system call number of the system call.
457  */
458     void vSystemCallEnter( uint32_t * pulTaskStack,
459                            uint32_t ulLR,
460                            uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
461 
462 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
463 
464 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
465 
466 /**
467  * @brief Raise SVC for exiting from a system call.
468  */
469     void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
470 
471 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
472 
473 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
474 
475 /**
476  * @brief Sets up the task stack so that upon returning from
477  * SVC, the task stack is used again.
478  *
479  * @param pulSystemCallStack The current SP when the SVC was raised.
480  * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
481  */
482     void vSystemCallExit( uint32_t * pulSystemCallStack,
483                           uint32_t ulLR ) PRIVILEGED_FUNCTION;
484 
485 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
486 
487 #if ( configENABLE_MPU == 1 )
488 
489 /**
490  * @brief Checks whether or not the calling task is privileged.
491  *
492  * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
493  */
494     BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
495 
496 #endif /* configENABLE_MPU == 1 */
497 /*-----------------------------------------------------------*/
498 
499 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
500 
501 /**
502  * @brief This variable is set to pdTRUE when the scheduler is started.
503  */
504     PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
505 
506 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
507 
508 /**
509  * @brief Each task maintains its own interrupt status in the critical nesting
510  * variable.
511  */
512 PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
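/**
 * Note: ulCriticalNesting is deliberately initialised to a non-zero value so
 * that interrupts are not accidentally re-enabled by a call to
 * vPortExitCritical() before the scheduler has started; the count is reset to
 * zero as part of starting the scheduler.
 */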
513 
514 #if ( configENABLE_TRUSTZONE == 1 )
515 
516 /**
517  * @brief Saved as part of the task context to indicate which context the
518  * task is using on the secure side.
519  */
520     PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
521 #endif /* configENABLE_TRUSTZONE */
522 
523 /**
524  * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
525  * FreeRTOS API functions are not called from interrupts that have been assigned
526  * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
527  */
528 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
529 
530     static uint8_t ucMaxSysCallPriority = 0;
531     static uint32_t ulMaxPRIGROUPValue = 0;
532     static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
533 
534 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
535 
536 #if ( configUSE_TICKLESS_IDLE == 1 )
537 
538 /**
539  * @brief The number of SysTick increments that make up one tick period.
540  */
541     PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
542 
543 /**
544  * @brief The maximum number of tick periods that can be suppressed is
545  * limited by the 24-bit resolution of the SysTick timer.
546  */
547     PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
548 
549 /**
550  * @brief Compensate for the CPU cycles that pass while the SysTick is
551  * stopped (low power functionality only).
552  */
553     PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
554 #endif /* configUSE_TICKLESS_IDLE */
555 /*-----------------------------------------------------------*/
556 
557 #if ( configUSE_TICKLESS_IDLE == 1 )
558 
559     __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
560     {
561         uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
562         TickType_t xModifiableIdleTime;
563 
564         /* Make sure the SysTick reload value does not overflow the counter. */
565         if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
566         {
567             xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
568         }
569 
570         /* Enter a critical section but don't use the taskENTER_CRITICAL()
571          * method as that will mask interrupts that should exit sleep mode. */
572         __asm volatile ( "cpsid i" ::: "memory" );
573         __asm volatile ( "dsb" );
574         __asm volatile ( "isb" );
575 
576         /* If a context switch is pending or a task is waiting for the scheduler
577          * to be unsuspended then abandon the low power entry. */
578         if( eTaskConfirmSleepModeStatus() == eAbortSleep )
579         {
580             /* Re-enable interrupts - see comments above the cpsid instruction
581              * above. */
582             __asm volatile ( "cpsie i" ::: "memory" );
583         }
584         else
585         {
586             /* Stop the SysTick momentarily.  The time the SysTick is stopped for
587              * is accounted for as best it can be, but using the tickless mode will
588              * inevitably result in some tiny drift of the time maintained by the
589              * kernel with respect to calendar time. */
590             portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
591 
592             /* Use the SysTick current-value register to determine the number of
593              * SysTick decrements remaining until the next tick interrupt.  If the
594              * current-value register is zero, then there are actually
595              * ulTimerCountsForOneTick decrements remaining, not zero, because the
596              * SysTick requests the interrupt when decrementing from 1 to 0. */
597             ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
598 
599             if( ulSysTickDecrementsLeft == 0 )
600             {
601                 ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
602             }
603 
604             /* Calculate the reload value required to wait xExpectedIdleTime
605              * tick periods.  -1 is used because this code normally executes part
606              * way through the first tick period.  But if the SysTick IRQ is now
607              * pending, then clear the IRQ, suppressing the first tick, and correct
608              * the reload value to reflect that the second tick period is already
609              * underway.  The expected idle time is always at least two ticks. */
610             ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
611 
612             if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
613             {
614                 portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
615                 ulReloadValue -= ulTimerCountsForOneTick;
616             }
617 
618             if( ulReloadValue > ulStoppedTimerCompensation )
619             {
620                 ulReloadValue -= ulStoppedTimerCompensation;
621             }
622 
623             /* Set the new reload value. */
624             portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
625 
626             /* Clear the SysTick count flag and set the count value back to
627              * zero. */
628             portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
629 
630             /* Restart SysTick. */
631             portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
632 
633             /* Sleep until something happens.  configPRE_SLEEP_PROCESSING() can
634              * set its parameter to 0 to indicate that its implementation contains
635              * its own wait for interrupt or wait for event instruction, and so wfi
636              * should not be executed again.  However, the original expected idle
637              * time variable must remain unmodified, so a copy is taken. */
638             xModifiableIdleTime = xExpectedIdleTime;
639             configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
640 
641             if( xModifiableIdleTime > 0 )
642             {
643                 __asm volatile ( "dsb" ::: "memory" );
644                 __asm volatile ( "wfi" );
645                 __asm volatile ( "isb" );
646             }
647 
648             configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
649 
650             /* Re-enable interrupts to allow the interrupt that brought the MCU
651              * out of sleep mode to execute immediately.  See comments above
652              * the cpsid instruction above. */
653             __asm volatile ( "cpsie i" ::: "memory" );
654             __asm volatile ( "dsb" );
655             __asm volatile ( "isb" );
656 
657             /* Disable interrupts again because the clock is about to be stopped
658              * and interrupts that execute while the clock is stopped will increase
659              * any slippage between the time maintained by the RTOS and calendar
660              * time. */
661             __asm volatile ( "cpsid i" ::: "memory" );
662             __asm volatile ( "dsb" );
663             __asm volatile ( "isb" );
664 
665             /* Disable the SysTick clock without reading the
666              * portNVIC_SYSTICK_CTRL_REG register to ensure the
667              * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set.  Again,
668              * the time the SysTick is stopped for is accounted for as best it can
669              * be, but using the tickless mode will inevitably result in some tiny
670              * drift of the time maintained by the kernel with respect to calendar
671              * time. */
672             portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
673 
674             /* Determine whether the SysTick has already counted to zero. */
675             if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
676             {
677                 uint32_t ulCalculatedLoadValue;
678 
679                 /* The tick interrupt ended the sleep (or is now pending), and
680                  * a new tick period has started.  Reset portNVIC_SYSTICK_LOAD_REG
681                  * with whatever remains of the new tick period. */
682                 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
683 
684                 /* Don't allow a tiny value, or values that have somehow
685                  * underflowed because the post sleep hook did something
686                  * that took too long or because the SysTick current-value register
687                  * is zero. */
688                 if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
689                 {
690                     ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
691                 }
692 
693                 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
694 
695                 /* As the pending tick will be processed as soon as this
696                  * function exits, the tick count maintained by the kernel is stepped
697                  * forward by one less than the time spent waiting. */
698                 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
699             }
700             else
701             {
702                 /* Something other than the tick interrupt ended the sleep. */
703 
704                 /* Use the SysTick current-value register to determine the
705                  * number of SysTick decrements remaining until the expected idle
706                  * time would have ended. */
707                 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
708                 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
709                 {
710                     /* If the SysTick is not using the core clock, the current-
711                      * value register might still be zero here.  In that case, the
712                      * SysTick didn't load from the reload register, and there are
713                      * ulReloadValue decrements remaining in the expected idle
714                      * time, not zero. */
715                     if( ulSysTickDecrementsLeft == 0 )
716                     {
717                         ulSysTickDecrementsLeft = ulReloadValue;
718                     }
719                 }
720                 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
721 
722                 /* Work out how long the sleep lasted rounded to complete tick
723                  * periods (not the ulReload value which accounted for part
724                  * ticks). */
725                 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
726 
727                 /* How many complete tick periods passed while the processor
728                  * was waiting? */
729                 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
730 
731                 /* The reload value is set to whatever fraction of a single tick
732                  * period remains. */
733                 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
734             }
735 
736             /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
737              * then set portNVIC_SYSTICK_LOAD_REG back to its standard value.  If
738              * the SysTick is not using the core clock, temporarily configure it to
739              * use the core clock.  This configuration forces the SysTick to load
740              * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
741              * cycle of the other clock.  Then portNVIC_SYSTICK_LOAD_REG is ready
742              * to receive the standard value immediately. */
743             portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
744             portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
745             #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
746             {
747                 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
748             }
749             #else
750             {
751                 /* The temporary usage of the core clock has served its purpose,
752                  * as described above.  Resume usage of the other clock. */
753                 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
754 
755                 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
756                 {
757                     /* The partial tick period already ended.  Be sure the SysTick
758                      * counts it only once. */
759                     portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
760                 }
761 
762                 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
763                 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
764             }
765             #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
766 
767             /* Step the tick to account for any tick periods that elapsed. */
768             vTaskStepTick( ulCompleteTickPeriods );
769 
770             /* Exit with interrupts enabled. */
771             __asm volatile ( "cpsie i" ::: "memory" );
772         }
773     }
774 
775 #endif /* configUSE_TICKLESS_IDLE */
776 /*-----------------------------------------------------------*/
777 
778 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
779 {
780     /* Calculate the constants required to configure the tick interrupt. */
781     #if ( configUSE_TICKLESS_IDLE == 1 )
782     {
783         ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
784         xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
785         ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
786     }
787     #endif /* configUSE_TICKLESS_IDLE */
788 
789     /* Stop and reset SysTick.
790      *
791      * QEMU versions older than 7.0.0 contain a bug which causes an error if we
792      * enable SysTick without first selecting a valid clock source. We trigger
793      * the bug if we change clock sources from a clock with a zero clock period
794      * to one with a nonzero clock period and enable SysTick at the same time.
795      * So we configure the CLKSOURCE bit here, prior to setting the ENABLE bit.
796      * This workaround avoids the bug in QEMU versions older than 7.0.0. */
797     portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG;
798     portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
799 
800     /* Configure SysTick to interrupt at the requested rate. */
801     portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
802     portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
803 }
804 /*-----------------------------------------------------------*/
805 
806 static void prvTaskExitError( void )
807 {
808     volatile uint32_t ulDummy = 0UL;
809 
810     /* A function that implements a task must not exit or attempt to return to
811      * its caller as there is nothing to return to. If a task wants to exit it
812      * should instead call vTaskDelete( NULL ). Artificially force an assert()
813      * to be triggered if configASSERT() is defined, then stop here so
814      * application writers can catch the error. */
815     configASSERT( ulCriticalNesting == ~0UL );
816     portDISABLE_INTERRUPTS();
817 
818     while( ulDummy == 0 )
819     {
820         /* This file calls prvTaskExitError() after the scheduler has been
821          * started to remove a compiler warning about the function being
822          * defined but never called.  ulDummy is used purely to quieten other
823          * warnings about code appearing after this function is called - making
824          * ulDummy volatile makes the compiler think the function could return
825          * and therefore not output an 'unreachable code' warning for code that
826          * appears after it. */
827     }
828 }
829 /*-----------------------------------------------------------*/
830 
831 #if ( configENABLE_MPU == 1 )
832 
833     static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
834     {
835         uint32_t ulAccessPermissions = 0;
836 
837         if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
838         {
839             ulAccessPermissions = tskMPU_READ_PERMISSION;
840         }
841 
842         if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
843         {
844             ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
845         }
846 
847         return ulAccessPermissions;
848     }
849 
850 #endif /* configENABLE_MPU */
851 /*-----------------------------------------------------------*/
852 
853 #if ( configENABLE_MPU == 1 )
854 
855     static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
856     {
857         #if defined( __ARMCC_VERSION )
858 
859             /* Declaration when these variables are defined in code instead of being
860              * exported from linker scripts. */
861             extern uint32_t * __privileged_functions_start__;
862             extern uint32_t * __privileged_functions_end__;
863             extern uint32_t * __syscalls_flash_start__;
864             extern uint32_t * __syscalls_flash_end__;
865             extern uint32_t * __unprivileged_flash_start__;
866             extern uint32_t * __unprivileged_flash_end__;
867             extern uint32_t * __privileged_sram_start__;
868             extern uint32_t * __privileged_sram_end__;
869         #else /* if defined( __ARMCC_VERSION ) */
870             /* Declaration when these variables are exported from linker scripts. */
871             extern uint32_t __privileged_functions_start__[];
872             extern uint32_t __privileged_functions_end__[];
873             extern uint32_t __syscalls_flash_start__[];
874             extern uint32_t __syscalls_flash_end__[];
875             extern uint32_t __unprivileged_flash_start__[];
876             extern uint32_t __unprivileged_flash_end__[];
877             extern uint32_t __privileged_sram_start__[];
878             extern uint32_t __privileged_sram_end__[];
879         #endif /* defined( __ARMCC_VERSION ) */
880 
881         /* The only permitted numbers of regions are 8 and 16. */
882         configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
883 
884         /* Ensure that configTOTAL_MPU_REGIONS is configured correctly. */
885         configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
886 
887         /* Check that the MPU is present. */
888         if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
889         {
890             /* MAIR0 - Index 0. */
891             portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
892             /* MAIR0 - Index 1. */
893             portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
894 
895             /* Setup privileged flash as Read Only so that privileged tasks can
896              * read it but not modify it. */
897             portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
898             portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
899                                ( portMPU_REGION_NON_SHAREABLE ) |
900                                ( portMPU_REGION_PRIVILEGED_READ_ONLY );
901             portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
902                                ( portMPU_RLAR_ATTR_INDEX0 ) |
903                                ( portMPU_RLAR_REGION_ENABLE );
904 
905             /* Setup unprivileged flash as Read Only by both privileged and
906              * unprivileged tasks. All tasks can read it but none can modify it. */
907             portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
908             portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
909                                ( portMPU_REGION_NON_SHAREABLE ) |
910                                ( portMPU_REGION_READ_ONLY );
911             portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
912                                ( portMPU_RLAR_ATTR_INDEX0 ) |
913                                ( portMPU_RLAR_REGION_ENABLE );
914 
915             /* Setup unprivileged syscalls flash as Read Only by both privileged
916              * and unprivileged tasks. All tasks can read it but none can modify it. */
917             portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
918             portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
919                                ( portMPU_REGION_NON_SHAREABLE ) |
920                                ( portMPU_REGION_READ_ONLY );
921             portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
922                                ( portMPU_RLAR_ATTR_INDEX0 ) |
923                                ( portMPU_RLAR_REGION_ENABLE );
924 
925             /* Setup RAM containing kernel data for privileged access only. */
926             portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
927             portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
928                                ( portMPU_REGION_NON_SHAREABLE ) |
929                                ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
930                                ( portMPU_REGION_EXECUTE_NEVER );
931             portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
932                                ( portMPU_RLAR_ATTR_INDEX0 ) |
933                                ( portMPU_RLAR_REGION_ENABLE );
934 
935             /* Enable mem fault. */
936             portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
937 
938             /* Enable MPU with privileged background access i.e. unmapped
939              * regions have privileged access. */
940             portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
941         }
942     }
943 
944 #endif /* configENABLE_MPU */
945 /*-----------------------------------------------------------*/
946 
947 #if ( configENABLE_FPU == 1 )
948 
949     static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
950     {
951         #if ( configENABLE_TRUSTZONE == 1 )
952         {
953             /* Enable non-secure access to the FPU. */
954             SecureInit_EnableNSFPUAccess();
955         }
956         #endif /* configENABLE_TRUSTZONE */
957 
958         /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
959          * unprivileged code should be able to access FPU. CP11 should be
960          * programmed to the same value as CP10. */
961         *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
962                             ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
963                             );
964 
965         /* ASPEN = 1 ==> Hardware should automatically preserve floating point
966          * context on exception entry and restore on exception return.
967          * LSPEN = 1 ==> Enable lazy context save of FP state. */
968         *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
969     }
970 
971 #endif /* configENABLE_FPU */
972 /*-----------------------------------------------------------*/
973 
974 void vPortYield( void ) /* PRIVILEGED_FUNCTION */
975 {
976     /* Set a PendSV to request a context switch. */
977     portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
978 
979     /* Barriers are normally not required but do ensure the code is
980      * completely within the specified behaviour for the architecture. */
981     __asm volatile ( "dsb" ::: "memory" );
982     __asm volatile ( "isb" );
983 }
984 /*-----------------------------------------------------------*/
985 
986 void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
987 {
988     portDISABLE_INTERRUPTS();
989     ulCriticalNesting++;
990 
991     /* Barriers are normally not required but do ensure the code is
992      * completely within the specified behaviour for the architecture. */
993     __asm volatile ( "dsb" ::: "memory" );
994     __asm volatile ( "isb" );
995 }
996 /*-----------------------------------------------------------*/
997 
998 void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
999 {
1000     configASSERT( ulCriticalNesting );
1001     ulCriticalNesting--;
1002 
1003     if( ulCriticalNesting == 0 )
1004     {
1005         portENABLE_INTERRUPTS();
1006     }
1007 }
1008 /*-----------------------------------------------------------*/
1009 
1010 void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
1011 {
1012     uint32_t ulPreviousMask;
1013 
1014     ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
1015     traceISR_ENTER();
1016     {
1017         /* Increment the RTOS tick. */
1018         if( xTaskIncrementTick() != pdFALSE )
1019         {
1020             traceISR_EXIT_TO_SCHEDULER();
1021             /* Pend a context switch. */
1022             portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
1023         }
1024         else
1025         {
1026             traceISR_EXIT();
1027         }
1028     }
1029     portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
1030 }
1031 /*-----------------------------------------------------------*/
1032 
1033 void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
1034 {
1035     #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1036         #if defined( __ARMCC_VERSION )
1037 
1038             /* Declaration when these variables are defined in code instead of being
1039              * exported from linker scripts. */
1040             extern uint32_t * __syscalls_flash_start__;
1041             extern uint32_t * __syscalls_flash_end__;
1042         #else
1043             /* Declaration when these variables are exported from linker scripts. */
1044             extern uint32_t __syscalls_flash_start__[];
1045             extern uint32_t __syscalls_flash_end__[];
1046         #endif /* defined( __ARMCC_VERSION ) */
1047     #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1048 
1049     uint32_t ulPC;
1050 
1051     #if ( configENABLE_TRUSTZONE == 1 )
1052         uint32_t ulR0, ulR1;
1053         extern TaskHandle_t pxCurrentTCB;
1054         #if ( configENABLE_MPU == 1 )
1055             uint32_t ulControl, ulIsTaskPrivileged;
1056         #endif /* configENABLE_MPU */
1057     #endif /* configENABLE_TRUSTZONE */
1058     uint8_t ucSVCNumber;
1059 
1060     /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
1061      * R12, LR, PC, xPSR. */
1062     ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
1063     ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
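    /* Note: the stacked PC holds the address of the instruction that follows
     * the 2-byte Thumb SVC instruction, and the SVC immediate (the SVC number)
     * is the low byte of that instruction - hence it is read from 2 bytes
     * before the stacked PC. */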
1064 
1065     switch( ucSVCNumber )
1066     {
1067         #if ( configENABLE_TRUSTZONE == 1 )
1068             case portSVC_ALLOCATE_SECURE_CONTEXT:
1069 
1070                 /* R0 contains the stack size passed as parameter to the
1071                  * vPortAllocateSecureContext function. */
1072                 ulR0 = pulCallerStackAddress[ 0 ];
1073 
1074                 #if ( configENABLE_MPU == 1 )
1075                 {
1076                     /* Read the CONTROL register value. */
1077                     __asm volatile ( "mrs %0, control"  : "=r" ( ulControl ) );
1078 
1079                     /* The task that raised the SVC is privileged if Bit[0]
1080                      * in the CONTROL register is 0. */
1081                     ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
1082 
1083                     /* Allocate and load a context for the secure task. */
1084                     xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
1085                 }
1086                 #else /* if ( configENABLE_MPU == 1 ) */
1087                 {
1088                     /* Allocate and load a context for the secure task. */
1089                     xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
1090                 }
1091                 #endif /* configENABLE_MPU */
1092 
1093                 configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
1094                 SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
1095                 break;
1096 
1097             case portSVC_FREE_SECURE_CONTEXT:
1098 
1099                 /* R0 contains the TCB being freed and R1 contains the secure
1100                  * context handle to be freed. */
1101                 ulR0 = pulCallerStackAddress[ 0 ];
1102                 ulR1 = pulCallerStackAddress[ 1 ];
1103 
1104                 /* Free the secure context. */
1105                 SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
1106                 break;
1107         #endif /* configENABLE_TRUSTZONE */
1108 
1109         case portSVC_START_SCHEDULER:
1110             #if ( configENABLE_TRUSTZONE == 1 )
1111             {
1112                 /* De-prioritize the non-secure exceptions so that the
1113                  * non-secure pendSV runs at the lowest priority. */
1114                 SecureInit_DePrioritizeNSExceptions();
1115 
1116                 /* Initialize the secure context management system. */
1117                 SecureContext_Init();
1118             }
1119             #endif /* configENABLE_TRUSTZONE */
1120 
1121             #if ( configENABLE_FPU == 1 )
1122             {
1123                 /* Setup the Floating Point Unit (FPU). */
1124                 prvSetupFPU();
1125             }
1126             #endif /* configENABLE_FPU */
1127 
1128             /* Setup the context of the first task so that the first task starts
1129              * executing. */
1130             vRestoreContextOfFirstTask();
1131             break;
1132 
1133             #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1134                 case portSVC_RAISE_PRIVILEGE:
1135 
1136                     /* Only raise the privilege if the SVC was raised from one of
1137                      * the system calls. */
1138                     if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
1139                         ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
1140                     {
1141                         vRaisePrivilege();
1142                     }
1143                     break;
1144             #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1145 
1146             #if ( configENABLE_MPU == 1 )
1147                 case portSVC_YIELD:
1148                     vPortYield();
1149                     break;
1150             #endif /* configENABLE_MPU == 1 */
1151 
1152         default:
1153             /* Incorrect SVC call. */
1154             configASSERT( pdFALSE );
1155     }
1156 }
1157 /*-----------------------------------------------------------*/
1158 
1159 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1160 
1161     void vSystemCallEnter( uint32_t * pulTaskStack,
1162                            uint32_t ulLR,
1163                            uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
1164     {
1165         extern TaskHandle_t pxCurrentTCB;
1166         extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
1167         xMPU_SETTINGS * pxMpuSettings;
1168         uint32_t * pulSystemCallStack;
1169         uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1170 
1171         #if defined( __ARMCC_VERSION )
1172             /* Declaration when these variables are defined in code instead of being
1173              * exported from linker scripts. */
1174             extern uint32_t * __syscalls_flash_start__;
1175             extern uint32_t * __syscalls_flash_end__;
1176         #else
1177             /* Declaration when these variables are exported from linker scripts. */
1178             extern uint32_t __syscalls_flash_start__[];
1179             extern uint32_t __syscalls_flash_end__[];
1180         #endif /* #if defined( __ARMCC_VERSION ) */
1181 
1182         ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
1183         pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1184 
1185         /* Checks:
1186          * 1. SVC is raised from the system call section (i.e. application is
1187          *    not raising SVC directly).
1188          * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
1189          *    it is non-NULL only during the execution of a system call (i.e.
1190          *    between system call enter and exit).
1191          * 3. System call is not for a kernel API disabled by the configuration
1192          *    in FreeRTOSConfig.h.
1193          * 4. We do not need to check that ucSystemCallNumber is within range
1194          *    because the assembly SVC handler checks that before calling
1195          *    this function.
1196          */
1197         if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1198             ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
1199             ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
1200             ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
1201         {
1202             pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
1203 
1204             #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1205             {
1206                 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1207                 {
1208                     /* Extended frame i.e. FPU in use. */
1209                     ulStackFrameSize = 26;
1210                     __asm volatile (
1211                         " vpush {s0}         \n" /* Trigger lazy stacking. */
1212                         " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
1213                         ::: "memory"
1214                         );
1215                 }
1216                 else
1217                 {
1218                     /* Standard frame i.e. FPU not in use. */
1219                     ulStackFrameSize = 8;
1220                 }
1221             }
1222             #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1223             {
1224                 ulStackFrameSize = 8;
1225             }
1226             #endif /* configENABLE_FPU || configENABLE_MVE */
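            /* Note: the 26-word extended frame is the 8-word basic frame
             * (r0-r3, r12, LR, PC, xPSR) plus s0-s15, FPSCR and a reserved
             * word; the 8-word size is the basic frame alone. */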
1227 
1228             /* Make space on the system call stack for the stack frame. */
1229             pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
1230 
1231             /* Copy the stack frame. */
1232             for( i = 0; i < ulStackFrameSize; i++ )
1233             {
1234                 pulSystemCallStack[ i ] = pulTaskStack[ i ];
1235             }
1236 
1237             /* Store the value of the Link Register before the SVC was raised.
1238              * It contains the address of the caller of the System Call entry
1239              * point (i.e. the caller of the MPU_<API>). We need to restore it
1240              * when we exit from the system call. */
1241             pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
1242             /* Store the value of the PSPLIM register before the SVC was raised.
1243              * We need to restore it when we exit from the system call. */
1244             #if ( portUSE_PSPLIM_REGISTER == 1 )
1245             {
1246                 __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1247             }
1248             #endif
1249 
1250             /* Use the pulSystemCallStack in thread mode. */
1251             __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
1252             #if ( portUSE_PSPLIM_REGISTER == 1 )
1253             {
1254                 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
1255             }
1256             #endif
1257 
1258             /* Start executing the system call upon returning from this handler. */
1259             pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
1260             /* Raise a request to exit from the system call upon finishing the
1261              * system call. */
1262             pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
1263 
1264             /* Remember the location where we should copy the stack frame when we exit from
1265              * the system call. */
1266             pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
1267 
1268             /* Record if the hardware used padding to force the stack pointer
1269              * to be double word aligned. */
1270             if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
1271             {
1272                 pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
1273             }
1274             else
1275             {
1276                 pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
1277             }
1278 
1279             /* We ensure in pxPortInitialiseStack that the system call stack is
1280              * double word aligned and, therefore, there is no need for padding.
1281              * Clear the bit[9] of stacked xPSR. */
1282             pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1283 
1284             /* Raise the privilege for the duration of the system call. */
1285             __asm volatile (
1286                 " mrs r0, control     \n" /* Obtain current control value. */
1287                 " movs r1, #1         \n" /* r1 = 1. */
1288                 " bics r0, r1         \n" /* Clear nPRIV bit. */
1289                 " msr control, r0     \n" /* Write back new control value. */
1290                 ::: "r0", "r1", "memory"
1291                 );
1292         }
1293     }
1294 
1295 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1296 /*-----------------------------------------------------------*/
1297 
1298 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1299 
1300     void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
1301     {
1302         __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
1303     }
1304 
1305 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1306 /*-----------------------------------------------------------*/
1307 
1308 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1309 
1310     void vSystemCallExit( uint32_t * pulSystemCallStack,
1311                           uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1312     {
1313         extern TaskHandle_t pxCurrentTCB;
1314         xMPU_SETTINGS * pxMpuSettings;
1315         uint32_t * pulTaskStack;
1316         uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1317 
1318         #if defined( __ARMCC_VERSION )
1319             /* Declaration when these variables are defined in code instead of being
1320              * exported from linker scripts. */
1321             extern uint32_t * __privileged_functions_start__;
1322             extern uint32_t * __privileged_functions_end__;
1323         #else
1324             /* Declaration when these variables are exported from linker scripts. */
1325             extern uint32_t __privileged_functions_start__[];
1326             extern uint32_t __privileged_functions_end__[];
1327         #endif /* #if defined( __ARMCC_VERSION ) */
1328 
1329         ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
1330         pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1331 
1332         /* Checks:
1333          * 1. SVC is raised from the privileged code (i.e. application is not
1334          *    raising SVC directly). This SVC is only raised from
1335          *    vRequestSystemCallExit which is in the privileged code section.
1336          * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
1337          *    this means that we previously entered a system call and the
1338          *    application is not attempting to exit without entering a system
1339          *    call.
1340          */
1341         if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
1342             ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
1343             ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
1344         {
1345             pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
1346 
1347             #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1348             {
1349                 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1350                 {
1351                     /* Extended frame i.e. FPU in use. */
1352                     ulStackFrameSize = 26;
1353                     __asm volatile (
1354                         " vpush {s0}         \n" /* Trigger lazy stacking. */
1355                         " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
1356                         ::: "memory"
1357                         );
1358                 }
1359                 else
1360                 {
1361                     /* Standard frame i.e. FPU not in use. */
1362                     ulStackFrameSize = 8;
1363                 }
1364             }
1365             #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1366             {
1367                 ulStackFrameSize = 8;
1368             }
1369             #endif /* configENABLE_FPU || configENABLE_MVE */
1370 
1371             /* Make space on the task stack for the stack frame. */
1372             pulTaskStack = pulTaskStack - ulStackFrameSize;
1373 
1374             /* Copy the stack frame. */
1375             for( i = 0; i < ulStackFrameSize; i++ )
1376             {
1377                 pulTaskStack[ i ] = pulSystemCallStack[ i ];
1378             }
1379 
1380             /* Use the pulTaskStack in thread mode. */
1381             __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
1382 
1383             /* Return to the caller of the System Call entry point (i.e. the
1384              * caller of the MPU_<API>). */
1385             pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1386             /* Ensure that LR has a valid value. */
1387             pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1388 
1389             /* Restore the PSPLIM register to what it was at the time of
1390              * system call entry. */
1391             #if ( portUSE_PSPLIM_REGISTER == 1 )
1392             {
1393                 __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1394             }
1395             #endif
1396 
1397             /* If the hardware used padding to force the stack pointer
1398              * to be double word aligned, set the stacked xPSR bit[9],
1399              * otherwise clear it. */
1400             if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
1401             {
1402                 pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
1403             }
1404             else
1405             {
1406                 pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1407             }
1408 
1409             /* This is not NULL only for the duration of the system call. */
1410             pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1411 
1412             /* Drop the privilege before returning to the thread mode. */
1413             __asm volatile (
1414                 " mrs r0, control     \n" /* Obtain current control value. */
1415                 " movs r1, #1         \n" /* r1 = 1. */
1416                 " orrs r0, r1         \n" /* Set nPRIV bit. */
1417                 " msr control, r0     \n" /* Write back new control value. */
1418                 ::: "r0", "r1", "memory"
1419                 );
1420         }
1421     }
1422 
1423 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1424 /*-----------------------------------------------------------*/
1425 
1426 #if ( configENABLE_MPU == 1 )
1427 
1428     BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
1429     {
1430         BaseType_t xTaskIsPrivileged = pdFALSE;
1431         const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1432 
1433         if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1434         {
1435             xTaskIsPrivileged = pdTRUE;
1436         }
1437 
1438         return xTaskIsPrivileged;
1439     }
1440 
1441 #endif /* configENABLE_MPU == 1 */
1442 /*-----------------------------------------------------------*/
1443 
1444 #if ( configENABLE_MPU == 1 )
1445 
1446     StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1447                                          StackType_t * pxEndOfStack,
1448                                          TaskFunction_t pxCode,
1449                                          void * pvParameters,
1450                                          BaseType_t xRunPrivileged,
1451                                          xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
1452     {
1453         uint32_t ulIndex = 0;
1454 
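        /* Layout of ulContext as built below, in order: r4-r11, r0-r3, r12,
         * LR, PC, xPSR, xSecureContext (only when TrustZone is enabled), PSP,
         * PSPLIM, CONTROL and EXC_RETURN. */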
1455         xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
1456         ulIndex++;
1457         xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
1458         ulIndex++;
1459         xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
1460         ulIndex++;
1461         xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
1462         ulIndex++;
1463         xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
1464         ulIndex++;
1465         xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
1466         ulIndex++;
1467         xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
1468         ulIndex++;
1469         xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
1470         ulIndex++;
1471 
1472         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters;            /* r0. */
1473         ulIndex++;
1474         xMPUSettings->ulContext[ ulIndex ] = 0x01010101;                           /* r1. */
1475         ulIndex++;
1476         xMPUSettings->ulContext[ ulIndex ] = 0x02020202;                           /* r2. */
1477         ulIndex++;
1478         xMPUSettings->ulContext[ ulIndex ] = 0x03030303;                           /* r3. */
1479         ulIndex++;
1480         xMPUSettings->ulContext[ ulIndex ] = 0x12121212;                           /* r12. */
1481         ulIndex++;
1482         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
1483         ulIndex++;
1484         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode;                  /* PC. */
1485         ulIndex++;
1486         xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR;                     /* xPSR. */
1487         ulIndex++;
1488 
1489         #if ( configENABLE_TRUSTZONE == 1 )
1490         {
1491             xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
1492             ulIndex++;
1493         }
1494         #endif /* configENABLE_TRUSTZONE */
1495         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP - leaves room for the 8-word hardware saved frame at the stack top. */
1496         ulIndex++;
1497         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack;         /* PSPLIM. */
1498         ulIndex++;
1499 
1500         if( xRunPrivileged == pdTRUE )
1501         {
1502             xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
1503             xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
1504             ulIndex++;
1505         }
1506         else
1507         {
1508             xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
1509             xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
1510             ulIndex++;
1511         }
1512 
1513         xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
1514         ulIndex++;
1515 
1516         #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
1517         {
1518             /* Ensure that the system call stack is double word aligned. */
1519             xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
1520             xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
1521                                                                                      ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
1522 
1523             xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
1524             xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
1525                                                                                             ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
1526                                                                                           ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
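            /* With the usual 8-byte portBYTE_ALIGNMENT, the masking above
             * rounds the stack top down, and the stack limit up, to 8-byte
             * boundaries so that the system call stack stays double word
             * aligned. */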
1527 
1528             /* This is not NULL only for the duration of a system call. */
1529             xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1530         }
1531         #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
1532 
1533         return &( xMPUSettings->ulContext[ ulIndex ] );
1534     }
1535 
1536 #else /* configENABLE_MPU */
1537 
1538     StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1539                                          StackType_t * pxEndOfStack,
1540                                          TaskFunction_t pxCode,
1541                                          void * pvParameters ) /* PRIVILEGED_FUNCTION */
1542     {
1543         /* Simulate the stack frame as it would be created by a context switch
1544          * interrupt. */
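        /* Resulting layout, from the stack top downwards: xPSR, PC, LR, R12,
         * R3-R0, R11-R4, EXC_RETURN, this task's PSPLIM value and, when
         * TrustZone is enabled, its xSecureContext value. When
         * portPRELOAD_REGISTERS is 0, the general purpose registers other
         * than R0 are simply left uninitialised. */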
1545         #if ( portPRELOAD_REGISTERS == 0 )
1546         {
1547             pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1548             *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
1549             pxTopOfStack--;
1550             *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
1551             pxTopOfStack--;
1552             *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1553             pxTopOfStack -= 5;                                       /* Skip R12, R3, R2 and R1 - land on the R0 slot. */
1554             *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
1555             pxTopOfStack -= 9;                                       /* Skip R11..R4 - land on the EXC_RETURN slot. */
1556             *pxTopOfStack = portINITIAL_EXC_RETURN;
1557             pxTopOfStack--;
1558             *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
1559 
1560             #if ( configENABLE_TRUSTZONE == 1 )
1561             {
1562                 pxTopOfStack--;
1563                 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1564             }
1565             #endif /* configENABLE_TRUSTZONE */
1566         }
1567         #else /* portPRELOAD_REGISTERS */
1568         {
1569             pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1570             *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
1571             pxTopOfStack--;
1572             *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
1573             pxTopOfStack--;
1574             *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1575             pxTopOfStack--;
1576             *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
1577             pxTopOfStack--;
1578             *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
1579             pxTopOfStack--;
1580             *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
1581             pxTopOfStack--;
1582             *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
1583             pxTopOfStack--;
1584             *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
1585             pxTopOfStack--;
1586             *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
1587             pxTopOfStack--;
1588             *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
1589             pxTopOfStack--;
1590             *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
1591             pxTopOfStack--;
1592             *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
1593             pxTopOfStack--;
1594             *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
1595             pxTopOfStack--;
1596             *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
1597             pxTopOfStack--;
1598             *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
1599             pxTopOfStack--;
1600             *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
1601             pxTopOfStack--;
1602             *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
1603             pxTopOfStack--;
1604             *pxTopOfStack = ( StackType_t ) pxEndOfStack;            /* Slot used to hold this task's PSPLIM value. */
1605 
1606             #if ( configENABLE_TRUSTZONE == 1 )
1607             {
1608                 pxTopOfStack--;
1609                 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1610             }
1611             #endif /* configENABLE_TRUSTZONE */
1612         }
1613         #endif /* portPRELOAD_REGISTERS */
1614 
1615         return pxTopOfStack;
1616     }
1617 
1618 #endif /* configENABLE_MPU */
1619 /*-----------------------------------------------------------*/
1620 
1621 BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
1622 {
1623     /* An application can install FreeRTOS interrupt handlers in one of the
1624      * following ways:
1625      * 1. Direct Routing - Install the functions SVC_Handler and PendSV_Handler
1626      *    for SVCall and PendSV interrupts respectively.
1627      * 2. Indirect Routing - Install separate handlers for SVCall and PendSV
1628      *    interrupts and route program control from those handlers to
1629      *    SVC_Handler and PendSV_Handler functions.
1630      *
1631      * Applications that use Indirect Routing must set
1632      * configCHECK_HANDLER_INSTALLATION to 0 in their FreeRTOSConfig.h. Direct
1633      * routing, which is validated here when configCHECK_HANDLER_INSTALLATION
1634      * is 1, should be preferred when possible. */
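    /* Illustrative sketch only (not part of this port): with indirect
     * routing, an application-named handler can forward to the FreeRTOS
     * handler, for example:
     *
     *     void App_SVC_Handler( void ) __attribute__( ( naked ) );
     *     void App_SVC_Handler( void )
     *     {
     *         __asm volatile ( "b SVC_Handler" );
     *     }
     *
     * App_SVC_Handler is a hypothetical name and the same pattern applies to
     * PendSV. In that case configCHECK_HANDLER_INSTALLATION must be 0 because
     * the vector table no longer points directly at the FreeRTOS handlers. */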
1635     #if ( configCHECK_HANDLER_INSTALLATION == 1 )
1636     {
1637         const portISR_t * const pxVectorTable = portSCB_VTOR_REG;
1638 
1639         /* Validate that the application has correctly installed the FreeRTOS
1640          * handlers for SVCall and PendSV interrupts. We do not check the
1641          * installation of the SysTick handler because the application may
1642          * choose to drive the RTOS tick using a timer other than the SysTick
1643          * timer by overriding the weak function vPortSetupTimerInterrupt().
1644          *
1645          * Assertion failures here indicate incorrect installation of the
1646          * FreeRTOS handlers. For help installing the FreeRTOS handlers, see
1647          * https://www.FreeRTOS.org/FAQHelp.html.
1648          *
1649          * Systems with a configurable address for the interrupt vector table
1650          * can also encounter assertion failures or even system faults here if
1651          * VTOR is not set correctly to point to the application's vector table. */
1652         configASSERT( pxVectorTable[ portVECTOR_INDEX_SVC ] == SVC_Handler );
1653         configASSERT( pxVectorTable[ portVECTOR_INDEX_PENDSV ] == PendSV_Handler );
1654     }
1655     #endif /* configCHECK_HANDLER_INSTALLATION */
1656 
1657     #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
1658     {
1659         volatile uint32_t ulImplementedPrioBits = 0;
1660         volatile uint8_t ucMaxPriorityValue;
1661 
1662         /* Determine the maximum priority from which ISR safe FreeRTOS API
1663          * functions can be called. ISR safe functions are those that end in
1664          * "FromISR". FreeRTOS maintains separate thread and ISR API functions to
1665          * ensure interrupt entry is as fast and simple as possible.
1666          *
1667          * First, determine the number of priority bits available. Write to all
1668          * possible bits in the priority setting for SVCall. */
1669         portNVIC_SHPR2_REG = 0xFF000000;
1670 
1671         /* Read the value back to see how many bits stuck. */
1672         ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
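        /* For example, on a device that implements only three priority bits
         * (priority bits are MSB aligned on Cortex-M), the 0xFF written above
         * reads back as 0xE0. */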
1673 
1674         /* Use the same mask on the maximum system call priority. */
1675         ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
1676 
1677         /* Check that the maximum system call priority is nonzero after
1678          * accounting for the number of priority bits supported by the
1679          * hardware. A priority of 0 is invalid because setting the BASEPRI
1680          * register to 0 unmasks all interrupts, and interrupts with priority 0
1681          * cannot be masked using BASEPRI.
1682          * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1683         configASSERT( ucMaxSysCallPriority );
1684 
1685         /* Check that the bits not implemented in hardware are zero in
1686          * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
1687         configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( uint8_t ) ( ~( uint32_t ) ucMaxPriorityValue ) ) == 0U );
1688 
1689         /* Calculate the maximum acceptable priority group value for the number
1690          * of bits read back. */
1691         while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
1692         {
1693             ulImplementedPrioBits++;
1694             ucMaxPriorityValue <<= ( uint8_t ) 0x01;
1695         }
1696 
1697         if( ulImplementedPrioBits == 8 )
1698         {
1699             /* When the hardware implements 8 priority bits, there is no way for
1700              * the software to configure PRIGROUP to not have sub-priorities. As
1701              * a result, the least significant bit is always used for sub-priority
1702              * and there are 128 preemption priorities and 2 sub-priorities.
1703              *
1704              * This may cause some confusion in some cases - for example, if
1705              * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
1706              * priority interrupts will be masked in Critical Sections as those
1707              * are at the same preemption priority. This may appear confusing as
1708              * 4 is higher (numerically lower) priority than
1709              * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
1710              * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
1711              * to 4, this confusion does not happen and the behaviour remains the same.
1712              *
1713              * The following assert ensures that the sub-priority bit in the
1714              * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
1715              * confusion. */
1716             configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
1717             ulMaxPRIGROUPValue = 0;
1718         }
1719         else
1720         {
1721             ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
1722         }
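        /* Continuing the three-bit example above: ulImplementedPrioBits is 3,
         * so ulMaxPRIGROUPValue is portMAX_PRIGROUP_BITS - 3, the largest
         * PRIGROUP value for which all implemented bits remain pre-emption
         * priority bits. */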
1723 
1724         /* Shift the priority group value back to its position within the AIRCR
1725          * register. */
1726         ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
1727         ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
1728     }
1729     #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
1730 
1731     /* Make PendSV and SysTick the lowest priority interrupts, and make SVCall
1732      * the highest priority. */
1733     portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
1734     portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
1735     portNVIC_SHPR2_REG = 0;
1736 
1737     #if ( configENABLE_MPU == 1 )
1738     {
1739         /* Setup the Memory Protection Unit (MPU). */
1740         prvSetupMPU();
1741     }
1742     #endif /* configENABLE_MPU */
1743 
1744     /* Start the timer that generates the tick ISR. Interrupts are disabled
1745      * here already. */
1746     vPortSetupTimerInterrupt();
1747 
1748     /* Initialize the critical nesting count ready for the first task. */
1749     ulCriticalNesting = 0;
1750 
1751     #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1752     {
1753         xSchedulerRunning = pdTRUE;
1754     }
1755     #endif /* ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
1756 
1757     /* Start the first task. */
1758     vStartFirstTask();
1759 
1760     /* Should never get here as the tasks will now be executing. Call the task
1761      * exit error function to prevent compiler warnings about a static function
1762      * not being called in the case that the application writer overrides this
1763      * functionality by defining configTASK_RETURN_ADDRESS. Call
1764      * vTaskSwitchContext() so link time optimization does not remove the
1765      * symbol. */
1766     vTaskSwitchContext();
1767     prvTaskExitError();
1768 
1769     /* Should not get here. */
1770     return 0;
1771 }
1772 /*-----------------------------------------------------------*/
1773 
1774 void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
1775 {
1776     /* Not implemented in ports where there is nothing to return to.
1777      * Artificially force an assert. */
1778     configASSERT( ulCriticalNesting == 1000UL );
1779 }
1780 /*-----------------------------------------------------------*/
1781 
1782 #if ( configENABLE_MPU == 1 )
1783 
1784     void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
1785                                     const struct xMEMORY_REGION * const xRegions,
1786                                     StackType_t * pxBottomOfStack,
1787                                     configSTACK_DEPTH_TYPE uxStackDepth )
1788     {
1789         uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
1790         int32_t lIndex = 0;
1791 
1792         #if defined( __ARMCC_VERSION )
1793 
1794             /* Declaration when these variables are defined in code instead of being
1795              * exported from linker scripts. */
1796             extern uint32_t * __privileged_sram_start__;
1797             extern uint32_t * __privileged_sram_end__;
1798         #else
1799             /* Declaration when these variables are exported from linker scripts. */
1800             extern uint32_t __privileged_sram_start__[];
1801             extern uint32_t __privileged_sram_end__[];
1802         #endif /* defined( __ARMCC_VERSION ) */
1803 
1804         /* Setup MAIR0. */
1805         xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
1806         xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
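        /* Attribute index 0 therefore selects normal (bufferable, cacheable)
         * memory and attribute index 1 selects nGnRE device memory. Each
         * region configured below picks one of the two via its RLAR attribute
         * index. */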
1807 
1808         /* This function is called automatically when the task is created - in
1809          * which case the stack region parameters will be valid.  At all other
1810          * times the stack parameters will not be valid and it is assumed that
1811          * the stack region has already been configured. */
1812         if( uxStackDepth > 0 )
1813         {
1814             ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
1815             ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( uxStackDepth * ( configSTACK_DEPTH_TYPE ) sizeof( StackType_t ) ) - 1;
1816 
1817             /* If the stack is within the privileged SRAM, do not protect it
1818              * using a separate MPU region. This is needed because privileged
1819              * SRAM is already protected using an MPU region and ARMv8-M does
1820              * not allow overlapping MPU regions. */
1821             if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
1822                 ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
1823             {
1824                 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
1825                 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
1826             }
1827             else
1828             {
1829                 /* Define the region that allows access to the stack. */
1830                 ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
1831                 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1832 
1833                 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
1834                                                              ( portMPU_REGION_NON_SHAREABLE ) |
1835                                                              ( portMPU_REGION_READ_WRITE ) |
1836                                                              ( portMPU_REGION_EXECUTE_NEVER );
1837 
1838                 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
1839                                                              ( portMPU_RLAR_ATTR_INDEX0 ) |
1840                                                              ( portMPU_RLAR_REGION_ENABLE );
1841             }
1842         }
1843 
1844         /* User supplied configurable regions. */
1845         for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
1846         {
1847             /* If xRegions is NULL i.e. the task has not specified any MPU
1848              * region, the else part ensures that all the configurable MPU
1849              * regions are invalidated. */
1850             if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
1851             {
1852                 /* Translate the generic region definition contained in xRegions
1853                  * into the ARMv8 specific MPU settings that are then stored in
1854                  * xMPUSettings. */
1855                 ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
1856                 ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
1857                 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1858 
1859                 /* Start address. */
1860                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
1861                                                                           ( portMPU_REGION_NON_SHAREABLE );
1862 
1863                 /* RO/RW. */
1864                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
1865                 {
1866                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
1867                 }
1868                 else
1869                 {
1870                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
1871                 }
1872 
1873                 /* XN. */
1874                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
1875                 {
1876                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
1877                 }
1878 
1879                 /* End Address. */
1880                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
1881                                                                           ( portMPU_RLAR_REGION_ENABLE );
1882 
1883                 /* Normal memory / device memory. */
1884                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
1885                 {
1886                     /* Attr1 in MAIR0 is configured as device memory. */
1887                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
1888                 }
1889                 else
1890                 {
1891                     /* Attr0 in MAIR0 is configured as normal memory. */
1892                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
1893                 }
1894             }
1895             else
1896             {
1897                 /* Invalidate the region. */
1898                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
1899                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
1900             }
1901 
1902             lIndex++;
1903         }
1904     }
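    /* Illustrative sketch only, assuming the application creates restricted
     * tasks with xTaskCreateRestricted(): the xRegions translated above
     * normally come from the xRegions member of TaskParameters_t. For
     * example, with a hypothetical application buffer ucSharedBuffer and
     * three configurable regions:
     *
     *     static uint8_t ucSharedBuffer[ 128 ] __attribute__( ( aligned( 32 ) ) );
     *
     *     .xRegions = {
     *         { ucSharedBuffer, sizeof( ucSharedBuffer ),
     *           tskMPU_REGION_READ_WRITE | tskMPU_REGION_EXECUTE_NEVER },
     *         { 0, 0, 0 },
     *         { 0, 0, 0 }
     *     },
     *
     * Zeroed entries cause the loop above to invalidate the corresponding
     * MPU regions. */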
1905 
1906 #endif /* configENABLE_MPU */
1907 /*-----------------------------------------------------------*/
1908 
1909 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1910 
1911     BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
1912                                                 uint32_t ulBufferLength,
1913                                                 uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
1914 
1915     {
1916         uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
1917         BaseType_t xAccessGranted = pdFALSE;
1918         const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1919 
1920         if( xSchedulerRunning == pdFALSE )
1921         {
1922             /* Grant access to all the kernel objects before the scheduler
1923              * is started. It is necessary because there is no task running
1924              * yet and therefore, we cannot use the permissions of any
1925              * task. */
1926             xAccessGranted = pdTRUE;
1927         }
1928         else if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1929         {
1930             xAccessGranted = pdTRUE;
1931         }
1932         else
1933         {
1934             if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
1935             {
1936                 ulBufferStartAddress = ( uint32_t ) pvBuffer;
1937                 ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
1938 
1939                 for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
1940                 {
1941                     /* Is the MPU region enabled? */
1942                     if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
1943                     {
1944                         if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
1945                                                          portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1946                                                          portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1947                             portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
1948                                                          portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1949                                                          portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1950                             portIS_AUTHORIZED( ulAccessRequested,
1951                                                prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
1952                         {
1953                             xAccessGranted = pdTRUE;
1954                             break;
1955                         }
1956                     }
1957                 }
1958             }
1959         }
1960 
1961         return xAccessGranted;
1962     }
1963 
1964 #endif /* #if ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1965 /*-----------------------------------------------------------*/
1966 
1967 BaseType_t xPortIsInsideInterrupt( void )
1968 {
1969     uint32_t ulCurrentInterrupt;
1970     BaseType_t xReturn;
1971 
1972     /* Obtain the number of the currently executing interrupt. Interrupt Program
1973      * Status Register (IPSR) holds the exception number of the currently-executing
1974      * exception or zero for Thread mode. */
1975     __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1976 
1977     if( ulCurrentInterrupt == 0 )
1978     {
1979         xReturn = pdFALSE;
1980     }
1981     else
1982     {
1983         xReturn = pdTRUE;
1984     }
1985 
1986     return xReturn;
1987 }
1988 /*-----------------------------------------------------------*/
1989 
1990 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) )
1991 
1992     void vPortValidateInterruptPriority( void )
1993     {
1994         uint32_t ulCurrentInterrupt;
1995         uint8_t ucCurrentPriority;
1996 
1997         /* Obtain the number of the currently executing interrupt. */
1998         __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1999 
2000         /* Is the interrupt number a user defined interrupt? */
2001         if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
2002         {
2003             /* Look up the interrupt's priority. */
2004             ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
2005 
2006             /* The following assertion will fail if a service routine (ISR) for
2007              * an interrupt that has been assigned a priority above
2008              * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
2009              * function.  ISR safe FreeRTOS API functions must *only* be called
2010              * from interrupts that have been assigned a priority at or below
2011              * configMAX_SYSCALL_INTERRUPT_PRIORITY.
2012              *
2013              * Numerically low interrupt priority numbers represent logically high
2014              * interrupt priorities, therefore the priority of the interrupt must
2015              * be set to a value equal to or numerically *higher* than
2016              * configMAX_SYSCALL_INTERRUPT_PRIORITY.
2017              *
2018              * Interrupts that use the FreeRTOS API must not be left at their
2019              * default priority of zero as that is the highest possible priority,
2020              * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
2021              * and therefore also guaranteed to be invalid.
2022              *
2023              * FreeRTOS maintains separate thread and ISR API functions to ensure
2024              * interrupt entry is as fast and simple as possible.
2025              *
2026              * The following links provide detailed information:
2027              * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
2028              * https://www.FreeRTOS.org/FAQHelp.html */
2029             configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
2030         }
2031 
2032         /* Priority grouping:  The interrupt controller (NVIC) allows the bits
2033          * that define each interrupt's priority to be split between bits that
2034          * define the interrupt's pre-emption priority bits and bits that define
2035          * the interrupt's sub-priority.  For simplicity all bits must be defined
2036          * to be pre-emption priority bits.  The following assertion will fail if
2037          * this is not the case (if some bits represent a sub-priority).
2038          *
2039          * If the application only uses CMSIS libraries for interrupt
2040          * configuration then the correct setting can be achieved on all Cortex-M
2041          * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
2042          * scheduler.  Note however that some vendor specific peripheral libraries
2043          * assume a non-zero priority group setting, in which cases using a value
2044          * of zero will result in unpredictable behaviour. */
2045         configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
2046     }
2047 
2048 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_ARMV8M_MAIN_EXTENSION == 1 ) ) */
2049 /*-----------------------------------------------------------*/
2050 
2051 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
2052 
2053     void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
2054                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2055     {
2056         uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2057         xMPU_SETTINGS * xTaskMpuSettings;
2058 
2059         ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2060         ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
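        /* For example, with 32-bit access control list entries, kernel object
         * index 40 maps to entry 1, bit 8. */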
2061 
2062         xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
2063 
2064         xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
2065     }
2066 
2067 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
2068 /*-----------------------------------------------------------*/
2069 
2070 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
2071 
2072     void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
2073                                           int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2074     {
2075         uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2076         xMPU_SETTINGS * xTaskMpuSettings;
2077 
2078         ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2079         ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2080 
2081         xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
2082 
2083         xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
2084     }
2085 
2086 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
2087 /*-----------------------------------------------------------*/
2088 
2089 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
2090 
2091     #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
2092 
2093         BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2094         {
2095             uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
2096             BaseType_t xAccessGranted = pdFALSE;
2097             const xMPU_SETTINGS * xTaskMpuSettings;
2098 
2099             if( xSchedulerRunning == pdFALSE )
2100             {
2101                 /* Grant access to all the kernel objects before the scheduler
2102                  * is started. It is necessary because there is no task running
2103                  * yet and therefore, we cannot use the permissions of any
2104                  * task. */
2105                 xAccessGranted = pdTRUE;
2106             }
2107             else
2108             {
2109                 xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
2110 
2111                 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2112                 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2113 
2114                 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
2115                 {
2116                     xAccessGranted = pdTRUE;
2117                 }
2118                 else
2119                 {
2120                     if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
2121                     {
2122                         xAccessGranted = pdTRUE;
2123                     }
2124                 }
2125             }
2126 
2127             return xAccessGranted;
2128         }
2129 
2130     #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2131 
2132         BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2133         {
2134             ( void ) lInternalIndexOfKernelObject;
2135 
2136             /* If Access Control List feature is not used, all the tasks have
2137              * access to all the kernel objects. */
2138             return pdTRUE;
2139         }
2140 
2141     #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2142 
2143 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
2144 /*-----------------------------------------------------------*/
2145