xref: /Kernel-v10.6.2/portable/IAR/ARM_CM35P/non_secure/port.c (revision ef7b253b56c9788077f5ecd6c9deb4021923d646)
1 /*
2  * FreeRTOS Kernel V10.6.2
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28 
29 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
30  * all the API functions to use the MPU wrappers. That should only be done when
31  * task.h is included from an application file. */
32 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
33 
34 /* Scheduler includes. */
35 #include "FreeRTOS.h"
36 #include "task.h"
37 
38 /* MPU includes. */
39 #include "mpu_wrappers.h"
40 #include "mpu_syscall_numbers.h"
41 
42 /* Portasm includes. */
43 #include "portasm.h"
44 
45 #if ( configENABLE_TRUSTZONE == 1 )
46     /* Secure components includes. */
47     #include "secure_context.h"
48     #include "secure_init.h"
49 #endif /* configENABLE_TRUSTZONE */
50 
51 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
52 
53 /**
54  * The FreeRTOS Cortex-M35P port can be configured to run on the Secure Side only,
55  * i.e. the processor boots as secure and never jumps to the non-secure side.
56  * The TrustZone support in the port must be disabled in order to run FreeRTOS
57  * on the secure side. The following are the valid configuration settings:
58  *
59  * 1. Run FreeRTOS on the Secure Side:
60  *    configRUN_FREERTOS_SECURE_ONLY = 1 and configENABLE_TRUSTZONE = 0
61  *
62  * 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
63  *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 1
64  *
65  * 3. Run FreeRTOS on the Non-Secure Side only i.e. no Secure Side function call support:
66  *    configRUN_FREERTOS_SECURE_ONLY = 0 and configENABLE_TRUSTZONE = 0
67  */
68 #if ( ( configRUN_FREERTOS_SECURE_ONLY == 1 ) && ( configENABLE_TRUSTZONE == 1 ) )
69     #error TrustZone needs to be disabled in order to run FreeRTOS on the Secure Side.
70 #endif
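/* Example FreeRTOSConfig.h settings for the three supported configurations
 * (an illustrative sketch only - the macro names are the ones checked above):
 *
 *     // 1. Run FreeRTOS on the Secure Side:
 *     #define configRUN_FREERTOS_SECURE_ONLY    1
 *     #define configENABLE_TRUSTZONE            0
 *
 *     // 2. Run FreeRTOS on the Non-Secure Side with Secure Side function call support:
 *     #define configRUN_FREERTOS_SECURE_ONLY    0
 *     #define configENABLE_TRUSTZONE            1
 *
 *     // 3. Run FreeRTOS on the Non-Secure Side only:
 *     #define configRUN_FREERTOS_SECURE_ONLY    0
 *     #define configENABLE_TRUSTZONE            0
 */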
71 /*-----------------------------------------------------------*/
72 
73 /**
74  * @brief Constants required to manipulate the NVIC.
75  */
76 #define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) )
77 #define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) )
78 #define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) )
79 #define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
80 #define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )
81 #define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )
82 #define portNVIC_SYSTICK_CLK_BIT              ( 1UL << 2UL )
83 #define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL )
84 #define portNVIC_PEND_SYSTICK_CLEAR_BIT       ( 1UL << 25UL )
85 #define portNVIC_PEND_SYSTICK_SET_BIT         ( 1UL << 26UL )
86 #define portMIN_INTERRUPT_PRIORITY            ( 255UL )
87 #define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL )
88 #define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL )
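/* Illustrative sketch (the actual writes happen in the scheduler start-up
 * code, which is outside this excerpt) of how the two values above place
 * PendSV and SysTick at the lowest interrupt priority:
 *
 *     portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
 *     portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
 */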
89 /*-----------------------------------------------------------*/
90 
91 /**
92  * @brief Constants required to manipulate the SCB.
93  */
94 #define portSCB_SYS_HANDLER_CTRL_STATE_REG    ( *( volatile uint32_t * ) 0xe000ed24 )
95 #define portSCB_MEM_FAULT_ENABLE_BIT          ( 1UL << 16UL )
96 /*-----------------------------------------------------------*/
97 
98 /**
99  * @brief Constants required to check the validity of an interrupt priority.
100  */
101 #define portNVIC_SHPR2_REG                 ( *( ( volatile uint32_t * ) 0xE000ED1C ) )
102 #define portFIRST_USER_INTERRUPT_NUMBER    ( 16 )
103 #define portNVIC_IP_REGISTERS_OFFSET_16    ( 0xE000E3F0 )
104 #define portAIRCR_REG                      ( *( ( volatile uint32_t * ) 0xE000ED0C ) )
105 #define portTOP_BIT_OF_BYTE                ( ( uint8_t ) 0x80 )
106 #define portMAX_PRIGROUP_BITS              ( ( uint8_t ) 7 )
107 #define portPRIORITY_GROUP_MASK            ( 0x07UL << 8UL )
108 #define portPRIGROUP_SHIFT                 ( 8UL )
109 /*-----------------------------------------------------------*/
110 
111 /**
112  * @brief Constants used during system call enter and exit.
113  */
114 #define portPSR_STACK_PADDING_MASK                ( 1UL << 9UL )
115 #define portEXC_RETURN_STACK_FRAME_TYPE_MASK      ( 1UL << 4UL )
116 /*-----------------------------------------------------------*/
117 
118 /**
119  * @brief Constants required to manipulate the FPU.
120  */
121 #define portCPACR               ( ( volatile uint32_t * ) 0xe000ed88 )              /* Coprocessor Access Control Register. */
122 #define portCPACR_CP10_VALUE    ( 3UL )
123 #define portCPACR_CP11_VALUE    portCPACR_CP10_VALUE
124 #define portCPACR_CP10_POS      ( 20UL )
125 #define portCPACR_CP11_POS      ( 22UL )
126 
127 #define portFPCCR               ( ( volatile uint32_t * ) 0xe000ef34 )              /* Floating Point Context Control Register. */
128 #define portFPCCR_ASPEN_POS     ( 31UL )
129 #define portFPCCR_ASPEN_MASK    ( 1UL << portFPCCR_ASPEN_POS )
130 #define portFPCCR_LSPEN_POS     ( 30UL )
131 #define portFPCCR_LSPEN_MASK    ( 1UL << portFPCCR_LSPEN_POS )
132 /*-----------------------------------------------------------*/
133 
134 /**
135  * @brief Offsets in the stack to the parameters when inside the SVC handler.
136  */
137 #define portOFFSET_TO_LR                    ( 5 )
138 #define portOFFSET_TO_PC                    ( 6 )
139 #define portOFFSET_TO_PSR                   ( 7 )
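/* The offsets above index the basic exception frame that the hardware pushes
 * on exception entry (see vPortSVCHandler_C() below). For illustration, with
 * pulStack pointing at the stacked frame:
 *
 *     pulStack[ 0 ]..pulStack[ 3 ]   R0 - R3
 *     pulStack[ 4 ]                  R12
 *     pulStack[ 5 ]                  LR     ( portOFFSET_TO_LR )
 *     pulStack[ 6 ]                  PC     ( portOFFSET_TO_PC )
 *     pulStack[ 7 ]                  xPSR   ( portOFFSET_TO_PSR )
 */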
140 /*-----------------------------------------------------------*/
141 
142 /**
143  * @brief Constants required to manipulate the MPU.
144  */
145 #define portMPU_TYPE_REG                      ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
146 #define portMPU_CTRL_REG                      ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
147 #define portMPU_RNR_REG                       ( *( ( volatile uint32_t * ) 0xe000ed98 ) )
148 
149 #define portMPU_RBAR_REG                      ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
150 #define portMPU_RLAR_REG                      ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
151 
152 #define portMPU_RBAR_A1_REG                   ( *( ( volatile uint32_t * ) 0xe000eda4 ) )
153 #define portMPU_RLAR_A1_REG                   ( *( ( volatile uint32_t * ) 0xe000eda8 ) )
154 
155 #define portMPU_RBAR_A2_REG                   ( *( ( volatile uint32_t * ) 0xe000edac ) )
156 #define portMPU_RLAR_A2_REG                   ( *( ( volatile uint32_t * ) 0xe000edb0 ) )
157 
158 #define portMPU_RBAR_A3_REG                   ( *( ( volatile uint32_t * ) 0xe000edb4 ) )
159 #define portMPU_RLAR_A3_REG                   ( *( ( volatile uint32_t * ) 0xe000edb8 ) )
160 
161 #define portMPU_MAIR0_REG                     ( *( ( volatile uint32_t * ) 0xe000edc0 ) )
162 #define portMPU_MAIR1_REG                     ( *( ( volatile uint32_t * ) 0xe000edc4 ) )
163 
164 #define portMPU_RBAR_ADDRESS_MASK             ( 0xffffffe0 ) /* Must be 32-byte aligned. */
165 #define portMPU_RLAR_ADDRESS_MASK             ( 0xffffffe0 ) /* Must be 32-byte aligned. */
166 
167 #define portMPU_RBAR_ACCESS_PERMISSIONS_MASK  ( 3UL << 1UL )
168 
169 #define portMPU_MAIR_ATTR0_POS                ( 0UL )
170 #define portMPU_MAIR_ATTR0_MASK               ( 0x000000ff )
171 
172 #define portMPU_MAIR_ATTR1_POS                ( 8UL )
173 #define portMPU_MAIR_ATTR1_MASK               ( 0x0000ff00 )
174 
175 #define portMPU_MAIR_ATTR2_POS                ( 16UL )
176 #define portMPU_MAIR_ATTR2_MASK               ( 0x00ff0000 )
177 
178 #define portMPU_MAIR_ATTR3_POS                ( 24UL )
179 #define portMPU_MAIR_ATTR3_MASK               ( 0xff000000 )
180 
181 #define portMPU_MAIR_ATTR4_POS                ( 0UL )
182 #define portMPU_MAIR_ATTR4_MASK               ( 0x000000ff )
183 
184 #define portMPU_MAIR_ATTR5_POS                ( 8UL )
185 #define portMPU_MAIR_ATTR5_MASK               ( 0x0000ff00 )
186 
187 #define portMPU_MAIR_ATTR6_POS                ( 16UL )
188 #define portMPU_MAIR_ATTR6_MASK               ( 0x00ff0000 )
189 
190 #define portMPU_MAIR_ATTR7_POS                ( 24UL )
191 #define portMPU_MAIR_ATTR7_MASK               ( 0xff000000 )
192 
193 #define portMPU_RLAR_ATTR_INDEX0              ( 0UL << 1UL )
194 #define portMPU_RLAR_ATTR_INDEX1              ( 1UL << 1UL )
195 #define portMPU_RLAR_ATTR_INDEX2              ( 2UL << 1UL )
196 #define portMPU_RLAR_ATTR_INDEX3              ( 3UL << 1UL )
197 #define portMPU_RLAR_ATTR_INDEX4              ( 4UL << 1UL )
198 #define portMPU_RLAR_ATTR_INDEX5              ( 5UL << 1UL )
199 #define portMPU_RLAR_ATTR_INDEX6              ( 6UL << 1UL )
200 #define portMPU_RLAR_ATTR_INDEX7              ( 7UL << 1UL )
201 
202 #define portMPU_RLAR_REGION_ENABLE            ( 1UL )
203 
204 /* Enable privileged access to unmapped region. */
205 #define portMPU_PRIV_BACKGROUND_ENABLE_BIT    ( 1UL << 2UL )
206 
207 /* Enable MPU. */
208 #define portMPU_ENABLE_BIT                    ( 1UL << 0UL )
209 
210 /* Expected value of the portMPU_TYPE register. */
211 #define portEXPECTED_MPU_TYPE_VALUE           ( configTOTAL_MPU_REGIONS << 8UL )
212 
213 /* Extract first address of the MPU region as encoded in the
214  * RBAR (Region Base Address Register) value. */
215 #define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
216     ( ( rbar ) & portMPU_RBAR_ADDRESS_MASK )
217 
218 /* Extract last address of the MPU region as encoded in the
219  * RLAR (Region Limit Address Register) value. */
220 #define portEXTRACT_LAST_ADDRESS_FROM_RLAR( rlar ) \
221     ( ( ( rlar ) & portMPU_RLAR_ADDRESS_MASK ) | ~portMPU_RLAR_ADDRESS_MASK )
222 
223 /* Does addr lie within the [start, end] address range? */
224 #define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
225     ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )
226 
227 /* Is the access request satisfied by the available permissions? */
228 #define portIS_AUTHORIZED( accessRequest, permissions ) \
229     ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )
230 
231 /* Max value that fits in a uint32_t type. */
232 #define portUINT32_MAX    ( ~( ( uint32_t ) 0 ) )
233 
234 /* Check if adding a and b will result in overflow. */
235 #define portADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( portUINT32_MAX - ( b ) ) )
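/* Illustrative sketch of how the helpers above can be combined to check that
 * a buffer spanning [ ulStart, ulStart + ulLength - 1 ] lies inside one region
 * (ulStart, ulLength, ulRegionStart and ulRegionEnd are hypothetical names):
 *
 *     if( ( portADD_UINT32_WILL_OVERFLOW( ulStart, ulLength - 1UL ) == 0 ) &&
 *         ( portIS_ADDRESS_WITHIN_RANGE( ulStart, ulRegionStart, ulRegionEnd ) ) &&
 *         ( portIS_ADDRESS_WITHIN_RANGE( ulStart + ulLength - 1UL, ulRegionStart, ulRegionEnd ) ) )
 *     {
 *         // The whole buffer is within the region.
 *     }
 */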
236 /*-----------------------------------------------------------*/
237 
238 /**
239  * @brief The maximum 24-bit number.
240  *
241  * It is needed because the systick is a 24-bit counter.
242  */
243 #define portMAX_24_BIT_NUMBER       ( 0xffffffUL )
244 
245 /**
246  * @brief A fiddle factor to estimate the number of SysTick counts that would
247  * have occurred while the SysTick counter is stopped during tickless idle
248  * calculations.
249  */
250 #define portMISSED_COUNTS_FACTOR    ( 94UL )
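/* Worked example with illustrative figures: for configSYSTICK_CLOCK_HZ of
 * 100 MHz and configTICK_RATE_HZ of 1000, ulTimerCountsForOneTick is 100000
 * SysTick counts per tick, so at most 0xffffff / 100000 = 167 tick periods
 * can be suppressed during a single tickless idle sleep. */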
251 /*-----------------------------------------------------------*/
252 
253 /**
254  * @brief Constants required to set up the initial stack.
255  */
256 #define portINITIAL_XPSR    ( 0x01000000 )
257 
258 #if ( configRUN_FREERTOS_SECURE_ONLY == 1 )
259 
260 /**
261  * @brief Initial EXC_RETURN value.
262  *
263  *     FF         FF         FF         FD
264  * 1111 1111  1111 1111  1111 1111  1111 1101
265  *
266  * Bit[6] - 1 --> The exception was taken from the Secure state.
267  * Bit[5] - 1 --> Do not skip stacking of additional state context.
268  * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
269  * Bit[3] - 1 --> Return to the Thread mode.
270  * Bit[2] - 1 --> Restore registers from the process stack.
271  * Bit[1] - 0 --> Reserved, 0.
272  * Bit[0] - 1 --> The exception was taken to the Secure state.
273  */
274     #define portINITIAL_EXC_RETURN    ( 0xfffffffd )
275 #else
276 
277 /**
278  * @brief Initial EXC_RETURN value.
279  *
280  *     FF         FF         FF         BC
281  * 1111 1111  1111 1111  1111 1111  1011 1100
282  *
283  * Bit[6] - 0 --> The exception was taken from the Non-Secure state.
284  * Bit[5] - 1 --> Do not skip stacking of additional state context.
285  * Bit[4] - 1 --> The PE did not allocate space on the stack for FP context.
286  * Bit[3] - 1 --> Return to the Thread mode.
287  * Bit[2] - 1 --> Restore registers from the process stack.
288  * Bit[1] - 0 --> Reserved, 0.
289  * Bit[0] - 0 --> The exception was taken to the Non-Secure state.
290  */
291     #define portINITIAL_EXC_RETURN    ( 0xffffffbc )
292 #endif /* configRUN_FREERTOS_SECURE_ONLY */
293 
294 /**
295  * @brief CONTROL register privileged bit mask.
296  *
297  * Bit[0] in CONTROL register tells the privilege:
298  *  Bit[0] = 0 ==> The task is privileged.
299  *  Bit[0] = 1 ==> The task is not privileged.
300  */
301 #define portCONTROL_PRIVILEGED_MASK         ( 1UL << 0UL )
302 
303 /**
304  * @brief Initial CONTROL register values.
305  */
306 #define portINITIAL_CONTROL_UNPRIVILEGED    ( 0x3 )
307 #define portINITIAL_CONTROL_PRIVILEGED      ( 0x2 )
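/* Decoding of the two initial values above (Armv8-M CONTROL register):
 * Bit[0] (nPRIV) - 0 ==> privileged, 1 ==> unprivileged.
 * Bit[1] (SPSEL) - 1 ==> Thread mode uses the process stack (PSP).
 * So 0x2 starts a privileged task on the PSP and 0x3 starts an unprivileged
 * task on the PSP. */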
308 
309 /**
310  * @brief Let the user override the default SysTick clock rate.  If defined by the
311  * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
312  * configuration register.
313  */
314 #ifndef configSYSTICK_CLOCK_HZ
315     #define configSYSTICK_CLOCK_HZ             ( configCPU_CLOCK_HZ )
316     /* Ensure the SysTick is clocked at the same frequency as the core. */
317     #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( portNVIC_SYSTICK_CLK_BIT )
318 #else
319     /* Select the option to clock SysTick not at the same frequency as the core. */
320     #define portNVIC_SYSTICK_CLK_BIT_CONFIG    ( 0 )
321 #endif
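/* Example override (a sketch only - 32768UL is a hypothetical reference clock
 * frequency): defining the following in FreeRTOSConfig.h makes this port clock
 * the SysTick from a source other than the core clock:
 *
 *     #define configSYSTICK_CLOCK_HZ    ( 32768UL )
 */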
322 
323 /**
324  * @brief Let the user override the pre-loading of the initial LR with the
325  * address of prvTaskExitError() in case it messes up unwinding of the stack
326  * in the debugger.
327  */
328 #ifdef configTASK_RETURN_ADDRESS
329     #define portTASK_RETURN_ADDRESS    configTASK_RETURN_ADDRESS
330 #else
331     #define portTASK_RETURN_ADDRESS    prvTaskExitError
332 #endif
333 
334 /**
335  * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
336  * when a task is created. This helps in debugging at the cost of code size.
337  */
338 #define portPRELOAD_REGISTERS    1
339 
340 /**
341  * @brief A task is created without a secure context, and must call
342  * portALLOCATE_SECURE_CONTEXT() to give itself a secure context before it makes
343  * any secure calls.
344  */
345 #define portNO_SECURE_CONTEXT    0
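/* Example task entry (an illustrative sketch - prvSecureCallingTask is a
 * hypothetical task function and configMINIMAL_SECURE_STACK_SIZE an
 * application supplied constant):
 *
 *     static void prvSecureCallingTask( void * pvParameters )
 *     {
 *         // Allocate this task's secure context before any secure calls.
 *         portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );
 *
 *         for( ; ; )
 *         {
 *             // Calls into the Secure Side can now be made from this task.
 *         }
 *     }
 */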
346 /*-----------------------------------------------------------*/
347 
348 /**
349  * @brief Used to catch tasks that attempt to return from their implementing
350  * function.
351  */
352 static void prvTaskExitError( void );
353 
354 #if ( configENABLE_MPU == 1 )
355 
356 /**
357  * @brief Extract MPU region's access permissions from the Region Base Address
358  * Register (RBAR) value.
359  *
360  * @param ulRBARValue RBAR value for the MPU region.
361  *
362  * @return uint32_t Access permissions.
363  */
364     static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) PRIVILEGED_FUNCTION;
365 #endif /* configENABLE_MPU */
366 
367 #if ( configENABLE_MPU == 1 )
368 
369 /**
370  * @brief Setup the Memory Protection Unit (MPU).
371  */
372     static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
373 #endif /* configENABLE_MPU */
374 
375 #if ( configENABLE_FPU == 1 )
376 
377 /**
378  * @brief Setup the Floating Point Unit (FPU).
379  */
380     static void prvSetupFPU( void ) PRIVILEGED_FUNCTION;
381 #endif /* configENABLE_FPU */
382 
383 /**
384  * @brief Setup the timer to generate the tick interrupts.
385  *
386  * The implementation in this file is weak to allow application writers to
387  * change the timer used to generate the tick interrupt.
388  */
389 void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
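/* Because the definition below is weak, an application can provide its own
 * implementation to generate the tick from a different timer. A minimal
 * sketch, assuming a hypothetical vendor helper prvVendorTimerStartTick():
 *
 *     void vPortSetupTimerInterrupt( void )
 *     {
 *         prvVendorTimerStartTick( configTICK_RATE_HZ );
 *     }
 */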
390 
391 /**
392  * @brief Checks whether the current execution context is an interrupt.
393  *
394  * @return pdTRUE if the current execution context is an interrupt, pdFALSE
395  * otherwise.
396  */
397 BaseType_t xPortIsInsideInterrupt( void );
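/* Example usage (illustrative - xSemaphore and xHigherPriorityTaskWoken are
 * hypothetical application variables): code shared between task and interrupt
 * context can pick the correct API variant at run time:
 *
 *     if( xPortIsInsideInterrupt() == pdTRUE )
 *     {
 *         xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 *     }
 *     else
 *     {
 *         xSemaphoreGive( xSemaphore );
 *     }
 */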
398 
399 /**
400  * @brief Yield the processor.
401  */
402 void vPortYield( void ) PRIVILEGED_FUNCTION;
403 
404 /**
405  * @brief Enter critical section.
406  */
407 void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
408 
409 /**
410  * @brief Exit from critical section.
411  */
412 void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
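/* Critical sections nest - each enter increments ulCriticalNesting and each
 * exit decrements it, with interrupts only re-enabled when the count returns
 * to zero. An illustrative sketch using the generic kernel macros that map to
 * these functions:
 *
 *     taskENTER_CRITICAL();
 *     {
 *         taskENTER_CRITICAL();
 *         // Interrupts are still masked here.
 *         taskEXIT_CRITICAL();
 *         // Interrupts remain masked until the outer exit below.
 *     }
 *     taskEXIT_CRITICAL();
 */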
413 
414 /**
415  * @brief SysTick handler.
416  */
417 void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
418 
419 /**
420  * @brief C part of SVC handler.
421  */
422 portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
423 
424 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
425 
426 /**
427  * @brief Sets up the system call stack so that upon returning from
428  * SVC, the system call stack is used.
429  *
430  * @param pulTaskStack The current SP when the SVC was raised.
431  * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
432  * @param ucSystemCallNumber The system call number of the system call.
433  */
434     void vSystemCallEnter( uint32_t * pulTaskStack,
435                            uint32_t ulLR,
436                            uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
437 
438 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
439 
440 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
441 
442 /**
443  * @brief Raise SVC for exiting from a system call.
444  */
445     void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
446 
447 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
448 
449 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
450 
451     /**
452      * @brief Sets up the task stack so that upon returning from
453      * SVC, the task stack is used again.
454      *
455      * @param pulSystemCallStack The current SP when the SVC was raised.
456      * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
457      */
458     void vSystemCallExit( uint32_t * pulSystemCallStack,
459                           uint32_t ulLR ) PRIVILEGED_FUNCTION;
460 
461 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
462 
463 #if ( configENABLE_MPU == 1 )
464 
465     /**
466      * @brief Checks whether or not the calling task is privileged.
467      *
468      * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
469      */
470     BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
471 
472 #endif /* configENABLE_MPU == 1 */
473 /*-----------------------------------------------------------*/
474 
475 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
476 
477 /**
478  * @brief This variable is set to pdTRUE when the scheduler is started.
479  */
480     PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
481 
482 #endif
483 
484 /**
485  * @brief Each task maintains its own interrupt status in the critical nesting
486  * variable.
487  */
488 PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
489 
490 #if ( configENABLE_TRUSTZONE == 1 )
491 
492 /**
493  * @brief Saved as part of the task context to indicate which context the
494  * task is using on the secure side.
495  */
496     PRIVILEGED_DATA portDONT_DISCARD volatile SecureContextHandle_t xSecureContext = portNO_SECURE_CONTEXT;
497 #endif /* configENABLE_TRUSTZONE */
498 
499 /**
500  * @brief Used by the portASSERT_IF_INTERRUPT_PRIORITY_INVALID() macro to ensure
501  * FreeRTOS API functions are not called from interrupts that have been assigned
502  * a priority above configMAX_SYSCALL_INTERRUPT_PRIORITY.
503  */
504 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
505 
506     static uint8_t ucMaxSysCallPriority = 0;
507     static uint32_t ulMaxPRIGROUPValue = 0;
508     static const volatile uint8_t * const pcInterruptPriorityRegisters = ( const volatile uint8_t * ) portNVIC_IP_REGISTERS_OFFSET_16;
509 
510 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
511 
512 #if ( configUSE_TICKLESS_IDLE == 1 )
513 
514 /**
515  * @brief The number of SysTick increments that make up one tick period.
516  */
517     PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
518 
519 /**
520  * @brief The maximum number of tick periods that can be suppressed is
521  * limited by the 24 bit resolution of the SysTick timer.
522  */
523     PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
524 
525 /**
526  * @brief Compensate for the CPU cycles that pass while the SysTick is
527  * stopped (low power functionality only).
528  */
529     PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
530 #endif /* configUSE_TICKLESS_IDLE */
531 /*-----------------------------------------------------------*/
532 
533 #if ( configUSE_TICKLESS_IDLE == 1 )
534     __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
535     {
536         uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
537         TickType_t xModifiableIdleTime;
538 
539         /* Make sure the SysTick reload value does not overflow the counter. */
540         if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
541         {
542             xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
543         }
544 
545         /* Enter a critical section but don't use the taskENTER_CRITICAL()
546          * method as that will mask interrupts that should exit sleep mode. */
547         __asm volatile ( "cpsid i" ::: "memory" );
548         __asm volatile ( "dsb" );
549         __asm volatile ( "isb" );
550 
551         /* If a context switch is pending or a task is waiting for the scheduler
552          * to be unsuspended then abandon the low power entry. */
553         if( eTaskConfirmSleepModeStatus() == eAbortSleep )
554         {
555             /* Re-enable interrupts - see comments above the cpsid instruction
556              * above. */
557             __asm volatile ( "cpsie i" ::: "memory" );
558         }
559         else
560         {
561             /* Stop the SysTick momentarily.  The time the SysTick is stopped for
562              * is accounted for as best it can be, but using the tickless mode will
563              * inevitably result in some tiny drift of the time maintained by the
564              * kernel with respect to calendar time. */
565             portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
566 
567             /* Use the SysTick current-value register to determine the number of
568              * SysTick decrements remaining until the next tick interrupt.  If the
569              * current-value register is zero, then there are actually
570              * ulTimerCountsForOneTick decrements remaining, not zero, because the
571              * SysTick requests the interrupt when decrementing from 1 to 0. */
572             ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
573 
574             if( ulSysTickDecrementsLeft == 0 )
575             {
576                 ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
577             }
578 
579             /* Calculate the reload value required to wait xExpectedIdleTime
580              * tick periods.  -1 is used because this code normally executes part
581              * way through the first tick period.  But if the SysTick IRQ is now
582              * pending, then clear the IRQ, suppressing the first tick, and correct
583              * the reload value to reflect that the second tick period is already
584              * underway.  The expected idle time is always at least two ticks. */
585             ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
586 
587             if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
588             {
589                 portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
590                 ulReloadValue -= ulTimerCountsForOneTick;
591             }
592 
593             if( ulReloadValue > ulStoppedTimerCompensation )
594             {
595                 ulReloadValue -= ulStoppedTimerCompensation;
596             }
597 
598             /* Set the new reload value. */
599             portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
600 
601             /* Clear the SysTick count flag and set the count value back to
602              * zero. */
603             portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
604 
605             /* Restart SysTick. */
606             portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
607 
608             /* Sleep until something happens.  configPRE_SLEEP_PROCESSING() can
609              * set its parameter to 0 to indicate that its implementation contains
610              * its own wait for interrupt or wait for event instruction, and so wfi
611              * should not be executed again.  However, the original expected idle
612              * time variable must remain unmodified, so a copy is taken. */
613             xModifiableIdleTime = xExpectedIdleTime;
614             configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
615 
616             if( xModifiableIdleTime > 0 )
617             {
618                 __asm volatile ( "dsb" ::: "memory" );
619                 __asm volatile ( "wfi" );
620                 __asm volatile ( "isb" );
621             }
622 
623             configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
624 
625             /* Re-enable interrupts to allow the interrupt that brought the MCU
626              * out of sleep mode to execute immediately.  See comments above
627              * the cpsid instruction above. */
628             __asm volatile ( "cpsie i" ::: "memory" );
629             __asm volatile ( "dsb" );
630             __asm volatile ( "isb" );
631 
632             /* Disable interrupts again because the clock is about to be stopped
633              * and interrupts that execute while the clock is stopped will increase
634              * any slippage between the time maintained by the RTOS and calendar
635              * time. */
636             __asm volatile ( "cpsid i" ::: "memory" );
637             __asm volatile ( "dsb" );
638             __asm volatile ( "isb" );
639 
640             /* Disable the SysTick clock without reading the
641              * portNVIC_SYSTICK_CTRL_REG register to ensure the
642              * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set.  Again,
643              * the time the SysTick is stopped for is accounted for as best it can
644              * be, but using the tickless mode will inevitably result in some tiny
645              * drift of the time maintained by the kernel with respect to calendar
646              * time. */
647             portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );
648 
649             /* Determine whether the SysTick has already counted to zero. */
650             if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
651             {
652                 uint32_t ulCalculatedLoadValue;
653 
654                 /* The tick interrupt ended the sleep (or is now pending), and
655                  * a new tick period has started.  Reset portNVIC_SYSTICK_LOAD_REG
656                  * with whatever remains of the new tick period. */
657                 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
658 
659                 /* Don't allow a tiny value, or values that have somehow
660                  * underflowed because the post sleep hook did something
661                  * that took too long or because the SysTick current-value register
662                  * is zero. */
663                 if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
664                 {
665                     ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
666                 }
667 
668                 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
669 
670                 /* As the pending tick will be processed as soon as this
671                  * function exits, the tick value maintained by the kernel is stepped
672                  * forward by one less than the time spent waiting. */
673                 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
674             }
675             else
676             {
677                 /* Something other than the tick interrupt ended the sleep. */
678 
679                 /* Use the SysTick current-value register to determine the
680                  * number of SysTick decrements remaining until the expected idle
681                  * time would have ended. */
682                 ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
683                 #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
684                 {
685                     /* If the SysTick is not using the core clock, the current-
686                      * value register might still be zero here.  In that case, the
687                      * SysTick didn't load from the reload register, and there are
688                      * ulReloadValue decrements remaining in the expected idle
689                      * time, not zero. */
690                     if( ulSysTickDecrementsLeft == 0 )
691                     {
692                         ulSysTickDecrementsLeft = ulReloadValue;
693                     }
694                 }
695                 #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
696 
697                 /* Work out how long the sleep lasted rounded to complete tick
698                  * periods (not the ulReload value which accounted for part
699                  * ticks). */
700                 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;
701 
702                 /* How many complete tick periods passed while the processor
703                  * was waiting? */
704                 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
705 
706                 /* The reload value is set to whatever fraction of a single tick
707                  * period remains. */
708                 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
709             }
710 
711             /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
712              * then set portNVIC_SYSTICK_LOAD_REG back to its standard value.  If
713              * the SysTick is not using the core clock, temporarily configure it to
714              * use the core clock.  This configuration forces the SysTick to load
715              * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
716              * cycle of the other clock.  Then portNVIC_SYSTICK_LOAD_REG is ready
717              * to receive the standard value immediately. */
718             portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
719             portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
720             #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
721             {
722                 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
723             }
724             #else
725             {
726                 /* The temporary usage of the core clock has served its purpose,
727                  * as described above.  Resume usage of the other clock. */
728                 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;
729 
730                 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
731                 {
732                     /* The partial tick period already ended.  Be sure the SysTick
733                      * counts it only once. */
734                     portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
735                 }
736 
737                 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
738                 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
739             }
740             #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */
741 
742             /* Step the tick to account for any tick periods that elapsed. */
743             vTaskStepTick( ulCompleteTickPeriods );
744 
745             /* Exit with interrupts enabled. */
746             __asm volatile ( "cpsie i" ::: "memory" );
747         }
748     }
749 #endif /* configUSE_TICKLESS_IDLE */
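/* configPRE_SLEEP_PROCESSING() and configPOST_SLEEP_PROCESSING() used above
 * default to empty macros. A sketch of hooking them from FreeRTOSConfig.h,
 * assuming hypothetical application helpers prvPreSleep()/prvPostSleep()
 * (setting the parameter to 0 in the pre-sleep hook suppresses the wfi above):
 *
 *     #define configPRE_SLEEP_PROCESSING( xIdleTime )     prvPreSleep( &( xIdleTime ) )
 *     #define configPOST_SLEEP_PROCESSING( xIdleTime )    prvPostSleep( xIdleTime )
 */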
750 /*-----------------------------------------------------------*/
751 
752 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
753 {
754     /* Calculate the constants required to configure the tick interrupt. */
755     #if ( configUSE_TICKLESS_IDLE == 1 )
756     {
757         ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
758         xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
759         ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
760     }
761     #endif /* configUSE_TICKLESS_IDLE */
762 
763     /* Stop and reset the SysTick. */
764     portNVIC_SYSTICK_CTRL_REG = 0UL;
765     portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
766 
767     /* Configure SysTick to interrupt at the requested rate. */
768     portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
769     portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
770 }
771 /*-----------------------------------------------------------*/
772 
773 static void prvTaskExitError( void )
774 {
775     volatile uint32_t ulDummy = 0UL;
776 
777     /* A function that implements a task must not exit or attempt to return to
778      * its caller as there is nothing to return to. If a task wants to exit it
779      * should instead call vTaskDelete( NULL ). Artificially force an assert()
780      * to be triggered if configASSERT() is defined, then stop here so
781      * application writers can catch the error. */
782     configASSERT( ulCriticalNesting == ~0UL );
783     portDISABLE_INTERRUPTS();
784 
785     while( ulDummy == 0 )
786     {
787         /* This file calls prvTaskExitError() after the scheduler has been
788          * started to remove a compiler warning about the function being
789          * defined but never called.  ulDummy is used purely to quieten other
790          * warnings about code appearing after this function is called - making
791          * ulDummy volatile makes the compiler think the function could return
792          * and therefore not output an 'unreachable code' warning for code that
793          * appears after it. */
794     }
795 }
796 /*-----------------------------------------------------------*/
797 
798 #if ( configENABLE_MPU == 1 )
799     static uint32_t prvGetRegionAccessPermissions( uint32_t ulRBARValue ) /* PRIVILEGED_FUNCTION */
800     {
801         uint32_t ulAccessPermissions = 0;
802 
803         if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_ONLY )
804         {
805             ulAccessPermissions = tskMPU_READ_PERMISSION;
806         }
807 
808         if( ( ulRBARValue & portMPU_RBAR_ACCESS_PERMISSIONS_MASK ) == portMPU_REGION_READ_WRITE )
809         {
810             ulAccessPermissions = ( tskMPU_READ_PERMISSION | tskMPU_WRITE_PERMISSION );
811         }
812 
813         return ulAccessPermissions;
814     }
815 #endif /* configENABLE_MPU */
816 /*-----------------------------------------------------------*/
817 
818 #if ( configENABLE_MPU == 1 )
819     static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
820     {
821         #if defined( __ARMCC_VERSION )
822             /* Declaration when these variables are defined in code instead of being
823              * exported from linker scripts. */
824             extern uint32_t * __privileged_functions_start__;
825             extern uint32_t * __privileged_functions_end__;
826             extern uint32_t * __syscalls_flash_start__;
827             extern uint32_t * __syscalls_flash_end__;
828             extern uint32_t * __unprivileged_flash_start__;
829             extern uint32_t * __unprivileged_flash_end__;
830             extern uint32_t * __privileged_sram_start__;
831             extern uint32_t * __privileged_sram_end__;
832         #else /* if defined( __ARMCC_VERSION ) */
833             /* Declaration when these variables are exported from linker scripts. */
834             extern uint32_t __privileged_functions_start__[];
835             extern uint32_t __privileged_functions_end__[];
836             extern uint32_t __syscalls_flash_start__[];
837             extern uint32_t __syscalls_flash_end__[];
838             extern uint32_t __unprivileged_flash_start__[];
839             extern uint32_t __unprivileged_flash_end__[];
840             extern uint32_t __privileged_sram_start__[];
841             extern uint32_t __privileged_sram_end__[];
842         #endif /* defined( __ARMCC_VERSION ) */
843 
844         /* The only permitted numbers of MPU regions are 8 and 16. */
845         configASSERT( ( configTOTAL_MPU_REGIONS == 8 ) || ( configTOTAL_MPU_REGIONS == 16 ) );
846 
847         /* Ensure that the configTOTAL_MPU_REGIONS is configured correctly. */
848         configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );
849 
850         /* Check that the MPU is present. */
851         if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
852         {
853             /* MAIR0 - Index 0. */
854             portMPU_MAIR0_REG |= ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
855             /* MAIR0 - Index 1. */
856             portMPU_MAIR0_REG |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
857 
858             /* Setup privileged flash as Read Only so that privileged tasks can
859              * read it but not modify. */
860             portMPU_RNR_REG = portPRIVILEGED_FLASH_REGION;
861             portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
862                                ( portMPU_REGION_NON_SHAREABLE ) |
863                                ( portMPU_REGION_PRIVILEGED_READ_ONLY );
864             portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_functions_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
865                                ( portMPU_RLAR_ATTR_INDEX0 ) |
866                                ( portMPU_RLAR_REGION_ENABLE );
867 
868             /* Setup unprivileged flash as Read Only by both privileged and
869              * unprivileged tasks. All tasks can read it but no-one can modify. */
870             portMPU_RNR_REG = portUNPRIVILEGED_FLASH_REGION;
871             portMPU_RBAR_REG = ( ( ( uint32_t ) __unprivileged_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
872                                ( portMPU_REGION_NON_SHAREABLE ) |
873                                ( portMPU_REGION_READ_ONLY );
874             portMPU_RLAR_REG = ( ( ( uint32_t ) __unprivileged_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
875                                ( portMPU_RLAR_ATTR_INDEX0 ) |
876                                ( portMPU_RLAR_REGION_ENABLE );
877 
878             /* Setup unprivileged syscalls flash as Read Only by both privileged
879              * and unprivileged tasks. All tasks can read it but no-one can modify. */
880             portMPU_RNR_REG = portUNPRIVILEGED_SYSCALLS_REGION;
881             portMPU_RBAR_REG = ( ( ( uint32_t ) __syscalls_flash_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
882                                ( portMPU_REGION_NON_SHAREABLE ) |
883                                ( portMPU_REGION_READ_ONLY );
884             portMPU_RLAR_REG = ( ( ( uint32_t ) __syscalls_flash_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
885                                ( portMPU_RLAR_ATTR_INDEX0 ) |
886                                ( portMPU_RLAR_REGION_ENABLE );
887 
888             /* Setup RAM containing kernel data for privileged access only. */
889             portMPU_RNR_REG = portPRIVILEGED_RAM_REGION;
890             portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_sram_start__ ) & portMPU_RBAR_ADDRESS_MASK ) |
891                                ( portMPU_REGION_NON_SHAREABLE ) |
892                                ( portMPU_REGION_PRIVILEGED_READ_WRITE ) |
893                                ( portMPU_REGION_EXECUTE_NEVER );
894             portMPU_RLAR_REG = ( ( ( uint32_t ) __privileged_sram_end__ ) & portMPU_RLAR_ADDRESS_MASK ) |
895                                ( portMPU_RLAR_ATTR_INDEX0 ) |
896                                ( portMPU_RLAR_REGION_ENABLE );
897 
898             /* Enable mem fault. */
899             portSCB_SYS_HANDLER_CTRL_STATE_REG |= portSCB_MEM_FAULT_ENABLE_BIT;
900 
901             /* Enable MPU with privileged background access i.e. unmapped
902              * regions have privileged access. */
903             portMPU_CTRL_REG |= ( portMPU_PRIV_BACKGROUND_ENABLE_BIT | portMPU_ENABLE_BIT );
904         }
905     }
906 #endif /* configENABLE_MPU */
907 /*-----------------------------------------------------------*/
908 
909 #if ( configENABLE_FPU == 1 )
910     static void prvSetupFPU( void ) /* PRIVILEGED_FUNCTION */
911     {
912         #if ( configENABLE_TRUSTZONE == 1 )
913         {
914             /* Enable non-secure access to the FPU. */
915             SecureInit_EnableNSFPUAccess();
916         }
917         #endif /* configENABLE_TRUSTZONE */
918 
919         /* CP10 = 11 ==> Full access to FPU i.e. both privileged and
920          * unprivileged code should be able to access FPU. CP11 should be
921          * programmed to the same value as CP10. */
922         *( portCPACR ) |= ( ( portCPACR_CP10_VALUE << portCPACR_CP10_POS ) |
923                             ( portCPACR_CP11_VALUE << portCPACR_CP11_POS )
924                             );
925 
926         /* ASPEN = 1 ==> Hardware should automatically preserve floating point
927          * context on exception entry and restore on exception return.
928          * LSPEN = 1 ==> Enable lazy context save of FP state. */
929         *( portFPCCR ) |= ( portFPCCR_ASPEN_MASK | portFPCCR_LSPEN_MASK );
930     }
931 #endif /* configENABLE_FPU */
932 /*-----------------------------------------------------------*/
933 
934 void vPortYield( void ) /* PRIVILEGED_FUNCTION */
935 {
936     /* Set a PendSV to request a context switch. */
937     portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
938 
939     /* Barriers are normally not required but do ensure the code is
940      * completely within the specified behaviour for the architecture. */
941     __asm volatile ( "dsb" ::: "memory" );
942     __asm volatile ( "isb" );
943 }
944 /*-----------------------------------------------------------*/
945 
946 void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
947 {
948     portDISABLE_INTERRUPTS();
949     ulCriticalNesting++;
950 
951     /* Barriers are normally not required but do ensure the code is
952      * completely within the specified behaviour for the architecture. */
953     __asm volatile ( "dsb" ::: "memory" );
954     __asm volatile ( "isb" );
955 }
956 /*-----------------------------------------------------------*/
957 
958 void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
959 {
960     configASSERT( ulCriticalNesting );
961     ulCriticalNesting--;
962 
963     if( ulCriticalNesting == 0 )
964     {
965         portENABLE_INTERRUPTS();
966     }
967 }
968 /*-----------------------------------------------------------*/
969 
970 void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
971 {
972     uint32_t ulPreviousMask;
973 
974     ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
975     {
976         /* Increment the RTOS tick. */
977         if( xTaskIncrementTick() != pdFALSE )
978         {
979             /* Pend a context switch. */
980             portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
981         }
982     }
983     portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
984 }
985 /*-----------------------------------------------------------*/
986 
987 void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
988 {
989     #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
990         #if defined( __ARMCC_VERSION )
991             /* Declaration when these variables are defined in code instead of being
992              * exported from linker scripts. */
993             extern uint32_t * __syscalls_flash_start__;
994             extern uint32_t * __syscalls_flash_end__;
995         #else
996             /* Declaration when these variables are exported from linker scripts. */
997             extern uint32_t __syscalls_flash_start__[];
998             extern uint32_t __syscalls_flash_end__[];
999         #endif /* defined( __ARMCC_VERSION ) */
1000     #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1001 
1002     uint32_t ulPC;
1003 
1004     #if ( configENABLE_TRUSTZONE == 1 )
1005         uint32_t ulR0, ulR1;
1006         extern TaskHandle_t pxCurrentTCB;
1007         #if ( configENABLE_MPU == 1 )
1008             uint32_t ulControl, ulIsTaskPrivileged;
1009         #endif /* configENABLE_MPU */
1010     #endif /* configENABLE_TRUSTZONE */
1011     uint8_t ucSVCNumber;
1012 
1013     /* Registers are stored on the stack in the following order - R0, R1, R2, R3,
1014      * R12, LR, PC, xPSR. */
1015     ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];
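    /* The SVC instruction is a 2-byte Thumb encoding whose low byte holds the
     * immediate, so the SVC number is read from the byte at PC - 2. */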
1016     ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];
1017 
1018     switch( ucSVCNumber )
1019     {
1020         #if ( configENABLE_TRUSTZONE == 1 )
1021             case portSVC_ALLOCATE_SECURE_CONTEXT:
1022 
1023                 /* R0 contains the stack size passed as parameter to the
1024                  * vPortAllocateSecureContext function. */
1025                 ulR0 = pulCallerStackAddress[ 0 ];
1026 
1027                 #if ( configENABLE_MPU == 1 )
1028                 {
1029                     /* Read the CONTROL register value. */
1030                     __asm volatile ( "mrs %0, control"  : "=r" ( ulControl ) );
1031 
1032                     /* The task that raised the SVC is privileged if Bit[0]
1033                      * in the CONTROL register is 0. */
1034                     ulIsTaskPrivileged = ( ( ulControl & portCONTROL_PRIVILEGED_MASK ) == 0 );
1035 
1036                     /* Allocate and load a context for the secure task. */
1037                     xSecureContext = SecureContext_AllocateContext( ulR0, ulIsTaskPrivileged, pxCurrentTCB );
1038                 }
1039                 #else /* if ( configENABLE_MPU == 1 ) */
1040                 {
1041                     /* Allocate and load a context for the secure task. */
1042                     xSecureContext = SecureContext_AllocateContext( ulR0, pxCurrentTCB );
1043                 }
1044                 #endif /* configENABLE_MPU */
1045 
1046                 configASSERT( xSecureContext != securecontextINVALID_CONTEXT_ID );
1047                 SecureContext_LoadContext( xSecureContext, pxCurrentTCB );
1048                 break;
1049 
1050             case portSVC_FREE_SECURE_CONTEXT:
1051 
1052                 /* R0 contains TCB being freed and R1 contains the secure
1053                  * context handle to be freed. */
1054                 ulR0 = pulCallerStackAddress[ 0 ];
1055                 ulR1 = pulCallerStackAddress[ 1 ];
1056 
1057                 /* Free the secure context. */
1058                 SecureContext_FreeContext( ( SecureContextHandle_t ) ulR1, ( void * ) ulR0 );
1059                 break;
1060         #endif /* configENABLE_TRUSTZONE */
1061 
1062         case portSVC_START_SCHEDULER:
1063             #if ( configENABLE_TRUSTZONE == 1 )
1064             {
1065                 /* De-prioritize the non-secure exceptions so that the
1066                  * non-secure pendSV runs at the lowest priority. */
1067                 SecureInit_DePrioritizeNSExceptions();
1068 
1069                 /* Initialize the secure context management system. */
1070                 SecureContext_Init();
1071             }
1072             #endif /* configENABLE_TRUSTZONE */
1073 
1074             #if ( configENABLE_FPU == 1 )
1075             {
1076                 /* Setup the Floating Point Unit (FPU). */
1077                 prvSetupFPU();
1078             }
1079             #endif /* configENABLE_FPU */
1080 
1081             /* Setup the context of the first task so that the first task starts
1082              * executing. */
1083             vRestoreContextOfFirstTask();
1084             break;
1085 
1086         #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )
1087             case portSVC_RAISE_PRIVILEGE:
1088 
1089                 /* Only raise the privilege if the SVC was raised from one of
1090                  * the system calls. */
1091                 if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
1092                     ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
1093                 {
1094                     vRaisePrivilege();
1095                 }
1096                 break;
1097         #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */
1098 
1099         default:
1100             /* Incorrect SVC call. */
1101             configASSERT( pdFALSE );
1102     }
1103 }
1104 /*-----------------------------------------------------------*/
1105 
1106 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1107 
1108     void vSystemCallEnter( uint32_t * pulTaskStack,
1109                            uint32_t ulLR,
1110                            uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
1111     {
1112         extern TaskHandle_t pxCurrentTCB;
1113         extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
1114         xMPU_SETTINGS * pxMpuSettings;
1115         uint32_t * pulSystemCallStack;
1116         uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1117 
1118         #if defined( __ARMCC_VERSION )
1119             /* Declaration when these variables are defined in code instead of being
1120              * exported from linker scripts. */
1121             extern uint32_t * __syscalls_flash_start__;
1122             extern uint32_t * __syscalls_flash_end__;
1123         #else
1124             /* Declaration when these variables are exported from linker scripts. */
1125             extern uint32_t __syscalls_flash_start__[];
1126             extern uint32_t __syscalls_flash_end__[];
1127         #endif /* #if defined( __ARMCC_VERSION ) */
1128 
1129         ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
1130         pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1131 
1132         /* Checks:
1133          * 1. SVC is raised from the system call section (i.e. application is
1134          *    not raising SVC directly).
1135          * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
1136          *    it is non-NULL only during the execution of a system call (i.e.
1137          *    between system call enter and exit).
1138          * 3. System call is not for a kernel API disabled by the configuration
1139          *    in FreeRTOSConfig.h.
1140          * 4. We do not need to check that ucSystemCallNumber is within range
1141          *    because the assembly SVC handler checks that before calling
1142          *    this function.
1143          */
1144         if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
1145             ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
1146             ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
1147             ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
1148         {
1149             pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;
1150 
1151             #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1152             {
1153                 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1154                 {
1155                     /* Extended frame i.e. FPU in use. */
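                    /* 26 words: R0-R3, R12, LR, PC, xPSR, S0-S15, FPSCR and a
                     * reserved word - the standard extended exception frame,
                     * listed here for illustration. */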
1156                     ulStackFrameSize = 26;
1157                     __asm volatile
1158                     (
1159                         " vpush {s0}         \n" /* Trigger lazy stacking. */
1160                         " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
1161                         ::: "memory"
1162                     );
1163                 }
1164                 else
1165                 {
1166                     /* Standard frame i.e. FPU not in use. */
1167                     ulStackFrameSize = 8;
1168                 }
1169             }
1170             #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1171             {
1172                 ulStackFrameSize = 8;
1173             }
1174             #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1175 
1176             /* Make space on the system call stack for the stack frame. */
1177             pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;
1178 
1179             /* Copy the stack frame. */
1180             for( i = 0; i < ulStackFrameSize; i++ )
1181             {
1182                 pulSystemCallStack[ i ] = pulTaskStack[ i ];
1183             }
1184 
1185             /* Store the value of the Link Register before the SVC was raised.
1186              * It contains the address of the caller of the System Call entry
1187              * point (i.e. the caller of the MPU_<API>). We need to restore it
1188              * when we exit from the system call. */
1189             pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];
1190 
1191             /* Store the value of the PSPLIM register before the SVC was raised.
1192              * We need to restore it when we exit from the system call. */
1193             __asm volatile ( "mrs %0, psplim" : "=r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1194 
1195             /* Use the pulSystemCallStack in thread mode. */
1196             __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );
1197             __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) );
1198 
1199             /* Start executing the system call upon returning from this handler. */
1200             pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];
1201 
1202             /* Raise a request to exit from the system call upon finishing the
1203              * system call. */
1204             pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;
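
                 /* With the stacked PC and LR set up as above, the exception return
                  * from this SVC handler unstacks the copied frame from the system
                  * call stack, so execution resumes in the system call implementation
                  * running on that stack. When the implementation returns, it branches
                  * to vRequestSystemCallExit, which raises portSVC_SYSTEM_CALL_EXIT to
                  * switch back to the task stack. */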
1205 
1206             /* Remember the location where we should copy the stack frame when we exit from
1207              * the system call. */
1208             pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;
1209 
1210             /* Record if the hardware used padding to force the stack pointer
1211              * to be double word aligned. */
1212             if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
1213             {
1214                 pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
1215             }
1216             else
1217             {
1218                 pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
1219             }
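
                 /* Bit[9] of the stacked xPSR records whether the hardware inserted an
                  * extra 4-byte aligner word when stacking the frame. It is remembered
                  * here so that the frame rebuilt on the task stack at system call
                  * exit reproduces exactly the same alignment. */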
1220 
1221             /* pxPortInitialiseStack ensures that the system call stack is
1222              * double word aligned and therefore, no padding is needed. Clear
1223              * bit[9] of the stacked xPSR. */
1224             pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1225 
1226             /* Raise the privilege for the duration of the system call. */
1227             __asm volatile
1228             (
1229                 " mrs r0, control     \n" /* Obtain current control value. */
1230                 " movs r1, #1         \n" /* r1 = 1. */
1231                 " bics r0, r1         \n" /* Clear nPRIV bit. */
1232                 " msr control, r0     \n" /* Write back new control value. */
1233                 ::: "r0", "r1", "memory"
1234             );
1235         }
1236     }
1237 
1238 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1239 /*-----------------------------------------------------------*/
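
     /* For illustration only: unprivileged calls reach the system call enter path
      * above through the MPU_<API> entry stubs provided with the port; the system
      * call numbers come from mpu_syscall_numbers.h. A minimal sketch of the idea,
      * using a hypothetical SYSTEM_CALL_xHypotheticalApi number (not part of this
      * port), is:
      *
      *     void MPU_xHypotheticalApi( void )
      *     {
      *         // Raise an SVC whose immediate is the system call number. The SVC
      *         // handler then switches to the per-task system call stack and
      *         // raises privilege for the duration of the call.
      *         __asm volatile ( "svc %0 \n" ::"i" ( SYSTEM_CALL_xHypotheticalApi ) : "memory" );
      *     }
      *
      * The real stubs additionally branch straight to the privileged implementation
      * when the caller is already running privileged. */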
1240 
1241 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1242 
1243     void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
1244     {
1245         __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
1246     }
1247 
1248 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1249 /*-----------------------------------------------------------*/
1250 
1251 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1252 
1253     void vSystemCallExit( uint32_t * pulSystemCallStack,
1254                           uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
1255     {
1256         extern TaskHandle_t pxCurrentTCB;
1257         xMPU_SETTINGS * pxMpuSettings;
1258         uint32_t * pulTaskStack;
1259         uint32_t ulStackFrameSize, ulSystemCallLocation, i;
1260 
1261         #if defined( __ARMCC_VERSION )
1262             /* Declaration when these variables are defined in code instead of being
1263              * exported from linker scripts. */
1264             extern uint32_t * __privileged_functions_start__;
1265             extern uint32_t * __privileged_functions_end__;
1266         #else
1267             /* Declaration when these variables are exported from linker scripts. */
1268             extern uint32_t __privileged_functions_start__[];
1269             extern uint32_t __privileged_functions_end__[];
1270         #endif /* #if defined( __ARMCC_VERSION ) */
1271 
1272         ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
1273         pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );
1274 
1275         /* Checks:
1276          * 1. SVC is raised from the privileged code (i.e. application is not
1277          *    raising SVC directly). This SVC is only raised from
1278          *    vRequestSystemCallExit which is in the privileged code section.
1279          * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
1280          *    this means that we previously entered a system call and the
1281          *    application is not attempting to exit without entering a system
1282          *    call.
1283          */
1284         if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
1285             ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
1286             ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
1287         {
1288             pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;
1289 
1290             #if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) )
1291             {
1292                 if( ( ulLR & portEXC_RETURN_STACK_FRAME_TYPE_MASK ) == 0UL )
1293                 {
1294                     /* Extended frame i.e. FPU in use. */
1295                     ulStackFrameSize = 26;
1296                     __asm volatile
1297                     (
1298                         " vpush {s0}         \n" /* Trigger lazy stacking. */
1299                         " vpop  {s0}         \n" /* Nullify the effect of the above instruction. */
1300                         ::: "memory"
1301                     );
1302                 }
1303                 else
1304                 {
1305                     /* Standard frame i.e. FPU not in use. */
1306                     ulStackFrameSize = 8;
1307                 }
1308             }
1309             #else /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1310             {
1311                 ulStackFrameSize = 8;
1312             }
1313             #endif /* if ( ( configENABLE_FPU == 1 ) || ( configENABLE_MVE == 1 ) ) */
1314 
1315             /* Make space on the task stack for the stack frame. */
1316             pulTaskStack = pulTaskStack - ulStackFrameSize;
1317 
1318             /* Copy the stack frame. */
1319             for( i = 0; i < ulStackFrameSize; i++ )
1320             {
1321                 pulTaskStack[ i ] = pulSystemCallStack[ i ];
1322             }
1323 
1324             /* Use the pulTaskStack in thread mode. */
1325             __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );
1326 
1327             /* Return to the caller of the System Call entry point (i.e. the
1328              * caller of the MPU_<API>). */
1329             pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1330             /* Ensure that LR has a valid value. */
1331             pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;
1332 
1333             /* Restore the PSPLIM register to what it was at the time of
1334              * system call entry. */
1335             __asm volatile ( "msr psplim, %0" : : "r" ( pxMpuSettings->xSystemCallStackInfo.ulStackLimitRegisterAtSystemCallEntry ) );
1336 
1337             /* If the hardware used padding to force the stack pointer
1338              * to be double word aligned, set the stacked xPSR bit[9],
1339              * otherwise clear it. */
1340             if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
1341             {
1342                 pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
1343             }
1344             else
1345             {
1346                 pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
1347             }
1348 
1349             /* This is not NULL only for the duration of the system call. */
1350             pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1351 
1352             /* Drop the privilege before returning to the thread mode. */
1353             __asm volatile
1354             (
1355                 " mrs r0, control     \n" /* Obtain current control value. */
1356                 " movs r1, #1         \n" /* r1 = 1. */
1357                 " orrs r0, r1         \n" /* Set nPRIV bit. */
1358                 " msr control, r0     \n" /* Write back new control value. */
1359                 ::: "r0", "r1", "memory"
1360             );
1361         }
1362     }
1363 
1364 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1365 /*-----------------------------------------------------------*/
1366 
1367 #if ( configENABLE_MPU == 1 )
1368 
1369     BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
1370     {
1371         BaseType_t xTaskIsPrivileged = pdFALSE;
1372         const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1373 
1374         if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1375         {
1376             xTaskIsPrivileged = pdTRUE;
1377         }
1378 
1379         return xTaskIsPrivileged;
1380     }
1381 
1382 #endif /* configENABLE_MPU == 1 */
1383 /*-----------------------------------------------------------*/
1384 
1385 #if ( configENABLE_MPU == 1 )
1386 
1387     StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1388                                          StackType_t * pxEndOfStack,
1389                                          TaskFunction_t pxCode,
1390                                          void * pvParameters,
1391                                          BaseType_t xRunPrivileged,
1392                                          xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
1393     {
1394         uint32_t ulIndex = 0;
1395 
1396         xMPUSettings->ulContext[ ulIndex ] = 0x04040404; /* r4. */
1397         ulIndex++;
1398         xMPUSettings->ulContext[ ulIndex ] = 0x05050505; /* r5. */
1399         ulIndex++;
1400         xMPUSettings->ulContext[ ulIndex ] = 0x06060606; /* r6. */
1401         ulIndex++;
1402         xMPUSettings->ulContext[ ulIndex ] = 0x07070707; /* r7. */
1403         ulIndex++;
1404         xMPUSettings->ulContext[ ulIndex ] = 0x08080808; /* r8. */
1405         ulIndex++;
1406         xMPUSettings->ulContext[ ulIndex ] = 0x09090909; /* r9. */
1407         ulIndex++;
1408         xMPUSettings->ulContext[ ulIndex ] = 0x10101010; /* r10. */
1409         ulIndex++;
1410         xMPUSettings->ulContext[ ulIndex ] = 0x11111111; /* r11. */
1411         ulIndex++;
1412 
1413         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pvParameters; /* r0. */
1414         ulIndex++;
1415         xMPUSettings->ulContext[ ulIndex ] = 0x01010101; /* r1. */
1416         ulIndex++;
1417         xMPUSettings->ulContext[ ulIndex ] = 0x02020202; /* r2. */
1418         ulIndex++;
1419         xMPUSettings->ulContext[ ulIndex ] = 0x03030303; /* r3. */
1420         ulIndex++;
1421         xMPUSettings->ulContext[ ulIndex ] = 0x12121212; /* r12. */
1422         ulIndex++;
1423         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portTASK_RETURN_ADDRESS; /* LR. */
1424         ulIndex++;
1425         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxCode; /* PC. */
1426         ulIndex++;
1427         xMPUSettings->ulContext[ ulIndex ] = portINITIAL_XPSR; /* xPSR. */
1428         ulIndex++;
1429 
1430         #if ( configENABLE_TRUSTZONE == 1 )
1431         {
1432             xMPUSettings->ulContext[ ulIndex ] = portNO_SECURE_CONTEXT; /* xSecureContext. */
1433             ulIndex++;
1434         }
1435         #endif /* configENABLE_TRUSTZONE */
1436         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) ( pxTopOfStack - 8 ); /* PSP with the hardware saved stack. */
1437         ulIndex++;
1438         xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) pxEndOfStack; /* PSPLIM. */
1439         ulIndex++;
1440         if( xRunPrivileged == pdTRUE )
1441         {
1442             xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
1443             xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED; /* CONTROL. */
1444             ulIndex++;
1445         }
1446         else
1447         {
1448             xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
1449             xMPUSettings->ulContext[ ulIndex ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
1450             ulIndex++;
1451         }
1452         xMPUSettings->ulContext[ ulIndex ] = portINITIAL_EXC_RETURN; /* LR (EXC_RETURN). */
1453         ulIndex++;
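
             /* At this point ulContext holds, in order: R4-R11, R0-R3, R12, LR, PC,
              * xPSR, the secure context (when configENABLE_TRUSTZONE is 1), PSP,
              * PSPLIM, CONTROL and EXC_RETURN. This is the layout consumed when the
              * task context is restored. */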
1454 
1455         #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
1456         {
1457             /* Ensure that the system call stack is double word aligned. */
1458             xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
1459             xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
1460                                                                                      ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
1461 
1462             xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ 0 ] );
1463             xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit = ( uint32_t * ) ( ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStackLimit ) +
1464                                                                                             ( uint32_t ) ( portBYTE_ALIGNMENT - 1 ) ) &
1465                                                                                           ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );
1466 
1467             /* This is not NULL only for the duration of a system call. */
1468             xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
1469         }
1470         #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */
1471 
1472         return &( xMPUSettings->ulContext[ ulIndex ] );
1473     }
1474 
1475 #else /* configENABLE_MPU */
1476 
1477     StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
1478                                          StackType_t * pxEndOfStack,
1479                                          TaskFunction_t pxCode,
1480                                          void * pvParameters ) /* PRIVILEGED_FUNCTION */
1481     {
1482         /* Simulate the stack frame as it would be created by a context switch
1483          * interrupt. */
1484         #if ( portPRELOAD_REGISTERS == 0 )
1485         {
1486             pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1487             *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
1488             pxTopOfStack--;
1489             *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
1490             pxTopOfStack--;
1491             *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1492             pxTopOfStack -= 5;                                       /* Skip R12, R3, R2 and R1 to reach the R0 slot. */
1493             *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
1494             pxTopOfStack -= 9;                                       /* Skip R11..R4 to reach the EXC_RETURN slot. */
1495             *pxTopOfStack = portINITIAL_EXC_RETURN;
1496             pxTopOfStack--;
1497             *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
1498 
1499             #if ( configENABLE_TRUSTZONE == 1 )
1500             {
1501                 pxTopOfStack--;
1502                 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1503             }
1504             #endif /* configENABLE_TRUSTZONE */
1505         }
1506         #else /* portPRELOAD_REGISTERS */
1507         {
1508             pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
1509             *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
1510             pxTopOfStack--;
1511             *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
1512             pxTopOfStack--;
1513             *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
1514             pxTopOfStack--;
1515             *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
1516             pxTopOfStack--;
1517             *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
1518             pxTopOfStack--;
1519             *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
1520             pxTopOfStack--;
1521             *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
1522             pxTopOfStack--;
1523             *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
1524             pxTopOfStack--;
1525             *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
1526             pxTopOfStack--;
1527             *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
1528             pxTopOfStack--;
1529             *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
1530             pxTopOfStack--;
1531             *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
1532             pxTopOfStack--;
1533             *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
1534             pxTopOfStack--;
1535             *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
1536             pxTopOfStack--;
1537             *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
1538             pxTopOfStack--;
1539             *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
1540             pxTopOfStack--;
1541             *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
1542             pxTopOfStack--;
1543             *pxTopOfStack = ( StackType_t ) pxEndOfStack; /* Slot used to hold this task's PSPLIM value. */
1544 
1545             #if ( configENABLE_TRUSTZONE == 1 )
1546             {
1547                 pxTopOfStack--;
1548                 *pxTopOfStack = portNO_SECURE_CONTEXT; /* Slot used to hold this task's xSecureContext value. */
1549             }
1550             #endif /* configENABLE_TRUSTZONE */
1551         }
1552         #endif /* portPRELOAD_REGISTERS */
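
             /* The resulting initial stack, from high address to low, is: xPSR, PC,
              * LR, R12, R3-R0, R11-R4, EXC_RETURN, PSPLIM and, when
              * configENABLE_TRUSTZONE is 1, xSecureContext. pxTopOfStack is returned
              * pointing at the last written word. */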
1553 
1554         return pxTopOfStack;
1555     }
1556 
1557 #endif /* configENABLE_MPU */
1558 /*-----------------------------------------------------------*/
1559 
1560 BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
1561 {
1562     #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
1563     {
1564         volatile uint32_t ulOriginalPriority;
1565         volatile uint32_t ulImplementedPrioBits = 0;
1566         volatile uint8_t ucMaxPriorityValue;
1567 
1568         /* Determine the maximum priority from which ISR safe FreeRTOS API
1569          * functions can be called.  ISR safe functions are those that end in
1570          * "FromISR".  FreeRTOS maintains separate thread and ISR API functions to
1571          * ensure interrupt entry is as fast and simple as possible.
1572          *
1573          * Save the interrupt priority value that is about to be clobbered. */
1574         ulOriginalPriority = portNVIC_SHPR2_REG;
1575 
1576         /* Determine the number of priority bits available.  First write to all
1577          * possible bits. */
1578         portNVIC_SHPR2_REG = 0xFF000000;
1579 
1580         /* Read the value back to see how many bits stuck. */
1581         ucMaxPriorityValue = ( uint8_t ) ( ( portNVIC_SHPR2_REG & 0xFF000000 ) >> 24 );
1582 
1583         /* Use the same mask on the maximum system call priority. */
1584         ucMaxSysCallPriority = configMAX_SYSCALL_INTERRUPT_PRIORITY & ucMaxPriorityValue;
1585 
1586         /* Check that the maximum system call priority is nonzero after
1587          * accounting for the number of priority bits supported by the
1588          * hardware. A priority of 0 is invalid because setting the BASEPRI
1589          * register to 0 unmasks all interrupts, and interrupts with priority 0
1590          * cannot be masked using BASEPRI.
1591          * See https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1592         configASSERT( ucMaxSysCallPriority );
1593 
1594         /* Check that the bits not implemented in hardware are zero in
1595          * configMAX_SYSCALL_INTERRUPT_PRIORITY. */
1596         configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & ( ~ucMaxPriorityValue ) ) == 0U );
1597 
1598         /* Calculate the maximum acceptable priority group value for the number
1599          * of bits read back. */
1600 
1601         while( ( ucMaxPriorityValue & portTOP_BIT_OF_BYTE ) == portTOP_BIT_OF_BYTE )
1602         {
1603             ulImplementedPrioBits++;
1604             ucMaxPriorityValue <<= ( uint8_t ) 0x01;
1605         }
1606 
1607         if( ulImplementedPrioBits == 8 )
1608         {
1609             /* When the hardware implements 8 priority bits, there is no way for
1610              * the software to configure PRIGROUP to not have sub-priorities. As
1611              * a result, the least significant bit is always used for sub-priority
1612              * and there are 128 preemption priorities and 2 sub-priorities.
1613              *
1614              * This may cause some confusion in some cases - for example, if
1615              * configMAX_SYSCALL_INTERRUPT_PRIORITY is set to 5, both 5 and 4
1616              * priority interrupts will be masked in Critical Sections as those
1617              * are at the same preemption priority. This may appear confusing as
1618              * 4 is higher (numerically lower) priority than
1619              * configMAX_SYSCALL_INTERRUPT_PRIORITY and therefore, should not
1620              * have been masked. Instead, if we set configMAX_SYSCALL_INTERRUPT_PRIORITY
1621              * to 4, this confusion does not happen and the behaviour remains the same.
1622              *
1623              * The following assert ensures that the sub-priority bit in the
1624              * configMAX_SYSCALL_INTERRUPT_PRIORITY is clear to avoid the above mentioned
1625              * confusion. */
1626             configASSERT( ( configMAX_SYSCALL_INTERRUPT_PRIORITY & 0x1U ) == 0U );
1627             ulMaxPRIGROUPValue = 0;
1628         }
1629         else
1630         {
1631             ulMaxPRIGROUPValue = portMAX_PRIGROUP_BITS - ulImplementedPrioBits;
1632         }
1633 
1634         /* Shift the priority group value back to its position within the AIRCR
1635          * register. */
1636         ulMaxPRIGROUPValue <<= portPRIGROUP_SHIFT;
1637         ulMaxPRIGROUPValue &= portPRIORITY_GROUP_MASK;
1638 
1639         /* Restore the clobbered interrupt priority register to its original
1640          * value. */
1641         portNVIC_SHPR2_REG = ulOriginalPriority;
1642     }
1643     #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
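
         /* As a worked example of the probe above (values are illustrative): on a
          * device implementing 3 priority bits, writing 0xFF000000 to SHPR2 reads
          * back as 0xE0 in the probed byte, so ucMaxPriorityValue is 0xE0 and
          * ulImplementedPrioBits is 3. With configMAX_SYSCALL_INTERRUPT_PRIORITY set
          * to 0xA0 (priority 5), ucMaxSysCallPriority becomes 0xA0 and
          * ulMaxPRIGROUPValue becomes portMAX_PRIGROUP_BITS - 3 before being shifted
          * into its AIRCR position. */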
1644 
1645     /* Make PendSV and SysTick the same priority as the kernel. */
1646     portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
1647     portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
1648 
1649     #if ( configENABLE_MPU == 1 )
1650     {
1651         /* Setup the Memory Protection Unit (MPU). */
1652         prvSetupMPU();
1653     }
1654     #endif /* configENABLE_MPU */
1655 
1656     /* Start the timer that generates the tick ISR. Interrupts are disabled
1657      * here already. */
1658     vPortSetupTimerInterrupt();
1659 
1660     /* Initialize the critical nesting count ready for the first task. */
1661     ulCriticalNesting = 0;
1662 
1663     #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
1664     {
1665         xSchedulerRunning = pdTRUE;
1666     }
1667     #endif
1668 
1669     /* Start the first task. */
1670     vStartFirstTask();
1671 
1672     /* Should never get here as the tasks will now be executing. Call the task
1673      * exit error function to prevent compiler warnings about a static function
1674      * not being called in the case that the application writer overrides this
1675      * functionality by defining configTASK_RETURN_ADDRESS. Call
1676      * vTaskSwitchContext() so link time optimization does not remove the
1677      * symbol. */
1678     vTaskSwitchContext();
1679     prvTaskExitError();
1680 
1681     /* Should not get here. */
1682     return 0;
1683 }
1684 /*-----------------------------------------------------------*/
1685 
1686 void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
1687 {
1688     /* Not implemented in ports where there is nothing to return to.
1689      * Artificially force an assert. */
1690     configASSERT( ulCriticalNesting == 1000UL );
1691 }
1692 /*-----------------------------------------------------------*/
1693 
1694 #if ( configENABLE_MPU == 1 )
1695     void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
1696                                     const struct xMEMORY_REGION * const xRegions,
1697                                     StackType_t * pxBottomOfStack,
1698                                     uint32_t ulStackDepth )
1699     {
1700         uint32_t ulRegionStartAddress, ulRegionEndAddress, ulRegionNumber;
1701         int32_t lIndex = 0;
1702 
1703         #if defined( __ARMCC_VERSION )
1704             /* Declaration when these variables are defined in code instead of being
1705              * exported from linker scripts. */
1706             extern uint32_t * __privileged_sram_start__;
1707             extern uint32_t * __privileged_sram_end__;
1708         #else
1709             /* Declaration when these variables are exported from linker scripts. */
1710             extern uint32_t __privileged_sram_start__[];
1711             extern uint32_t __privileged_sram_end__[];
1712         #endif /* defined( __ARMCC_VERSION ) */
1713 
1714         /* Setup MAIR0. */
1715         xMPUSettings->ulMAIR0 = ( ( portMPU_NORMAL_MEMORY_BUFFERABLE_CACHEABLE << portMPU_MAIR_ATTR0_POS ) & portMPU_MAIR_ATTR0_MASK );
1716         xMPUSettings->ulMAIR0 |= ( ( portMPU_DEVICE_MEMORY_nGnRE << portMPU_MAIR_ATTR1_POS ) & portMPU_MAIR_ATTR1_MASK );
1717 
1718         /* This function is called automatically when the task is created - in
1719          * which case the stack region parameters will be valid.  At all other
1720          * times the stack parameters will not be valid and it is assumed that
1721          * the stack region has already been configured. */
1722         if( ulStackDepth > 0 )
1723         {
1724             ulRegionStartAddress = ( uint32_t ) pxBottomOfStack;
1725             ulRegionEndAddress = ( uint32_t ) pxBottomOfStack + ( ulStackDepth * ( uint32_t ) sizeof( StackType_t ) ) - 1;
1726 
1727             /* If the stack is within the privileged SRAM, do not protect it
1728              * using a separate MPU region. This is needed because privileged
1729              * SRAM is already protected using an MPU region and ARMv8-M does
1730              * not allow overlapping MPU regions. */
1731             if( ( ulRegionStartAddress >= ( uint32_t ) __privileged_sram_start__ ) &&
1732                 ( ulRegionEndAddress <= ( uint32_t ) __privileged_sram_end__ ) )
1733             {
1734                 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = 0;
1735                 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = 0;
1736             }
1737             else
1738             {
1739                 /* Define the region that allows access to the stack. */
1740                 ulRegionStartAddress &= portMPU_RBAR_ADDRESS_MASK;
1741                 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1742 
1743                 xMPUSettings->xRegionsSettings[ 0 ].ulRBAR = ( ulRegionStartAddress ) |
1744                                                              ( portMPU_REGION_NON_SHAREABLE ) |
1745                                                              ( portMPU_REGION_READ_WRITE ) |
1746                                                              ( portMPU_REGION_EXECUTE_NEVER );
1747 
1748                 xMPUSettings->xRegionsSettings[ 0 ].ulRLAR = ( ulRegionEndAddress ) |
1749                                                              ( portMPU_RLAR_ATTR_INDEX0 ) |
1750                                                              ( portMPU_RLAR_REGION_ENABLE );
1751             }
1752         }
1753 
1754         /* User supplied configurable regions. */
1755         for( ulRegionNumber = 1; ulRegionNumber <= portNUM_CONFIGURABLE_REGIONS; ulRegionNumber++ )
1756         {
1757             /* If xRegions is NULL i.e. the task has not specified any MPU
1758              * region, the else part ensures that all the configurable MPU
1759              * regions are invalidated. */
1760             if( ( xRegions != NULL ) && ( xRegions[ lIndex ].ulLengthInBytes > 0UL ) )
1761             {
1762                 /* Translate the generic region definition contained in xRegions
1763                  * into the ARMv8 specific MPU settings that are then stored in
1764                  * xMPUSettings. */
1765                 ulRegionStartAddress = ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) & portMPU_RBAR_ADDRESS_MASK;
1766                 ulRegionEndAddress = ( uint32_t ) xRegions[ lIndex ].pvBaseAddress + xRegions[ lIndex ].ulLengthInBytes - 1;
1767                 ulRegionEndAddress &= portMPU_RLAR_ADDRESS_MASK;
1768 
1769                 /* Start address. */
1770                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = ( ulRegionStartAddress ) |
1771                                                                           ( portMPU_REGION_NON_SHAREABLE );
1772 
1773                 /* RO/RW. */
1774                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_READ_ONLY ) != 0 )
1775                 {
1776                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_ONLY );
1777                 }
1778                 else
1779                 {
1780                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_READ_WRITE );
1781                 }
1782 
1783                 /* XN. */
1784                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_EXECUTE_NEVER ) != 0 )
1785                 {
1786                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR |= ( portMPU_REGION_EXECUTE_NEVER );
1787                 }
1788 
1789                 /* End Address. */
1790                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = ( ulRegionEndAddress ) |
1791                                                                           ( portMPU_RLAR_REGION_ENABLE );
1792 
1793                 /* Normal memory / Device memory. */
1794                 if( ( xRegions[ lIndex ].ulParameters & tskMPU_REGION_DEVICE_MEMORY ) != 0 )
1795                 {
1796                     /* Attr1 in MAIR0 is configured as device memory. */
1797                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX1;
1798                 }
1799                 else
1800                 {
1801                     /* Attr0 in MAIR0 is configured as normal memory. */
1802                     xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR |= portMPU_RLAR_ATTR_INDEX0;
1803                 }
1804             }
1805             else
1806             {
1807                 /* Invalidate the region. */
1808                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRBAR = 0UL;
1809                 xMPUSettings->xRegionsSettings[ ulRegionNumber ].ulRLAR = 0UL;
1810             }
1811 
1812             lIndex++;
1813         }
1814     }
1815 #endif /* configENABLE_MPU */
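
     /* For illustration only: the xRegions entries translated above are supplied by
      * the application, typically through xTaskCreateRestricted(). A sketch of one
      * such entry, using a hypothetical buffer name and size, might look like:
      *
      *     // 128-byte buffer; ARMv8-M MPU regions are 32-byte granular, so keep the
      *     // base address and length 32-byte aligned.
      *     static uint8_t ucHypotheticalBuffer[ 128 ] __attribute__( ( aligned( 32 ) ) );
      *
      *     // Read-only, never-execute, normal-memory region covering the buffer.
      *     { ucHypotheticalBuffer, 128, tskMPU_REGION_READ_ONLY | tskMPU_REGION_EXECUTE_NEVER }
      *
      * vPortStoreTaskMPUSettings() would place this in a configurable region with
      * portMPU_REGION_READ_ONLY and portMPU_REGION_EXECUTE_NEVER set in RBAR and
      * attribute index 0 (normal memory) selected in RLAR. */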
1816 /*-----------------------------------------------------------*/
1817 
1818 #if ( configENABLE_MPU == 1 )
1819     BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
1820                                                 uint32_t ulBufferLength,
1821                                                 uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */
1822 
1823     {
1824         uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
1825         BaseType_t xAccessGranted = pdFALSE;
1826         const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1827 
1828         if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1829         {
1830             xAccessGranted = pdTRUE;
1831         }
1832         else
1833         {
1834             if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
1835             {
1836                 ulBufferStartAddress = ( uint32_t ) pvBuffer;
1837                 ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );
1838 
1839                 for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
1840                 {
1841                     /* Is the MPU region enabled? */
1842                     if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR & portMPU_RLAR_REGION_ENABLE ) == portMPU_RLAR_REGION_ENABLE )
1843                     {
1844                         if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
1845                                                          portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1846                                                          portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1847                             portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
1848                                                          portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ),
1849                                                          portEXTRACT_LAST_ADDRESS_FROM_RLAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRLAR ) ) &&
1850                             portIS_AUTHORIZED( ulAccessRequested,
1851                                                prvGetRegionAccessPermissions( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR ) ) )
1852                         {
1853                             xAccessGranted = pdTRUE;
1854                             break;
1855                         }
1856                     }
1857                 }
1858             }
1859         }
1860 
1861         return xAccessGranted;
1862     }
1863 #endif /* configENABLE_MPU */
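
     /* For illustration only: this check is used to validate application supplied
      * buffers before the kernel dereferences them on behalf of an unprivileged
      * task. A sketch, assuming the tskMPU_READ_PERMISSION constant from task.h:
      *
      *     if( xPortIsAuthorizedToAccessBuffer( pvUserBuffer, ulUserBufferLength,
      *                                          tskMPU_READ_PERMISSION ) == pdTRUE )
      *     {
      *         // Safe for the kernel to read from the unprivileged task's buffer.
      *     }
      */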
1864 /*-----------------------------------------------------------*/
1865 
1866 BaseType_t xPortIsInsideInterrupt( void )
1867 {
1868     uint32_t ulCurrentInterrupt;
1869     BaseType_t xReturn;
1870 
1871     /* Obtain the number of the currently executing interrupt. Interrupt Program
1872      * Status Register (IPSR) holds the exception number of the currently-executing
1873      * exception or zero for Thread mode. */
1874     __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1875 
1876     if( ulCurrentInterrupt == 0 )
1877     {
1878         xReturn = pdFALSE;
1879     }
1880     else
1881     {
1882         xReturn = pdTRUE;
1883     }
1884 
1885     return xReturn;
1886 }
1887 /*-----------------------------------------------------------*/
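
     /* For illustration only: application code can use xPortIsInsideInterrupt() to
      * choose between the task level and "FromISR" variants of an API. A sketch
      * using hypothetical queue and item variables:
      *
      *     if( xPortIsInsideInterrupt() == pdTRUE )
      *     {
      *         BaseType_t xHigherPriorityTaskWoken = pdFALSE;
      *         xQueueSendFromISR( xQueue, &xItem, &xHigherPriorityTaskWoken );
      *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
      *     }
      *     else
      *     {
      *         xQueueSend( xQueue, &xItem, portMAX_DELAY );
      *     }
      */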
1888 
1889 #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) )
1890 
1891     void vPortValidateInterruptPriority( void )
1892     {
1893         uint32_t ulCurrentInterrupt;
1894         uint8_t ucCurrentPriority;
1895 
1896         /* Obtain the number of the currently executing interrupt. */
1897         __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1898 
1899         /* Is the interrupt number a user defined interrupt? */
1900         if( ulCurrentInterrupt >= portFIRST_USER_INTERRUPT_NUMBER )
1901         {
1902             /* Look up the interrupt's priority. */
1903             ucCurrentPriority = pcInterruptPriorityRegisters[ ulCurrentInterrupt ];
1904 
1905             /* The following assertion will fail if a service routine (ISR) for
1906              * an interrupt that has been assigned a priority above
1907              * configMAX_SYSCALL_INTERRUPT_PRIORITY calls an ISR safe FreeRTOS API
1908              * function.  ISR safe FreeRTOS API functions must *only* be called
1909              * from interrupts that have been assigned a priority at or below
1910              * configMAX_SYSCALL_INTERRUPT_PRIORITY.
1911              *
1912              * Numerically low interrupt priority numbers represent logically high
1913              * interrupt priorities, therefore the priority of the interrupt must
1914              * be set to a value equal to or numerically *higher* than
1915              * configMAX_SYSCALL_INTERRUPT_PRIORITY.
1916              *
1917              * Interrupts that use the FreeRTOS API must not be left at their
1918              * default priority of zero as that is the highest possible priority,
1919              * which is guaranteed to be above configMAX_SYSCALL_INTERRUPT_PRIORITY,
1920              * and therefore also guaranteed to be invalid.
1921              *
1922              * FreeRTOS maintains separate thread and ISR API functions to ensure
1923              * interrupt entry is as fast and simple as possible.
1924              *
1925              * The following links provide detailed information:
1926              * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
1927              * https://www.FreeRTOS.org/FAQHelp.html */
1928             configASSERT( ucCurrentPriority >= ucMaxSysCallPriority );
1929         }
1930 
1931         /* Priority grouping:  The interrupt controller (NVIC) allows the bits
1932          * that define each interrupt's priority to be split between bits that
1933          * define the interrupt's pre-emption priority bits and bits that define
1934          * the interrupt's sub-priority.  For simplicity all bits must be defined
1935          * to be pre-emption priority bits.  The following assertion will fail if
1936          * this is not the case (if some bits represent a sub-priority).
1937          *
1938          * If the application only uses CMSIS libraries for interrupt
1939          * configuration then the correct setting can be achieved on all Cortex-M
1940          * devices by calling NVIC_SetPriorityGrouping( 0 ); before starting the
1941          * scheduler.  Note however that some vendor specific peripheral libraries
1942          * assume a non-zero priority group setting, in which cases using a value
1943          * of zero will result in unpredictable behaviour. */
1944         configASSERT( ( portAIRCR_REG & portPRIORITY_GROUP_MASK ) <= ulMaxPRIGROUPValue );
1945     }
1946 
1947 #endif /* #if ( ( configASSERT_DEFINED == 1 ) && ( portHAS_BASEPRI == 1 ) ) */
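
     /* For illustration only: the assertion above passes when every interrupt that
      * calls a "FromISR" API is assigned a priority numerically at or above
      * configMAX_SYSCALL_INTERRUPT_PRIORITY. With CMSIS, and assuming a hypothetical
      * xHypothetical_IRQn and a configPRIO_BITS setting matching the device, that
      * could look like:
      *
      *     NVIC_SetPriorityGrouping( 0 ); // All bits are pre-emption priority bits.
      *     NVIC_SetPriority( xHypothetical_IRQn,
      *                       configMAX_SYSCALL_INTERRUPT_PRIORITY >> ( 8 - configPRIO_BITS ) );
      *     NVIC_EnableIRQ( xHypothetical_IRQn );
      */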
1948 /*-----------------------------------------------------------*/
1949 
1950 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
1951 
1952     void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
1953                                          int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
1954     {
1955         uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
1956         xMPU_SETTINGS * xTaskMpuSettings;
1957 
1958         ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
1959         ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
1960 
1961         xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
1962 
1963         xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] |= ( 1U << ulAccessControlListEntryBit );
1964     }
1965 
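
     /* As a worked example of the index arithmetic above: with 32-bit access
      * control list entries, a kernel object index of 37 maps to entry 37 / 32 = 1
      * and bit 37 % 32 = 5, so granting access sets bit 5 of
      * ulAccessControlList[ 1 ] and revoking access clears the same bit. */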
1966 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
1967 /*-----------------------------------------------------------*/
1968 
1969 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
1970 
1971     void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
1972                                           int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
1973     {
1974         uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
1975         xMPU_SETTINGS * xTaskMpuSettings;
1976 
1977         ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
1978         ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
1979 
1980         xTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );
1981 
1982         xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] &= ~( 1U << ulAccessControlListEntryBit );
1983     }
1984 
1985 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
1986 /*-----------------------------------------------------------*/
1987 
1988 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1989 
1990     #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
1991 
1992         BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
1993         {
1994             uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
1995             BaseType_t xAccessGranted = pdFALSE;
1996             const xMPU_SETTINGS * xTaskMpuSettings;
1997 
1998             if( xSchedulerRunning == pdFALSE )
1999             {
2000                 /* Grant access to all the kernel objects before the scheduler
2001                  * is started. This is necessary because no task is running
2002                  * yet and therefore, we cannot use the permissions of any
2003                  * task. */
2004                 xAccessGranted = pdTRUE;
2005             }
2006             else
2007             {
2008                 xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
2009 
2010                 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
2011                 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
2012 
2013                 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
2014                 {
2015                     xAccessGranted = pdTRUE;
2016                 }
2017                 else
2018                 {
2019                     if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
2020                     {
2021                         xAccessGranted = pdTRUE;
2022                     }
2023                 }
2024             }
2025 
2026             return xAccessGranted;
2027         }
2028 
2029     #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2030 
2031         BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
2032         {
2033             ( void ) lInternalIndexOfKernelObject;
2034 
2035             /* If Access Control List feature is not used, all the tasks have
2036              * access to all the kernel objects. */
2037             return pdTRUE;
2038         }
2039 
2040     #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
2041 
2042 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
2043 /*-----------------------------------------------------------*/
2044