1 /*
2 * FreeRTOS Kernel V11.1.0
3 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * SPDX-License-Identifier: MIT
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy of
8 * this software and associated documentation files (the "Software"), to deal in
9 * the Software without restriction, including without limitation the rights to
10 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11 * the Software, and to permit persons to whom the Software is furnished to do so,
12 * subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in all
15 * copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * https://www.FreeRTOS.org
25 * https://github.com/FreeRTOS
26 *
27 */
28
29 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
30 * all the API functions to use the MPU wrappers. That should only be done when
31 * task.h is included from an application file. */
32 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
33
34 /* Scheduler includes. */
35 #include "FreeRTOS.h"
36 #include "task.h"
37
38 /* MPU includes. */
39 #include "mpu_wrappers.h"
40 #include "mpu_syscall_numbers.h"
41
42 /* Portasm includes. */
43 #include "portasm.h"
44
45 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
46
47 /*-----------------------------------------------------------*/
48
49 /**
50 * @brief Prototype of all Interrupt Service Routines (ISRs).
51 */
52 typedef void ( * portISR_t )( void );
53
54 /*-----------------------------------------------------------*/
55
56 /**
57 * @brief Constants required to manipulate the NVIC.
58 */
59 #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
60 #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
61 #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
62 #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
63 #define portNVIC_SHPR2_REG ( *( ( volatile uint32_t * ) 0xe000ed1c ) )
64 #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
65 #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
66 #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
67 #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
68 #define portNVIC_PEND_SYSTICK_CLEAR_BIT ( 1UL << 25UL )
69 #define portNVIC_PEND_SYSTICK_SET_BIT ( 1UL << 26UL )
70 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
71 #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
72 #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
73
74 /*-----------------------------------------------------------*/
75
76 /**
77 * @brief Constants required to manipulate the SCB.
78 */
79 #define portSCB_VTOR_REG ( *( ( portISR_t ** ) 0xe000ed08 ) )
80 #define portSCB_SYS_HANDLER_CTRL_STATE_REG ( *( ( volatile uint32_t * ) 0xe000ed24 ) )
81 #define portSCB_MEM_FAULT_ENABLE_BIT ( 1UL << 16UL )
82
83 /*-----------------------------------------------------------*/
84
85 /**
86 * @brief Constants used to check the installation of the FreeRTOS interrupt handlers.
87 */
88 #define portVECTOR_INDEX_SVC ( 11 )
89 #define portVECTOR_INDEX_PENDSV ( 14 )
90
91 /*-----------------------------------------------------------*/
92
93 /**
94 * @brief Constants used during system call enter and exit.
95 */
96 #define portPSR_STACK_PADDING_MASK ( 1UL << 9UL )
97 #define portEXC_RETURN_STACK_FRAME_TYPE_MASK ( 1UL << 4UL )
98
99 /*-----------------------------------------------------------*/
100
101 /**
102 * @brief Offsets in the stack to the parameters when inside the SVC handler.
103 */
104 #define portOFFSET_TO_LR ( 5 )
105 #define portOFFSET_TO_PC ( 6 )
106 #define portOFFSET_TO_PSR ( 7 )
107
108 /*-----------------------------------------------------------*/
109
110 /**
111 * @brief Constants required to manipulate the MPU.
112 */
113 #define portMPU_TYPE_REG ( *( ( volatile uint32_t * ) 0xe000ed90 ) )
114 #define portMPU_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed94 ) )
115
116 #define portMPU_RBAR_REG ( *( ( volatile uint32_t * ) 0xe000ed9c ) )
117 #define portMPU_RASR_REG ( *( ( volatile uint32_t * ) 0xe000eda0 ) )
118
119 /* MPU Region Attribute and Size Register (RASR) bitmasks. */
120 #define portMPU_RASR_AP_BITMASK ( 0x7UL << 24UL )
121 #define portMPU_RASR_S_C_B_BITMASK ( 0x7UL )
122 #define portMPU_RASR_S_C_B_LOCATION ( 16UL )
123 #define portMPU_RASR_SIZE_BITMASK ( 0x1FUL << 1UL )
124 #define portMPU_RASR_REGION_ENABLE_BITMASK ( 0x1UL )
125
126 /* MPU Region Base Address Register (RBAR) bitmasks. */
127 #define portMPU_RBAR_ADDRESS_BITMASK ( 0xFFFFFF00UL )
128 #define portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ( 0x1UL << 4UL )
129 #define portMPU_RBAR_REGION_NUMBER_BITMASK ( 0x0000000FUL )
130
131 /* MPU Control Register (MPU_CTRL) bitmasks. */
132 #define portMPU_CTRL_ENABLE_BITMASK ( 0x1UL )
133 #define portMPU_CTRL_PRIV_BACKGROUND_ENABLE_BITMASK ( 0x1UL << 2UL ) /* PRIVDEFENA bit. */
134
135 /* Expected value of the portMPU_TYPE register. */
136 #define portEXPECTED_MPU_TYPE_VALUE ( 0x8UL << 8UL ) /* 8 DREGION unified. */
137
/* Extract the first address of the MPU region as encoded in the
 * RBAR (Region Base Address Register) value. */
#define portEXTRACT_FIRST_ADDRESS_FROM_RBAR( rbar ) \
    ( ( rbar ) & portMPU_RBAR_ADDRESS_BITMASK )

/* Extract the size of the MPU region as encoded in the
 * RASR (Region Attribute and Size Register) value. The SIZE field encodes
 * a region of 2^(SIZE+1) bytes. Use an unsigned literal so the shift is
 * well defined for all valid SIZE encodings below 31. */
#define portEXTRACT_REGION_SIZE_FROM_RASR( rasr ) \
    ( 1UL << ( ( ( ( rasr ) & portMPU_RASR_SIZE_BITMASK ) >> 1 ) + 1 ) )

/* Does addr lie within the [start, end] address range (inclusive)? */
#define portIS_ADDRESS_WITHIN_RANGE( addr, start, end ) \
    ( ( ( addr ) >= ( start ) ) && ( ( addr ) <= ( end ) ) )

/* Is the access request satisfied by the available permissions? True only
 * when every requested bit is present in the permissions. The right-hand
 * operand is parenthesized so compound requests such as ( a | b ) are not
 * broken up by operator precedence ( == binds tighter than | ). */
#define portIS_AUTHORIZED( accessRequest, permissions ) \
    ( ( ( permissions ) & ( accessRequest ) ) == ( accessRequest ) )

/* Max value that fits in a uint32_t type. */
#define portUINT32_MAX    ( ~( ( uint32_t ) 0 ) )

/* Check if adding a and b will result in overflow. */
#define portADD_UINT32_WILL_OVERFLOW( a, b )    ( ( a ) > ( portUINT32_MAX - ( b ) ) )
161
162 /*-----------------------------------------------------------*/
163
164 /**
165 * @brief The maximum 24-bit number.
166 *
167 * It is needed because the systick is a 24-bit counter.
168 */
169 #define portMAX_24_BIT_NUMBER ( 0xffffffUL )
170
171 /**
172 * @brief A fiddle factor to estimate the number of SysTick counts that would
173 * have occurred while the SysTick counter is stopped during tickless idle
174 * calculations.
175 */
176 #define portMISSED_COUNTS_FACTOR ( 94UL )
177
178 /*-----------------------------------------------------------*/
179
180 /**
181 * @brief Constants required to set up the initial stack.
182 */
183 #define portINITIAL_XPSR ( 0x01000000 )
184
185 /**
186 * @brief Initial EXC_RETURN value.
187 *
188 * FF FF FF FD
189 * 1111 1111 1111 1111 1111 1111 1111 1101
190 *
191 * Bit[3] - 1 --> Return to the Thread mode.
192 * Bit[2] - 1 --> Restore registers from the process stack.
193 * Bit[1] - 0 --> Reserved, 0.
 * Bit[0] - 1 --> Reserved, 1.
195 */
196 #define portINITIAL_EXC_RETURN ( 0xfffffffdUL )
197
198 /**
199 * @brief CONTROL register privileged bit mask.
200 *
201 * Bit[0] in CONTROL register tells the privilege:
202 * Bit[0] = 0 ==> The task is privileged.
203 * Bit[0] = 1 ==> The task is not privileged.
204 */
205 #define portCONTROL_PRIVILEGED_MASK ( 1UL << 0UL )
206
207 /**
208 * @brief Initial CONTROL register values.
209 */
210 #define portINITIAL_CONTROL_UNPRIVILEGED ( 0x3 )
211 #define portINITIAL_CONTROL_PRIVILEGED ( 0x2 )
212
213 /**
214 * @brief Let the user override the default SysTick clock rate. If defined by the
215 * user, this symbol must equal the SysTick clock rate when the CLK bit is 0 in the
216 * configuration register.
217 */
218 #ifndef configSYSTICK_CLOCK_HZ
219 #define configSYSTICK_CLOCK_HZ ( configCPU_CLOCK_HZ )
220 /* Ensure the SysTick is clocked at the same frequency as the core. */
221 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( portNVIC_SYSTICK_CLK_BIT )
222 #else
223 /* Select the option to clock SysTick not at the same frequency as the core. */
224 #define portNVIC_SYSTICK_CLK_BIT_CONFIG ( 0 )
225 #endif
226
227 /**
228 * @brief Let the user override the pre-loading of the initial LR with the
229 * address of prvTaskExitError() in case it messes up unwinding of the stack
230 * in the debugger.
231 */
232 #ifdef configTASK_RETURN_ADDRESS
233 #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
234 #else
235 #define portTASK_RETURN_ADDRESS prvTaskExitError
236 #endif
237
238 /**
239 * @brief If portPRELOAD_REGISTERS then registers will be given an initial value
240 * when a task is created. This helps in debugging at the cost of code size.
241 */
242 #define portPRELOAD_REGISTERS 1
243
244 /*-----------------------------------------------------------*/
245
246 /**
247 * @brief Used to catch tasks that attempt to return from their implementing
248 * function.
249 */
250 static void prvTaskExitError( void );
251
252 #if ( configENABLE_MPU == 1 )
253
254 /**
255 * @brief Setup the Memory Protection Unit (MPU).
256 */
257 static void prvSetupMPU( void ) PRIVILEGED_FUNCTION;
258
259 #endif /* configENABLE_MPU */
260
261 /**
262 * @brief Setup the timer to generate the tick interrupts.
263 *
264 * The implementation in this file is weak to allow application writers to
265 * change the timer used to generate the tick interrupt.
266 */
267 void vPortSetupTimerInterrupt( void ) PRIVILEGED_FUNCTION;
268
269 /**
270 * @brief Checks whether the current execution context is interrupt.
271 *
272 * @return pdTRUE if the current execution context is interrupt, pdFALSE
273 * otherwise.
274 */
275 BaseType_t xPortIsInsideInterrupt( void );
276
277 /**
278 * @brief Yield the processor.
279 */
280 void vPortYield( void ) PRIVILEGED_FUNCTION;
281
282 /**
283 * @brief Enter critical section.
284 */
285 void vPortEnterCritical( void ) PRIVILEGED_FUNCTION;
286
287 /**
288 * @brief Exit from critical section.
289 */
290 void vPortExitCritical( void ) PRIVILEGED_FUNCTION;
291
292 /**
293 * @brief SysTick handler.
294 */
295 void SysTick_Handler( void ) PRIVILEGED_FUNCTION;
296
297 /**
298 * @brief C part of SVC handler.
299 */
300 portDONT_DISCARD void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) PRIVILEGED_FUNCTION;
301
302 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
303
304 /**
305 * @brief Sets up the system call stack so that upon returning from
306 * SVC, the system call stack is used.
307 *
308 * @param pulTaskStack The current SP when the SVC was raised.
309 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
310 * @param ucSystemCallNumber The system call number of the system call.
311 */
312 void vSystemCallEnter( uint32_t * pulTaskStack,
313 uint32_t ulLR,
314 uint8_t ucSystemCallNumber ) PRIVILEGED_FUNCTION;
315
316 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
317
318 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
319
320 /**
321 * @brief Raise SVC for exiting from a system call.
322 */
323 void vRequestSystemCallExit( void ) __attribute__( ( naked ) ) PRIVILEGED_FUNCTION;
324
325 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
326
327 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
328
329 /**
330 * @brief Sets up the task stack so that upon returning from
331 * SVC, the task stack is used again.
332 *
333 * @param pulSystemCallStack The current SP when the SVC was raised.
334 * @param ulLR The value of Link Register (EXC_RETURN) in the SVC handler.
335 */
336 void vSystemCallExit( uint32_t * pulSystemCallStack,
337 uint32_t ulLR ) PRIVILEGED_FUNCTION;
338
339 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
340
341 #if ( configENABLE_MPU == 1 )
342
343 /**
344 * @brief Checks whether or not the calling task is privileged.
345 *
346 * @return pdTRUE if the calling task is privileged, pdFALSE otherwise.
347 */
348 BaseType_t xPortIsTaskPrivileged( void ) PRIVILEGED_FUNCTION;
349
350 #endif /* configENABLE_MPU == 1 */
351
352 /*-----------------------------------------------------------*/
353
354 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
355
356 /**
357 * @brief This variable is set to pdTRUE when the scheduler is started.
358 */
359 PRIVILEGED_DATA static BaseType_t xSchedulerRunning = pdFALSE;
360
361 #endif
362
363 /**
364 * @brief Each task maintains its own interrupt status in the critical nesting
365 * variable.
366 */
367 PRIVILEGED_DATA static volatile uint32_t ulCriticalNesting = 0xaaaaaaaaUL;
368
369 #if ( configUSE_TICKLESS_IDLE == 1 )
370
371 /**
372 * @brief The number of SysTick increments that make up one tick period.
373 */
374 PRIVILEGED_DATA static uint32_t ulTimerCountsForOneTick = 0;
375
376 /**
377 * @brief The maximum number of tick periods that can be suppressed is
378 * limited by the 24 bit resolution of the SysTick timer.
379 */
380 PRIVILEGED_DATA static uint32_t xMaximumPossibleSuppressedTicks = 0;
381
382 /**
383 * @brief Compensate for the CPU cycles that pass while the SysTick is
384 * stopped (low power functionality only).
385 */
386 PRIVILEGED_DATA static uint32_t ulStoppedTimerCompensation = 0;
387
388 #endif /* configUSE_TICKLESS_IDLE */
389
390 /*-----------------------------------------------------------*/
391
392 #if ( configUSE_TICKLESS_IDLE == 1 )
393
/**
 * @brief Tickless idle implementation: stop the SysTick, sleep for up to
 * xExpectedIdleTime tick periods, then step the kernel's tick count by the
 * number of whole tick periods that actually elapsed.
 *
 * Defined weak so an application can provide its own low power
 * implementation.
 *
 * @param xExpectedIdleTime The number of tick periods the scheduler expects
 * to remain idle; clamped to what the 24-bit SysTick can represent.
 */
__attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
{
    uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements, ulSysTickDecrementsLeft;
    TickType_t xModifiableIdleTime;

    /* Make sure the SysTick reload value does not overflow the counter. */
    if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
    {
        xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
    }

    /* Enter a critical section but don't use the taskENTER_CRITICAL()
     * method as that will mask interrupts that should exit sleep mode. */
    __asm volatile ( "cpsid i" ::: "memory" );
    __asm volatile ( "dsb" );
    __asm volatile ( "isb" );

    /* If a context switch is pending or a task is waiting for the scheduler
     * to be unsuspended then abandon the low power entry. */
    if( eTaskConfirmSleepModeStatus() == eAbortSleep )
    {
        /* Re-enable interrupts - see comments above the cpsid instruction
         * above. */
        __asm volatile ( "cpsie i" ::: "memory" );
    }
    else
    {
        /* Stop the SysTick momentarily. The time the SysTick is stopped for
         * is accounted for as best it can be, but using the tickless mode will
         * inevitably result in some tiny drift of the time maintained by the
         * kernel with respect to calendar time. */
        portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

        /* Use the SysTick current-value register to determine the number of
         * SysTick decrements remaining until the next tick interrupt. If the
         * current-value register is zero, then there are actually
         * ulTimerCountsForOneTick decrements remaining, not zero, because the
         * SysTick requests the interrupt when decrementing from 1 to 0. */
        ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;

        if( ulSysTickDecrementsLeft == 0 )
        {
            ulSysTickDecrementsLeft = ulTimerCountsForOneTick;
        }

        /* Calculate the reload value required to wait xExpectedIdleTime
         * tick periods. -1 is used because this code normally executes part
         * way through the first tick period. But if the SysTick IRQ is now
         * pending, then clear the IRQ, suppressing the first tick, and correct
         * the reload value to reflect that the second tick period is already
         * underway. The expected idle time is always at least two ticks. */
        ulReloadValue = ulSysTickDecrementsLeft + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );

        if( ( portNVIC_INT_CTRL_REG & portNVIC_PEND_SYSTICK_SET_BIT ) != 0 )
        {
            portNVIC_INT_CTRL_REG = portNVIC_PEND_SYSTICK_CLEAR_BIT;
            ulReloadValue -= ulTimerCountsForOneTick;
        }

        /* Compensate for the counts that will be lost while the SysTick is
         * stopped, but never let the reload value underflow. */
        if( ulReloadValue > ulStoppedTimerCompensation )
        {
            ulReloadValue -= ulStoppedTimerCompensation;
        }

        /* Set the new reload value. */
        portNVIC_SYSTICK_LOAD_REG = ulReloadValue;

        /* Clear the SysTick count flag and set the count value back to
         * zero. */
        portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

        /* Restart SysTick. */
        portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;

        /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
         * set its parameter to 0 to indicate that its implementation contains
         * its own wait for interrupt or wait for event instruction, and so wfi
         * should not be executed again. However, the original expected idle
         * time variable must remain unmodified, so a copy is taken. */
        xModifiableIdleTime = xExpectedIdleTime;
        configPRE_SLEEP_PROCESSING( xModifiableIdleTime );

        if( xModifiableIdleTime > 0 )
        {
            __asm volatile ( "dsb" ::: "memory" );
            __asm volatile ( "wfi" );
            __asm volatile ( "isb" );
        }

        configPOST_SLEEP_PROCESSING( xExpectedIdleTime );

        /* Re-enable interrupts to allow the interrupt that brought the MCU
         * out of sleep mode to execute immediately. See comments above
         * the cpsid instruction above. */
        __asm volatile ( "cpsie i" ::: "memory" );
        __asm volatile ( "dsb" );
        __asm volatile ( "isb" );

        /* Disable interrupts again because the clock is about to be stopped
         * and interrupts that execute while the clock is stopped will increase
         * any slippage between the time maintained by the RTOS and calendar
         * time. */
        __asm volatile ( "cpsid i" ::: "memory" );
        __asm volatile ( "dsb" );
        __asm volatile ( "isb" );

        /* Disable the SysTick clock without reading the
         * portNVIC_SYSTICK_CTRL_REG register to ensure the
         * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
         * the time the SysTick is stopped for is accounted for as best it can
         * be, but using the tickless mode will inevitably result in some tiny
         * drift of the time maintained by the kernel with respect to calendar
         * time. */
        portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT );

        /* Determine whether the SysTick has already counted to zero. */
        if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
        {
            uint32_t ulCalculatedLoadValue;

            /* The tick interrupt ended the sleep (or is now pending), and
             * a new tick period has started. Reset portNVIC_SYSTICK_LOAD_REG
             * with whatever remains of the new tick period. */
            ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );

            /* Don't allow a tiny value, or values that have somehow
             * underflowed because the post sleep hook did something
             * that took too long or because the SysTick current-value register
             * is zero. */
            if( ( ulCalculatedLoadValue <= ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
            {
                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
            }

            portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;

            /* As the pending tick will be processed as soon as this
             * function exits, the tick value maintained by the tick is stepped
             * forward by one less than the time spent waiting. */
            ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
        }
        else
        {
            /* Something other than the tick interrupt ended the sleep. */

            /* Use the SysTick current-value register to determine the
             * number of SysTick decrements remaining until the expected idle
             * time would have ended. */
            ulSysTickDecrementsLeft = portNVIC_SYSTICK_CURRENT_VALUE_REG;
            #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG != portNVIC_SYSTICK_CLK_BIT )
            {
                /* If the SysTick is not using the core clock, the current-
                 * value register might still be zero here. In that case, the
                 * SysTick didn't load from the reload register, and there are
                 * ulReloadValue decrements remaining in the expected idle
                 * time, not zero. */
                if( ulSysTickDecrementsLeft == 0 )
                {
                    ulSysTickDecrementsLeft = ulReloadValue;
                }
            }
            #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

            /* Work out how long the sleep lasted rounded to complete tick
             * periods (not the ulReload value which accounted for part
             * ticks). */
            ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - ulSysTickDecrementsLeft;

            /* How many complete tick periods passed while the processor
             * was waiting? */
            ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;

            /* The reload value is set to whatever fraction of a single tick
             * period remains. */
            portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
        }

        /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG again,
         * then set portNVIC_SYSTICK_LOAD_REG back to its standard value. If
         * the SysTick is not using the core clock, temporarily configure it to
         * use the core clock. This configuration forces the SysTick to load
         * from portNVIC_SYSTICK_LOAD_REG immediately instead of at the next
         * cycle of the other clock. Then portNVIC_SYSTICK_LOAD_REG is ready
         * to receive the standard value immediately. */
        portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
        portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
        #if ( portNVIC_SYSTICK_CLK_BIT_CONFIG == portNVIC_SYSTICK_CLK_BIT )
        {
            portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
        }
        #else
        {
            /* The temporary usage of the core clock has served its purpose,
             * as described above. Resume usage of the other clock. */
            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT;

            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
            {
                /* The partial tick period already ended. Be sure the SysTick
                 * counts it only once. */
                portNVIC_SYSTICK_CURRENT_VALUE_REG = 0;
            }

            portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
            portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
        }
        #endif /* portNVIC_SYSTICK_CLK_BIT_CONFIG */

        /* Step the tick to account for any tick periods that elapsed. */
        vTaskStepTick( ulCompleteTickPeriods );

        /* Exit with interrupts enabled. */
        __asm volatile ( "cpsie i" ::: "memory" );
    }
}
609
610 #endif /* configUSE_TICKLESS_IDLE */
611
612 /*-----------------------------------------------------------*/
613
/**
 * @brief Configure the SysTick timer to generate the RTOS tick interrupt at
 * configTICK_RATE_HZ.
 *
 * Defined weak so an application can override it and use a different timer
 * to generate the tick interrupt.
 */
__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void ) /* PRIVILEGED_FUNCTION */
{
    /* Calculate the constants required to configure the tick interrupt. */
    #if ( configUSE_TICKLESS_IDLE == 1 )
    {
        /* Counts per tick, the longest idle period the 24-bit counter can
         * express, and the count lost while the SysTick is stopped. */
        ulTimerCountsForOneTick = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ );
        xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
        ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR / ( configCPU_CLOCK_HZ / configSYSTICK_CLOCK_HZ );
    }
    #endif /* configUSE_TICKLESS_IDLE */

    /* Stop and reset SysTick.
     *
     * QEMU versions older than 7.0.0 contain a bug which causes an error if we
     * enable SysTick without first selecting a valid clock source. We trigger
     * the bug if we change clock sources from a clock with a zero clock period
     * to one with a nonzero clock period and enable Systick at the same time.
     * So we configure the CLKSOURCE bit here, prior to setting the ENABLE bit.
     * This workaround avoids the bug in QEMU versions older than 7.0.0. */
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG;
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

    /* Configure SysTick to interrupt at the requested rate. */
    portNVIC_SYSTICK_LOAD_REG = ( configSYSTICK_CLOCK_HZ / configTICK_RATE_HZ ) - 1UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT_CONFIG | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
}
640
641 /*-----------------------------------------------------------*/
642
prvTaskExitError(void)643 static void prvTaskExitError( void )
644 {
645 volatile uint32_t ulDummy = 0UL;
646
647 /* A function that implements a task must not exit or attempt to return to
648 * its caller as there is nothing to return to. If a task wants to exit it
649 * should instead call vTaskDelete( NULL ). Artificially force an assert()
650 * to be triggered if configASSERT() is defined, then stop here so
651 * application writers can catch the error. */
652 configASSERT( ulCriticalNesting == ~0UL );
653 portDISABLE_INTERRUPTS();
654
655 while( ulDummy == 0 )
656 {
657 /* This file calls prvTaskExitError() after the scheduler has been
658 * started to remove a compiler warning about the function being
659 * defined but never called. ulDummy is used purely to quieten other
660 * warnings about code appearing after this function is called - making
661 * ulDummy volatile makes the compiler think the function could return
662 * and therefore not output an 'unreachable code' warning for code that
663 * appears after it. */
664 }
665 }
666
667 /*-----------------------------------------------------------*/
668
669 #if ( configENABLE_MPU == 1 )
670
/**
 * @brief Convert a region size in bytes into the encoding used by the SIZE
 * field of the MPU Region Attribute and Size Register (RASR).
 *
 * The SIZE field encodes a region of 2^(SIZE+1) bytes. The smallest region
 * this port uses is 256 bytes (encoding 7) and the largest valid encoding
 * is 31 (4GB).
 *
 * @param ulActualSizeInBytes The requested region size in bytes.
 * @return The smallest sufficient SIZE encoding, pre-shifted by one so it
 * can be OR'ed directly into the correct bit position of the RASR.
 */
static uint32_t prvGetMPURegionSizeSetting( uint32_t ulActualSizeInBytes )
{
    uint32_t ulCandidateRegionSize = 256UL; /* Smallest supported region. */
    uint32_t ulSizeEncoding = 7UL;          /* SIZE encoding for 256 bytes. */

    /* Walk the powers of two until the candidate region is big enough, or
     * the largest valid encoding (31) is reached. */
    while( ulSizeEncoding < 31UL )
    {
        if( ulActualSizeInBytes <= ulCandidateRegionSize )
        {
            break;
        }

        ulSizeEncoding++;
        ulCandidateRegionSize <<= 1UL;
    }

    /* Pre-shift the encoding into the RASR SIZE bit position. */
    return ulSizeEncoding << 1UL;
}
693
694 #endif /* configENABLE_MPU */
695
696 /*-----------------------------------------------------------*/
697
698 #if ( configENABLE_MPU == 1 )
699
/**
 * @brief Program the fixed kernel MPU regions (privileged flash, unprivileged
 * flash and privileged SRAM) and enable the MPU with a privileged-only
 * background region.
 *
 * Region boundaries come from linker-script symbols, so the linker script
 * must define the __privileged_functions_*, __FLASH_segment_* and
 * __privileged_sram_* symbols.
 */
static void prvSetupMPU( void ) /* PRIVILEGED_FUNCTION */
{
    #if defined( __ARMCC_VERSION )

        /* Declaration when these variables are defined in code instead of
         * being exported from linker scripts. */
        extern uint32_t * __privileged_functions_start__;
        extern uint32_t * __privileged_functions_end__;
        extern uint32_t * __FLASH_segment_start__;
        extern uint32_t * __FLASH_segment_end__;
        extern uint32_t * __privileged_sram_start__;
        extern uint32_t * __privileged_sram_end__;

    #else /* if defined( __ARMCC_VERSION ) */

        /* Declaration when these variables are exported from linker scripts. */
        extern uint32_t __privileged_functions_start__[];
        extern uint32_t __privileged_functions_end__[];
        extern uint32_t __FLASH_segment_start__[];
        extern uint32_t __FLASH_segment_end__[];
        extern uint32_t __privileged_sram_start__[];
        extern uint32_t __privileged_sram_end__[];

    #endif /* defined( __ARMCC_VERSION ) */

    /* Ensure that the MPU is present. */
    configASSERT( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE );

    /* Check that the MPU is present. */
    if( portMPU_TYPE_REG == portEXPECTED_MPU_TYPE_VALUE )
    {
        /* Setup privileged flash as Read Only so that privileged tasks can
         * read it but not modify. */
        portMPU_RBAR_REG = ( ( ( uint32_t ) __privileged_functions_start__ ) | /* Base address. */
                             ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                             ( portPRIVILEGED_FLASH_REGION ) );

        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RO_UNPRIV_NA ) |
                             ( ( configS_C_B_FLASH & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             ( prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_functions_end__ - ( uint32_t ) __privileged_functions_start__ ) ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Setup unprivileged flash as Read Only by both privileged and
         * unprivileged tasks. All tasks can read it but no-one can modify. */
        portMPU_RBAR_REG = ( ( ( uint32_t ) __FLASH_segment_start__ ) | /* Base address. */
                             ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                             ( portUNPRIVILEGED_FLASH_REGION ) );

        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RO_UNPRIV_RO ) |
                             ( ( configS_C_B_FLASH & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             ( prvGetMPURegionSizeSetting( ( uint32_t ) __FLASH_segment_end__ - ( uint32_t ) __FLASH_segment_start__ ) ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Setup RAM containing kernel data for privileged access only.
         * Marked execute-never so kernel data cannot be executed. */
        portMPU_RBAR_REG = ( ( uint32_t ) __privileged_sram_start__ ) | /* Base address. */
                           ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                           ( portPRIVILEGED_RAM_REGION );

        portMPU_RASR_REG = ( ( portMPU_REGION_PRIV_RW_UNPRIV_NA ) |
                             ( portMPU_REGION_EXECUTE_NEVER ) |
                             ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                             prvGetMPURegionSizeSetting( ( uint32_t ) __privileged_sram_end__ - ( uint32_t ) __privileged_sram_start__ ) |
                             ( portMPU_RASR_REGION_ENABLE_BITMASK ) );

        /* Enable MPU with privileged background access i.e. unmapped
         * regions have privileged access. */
        portMPU_CTRL_REG |= ( portMPU_CTRL_PRIV_BACKGROUND_ENABLE_BITMASK |
                              portMPU_CTRL_ENABLE_BITMASK );
    }
}
770
771 #endif /* configENABLE_MPU */
772
773 /*-----------------------------------------------------------*/
774
vPortYield(void)775 void vPortYield( void ) /* PRIVILEGED_FUNCTION */
776 {
777 /* Set a PendSV to request a context switch. */
778 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
779
780 /* Barriers are normally not required but do ensure the code is
781 * completely within the specified behaviour for the architecture. */
782 __asm volatile ( "dsb" ::: "memory" );
783 __asm volatile ( "isb" );
784 }
785
786 /*-----------------------------------------------------------*/
787
vPortEnterCritical(void)788 void vPortEnterCritical( void ) /* PRIVILEGED_FUNCTION */
789 {
790 portDISABLE_INTERRUPTS();
791 ulCriticalNesting++;
792
793 /* Barriers are normally not required but do ensure the code is
794 * completely within the specified behaviour for the architecture. */
795 __asm volatile ( "dsb" ::: "memory" );
796 __asm volatile ( "isb" );
797 }
798
799 /*-----------------------------------------------------------*/
800
vPortExitCritical(void)801 void vPortExitCritical( void ) /* PRIVILEGED_FUNCTION */
802 {
803 configASSERT( ulCriticalNesting );
804 ulCriticalNesting--;
805
806 if( ulCriticalNesting == 0 )
807 {
808 portENABLE_INTERRUPTS();
809 }
810 }
811
812 /*-----------------------------------------------------------*/
813
/**
 * @brief SysTick interrupt handler - drives the RTOS tick.
 *
 * Increments the tick count with interrupts masked and pends a PendSV
 * context switch if the tick made a higher priority task ready to run.
 */
void SysTick_Handler( void ) /* PRIVILEGED_FUNCTION */
{
    uint32_t ulPreviousMask;

    /* Mask interrupts while the tick count is updated. */
    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();

    traceISR_ENTER();
    {
        /* Increment the RTOS tick. */
        if( xTaskIncrementTick() != pdFALSE )
        {
            traceISR_EXIT_TO_SCHEDULER();
            /* Pend a context switch. */
            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
        }
        else
        {
            traceISR_EXIT();
        }
    }

    /* Restore the previous interrupt mask. */
    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
}
837
838 /*-----------------------------------------------------------*/
839
/**
 * @brief C portion of the SVC handler - dispatches the requested SVC operation.
 *
 * Called from the assembly SVC handler with a pointer to the exception
 * stack frame of the code that raised the SVC.
 *
 * @param pulCallerStackAddress Caller's stack frame - registers are stacked
 * in the order R0, R1, R2, R3, R12, LR, PC, xPSR.
 */
void vPortSVCHandler_C( uint32_t * pulCallerStackAddress ) /* PRIVILEGED_FUNCTION portDONT_DISCARD */
{
    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )

        #if defined( __ARMCC_VERSION )

            /* Declaration when these variable are defined in code instead of being
             * exported from linker scripts. */
            extern uint32_t * __syscalls_flash_start__;
            extern uint32_t * __syscalls_flash_end__;

        #else

            /* Declaration when these variable are exported from linker scripts. */
            extern uint32_t __syscalls_flash_start__[];
            extern uint32_t __syscalls_flash_end__[];

        #endif /* defined( __ARMCC_VERSION ) */

    #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

    uint32_t ulPC;
    uint8_t ucSVCNumber;

    /* Register are stored on the stack in the following order - R0, R1, R2, R3,
     * R12, LR, PC, xPSR. */
    ulPC = pulCallerStackAddress[ portOFFSET_TO_PC ];

    /* The SVC instruction is 2 bytes long, so the SVC number (its immediate
     * operand) is the byte located 2 bytes before the stacked return PC. */
    ucSVCNumber = ( ( uint8_t * ) ulPC )[ -2 ];

    switch( ucSVCNumber )
    {
        case portSVC_START_SCHEDULER:
            /* Setup the context of the first task so that the first task starts
             * executing. */
            vRestoreContextOfFirstTask();
            break;

        #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) )

            case portSVC_RAISE_PRIVILEGE:
                /* Only raise the privilege, if the svc was raised from any of
                 * the system calls (i.e. the caller's PC lies within the
                 * system call flash section). */
                if( ( ulPC >= ( uint32_t ) __syscalls_flash_start__ ) &&
                    ( ulPC <= ( uint32_t ) __syscalls_flash_end__ ) )
                {
                    vRaisePrivilege();
                }
                break;

        #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 1 ) */

        #if ( configENABLE_MPU == 1 )

            case portSVC_YIELD:
                vPortYield();
                break;

        #endif /* configENABLE_MPU == 1 */

        default:
            /* Incorrect SVC call. */
            configASSERT( pdFALSE );
    }
}
904
905 /*-----------------------------------------------------------*/
906
907 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
908
/**
 * @brief Set up the execution of a system call (MPU wrappers v2).
 *
 * Called from the SVC handler when an unprivileged task makes a system
 * call. Moves the exception stack frame onto the task's dedicated
 * privileged system call stack, redirects execution to the system call
 * implementation, and raises privilege for the duration of the call.
 *
 * @param pulTaskStack The task (PSP) stack at the time the SVC was raised.
 * @param ulLR EXC_RETURN value - not used by this port's implementation.
 * @param ucSystemCallNumber Index into uxSystemCallImplementations. Already
 * range checked by the assembly SVC handler before this function is called.
 */
void vSystemCallEnter( uint32_t * pulTaskStack,
                       uint32_t ulLR,
                       uint8_t ucSystemCallNumber ) /* PRIVILEGED_FUNCTION */
{
    extern TaskHandle_t pxCurrentTCB;
    extern UBaseType_t uxSystemCallImplementations[ NUM_SYSTEM_CALLS ];
    xMPU_SETTINGS * pxMpuSettings;
    uint32_t * pulSystemCallStack;
    uint32_t ulSystemCallLocation, i;
    /* Size of the basic hardware-stacked exception frame -
     * R0-R3, R12, LR, PC, xPSR. */
    const uint32_t ulStackFrameSize = 8;

    #if defined( __ARMCC_VERSION )

        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __syscalls_flash_start__;
        extern uint32_t * __syscalls_flash_end__;

    #else

        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __syscalls_flash_start__[];
        extern uint32_t __syscalls_flash_end__[];

    #endif /* #if defined( __ARMCC_VERSION ) */

    ulSystemCallLocation = pulTaskStack[ portOFFSET_TO_PC ];
    pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

    /* Checks:
     * 1. SVC is raised from the system call section (i.e. application is
     *    not raising SVC directly).
     * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must be NULL as
     *    it is non-NULL only during the execution of a system call (i.e.
     *    between system call enter and exit).
     * 3. System call is not for a kernel API disabled by the configuration
     *    in FreeRTOSConfig.h.
     * 4. We do not need to check that ucSystemCallNumber is within range
     *    because the assembly SVC handler checks that before calling
     *    this function.
     */
    if( ( ulSystemCallLocation >= ( uint32_t ) __syscalls_flash_start__ ) &&
        ( ulSystemCallLocation <= ( uint32_t ) __syscalls_flash_end__ ) &&
        ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack == NULL ) &&
        ( uxSystemCallImplementations[ ucSystemCallNumber ] != ( UBaseType_t ) 0 ) )
    {
        pulSystemCallStack = pxMpuSettings->xSystemCallStackInfo.pulSystemCallStack;

        /* Make space on the system call stack for the stack frame. */
        pulSystemCallStack = pulSystemCallStack - ulStackFrameSize;

        /* Copy the stack frame. */
        for( i = 0; i < ulStackFrameSize; i++ )
        {
            pulSystemCallStack[ i ] = pulTaskStack[ i ];
        }

        /* Store the value of the Link Register before the SVC was raised.
         * It contains the address of the caller of the System Call entry
         * point (i.e. the caller of the MPU_<API>). We need to restore it
         * when we exit from the system call. */
        pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry = pulTaskStack[ portOFFSET_TO_LR ];

        /* Use the pulSystemCallStack in thread mode. */
        __asm volatile ( "msr psp, %0" : : "r" ( pulSystemCallStack ) );

        /* Start executing the system call upon returning from this handler. */
        pulSystemCallStack[ portOFFSET_TO_PC ] = uxSystemCallImplementations[ ucSystemCallNumber ];

        /* Raise a request to exit from the system call upon finishing the
         * system call. */
        pulSystemCallStack[ portOFFSET_TO_LR ] = ( uint32_t ) vRequestSystemCallExit;

        /* Remember the location where we should copy the stack frame when we exit from
         * the system call. */
        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = pulTaskStack + ulStackFrameSize;

        /* Record if the hardware used padding to force the stack pointer
         * to be double word aligned. */
        if( ( pulTaskStack[ portOFFSET_TO_PSR ] & portPSR_STACK_PADDING_MASK ) == portPSR_STACK_PADDING_MASK )
        {
            pxMpuSettings->ulTaskFlags |= portSTACK_FRAME_HAS_PADDING_FLAG;
        }
        else
        {
            pxMpuSettings->ulTaskFlags &= ( ~portSTACK_FRAME_HAS_PADDING_FLAG );
        }

        /* We ensure in pxPortInitialiseStack that the system call stack is
         * double word aligned and therefore, there is no need of padding.
         * Clear the bit[9] of stacked xPSR. */
        pulSystemCallStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );

        /* Raise the privilege for the duration of the system call. */
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r0, control \n" /* Obtain current control value. */
            " movs r1, #1     \n" /* r1 = 1. */
            " bics r0, r1     \n" /* Clear nPRIV bit. */
            " msr control, r0 \n" /* Write back new control value. */
            ::: "r0", "r1", "memory"
        );
    }
}
1014
1015 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1016
1017 /*-----------------------------------------------------------*/
1018
1019 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1020
/**
 * @brief Raise the SVC that requests exit from a system call.
 *
 * vSystemCallEnter() installs this function as the stacked LR for a system
 * call, so it runs when the system call implementation returns. The SVC it
 * raises is then handled by vSystemCallExit().
 */
void vRequestSystemCallExit( void ) /* __attribute__( ( naked ) ) PRIVILEGED_FUNCTION */
{
    __asm volatile ( "svc %0 \n" ::"i" ( portSVC_SYSTEM_CALL_EXIT ) : "memory" );
}
1025
1026 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1027
1028 /*-----------------------------------------------------------*/
1029
1030 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1031
/**
 * @brief Tear down the execution of a system call (MPU wrappers v2).
 *
 * Mirror of vSystemCallEnter() - copies the exception stack frame back to
 * the task stack, restores the return address of the original caller, and
 * drops privilege before returning to thread mode.
 *
 * @param pulSystemCallStack The system call (PSP) stack at the time the
 * exit SVC was raised.
 * @param ulLR EXC_RETURN value - not used by this port's implementation.
 */
void vSystemCallExit( uint32_t * pulSystemCallStack,
                      uint32_t ulLR ) /* PRIVILEGED_FUNCTION */
{
    extern TaskHandle_t pxCurrentTCB;
    xMPU_SETTINGS * pxMpuSettings;
    uint32_t * pulTaskStack;
    uint32_t ulSystemCallLocation, i;
    /* Size of the basic hardware-stacked exception frame -
     * R0-R3, R12, LR, PC, xPSR. */
    const uint32_t ulStackFrameSize = 8;

    #if defined( __ARMCC_VERSION )

        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __privileged_functions_start__;
        extern uint32_t * __privileged_functions_end__;

    #else

        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __privileged_functions_start__[];
        extern uint32_t __privileged_functions_end__[];

    #endif /* #if defined( __ARMCC_VERSION ) */

    ulSystemCallLocation = pulSystemCallStack[ portOFFSET_TO_PC ];
    pxMpuSettings = xTaskGetMPUSettings( pxCurrentTCB );

    /* Checks:
     * 1. SVC is raised from the privileged code (i.e. application is not
     *    raising SVC directly). This SVC is only raised from
     *    vRequestSystemCallExit which is in the privileged code section.
     * 2. pxMpuSettings->xSystemCallStackInfo.pulTaskStack must not be NULL -
     *    this means that we previously entered a system call and the
     *    application is not attempting to exit without entering a system
     *    call.
     */
    if( ( ulSystemCallLocation >= ( uint32_t ) __privileged_functions_start__ ) &&
        ( ulSystemCallLocation <= ( uint32_t ) __privileged_functions_end__ ) &&
        ( pxMpuSettings->xSystemCallStackInfo.pulTaskStack != NULL ) )
    {
        pulTaskStack = pxMpuSettings->xSystemCallStackInfo.pulTaskStack;

        /* Make space on the task stack for the stack frame. */
        pulTaskStack = pulTaskStack - ulStackFrameSize;

        /* Copy the stack frame. */
        for( i = 0; i < ulStackFrameSize; i++ )
        {
            pulTaskStack[ i ] = pulSystemCallStack[ i ];
        }

        /* Use the pulTaskStack in thread mode. */
        __asm volatile ( "msr psp, %0" : : "r" ( pulTaskStack ) );

        /* Return to the caller of the System Call entry point (i.e. the
         * caller of the MPU_<API>). */
        pulTaskStack[ portOFFSET_TO_PC ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

        /* Ensure that LR has a valid value.*/
        pulTaskStack[ portOFFSET_TO_LR ] = pxMpuSettings->xSystemCallStackInfo.ulLinkRegisterAtSystemCallEntry;

        /* If the hardware used padding to force the stack pointer
         * to be double word aligned, set the stacked xPSR bit[9],
         * otherwise clear it. */
        if( ( pxMpuSettings->ulTaskFlags & portSTACK_FRAME_HAS_PADDING_FLAG ) == portSTACK_FRAME_HAS_PADDING_FLAG )
        {
            pulTaskStack[ portOFFSET_TO_PSR ] |= portPSR_STACK_PADDING_MASK;
        }
        else
        {
            pulTaskStack[ portOFFSET_TO_PSR ] &= ( ~portPSR_STACK_PADDING_MASK );
        }

        /* This is not NULL only for the duration of the system call. */
        pxMpuSettings->xSystemCallStackInfo.pulTaskStack = NULL;

        /* Drop the privilege before returning to the thread mode. */
        __asm volatile
        (
            " .syntax unified \n"
            " mrs r0, control \n" /* Obtain current control value. */
            " movs r1, #1     \n" /* r1 = 1. */
            " orrs r0, r1     \n" /* Set nPRIV bit. */
            " msr control, r0 \n" /* Write back new control value. */
            ::: "r0", "r1", "memory"
        );
    }
}
1120
1121 #endif /* ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) */
1122
1123 /*-----------------------------------------------------------*/
1124
1125 #if ( configENABLE_MPU == 1 )
1126
xPortIsTaskPrivileged(void)1127 BaseType_t xPortIsTaskPrivileged( void ) /* PRIVILEGED_FUNCTION */
1128 {
1129 BaseType_t xTaskIsPrivileged = pdFALSE;
1130 const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1131
1132 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1133 {
1134 xTaskIsPrivileged = pdTRUE;
1135 }
1136
1137 return xTaskIsPrivileged;
1138 }
1139
1140 #endif /* configENABLE_MPU == 1 */
1141
1142 /*-----------------------------------------------------------*/
1143
1144 #if ( configENABLE_MPU == 1 )
1145
/**
 * @brief Initialise the context of a new task (MPU enabled).
 *
 * The complete context is stored in the task's xMPU_SETTINGS structure
 * rather than on the task stack. Layout of ulContext:
 * [0-7] R4-R11, [8-15] R0-R3/R12/LR/PC/xPSR, [16] PSP,
 * [17] CONTROL, [18] EXC_RETURN.
 *
 * @param pxTopOfStack Top of the task's stack.
 * @param pxCode Task entry function - becomes the initial PC.
 * @param pvParameters Argument passed to the task - becomes the initial R0.
 * @param xRunPrivileged pdTRUE to create a privileged task.
 * @param xMPUSettings The task's MPU settings in which the context is stored.
 * @return Pointer to one past the saved context, used by the context
 * restore code.
 */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                     TaskFunction_t pxCode,
                                     void * pvParameters,
                                     BaseType_t xRunPrivileged,
                                     xMPU_SETTINGS * xMPUSettings ) /* PRIVILEGED_FUNCTION */
{
    /* Fill the callee-saved registers with known values to aid debugging. */
    xMPUSettings->ulContext[ 0 ] = 0x04040404;                                        /* r4. */
    xMPUSettings->ulContext[ 1 ] = 0x05050505;                                        /* r5. */
    xMPUSettings->ulContext[ 2 ] = 0x06060606;                                        /* r6. */
    xMPUSettings->ulContext[ 3 ] = 0x07070707;                                        /* r7. */
    xMPUSettings->ulContext[ 4 ] = 0x08080808;                                        /* r8. */
    xMPUSettings->ulContext[ 5 ] = 0x09090909;                                        /* r9. */
    xMPUSettings->ulContext[ 6 ] = 0x10101010;                                        /* r10. */
    xMPUSettings->ulContext[ 7 ] = 0x11111111;                                        /* r11. */

    xMPUSettings->ulContext[ 8 ] = ( uint32_t ) pvParameters;                         /* r0. */
    xMPUSettings->ulContext[ 9 ] = 0x01010101;                                        /* r1. */
    xMPUSettings->ulContext[ 10 ] = 0x02020202;                                       /* r2. */
    xMPUSettings->ulContext[ 11 ] = 0x03030303;                                       /* r3. */
    xMPUSettings->ulContext[ 12 ] = 0x12121212;                                       /* r12. */
    xMPUSettings->ulContext[ 13 ] = ( uint32_t ) portTASK_RETURN_ADDRESS;             /* LR. */
    xMPUSettings->ulContext[ 14 ] = ( uint32_t ) pxCode;                              /* PC. */
    xMPUSettings->ulContext[ 15 ] = portINITIAL_XPSR;                                 /* xPSR. */

    /* PSP is set as if the 8-word hardware frame were already stacked. */
    xMPUSettings->ulContext[ 16 ] = ( uint32_t ) ( pxTopOfStack - 8 );                /* PSP with the hardware saved stack. */

    if( xRunPrivileged == pdTRUE )
    {
        xMPUSettings->ulTaskFlags |= portTASK_IS_PRIVILEGED_FLAG;
        xMPUSettings->ulContext[ 17 ] = ( uint32_t ) portINITIAL_CONTROL_PRIVILEGED;  /* CONTROL. */
    }
    else
    {
        xMPUSettings->ulTaskFlags &= ( ~portTASK_IS_PRIVILEGED_FLAG );
        xMPUSettings->ulContext[ 17 ] = ( uint32_t ) portINITIAL_CONTROL_UNPRIVILEGED; /* CONTROL. */
    }

    xMPUSettings->ulContext[ 18 ] = portINITIAL_EXC_RETURN;                           /* LR (EXC_RETURN). */

    #if ( configUSE_MPU_WRAPPERS_V1 == 0 )
    {
        /* Ensure that the system call stack is double word aligned. */
        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = &( xMPUSettings->xSystemCallStackInfo.ulSystemCallStackBuffer[ configSYSTEM_CALL_STACK_SIZE - 1 ] );
        xMPUSettings->xSystemCallStackInfo.pulSystemCallStack = ( uint32_t * ) ( ( uint32_t ) ( xMPUSettings->xSystemCallStackInfo.pulSystemCallStack ) &
                                                                                 ( uint32_t ) ( ~( portBYTE_ALIGNMENT_MASK ) ) );

        /* This is not NULL only for the duration of a system call. */
        xMPUSettings->xSystemCallStackInfo.pulTaskStack = NULL;
    }
    #endif /* configUSE_MPU_WRAPPERS_V1 == 0 */

    return &( xMPUSettings->ulContext[ 19 ] );
}
1197
1198 #else /* configENABLE_MPU */
1199
/**
 * @brief Initialise the stack of a new task (MPU disabled).
 *
 * Builds a fake exception stack frame on the task stack so that the first
 * context restore behaves exactly as if the task had been interrupted.
 *
 * @param pxTopOfStack Top of the task's stack.
 * @param pxCode Task entry function - becomes the initial PC.
 * @param pvParameters Argument passed to the task - becomes the initial R0.
 * @return The new top of stack after the simulated frame has been pushed.
 */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                     TaskFunction_t pxCode,
                                     void * pvParameters ) /* PRIVILEGED_FUNCTION */
{
    /* Simulate the stack frame as it would be created by a context switch
     * interrupt. */
    #if ( portPRELOAD_REGISTERS == 0 )
    {
        pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
        *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
        pxTopOfStack -= 5;                                       /* Skip R12, R3, R2 and R1 (left uninitialised) and land on the R0 slot. */
        *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
        pxTopOfStack -= 9;                                       /* Skip R11..R4 (left uninitialised) and land on the EXC_RETURN slot. */
        *pxTopOfStack = portINITIAL_EXC_RETURN;
    }
    #else /* portPRELOAD_REGISTERS */
    {
        /* Preload every register with a recognisable value to aid debugging. */
        pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
        *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x12121212UL;            /* R12. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x03030303UL;            /* R3. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x02020202UL;            /* R2. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x01010101UL;            /* R1. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x11111111UL;            /* R11. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x10101010UL;            /* R10. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x09090909UL;            /* R09. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x08080808UL;            /* R08. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x07070707UL;            /* R07. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x06060606UL;            /* R06. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x05050505UL;            /* R05. */
        pxTopOfStack--;
        *pxTopOfStack = ( StackType_t ) 0x04040404UL;            /* R04. */
        pxTopOfStack--;
        *pxTopOfStack = portINITIAL_EXC_RETURN;                  /* EXC_RETURN. */
    }
    #endif /* portPRELOAD_REGISTERS */

    return pxTopOfStack;
}
1260
1261 #endif /* configENABLE_MPU */
1262
1263 /*-----------------------------------------------------------*/
1264
/**
 * @brief Start the FreeRTOS scheduler.
 *
 * Configures interrupt priorities, optionally validates handler
 * installation and sets up the MPU, starts the tick timer, and starts the
 * first task. Does not return in normal operation.
 *
 * @return 0 - but only if the port somehow fails to start the first task;
 * normally this function never returns.
 */
BaseType_t xPortStartScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* An application can install FreeRTOS interrupt handlers in one of the
     * following ways:
     * 1. Direct Routing - Install the functions SVC_Handler and PendSV_Handler
     *    for SVCall and PendSV interrupts respectively.
     * 2. Indirect Routing - Install separate handlers for SVCall and PendSV
     *    interrupts and route program control from those handlers to
     *    SVC_Handler and PendSV_Handler functions.
     *
     * Applications that use Indirect Routing must set
     * configCHECK_HANDLER_INSTALLATION to 0 in their FreeRTOSConfig.h. Direct
     * routing, which is validated here when configCHECK_HANDLER_INSTALLATION
     * is 1, should be preferred when possible. */
    #if ( configCHECK_HANDLER_INSTALLATION == 1 )
    {
        const portISR_t * const pxVectorTable = portSCB_VTOR_REG;

        /* Validate that the application has correctly installed the FreeRTOS
         * handlers for SVCall and PendSV interrupts. We do not check the
         * installation of the SysTick handler because the application may
         * choose to drive the RTOS tick using a timer other than the SysTick
         * timer by overriding the weak function vPortSetupTimerInterrupt().
         *
         * Assertion failures here indicate incorrect installation of the
         * FreeRTOS handlers. For help installing the FreeRTOS handlers, see
         * https://www.FreeRTOS.org/FAQHelp.html.
         *
         * Systems with a configurable address for the interrupt vector table
         * can also encounter assertion failures or even system faults here if
         * VTOR is not set correctly to point to the application's vector table. */
        configASSERT( pxVectorTable[ portVECTOR_INDEX_SVC ] == SVC_Handler );
        configASSERT( pxVectorTable[ portVECTOR_INDEX_PENDSV ] == PendSV_Handler );
    }
    #endif /* configCHECK_HANDLER_INSTALLATION */

    /* Make PendSV and SysTick the lowest priority interrupts, and make SVCall
     * the highest priority. */
    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
    portNVIC_SHPR2_REG = 0;

    #if ( configENABLE_MPU == 1 )
    {
        /* Setup the Memory Protection Unit (MPU). */
        prvSetupMPU();
    }
    #endif /* configENABLE_MPU */

    /* Start the timer that generates the tick ISR. Interrupts are disabled
     * here already. */
    vPortSetupTimerInterrupt();

    /* Initialize the critical nesting count ready for the first task. */
    ulCriticalNesting = 0;

    #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
    {
        /* From this point kernel object access checks use the running
         * task's permissions rather than being granted unconditionally. */
        xSchedulerRunning = pdTRUE;
    }
    #endif

    /* Start the first task. */
    vStartFirstTask();

    /* Should never get here as the tasks will now be executing. Call the task
     * exit error function to prevent compiler warnings about a static function
     * not being called in the case that the application writer overrides this
     * functionality by defining configTASK_RETURN_ADDRESS. Call
     * vTaskSwitchContext() so link time optimization does not remove the
     * symbol. */
    vTaskSwitchContext();
    prvTaskExitError();

    /* Should not get here. */
    return 0;
}
1342
1343 /*-----------------------------------------------------------*/
1344
/**
 * @brief End the scheduler - not supported by this port.
 *
 * There is nothing to return to on this architecture, so this function
 * should never be called; it deliberately trips an assert if it is.
 */
void vPortEndScheduler( void ) /* PRIVILEGED_FUNCTION */
{
    /* Not implemented in ports where there is nothing to return to.
     * Artificially force an assert. */
    configASSERT( ulCriticalNesting == 1000UL );
}
1351
1352 /*-----------------------------------------------------------*/
1353
1354 #if ( configENABLE_MPU == 1 )
1355
/**
 * @brief Translate generic memory region definitions into MPU settings.
 *
 * Fills xMPUSettings with per-task MPU RBAR/RASR register values. When
 * xRegions is NULL a single region granting read/write access to all of
 * SRAM is configured; otherwise the task stack region (when stack
 * parameters are valid) and each user-supplied region are encoded, and
 * unused configurable regions are invalidated.
 *
 * @param xMPUSettings Destination for the computed MPU register values.
 * @param xRegions Generic region definitions, or NULL for default access.
 * @param pxBottomOfStack Base address of the task stack.
 * @param uxStackDepth Stack depth in words; 0 means the stack region is
 * already configured and must not be touched.
 */
void vPortStoreTaskMPUSettings( xMPU_SETTINGS * xMPUSettings,
                                const struct xMEMORY_REGION * const xRegions,
                                StackType_t * pxBottomOfStack,
                                configSTACK_DEPTH_TYPE uxStackDepth )
{
    #if defined( __ARMCC_VERSION )

        /* Declaration when these variable are defined in code instead of being
         * exported from linker scripts. */
        extern uint32_t * __SRAM_segment_start__;
        extern uint32_t * __SRAM_segment_end__;
        extern uint32_t * __privileged_sram_start__;
        extern uint32_t * __privileged_sram_end__;

    #else
        /* Declaration when these variable are exported from linker scripts. */
        extern uint32_t __SRAM_segment_start__[];
        extern uint32_t __SRAM_segment_end__[];
        extern uint32_t __privileged_sram_start__[];
        extern uint32_t __privileged_sram_end__[];

    #endif /* defined( __ARMCC_VERSION ) */

    int32_t lIndex;
    uint32_t ul;

    if( xRegions == NULL )
    {
        /* No MPU regions are specified so allow access to all RAM. */
        xMPUSettings->xRegionsSettings[ 0 ].ulRBAR =
            ( ( ( uint32_t ) __SRAM_segment_start__ ) | /* Base address. */
              ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
              ( portSTACK_REGION ) ); /* Region number. */

        xMPUSettings->xRegionsSettings[ 0 ].ulRASR =
            ( ( portMPU_REGION_PRIV_RW_UNPRIV_RW ) |
              ( portMPU_REGION_EXECUTE_NEVER ) |
              ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
              ( prvGetMPURegionSizeSetting( ( uint32_t ) __SRAM_segment_end__ - ( uint32_t ) __SRAM_segment_start__ ) ) |
              ( portMPU_RASR_REGION_ENABLE_BITMASK ) );


        /* Invalidate user configurable regions. */
        for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
        {
            xMPUSettings->xRegionsSettings[ ul ].ulRBAR = ( ( ul - 1UL ) | portMPU_RBAR_REGION_NUMBER_VALID_BITMASK );
            xMPUSettings->xRegionsSettings[ ul ].ulRASR = 0UL;
        }
    }
    else
    {
        /* This function is called automatically when the task is created - in
         * which case the stack region parameters will be valid. At all other
         * times the stack parameters will not be valid and it is assumed that the
         * stack region has already been configured. */
        if( uxStackDepth > 0 )
        {
            /* Define the region that allows access to the stack. */
            xMPUSettings->xRegionsSettings[ 0 ].ulRBAR =
                ( ( ( uint32_t ) pxBottomOfStack ) |
                  ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                  ( portSTACK_REGION ) ); /* Region number. */

            xMPUSettings->xRegionsSettings[ 0 ].ulRASR =
                ( ( portMPU_REGION_PRIV_RW_UNPRIV_RW ) |
                  ( portMPU_REGION_EXECUTE_NEVER ) |
                  ( prvGetMPURegionSizeSetting( uxStackDepth * ( uint32_t ) sizeof( StackType_t ) ) ) |
                  ( ( configS_C_B_SRAM & portMPU_RASR_S_C_B_BITMASK ) << portMPU_RASR_S_C_B_LOCATION ) |
                  ( portMPU_RASR_REGION_ENABLE_BITMASK ) );
        }

        lIndex = 0;

        for( ul = 1UL; ul <= portNUM_CONFIGURABLE_REGIONS; ul++ )
        {
            if( ( xRegions[ lIndex ] ).ulLengthInBytes > 0UL )
            {
                /* Translate the generic region definition contained in
                 * xRegions into the CM0+ specific MPU settings that are then
                 * stored in xMPUSettings. */
                xMPUSettings->xRegionsSettings[ ul ].ulRBAR =
                    ( ( uint32_t ) xRegions[ lIndex ].pvBaseAddress ) |
                    ( portMPU_RBAR_REGION_NUMBER_VALID_BITMASK ) |
                    ( ul - 1UL ); /* Region number. */

                xMPUSettings->xRegionsSettings[ ul ].ulRASR =
                    ( prvGetMPURegionSizeSetting( xRegions[ lIndex ].ulLengthInBytes ) ) |
                    ( xRegions[ lIndex ].ulParameters ) |
                    ( portMPU_RASR_REGION_ENABLE_BITMASK );
            }
            else
            {
                /* Invalidate the region. */
                xMPUSettings->xRegionsSettings[ ul ].ulRBAR = ( ( ul - 1UL ) | portMPU_RBAR_REGION_NUMBER_VALID_BITMASK );
                xMPUSettings->xRegionsSettings[ ul ].ulRASR = 0UL;
            }

            lIndex++;
        }
    }
}
1457
1458 #endif /* configENABLE_MPU */
1459
1460 /*-----------------------------------------------------------*/
1461
1462 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1463
/**
 * @brief Check whether the calling task may access a buffer.
 *
 * Access is granted unconditionally before the scheduler starts and for
 * privileged tasks. Otherwise the buffer must lie entirely within one of
 * the task's enabled MPU regions whose access permissions allow the
 * requested access.
 *
 * @param pvBuffer Start of the buffer to check.
 * @param ulBufferLength Length of the buffer in bytes.
 * @param ulAccessRequested tskMPU_READ_PERMISSION and/or
 * tskMPU_WRITE_PERMISSION.
 * @return pdTRUE if the access is authorized, pdFALSE otherwise.
 */
BaseType_t xPortIsAuthorizedToAccessBuffer( const void * pvBuffer,
                                            uint32_t ulBufferLength,
                                            uint32_t ulAccessRequested ) /* PRIVILEGED_FUNCTION */

{
    uint32_t i, ulBufferStartAddress, ulBufferEndAddress;
    uint32_t ulRegionStart, ulRegionSize, ulRegionEnd;
    uint32_t ulMPURegionAccessPermissions;
    BaseType_t xAccessGranted = pdFALSE;
    const xMPU_SETTINGS * xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */

    if( xSchedulerRunning == pdFALSE )
    {
        /* Grant access to all the kernel objects before the scheduler
         * is started. It is necessary because there is no task running
         * yet and therefore, we cannot use the permissions of any
         * task. */
        xAccessGranted = pdTRUE;
    }
    else if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
    {
        /* Privileged tasks have access to everything. */
        xAccessGranted = pdTRUE;
    }
    else
    {
        /* Reject buffers whose end address would wrap around. */
        if( portADD_UINT32_WILL_OVERFLOW( ( ( uint32_t ) pvBuffer ), ( ulBufferLength - 1UL ) ) == pdFALSE )
        {
            ulBufferStartAddress = ( uint32_t ) pvBuffer;
            ulBufferEndAddress = ( ( ( uint32_t ) pvBuffer ) + ulBufferLength - 1UL );

            for( i = 0; i < portTOTAL_NUM_REGIONS; i++ )
            {
                /* Is the MPU region enabled? */
                if( ( xTaskMpuSettings->xRegionsSettings[ i ].ulRASR &
                      portMPU_RASR_REGION_ENABLE_BITMASK ) == portMPU_RASR_REGION_ENABLE_BITMASK )
                {
                    ulRegionStart = portEXTRACT_FIRST_ADDRESS_FROM_RBAR( xTaskMpuSettings->xRegionsSettings[ i ].ulRBAR );
                    ulRegionSize = portEXTRACT_REGION_SIZE_FROM_RASR( xTaskMpuSettings->xRegionsSettings[ i ].ulRASR );
                    ulRegionEnd = ulRegionStart + ulRegionSize;

                    /* The whole buffer, start and end, must fall within
                     * this region for it to be a match. */
                    if( portIS_ADDRESS_WITHIN_RANGE( ulBufferStartAddress,
                                                     ulRegionStart,
                                                     ulRegionEnd ) &&
                        portIS_ADDRESS_WITHIN_RANGE( ulBufferEndAddress,
                                                     ulRegionStart,
                                                     ulRegionEnd ) )
                    {
                        ulMPURegionAccessPermissions = xTaskMpuSettings->xRegionsSettings[ i ].ulRASR &
                                                       portMPU_RASR_AP_BITMASK;

                        if( ulAccessRequested == tskMPU_READ_PERMISSION ) /* RO. */
                        {
                            /* Any permission that allows unprivileged read. */
                            if( ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RO ) ||
                                ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RO_UNPRIV_RO ) ||
                                ( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RW ) )
                            {
                                xAccessGranted = pdTRUE;
                                break;
                            }
                        }
                        else if( ( ulAccessRequested & tskMPU_WRITE_PERMISSION ) != 0UL ) /* W or RW. */
                        {
                            /* Only full unprivileged read/write allows writes. */
                            if( ulMPURegionAccessPermissions == portMPU_REGION_PRIV_RW_UNPRIV_RW )
                            {
                                xAccessGranted = pdTRUE;
                                break;
                            }
                        }
                    }
                }
            }
        }
    }

    return xAccessGranted;
}
1540
1541 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
1542
1543 /*-----------------------------------------------------------*/
1544
xPortIsInsideInterrupt(void)1545 BaseType_t xPortIsInsideInterrupt( void )
1546 {
1547 uint32_t ulCurrentInterrupt;
1548 BaseType_t xReturn;
1549
1550 /* Obtain the number of the currently executing interrupt. Interrupt Program
1551 * Status Register (IPSR) holds the exception number of the currently-executing
1552 * exception or zero for Thread mode.*/
1553 __asm volatile ( "mrs %0, ipsr" : "=r" ( ulCurrentInterrupt )::"memory" );
1554
1555 if( ulCurrentInterrupt == 0 )
1556 {
1557 xReturn = pdFALSE;
1558 }
1559 else
1560 {
1561 xReturn = pdTRUE;
1562 }
1563
1564 return xReturn;
1565 }
1566
1567 /*-----------------------------------------------------------*/
1568
1569 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
1570
/**
 * @brief Grant a task access to a kernel object.
 *
 * Sets the bit corresponding to the kernel object in the task's access
 * control list.
 *
 * @param xInternalTaskHandle Task whose ACL is updated.
 * @param lInternalIndexOfKernelObject Kernel-internal index of the object.
 */
void vPortGrantAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                     int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
{
    /* Locate the ACL word and the bit within it for this object. */
    const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
    const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
    xMPU_SETTINGS * pxTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );

    pxTaskMpuSettings->ulAccessControlList[ ulEntryIndex ] |= ( 1U << ulEntryBit );
}
1584
1585 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
1586
1587 /*-----------------------------------------------------------*/
1588
1589 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) )
1590
/**
 * @brief Revoke a task's access to a kernel object.
 *
 * Clears the bit corresponding to the kernel object in the task's access
 * control list.
 *
 * @param xInternalTaskHandle Task whose ACL is updated.
 * @param lInternalIndexOfKernelObject Kernel-internal index of the object.
 */
void vPortRevokeAccessToKernelObject( TaskHandle_t xInternalTaskHandle,
                                      int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
{
    /* Locate the ACL word and the bit within it for this object. */
    const uint32_t ulEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject ) / portACL_ENTRY_SIZE_BITS;
    const uint32_t ulEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject ) % portACL_ENTRY_SIZE_BITS;
    xMPU_SETTINGS * pxTaskMpuSettings = xTaskGetMPUSettings( xInternalTaskHandle );

    pxTaskMpuSettings->ulAccessControlList[ ulEntryIndex ] &= ~( 1U << ulEntryBit );
}
1604
1605 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) && ( configENABLE_ACCESS_CONTROL_LIST == 1 ) ) */
1606
1607 /*-----------------------------------------------------------*/
1608
1609 #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) )
1610
1611 #if ( configENABLE_ACCESS_CONTROL_LIST == 1 )
1612
xPortIsAuthorizedToAccessKernelObject(int32_t lInternalIndexOfKernelObject)1613 BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
1614 {
1615 uint32_t ulAccessControlListEntryIndex, ulAccessControlListEntryBit;
1616 BaseType_t xAccessGranted = pdFALSE;
1617 const xMPU_SETTINGS * xTaskMpuSettings;
1618
1619 if( xSchedulerRunning == pdFALSE )
1620 {
1621 /* Grant access to all the kernel objects before the scheduler
1622 * is started. It is necessary because there is no task running
1623 * yet and therefore, we cannot use the permissions of any
1624 * task. */
1625 xAccessGranted = pdTRUE;
1626 }
1627 else
1628 {
1629 xTaskMpuSettings = xTaskGetMPUSettings( NULL ); /* Calling task's MPU settings. */
1630
1631 ulAccessControlListEntryIndex = ( ( uint32_t ) lInternalIndexOfKernelObject / portACL_ENTRY_SIZE_BITS );
1632 ulAccessControlListEntryBit = ( ( uint32_t ) lInternalIndexOfKernelObject % portACL_ENTRY_SIZE_BITS );
1633
1634 if( ( xTaskMpuSettings->ulTaskFlags & portTASK_IS_PRIVILEGED_FLAG ) == portTASK_IS_PRIVILEGED_FLAG )
1635 {
1636 xAccessGranted = pdTRUE;
1637 }
1638 else
1639 {
1640 if( ( xTaskMpuSettings->ulAccessControlList[ ulAccessControlListEntryIndex ] & ( 1U << ulAccessControlListEntryBit ) ) != 0 )
1641 {
1642 xAccessGranted = pdTRUE;
1643 }
1644 }
1645 }
1646
1647 return xAccessGranted;
1648 }
1649
1650 #else /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
1651
xPortIsAuthorizedToAccessKernelObject(int32_t lInternalIndexOfKernelObject)1652 BaseType_t xPortIsAuthorizedToAccessKernelObject( int32_t lInternalIndexOfKernelObject ) /* PRIVILEGED_FUNCTION */
1653 {
1654 ( void ) lInternalIndexOfKernelObject;
1655
1656 /* If Access Control List feature is not used, all the tasks have
1657 * access to all the kernel objects. */
1658 return pdTRUE;
1659 }
1660
1661 #endif /* #if ( configENABLE_ACCESS_CONTROL_LIST == 1 ) */
1662
1663 #endif /* #if ( ( configENABLE_MPU == 1 ) && ( configUSE_MPU_WRAPPERS_V1 == 0 ) ) */
1664
1665 /*-----------------------------------------------------------*/
1666