1 /*
2 * FreeRTOS Kernel V11.1.0
3 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
5 *
6 * SPDX-License-Identifier: MIT AND BSD-3-Clause
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy of
9 * this software and associated documentation files (the "Software"), to deal in
10 * the Software without restriction, including without limitation the rights to
11 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
12 * the Software, and to permit persons to whom the Software is furnished to do so,
13 * subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in all
16 * copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
20 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
21 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
22 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
23 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * https://www.FreeRTOS.org
26 * https://github.com/FreeRTOS
27 *
28 */
29
30 /*----------------------------------------------------------------------
31 * Implementation of functions defined in portable.h for the RP2040 port.
32 *----------------------------------------------------------------------*/
33
34 #include "FreeRTOS.h"
35 #include "task.h"
36 #include "rp2040_config.h"
37 #include "hardware/clocks.h"
38 #include "hardware/exception.h"
39
40 /*
41  * LIB_PICO_MULTICORE == 1 if we are linked with pico_multicore (note that
42  * the non-SMP FreeRTOS_Kernel is not itself linked with pico_multicore). We
43  * use this flag to determine whether multi-core functionality is needed.
44 */
45 #if ( LIB_PICO_MULTICORE == 1 )
46 #include "pico/multicore.h"
47 #endif /* LIB_PICO_MULTICORE */
48
49 /* TODO: consider removing this macro. */
50 #define portRUNNING_ON_BOTH_CORES ( configNUMBER_OF_CORES == portMAX_CORE_COUNT )
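/*
 * Illustrative note (not part of the original source): how many cores FreeRTOS
 * runs on is selected by the application in FreeRTOSConfig.h. A minimal
 * sketch, assuming the usual RP2040 dual-core setup:
 *
 *   #define configNUMBER_OF_CORES    2    // SMP scheduler on both cores
 *   #define configTICK_CORE          0    // the core that owns the SysTick handler
 *
 * With configNUMBER_OF_CORES == 2, portRUNNING_ON_BOTH_CORES evaluates to 1
 * and the inter-core FIFO signalling paths below are compiled in; with 1 the
 * port behaves like a single-core Cortex-M0+ port.
 */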
51
52 /* Constants required to manipulate the NVIC. */
53 #define portNVIC_SYSTICK_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000e010 ) )
54 #define portNVIC_SYSTICK_LOAD_REG ( *( ( volatile uint32_t * ) 0xe000e014 ) )
55 #define portNVIC_SYSTICK_CURRENT_VALUE_REG ( *( ( volatile uint32_t * ) 0xe000e018 ) )
56 #define portNVIC_INT_CTRL_REG ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
57 #define portNVIC_SHPR3_REG ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
58 #define portNVIC_SYSTICK_CLK_BIT ( 1UL << 2UL )
59 #define portNVIC_SYSTICK_INT_BIT ( 1UL << 1UL )
60 #define portNVIC_SYSTICK_ENABLE_BIT ( 1UL << 0UL )
61 #define portNVIC_SYSTICK_COUNT_FLAG_BIT ( 1UL << 16UL )
62 #define portNVIC_PENDSVSET_BIT ( 1UL << 28UL )
63 #define portMIN_INTERRUPT_PRIORITY ( 255UL )
64 #define portNVIC_PENDSV_PRI ( portMIN_INTERRUPT_PRIORITY << 16UL )
65 #define portNVIC_SYSTICK_PRI ( portMIN_INTERRUPT_PRIORITY << 24UL )
66
67 /* Constants required to set up the initial stack. */
68 #define portINITIAL_XPSR ( 0x01000000 )
69
70 /* The systick is a 24-bit counter. */
71 #define portMAX_24_BIT_NUMBER ( 0xffffffUL )
72
73 /* A fiddle factor to estimate the number of SysTick counts that would have
74 * occurred while the SysTick counter is stopped during tickless idle
75 * calculations. */
76 #ifndef portMISSED_COUNTS_FACTOR
77 #define portMISSED_COUNTS_FACTOR ( 45UL )
78 #endif
79
80 /* Let the user override the pre-loading of the initial LR with the address of
81 * prvTaskExitError() in case it messes up unwinding of the stack in the
82 * debugger. */
83 #ifdef configTASK_RETURN_ADDRESS
84 #define portTASK_RETURN_ADDRESS configTASK_RETURN_ADDRESS
85 #else
86 #define portTASK_RETURN_ADDRESS prvTaskExitError
87 #endif
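/*
 * Illustrative example (not part of the original file): an application can
 * supply its own task-return handler from FreeRTOSConfig.h, for example to
 * keep stack unwinding clean in a debugger. A hedged sketch, where
 * vMyTaskExitTrap is a hypothetical application-provided function:
 *
 *   void vMyTaskExitTrap( void );
 *   #define configTASK_RETURN_ADDRESS    vMyTaskExitTrap
 *
 * If configTASK_RETURN_ADDRESS is left undefined, prvTaskExitError() below is
 * used, which calls panic_unsupported().
 */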
88
89 /*
90 * Setup the timer to generate the tick interrupts. The implementation in this
91 * file is weak to allow application writers to change the timer used to
92 * generate the tick interrupt.
93 */
94 void vPortSetupTimerInterrupt( void );
95
96 /*
97 * Exception handlers.
98 */
99 void xPortPendSVHandler( void ) __attribute__( ( naked ) );
100 void xPortSysTickHandler( void );
101 void vPortSVCHandler( void );
102
103 /*
104  * Starting the first task is done in a separate function so it can be tested in isolation.
105  */
106 static void vPortStartFirstTask( void ) __attribute__( ( naked ) );
107
108 /*
109 * Used to catch tasks that attempt to return from their implementing function.
110 */
111 static void prvTaskExitError( void );
112
113 /*-----------------------------------------------------------*/
114
115 /* Each task maintains its own interrupt status in the critical nesting
116 * variable. This is initialized to 0 to allow vPortEnter/ExitCritical
117 * to be called before the scheduler is started */
118 #if ( configNUMBER_OF_CORES == 1 )
119 static UBaseType_t uxCriticalNesting;
120 #else /* #if ( configNUMBER_OF_CORES == 1 ) */
121 UBaseType_t uxCriticalNestings[ configNUMBER_OF_CORES ] = { 0 };
122 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
123
124 /*-----------------------------------------------------------*/
125
126 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
127 #include "pico/lock_core.h"
128 #include "hardware/irq.h"
129 #include "event_groups.h"
130 #if configSUPPORT_STATIC_ALLOCATION
131 static StaticEventGroup_t xStaticEventGroup;
132 #define pEventGroup ( &xStaticEventGroup )
133 #endif /* configSUPPORT_STATIC_ALLOCATION */
134 static EventGroupHandle_t xEventGroup;
135 #if ( portRUNNING_ON_BOTH_CORES == 0 )
136 static EventBits_t uxCrossCoreEventBits;
137 static spin_lock_t * pxCrossCoreSpinLock;
138 #endif
139
140 static spin_lock_t * pxYieldSpinLock[ configNUMBER_OF_CORES ];
141 static uint32_t ulYieldSpinLockSaveValue[ configNUMBER_OF_CORES ];
142 #endif /* configSUPPORT_PICO_SYNC_INTEROP */
143
144 /*
145 * The number of SysTick increments that make up one tick period.
146 */
147 #if ( configUSE_TICKLESS_IDLE == 1 )
148 static uint32_t ulTimerCountsForOneTick = 0;
149 #endif /* configUSE_TICKLESS_IDLE */
150
151 /*
152 * The maximum number of tick periods that can be suppressed is limited by the
153 * 24 bit resolution of the SysTick timer.
154 */
155 #if ( configUSE_TICKLESS_IDLE == 1 )
156 static uint32_t xMaximumPossibleSuppressedTicks = 0;
157 #endif /* configUSE_TICKLESS_IDLE */
158
159 /*
160  * Compensate for the CPU cycles that pass while the SysTick is stopped (low
161  * power functionality only).
162 */
163 #if ( configUSE_TICKLESS_IDLE == 1 )
164 static uint32_t ulStoppedTimerCompensation = 0;
165 #endif /* configUSE_TICKLESS_IDLE */
166
167 /*-----------------------------------------------------------*/
168
169 #define INVALID_PRIMARY_CORE_NUM 0xffu
170 /* The primary core number (the one which runs the SysTick handler). */
171 static uint8_t ucPrimaryCoreNum = INVALID_PRIMARY_CORE_NUM;
172
173 /* Note: portIS_FREE_RTOS_CORE() also returns false until the scheduler is started */
174 #if ( portRUNNING_ON_BOTH_CORES == 1 )
175 #define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum != INVALID_PRIMARY_CORE_NUM )
176 #else
177 #define portIS_FREE_RTOS_CORE() ( ucPrimaryCoreNum == get_core_num() )
178 #endif
179
180 /*
181 * See header file for description.
182 */
183 StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
184 TaskFunction_t pxCode,
185 void * pvParameters )
186 {
187 /* Simulate the stack frame as it would be created by a context switch
188 * interrupt. */
189 pxTopOfStack--; /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
190 *pxTopOfStack = portINITIAL_XPSR; /* xPSR */
191 pxTopOfStack--;
192 *pxTopOfStack = ( StackType_t ) pxCode; /* PC */
193 pxTopOfStack--;
194 *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
195 pxTopOfStack -= 5; /* R12, R3, R2 and R1. */
196 *pxTopOfStack = ( StackType_t ) pvParameters; /* R0 */
197 pxTopOfStack -= 8; /* R11..R4. */
198
199 return pxTopOfStack;
200 }
201 /*-----------------------------------------------------------*/
202
203 static void prvTaskExitError( void )
204 {
205 /* A function that implements a task must not exit or attempt to return to
206 * its caller as there is nothing to return to. If a task wants to exit it
207 * should instead call vTaskDelete( NULL ). */
208 panic_unsupported();
209 }
210 /*-----------------------------------------------------------*/
211
212 void vPortSVCHandler( void )
213 {
214 /* This function is no longer used, but retained for backward
215 * compatibility. */
216 }
217 /*-----------------------------------------------------------*/
218
219 void vPortStartFirstTask( void )
220 {
221 #if ( configNUMBER_OF_CORES == 1 )
222 __asm volatile (
223 " .syntax unified \n"
224 " ldr r2, pxCurrentTCBConst1 \n" /* Obtain location of pxCurrentTCB. */
225 " ldr r3, [r2] \n"
226 " ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
227 " adds r0, #32 \n" /* Discard everything up to r0. */
228 " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
229 " movs r0, #2 \n" /* Switch to the psp stack. */
230 " msr CONTROL, r0 \n"
231 " isb \n"
232 " pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
233 " mov lr, r5 \n" /* lr is now in r5. */
234 " pop {r3} \n" /* Return address is now in r3. */
235 " pop {r2} \n" /* Pop and discard XPSR. */
236 " cpsie i \n" /* The first task has its context and interrupts can be enabled. */
237 " bx r3 \n" /* Finally, jump to the user defined task code. */
238 " .align 4 \n"
239 "pxCurrentTCBConst1: .word pxCurrentTCB\n"
240 );
241 #else /* if ( configNUMBER_OF_CORES == 1 ) */
242 __asm volatile (
243 " .syntax unified \n"
244 #if configRESET_STACK_POINTER
245 " ldr r0, =0xE000ED08 \n" /* Use the NVIC offset register to locate the stack. */
246 " ldr r0, [r0] \n"
247 " ldr r0, [r0] \n"
248 " msr msp, r0 \n" /* Set the msp back to the start of the stack. */
249 #endif /* configRESET_STACK_POINTER */
250 #if portRUNNING_ON_BOTH_CORES
251 " adr r1, ulAsmLocals \n" /* Get the location of the current TCB for the current core. */
252 " ldmia r1!, {r2, r3} \n"
253 " ldr r2, [r2] \n" /* r2 = Core number */
254 " lsls r2, #2 \n"
255 " ldr r3, [r3, r2] \n" /* r3 = pxCurrentTCBs[get_core_num()] */
256 #else
257 " ldr r3, =pxCurrentTCBs \n"
258 " ldr r3, [r3] \n" /* r3 = pxCurrentTCBs[0] */
259 #endif /* portRUNNING_ON_BOTH_CORES */
260 " ldr r0, [r3] \n" /* The first item in pxCurrentTCB is the task top of stack. */
261 " adds r0, #32 \n" /* Discard everything up to r0. */
262 " msr psp, r0 \n" /* This is now the new top of stack to use in the task. */
263 " movs r0, #2 \n" /* Switch to the psp stack. */
264 " msr CONTROL, r0 \n"
265 " isb \n"
266 " pop {r0-r5} \n" /* Pop the registers that are saved automatically. */
267 " mov lr, r5 \n" /* lr is now in r5. */
268 " pop {r3} \n" /* Return address is now in r3. */
269 " pop {r2} \n" /* Pop and discard XPSR. */
270 " cpsie i \n" /* The first task has its context and interrupts can be enabled. */
271 " bx r3 \n" /* Finally, jump to the user defined task code. */
272 #if portRUNNING_ON_BOTH_CORES
273 " \n"
274 " .align 4 \n"
275 "ulAsmLocals: \n"
276 " .word 0xD0000000 \n" /* SIO */
277 " .word pxCurrentTCBs \n"
278 #endif /* portRUNNING_ON_BOTH_CORES */
279 );
280 #endif /* if ( configNUMBER_OF_CORES == 1 ) */
281 }
282 /*-----------------------------------------------------------*/
283
284 #if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
285     static void prvFIFOInterruptHandler()
286 {
287 /* We must remove the contents (which we don't care about)
288 * to clear the IRQ */
289 multicore_fifo_drain();
290
291 /* And explicitly clear any other IRQ flags. */
292 multicore_fifo_clear_irq();
293
294 #if ( portRUNNING_ON_BOTH_CORES == 1 )
295 portYIELD_FROM_ISR( pdTRUE );
296 #elif ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
297 BaseType_t xHigherPriorityTaskWoken = pdFALSE;
298 uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock );
299 EventBits_t ulBits = uxCrossCoreEventBits;
300 uxCrossCoreEventBits &= ~ulBits;
301 spin_unlock( pxCrossCoreSpinLock, ulSave );
302 xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken );
303 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
304 #endif /* portRUNNING_ON_BOTH_CORES */
305 }
306 #endif /* if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
307
308 #if ( configNUMBER_OF_CORES > 1 )
309
310 /*
311 * See header file for description.
312 */
313     static BaseType_t xPortStartSchedulerOnCore()
314 {
315 if( ucPrimaryCoreNum == get_core_num() )
316 {
317 /* Start the timer that generates the tick ISR. Interrupts are disabled
318 * here already. */
319 vPortSetupTimerInterrupt();
320
321             /* Make PendSV, SVCall and SysTick the same priority as the kernel. */
322 portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
323 #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
324 exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
325 #endif
326 }
327
328 portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
329
330 #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
331 exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
332 exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
333 #endif
334
335 /* Install FIFO handler to receive interrupt from other core */
336 multicore_fifo_clear_irq();
337 multicore_fifo_drain();
338 uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num();
339 irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY );
340 irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler );
341 irq_set_enabled( ulIRQNum, 1 );
342
343 /* Start the first task. */
344 vPortStartFirstTask();
345
346 /* Should never get here as the tasks will now be executing! Call the task
347 * exit error function to prevent compiler warnings about a static function
348 * not being called in the case that the application writer overrides this
349 * functionality by defining configTASK_RETURN_ADDRESS. Call
350 * vTaskSwitchContext() so link time optimisation does not remove the
351 * symbol. */
352 vTaskSwitchContext( portGET_CORE_ID() );
353 prvTaskExitError();
354
355 /* Should not get here! */
356 return 0;
357 }
358
359 #if portRUNNING_ON_BOTH_CORES
360         static void prvDisableInterruptsAndPortStartSchedulerOnCore( void )
361 {
362 portDISABLE_INTERRUPTS();
363 xPortStartSchedulerOnCore();
364 }
365 #endif
366
367 /*
368 * See header file for description.
369 */
370     BaseType_t xPortStartScheduler( void )
371 {
372 configASSERT( ucPrimaryCoreNum == INVALID_PRIMARY_CORE_NUM );
373
374 /* No one else should use these! */
375 spin_lock_claim( configSMP_SPINLOCK_0 );
376 spin_lock_claim( configSMP_SPINLOCK_1 );
377
378 #if portRUNNING_ON_BOTH_CORES
379 ucPrimaryCoreNum = configTICK_CORE;
380 configASSERT( get_core_num() == 0 ); /* we must be started on core 0 */
381 multicore_launch_core1( prvDisableInterruptsAndPortStartSchedulerOnCore );
382 #else
383 ucPrimaryCoreNum = get_core_num();
384 #endif
385 xPortStartSchedulerOnCore();
386
387 /* Should not get here! */
388 return 0;
389 }
390
391 #else /* if ( configNUMBER_OF_CORES > 1 ) */
392
393 /*
394 * See header file for description.
395 */
396     BaseType_t xPortStartScheduler( void )
397 {
398         /* Make PendSV, SVCall and SysTick the same priority as the kernel. */
399 portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
400 portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;
401
402 #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
403 exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
404 exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
405 exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
406 #endif
407
408 /* Start the timer that generates the tick ISR. Interrupts are disabled
409 * here already. */
410 vPortSetupTimerInterrupt();
411
412 /* Initialise the critical nesting count ready for the first task. */
413 uxCriticalNesting = 0;
414
415 ucPrimaryCoreNum = get_core_num();
416 #if ( LIB_PICO_MULTICORE == 1 )
417 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
418 multicore_fifo_clear_irq();
419 multicore_fifo_drain();
420                 uint32_t ulIRQNum = SIO_IRQ_PROC0 + get_core_num(); /* SIO_IRQ_PROC0 is IRQ 15; matches the SMP path above. */
421                 irq_set_priority( ulIRQNum, portMIN_INTERRUPT_PRIORITY );
422                 irq_set_exclusive_handler( ulIRQNum, prvFIFOInterruptHandler );
423                 irq_set_enabled( ulIRQNum, 1 );
424 #endif
425 #endif
426
427 /* Start the first task. */
428 vPortStartFirstTask();
429
430 /* Should never get here as the tasks will now be executing! Call the task
431 * exit error function to prevent compiler warnings about a static function
432 * not being called in the case that the application writer overrides this
433 * functionality by defining configTASK_RETURN_ADDRESS. Call
434 * vTaskSwitchContext() so link time optimisation does not remove the
435 * symbol. */
436 vTaskSwitchContext();
437 prvTaskExitError();
438
439 /* Should not get here! */
440 return 0;
441 }
442 #endif /* if ( configNUMBER_OF_CORES > 1 ) */
443
444 /*-----------------------------------------------------------*/
445
446 void vPortEndScheduler( void )
447 {
448 /* Not implemented in ports where there is nothing to return to. */
449 panic_unsupported();
450 }
451 /*-----------------------------------------------------------*/
452
453 void vPortYield( void )
454 {
455 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
456
457 /* We are not in an ISR, and pxYieldSpinLock is always dealt with and
458 * cleared when interrupts are re-enabled, so should be NULL */
459 configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
460 #endif /* configSUPPORT_PICO_SYNC_INTEROP */
461
462 /* Set a PendSV to request a context switch. */
463 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
464
465 /* Barriers are normally not required but do ensure the code is completely
466 * within the specified behaviour for the architecture. */
467 __asm volatile ( "dsb" ::: "memory" );
468 __asm volatile ( "isb" );
469 }
470
471 /*-----------------------------------------------------------*/
472
473 #if ( configNUMBER_OF_CORES == 1 )
474     void vPortEnterCritical( void )
475 {
476 portDISABLE_INTERRUPTS();
477 uxCriticalNesting++;
478 __asm volatile ( "dsb" ::: "memory" );
479 __asm volatile ( "isb" );
480 }
481 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
482 /*-----------------------------------------------------------*/
483
484 #if ( configNUMBER_OF_CORES == 1 )
485     void vPortExitCritical( void )
486 {
487 configASSERT( uxCriticalNesting );
488 uxCriticalNesting--;
489
490 if( uxCriticalNesting == 0 )
491 {
492 portENABLE_INTERRUPTS();
493 }
494 }
495 #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
496
497 void vPortEnableInterrupts( void )
498 {
499 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
500 int xCoreID = ( int ) portGET_CORE_ID();
501
502 if( pxYieldSpinLock[ xCoreID ] )
503 {
504 spin_lock_t * const pxTmpLock = pxYieldSpinLock[ xCoreID ];
505 pxYieldSpinLock[ xCoreID ] = NULL;
506 spin_unlock( pxTmpLock, ulYieldSpinLockSaveValue[ xCoreID ] );
507 }
508 #endif
509 __asm volatile ( " cpsie i " ::: "memory" );
510 }
511
512 /*-----------------------------------------------------------*/
513
514 uint32_t ulSetInterruptMaskFromISR( void )
515 {
516 __asm volatile (
517 " mrs r0, PRIMASK \n"
518 " cpsid i \n"
519 " bx lr "
520 ::: "memory"
521 );
522 }
523 /*-----------------------------------------------------------*/
524
525 void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask )
526 {
527 __asm volatile (
528 " msr PRIMASK, r0 \n"
529 " bx lr "
530 ::: "memory"
531 );
532 }
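/*
 * Illustrative usage sketch (not part of the original file): the two functions
 * above back the port's portSET_INTERRUPT_MASK_FROM_ISR() /
 * portCLEAR_INTERRUPT_MASK_FROM_ISR() macros, so an ISR-safe routine saves and
 * restores PRIMASK around its critical section as shown below.
 * prvExampleIrqHandler is a hypothetical handler, shown only for illustration
 * and excluded from the build.
 */
#if 0 /* Example only - not compiled. */
static void prvExampleIrqHandler( void )
{
    /* Mask interrupts and remember the previous PRIMASK value. */
    uint32_t ulSaved = ulSetInterruptMaskFromISR();

    /* ... access data shared with task-level code here ... */

    /* Restore the PRIMASK value that was in force on entry. */
    vClearInterruptMaskFromISR( ulSaved );
}
#endif /* 0 */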
533
534 /*-----------------------------------------------------------*/
535
536 void vYieldCore( int xCoreID )
537 {
538     /* Remove a compiler warning about an unused parameter when configASSERT is not defined.
539      * xCoreID is not otherwise used here because this is a dual-core system: the core to yield is always the other core. */
540 ( void ) xCoreID;
541
542 configASSERT( xCoreID != ( int ) portGET_CORE_ID() );
543
544 #if portRUNNING_ON_BOTH_CORES
545
546         /* Non-blocking: this causes an interrupt on the other core if the FIFO
547          * isn't already full, in which case an IRQ must already be pending. */
548 sio_hw->fifo_wr = 0;
549 #endif
550 }
551
552 /*-----------------------------------------------------------*/
553
554 void xPortPendSVHandler( void )
555 {
556 /* This is a naked function. */
557 #if ( configNUMBER_OF_CORES == 1 )
558 __asm volatile
559 (
560 " .syntax unified \n"
561 " mrs r0, psp \n"
562 " \n"
563 " ldr r3, pxCurrentTCBConst2 \n" /* Get the location of the current TCB. */
564 " ldr r2, [r3] \n"
565 " \n"
566 " subs r0, r0, #32 \n" /* Make space for the remaining low registers. */
567 " str r0, [r2] \n" /* Save the new top of stack. */
568 " stmia r0!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
569 " mov r4, r8 \n" /* Store the high registers. */
570 " mov r5, r9 \n"
571 " mov r6, r10 \n"
572 " mov r7, r11 \n"
573 " stmia r0!, {r4-r7} \n"
574 #if portUSE_DIVIDER_SAVE_RESTORE
575 " movs r2, #0xd \n" /* Store the divider state. */
576 " lsls r2, #28 \n"
577
578 /* We expect that the divider is ready at this point (which is
579 * necessary to safely save/restore), because:
580 * a) if we have not been interrupted since we entered this method,
581 * then >8 cycles have clearly passed, so the divider is done
582 * b) if we were interrupted in the interim, then any "safe" - i.e.
583 * does the right thing in an IRQ - use of the divider should
584 * have waited for any in-process divide to complete, saved and
585 * then fully restored the result, thus the result is ready in
586 * that case too. */
587 " ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
588 " ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
589 " ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
590 " ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
591
592 /* We actually save the divider state in the 4 words below
593 * our recorded stack pointer, so as not to disrupt the stack
594 * frame expected by debuggers - this is addressed by
595 * portEXTRA_STACK_SIZE */
596 " subs r0, r0, #48 \n"
597 " stmia r0!, {r4-r7} \n"
598 #endif /* portUSE_DIVIDER_SAVE_RESTORE */
599 " push {r3, r14} \n"
600 " cpsid i \n"
601 " bl vTaskSwitchContext \n"
602 " cpsie i \n"
603 " pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
604 " \n"
605 " ldr r1, [r2] \n"
606 " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
607 " adds r0, r0, #16 \n" /* Move to the high registers. */
608 " ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
609 " mov r8, r4 \n"
610 " mov r9, r5 \n"
611 " mov r10, r6 \n"
612 " mov r11, r7 \n"
613 " \n"
614 " msr psp, r0 \n" /* Remember the new top of stack for the task. */
615 " \n"
616 #if portUSE_DIVIDER_SAVE_RESTORE
617 " movs r2, #0xd \n" /* Pop the divider state. */
618 " lsls r2, #28 \n"
619 " subs r0, r0, #48 \n" /* Go back for the divider state */
620 " ldmia r0!, {r4-r7} \n" /* Pop the divider state. */
621
622             /* Note: always restore via the SIO_DIV_UDIVI* registers, because writing
623              * them overwrites the results and stops any calculation anyway; the sign
624              * of the results is adjusted by the h/w at read time based on whether the
625              * last started division was signed and the inputs' signs differed. */
626 " str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
627 " str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
628 " str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
629 " str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
630 #else /* if portUSE_DIVIDER_SAVE_RESTORE */
631 " subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
632 #endif /* portUSE_DIVIDER_SAVE_RESTORE */
633 " ldmia r0!, {r4-r7} \n" /* Pop low registers. */
634 " \n"
635 " bx r3 \n"
636 " .align 4 \n"
637 "pxCurrentTCBConst2: .word pxCurrentTCB \n"
638 );
639 #else /* if ( configNUMBER_OF_CORES == 1 ) */
640 __asm volatile
641 (
642 " .syntax unified \n"
643 " mrs r1, psp \n"
644 " \n"
645 " adr r0, ulAsmLocals2 \n" /* Get the location of the current TCB for the current core. */
646 " ldmia r0!, {r2, r3} \n"
647 #if portRUNNING_ON_BOTH_CORES
648 " ldr r0, [r2] \n" /* r0 = Core number */
649 " lsls r0, r0, #2 \n"
650 " adds r3, r0 \n" /* r3 = &pxCurrentTCBs[get_core_num()] */
651 #else
652 " \n" /* r3 = &pxCurrentTCBs[0] */
653 #endif /* portRUNNING_ON_BOTH_CORES */
654 " ldr r0, [r3] \n" /* r0 = pxCurrentTCB */
655 " \n"
656 " subs r1, r1, #32 \n" /* Make space for the remaining low registers. */
657 " str r1, [r0] \n" /* Save the new top of stack. */
658 " stmia r1!, {r4-r7} \n" /* Store the low registers that are not saved automatically. */
659 " mov r4, r8 \n" /* Store the high registers. */
660 " mov r5, r9 \n"
661 " mov r6, r10 \n"
662 " mov r7, r11 \n"
663 " stmia r1!, {r4-r7} \n"
664 #if portUSE_DIVIDER_SAVE_RESTORE
665
666 /* We expect that the divider is ready at this point (which is
667 * necessary to safely save/restore), because:
668 * a) if we have not been interrupted since we entered this method,
669 * then >8 cycles have clearly passed, so the divider is done
670 * b) if we were interrupted in the interim, then any "safe" - i.e.
671 * does the right thing in an IRQ - use of the divider should
672 * have waited for any in-process divide to complete, saved and
673 * then fully restored the result, thus the result is ready in
674 * that case too. */
675 " ldr r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
676 " ldr r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
677 " ldr r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
678 " ldr r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
679
680 /* We actually save the divider state in the 4 words below
681 * our recorded stack pointer, so as not to disrupt the stack
682 * frame expected by debuggers - this is addressed by
683 * portEXTRA_STACK_SIZE */
684 " subs r1, r1, #48 \n"
685 " stmia r1!, {r4-r7} \n"
686 #endif /* portUSE_DIVIDER_SAVE_RESTORE */
687 #if portRUNNING_ON_BOTH_CORES
688 " ldr r0, [r2] \n" /* r0 = Core number */
689 #else
690 " movs r0, #0 \n"
691 #endif /* portRUNNING_ON_BOTH_CORES */
692 " push {r3, r14} \n"
693 " cpsid i \n"
694 " bl vTaskSwitchContext \n"
695 " cpsie i \n"
696 " pop {r2, r3} \n" /* lr goes in r3. r2 now holds tcb pointer. */
697 " \n"
698 " ldr r1, [r2] \n"
699 " ldr r0, [r1] \n" /* The first item in pxCurrentTCB is the task top of stack. */
700 " adds r0, r0, #16 \n" /* Move to the high registers. */
701 " ldmia r0!, {r4-r7} \n" /* Pop the high registers. */
702 " mov r8, r4 \n"
703 " mov r9, r5 \n"
704 " mov r10, r6 \n"
705 " mov r11, r7 \n"
706 " \n"
707 " msr psp, r0 \n" /* Remember the new top of stack for the task. */
708 " \n"
709 #if portUSE_DIVIDER_SAVE_RESTORE
710 " movs r2, #0xd \n" /* Pop the divider state. */
711 " lsls r2, #28 \n"
712 " subs r0, r0, #48 \n" /* Go back for the divider state */
713 " ldmia r0!, {r4-r7} \n" /* Pop the divider state. */
714
715             /* Note: always restore via the SIO_DIV_UDIVI* registers, because writing
716              * them overwrites the results and stops any calculation anyway; the sign
717              * of the results is adjusted by the h/w at read time based on whether the
718              * last started division was signed and the inputs' signs differed. */
719 " str r4, [r2, #0x60] \n" /* SIO_DIV_UDIVIDEND_OFFSET */
720 " str r5, [r2, #0x64] \n" /* SIO_DIV_UDIVISOR_OFFSET */
721 " str r6, [r2, #0x74] \n" /* SIO_DIV_REMAINDER_OFFSET */
722 " str r7, [r2, #0x70] \n" /* SIO_DIV_QUOTIENT_OFFSET */
723 #else /* if portUSE_DIVIDER_SAVE_RESTORE */
724 " subs r0, r0, #32 \n" /* Go back for the low registers that are not automatically restored. */
725 #endif /* portUSE_DIVIDER_SAVE_RESTORE */
726 " ldmia r0!, {r4-r7} \n" /* Pop low registers. */
727 " \n"
728 " bx r3 \n"
729 " \n"
730 " .align 4 \n"
731 "ulAsmLocals2: \n"
732 " .word 0xD0000000 \n" /* SIO */
733 " .word pxCurrentTCBs \n"
734 );
735 #endif /* if ( configNUMBER_OF_CORES == 1 ) */
736 }
737 /*-----------------------------------------------------------*/
738
739 void xPortSysTickHandler( void )
740 {
741 uint32_t ulPreviousMask;
742
743 ulPreviousMask = taskENTER_CRITICAL_FROM_ISR();
744 traceISR_ENTER();
745 {
746 /* Increment the RTOS tick. */
747 if( xTaskIncrementTick() != pdFALSE )
748 {
749 traceISR_EXIT_TO_SCHEDULER();
750 /* Pend a context switch. */
751 portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
752 }
753 else
754 {
755 traceISR_EXIT();
756 }
757 }
758 taskEXIT_CRITICAL_FROM_ISR( ulPreviousMask );
759 }
760 /*-----------------------------------------------------------*/
761
762 /*
763 * Setup the systick timer to generate the tick interrupts at the required
764 * frequency.
765 */
766 __attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
767 {
768 /* Calculate the constants required to configure the tick interrupt. */
769 #if ( configUSE_TICKLESS_IDLE == 1 )
770 {
771 ulTimerCountsForOneTick = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ );
772 xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
773 ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR;
774 }
775 #endif /* configUSE_TICKLESS_IDLE */
776
777 /* Stop and reset the SysTick. */
778 portNVIC_SYSTICK_CTRL_REG = 0UL;
779 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
780
781 /* Configure SysTick to interrupt at the requested rate. */
782 portNVIC_SYSTICK_LOAD_REG = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ ) - 1UL;
783 portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
784 }
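/*
 * Illustrative example (not part of the original file): because
 * vPortSetupTimerInterrupt() is declared weak, an application can provide its
 * own definition (in application code, not here) to generate the tick from a
 * different timer. A minimal structural sketch only; the application would
 * also have to install a matching IRQ handler that calls xPortSysTickHandler()
 * or an equivalent tick handler.
 */
#if 0 /* Example only - not compiled. */
void vPortSetupTimerInterrupt( void )
{
    /* Configure an alternative hardware timer to fire every
     * ( 1 / configTICK_RATE_HZ ) seconds at the lowest interrupt priority,
     * mirroring what the default SysTick implementation above does. */
}
#endif /* 0 */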
785 /*-----------------------------------------------------------*/
786
787 #if ( configUSE_TICKLESS_IDLE == 1 )
788
789     __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
790 {
791 uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements;
792 TickType_t xModifiableIdleTime;
793
794 /* Make sure the SysTick reload value does not overflow the counter. */
795 if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
796 {
797 xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
798 }
799
800 /* Stop the SysTick momentarily. The time the SysTick is stopped for
801 * is accounted for as best it can be, but using the tickless mode will
802 * inevitably result in some tiny drift of the time maintained by the
803 * kernel with respect to calendar time. */
804 portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT;
805
806 /* Calculate the reload value required to wait xExpectedIdleTime
807 * tick periods. -1 is used because this code will execute part way
808 * through one of the tick periods. */
809 ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );
810
811 if( ulReloadValue > ulStoppedTimerCompensation )
812 {
813 ulReloadValue -= ulStoppedTimerCompensation;
814 }
815
816 /* Enter a critical section but don't use the taskENTER_CRITICAL()
817 * method as that will mask interrupts that should exit sleep mode. */
818 __asm volatile ( "cpsid i" ::: "memory" );
819 __asm volatile ( "dsb" );
820 __asm volatile ( "isb" );
821
822 /* If a context switch is pending or a task is waiting for the scheduler
823 * to be unsuspended then abandon the low power entry. */
824 if( eTaskConfirmSleepModeStatus() == eAbortSleep )
825 {
826 /* Restart from whatever is left in the count register to complete
827 * this tick period. */
828 portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG;
829
830 /* Restart SysTick. */
831 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
832
833 /* Reset the reload register to the value required for normal tick
834 * periods. */
835 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
836
837             /* Re-enable interrupts - see comments above the cpsid instruction
838              * above. */
839 __asm volatile ( "cpsie i" ::: "memory" );
840 }
841 else
842 {
843 /* Set the new reload value. */
844 portNVIC_SYSTICK_LOAD_REG = ulReloadValue;
845
846 /* Clear the SysTick count flag and set the count value back to
847 * zero. */
848 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
849
850 /* Restart SysTick. */
851 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
852
853 /* Sleep until something happens. configPRE_SLEEP_PROCESSING() can
854 * set its parameter to 0 to indicate that its implementation contains
855 * its own wait for interrupt or wait for event instruction, and so wfi
856 * should not be executed again. However, the original expected idle
857 * time variable must remain unmodified, so a copy is taken. */
858 xModifiableIdleTime = xExpectedIdleTime;
859 configPRE_SLEEP_PROCESSING( xModifiableIdleTime );
860
861 if( xModifiableIdleTime > 0 )
862 {
863 __asm volatile ( "dsb" ::: "memory" );
864 __asm volatile ( "wfi" );
865 __asm volatile ( "isb" );
866 }
867
868 configPOST_SLEEP_PROCESSING( xExpectedIdleTime );
869
870             /* Re-enable interrupts to allow the interrupt that brought the MCU
871              * out of sleep mode to execute immediately. See comments above the
872              * "cpsid i" instruction above. */
873 __asm volatile ( "cpsie i" ::: "memory" );
874 __asm volatile ( "dsb" );
875 __asm volatile ( "isb" );
876
877 /* Disable interrupts again because the clock is about to be stopped
878 * and interrupts that execute while the clock is stopped will increase
879 * any slippage between the time maintained by the RTOS and calendar
880 * time. */
881 __asm volatile ( "cpsid i" ::: "memory" );
882 __asm volatile ( "dsb" );
883 __asm volatile ( "isb" );
884
885 /* Disable the SysTick clock without reading the
886 * portNVIC_SYSTICK_CTRL_REG register to ensure the
887 * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set. Again,
888 * the time the SysTick is stopped for is accounted for as best it can
889 * be, but using the tickless mode will inevitably result in some tiny
890 * drift of the time maintained by the kernel with respect to calendar
891 * time*/
892 portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT );
893
894 /* Determine if the SysTick clock has already counted to zero and
895              * been set back to the current reload value (the reload value being
896 * correct for the entire expected idle time) or if the SysTick is yet
897 * to count to zero (in which case an interrupt other than the SysTick
898 * must have brought the system out of sleep mode). */
899 if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
900 {
901 uint32_t ulCalculatedLoadValue;
902
903 /* The tick interrupt is already pending, and the SysTick count
904 * reloaded with ulReloadValue. Reset the
905 * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick
906 * period. */
907 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );
908
909 /* Don't allow a tiny value, or values that have somehow
910 * underflowed because the post sleep hook did something
911 * that took too long. */
912 if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
913 {
914 ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
915 }
916
917 portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;
918
919 /* As the pending tick will be processed as soon as this
920                  * function exits, the tick count maintained by the kernel is stepped
921 * forward by one less than the time spent waiting. */
922 ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
923 }
924 else
925 {
926 /* Something other than the tick interrupt ended the sleep.
927 * Work out how long the sleep lasted rounded to complete tick
928 * periods (not the ulReload value which accounted for part
929 * ticks). */
930 ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG;
931
932 /* How many complete tick periods passed while the processor
933 * was waiting? */
934 ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;
935
936 /* The reload value is set to whatever fraction of a single tick
937 * period remains. */
938 portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
939 }
940
941 /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG
942 * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard
943 * value. */
944 portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
945 portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
946 vTaskStepTick( ulCompleteTickPeriods );
947 portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;
948
949 /* Exit with interrupts enabled. */
950 __asm volatile ( "cpsie i" ::: "memory" );
951 }
952 }
953
954 #endif /* configUSE_TICKLESS_IDLE */
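/*
 * Illustrative configuration sketch (not part of the original file): the
 * tickless idle code above is only compiled when it is enabled from
 * FreeRTOSConfig.h, e.g.:
 *
 *   #define configUSE_TICKLESS_IDLE                  1
 *   #define configEXPECTED_IDLE_TIME_BEFORE_SLEEP    5    // optional, in ticks
 *
 * configPRE_SLEEP_PROCESSING()/configPOST_SLEEP_PROCESSING() can additionally
 * be defined to wrap application-specific low power entry/exit code around the
 * wfi above.
 */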
955
956 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 )
957     static TickType_t prvGetTicksToWaitBefore( absolute_time_t t )
958 {
959 int64_t xDelay = absolute_time_diff_us( get_absolute_time(), t );
960 const uint32_t ulTickPeriod = 1000000 / configTICK_RATE_HZ;
961
962 xDelay -= ulTickPeriod;
963
964 if( xDelay >= ulTickPeriod )
965 {
966 return xDelay / ulTickPeriod;
967 }
968
969 return 0;
970 }
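    /*
     * Worked example (illustrative, assuming configTICK_RATE_HZ == 1000, i.e. a
     * 1000 us tick period): for a target 10500 us in the future, xDelay starts
     * at 10500, subtracting one tick period leaves 9500, and 9500 / 1000 == 9
     * ticks are returned. Returning 9 rather than 10 is deliberately
     * conservative: the caller wakes slightly before the deadline and resolves
     * the remainder itself instead of oversleeping by a partial tick.
     */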
971 #endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 ) */
972
973 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
974     uint32_t ulPortLockGetCurrentOwnerId()
975 {
976 if( portIS_FREE_RTOS_CORE() )
977 {
978 uint32_t exception = __get_current_exception();
979
980 if( !exception )
981 {
982 return ( uintptr_t ) xTaskGetCurrentTaskHandle();
983 }
984
985             /* Note: since the ROM is at 0x00000000, these can't be confused with
986              * valid task handles (pointers) in RAM. */
987 /* We make all exception handler/core combinations distinct owners */
988 return get_core_num() + exception * 2;
989 }
990
991         /* Note: since the ROM is at 0x00000000, this can't be confused with
992          * valid task handles (pointers) in RAM. */
993 return get_core_num();
994 }
995
996     static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock )
997 {
998 uint32_t ulBit;
999
1000 #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
1001 ulBit = 1u << ( spin_lock_get_num( spinLock ) & 0x7u );
1002 #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
1003 ulBit = 1u << spin_lock_get_num( spinLock );
1004             /* Fold spin lock numbers >= 24 down so the bit stays within the 24 usable event group bits. */
1005 ulBit |= ulBit << 8u;
1006 ulBit >>= 8u;
1007 #endif /* configTICK_TYPE_WIDTH_IN_BITS */
1008 return ( EventBits_t ) ulBit;
1009 }
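    /*
     * Worked example (illustrative) of the folding above when the tick type is
     * 32 bits wide (24 usable event group bits): spin lock 3 gives
     * ( 0x00000008 | 0x00000800 ) >> 8, i.e. bit 3, while spin lock 30 gives
     * ( 0x40000000 | 0 ) >> 8, i.e. bit 22, so locks 24-31 fold down onto bits
     * 16-23. For locks 8-23 the intermediate OR leaves both bit n and bit n-8
     * set, which at worst causes a spurious wakeup of a waiter on another lock.
     */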
1010
1011     static inline EventBits_t prvGetAllEventGroupBits()
1012 {
1013 #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
1014 return ( EventBits_t ) 0xffu;
1015 #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
1016 return ( EventBits_t ) 0xffffffu;
1017 #endif /* configTICK_TYPE_WIDTH_IN_BITS */
1018 }
1019
1020     void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock,
1021 uint32_t ulSave )
1022 {
1023 configASSERT( !portCHECK_IF_IN_ISR() );
1024
1025         /* Note: no need to check LIB_PICO_MULTICORE, as this check always returns true if that is not defined. */
1026 if( !portIS_FREE_RTOS_CORE() )
1027 {
1028 spin_unlock( pxLock->spin_lock, ulSave );
1029 __wfe();
1030 }
1031 else
1032 {
1033 configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
1034
1035             /* We want to hold the lock until the event bits have been set; since interrupts are currently disabled
1036              * by the spinlock, we can defer until portENABLE_INTERRUPTS is called, which is always called when
1037              * the scheduler is unlocked during this call. */
1038 configASSERT( pxLock->spin_lock );
1039 int xCoreID = ( int ) portGET_CORE_ID();
1040 pxYieldSpinLock[ xCoreID ] = pxLock->spin_lock;
1041 ulYieldSpinLockSaveValue[ xCoreID ] = ulSave;
1042 xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit( pxLock->spin_lock ),
1043 pdTRUE, pdFALSE, portMAX_DELAY );
1044 }
1045 }
1046
1047     void vPortLockInternalSpinUnlockWithNotify( struct lock_core * pxLock,
1048 uint32_t ulSave )
1049 {
1050 EventBits_t uxBits = prvGetEventGroupBit( pxLock->spin_lock );
1051
1052 if( portIS_FREE_RTOS_CORE() )
1053 {
1054 #if LIB_PICO_MULTICORE
1055 /* signal an event in case a regular core is waiting */
1056 __sev();
1057 #endif
1058 spin_unlock( pxLock->spin_lock, ulSave );
1059
1060 if( !portCHECK_IF_IN_ISR() )
1061 {
1062 xEventGroupSetBits( xEventGroup, uxBits );
1063 }
1064 else
1065 {
1066 BaseType_t xHigherPriorityTaskWoken = pdFALSE;
1067 xEventGroupSetBitsFromISR( xEventGroup, uxBits, &xHigherPriorityTaskWoken );
1068 portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
1069 }
1070 }
1071 else
1072 {
1073 __sev();
1074 #if ( portRUNNING_ON_BOTH_CORES == 0 )
1075
1076                 /* We could have sent the bits across the FIFO, which would have required us to block here if the FIFO was full,
1077                  * or we could have just set all the bits on the other side; however it seems reasonable instead to take
1078                  * the hit of another spin lock to protect an accurate bit set. */
1079 if( pxCrossCoreSpinLock != pxLock->spin_lock )
1080 {
1081 spin_lock_unsafe_blocking( pxCrossCoreSpinLock );
1082 uxCrossCoreEventBits |= uxBits;
1083 spin_unlock_unsafe( pxCrossCoreSpinLock );
1084 }
1085 else
1086 {
1087 uxCrossCoreEventBits |= uxBits;
1088 }
1089
1090                 /* This causes a FIFO IRQ on the other (FreeRTOS) core, which will set the event bits. */
1091 sio_hw->fifo_wr = 0;
1092 #endif /* portRUNNING_ON_BOTH_CORES == 0 */
1093 spin_unlock( pxLock->spin_lock, ulSave );
1094 }
1095 }
1096
1097     bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock,
1098 uint32_t ulSave,
1099 absolute_time_t uxUntil )
1100 {
1101 configASSERT( !portCHECK_IF_IN_ISR() );
1102
1103         /* Note: no need to check LIB_PICO_MULTICORE, as this check always returns true if that is not defined. */
1104 if( !portIS_FREE_RTOS_CORE() )
1105 {
1106 spin_unlock( pxLock->spin_lock, ulSave );
1107 return best_effort_wfe_or_timeout( uxUntil );
1108 }
1109 else
1110 {
1111 configASSERT( portIS_FREE_RTOS_CORE() );
1112 configASSERT( pxYieldSpinLock[ portGET_CORE_ID() ] == NULL );
1113
1114 TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil );
1115
1116 if( uxTicksToWait )
1117 {
1118 /* We want to hold the lock until the event bits have been set; since interrupts are currently disabled
1119 * by the spinlock, we can defer until portENABLE_INTERRUPTS is called which is always called when
1120 * the scheduler is unlocked during this call */
1121 configASSERT( pxLock->spin_lock );
1122 int xCoreID = ( int ) portGET_CORE_ID();
1123 pxYieldSpinLock[ xCoreID ] = pxLock->spin_lock;
1124 ulYieldSpinLockSaveValue[ xCoreID ] = ulSave;
1125 xEventGroupWaitBits( xEventGroup,
1126 prvGetEventGroupBit( pxLock->spin_lock ), pdTRUE,
1127 pdFALSE, uxTicksToWait );
1128 }
1129 else
1130 {
1131 spin_unlock( pxLock->spin_lock, ulSave );
1132 }
1133
1134 if( time_reached( uxUntil ) )
1135 {
1136 return true;
1137 }
1138 else
1139 {
1140 /* We do not want to hog the core */
1141 portYIELD();
1142 /* We aren't sure if we've reached the timeout yet; the caller will check */
1143 return false;
1144 }
1145 }
1146 }
1147
1148 #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
1149 /* runs before main */
1150         static void __attribute__( ( constructor ) ) prvRuntimeInitializer( void )
1151 {
1152 /* This must be done even before the scheduler is started, as the spin lock
1153 * is used by the overrides of the SDK wait/notify primitives */
1154 #if ( portRUNNING_ON_BOTH_CORES == 0 )
1155 pxCrossCoreSpinLock = spin_lock_instance( next_striped_spin_lock_num() );
1156 #endif /* portRUNNING_ON_BOTH_CORES */
1157
1158             /* The event group is not used prior to scheduler init, but is initialized
1159              * here since it logically belongs with the spin lock. */
1160 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
1161 xEventGroup = xEventGroupCreateStatic( &xStaticEventGroup );
1162 #else
1163
1164                 /* Note that it is slightly dubious calling this here before the scheduler is initialized,
1165                  * however the only thing it touches is the allocator, which then calls vPortEnterCritical
1166                  * and vPortExitCritical; allocating here saves us checking a one-time initialisation
1167                  * flag in some rather critical code paths. */
1168 xEventGroup = xEventGroupCreate();
1169 #endif /* configSUPPORT_STATIC_ALLOCATION */
1170 }
1171 #endif /* if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) */
1172 #endif /* configSUPPORT_PICO_SYNC_INTEROP */
1173
1174 #if ( configSUPPORT_PICO_TIME_INTEROP == 1 )
1175     void xPortSyncInternalYieldUntilBefore( absolute_time_t t )
1176 {
1177 TickType_t uxTicksToWait = prvGetTicksToWaitBefore( t );
1178
1179 if( uxTicksToWait )
1180 {
1181 vTaskDelay( uxTicksToWait );
1182 }
1183 }
1184 #endif /* configSUPPORT_PICO_TIME_INTEROP */
1185