xref: /Kernel-v10.6.2/portable/ThirdParty/GCC/RP2040/port.c (revision ef7b253b56c9788077f5ecd6c9deb4021923d646)
/*
 * FreeRTOS Kernel V10.6.2
 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 * Copyright (c) 2021 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: MIT AND BSD-3-Clause
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/*----------------------------------------------------------------------
* Implementation of functions defined in portable.h for the RP2040 port.
*----------------------------------------------------------------------*/

#include "FreeRTOS.h"
#include "task.h"
#include "rp2040_config.h"
#include "hardware/clocks.h"
#include "hardware/exception.h"

/*
 * LIB_PICO_MULTICORE == 1 if we are linked with pico_multicore (note that
 * the non-SMP FreeRTOS_Kernel is not itself linked with pico_multicore). We
 * use this flag to determine whether multi-core functionality is needed.
 */
#if ( LIB_PICO_MULTICORE == 1 )
    #include "pico/multicore.h"
#endif /* LIB_PICO_MULTICORE */

/* Constants required to manipulate the NVIC. */
#define portNVIC_SYSTICK_CTRL_REG             ( *( ( volatile uint32_t * ) 0xe000e010 ) )
#define portNVIC_SYSTICK_LOAD_REG             ( *( ( volatile uint32_t * ) 0xe000e014 ) )
#define portNVIC_SYSTICK_CURRENT_VALUE_REG    ( *( ( volatile uint32_t * ) 0xe000e018 ) )
#define portNVIC_INT_CTRL_REG                 ( *( ( volatile uint32_t * ) 0xe000ed04 ) )
#define portNVIC_SHPR3_REG                    ( *( ( volatile uint32_t * ) 0xe000ed20 ) )
#define portNVIC_SYSTICK_CLK_BIT              ( 1UL << 2UL )
#define portNVIC_SYSTICK_INT_BIT              ( 1UL << 1UL )
#define portNVIC_SYSTICK_ENABLE_BIT           ( 1UL << 0UL )
#define portNVIC_SYSTICK_COUNT_FLAG_BIT       ( 1UL << 16UL )
#define portNVIC_PENDSVSET_BIT                ( 1UL << 28UL )
#define portMIN_INTERRUPT_PRIORITY            ( 255UL )
#define portNVIC_PENDSV_PRI                   ( portMIN_INTERRUPT_PRIORITY << 16UL )
#define portNVIC_SYSTICK_PRI                  ( portMIN_INTERRUPT_PRIORITY << 24UL )

/* Constants required to set up the initial stack. */
#define portINITIAL_XPSR                      ( 0x01000000 )

/* The systick is a 24-bit counter. */
#define portMAX_24_BIT_NUMBER                 ( 0xffffffUL )

/* A fiddle factor to estimate the number of SysTick counts that would have
 * occurred while the SysTick counter is stopped during tickless idle
 * calculations. */
#ifndef portMISSED_COUNTS_FACTOR
    #define portMISSED_COUNTS_FACTOR    ( 45UL )
#endif

/* Let the user override the pre-loading of the initial LR with the address of
 * prvTaskExitError() in case it messes up unwinding of the stack in the
 * debugger. */
#ifdef configTASK_RETURN_ADDRESS
    #define portTASK_RETURN_ADDRESS    configTASK_RETURN_ADDRESS
#else
    #define portTASK_RETURN_ADDRESS    prvTaskExitError
#endif

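/*
 * Illustrative sketch (not part of this port): an application that wants a
 * custom landing point for tasks that return could add something like the
 * following to its own FreeRTOSConfig.h. The handler name is hypothetical.
 *
 *     void vApplicationTaskReturnTrap( void );
 *     #define configTASK_RETURN_ADDRESS    vApplicationTaskReturnTrap
 *
 * With that override in place, portTASK_RETURN_ADDRESS above resolves to the
 * application handler instead of prvTaskExitError().
 */
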
/*
 * Setup the timer to generate the tick interrupts.  The implementation in this
 * file is weak to allow application writers to change the timer used to
 * generate the tick interrupt.
 */
void vPortSetupTimerInterrupt( void );

/*
 * Exception handlers.
 */
void xPortPendSVHandler( void ) __attribute__( ( naked ) );
void xPortSysTickHandler( void );
void vPortSVCHandler( void );

/*
 * Start first task is a separate function so it can be tested in isolation.
 */
static void vPortStartFirstTask( void ) __attribute__( ( naked ) );

/*
 * Used to catch tasks that attempt to return from their implementing function.
 */
static void prvTaskExitError( void );

/*-----------------------------------------------------------*/

/* Each task maintains its own interrupt status in the critical nesting
 * variable. This is initialized to 0 to allow vPortEnter/ExitCritical
 * to be called before the scheduler is started. */
static UBaseType_t uxCriticalNesting;

/*-----------------------------------------------------------*/

#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
    #include "pico/lock_core.h"
    #include "hardware/irq.h"
    #include "event_groups.h"
    #if configSUPPORT_STATIC_ALLOCATION
        static StaticEventGroup_t xStaticEventGroup;
        #define pEventGroup (&xStaticEventGroup)
    #endif /* configSUPPORT_STATIC_ALLOCATION */
    static EventGroupHandle_t xEventGroup;
    #if ( LIB_PICO_MULTICORE == 1 )
        static EventBits_t uxCrossCoreEventBits;
        static spin_lock_t * pxCrossCoreSpinLock;
    #endif /* LIB_PICO_MULTICORE */

    static spin_lock_t * pxYieldSpinLock;
    static uint32_t ulYieldSpinLockSaveValue;
#endif /* configSUPPORT_PICO_SYNC_INTEROP */

/*
 * The number of SysTick increments that make up one tick period.
 */
#if ( configUSE_TICKLESS_IDLE == 1 )
    static uint32_t ulTimerCountsForOneTick = 0;
#endif /* configUSE_TICKLESS_IDLE */

/*
 * The maximum number of tick periods that can be suppressed is limited by the
 * 24 bit resolution of the SysTick timer.
 */
#if ( configUSE_TICKLESS_IDLE == 1 )
    static uint32_t xMaximumPossibleSuppressedTicks = 0;
#endif /* configUSE_TICKLESS_IDLE */

/*
 * Compensate for the CPU cycles that pass while the SysTick is stopped (low
 * power functionality only).
 */
#if ( configUSE_TICKLESS_IDLE == 1 )
    static uint32_t ulStoppedTimerCompensation = 0;
#endif /* configUSE_TICKLESS_IDLE */

/*-----------------------------------------------------------*/

#define INVALID_LAUNCH_CORE_NUM 0xffu
static uint8_t ucLaunchCoreNum = INVALID_LAUNCH_CORE_NUM;
#define portIS_FREE_RTOS_CORE() ( ucLaunchCoreNum == get_core_num() )

/*
 * See header file for description.
 */
StackType_t * pxPortInitialiseStack( StackType_t * pxTopOfStack,
                                     TaskFunction_t pxCode,
                                     void * pvParameters )
{
    /* Simulate the stack frame as it would be created by a context switch
     * interrupt. */
    pxTopOfStack--;                                          /* Offset added to account for the way the MCU uses the stack on entry/exit of interrupts. */
    *pxTopOfStack = portINITIAL_XPSR;                        /* xPSR */
    pxTopOfStack--;
    *pxTopOfStack = ( StackType_t ) pxCode;                  /* PC */
    pxTopOfStack--;
    *pxTopOfStack = ( StackType_t ) portTASK_RETURN_ADDRESS; /* LR */
    pxTopOfStack -= 5;                                       /* R12, R3, R2 and R1. */
    *pxTopOfStack = ( StackType_t ) pvParameters;            /* R0 */
    pxTopOfStack -= 8;                                       /* R11..R4. */

    return pxTopOfStack;
}
/*-----------------------------------------------------------*/

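/*
 * For reference, the frame laid out by pxPortInitialiseStack(), expressed as
 * word offsets from the returned stack pointer (the task's initial PSP):
 *
 *     +15      : xPSR (portINITIAL_XPSR, Thumb bit set)
 *     +14      : PC   (pxCode)
 *     +13      : LR   (portTASK_RETURN_ADDRESS)
 *     +12..+9  : R12, R3, R2, R1 (not initialised)
 *     +8       : R0   (pvParameters)
 *     +7..+0   : R11..R4 (not initialised)
 *
 * vPortStartFirstTask() and xPortPendSVHandler() below unwind exactly this
 * layout.
 */
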
static void prvTaskExitError( void )
{
    /* A function that implements a task must not exit or attempt to return to
     * its caller as there is nothing to return to.  If a task wants to exit it
     * should instead call vTaskDelete( NULL ). */
    panic_unsupported();
}
/*-----------------------------------------------------------*/

void vPortSVCHandler( void )
{
    /* This function is no longer used, but retained for backward
     * compatibility. */
}
/*-----------------------------------------------------------*/

void vPortStartFirstTask( void )
{
    __asm volatile (
        "   .syntax unified             \n"
        "   ldr  r2, pxCurrentTCBConst1 \n"/* Obtain location of pxCurrentTCB. */
        "   ldr  r3, [r2]               \n"
        "   ldr  r0, [r3]               \n"/* The first item in pxCurrentTCB is the task top of stack. */
        "   adds r0, #32                \n"/* Discard everything up to r0. */
        "   msr  psp, r0                \n"/* This is now the new top of stack to use in the task. */
        "   movs r0, #2                 \n"/* Switch to the psp stack. */
        "   msr  CONTROL, r0            \n"
        "   isb                         \n"
        "   pop  {r0-r5}                \n"/* Pop the registers that are saved automatically. */
        "   mov  lr, r5                 \n"/* lr is now in r5. */
        "   pop  {r3}                   \n"/* Return address is now in r3. */
        "   pop  {r2}                   \n"/* Pop and discard XPSR. */
        "   cpsie i                     \n"/* The first task has its context and interrupts can be enabled. */
        "   bx   r3                     \n"/* Finally, jump to the user defined task code. */
    "   .align 4                       \n"
    "pxCurrentTCBConst1: .word pxCurrentTCB\n"
    );
}
/*-----------------------------------------------------------*/

#if ( LIB_PICO_MULTICORE == 1 ) && ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
    static void prvFIFOInterruptHandler()
    {
        /* We must remove the contents (which we don't care about)
         * to clear the IRQ */
        multicore_fifo_drain();
        multicore_fifo_clear_irq();
        BaseType_t xHigherPriorityTaskWoken = pdFALSE;
        uint32_t ulSave = spin_lock_blocking( pxCrossCoreSpinLock );
        EventBits_t ulBits = uxCrossCoreEventBits;
        uxCrossCoreEventBits &= ~ulBits;
        spin_unlock( pxCrossCoreSpinLock, ulSave );
        xEventGroupSetBitsFromISR( xEventGroup, ulBits, &xHigherPriorityTaskWoken );
        portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
    }
#endif

/*
 * See header file for description.
 */
BaseType_t xPortStartScheduler( void )
{
    /* Make PendSV, SVCall and SysTick the same priority as the kernel. */
    portNVIC_SHPR3_REG |= portNVIC_PENDSV_PRI;
    portNVIC_SHPR3_REG |= portNVIC_SYSTICK_PRI;

    #if ( configUSE_DYNAMIC_EXCEPTION_HANDLERS == 1 )
        exception_set_exclusive_handler( PENDSV_EXCEPTION, xPortPendSVHandler );
        exception_set_exclusive_handler( SYSTICK_EXCEPTION, xPortSysTickHandler );
        exception_set_exclusive_handler( SVCALL_EXCEPTION, vPortSVCHandler );
    #endif

    /* Start the timer that generates the tick ISR.  Interrupts are disabled
     * here already. */
    vPortSetupTimerInterrupt();

    /* Initialise the critical nesting count ready for the first task. */
    uxCriticalNesting = 0;

    ucLaunchCoreNum = get_core_num();
    #if ( LIB_PICO_MULTICORE == 1 )
        #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
            multicore_fifo_clear_irq();
            multicore_fifo_drain();
            uint32_t irq_num = 15 + get_core_num(); /* SIO_IRQ_PROC0 is IRQ 15, SIO_IRQ_PROC1 is IRQ 16. */
            irq_set_priority( irq_num, portMIN_INTERRUPT_PRIORITY );
            irq_set_exclusive_handler( irq_num, prvFIFOInterruptHandler );
            irq_set_enabled( irq_num, 1 );
        #endif
    #endif

    /* Start the first task. */
    vPortStartFirstTask();

    /* Should never get here as the tasks will now be executing!  Call the task
     * exit error function to prevent compiler warnings about a static function
     * not being called in the case that the application writer overrides this
     * functionality by defining configTASK_RETURN_ADDRESS.  Call
     * vTaskSwitchContext() so link time optimisation does not remove the
     * symbol. */
    vTaskSwitchContext();
    prvTaskExitError();

    /* Should not get here! */
    return 0;
}
/*-----------------------------------------------------------*/

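/*
 * Illustrative sketch (not part of this port, not compiled): a typical
 * application entry point that ends up in xPortStartScheduler() above via
 * vTaskStartScheduler(). The task and function names here are hypothetical.
 */
#if 0
    static void prvBlinkTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            /* Toggle an LED, do work, etc. */
            vTaskDelay( pdMS_TO_TICKS( 500 ) );
        }
    }

    int main( void )
    {
        xTaskCreate( prvBlinkTask, "Blink", configMINIMAL_STACK_SIZE, NULL,
                     tskIDLE_PRIORITY + 1, NULL );

        /* Never returns while the scheduler is running. */
        vTaskStartScheduler();

        for( ; ; )
        {
        }
    }
#endif
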
void vPortEndScheduler( void )
{
    /* Not implemented in ports where there is nothing to return to. */
    panic_unsupported();
}
/*-----------------------------------------------------------*/

void vPortYield( void )
{
    #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
        /* We are not in an ISR, and pxYieldSpinLock is always dealt with and
         * cleared when interrupts are re-enabled, so it should be NULL here. */
        configASSERT( pxYieldSpinLock == NULL );
    #endif /* configSUPPORT_PICO_SYNC_INTEROP */

    /* Set a PendSV to request a context switch. */
    portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;

    /* Barriers are normally not required but do ensure the code is completely
     * within the specified behaviour for the architecture. */
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}

/*-----------------------------------------------------------*/

void vPortEnterCritical( void )
{
    portDISABLE_INTERRUPTS();
    uxCriticalNesting++;
    __asm volatile ( "dsb" ::: "memory" );
    __asm volatile ( "isb" );
}
/*-----------------------------------------------------------*/

void vPortExitCritical( void )
{
    configASSERT( uxCriticalNesting );
    uxCriticalNesting--;
    if( uxCriticalNesting == 0 )
    {
        portENABLE_INTERRUPTS();
    }
}

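/*
 * Illustrative sketch (not part of this port, not compiled): application code
 * normally reaches the two functions above through the taskENTER_CRITICAL()/
 * taskEXIT_CRITICAL() macros rather than calling them directly. The shared
 * counter below is hypothetical.
 */
#if 0
    static volatile uint32_t ulSharedCounter;

    void vIncrementSharedCounter( void )
    {
        taskENTER_CRITICAL();   /* Nests; interrupts stay masked until the matching exit. */
        {
            ulSharedCounter++;
        }
        taskEXIT_CRITICAL();    /* Re-enables interrupts only when nesting returns to zero. */
    }
#endif
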
void vPortEnableInterrupts( void )
{
    #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
        if( pxYieldSpinLock )
        {
            spin_unlock( pxYieldSpinLock, ulYieldSpinLockSaveValue );
            pxYieldSpinLock = NULL;
        }
    #endif
    __asm volatile ( " cpsie i " ::: "memory" );
}

/*-----------------------------------------------------------*/

uint32_t ulSetInterruptMaskFromISR( void )
{
    __asm volatile (
        " mrs r0, PRIMASK    \n"
        " cpsid i            \n"
        " bx lr                "
        ::: "memory"
        );
}
/*-----------------------------------------------------------*/

void vClearInterruptMaskFromISR( __attribute__( ( unused ) ) uint32_t ulMask )
{
    __asm volatile (
        " msr PRIMASK, r0    \n"
        " bx lr                "
        ::: "memory"
        );
}
/*-----------------------------------------------------------*/

void xPortPendSVHandler( void )
{
    /* This is a naked function. */

    __asm volatile
    (
        "   .syntax unified                     \n"
        "   mrs r0, psp                         \n"
        "                                       \n"
        "   ldr r3, pxCurrentTCBConst2          \n"/* Get the location of the current TCB. */
        "   ldr r2, [r3]                        \n"
        "                                       \n"
        "   subs r0, r0, #32                    \n"/* Make space for the remaining low registers. */
        "   str r0, [r2]                        \n"/* Save the new top of stack. */
        "   stmia r0!, {r4-r7}                  \n"/* Store the low registers that are not saved automatically. */
        "   mov r4, r8                          \n"/* Store the high registers. */
        "   mov r5, r9                          \n"
        "   mov r6, r10                         \n"
        "   mov r7, r11                         \n"
        "   stmia r0!, {r4-r7}                  \n"
        #if portUSE_DIVIDER_SAVE_RESTORE
            "   movs r2, #0xd                   \n"/* Store the divider state. */
            "   lsls r2, #28                    \n"
            /* We expect that the divider is ready at this point (which is
             * necessary to safely save/restore), because:
             * a) if we have not been interrupted since we entered this method,
             *    then >8 cycles have clearly passed, so the divider is done
             * b) if we were interrupted in the interim, then any "safe" - i.e.
             *    does the right thing in an IRQ - use of the divider should
             *    have waited for any in-process divide to complete, saved and
             *    then fully restored the result, thus the result is ready in
             *    that case too. */
            "   ldr r4, [r2, #0x60]             \n"/* SIO_DIV_UDIVIDEND_OFFSET */
            "   ldr r5, [r2, #0x64]             \n"/* SIO_DIV_UDIVISOR_OFFSET */
            "   ldr r6, [r2, #0x74]             \n"/* SIO_DIV_REMAINDER_OFFSET */
            "   ldr r7, [r2, #0x70]             \n"/* SIO_DIV_QUOTIENT_OFFSET */
            /* We actually save the divider state in the 4 words below
             * our recorded stack pointer, so as not to disrupt the stack
             * frame expected by debuggers - this is addressed by
             * portEXTRA_STACK_SIZE */
            "   subs r0, r0, #48                \n"
            "   stmia r0!, {r4-r7}              \n"
        #endif /* portUSE_DIVIDER_SAVE_RESTORE */
        "   push {r3, r14}                      \n"
        "   cpsid i                             \n"
        "   bl vTaskSwitchContext               \n"
        "   cpsie i                             \n"
        "   pop {r2, r3}                        \n"/* lr goes in r3. r2 now holds tcb pointer. */
        "                                       \n"
        "   ldr r1, [r2]                        \n"
        "   ldr r0, [r1]                        \n"/* The first item in pxCurrentTCB is the task top of stack. */
        "   adds r0, r0, #16                    \n"/* Move to the high registers. */
        "   ldmia r0!, {r4-r7}                  \n"/* Pop the high registers. */
        "   mov r8, r4                          \n"
        "   mov r9, r5                          \n"
        "   mov r10, r6                         \n"
        "   mov r11, r7                         \n"
        "                                       \n"
        "   msr psp, r0                         \n"/* Remember the new top of stack for the task. */
        "                                       \n"
        #if portUSE_DIVIDER_SAVE_RESTORE
        "   movs r2, #0xd                       \n"/* Pop the divider state. */
        "   lsls r2, #28                        \n"
        "   subs r0, r0, #48                    \n"/* Go back for the divider state. */
        "   ldmia r0!, {r4-r7}                  \n"/* Pop the divider state. */
        /* Note always restore via SIO_DIV_UDIVI*, because we will overwrite the
         * results stopping the calculation anyway, however the sign of results
         * is adjusted by the h/w at read time based on whether the last started
         * division was signed and the inputs' signs differed */
        "   str r4, [r2, #0x60]                 \n"/* SIO_DIV_UDIVIDEND_OFFSET */
        "   str r5, [r2, #0x64]                 \n"/* SIO_DIV_UDIVISOR_OFFSET */
        "   str r6, [r2, #0x74]                 \n"/* SIO_DIV_REMAINDER_OFFSET */
        "   str r7, [r2, #0x70]                 \n"/* SIO_DIV_QUOTIENT_OFFSET */
        #else
        "   subs r0, r0, #32                    \n"/* Go back for the low registers that are not automatically restored. */
        #endif /* portUSE_DIVIDER_SAVE_RESTORE */
        "   ldmia r0!, {r4-r7}                  \n"/* Pop low registers.  */
        "                                       \n"
        "   bx r3                               \n"
    "   .align 4                            \n"
    "pxCurrentTCBConst2: .word pxCurrentTCB \n"
    );
}
/*-----------------------------------------------------------*/

void xPortSysTickHandler( void )
{
    uint32_t ulPreviousMask;

    ulPreviousMask = portSET_INTERRUPT_MASK_FROM_ISR();
    {
        /* Increment the RTOS tick. */
        if( xTaskIncrementTick() != pdFALSE )
        {
            /* Pend a context switch. */
            portNVIC_INT_CTRL_REG = portNVIC_PENDSVSET_BIT;
        }
    }
    portCLEAR_INTERRUPT_MASK_FROM_ISR( ulPreviousMask );
}
/*-----------------------------------------------------------*/

/*
 * Setup the systick timer to generate the tick interrupts at the required
 * frequency.
 */
__attribute__( ( weak ) ) void vPortSetupTimerInterrupt( void )
{
    /* Calculate the constants required to configure the tick interrupt. */
    #if ( configUSE_TICKLESS_IDLE == 1 )
        {
            ulTimerCountsForOneTick = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ );
            xMaximumPossibleSuppressedTicks = portMAX_24_BIT_NUMBER / ulTimerCountsForOneTick;
            ulStoppedTimerCompensation = portMISSED_COUNTS_FACTOR;
        }
    #endif /* configUSE_TICKLESS_IDLE */

    /* Stop and reset the SysTick. */
    portNVIC_SYSTICK_CTRL_REG = 0UL;
    portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

    /* Configure SysTick to interrupt at the requested rate. */
    portNVIC_SYSTICK_LOAD_REG = ( clock_get_hz( clk_sys ) / configTICK_RATE_HZ ) - 1UL;
    portNVIC_SYSTICK_CTRL_REG = portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT | portNVIC_SYSTICK_ENABLE_BIT;
}
/*-----------------------------------------------------------*/

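/*
 * Worked example of the arithmetic above, assuming the SDK default 125 MHz
 * clk_sys and a configTICK_RATE_HZ of 1000 (both are configuration dependent):
 *
 *     ulTimerCountsForOneTick         = 125000000 / 1000  = 125000
 *     portNVIC_SYSTICK_LOAD_REG       = 125000 - 1        = 124999
 *     xMaximumPossibleSuppressedTicks = 0xffffff / 125000  = 134
 *
 * The reload value fits comfortably in the 24-bit SysTick counter, and
 * tickless idle can therefore suppress at most 134 ticks per sleep.
 */
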
#if ( configUSE_TICKLESS_IDLE == 1 )

    __attribute__( ( weak ) ) void vPortSuppressTicksAndSleep( TickType_t xExpectedIdleTime )
    {
        uint32_t ulReloadValue, ulCompleteTickPeriods, ulCompletedSysTickDecrements;
        TickType_t xModifiableIdleTime;

        /* Make sure the SysTick reload value does not overflow the counter. */
        if( xExpectedIdleTime > xMaximumPossibleSuppressedTicks )
        {
            xExpectedIdleTime = xMaximumPossibleSuppressedTicks;
        }

        /* Stop the SysTick momentarily.  The time the SysTick is stopped for
         * is accounted for as best it can be, but using the tickless mode will
         * inevitably result in some tiny drift of the time maintained by the
         * kernel with respect to calendar time. */
        portNVIC_SYSTICK_CTRL_REG &= ~portNVIC_SYSTICK_ENABLE_BIT;

        /* Calculate the reload value required to wait xExpectedIdleTime
         * tick periods.  -1 is used because this code will execute part way
         * through one of the tick periods. */
        ulReloadValue = portNVIC_SYSTICK_CURRENT_VALUE_REG + ( ulTimerCountsForOneTick * ( xExpectedIdleTime - 1UL ) );

        if( ulReloadValue > ulStoppedTimerCompensation )
        {
            ulReloadValue -= ulStoppedTimerCompensation;
        }

        /* Enter a critical section but don't use the taskENTER_CRITICAL()
         * method as that will mask interrupts that should exit sleep mode. */
        __asm volatile ( "cpsid i" ::: "memory" );
        __asm volatile ( "dsb" );
        __asm volatile ( "isb" );

        /* If a context switch is pending or a task is waiting for the scheduler
         * to be unsuspended then abandon the low power entry. */
        if( eTaskConfirmSleepModeStatus() == eAbortSleep )
        {
            /* Restart from whatever is left in the count register to complete
             * this tick period. */
            portNVIC_SYSTICK_LOAD_REG = portNVIC_SYSTICK_CURRENT_VALUE_REG;

            /* Restart SysTick. */
            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;

            /* Reset the reload register to the value required for normal tick
             * periods. */
            portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;

            /* Re-enable interrupts - see comments above the cpsid instruction
             * earlier in this function. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
        else
        {
            /* Set the new reload value. */
            portNVIC_SYSTICK_LOAD_REG = ulReloadValue;

            /* Clear the SysTick count flag and set the count value back to
             * zero. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;

            /* Restart SysTick. */
            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;

            /* Sleep until something happens.  configPRE_SLEEP_PROCESSING() can
             * set its parameter to 0 to indicate that its implementation contains
             * its own wait for interrupt or wait for event instruction, and so wfi
             * should not be executed again.  However, the original expected idle
             * time variable must remain unmodified, so a copy is taken. */
            xModifiableIdleTime = xExpectedIdleTime;
            configPRE_SLEEP_PROCESSING( xModifiableIdleTime );

            if( xModifiableIdleTime > 0 )
            {
                __asm volatile ( "dsb" ::: "memory" );
                __asm volatile ( "wfi" );
                __asm volatile ( "isb" );
            }

            configPOST_SLEEP_PROCESSING( xExpectedIdleTime );

            /* Re-enable interrupts to allow the interrupt that brought the MCU
             * out of sleep mode to execute immediately.  See comments above the
             * cpsid instruction earlier in this function. */
            __asm volatile ( "cpsie i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable interrupts again because the clock is about to be stopped
             * and interrupts that execute while the clock is stopped will increase
             * any slippage between the time maintained by the RTOS and calendar
             * time. */
            __asm volatile ( "cpsid i" ::: "memory" );
            __asm volatile ( "dsb" );
            __asm volatile ( "isb" );

            /* Disable the SysTick clock without reading the
             * portNVIC_SYSTICK_CTRL_REG register to ensure the
             * portNVIC_SYSTICK_COUNT_FLAG_BIT is not cleared if it is set.  Again,
             * the time the SysTick is stopped for is accounted for as best it can
             * be, but using the tickless mode will inevitably result in some tiny
             * drift of the time maintained by the kernel with respect to calendar
             * time. */
            portNVIC_SYSTICK_CTRL_REG = ( portNVIC_SYSTICK_CLK_BIT | portNVIC_SYSTICK_INT_BIT );

            /* Determine if the SysTick clock has already counted to zero and
             * been set back to the current reload value (the reload back being
             * correct for the entire expected idle time) or if the SysTick is yet
             * to count to zero (in which case an interrupt other than the SysTick
             * must have brought the system out of sleep mode). */
            if( ( portNVIC_SYSTICK_CTRL_REG & portNVIC_SYSTICK_COUNT_FLAG_BIT ) != 0 )
            {
                uint32_t ulCalculatedLoadValue;

                /* The tick interrupt is already pending, and the SysTick count
                 * reloaded with ulReloadValue.  Reset the
                 * portNVIC_SYSTICK_LOAD_REG with whatever remains of this tick
                 * period. */
                ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL ) - ( ulReloadValue - portNVIC_SYSTICK_CURRENT_VALUE_REG );

                /* Don't allow a tiny value, or values that have somehow
                 * underflowed because the post sleep hook did something
                 * that took too long. */
                if( ( ulCalculatedLoadValue < ulStoppedTimerCompensation ) || ( ulCalculatedLoadValue > ulTimerCountsForOneTick ) )
                {
                    ulCalculatedLoadValue = ( ulTimerCountsForOneTick - 1UL );
                }

                portNVIC_SYSTICK_LOAD_REG = ulCalculatedLoadValue;

                /* As the pending tick will be processed as soon as this
                 * function exits, the tick value maintained by the kernel is
                 * stepped forward by one less than the time spent waiting. */
                ulCompleteTickPeriods = xExpectedIdleTime - 1UL;
            }
            else
            {
                /* Something other than the tick interrupt ended the sleep.
                 * Work out how long the sleep lasted rounded to complete tick
                 * periods (not the ulReload value which accounted for part
                 * ticks). */
                ulCompletedSysTickDecrements = ( xExpectedIdleTime * ulTimerCountsForOneTick ) - portNVIC_SYSTICK_CURRENT_VALUE_REG;

                /* How many complete tick periods passed while the processor
                 * was waiting? */
                ulCompleteTickPeriods = ulCompletedSysTickDecrements / ulTimerCountsForOneTick;

                /* The reload value is set to whatever fraction of a single tick
                 * period remains. */
                portNVIC_SYSTICK_LOAD_REG = ( ( ulCompleteTickPeriods + 1UL ) * ulTimerCountsForOneTick ) - ulCompletedSysTickDecrements;
            }

            /* Restart SysTick so it runs from portNVIC_SYSTICK_LOAD_REG
             * again, then set portNVIC_SYSTICK_LOAD_REG back to its standard
             * value. */
            portNVIC_SYSTICK_CURRENT_VALUE_REG = 0UL;
            portNVIC_SYSTICK_CTRL_REG |= portNVIC_SYSTICK_ENABLE_BIT;
            vTaskStepTick( ulCompleteTickPeriods );
            portNVIC_SYSTICK_LOAD_REG = ulTimerCountsForOneTick - 1UL;

            /* Exit with interrupts enabled. */
            __asm volatile ( "cpsie i" ::: "memory" );
        }
    }

#endif /* configUSE_TICKLESS_IDLE */

#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 ) || ( configSUPPORT_PICO_TIME_INTEROP == 1 )
    static TickType_t prvGetTicksToWaitBefore( absolute_time_t t )
    {
        int64_t xDelay = absolute_time_diff_us( get_absolute_time(), t );
        const uint32_t ulTickPeriod = 1000000 / configTICK_RATE_HZ;
        xDelay -= ulTickPeriod;
        if( xDelay >= ulTickPeriod )
        {
            return xDelay / ulTickPeriod;
        }
        return 0;
    }
#endif

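/*
 * Worked example of the conversion above, assuming configTICK_RATE_HZ of 1000
 * (so ulTickPeriod is 1000us): for a target 10500us in the future,
 * xDelay = 10500 - 1000 = 9500, and 9500 / 1000 = 9 ticks are returned.
 * Subtracting a full tick period first, then rounding down, ensures the caller
 * wakes before the target time rather than after it; a result of 0 means the
 * callers below do not block on the tick at all.
 */
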
#if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
    uint32_t ulPortLockGetCurrentOwnerId()
    {
        if( portIS_FREE_RTOS_CORE() )
        {
            uint32_t exception = __get_current_exception();
            if( !exception )
            {
                return ( uintptr_t ) xTaskGetCurrentTaskHandle();
            }
            /* Note: since ROM is at 0x00000000, these can't be confused with
             * valid task handles (pointers) in RAM */
            /* We make all exception handler/core combinations distinct owners */
            return get_core_num() + exception * 2;
        }
        /* Note: since ROM is at 0x00000000, this can't be confused with
         * valid task handles (pointers) in RAM */
        return get_core_num();
    }

    static inline EventBits_t prvGetEventGroupBit( spin_lock_t * spinLock )
    {
        uint32_t ulBit;
        #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
            ulBit = 1u << ( spin_lock_get_num( spinLock ) & 0x7u );
        #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
            ulBit = 1u << spin_lock_get_num( spinLock );
            /* reduce to range 0-24 */
            ulBit |= ulBit << 8u;
            ulBit >>= 8u;
        #endif /* configTICK_TYPE_WIDTH_IN_BITS */
        return ( EventBits_t ) ulBit;
    }

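    /*
     * Worked example of the folding above (32-bit tick type): an event group
     * only has 24 usable bits, while the RP2040 provides 32 spin locks, so the
     * lock number is folded into the 0-23 range. Spin lock 3 maps to bit 3
     * (0x808 >> 8 == 0x08), while spin lock 31 maps to bit 23 (the shifted
     * copy overflows to zero, leaving 0x80000000 >> 8 == 0x00800000). Distinct
     * locks may therefore share an event bit, which is benign because the SDK
     * lock primitives re-check their condition after being woken.
     */
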
    static inline EventBits_t prvGetAllEventGroupBits()
    {
        #if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
            return ( EventBits_t ) 0xffu;
        #elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
            return ( EventBits_t ) 0xffffffu;
        #endif /* configTICK_TYPE_WIDTH_IN_BITS */
    }

    void vPortLockInternalSpinUnlockWithWait( struct lock_core * pxLock, uint32_t ulSave )
    {
        configASSERT( !portCHECK_IF_IN_ISR() );
        if( !portIS_FREE_RTOS_CORE() )
        {
            spin_unlock( pxLock->spin_lock, ulSave );
            __wfe();
        }
        else
        {
            configASSERT( pxYieldSpinLock == NULL );

            /* We want to hold the lock until the event bits have been set; since interrupts are currently disabled
             * by the spinlock, we can defer until portENABLE_INTERRUPTS is called, which is always called when
             * the scheduler is unlocked during this call */
            configASSERT( pxLock->spin_lock );
            pxYieldSpinLock = pxLock->spin_lock;
            ulYieldSpinLockSaveValue = ulSave;
            xEventGroupWaitBits( xEventGroup, prvGetEventGroupBit( pxLock->spin_lock ),
                                 pdTRUE, pdFALSE, portMAX_DELAY );
        }
    }

    void vPortLockInternalSpinUnlockWithNotify( struct lock_core * pxLock, uint32_t ulSave )
    {
        EventBits_t uxBits = prvGetEventGroupBit( pxLock->spin_lock );
        if( portIS_FREE_RTOS_CORE() )
        {
            #if LIB_PICO_MULTICORE
                /* signal an event in case a regular core is waiting */
                __sev();
            #endif
            spin_unlock( pxLock->spin_lock, ulSave );
            if( !portCHECK_IF_IN_ISR() )
            {
                xEventGroupSetBits( xEventGroup, uxBits );
            }
            else
            {
                BaseType_t xHigherPriorityTaskWoken = pdFALSE;
                xEventGroupSetBitsFromISR( xEventGroup, uxBits, &xHigherPriorityTaskWoken );
                portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
            }
        }
        else
        {
            __sev();
            #if ( LIB_PICO_MULTICORE == 1 )
                /* We could send the bits across the FIFO, which would have required us to block here if the FIFO was full,
                 * or we could have just set all bits on the other side, however it seems reasonable instead to take
                 * the hit of another spin lock to protect an accurate bit set. */
                if( pxCrossCoreSpinLock != pxLock->spin_lock )
                {
                    spin_lock_unsafe_blocking( pxCrossCoreSpinLock );
                    uxCrossCoreEventBits |= uxBits;
                    spin_unlock_unsafe( pxCrossCoreSpinLock );
                }
                else
                {
                    uxCrossCoreEventBits |= uxBits;
                }
                /* This raises the FIFO IRQ on the other (FreeRTOS) core, which will then set the event bits */
                sio_hw->fifo_wr = 0;
            #endif /* LIB_PICO_MULTICORE */
            spin_unlock( pxLock->spin_lock, ulSave );
        }
    }

    bool xPortLockInternalSpinUnlockWithBestEffortWaitOrTimeout( struct lock_core * pxLock, uint32_t ulSave, absolute_time_t uxUntil )
    {
        configASSERT( !portCHECK_IF_IN_ISR() );
        /* Note there is no need to check LIB_PICO_MULTICORE here, as
         * portIS_FREE_RTOS_CORE() always returns true when it is not defined */
        if( !portIS_FREE_RTOS_CORE() )
        {
            spin_unlock( pxLock->spin_lock, ulSave );
            return best_effort_wfe_or_timeout( uxUntil );
        }
        else
        {
            configASSERT( pxYieldSpinLock == NULL );

            TickType_t uxTicksToWait = prvGetTicksToWaitBefore( uxUntil );
            if( uxTicksToWait )
            {
                /* We want to hold the lock until the event bits have been set; since interrupts are currently disabled
                 * by the spinlock, we can defer until portENABLE_INTERRUPTS is called, which is always called when
                 * the scheduler is unlocked during this call */
                configASSERT( pxLock->spin_lock );
                pxYieldSpinLock = pxLock->spin_lock;
                ulYieldSpinLockSaveValue = ulSave;
                xEventGroupWaitBits( xEventGroup,
                                     prvGetEventGroupBit( pxLock->spin_lock ), pdTRUE,
                                     pdFALSE, uxTicksToWait );
                /* sanity check that interrupts were disabled, then re-enabled during the call, which will have
                 * taken care of the yield */
                configASSERT( pxYieldSpinLock == NULL );
            }
            else
            {
                spin_unlock( pxLock->spin_lock, ulSave );
            }
            if( time_reached( uxUntil ) )
            {
                return true;
            }
            else
            {
                /* We do not want to hog the core */
                portYIELD();
                /* We aren't sure if we've reached the timeout yet; the caller will check */
                return false;
            }
        }
    }

    #if ( configSUPPORT_PICO_SYNC_INTEROP == 1 )
        /* runs before main */
        static void __attribute__( ( constructor ) ) prvRuntimeInitializer( void )
        {
            /* This must be done even before the scheduler is started, as the spin lock
             * is used by the overrides of the SDK wait/notify primitives */
            #if ( LIB_PICO_MULTICORE == 1 )
                pxCrossCoreSpinLock = spin_lock_instance( next_striped_spin_lock_num() );
            #endif /* LIB_PICO_MULTICORE */

            /* The event group is not used prior to scheduler init, but is initialized
             * here since it logically belongs with the spin lock */
            #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
                xEventGroup = xEventGroupCreateStatic( &xStaticEventGroup );
            #else
                /* Note that it is slightly dubious calling this here before the scheduler is initialized,
                 * however the only thing it touches is the allocator which then calls vPortEnterCritical
                 * and vPortExitCritical, and allocating here saves us checking the one time initialized variable in
                 * some rather critical code paths */
                xEventGroup = xEventGroupCreate();
            #endif /* configSUPPORT_STATIC_ALLOCATION */
        }
    #endif
#endif /* configSUPPORT_PICO_SYNC_INTEROP */

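/*
 * Illustrative sketch (not part of this port, not compiled): with
 * configSUPPORT_PICO_SYNC_INTEROP enabled, code using the SDK's own sync
 * primitives can block from a FreeRTOS task, because the SDK's internal
 * lock wait/notify hooks resolve to the vPortLockInternal* functions above.
 * The mutex and task below are hypothetical.
 */
#if 0
    #include "pico/mutex.h"

    static mutex_t xSdkMutex;

    static void prvSdkInteropTask( void * pvParameters )
    {
        ( void ) pvParameters;
        mutex_init( &xSdkMutex );

        for( ; ; )
        {
            /* Blocks the calling task (not the whole core) if contended. */
            mutex_enter_blocking( &xSdkMutex );
            /* ... access the shared resource ... */
            mutex_exit( &xSdkMutex );

            vTaskDelay( pdMS_TO_TICKS( 10 ) );
        }
    }
#endif
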
#if ( configSUPPORT_PICO_TIME_INTEROP == 1 )
    void xPortSyncInternalYieldUntilBefore( absolute_time_t t )
    {
        TickType_t uxTicksToWait = prvGetTicksToWaitBefore( t );
        if( uxTicksToWait )
        {
            vTaskDelay( uxTicksToWait );
        }
    }
#endif /* configSUPPORT_PICO_TIME_INTEROP */
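
/*
 * Illustrative sketch (not part of this port, not compiled): with
 * configSUPPORT_PICO_TIME_INTEROP enabled, SDK time calls made from a task
 * are expected to yield through xPortSyncInternalYieldUntilBefore() above
 * rather than busy-wait, so other tasks keep running during the sleep. The
 * task below is hypothetical.
 */
#if 0
    #include "pico/time.h"

    static void prvSdkSleepTask( void * pvParameters )
    {
        ( void ) pvParameters;

        for( ; ; )
        {
            /* SDK sleep; under FreeRTOS the bulk of the wait becomes a vTaskDelay(). */
            sleep_ms( 250 );
        }
    }
#endif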
877