1 /*
2 * FreeRTOS Kernel V10.4.3
3 * Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 * the Software, and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * https://www.FreeRTOS.org
23 * https://github.com/FreeRTOS
24 *
25 */
26
27 /* Standard includes. */
28 #include <stdlib.h>
29 #include <string.h>
30
31 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
32 * all the API functions to use the MPU wrappers. That should only be done when
33 * task.h is included from an application file. */
34 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
35
36 /* FreeRTOS includes. */
37 #include "FreeRTOS.h"
38 #include "task.h"
39 #include "timers.h"
40 #include "stack_macros.h"
41
42 #ifdef ESP_PLATFORM
43 #define taskCRITICAL_MUX &xTaskQueueMutex
44 #undef taskENTER_CRITICAL
45 #undef taskEXIT_CRITICAL
46 #undef taskENTER_CRITICAL_ISR
47 #undef taskEXIT_CRITICAL_ISR
48 #define taskENTER_CRITICAL( ) portENTER_CRITICAL( taskCRITICAL_MUX )
49 #define taskEXIT_CRITICAL( ) portEXIT_CRITICAL( taskCRITICAL_MUX )
50 #define taskENTER_CRITICAL_ISR( ) portENTER_CRITICAL_ISR( taskCRITICAL_MUX )
51 #define taskEXIT_CRITICAL_ISR( ) portEXIT_CRITICAL_ISR( taskCRITICAL_MUX )
52 #undef _REENT_INIT_PTR
53 #define _REENT_INIT_PTR esp_reent_init
54 #endif
55
56 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
57 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
58 * for the header files above, but not in this file, in order to generate the
59 * correct privileged Vs unprivileged linkage and placement. */
60 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
61
62 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
63 * functions but without including stdio.h here. */
64 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
65
66 /* At the bottom of this file are two optional functions that can be used
67 * to generate human readable text from the raw data generated by the
68 * uxTaskGetSystemState() function. Note the formatting functions are provided
69 * for convenience only, and are NOT considered part of the kernel. */
70 #include <stdio.h>
71 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
72
73 #if ( configUSE_PREEMPTION == 0 )
74
75 /* If the cooperative scheduler is being used then a yield should not be
76 * performed just because a higher priority task has been woken. */
77 #define taskYIELD_IF_USING_PREEMPTION()
78 #else
79 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
80 #endif
81
82 /* Values that can be assigned to the ucNotifyState member of the TCB. */
83 #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
84 #define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
85 #define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
86
87 /*
88 * The value used to fill the stack of a task when the task is created. This
89 * is used purely for checking the high water mark for tasks.
90 */
91 #define tskSTACK_FILL_BYTE ( 0xa5U )
92
93 /* Bits used to record how a task's stack and TCB were allocated. */
94 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
95 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
96 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
97
98 /* If any of the following are set then task stacks are filled with a known
99 * value so the high water mark can be determined. If none of the following are
100 * set then don't fill the stack so there is no unnecessary dependency on memset. */
101 #if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
102 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
103 #else
104 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
105 #endif
106
107 /*
108 * Macros used by vListTask to indicate which state a task is in.
109 */
110 #define tskRUNNING_CHAR ( 'X' )
111 #define tskBLOCKED_CHAR ( 'B' )
112 #define tskREADY_CHAR ( 'R' )
113 #define tskDELETED_CHAR ( 'D' )
114 #define tskSUSPENDED_CHAR ( 'S' )
115
116 /*
117 * Some kernel aware debuggers require the data the debugger needs access to to
118 * be global, rather than file scope.
119 */
120 #ifdef portREMOVE_STATIC_QUALIFIER
121 #define static
122 #endif
123
124 /* The name allocated to the Idle task. This can be overridden by defining
125 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
126 #ifndef configIDLE_TASK_NAME
127 #define configIDLE_TASK_NAME "IDLE"
128 #endif
129
130 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
131
132 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
133 * performed in a generic way that is not optimised to any particular
134 * microcontroller architecture. */
135
136 /* uxTopReadyPriority holds the priority of the highest priority ready
137 * state task. */
138 #define taskRECORD_READY_PRIORITY( uxPriority ) \
139 { \
140 if( ( uxPriority ) > uxTopReadyPriority ) \
141 { \
142 uxTopReadyPriority = ( uxPriority ); \
143 } \
144 } /* taskRECORD_READY_PRIORITY */
145
146 /*-----------------------------------------------------------*/
147
148 #ifdef ESP_PLATFORM
149 #define taskSELECT_HIGHEST_PRIORITY_TASK() taskSelectHighestPriorityTaskSMP()
150 #else //ESP_PLATFORM
151 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
152 { \
153 UBaseType_t uxTopPriority = uxTopReadyPriority; \
154 \
155 /* Find the highest priority queue that contains ready tasks. */ \
156 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
157 { \
158 configASSERT( uxTopPriority ); \
159 --uxTopPriority; \
160 } \
161 \
162 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
163 * the same priority get an equal share of the processor time. */ \
164 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
165 uxTopReadyPriority = uxTopPriority; \
166 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
167 #endif //ESP_PLATFORM
168
169 /*-----------------------------------------------------------*/
170
171 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
172 * they are only required when a port optimised method of task selection is
173 * being used. */
174 #define taskRESET_READY_PRIORITY( uxPriority )
175 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
176
177 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
178
179 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
180 * performed in a way that is tailored to the particular microcontroller
181 * architecture being used. */
182
183 /* A port optimised version is provided. Call the port defined macros. */
184 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
185
186 /*-----------------------------------------------------------*/
187
188 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
189 { \
190 UBaseType_t uxTopPriority; \
191 \
192 /* Find the highest priority list that contains ready tasks. */ \
193 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
194 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
195 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
196 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
197
198 /*-----------------------------------------------------------*/
199
200 /* A port optimised version is provided, call it only if the TCB being reset
201 * is being referenced from a ready list. If it is referenced from a delayed
202 * or suspended list then it won't be in a ready list. */
203 #define taskRESET_READY_PRIORITY( uxPriority ) \
204 { \
205 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
206 { \
207 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
208 } \
209 }
210
211 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
212
213 /*-----------------------------------------------------------*/
214
215 /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
216 * count overflows. */
217 #define taskSWITCH_DELAYED_LISTS() \
218 { \
219 List_t * pxTemp; \
220 \
221 /* The delayed tasks list should be empty when the lists are switched. */ \
222 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
223 \
224 pxTemp = pxDelayedTaskList; \
225 pxDelayedTaskList = pxOverflowDelayedTaskList; \
226 pxOverflowDelayedTaskList = pxTemp; \
227 xNumOfOverflows++; \
228 prvResetNextTaskUnblockTime(); \
229 }
230
231 /*-----------------------------------------------------------*/
232
233 /*
234 * Place the task represented by pxTCB into the appropriate ready list for
235 * the task. It is inserted at the end of the list.
236 */
237 #define prvAddTaskToReadyList( pxTCB ) \
238 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
239 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
240 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
241 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
242 /*-----------------------------------------------------------*/
243
244 #define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
245
246 /*
247 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
248 * where NULL is used to indicate that the handle of the currently executing
249 * task should be used in place of the parameter. This macro simply checks to
250 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
251 */
252 #if configNUM_CORES > 1
253 /* In SMP, we need to disable interrupts if getting the current task handle outside a critical section. Calling xTaskGetCurrentTaskHandle() ensures this. */
254 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? xTaskGetCurrentTaskHandle() : ( (TaskHandle_t)pxHandle ) )
255 #else
256 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? (TaskHandle_t) pxCurrentTCB[0] : ( (TaskHandle_t)pxHandle ) )
257 #endif
258
259 /* The item value of the event list item is normally used to hold the priority
260 * of the task to which it belongs (coded to allow it to be held in reverse
261 * priority order). However, it is occasionally borrowed for other purposes. It
262 * is important its value is not updated due to a task priority change while it is
263 * being used for another purpose. The following bit definition is used to inform
264 * the scheduler that the value should not be changed - in which case it is the
265 * responsibility of whichever module is using the value to ensure it gets set back
266 * to its original value when it is released. */
267 #if ( configUSE_16_BIT_TICKS == 1 )
268 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
269 #else
270 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
271 #endif
272
273 /*
274 * Task control block. A task control block (TCB) is allocated for each task,
275 * and stores task state information, including a pointer to the task's context
276 * (the task's run time environment, including register values)
277 */
typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    volatile StackType_t * pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
    #endif

    ListItem_t xStateListItem;                  /*< The list that the state list item of a task is referenced from denotes the state of that task (Ready, Blocked, Suspended). */
    ListItem_t xEventListItem;                  /*< Used to reference a task from an event list. */
    UBaseType_t uxPriority;                     /*< The priority of the task. 0 is the lowest priority. */
    StackType_t * pxStack;                      /*< Points to the start of the stack. */
    char pcTaskName[ configMAX_TASK_NAME_LEN ]; /*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    BaseType_t xCoreID;                         /*< Core this task is pinned to, or tskNO_AFFINITY if it may run on any core (ESP-IDF SMP extension). */

    #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
        StackType_t * pxEndOfStack; /*< Points to the highest valid address for the stack. */
    #endif

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxTCBNumber;  /*< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
        UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
    #endif

    #if ( configUSE_MUTEXES == 1 )
        UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
        UBaseType_t uxMutexesHeld;  /*< Count of mutexes currently held; disinheritance only completes when this reaches zero. */
    #endif

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        TaskHookFunction_t pxTaskTag; /*< Application-defined hook/tag associated with the task via vTaskSetApplicationTaskTag(). */
    #endif

    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
        void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /*< Per-task storage slots readable/writable via the TLS API. */
        #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
            TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /*< Optional per-slot callbacks invoked on task deletion (ESP-IDF extension). */
        #endif
    #endif

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
        uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
    #endif

    #if ( configUSE_NEWLIB_REENTRANT == 1 )

        /* Allocate a Newlib reent structure that is specific to this task.
         * Note Newlib support has been included by popular demand, but is not
         * used by the FreeRTOS maintainers themselves.  FreeRTOS is not
         * responsible for resulting newlib operation.  User must be familiar with
         * newlib and must provide system-wide implementations of the necessary
         * stubs. Be warned that (at the time of writing) the current newlib design
         * implements a system-wide malloc() that must be provided with locks.
         *
         * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
         * for additional information. */
        struct _reent xNewLib_reent;
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
        volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; /*< Notification values, one per notification array index. */
        volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];    /*< One of taskNOT_WAITING_NOTIFICATION / taskWAITING_NOTIFICATION / taskNOTIFICATION_RECEIVED per index. */
    #endif

    /* See the comments in FreeRTOS.h with the definition of
     * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
    #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        uint8_t ucStaticallyAllocated;                     /*< Set to pdTRUE if the task is statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        uint8_t ucDelayAborted; /*< Set to pdTRUE when xTaskAbortDelay() forces the task out of the Blocked state before its timeout expired. */
    #endif

    #if ( configUSE_POSIX_ERRNO == 1 )
        int iTaskErrno; /*< Per-task copy of errno, swapped into FreeRTOS_errno on context switch. */
    #endif
} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
 * below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;
364
365 /*lint -save -e956 A manual analysis and inspection has been used to determine
366 * which static variables must be declared volatile. */
367 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[configNUM_CORES] = {NULL};
368
369 /* Lists for ready and blocked tasks. --------------------
370 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
371 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
372 * the static qualifier. */
373 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /*< Prioritised ready tasks. */
374 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
375 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
376 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
377 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
378 PRIVILEGED_DATA static List_t xPendingReadyList[ configNUM_CORES ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
379
380 #ifdef ESP_PLATFORM
381 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
382 #endif // ESP_PLATFORM
383
384 #if ( INCLUDE_vTaskDelete == 1 )
385
386 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
387 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
388
389 #endif
390
391 #if ( INCLUDE_vTaskSuspend == 1 )
392
393 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
394
395 #endif
396
397 /* Global POSIX errno. Its value is changed upon context switching to match
398 * the errno of the currently running task. */
399 #if ( configUSE_POSIX_ERRNO == 1 )
400 int FreeRTOS_errno = 0;
401 #endif
402
403 /* Other file private variables. --------------------------------*/
404 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
405 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
406 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
407 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
408 PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
409 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[configNUM_CORES] = {pdFALSE};
410 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
411 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
412 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
413 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[configNUM_CORES] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
414
415 /* Context switches are held pending while the scheduler is suspended. Also,
416 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
417 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
418 * If an interrupt needs to unblock a task while the scheduler is suspended then it
419 * moves the task's event list item into the xPendingReadyList, ready for the
420 * kernel to move the task from the pending ready list into the real ready list
421 * when the scheduler is unsuspended. The pending ready list itself can only be
422 * accessed from a critical section. */
423 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[configNUM_CORES] = {( UBaseType_t ) pdFALSE};
424
425 #if ( configGENERATE_RUN_TIME_STATS == 1 )
426
427 /* Do not move these variables to function scope as doing so prevents the
428 * code working with debuggers that need to remove the static qualifier. */
429 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[configNUM_CORES] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in. */
430 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
431
432 #endif
433
434 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
435 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
436 // for locks to be free or for host to read full trace buffer
437 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ configNUM_CORES ] = { pdFALSE };
438
439 /*lint -restore */
440
441 /*-----------------------------------------------------------*/
442
443 /* Callback function prototypes. --------------------------*/
444 #if( configCHECK_FOR_STACK_OVERFLOW > 0 )
445
446 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
447
448 #endif
449
450 #if( configUSE_TICK_HOOK > 0 )
451
452 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
453
454 #endif
455
456 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
457
458 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
459
460 #endif
461
462 /* File private functions. --------------------------------*/
463
464 /**
465 * Utility task that simply returns pdTRUE if the task referenced by xTask is
466 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
467 * is in any other state.
468 */
469 #if ( INCLUDE_vTaskSuspend == 1 )
470
471 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
472
473 #endif /* INCLUDE_vTaskSuspend */
474
475 /*
476 * Utility to ready all the lists used by the scheduler. This is called
477 * automatically upon the creation of the first task.
478 */
479 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
480
481 /*
482 * The idle task, which as all tasks is implemented as a never ending loop.
483 * The idle task is automatically created and added to the ready lists upon
484 * creation of the first user task.
485 *
486 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
487 * language extensions. The equivalent prototype for this function is:
488 *
489 * void prvIdleTask( void *pvParameters );
490 *
491 */
492 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
493
494 /*
495 * Utility to free all memory allocated by the scheduler to hold a TCB,
496 * including the stack pointed to by the TCB.
497 *
498 * This does not free memory allocated by the task itself (i.e. memory
499 * allocated by calls to pvPortMalloc from within the tasks application code).
500 */
501 #if ( INCLUDE_vTaskDelete == 1 )
502
503 static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
504
505 #endif
506
507 /* Function to call the Thread Local Storage Pointer Deletion Callbacks. Will be
508 * called during task deletion before prvDeleteTCB is called.
509 */
510 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
511 static void prvDeleteTLS( TCB_t *pxTCB );
512 #endif
513
514 /*
515 * Used only by the idle task. This checks to see if anything has been placed
516 * in the list of tasks waiting to be deleted. If so the task is cleaned up
517 * and its TCB deleted.
518 */
519 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
520
521 /*
522 * The currently executing task is entering the Blocked state. Add the task to
523 * either the current or the overflow delayed task list.
524 */
525 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID,
526 const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
527
528 /*
529 * Fills an TaskStatus_t structure with information on each task that is
530 * referenced from the pxList list (which may be a ready list, a delayed list,
531 * a suspended list, etc.).
532 *
533 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
534 * NORMAL APPLICATION CODE.
535 */
536 #if ( configUSE_TRACE_FACILITY == 1 )
537
538 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
539 List_t * pxList,
540 eTaskState eState ) PRIVILEGED_FUNCTION;
541
542 #endif
543
544 /*
545 * Searches pxList for a task with name pcNameToQuery - returning a handle to
546 * the task if it is found, or NULL if the task is not found.
547 */
548 #if ( INCLUDE_xTaskGetHandle == 1 )
549
550 static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
551 const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
552
553 #endif
554
555 /*
556 * When a task is created, the stack of the task is filled with a known value.
557 * This function determines the 'high water mark' of the task stack by
558 * determining how much of the stack remains at the original preset value.
559 */
560 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
561
562 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
563
564 #endif
565
566 /*
567 * Return the amount of time, in ticks, that will pass before the kernel will
568 * next move a task from the Blocked state to the Running state.
569 *
570 * This conditional compilation should use inequality to 0, not equality to 1.
571 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
572 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
573 * set to a value other than 1.
574 */
575 #if ( configUSE_TICKLESS_IDLE != 0 )
576
577 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
578
579 #endif
580
581 /*
582 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
583 * will exit the Blocked state.
584 */
585 static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
586
587 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
588
589 /*
590 * Helper function used to pad task names with spaces when printing out
591 * human readable tables of task information.
592 */
593 static char * prvWriteNameToBuffer( char * pcBuffer,
594 const char * pcTaskName ) PRIVILEGED_FUNCTION;
595
596 #endif
597
598 /*
599 * Called after a Task_t structure has been allocated either statically or
600 * dynamically to fill in the structure's members.
601 */
602 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
603 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
604 const uint32_t ulStackDepth,
605 void * const pvParameters,
606 UBaseType_t uxPriority,
607 TaskHandle_t * const pxCreatedTask,
608 TCB_t * pxNewTCB,
609 const MemoryRegion_t * const xRegions,
610 BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
611
612 /*
613 * Called after a new task has been created and initialised to place the task
614 * under the control of the scheduler.
615 */
616 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
617 TaskFunction_t pxTaskCode,
618 BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
619
620 /*
621 * freertos_tasks_c_additions_init() should only be called if the user definable
622 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
623 * called by the function.
624 */
625 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
626
627 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
628
629 #endif
630
631 /*
632 * This routine tries to send an interrupt to another core if needed to make it execute a task
633 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
634 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
635 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
636 * at most a superfluous interrupt is generated.
637 */
void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
{
    BaseType_t xCore;

    if( xCoreID == tskNO_AFFINITY )
    {
        /* The task has no affinity.  Scan every other core and interrupt the
         * first one found to be running a lower priority task. */
        for( xCore = 0; xCore < configNUM_CORES; xCore++ )
        {
            if( ( xCore != xPortGetCoreID() ) && ( pxCurrentTCB[ xCore ]->uxPriority < uxPriority ) )
            {
                vPortYieldOtherCore( xCore );
                break;
            }
        }
    }
    else
    {
        /* The task is pinned, so only its pinned core is a candidate.  Yield
         * that core if it is currently running something of lower priority. */
        if( pxCurrentTCB[ xCoreID ]->uxPriority < uxPriority ) { // NOLINT(clang-analyzer-core.NullDereference) IDF-685
            vPortYieldOtherCore( xCoreID );
        }
    }
}
659
660 /*-----------------------------------------------------------*/
661
662 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
663
/*
 * Create a task using caller-supplied (static) TCB and stack storage and pin
 * it to the given core.
 *
 * pvTaskCode    - Entry function of the task.
 * pcName        - Human readable task name (copied into the TCB).
 * ulStackDepth  - Stack size in StackType_t words (not bytes).
 * pvParameters  - Argument passed to the entry function.
 * uxPriority    - Task priority (clamped by prvInitialiseNewTask()).
 * pxStackBuffer - Caller-provided stack storage; must satisfy the port's
 *                 portVALID_STACK_MEM() check.
 * pxTaskBuffer  - Caller-provided StaticTask_t used as the TCB; must satisfy
 *                 portVALID_TCB_MEM().
 * xCoreID       - Core to pin to (0..configNUM_CORES-1) or tskNO_AFFINITY.
 *
 * Returns the handle of the new task, or NULL if either buffer is NULL.
 */
TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pvTaskCode,
                                            const char * const pcName,
                                            const uint32_t ulStackDepth,
                                            void * const pvParameters,
                                            UBaseType_t uxPriority,
                                            StackType_t * const pxStackBuffer,
                                            StaticTask_t * const pxTaskBuffer,
                                            const BaseType_t xCoreID )
{
    TCB_t *pxNewTCB;
    TaskHandle_t xReturn; /* Written by prvInitialiseNewTask() on the success path. */

    /* Both buffers must live in memory the port deems usable for a TCB /
     * stack (e.g. internal RAM on ESP chips). */
    configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
    configASSERT( portVALID_STACK_MEM(pxStackBuffer) );
    configASSERT( (xCoreID>=0 && xCoreID<configNUM_CORES) || (xCoreID==tskNO_AFFINITY) );

    #if ( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
             * variable of type StaticTask_t equals the size of the real task
             * structure. */
            volatile size_t xSize = sizeof( StaticTask_t );
            configASSERT( xSize == sizeof( TCB_t ) );
            ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
        }
    #endif /* configASSERT_DEFINED */


    if( ( pxTaskBuffer != NULL ) && ( pxStackBuffer != NULL ) )
    {
        /* The memory used for the task's TCB and stack are passed into this
         * function - use them. */
        pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
        pxNewTCB->pxStack = ( StackType_t * ) pxStackBuffer;

        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created statically in case the task is later deleted.
                 * Must be recorded before the task becomes reachable via the
                 * ready lists below. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

        prvInitialiseNewTask( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
        prvAddNewTaskToReadyList( pxNewTCB, pvTaskCode, xCoreID );
    }
    else
    {
        xReturn = NULL;
    }

    return xReturn;
}
717
718 #endif /* SUPPORT_STATIC_ALLOCATION */
719 /*-----------------------------------------------------------*/
720
721 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
722
/*
 * Create an MPU-restricted task using entirely caller-supplied (static)
 * memory: both the TCB and the stack come from pxTaskDefinition.
 *
 * pxTaskDefinition - Describes the task, its memory regions, and the
 *                    statically allocated stack/TCB buffers.
 * pxCreatedTask    - Optional out-parameter receiving the task handle.
 *
 * Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
 * either supplied buffer is NULL. The task is always created with
 * tskNO_AFFINITY (not pinned to a core).
 */
BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
                                        TaskHandle_t * pxCreatedTask )
{
    TCB_t * pxNewTCB;
    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

    configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
    configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

    if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
    {
        /* Allocate space for the TCB. Where the memory comes from depends
         * on the implementation of the port malloc function and whether or
         * not static allocation is being used. */
        pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;

        /* Store the stack location in the TCB. */
        pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created statically in case the task is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
            }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

        prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
                              pxTaskDefinition->pcName,
                              ( uint32_t ) pxTaskDefinition->usStackDepth,
                              pxTaskDefinition->pvParameters,
                              pxTaskDefinition->uxPriority,
                              pxCreatedTask, pxNewTCB,
                              pxTaskDefinition->xRegions,
                              tskNO_AFFINITY );

        prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY);
        xReturn = pdPASS;
    }

    return xReturn;
}
765
766 #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
767 /*-----------------------------------------------------------*/
768
769 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
770
/*
 * Create an MPU-restricted task with a caller-supplied (static) stack but a
 * dynamically allocated TCB.
 *
 * pxTaskDefinition - Describes the task, its memory regions, and the
 *                    statically allocated stack buffer.
 * pxCreatedTask    - Optional out-parameter receiving the task handle.
 *
 * Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if the
 * stack buffer is NULL or the TCB allocation fails. The task is created with
 * tskNO_AFFINITY (not pinned). Note the caller-owned stack is NOT freed on
 * failure - ownership stays with the caller.
 */
BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
                                  TaskHandle_t * pxCreatedTask )
{
    TCB_t * pxNewTCB;
    BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

    configASSERT( pxTaskDefinition->puxStackBuffer );

    if( pxTaskDefinition->puxStackBuffer != NULL )
    {
        /* Allocate space for the TCB. Where the memory comes from depends
         * on the implementation of the port malloc function and whether or
         * not static allocation is being used. */
        pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

        if( pxNewTCB != NULL )
        {
            /* Store the stack location in the TCB. */
            pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
                {
                    /* Tasks can be created statically or dynamically, so note
                     * this task had a statically allocated stack in case it is
                     * later deleted. The TCB was allocated dynamically. */
                    pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
                }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
                                  pxTaskDefinition->pcName,
                                  ( uint32_t ) pxTaskDefinition->usStackDepth,
                                  pxTaskDefinition->pvParameters,
                                  pxTaskDefinition->uxPriority,
                                  pxCreatedTask, pxNewTCB,
                                  pxTaskDefinition->xRegions,
                                  tskNO_AFFINITY );

            prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY);
            xReturn = pdPASS;
        }
    }

    return xReturn;
}
816
817 #endif /* portUSING_MPU_WRAPPERS */
818 /*-----------------------------------------------------------*/
819
820 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
821
/*
 * Create a task with dynamically allocated TCB and stack, pinned to the
 * given core (or tskNO_AFFINITY).
 *
 * The allocation order depends on the stack growth direction so that a
 * stack overflow cannot corrupt the adjacently-allocated TCB.
 *
 * Returns pdPASS on success, otherwise
 * errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY (all partial allocations are freed
 * on failure).
 */
BaseType_t xTaskCreatePinnedToCore( TaskFunction_t pvTaskCode,
                                    const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                    const uint32_t usStackDepth,
                                    void * const pvParameters,
                                    UBaseType_t uxPriority,
                                    TaskHandle_t * const pvCreatedTask,
                                    const BaseType_t xCoreID)
{
    TCB_t * pxNewTCB;
    BaseType_t xReturn;

    /* If the stack grows down then allocate the stack then the TCB so the stack
     * does not grow into the TCB. Likewise if the stack grows up then allocate
     * the TCB then the stack. */
    #if ( portSTACK_GROWTH > 0 )
        {
            /* Allocate space for the TCB. Where the memory comes from depends on
             * the implementation of the port malloc function and whether or not static
             * allocation is being used. */
            pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                /* Allocate space for the stack used by the task being created.
                 * The base of the stack memory stored in the TCB so the task can
                 * be deleted later if required. */
                pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

                if( pxNewTCB->pxStack == NULL )
                {
                    /* Could not allocate the stack. Delete the allocated TCB. */
                    vPortFree( pxNewTCB );
                    pxNewTCB = NULL;
                }
            }
        }
    #else /* portSTACK_GROWTH */
        {
            StackType_t * pxStack;

            /* Allocate space for the stack used by the task being created. */
            pxStack = pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */

            if( pxStack != NULL )
            {
                /* Allocate space for the TCB. */
                pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */

                if( pxNewTCB != NULL )
                {
                    /* Store the stack location in the TCB. */
                    pxNewTCB->pxStack = pxStack;
                }
                else
                {
                    /* The stack cannot be used as the TCB was not created. Free
                     * it again. */
                    vPortFree( pxStack );
                }
            }
            else
            {
                pxNewTCB = NULL;
            }
        }
    #endif /* portSTACK_GROWTH */

    if( pxNewTCB != NULL )
    {
        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created dynamically in case it is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
            }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

        prvInitialiseNewTask( pvTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pvCreatedTask, pxNewTCB, NULL, xCoreID );
        prvAddNewTaskToReadyList( pxNewTCB, pvTaskCode, xCoreID);
        xReturn = pdPASS;
    }
    else
    {
        xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
    }

    return xReturn;
}
910
911 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
912 /*-----------------------------------------------------------*/
913
/*
 * Common initialisation for all xTaskCreate* variants. Fills in the TCB
 * (name, priority, core affinity, list items, TLS pointers, notification
 * state), optionally fills the stack with a known value, computes the
 * aligned top-of-stack, and calls pxPortInitialiseStack() to lay down the
 * initial context frame.
 *
 * Does NOT make the task visible to the scheduler - the caller must follow
 * up with prvAddNewTaskToReadyList(). On exit, *pxCreatedTask (if non-NULL)
 * holds the new task's handle.
 */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                  const uint32_t ulStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask,
                                  TCB_t * pxNewTCB,
                                  const MemoryRegion_t * const xRegions,
                                  BaseType_t xCoreID )
{
    StackType_t * pxTopOfStack;
    UBaseType_t x;

    /* On a single-core build any requested affinity collapses to core 0. */
    #if (configNUM_CORES < 2)
        xCoreID = 0;
    #endif

    #if ( portUSING_MPU_WRAPPERS == 1 )
        /* Should the task be created in privileged mode? */
        BaseType_t xRunPrivileged;

        if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
        {
            xRunPrivileged = pdTRUE;
        }
        else
        {
            xRunPrivileged = pdFALSE;
        }
        uxPriority &= ~portPRIVILEGE_BIT;
    #endif /* portUSING_MPU_WRAPPERS == 1 */

    /* Avoid dependency on memset() if it is not required. */
    #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
        {
            /* Fill the stack with a known value to assist debugging (also used
             * by the stack high-water-mark calculation). */
            ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
        }
    #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */

    #if( configUSE_TRACE_FACILITY == 1 )
        {
            /* Zero the uxTaskNumber TCB member to avoid random value from dynamically allocated TCBs */
            pxNewTCB->uxTaskNumber = 0;
        }
    #endif /* ( configUSE_TRACE_FACILITY == 1 ) */

    /* Calculate the top of stack address. This depends on whether the stack
     * grows from high memory to low (as per the 80x86) or vice versa.
     * portSTACK_GROWTH is used to make the result positive or negative as required
     * by the port. */
    #if ( portSTACK_GROWTH < 0 )
        {
            pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
            pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */

            /* Check the alignment of the calculated top of stack is correct. */
            configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

            #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
                {
                    /* Also record the stack's high address, which may assist
                     * debugging. */
                    pxNewTCB->pxEndOfStack = pxTopOfStack;
                }
            #endif /* configRECORD_STACK_HIGH_ADDRESS */
        }
    #else /* portSTACK_GROWTH */
        {
            pxTopOfStack = pxNewTCB->pxStack;

            /* Check the alignment of the stack buffer is correct. */
            configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

            /* The other extreme of the stack space is required if stack checking is
             * performed. */
            pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
        }
    #endif /* portSTACK_GROWTH */

    /* Store the task name in the TCB. */
    if( pcName != NULL )
    {
        for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
        {
            pxNewTCB->pcTaskName[ x ] = pcName[ x ];

            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
             * configMAX_TASK_NAME_LEN characters just in case the memory after the
             * string is not accessible (extremely unlikely). */
            if( pcName[ x ] == ( char ) 0x00 )
            {
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Ensure the name string is terminated in the case that the string length
         * was greater or equal to configMAX_TASK_NAME_LEN. */
        pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
    }
    else
    {
        /* The task has not been given a name, so just ensure there is a NULL
         * terminator when it is read out. */
        pxNewTCB->pcTaskName[ 0 ] = 0x00;
    }

    /* This is used as an array index so must ensure it's not too large. First
     * remove the privilege bit if one is present. */
    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
    {
        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    pxNewTCB->uxPriority = uxPriority;
    pxNewTCB->xCoreID = xCoreID; /* ESP-IDF SMP: remember the requested core affinity. */
    #if ( configUSE_MUTEXES == 1 )
        {
            /* uxBasePriority is restored after priority inheritance ends. */
            pxNewTCB->uxBasePriority = uxPriority;
            pxNewTCB->uxMutexesHeld = 0;
        }
    #endif /* configUSE_MUTEXES */

    vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
    vListInitialiseItem( &( pxNewTCB->xEventListItem ) );

    /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
     * back to the containing TCB from a generic item in a list. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );

    /* Event lists are always in priority order (item value is the inverted
     * priority so higher-priority tasks sort first). */
    listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        {
            pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
        }
    #endif /* portCRITICAL_NESTING_IN_TCB */

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        {
            pxNewTCB->pxTaskTag = NULL;
        }
    #endif /* configUSE_APPLICATION_TASK_TAG */

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxNewTCB->ulRunTimeCounter = 0UL;
        }
    #endif /* configGENERATE_RUN_TIME_STATS */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        {
            vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
        }
    #else
        {
            /* Avoid compiler warning about unreferenced parameter. */
            ( void ) xRegions;
        }
    #endif

    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
        {
            /* Clear all thread-local-storage slots (and, when enabled, their
             * ESP-IDF deletion callbacks). */
            for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
            {
                pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
                #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
                    pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
                #endif

            }
        }
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
        {
            /* No notification is pending and no task is waiting on one. */
            memset( ( void * ) &( pxNewTCB->ulNotifiedValue[ 0 ] ), 0x00, sizeof( pxNewTCB->ulNotifiedValue ) );
            memset( ( void * ) &( pxNewTCB->ucNotifyState[ 0 ] ), 0x00, sizeof( pxNewTCB->ucNotifyState ) );
        }
    #endif

    #if ( configUSE_NEWLIB_REENTRANT == 1 )
        {
            /* Initialise this task's Newlib reent structure. */
            _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
        }
    #endif

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        {
            pxNewTCB->ucDelayAborted = pdFALSE;
        }
    #endif

    /* Initialize the TCB stack to look as if the task was already running,
     * but had been interrupted by the scheduler. The return address is set
     * to the start of the task function. Once the stack has been initialised
     * the top of stack variable is updated. */
    #if ( portUSING_MPU_WRAPPERS == 1 )
        {
            /* If the port has capability to detect stack overflow,
             * pass the stack end address to the stack initialization
             * function as well. */
            #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
                {
                    #if ( portSTACK_GROWTH < 0 )
                        {
                            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
                        }
                    #else /* portSTACK_GROWTH */
                        {
                            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
                        }
                    #endif /* portSTACK_GROWTH */
                }
            #else /* portHAS_STACK_OVERFLOW_CHECKING */
                {
                    pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
                }
            #endif /* portHAS_STACK_OVERFLOW_CHECKING */
        }
    #else /* portUSING_MPU_WRAPPERS */
        {
            /* If the port has capability to detect stack overflow,
             * pass the stack end address to the stack initialization
             * function as well. */
            #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
                {
                    #if ( portSTACK_GROWTH < 0 )
                        {
                            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
                        }
                    #else /* portSTACK_GROWTH */
                        {
                            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
                        }
                    #endif /* portSTACK_GROWTH */
                }
            #else /* portHAS_STACK_OVERFLOW_CHECKING */
                {
                    pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
                }
            #endif /* portHAS_STACK_OVERFLOW_CHECKING */
        }
    #endif /* portUSING_MPU_WRAPPERS */

    if( pxCreatedTask != NULL )
    {
        /* Pass the handle out in an anonymous way. The handle can be used to
         * change the created task's priority, delete the created task, etc.*/
        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
1181 /*-----------------------------------------------------------*/
1182
/*
 * Make a freshly-initialised task visible to the scheduler.
 *
 * Resolves tskNO_AFFINITY to a concrete core (ESP-IDF SMP heuristic: an
 * idle core first, then the core running the lowest-priority task the new
 * task would preempt, else the calling core), inserts the task into the
 * ready lists, and triggers a yield (on this core or via cross-core
 * interrupt) if the new task should run immediately.
 */
static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB,
                                      TaskFunction_t pxTaskCode,
                                      BaseType_t xCoreID )
{
    TCB_t *curTCB, *tcb0, *tcb1;

    /* Single-core builds always schedule on core 0. */
    #if (configNUM_CORES < 2)
        xCoreID = 0;
    #endif

    /* Ensure interrupts don't access the task lists while the lists are being
     * updated. */
    taskENTER_CRITICAL();
    {
        uxCurrentNumberOfTasks++;

        if ( xCoreID == tskNO_AFFINITY )
        {
            if ( configNUM_CORES == 1 )
            {
                xCoreID = 0;
            }
            else
            {
                // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
                // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
                // queue it on the currently running core.
                tcb0 = pxCurrentTCB[0];
                tcb1 = pxCurrentTCB[1];
                if ( tcb0 == NULL )
                {
                    xCoreID = 0;
                }
                else if ( tcb1 == NULL )
                {
                    xCoreID = 1;
                }
                else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
                {
                    xCoreID = 0;
                }
                else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
                {
                    xCoreID = 1;
                }
                else
                {
                    xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
                }
            }
        }

        if( pxCurrentTCB[xCoreID] == NULL )
        {
            /* There are no other tasks, or all the other tasks are in
             * the suspended state - make this the current task. */
            pxCurrentTCB[xCoreID] = pxNewTCB;

            if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
            {
                /* This is the first task to be created so do the preliminary
                 * initialisation required. We will not recover if this call
                 * fails, but we will report the failure. */
                prvInitialiseTaskLists();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* If the scheduler is not already running, make this task the
             * current task if it is the highest priority task to be created
             * so far. */
            if( xSchedulerRunning == pdFALSE )
            {
                if( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
                {
                    pxCurrentTCB[xCoreID] = pxNewTCB;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        uxTaskNumber++;

        #if ( configUSE_TRACE_FACILITY == 1 )
            {
                /* Add a counter into the TCB for tracing only. */
                pxNewTCB->uxTCBNumber = uxTaskNumber;
            }
        #endif /* configUSE_TRACE_FACILITY */
        traceTASK_CREATE( pxNewTCB );

        prvAddTaskToReadyList( pxNewTCB );

        portSETUP_TCB( pxNewTCB );
    }
    taskEXIT_CRITICAL();

    if( xSchedulerRunning != pdFALSE )
    {
        /* If the created task is of a higher priority than the current task
         * then it should run now. Re-enter the critical section because the
         * running task on the target core may have changed since the section
         * above was exited. */
        taskENTER_CRITICAL();

        curTCB = pxCurrentTCB[ xCoreID ];
        if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
        {
            if( xCoreID == xPortGetCoreID() )
            {
                taskYIELD_IF_USING_PREEMPTION();
            }
            else {
                /* Target core differs from the calling core - request a
                 * cross-core yield. */
                taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        taskEXIT_CRITICAL();
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
1319 /*-----------------------------------------------------------*/
1320
1321 #if ( INCLUDE_vTaskDelete == 1 )
1322
/*
 * Delete a task (NULL deletes the calling task).
 *
 * ESP-IDF SMP rules: the memory of a task that is deleting itself, is
 * currently running on the other core, or is pinned to the other core
 * (FPU cleanup must happen on that core) cannot be freed here - such tasks
 * are parked on xTasksWaitingTermination for the idle task to clean up.
 * Only a non-running, same-core (or unpinned) task is freed immediately.
 */
void vTaskDelete( TaskHandle_t xTaskToDelete )
{
    TCB_t * pxTCB;
    TCB_t * curTCB;
    BaseType_t core;
    BaseType_t xFreeNow = 0; /* Set pdTRUE when the TCB/stack can be freed in this call. */

    taskENTER_CRITICAL();
    {
        core = xPortGetCoreID();
        curTCB = pxCurrentTCB[core];

        /* If null is passed in here then it is the calling task that is
         * being deleted. */
        pxTCB = prvGetTCBFromHandle( xTaskToDelete );

        /* Remove task from the ready/delayed list. */
        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
        {
            taskRESET_READY_PRIORITY( pxTCB->uxPriority );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Is the task waiting on an event also? */
        if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
        {
            ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Increment the uxTaskNumber also so kernel aware debuggers can
         * detect that the task lists need re-generating. This is done before
         * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
         * not return. */
        uxTaskNumber++;

        /* NOTE: `!core` yields the index of the other core; this expression
         * is only meaningful for two-core configurations. */
        if( pxTCB == curTCB ||
            /* in SMP, we also can't immediately delete the task active on the other core */
            (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
            /* ... and we can't delete a non-running task pinned to the other core, as
               FPU cleanup has to happen on the same core */
            (configNUM_CORES > 1 && pxTCB->xCoreID == (!core)) )
        {
            /* A task is deleting itself. This cannot complete within the
             * task itself, as a context switch to another task is required.
             * Place the task in the termination list. The idle task will
             * check the termination list and free up any memory allocated by
             * the scheduler for the TCB and stack of the deleted task. */
            vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

            /* Increment the ucTasksDeleted variable so the idle task knows
             * there is a task that has been deleted and that it should therefore
             * check the xTasksWaitingTermination list. */
            ++uxDeletedTasksWaitingCleanUp;

            /* The pre-delete hook is primarily for the Windows simulator,
             * in which Windows specific clean up operations are performed,
             * after which it is not possible to yield away from this task -
             * hence xYieldPending is used to latch that a context switch is
             * required. */
            portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );

            if (configNUM_CORES > 1 && pxTCB == pxCurrentTCB[ !core ])
            {
                /* SMP case of deleting a task running on a different core. Same issue
                   as a task deleting itself, but we need to send a yield to this task now
                   before we release xTaskQueueMutex.

                   Specifically there is a case where the other core may already be spinning on
                   xTaskQueueMutex waiting to go into a blocked state. A check is added in
                   prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
                   xTasksWaitingTermination list in this case (instead it will immediately
                   release xTaskQueueMutex again and be yielded before the FreeRTOS function
                   returns.) */
                vPortYieldOtherCore( !core );
            }
        }
        else
        {
            --uxCurrentNumberOfTasks;
            xFreeNow = pdTRUE;

            /* Reset the next expected unblock time in case it referred to
             * the task that has just been deleted. */
            prvResetNextTaskUnblockTime();
        }

        traceTASK_DELETE( pxTCB );
    }
    taskEXIT_CRITICAL();

    /* Free outside the critical section - prvDeleteTCB()/prvDeleteTLS() may
     * take comparatively long (heap operations, TLS deletion callbacks). */
    if(xFreeNow == pdTRUE) {
        #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
            prvDeleteTLS( pxTCB );
        #endif

        prvDeleteTCB( pxTCB );
    }

    /* Force a reschedule if it is the currently running task that has just
     * been deleted. */
    if( xSchedulerRunning != pdFALSE )
    {
        if( pxTCB == curTCB )
        {
            configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
}
1443
1444 #endif /* INCLUDE_vTaskDelete */
1445 /*-----------------------------------------------------------*/
1446
1447 #if ( INCLUDE_xTaskDelayUntil == 1 )
1448 #ifdef ESP_PLATFORM
1449 // backward binary compatibility - remove later
1450 #undef vTaskDelayUntil
/*
 * Backward-binary-compatibility shim: older code linked against
 * vTaskDelayUntil(), which returned void. It simply forwards to
 * xTaskDelayUntil().
 *
 * pxPreviousWakeTime - In/out: the time the task last unblocked; updated to
 *                      the next wake time on return.
 * xTimeIncrement     - Cycle period in ticks (must be > 0).
 */
void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                      const TickType_t xTimeIncrement )
{
    /* The legacy API has no way to report whether a delay actually occurred,
     * so the result is deliberately discarded. */
    ( void ) xTaskDelayUntil( pxPreviousWakeTime, xTimeIncrement );
}
1456 #endif // ESP_PLATFORM
1457
/*
 * Block the calling task until the absolute time
 * (*pxPreviousWakeTime + xTimeIncrement), giving a fixed-frequency periodic
 * wake-up that does not drift with execution time.
 *
 * *pxPreviousWakeTime is updated to the new wake time whether or not the
 * task delays. Returns pdTRUE if the task actually delayed, pdFALSE if the
 * wake time was already in the past. The overflow logic below relies on
 * TickType_t's modular (wrap-around) arithmetic and must not be reordered.
 */
BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                            const TickType_t xTimeIncrement )
{
    TickType_t xTimeToWake;
    #ifdef ESP_PLATFORM
        /* ESP_PLATFORM uses a critical section instead of suspending the
         * scheduler, so no "already yielded" flag is needed. */
        BaseType_t xShouldDelay = pdFALSE;
    #else
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;
    #endif // ESP_PLATFORM

    configASSERT( pxPreviousWakeTime );
    configASSERT( ( xTimeIncrement > 0U ) );
    configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );

    #ifdef ESP_PLATFORM // IDF-3755
        taskENTER_CRITICAL();
    #else
        vTaskSuspendAll();
    #endif // ESP_PLATFORM
    {
        /* Minor optimisation. The tick count cannot change in this
         * block. */
        const TickType_t xConstTickCount = xTickCount;

        /* Generate the tick time at which the task wants to wake. */
        xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

        if( xConstTickCount < *pxPreviousWakeTime )
        {
            /* The tick count has overflowed since this function was
             * lasted called. In this case the only time we should ever
             * actually delay is if the wake time has also overflowed,
             * and the wake time is greater than the tick time. When this
             * is the case it is as if neither time had overflowed. */
            if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
            {
                xShouldDelay = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* The tick time has not overflowed. In this case we will
             * delay if either the wake time has overflowed, and/or the
             * tick time is less than the wake time. */
            if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
            {
                xShouldDelay = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Update the wake time ready for the next call. */
        *pxPreviousWakeTime = xTimeToWake;

        if( xShouldDelay != pdFALSE )
        {
            traceTASK_DELAY_UNTIL();

            /* prvAddCurrentTaskToDelayedList() needs the block time, not
             * the time to wake, so subtract the current tick count. */
            prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake - xConstTickCount );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #ifdef ESP_PLATFORM // IDF-3755
        taskEXIT_CRITICAL();
    #else
        xAlreadyYielded = xTaskResumeAll();
    #endif // ESP_PLATFORM

    /* Force a reschedule if xTaskResumeAll has not already done so, we may
     * have put ourselves to sleep. */
    #ifdef ESP_PLATFORM
        portYIELD_WITHIN_API();
    #else
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    #endif // ESP_PLATFORM
    return xShouldDelay;
}
1554
1555 #endif /* INCLUDE_xTaskDelayUntil */
1556 /*-----------------------------------------------------------*/
1557
1558 #if ( INCLUDE_vTaskDelay == 1 )
1559
vTaskDelay(const TickType_t xTicksToDelay)1560 void vTaskDelay( const TickType_t xTicksToDelay )
1561 {
1562 /* A delay time of zero just forces a reschedule. */
1563 if( xTicksToDelay > ( TickType_t ) 0U )
1564 {
1565 configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
1566 #ifdef ESP_PLATFORM // IDF-3755
1567 taskENTER_CRITICAL();
1568 #else
1569 vTaskSuspendAll();
1570 #endif // ESP_PLATFORM
1571 {
1572 traceTASK_DELAY();
1573
1574 /* A task that is removed from the event list while the
1575 * scheduler is suspended will not get placed in the ready
1576 * list or removed from the blocked list until the scheduler
1577 * is resumed.
1578 *
1579 * This task cannot be in an event list as it is the currently
1580 * executing task. */
1581 prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay );
1582 }
1583 #ifdef ESP_PLATFORM // IDF-3755
1584 taskEXIT_CRITICAL();
1585 #else
1586 xAlreadyYielded = xTaskResumeAll();
1587 #endif // ESP_PLATFORM
1588 }
1589 else
1590 {
1591 mtCOVERAGE_TEST_MARKER();
1592 }
1593
1594 /* Force a reschedule, we may have put ourselves to sleep. */
1595 portYIELD_WITHIN_API();
1596 }
1597
1598 #endif /* INCLUDE_vTaskDelay */
1599 /*-----------------------------------------------------------*/
1600
1601 #if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
1602
/*
 * Return the current state of a task (eRunning, eReady, eBlocked,
 * eSuspended, or eDeleted).
 *
 * The whole query runs inside a critical section so neither core can
 * context-switch the queried task between the list checks. A task that is
 * the current task on EITHER core reports eRunning.
 */
eTaskState eTaskGetState( TaskHandle_t xTask )
{
    eTaskState eReturn;
    List_t const * pxStateList, * pxDelayedList, * pxOverflowedDelayedList;
    const TCB_t * const pxTCB = xTask;

    configASSERT( pxTCB );

    taskENTER_CRITICAL(); //Need critical section in case either core context switches in between
    if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
    {
        /* The task calling this function is querying its own state. */
        eReturn = eRunning;
    }
    #if (configNUM_CORES > 1)
        else if (pxTCB == pxCurrentTCB[!xPortGetCoreID()])
        {
            /* The queried task is currently running on the other core, so it
             * is also reported as running. */
            eReturn = eRunning;
        }
    #endif
    else
    {
        pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
        pxDelayedList = pxDelayedTaskList;
        pxOverflowedDelayedList = pxOverflowDelayedTaskList;

        if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
        {
            /* The task being queried is referenced from one of the Blocked
             * lists. */
            eReturn = eBlocked;
        }

        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( pxStateList == &xSuspendedTaskList )
            {
                /* The task being queried is referenced from the suspended
                 * list. Is it genuinely suspended or is it blocked
                 * indefinitely? */
                if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                {
                    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                        {
                            BaseType_t x;

                            /* The task does not appear on the event list item of
                             * and of the RTOS objects, but could still be in the
                             * blocked state if it is waiting on its notification
                             * rather than waiting on an object. If not, is
                             * suspended. */
                            eReturn = eSuspended;

                            for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                            {
                                if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                {
                                    eReturn = eBlocked;
                                    break;
                                }
                            }
                        }
                    #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        {
                            eReturn = eSuspended;
                        }
                    #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                }
                else
                {
                    eReturn = eBlocked;
                }
            }
        #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

        #if ( INCLUDE_vTaskDelete == 1 )
            else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
            {
                /* The task being queried is referenced from the deleted
                 * tasks list, or it is not referenced from any lists at
                 * all. */
                eReturn = eDeleted;
            }
        #endif

        else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
        {
            /* If the task is not in any other state, it must be in the
             * Ready (including pending ready) state. */
            eReturn = eReady;
        }
    }
    taskEXIT_CRITICAL();

    return eReturn;
} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1699
1700 #endif /* INCLUDE_eTaskGetState */
1701 /*-----------------------------------------------------------*/
1702
1703 #if ( INCLUDE_uxTaskPriorityGet == 1 )
1704
uxTaskPriorityGet(const TaskHandle_t xTask)1705 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1706 {
1707 TCB_t const * pxTCB;
1708 UBaseType_t uxReturn;
1709
1710 taskENTER_CRITICAL();
1711 {
1712 /* If null is passed in here then it is the priority of the task
1713 * that called uxTaskPriorityGet() that is being queried. */
1714 pxTCB = prvGetTCBFromHandle( xTask );
1715 uxReturn = pxTCB->uxPriority;
1716 }
1717 taskEXIT_CRITICAL();
1718
1719 return uxReturn;
1720 }
1721
1722 #endif /* INCLUDE_uxTaskPriorityGet */
1723 /*-----------------------------------------------------------*/
1724
1725 #if ( INCLUDE_uxTaskPriorityGet == 1 )
1726
uxTaskPriorityGetFromISR(const TaskHandle_t xTask)1727 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1728 {
1729 TCB_t const * pxTCB;
1730 UBaseType_t uxReturn;
1731
1732 /* RTOS ports that support interrupt nesting have the concept of a
1733 * maximum system call (or maximum API call) interrupt priority.
1734 * Interrupts that are above the maximum system call priority are keep
1735 * permanently enabled, even when the RTOS kernel is in a critical section,
1736 * but cannot make any calls to FreeRTOS API functions. If configASSERT()
1737 * is defined in FreeRTOSConfig.h then
1738 * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1739 * failure if a FreeRTOS API function is called from an interrupt that has
1740 * been assigned a priority above the configured maximum system call
1741 * priority. Only FreeRTOS functions that end in FromISR can be called
1742 * from interrupts that have been assigned a priority at or (logically)
1743 * below the maximum system call interrupt priority. FreeRTOS maintains a
1744 * separate interrupt safe API to ensure interrupt entry is as fast and as
1745 * simple as possible. More information (albeit Cortex-M specific) is
1746 * provided on the following link:
1747 * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1748 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1749
1750 portENTER_CRITICAL_ISR(&xTaskQueueMutex );
1751 {
1752 /* If null is passed in here then it is the priority of the calling
1753 * task that is being queried. */
1754 pxTCB = prvGetTCBFromHandle( xTask );
1755 uxReturn = pxTCB->uxPriority;
1756 }
1757 portEXIT_CRITICAL_ISR(&xTaskQueueMutex);
1758
1759 return uxReturn;
1760 }
1761
1762 #endif /* INCLUDE_uxTaskPriorityGet */
1763 /*-----------------------------------------------------------*/
1764
1765 #if ( INCLUDE_vTaskPrioritySet == 1 )
1766
    /* Change the base priority of xTask (or of the calling task when xTask is
     * NULL) to uxNewPriority, moving the task between ready lists and yielding
     * this core or signalling the other core as required. */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );

        /* Ensure the new priority is valid: out-of-range values are clipped
         * to the highest legal priority. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            #if ( configUSE_MUTEXES == 1 )
            {
                /* With mutexes enabled the task may be running at an inherited
                 * priority, so comparisons are made against the base priority. */
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than the calling task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
                    {
                        /* The priority of a task other than the currently
                         * running task is being raised.  Is the priority being
                         * raised above that of the running task? */
                        if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                        {
                            xYieldRequired = pdTRUE;
                        }
                        else if( pxTCB->xCoreID != xPortGetCoreID() )
                        {
                            /* The task is pinned to the other core, so ask
                             * that core to re-evaluate its schedule instead. */
                            taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* The priority of the running task is being raised,
                         * but the running task must already be the highest
                         * priority task able to run so no yield is required. */
                    }
                }
                else if( pxTCB == pxCurrentTCB[ xPortGetCoreID() ] )
                {
                    /* Setting the priority of the running task down means
                     * there may now be another task of higher priority that
                     * is ready to execute. */
                    xYieldRequired = pdTRUE;
                }
                else if( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] )
                {
                    /* NOTE(review): this branch handles lowering the priority
                     * of a task that is not the running task on THIS core; it
                     * may still be the running task on the other core, hence
                     * the cross-core check below.  The trailing else is then
                     * unreachable. */
                    if( uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                    {
                        xYieldRequired = pdTRUE;
                    }
                    else if( pxTCB->xCoreID != xPortGetCoreID() ) /* Need to check if not currently running on other core */
                    {
                        taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority. */
                    if( pxTCB->uxBasePriority == pxTCB->uxPriority )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list.  As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                if( xYieldRequired != pdFALSE )
                {
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL();
    }
1949
1950 #endif /* INCLUDE_vTaskPrioritySet */
1951 /*-----------------------------------------------------------*/
1952
1953 #if ( INCLUDE_vTaskSuspend == 1 )
1954
    /* Move xTaskToSuspend (or the calling task when NULL) onto the suspended
     * list, clearing any event wait and pending notification waits, then
     * yield if the running task suspended itself. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;
        TCB_t * curTCB;

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            /* Remove task from the ready/delayed list and place in the
             * suspended list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* The removed item was the last one on its list, so the
                 * corresponding ready-priority tracking can be reset. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            /* Snapshot this core's running task while still inside the
             * critical section; it is compared against pxTCB further below,
             * after the critical section has been exited. */
            curTCB = pxCurrentTCB[ xPortGetCoreID() ];

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
        }
        taskEXIT_CRITICAL();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Reset the next expected unblock time in case it referred to the
             * task that is now in the Suspended state. */
            taskENTER_CRITICAL();
            {
                prvResetNextTaskUnblockTime();
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( pxTCB == curTCB )
        {
            if( xSchedulerRunning != pdFALSE )
            {
                /* The current task has just been suspended. */
                taskENTER_CRITICAL();
                BaseType_t suspended = uxSchedulerSuspended[ xPortGetCoreID() ];
                taskEXIT_CRITICAL();

                /* Suspending the running task while the scheduler itself is
                 * suspended would leave no way to switch away, so assert. */
                configASSERT( suspended == 0 );
                ( void ) suspended;
                portYIELD_WITHIN_API();
            }
            else
            {
                /* The scheduler is not running, but the task that was pointed
                 * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                 * must be adjusted to point to a different task. */
                if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
                {
                    /* No other tasks are ready, so set pxCurrentTCB back to
                     * NULL so when the next task is created pxCurrentTCB will
                     * be set to point to it no matter what its relative priority
                     * is. */
                    taskENTER_CRITICAL();
                    pxCurrentTCB[ xPortGetCoreID() ] = NULL;
                    taskEXIT_CRITICAL();
                }
                else
                {
                    vTaskSwitchContext();
                }
            }
        }
        else
        {
            if( xSchedulerRunning != pdFALSE )
            {
                /* A task other than the currently running task was suspended,
                 * reset the next expected unblock time in case it referred to the
                 * task that is now in the Suspended state. */
                taskENTER_CRITICAL();
                {
                    prvResetNextTaskUnblockTime();
                }
                taskEXIT_CRITICAL();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
2078
2079 #endif /* INCLUDE_vTaskSuspend */
2080 /*-----------------------------------------------------------*/
2081
2082 #if ( INCLUDE_vTaskSuspend == 1 )
2083
prvTaskIsTaskSuspended(const TaskHandle_t xTask)2084 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
2085 {
2086 BaseType_t xReturn = pdFALSE;
2087 const TCB_t * const pxTCB = xTask;
2088
2089 /* Accesses xPendingReadyList so must be called from a critical
2090 * section. */
2091
2092 /* It does not make sense to check if the calling task is suspended. */
2093 configASSERT( xTask );
2094
2095 /* Is the task being resumed actually in the suspended list? */
2096 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
2097 {
2098 /* Has the task already been resumed from within an ISR? */
2099 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE &&
2100 listIS_CONTAINED_WITHIN( &xPendingReadyList[!xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE )
2101 {
2102 /* Is it in the suspended list because it is in the Suspended
2103 * state, or because is is blocked with no timeout? */
2104 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
2105 {
2106 xReturn = pdTRUE;
2107 }
2108 else
2109 {
2110 mtCOVERAGE_TEST_MARKER();
2111 }
2112 }
2113 else
2114 {
2115 mtCOVERAGE_TEST_MARKER();
2116 }
2117 }
2118 else
2119 {
2120 mtCOVERAGE_TEST_MARKER();
2121 }
2122
2123 return xReturn;
2124 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
2125
2126 #endif /* INCLUDE_vTaskSuspend */
2127 /*-----------------------------------------------------------*/
2128
2129 #if ( INCLUDE_vTaskSuspend == 1 )
2130
    /* Move a genuinely suspended task back onto the ready lists, yielding this
     * core or signalling the other core if the resumed task should preempt. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        /* A critical section (not just a scheduler suspension) is used because
         * the other core could otherwise modify the lists concurrently. */
        taskENTER_CRITICAL();

        /* The parameter cannot be NULL as it is impossible to resume the
         * currently executing task. */
        if( ( pxTCB != pxCurrentTCB[ xPortGetCoreID() ] ) && ( pxTCB != NULL ) )
        {
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* We may have just resumed a higher priority task. */
                    if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                    {
                        /* This yield may not cause the task just resumed to run,
                         * but will leave the lists in the correct state for the
                         * next yield. */
                        taskYIELD_IF_USING_PREEMPTION();
                    }
                    else if( pxTCB->xCoreID != xPortGetCoreID() )
                    {
                        /* The resumed task is pinned to the other core; let
                         * that core decide whether to preempt. */
                        taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
        taskEXIT_CRITICAL();
    }
2182
2183 #endif /* INCLUDE_vTaskSuspend */
2184
2185 /*-----------------------------------------------------------*/
2186
2187 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
2188
    /* Interrupt-safe resume.  Returns pdTRUE when the ISR should request a
     * context switch before it exits (a higher priority task was readied on
     * this core). */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;

        configASSERT( xTaskToResume );

        /* Ports that support interrupt nesting restrict which interrupt
         * priorities may call FreeRTOS API functions; only FromISR functions
         * may be used from interrupts at or (logically) below the maximum
         * system call interrupt priority.  More information (albeit Cortex-M
         * specific) is provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html
         * NOTE(review): the standard priority-validity assert is disabled on
         * this port - confirm this is intentional for ESP_PLATFORM. */
        //portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        taskENTER_CRITICAL_ISR();
        {
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
                {
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* The resumed task may preempt this core, or need the
                     * other core (it is pinned there) to reschedule. */
                    if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                    {
                        xYieldRequired = pdTRUE;
                    }
                    else if( pxTCB->xCoreID != xPortGetCoreID() )
                    {
                        taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList[ xPortGetCoreID() ] ), &( pxTCB->xEventListItem ) );
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL_ISR();

        return xYieldRequired;
    }
2258
2259 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2260 /*-----------------------------------------------------------*/
2261
/* Create the idle task(s) (and timer task if enabled), then hand control to
 * the port layer; does not return unless the scheduler could not start or
 * xTaskEndScheduler() is called. */
void vTaskStartScheduler( void )
{
    BaseType_t xReturn;

    #ifdef ESP_PLATFORM
        /* Create an IDLE task for each core.  Note: the loop body is the
         * compound block selected by the #if below.
         * NOTE(review): xCoreID is only declared under ESP_PLATFORM, yet it is
         * referenced unconditionally below - confirm non-ESP builds are not
         * expected to compile this file. */
        for( BaseType_t xCoreID = 0; xCoreID < configNUM_CORES; xCoreID++ )
    #endif //ESP_PLATFORM
    /* Add the idle task at the lowest priority. */
    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
        {
            StaticTask_t * pxIdleTaskTCBBuffer = NULL;
            StackType_t * pxIdleTaskStackBuffer = NULL;
            uint32_t ulIdleTaskStackSize;

            /* The Idle task is created using user provided RAM - obtain the
             * address of the RAM then create the idle task. */
            vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
            xIdleTaskHandle[ xCoreID ] = xTaskCreateStaticPinnedToCore( prvIdleTask,
                                                                        configIDLE_TASK_NAME,
                                                                        ulIdleTaskStackSize,
                                                                        ( void * ) NULL,       /*lint !e961.  The cast is not redundant for all compilers. */
                                                                        portPRIVILEGE_BIT,     /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                                                        pxIdleTaskStackBuffer,
                                                                        pxIdleTaskTCBBuffer,
                                                                        xCoreID );             /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */

            if( xIdleTaskHandle[ xCoreID ] != NULL )
            {
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
    #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
        {
            /* The Idle task is being created using dynamically allocated RAM. */
            xReturn = xTaskCreatePinnedToCore( prvIdleTask,
                                               configIDLE_TASK_NAME,
                                               configIDLE_TASK_STACK_SIZE,
                                               ( void * ) NULL,
                                               portPRIVILEGE_BIT,  /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                               &xIdleTaskHandle[ xCoreID ],
                                               xCoreID );          /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */

            if( xIdleTaskHandle[ xCoreID ] != NULL )
            {
                xReturn = pdPASS;
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
    #endif /* configSUPPORT_STATIC_ALLOCATION */

    #if ( configUSE_TIMERS == 1 )
    {
        /* Only attempt to create the timer service task if idle creation
         * succeeded. */
        if( xReturn == pdPASS )
        {
            xReturn = xTimerCreateTimerTask();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_TIMERS */

    if( xReturn == pdPASS )
    {
        /* freertos_tasks_c_additions_init() should only be called if the user
         * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
         * the only macro called by the function. */
        #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        {
            freertos_tasks_c_additions_init();
        }
        #endif

        /* Interrupts are turned off here, to ensure a tick does not occur
         * before or during the call to xPortStartScheduler().  The stacks of
         * the created tasks contain a status word with interrupts switched on
         * so interrupts will automatically get re-enabled when the first task
         * starts to run. */
        portDISABLE_INTERRUPTS();

        #if ( configUSE_NEWLIB_REENTRANT == 1 )
        {
            /* Switch Newlib's _impure_ptr variable to point to the _reent
             * structure specific to the task that will run first.
             * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
             * for additional information.
             * NOTE(review): disabled here; this port resolves the reent
             * structure via __getreent() instead. */
            // _impure_ptr = &( pxCurrentTCB[xPortGetCoreID()]->xNewLib_reent );
        }
        #endif /* configUSE_NEWLIB_REENTRANT */

        xNextTaskUnblockTime = portMAX_DELAY;
        xSchedulerRunning = pdTRUE;
        xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

        /* If configGENERATE_RUN_TIME_STATS is defined then the following
         * macro must be defined to configure the timer/counter used to generate
         * the run time counter time base.  NOTE:  If configGENERATE_RUN_TIME_STATS
         * is set to 0 and the following line fails to build then ensure you do not
         * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
         * FreeRTOSConfig.h file. */
        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

        traceTASK_SWITCHED_IN();

        /* Setting up the timer tick is hardware specific and thus in the
         * portable interface. */
        if( xPortStartScheduler() != pdFALSE )
        {
            /* Should not reach here as if the scheduler is running the
             * function will not return. */
        }
        else
        {
            /* Should only reach here if a task calls xTaskEndScheduler(). */
        }
    }
    else
    {
        /* This line will only be reached if the kernel could not be started,
         * because there was not enough FreeRTOS heap to create the idle task
         * or the timer task. */
        configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
    }

    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
     * meaning xIdleTaskHandle is not used anywhere else. */
    ( void ) xIdleTaskHandle[ 0 ];
}
2399 /*-----------------------------------------------------------*/
2400
/* Stop the kernel: interrupts are disabled first so no tick can arrive while
 * the port layer tears the scheduler down. */
void vTaskEndScheduler( void )
{
    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary.  The port
     * layer must ensure interrupts enable bit is left in the correct state. */
    portDISABLE_INTERRUPTS();
    xSchedulerRunning = pdFALSE;
    vPortEndScheduler();
}
2410 /*----------------------------------------------------------*/
2411
2412 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2413 //Return global reent struct if FreeRTOS isn't running,
__getreent(void)2414 struct _reent* __getreent(void) {
2415 //No lock needed because if this changes, we won't be running anymore.
2416 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2417 if (currTask==NULL) {
2418 //No task running. Return global struct.
2419 return _GLOBAL_REENT;
2420 } else {
2421 //We have a task; return its reentrant struct.
2422 return &currTask->xNewLib_reent;
2423 }
2424 }
2425 #endif
2426
2427
vTaskSuspendAll(void)2428 void vTaskSuspendAll( void )
2429 {
2430 /* A critical section is not required as the variable is of type
2431 * BaseType_t. Please read Richard Barry's reply in the following link to a
2432 * post in the FreeRTOS support forum before reporting this as a bug! -
2433 * https://goo.gl/wu4acr */
2434 unsigned state;
2435 state = portSET_INTERRUPT_MASK_FROM_ISR();
2436 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2437 portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
2438 }
2439 /*----------------------------------------------------------*/
2440
2441 #if ( configUSE_TICKLESS_IDLE != 0 )
2442
2443 #if ( configNUM_CORES > 1 )
2444
xHaveReadyTasks(void)2445 static BaseType_t xHaveReadyTasks( void )
2446 {
2447 for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
2448 {
2449 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
2450 {
2451 return pdTRUE;
2452 }
2453 else
2454 {
2455 mtCOVERAGE_TEST_MARKER();
2456 }
2457 }
2458 return pdFALSE;
2459 }
2460
2461 #endif // configNUM_CORES > 1
2462
    /* Return the number of ticks the system can spend in low power before the
     * next task is due to unblock, or 0 if sleeping is not possible now. */
    static TickType_t prvGetExpectedIdleTime( void )
    {
        TickType_t xReturn;

        taskENTER_CRITICAL();
        if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
        {
            /* A task above idle priority is running on this core - no sleep. */
            xReturn = 0;
        }
        #if configNUM_CORES > 1
            /* This function is called from Idle task; in single core case this
             * means that no higher priority tasks are ready to run, and we can
             * enter sleep.  In SMP case, there might be ready tasks waiting for
             * the other CPU, so need to check all ready lists.
             */
            else if( xHaveReadyTasks() )
            {
                xReturn = 0;
            }
        #endif // configNUM_CORES > 1
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > configNUM_CORES )
        {
            /* There are other idle priority tasks in the ready state.  If
             * time slicing is used then the very next tick interrupt must be
             * processed. */
            xReturn = 0;
        }
        else
        {
            /* Idle until the next task is due to leave the Blocked state. */
            xReturn = xNextTaskUnblockTime - xTickCount;
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }
2499
2500 #endif /* configUSE_TICKLESS_IDLE */
2501 /*----------------------------------------------------------*/
2502
xTaskResumeAll(void)2503 BaseType_t xTaskResumeAll( void )
2504 {
2505 TCB_t *pxTCB = NULL;
2506 BaseType_t xAlreadyYielded = pdFALSE;
2507 TickType_t xTicksToNextUnblockTime;
2508
2509 /* If scheduler state is `taskSCHEDULER_RUNNING` then this function does not match a
2510 * previous call to taskENTER_CRITICAL(). */
2511 configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_RUNNING );
2512
2513 /* It is possible that an ISR caused a task to be removed from an event
2514 * list while the scheduler was suspended. If this was the case then the
2515 * removed task will have been added to the xPendingReadyList. Once the
2516 * scheduler has been resumed it is safe to move all the pending ready
2517 * tasks from this list into their appropriate ready list. */
2518 taskENTER_CRITICAL();
2519 {
2520 --uxSchedulerSuspended[xPortGetCoreID()];
2521
2522 if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
2523 {
2524 if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
2525 {
2526 /* Move any readied tasks from the pending list into the
2527 * appropriate ready list. */
2528 while( listLIST_IS_EMPTY( &xPendingReadyList[xPortGetCoreID()] ) == pdFALSE )
2529 {
2530 pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[xPortGetCoreID()] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2531 ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
2532 ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
2533 prvAddTaskToReadyList( pxTCB );
2534
2535 /* If the moved task has a priority higher than the current
2536 * task then a yield must be performed. */
2537 if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
2538 {
2539 xYieldPending[xPortGetCoreID()] = pdTRUE;
2540 }
2541 else
2542 {
2543 mtCOVERAGE_TEST_MARKER();
2544 }
2545 }
2546
2547 if( pxTCB != NULL )
2548 {
2549 /* A task was unblocked while the scheduler was suspended,
2550 * which may have prevented the next unblock time from being
2551 * re-calculated, in which case re-calculate it now. Mainly
2552 * important for low power tickless implementations, where
2553 * this can prevent an unnecessary exit from low power
2554 * state. */
2555 prvResetNextTaskUnblockTime();
2556 }
2557
2558 /* If any ticks occurred while the scheduler was suspended then
2559 * they should be processed now. This ensures the tick count does
2560 * not slip, and that any delayed tasks are resumed at the correct
2561 * time. */
2562 while( xPendedTicks > ( TickType_t ) 0 )
2563 {
2564 /* Calculate how far into the future the next task will
2565 * leave the Blocked state because its timeout expired. If
2566 * there are no tasks due to leave the blocked state between
2567 * the time now and the time at which the tick count overflows
2568 * then xNextTaskUnblockTime will the tick overflow time.
2569 * This means xNextTaskUnblockTime can never be less than
2570 * xTickCount, and the following can therefore not
2571 * underflow. */
2572 configASSERT( xNextTaskUnblockTime >= xTickCount );
2573 xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;
2574
2575 /* Don't want to move the tick count more than the number
2576 of ticks that are pending, so cap if necessary. */
2577 if( xTicksToNextUnblockTime > xPendedTicks )
2578 {
2579 xTicksToNextUnblockTime = xPendedTicks;
2580 }
2581
2582 if( xTicksToNextUnblockTime == 0 )
2583 {
2584 /* xTicksToNextUnblockTime could be zero if the tick
2585 * count is about to overflow and xTicksToNetUnblockTime
2586 * holds the time at which the tick count will overflow
2587 * (rather than the time at which the next task will
2588 * unblock). Set to 1 otherwise xPendedTicks won't be
2589 * decremented below. */
2590 xTicksToNextUnblockTime = ( TickType_t ) 1;
2591 }
2592 else if( xTicksToNextUnblockTime > ( TickType_t ) 1)
2593 {
2594 /* Move the tick count one short of the next unblock
2595 * time, then call xTaskIncrementTick() to move the tick
2596 * count up to the next unblock time to unblock the task,
2597 * if any. This will also swap the blocked task and
2598 * overflow blocked task lists if necessary. */
2599 xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );
2600 }
2601 xYieldPending[xPortGetCoreID()] |= xTaskIncrementTick();
2602
                    /* Adjust for the number of ticks just added to
                     * xTickCount and go around the loop again if
                     * xPendedTicks is still greater than 0. */
2606 xPendedTicks -= xTicksToNextUnblockTime;
2607 }
2608
2609 if( xYieldPending[xPortGetCoreID()] != pdFALSE )
2610 {
2611 #if ( configUSE_PREEMPTION != 0 )
2612 {
2613 xAlreadyYielded = pdTRUE;
2614 }
2615 #endif
2616 taskYIELD_IF_USING_PREEMPTION();
2617 }
2618 else
2619 {
2620 mtCOVERAGE_TEST_MARKER();
2621 }
2622 }
2623 }
2624 else
2625 {
2626 mtCOVERAGE_TEST_MARKER();
2627 }
2628 }
2629 taskEXIT_CRITICAL();
2630
2631 return xAlreadyYielded;
2632 }
2633 /*-----------------------------------------------------------*/
2634
xTaskGetTickCount(void)2635 TickType_t xTaskGetTickCount( void )
2636 {
2637 TickType_t xTicks;
2638
2639 xTicks = xTickCount;
2640
2641 return xTicks;
2642 }
2643 /*-----------------------------------------------------------*/
2644
xTaskGetTickCountFromISR(void)2645 TickType_t xTaskGetTickCountFromISR( void )
2646 {
2647 TickType_t xReturn;
2648 UBaseType_t uxSavedInterruptStatus;
2649
2650 /* RTOS ports that support interrupt nesting have the concept of a maximum
2651 * system call (or maximum API call) interrupt priority. Interrupts that are
2652 * above the maximum system call priority are kept permanently enabled, even
2653 * when the RTOS kernel is in a critical section, but cannot make any calls to
2654 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2655 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2656 * failure if a FreeRTOS API function is called from an interrupt that has been
2657 * assigned a priority above the configured maximum system call priority.
2658 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2659 * that have been assigned a priority at or (logically) below the maximum
2660 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2661 * safe API to ensure interrupt entry is as fast and as simple as possible.
2662 * More information (albeit Cortex-M specific) is provided on the following
2663 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2664 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2665
2666 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2667 {
2668 xReturn = xTickCount;
2669 }
2670 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2671
2672 return xReturn;
2673 }
2674 /*-----------------------------------------------------------*/
2675
uxTaskGetNumberOfTasks(void)2676 UBaseType_t uxTaskGetNumberOfTasks( void )
2677 {
2678 /* A critical section is not required because the variables are of type
2679 * BaseType_t. */
2680 return uxCurrentNumberOfTasks;
2681 }
2682 /*-----------------------------------------------------------*/
2683
char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
    /* Return a pointer to the human readable name of the task identified by
     * xTaskToQuery.  Passing NULL queries the name of the calling task. */
    TCB_t * const pxTCB = prvGetTCBFromHandle( xTaskToQuery );

    configASSERT( pxTCB );

    return &( pxTCB->pcTaskName[ 0 ] );
}
2694 /*-----------------------------------------------------------*/
2695
2696 #if ( INCLUDE_xTaskGetHandle == 1 )
2697
prvSearchForNameWithinSingleList(List_t * pxList,const char pcNameToQuery[])2698 static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
2699 const char pcNameToQuery[] )
2700 {
2701 TCB_t * pxNextTCB, * pxFirstTCB, * pxReturn = NULL;
2702 UBaseType_t x;
2703 char cNextChar;
2704 BaseType_t xBreakLoop;
2705
2706 /* This function is called with the scheduler suspended. */
2707
2708 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2709 {
2710 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2711
2712 do
2713 {
2714 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2715
2716 /* Check each character in the name looking for a match or
2717 * mismatch. */
2718 xBreakLoop = pdFALSE;
2719
2720 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2721 {
2722 cNextChar = pxNextTCB->pcTaskName[ x ];
2723
2724 if( cNextChar != pcNameToQuery[ x ] )
2725 {
2726 /* Characters didn't match. */
2727 xBreakLoop = pdTRUE;
2728 }
2729 else if( cNextChar == ( char ) 0x00 )
2730 {
2731 /* Both strings terminated, a match must have been
2732 * found. */
2733 pxReturn = pxNextTCB;
2734 xBreakLoop = pdTRUE;
2735 }
2736 else
2737 {
2738 mtCOVERAGE_TEST_MARKER();
2739 }
2740
2741 if( xBreakLoop != pdFALSE )
2742 {
2743 break;
2744 }
2745 }
2746
2747 if( pxReturn != NULL )
2748 {
2749 /* The handle has been found. */
2750 break;
2751 }
2752 } while( pxNextTCB != pxFirstTCB );
2753 }
2754 else
2755 {
2756 mtCOVERAGE_TEST_MARKER();
2757 }
2758
2759 return pxReturn;
2760 }
2761
2762 #endif /* INCLUDE_xTaskGetHandle */
2763 /*-----------------------------------------------------------*/
2764
2765 #if ( INCLUDE_xTaskGetHandle == 1 )
2766
    /* Look up a task handle by name.  Searches every state list (ready,
     * delayed, overflow-delayed, and - when compiled in - suspended and
     * pending-deletion).  Returns NULL if no task with that name exists.
     * Note this is a linear search, so it is comparatively slow and intended
     * for debug/diagnostic use. */
    TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
        UBaseType_t uxQueue = configMAX_PRIORITIES;
        TCB_t * pxTCB;

        /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
        configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

        /* The ESP SMP port cannot suspend only one scheduler instance, so it
         * protects the search with a critical section instead of
         * vTaskSuspendAll() (tracked as IDF-3755). */
        #ifdef ESP_PLATFORM // IDF-3755
            taskENTER_CRITICAL();
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        {
            /* Search the ready lists, from the highest priority down. */
            do
            {
                uxQueue--;
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );

                if( pxTCB != NULL )
                {
                    /* Found the handle. */
                    break;
                }
            } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

            /* Search the delayed lists. */
            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
            }

            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the suspended list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
                }
            }
            #endif

            #if ( INCLUDE_vTaskDelete == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the deleted-but-not-yet-cleaned-up list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
                }
            }
            #endif
        }
        #ifdef ESP_PLATFORM // IDF-3755
            taskEXIT_CRITICAL();
        #else
            ( void ) xTaskResumeAll();
        #endif // ESP_PLATFORM

        return pxTCB;
    }
2833
2834 #endif /* INCLUDE_xTaskGetHandle */
2835 /*-----------------------------------------------------------*/
2836
2837 #if ( configUSE_TRACE_FACILITY == 1 )
2838
    /* Populate pxTaskStatusArray with a TaskStatus_t for every task in the
     * system (ready, blocked, deleted-pending-cleanup, suspended), and
     * optionally report the total run time via pulTotalRunTime.
     *
     * pxTaskStatusArray must have room for at least uxArraySize entries; if
     * uxArraySize is smaller than the current number of tasks NOTHING is
     * filled in and 0 is returned.  Returns the number of entries written. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      uint32_t * const pulTotalRunTime )
    {
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        /* ESP SMP port uses a critical section rather than suspending the
         * scheduler (IDF-3755). */
        #ifdef ESP_PLATFORM // IDF-3755
            taskENTER_CRITICAL();
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state, from highest priority downwards. */
                do
                {
                    uxQueue--;
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state (both delayed lists). */
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    /* Run time stats are disabled, so the only meaningful
                     * total is zero. */
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #ifdef ESP_PLATFORM // IDF-3755
            taskEXIT_CRITICAL();
        #else
            ( void ) xTaskResumeAll();
        #endif // ESP_PLATFORM

        return uxTask;
    }
2916
2917 #endif /* configUSE_TRACE_FACILITY */
2918 /*----------------------------------------------------------*/
2919
2920 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2921
xTaskGetIdleTaskHandle(void)2922 TaskHandle_t xTaskGetIdleTaskHandle( void )
2923 {
2924 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2925 * started, then xIdleTaskHandle will be NULL. */
2926 configASSERT( ( xIdleTaskHandle[xPortGetCoreID()] != NULL ) );
2927 return xIdleTaskHandle[xPortGetCoreID()];
2928 }
2929
xTaskGetIdleTaskHandleForCPU(UBaseType_t cpuid)2930 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2931 {
2932 configASSERT( cpuid < configNUM_CORES );
2933 configASSERT( ( xIdleTaskHandle[cpuid] != NULL ) );
2934 return xIdleTaskHandle[cpuid];
2935 }
2936 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2937 /*----------------------------------------------------------*/
2938
2939 /* This conditional compilation should use inequality to 0, not equality to 1.
2940 * This is to ensure vTaskStepTick() is available when user defined low power mode
2941 * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2942 * 1. */
2943 #if ( configUSE_TICKLESS_IDLE != 0 )
2944
    /* Correct the tick count value after a period during which the tick
     * was suppressed (tickless idle).  Note this does *not* call the tick
     * hook function for each stepped tick. */
    void vTaskStepTick( const TickType_t xTicksToJump )
    {
        taskENTER_CRITICAL();
        /* Jumping past xNextTaskUnblockTime would mean a blocked task's
         * timeout was missed while the tick was suppressed. */
        configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
        xTickCount += xTicksToJump;
        traceINCREASE_TICK_COUNT( xTicksToJump );
        taskEXIT_CRITICAL();
    }
2956
2957 #endif /* configUSE_TICKLESS_IDLE */
2958 /*----------------------------------------------------------*/
2959
/* Correct the tick count after the application has kept interrupts disabled
 * for an extended period, by queueing xTicksToCatchUp ticks to be processed
 * as pended ticks.  Returns pdTRUE if stepping the ticks caused a yield,
 * otherwise pdFALSE.
 *
 * NOTE(review): on ESP_PLATFORM the pended ticks are consumed elsewhere and
 * this function always returns pdFALSE (xYieldRequired is never set) - this
 * appears intentional for the IDF-3755 workaround, but confirm against the
 * IDF tracker before relying on the return value. */
BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
    #ifdef ESP_PLATFORM
        BaseType_t xYieldRequired = pdFALSE;
    #else
        BaseType_t xYieldOccurred;
    #endif // ESP_PLATFORM

    /* Must not be called with the scheduler suspended as the implementation
     * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
    configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );

    /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when
     * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
    #ifdef ESP_PLATFORM // IDF-3755
        taskENTER_CRITICAL();
    #else
        vTaskSuspendAll();
    #endif // ESP_PLATFORM
    xPendedTicks += xTicksToCatchUp;
    #ifdef ESP_PLATFORM // IDF-3755
        taskEXIT_CRITICAL();
        return xYieldRequired;
    #else
        xYieldOccurred = xTaskResumeAll();

        return xYieldOccurred;
    #endif // ESP_PLATFORM
}
2989 /*----------------------------------------------------------*/
2990
2991 #if ( INCLUDE_xTaskAbortDelay == 1 )
2992
    /* Force a task out of the Blocked state before its timeout expires.
     * Returns pdPASS if xTask was in the Blocked state and has been moved to
     * the Ready state, pdFAIL if it was not Blocked.  The unblocked task's
     * ucDelayAborted flag is set so it knows its wait ended early. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        configASSERT( pxTCB );

        /* ESP SMP port uses a critical section rather than suspending the
         * scheduler (IDF-3755). */
        #ifdef ESP_PLATFORM // IDF-3755
            taskENTER_CRITICAL();
        #else
            vTaskSuspendAll();
        #endif // ESP_PLATFORM
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list.  An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also?  If so remove it from
                 * the event list too.  Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used.  (On ESP_PLATFORM this nests inside the outer
                 * critical section - assumed re-entrant on this port.) */
                taskENTER_CRITICAL();
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL();

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    /* Preemption is on, but a context switch should only be
                     * performed if the unblocked task can run on this core and
                     * has a priority that is equal to or higher than the
                     * currently executing task. */
                    if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                    {
                        /* Pend the yield to be performed when the scheduler
                         * is unsuspended. */
                        xYieldPending[xPortGetCoreID()] = pdTRUE;
                    }
                    else if ( pxTCB->xCoreID != xPortGetCoreID() )
                    {
                        /* The task is pinned to the other core - let that core
                         * decide whether it needs to yield. */
                        taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_PREEMPTION */
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        #ifdef ESP_PLATFORM // IDF-3755
            taskEXIT_CRITICAL();
        #else
            ( void ) xTaskResumeAll();
        #endif // ESP_PLATFORM

        return xReturn;
    }
3079
3080 #endif /* INCLUDE_xTaskAbortDelay */
3081 /*----------------------------------------------------------*/
3082
/* Called by the portable layer each time a tick interrupt occurs (and by
 * xTaskResumeAll() when unwinding pended ticks).  Increments the tick count,
 * moves any tasks whose timeout has expired from the Blocked state to the
 * Ready state, and returns pdTRUE if a context switch should be performed. */
BaseType_t xTaskIncrementTick( void )
{
    TCB_t * pxTCB;
    TickType_t xItemValue;
    BaseType_t xSwitchRequired = pdFALSE;

    /* Only allow core 0 to increase the tick count in the case of
     * xPortSysTickHandler processing, but allow both cores to unwind
     * xPendedTicks during xTaskResumeAll. */

    if (xPortInIsrContext())
    {
        /* Tick hooks run on every core's tick interrupt, even though only
         * core 0 advances the tick count. */
        #if ( configUSE_TICK_HOOK == 1 )
        vApplicationTickHook();
        #endif /* configUSE_TICK_HOOK */
        #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
        esp_vApplicationTickHook();
        #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */

        if (xPortGetCoreID() != 0 )
        {
            /* Cores other than 0 always request a switch (time slicing) but
             * never modify the tick count from an ISR. */
            return pdTRUE;
        }
    }

    traceTASK_INCREMENT_TICK( xTickCount );

    if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
    {
        taskENTER_CRITICAL_ISR();
        /* Minor optimisation.  The tick count cannot change in this
         * block. */
        const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;

        /* Increment the RTOS tick, switching the delayed and overflowed
         * delayed lists if it wraps to 0. */
        xTickCount = xConstTickCount;

        if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
        {
            taskSWITCH_DELAYED_LISTS();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* See if this tick has made a timeout expire.  Tasks are stored in
         * the queue in the order of their wake time - meaning once one task
         * has been found whose block time has not expired there is no need to
         * look any further down the list. */
        if( xConstTickCount >= xNextTaskUnblockTime )
        {
            for( ; ; )
            {
                if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
                {
                    /* The delayed list is empty.  Set xNextTaskUnblockTime
                     * to the maximum possible value so it is extremely
                     * unlikely that the
                     * if( xTickCount >= xNextTaskUnblockTime ) test will pass
                     * next time through. */
                    xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    break;
                }
                else
                {
                    /* The delayed list is not empty, get the value of the
                     * item at the head of the delayed list.  This is the time
                     * at which the task at the head of the delayed list must
                     * be removed from the Blocked state. */
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );

                    if( xConstTickCount < xItemValue )
                    {
                        /* It is not time to unblock this item yet, but the
                         * item value is the time at which the task at the head
                         * of the blocked list must be removed from the Blocked
                         * state - so record the item value in
                         * xNextTaskUnblockTime. */
                        xNextTaskUnblockTime = xItemValue;
                        break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* It is time to remove the item from the Blocked state. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                    /* Is the task waiting on an event also?  If so remove
                     * it from the event list. */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Place the unblocked task into the appropriate ready
                     * list. */
                    prvAddTaskToReadyList( pxTCB );

                    /* A task being unblocked cannot cause an immediate
                     * context switch if preemption is turned off. */
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Preemption is on, but a context switch should
                         * only be performed if the unblocked task has a
                         * priority that is equal to or higher than the
                         * currently executing task. */
                        if( pxTCB->uxPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
                        {
                            xSwitchRequired = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_PREEMPTION */
                }
            }
        }

        /* Tasks of equal priority to the currently running task will share
         * processing time (time slice) if preemption is on, and the application
         * writer has not explicitly turned time slicing off. */
        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
        {
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[xPortGetCoreID()]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
        taskEXIT_CRITICAL_ISR();
    }
    else
    {
        /* The scheduler is suspended on this core - record the tick so it can
         * be processed later by xTaskResumeAll(). */
        ++xPendedTicks;
    }

    #if ( configUSE_PREEMPTION == 1 )
    {
        /* A yield may already have been pended by an API call made while the
         * scheduler was suspended - honour it now. */
        if( xYieldPending[xPortGetCoreID()] != pdFALSE )
        {
            xSwitchRequired = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_PREEMPTION */

    return xSwitchRequired;
}
3251 /*-----------------------------------------------------------*/
3252
3253 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3254
vTaskSetApplicationTaskTag(TaskHandle_t xTask,TaskHookFunction_t pxHookFunction)3255 void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
3256 {
3257 TCB_t * xTCB;
3258
3259 /* If xTask is NULL then it is the task hook of the calling task that is
3260 * getting set. */
3261 if( xTask == NULL )
3262 {
3263 xTCB = ( TCB_t * ) pxCurrentTCB[xPortGetCoreID()];
3264 }
3265 else
3266 {
3267 xTCB = xTask;
3268 }
3269
3270 /* Save the hook function in the TCB. A critical section is required as
3271 * the value can be accessed from an interrupt. */
3272 taskENTER_CRITICAL();
3273 {
3274 xTCB->pxTaskTag = pxHookFunction;
3275 }
3276 taskEXIT_CRITICAL();
3277 }
3278
3279 #endif /* configUSE_APPLICATION_TASK_TAG */
3280 /*-----------------------------------------------------------*/
3281
3282 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3283
xTaskGetApplicationTaskTag(TaskHandle_t xTask)3284 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
3285 {
3286 TCB_t * pxTCB;
3287 TaskHookFunction_t xReturn;
3288
3289 /* If xTask is NULL then set the calling task's hook. */
3290 pxTCB = prvGetTCBFromHandle( xTask );
3291
3292 /* Save the hook function in the TCB. A critical section is required as
3293 * the value can be accessed from an interrupt. */
3294 taskENTER_CRITICAL();
3295 {
3296 xReturn = pxTCB->pxTaskTag;
3297 }
3298 taskEXIT_CRITICAL();
3299
3300 return xReturn;
3301 }
3302
3303 #endif /* configUSE_APPLICATION_TASK_TAG */
3304 /*-----------------------------------------------------------*/
3305
3306 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3307
xTaskGetApplicationTaskTagFromISR(TaskHandle_t xTask)3308 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
3309 {
3310 TCB_t * pxTCB;
3311 TaskHookFunction_t xReturn;
3312 UBaseType_t uxSavedInterruptStatus;
3313
3314 /* If xTask is NULL then set the calling task's hook. */
3315 pxTCB = prvGetTCBFromHandle( xTask );
3316
3317 /* Save the hook function in the TCB. A critical section is required as
3318 * the value can be accessed from an interrupt. */
3319 portENTER_CRITICAL_ISR(&xTaskQueueMutex);
3320 {
3321 xReturn = pxTCB->pxTaskTag;
3322 }
3323 portEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3324
3325 return xReturn;
3326 }
3327
3328 #endif /* configUSE_APPLICATION_TASK_TAG */
3329 /*-----------------------------------------------------------*/
3330
3331 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3332
xTaskCallApplicationTaskHook(TaskHandle_t xTask,void * pvParameter)3333 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
3334 void * pvParameter )
3335 {
3336 TCB_t * xTCB;
3337 BaseType_t xReturn;
3338
3339 /* If xTask is NULL then we are calling our own task hook. */
3340 if( xTask == NULL )
3341 {
3342 xTCB = xTaskGetCurrentTaskHandle();
3343 }
3344 else
3345 {
3346 xTCB = xTask;
3347 }
3348
3349 if( xTCB->pxTaskTag != NULL )
3350 {
3351 xReturn = xTCB->pxTaskTag( pvParameter );
3352 }
3353 else
3354 {
3355 xReturn = pdFAIL;
3356 }
3357
3358 return xReturn;
3359 }
3360
3361 #endif /* configUSE_APPLICATION_TASK_TAG */
3362 /*-----------------------------------------------------------*/
3363
3364 #ifdef ESP_PLATFORM
3365 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
/* SMP replacement for taskSELECT_HIGHEST_PRIORITY_TASK(): pick the highest
 * priority ready task that (a) is not already running on the other core and
 * (b) is either pinned to this core or unpinned, and make it the current
 * task of this core.  Also refreshes uxTopReadyPriority and rotates the
 * chosen task to the back of its ready list for round-robin fairness. */
static void taskSelectHighestPriorityTaskSMP( void )
{
    /* This function is called from a critical section.  So some optimizations are made */
    BaseType_t uxCurPriority;       /* signed on purpose: loop condition relies on uxCurPriority >= 0 */
    BaseType_t xTaskScheduled = pdFALSE;
    BaseType_t xNewTopPrioritySet = pdFALSE;
    BaseType_t xCoreID = xPortGetCoreID(); /* Optimization: Read once */

    /* Search for tasks, starting form the highest ready priority.  If nothing is
     * found, we eventually default to the IDLE tasks at priority 0 */
    for ( uxCurPriority = uxTopReadyPriority; uxCurPriority >= 0 && xTaskScheduled == pdFALSE; uxCurPriority-- )
    {
        /* Check if current priority has one or more ready tasks.  Skip if none */
        if( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxCurPriority ] ) ) )
        {
            continue;
        }

        /* Save a copy of highest priority that has a ready state task */
        if( xNewTopPrioritySet == pdFALSE )
        {
            xNewTopPrioritySet = pdTRUE;
            uxTopReadyPriority = uxCurPriority;
        }

        /* We now search this priority's ready task list for a runnable task.
         * We always start searching from the head of the list, so we reset
         * pxIndex to point to the tail so that we start walking the list from
         * the first item */
        pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );

        /* Get the first item on the list */
        TCB_t * pxTCBCur;
        TCB_t * pxTCBFirst;
        listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
        pxTCBFirst = pxTCBCur;
        do
        {
            /* Check if the current task is currently being executed.  However, if
             * it's being executed by the current core, we can still schedule it.
             * Todo: Each task can store a xTaskRunState, instead of needing to
             * check each core */
            UBaseType_t ux;
            for( ux = 0; ux < ( UBaseType_t )configNUM_CORES; ux++ )
            {
                if ( ux == xCoreID )
                {
                    continue;
                }
                else if ( pxCurrentTCB[ux] == pxTCBCur )
                {
                    /* Current task is already being executed.  Get the next task */
                    goto get_next_task;
                }
            }

            /* Check if the current task has a compatible affinity */
            if ( ( pxTCBCur->xCoreID != xCoreID ) && ( pxTCBCur->xCoreID != tskNO_AFFINITY ) )
            {
                goto get_next_task;
            }

            /* The current task is runnable.  Schedule it */
            pxCurrentTCB[ xCoreID ] = pxTCBCur;
            xTaskScheduled = pdTRUE;

            /* Move the current tasks list item to the back of the list in order
             * to implement best effort round robin.  To do this, we need to reset
             * the pxIndex to point to the tail again. */
            pxReadyTasksLists[ uxCurPriority ].pxIndex = ( ListItem_t * ) &( pxReadyTasksLists[ uxCurPriority ].xListEnd );
            uxListRemove( &( pxTCBCur->xStateListItem ) );
            vListInsertEnd( &( pxReadyTasksLists[ uxCurPriority ] ), &( pxTCBCur->xStateListItem ) );
            break;

get_next_task:
            /* The current task cannot be scheduled.  Get the next task in the list */
            listGET_OWNER_OF_NEXT_ENTRY( pxTCBCur, &( pxReadyTasksLists[ uxCurPriority ] ) );
        } while( pxTCBCur != pxTCBFirst); /* Check to see if we've walked the entire list */
    }

    /* The idle task at priority 0 is always schedulable, so at this point a
     * task MUST have been selected. */
    assert( xTaskScheduled == pdTRUE );
}
3448 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
3449 #endif //ESP_PLATFORM
3450
/* Select the next task to run on the calling core and make it the current
 * task (pxCurrentTCB[core]).  If the scheduler is suspended on this core the
 * switch is deferred by pending a yield instead.  Also accumulates run-time
 * stats and performs stack overflow checks for the task being switched out. */
void vTaskSwitchContext( void )
{
    #ifdef ESP_PLATFORM
        /* vTaskSwitchContext is called either from:
         * - ISR dispatcher when return from an ISR (interrupts will already be disabled)
         * - vTaskSuspend() which is not in a critical section
         * Therefore, we enter a critical section ISR version to ensure safety */
        taskENTER_CRITICAL_ISR();
    #endif // ESP_PLATFORM
    if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
    {
        /* The scheduler is currently suspended - do not allow a context
         * switch.  Remember one was requested so xTaskResumeAll() can
         * perform it. */
        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
    }
    else
    {
        xYieldPending[ xPortGetCoreID() ] = pdFALSE;
        #ifdef ESP_PLATFORM
            /* Flag that this core is mid-switch while the new task is being
             * selected. */
            xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
        #endif // ESP_PLATFORM
        traceTASK_SWITCHED_OUT();

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
            #else
                ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
            #endif

            /* Add the amount of time the task has been running to the
             * accumulated time so far.  The time the task started running was
             * stored in ulTaskSwitchedInTime.  Note that there is no overflow
             * protection here so count values are only valid until the timer
             * overflows.  The guard against negative values is to protect
             * against suspect run time stat counter implementations - which
             * are provided by the application, not the kernel. */
            if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
            {
                pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
        }
        #endif /* configGENERATE_RUN_TIME_STATS */

        /* Check for stack overflow, if configured. */
        #ifdef ESP_PLATFORM
            taskFIRST_CHECK_FOR_STACK_OVERFLOW();
            taskSECOND_CHECK_FOR_STACK_OVERFLOW();
        #else
            taskCHECK_FOR_STACK_OVERFLOW();

            /* Before the currently running task is switched out, save its errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
            }
            #endif
        #endif // ESP_PLATFORM

        /* Select a new task to run using either the generic C or port
         * optimised asm code. */
        taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        traceTASK_SWITCHED_IN();

        #ifdef ESP_PLATFORM
            xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;
            #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
                /* Re-arm the stack watchpoint for the newly selected task. */
                vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
            #endif
        #else
            /* After the new task is switched in, update the global errno. */
            #if ( configUSE_POSIX_ERRNO == 1 )
            {
                FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
            }
            #endif

            #if ( configUSE_NEWLIB_REENTRANT == 1 )
            {
                /* Switch Newlib's _impure_ptr variable to point to the _reent
                 * structure specific to this task.
                 * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
                 * for additional information. */
                _impure_ptr = &( pxCurrentTCB->xNewLib_reent );
            }
            #endif /* configUSE_NEWLIB_REENTRANT */
        #endif // ESP_PLATFORM
    }
    #ifdef ESP_PLATFORM
        /* Exit the critical section previously entered */
        taskEXIT_CRITICAL_ISR();
    #endif // ESP_PLATFORM
}
3551 /*-----------------------------------------------------------*/
3552
/* Block the calling task on pxEventList for up to xTicksToWait ticks.
 * The task's event list item is inserted in priority order and the task is
 * then moved to the delayed list; an actual context switch happens later,
 * when the caller yields. */
void vTaskPlaceOnEventList( List_t * const pxEventList,
                            const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );
    taskENTER_CRITICAL();

    /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
     * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */

    /* Place the event list item of the TCB in the appropriate event list.
     * This is placed in the list in priority order so the highest priority task
     * is the first to be woken by the event.  The queue that contains the event
     * list is locked, preventing simultaneous access from interrupts. */
    vListInsert( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

    /* Also time out the task: add it to this core's delayed list. */
    prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait);
    taskEXIT_CRITICAL();
}
3571 /*-----------------------------------------------------------*/
3572
/* Block the calling task on an event-group style (unordered) event list.
 * xItemValue is stashed in the event list item (with the in-use marker bit
 * set) so the waiter's wait-condition survives the block. */
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
                                     const TickType_t xItemValue,
                                     const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );
    taskENTER_CRITICAL();

    /* Store the item value in the event list item.  It is safe to access the
     * event list item here as interrupts won't access the event list item of a
     * task that is not in the Blocked state. */
    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Place the event list item of the TCB at the end of the appropriate event
     * list.  It is safe to access the event list here because it is part of an
     * event group implementation - and interrupts don't access event groups
     * directly (instead they access them indirectly by pending function calls to
     * the task level). */
    vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

    prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
    taskEXIT_CRITICAL();
}
3595 /*-----------------------------------------------------------*/
3596
#if ( configUSE_TIMERS == 1 )

    /* Kernel-internal variant used by the timer task: block the calling task
     * on pxEventList, optionally indefinitely.  Not part of the public API. */
    void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
    {
        taskENTER_CRITICAL();
        configASSERT( pxEventList );

        /* This function should not be called by application code hence the
         * 'Restricted' in its name.  It is not part of the public API.  It is
         * designed for use by kernel code, and has special calling requirements -
         * it should be called with the scheduler suspended. */


        /* Place the event list item of the TCB in the appropriate event list.
         * In this case it is assumed that this is the only task that is going to
         * be waiting on this event list, so the faster vListInsertEnd() function
         * can be used in place of vListInsert. */
        vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

        /* If the task should block indefinitely then set the block time to a
         * value that will be recognised as an indefinite delay inside the
         * prvAddCurrentTaskToDelayedList() function. */
        if( xWaitIndefinitely != pdFALSE )
        {
            xTicksToWait = portMAX_DELAY;
        }

        traceTASK_DELAY_UNTIL( );
        prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
        taskEXIT_CRITICAL();
    }

#endif /* configUSE_TIMERS */
3630 /*-----------------------------------------------------------*/
3631
/* Unblock the highest-priority task waiting on pxEventList.  Returns pdTRUE
 * if the woken task should preempt the caller on this core (a yield is also
 * pended), otherwise pdFALSE.  SMP-aware: honours the woken task's core
 * affinity and may trigger a cross-core yield. */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
    TCB_t * pxUnblockedTCB;
    BaseType_t xReturn;
    BaseType_t xTaskCanBeReady;
    UBaseType_t i, uxTargetCPU;

    taskENTER_CRITICAL_ISR();
    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
     * called from a critical section within an ISR. */

    /* The event list is sorted in priority order, so the first in the list can
     * be removed as it is known to be the highest priority.  Remove the TCB from
     * the delayed list, and add it to the ready list.
     *
     * If an event is for a queue that is locked then this function will never
     * get called - the lock count on the queue will get modified instead.  This
     * means exclusive access to the event list is guaranteed here.
     *
     * This function assumes that a check has already been made to ensure that
     * pxEventList is not empty. */
    if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE )
    {
        pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        configASSERT( pxUnblockedTCB );
        ( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
    }
    else
    {
        /* Defensive check (ESP-IDF addition): the list turned out to be
         * empty, so there is nothing to unblock. */
        taskEXIT_CRITICAL_ISR();
        return pdFALSE;
    }

    /* Decide whether the task can be made ready immediately.  An unpinned
     * task needs at least one core with the scheduler running; a pinned task
     * needs its own core's scheduler running. */
    xTaskCanBeReady = pdFALSE;
    if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY )
    {
        uxTargetCPU = xPortGetCoreID();
        for (i = 0; i < configNUM_CORES; i++)
        {
            if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE )
            {
                xTaskCanBeReady = pdTRUE;
                break;
            }
        }
    }
    else
    {
        uxTargetCPU = pxUnblockedTCB->xCoreID;
        xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
    }

    if( xTaskCanBeReady )
    {
        ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
        prvAddTaskToReadyList( pxUnblockedTCB );
    }
    else
    {
        /* The delayed and ready lists cannot be accessed, so hold this task
         * pending until the scheduler is resumed on this CPU. */
        vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
    }

    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
    {
        /* Return true if the task removed from the event list has a higher
         * priority than the calling task.  This allows the calling task to know if
         * it should force a context switch now. */
        xReturn = pdTRUE;

        /* Mark that a yield is pending in case the user is not using the
         * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
    }
    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
    {
        /* The woken task belongs on the other core; poke that core so it can
         * evaluate whether to preempt.  The caller itself need not yield. */
        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
        xReturn = pdFALSE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    #if( configUSE_TICKLESS_IDLE != 0 )
    {
        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
        might be set to the blocked task's time out time.  If the task is
        unblocked for a reason other than a timeout xNextTaskUnblockTime is
        normally left unchanged, because it is automatically reset to a new
        value when the tick count equals xNextTaskUnblockTime.  However if
        tickless idling is used it might be more important to enter sleep mode
        at the earliest possible time - so reset xNextTaskUnblockTime here to
        ensure it is updated at the earliest possible time. */
        prvResetNextTaskUnblockTime();
    }
    #endif

    taskEXIT_CRITICAL_ISR();
    return xReturn;
}
3734 /*-----------------------------------------------------------*/
3735
/* Unblock the task that owns pxEventListItem (event-group path) and record
 * xItemValue in the item.  Unlike xTaskRemoveFromEventList() the caller
 * identifies the exact waiter, and the task is always made ready here. */
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue )
{
    TCB_t * pxUnblockedTCB;

    taskENTER_CRITICAL();

    /* Store the new item value in the event list. */
    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Remove the event list form the event flag.  Interrupts do not access
     * event flags. */
    pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
    configASSERT( pxUnblockedTCB );
    ( void ) uxListRemove( pxEventListItem );

    /* Remove the task from the delayed list and add it to the ready list.  The
     * scheduler is suspended so interrupts will not be accessing the ready
     * lists. */
    ( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
    prvAddTaskToReadyList( pxUnblockedTCB );

    if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
    {
        /* Mark that a yield is pending in case the user is not using the
         * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
    }
    else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
    {
        /* Woken task is pinned to the other core - ask that core to yield. */
        taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
    }

    taskEXIT_CRITICAL();
}
3771 /*-----------------------------------------------------------*/
3772
/* Capture the current tick state into pxTimeOut so that a later call to
 * xTaskCheckForTimeOut() can measure elapsed time, including across tick
 * counter overflows.  Public API - takes a critical section itself. */
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{
    configASSERT( pxTimeOut );

    /* Both fields must be sampled atomically with respect to the tick. */
    taskENTER_CRITICAL();
    pxTimeOut->xOverflowCount = xNumOfOverflows;
    pxTimeOut->xTimeOnEntering = xTickCount;
    taskEXIT_CRITICAL();
}
3783 /*-----------------------------------------------------------*/
3784
/* Kernel-internal twin of vTaskSetTimeOutState(): records the same tick
 * snapshot but takes no critical section, so the caller must already hold
 * one (or otherwise guarantee atomicity). */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
    /* For internal use only as it does not use a critical section. */
    pxTimeOut->xOverflowCount = xNumOfOverflows;
    pxTimeOut->xTimeOnEntering = xTickCount;
}
3791 /*-----------------------------------------------------------*/
3792
/* Determine whether the timeout captured in pxTimeOut has expired.
 * Returns pdTRUE on expiry (or aborted delay); otherwise decrements
 * *pxTicksToWait by the elapsed time, restarts the timeout reference, and
 * returns pdFALSE.  Overflow of the tick counter is handled via the
 * overflow count stored at vTaskSetTimeOutState() time. */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL();
    {
        /* Minor optimisation.  The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCB[xPortGetCoreID()]->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result. */
                pxCurrentTCB[xPortGetCoreID()]->ucDelayAborted = pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
        {
            /* The tick count is greater than the time at which
             * vTaskSetTimeout() was called, but has also overflowed since
             * vTaskSetTimeOut() was called.  It must have wrapped all the way
             * around and gone past again, so the full timeout period has
             * certainly elapsed since vTaskSetTimeout() was called. */
            xReturn = pdTRUE;
        }
        else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
        {
            /* Not a genuine timeout.  Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            /* Timed out: no wait budget remains. */
            *pxTicksToWait = 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
3855 /*-----------------------------------------------------------*/
3856
/* Record that a required yield could not be performed immediately; the
 * pending flag is acted on later (e.g. at the next tick) on this core. */
void vTaskMissedYield( void )
{
    xYieldPending[xPortGetCoreID()] = pdTRUE;
}
3861 /*-----------------------------------------------------------*/
3862
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Return the trace-facility task number previously assigned to xTask via
     * vTaskSetTaskNumber(), or 0 when xTask is NULL. */
    UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
    {
        TCB_t const * pxQueriedTCB = xTask;

        return ( pxQueriedTCB != NULL ) ? pxQueriedTCB->uxTaskNumber : ( UBaseType_t ) 0U;
    }

#endif /* configUSE_TRACE_FACILITY */
3884 /*-----------------------------------------------------------*/
3885
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Assign a trace-facility task number to xTask.  A NULL handle is
     * silently ignored. */
    void vTaskSetTaskNumber( TaskHandle_t xTask,
                             const UBaseType_t uxHandle )
    {
        if( xTask == NULL )
        {
            return;
        }

        ( ( TCB_t * ) xTask )->uxTaskNumber = uxHandle;
    }

#endif /* configUSE_TRACE_FACILITY */
3901
3902 /*
3903 * -----------------------------------------------------------
3904 * The Idle task.
3905 * ----------------------------------------------------------
3906 *
3907 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3908 * language extensions. The equivalent prototype for this function is:
3909 *
3910 * void prvIdleTask( void *pvParameters );
3911 *
3912 */
/* Body of the idle task.  One instance runs per core; it frees deleted
 * tasks' resources, optionally yields to peers at idle priority, runs the
 * application idle hook(s), and drives tickless idle when enabled. */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ; ; )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available.  If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced.  If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter.  If
             * the ready list at the idle priority contains more than one task
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            extern void vApplicationIdleHook( void );

            /* Call the user defined function from within the idle task.  This
             * allows the application designer to add background functionality
             * without the overhead of a separate task.
             * NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
             * CALL A FUNCTION THAT MIGHT BLOCK. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */
        #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
        {
            /* Call the esp-idf hook system */
            esp_vApplicationIdleHook();
        }
        #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */

        /* This conditional compilation should use inequality to 0, not equality
         * to 1.  This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task.  Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended.  The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                /* ESP-IDF uses a critical section where upstream suspends the
                 * scheduler - see IDF-3755. */
                #ifdef ESP_PLATFORM // IDF-3755
                    taskENTER_CRITICAL();
                #else
                    vTaskSuspendAll();
                #endif // ESP_PLATFORM
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #ifdef ESP_PLATFORM // IDF-3755
                    taskEXIT_CRITICAL();
                #else
                    ( void ) xTaskResumeAll();
                #endif // ESP_PLATFORM
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */
    }
}
4042 /*-----------------------------------------------------------*/
4043
#if ( configUSE_TICKLESS_IDLE != 0 )

    /* Called from within portSUPPRESS_TICKS_AND_SLEEP() to re-check whether
     * entering low power mode is still safe.  Returns eAbortSleep if a task
     * became ready or a yield is pending on this core, eNoTasksWaitingTimeout
     * if every application task is suspended/blocked indefinitely, otherwise
     * eStandardSleep. */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        /* The idle task exists in addition to the application tasks. */
        const UBaseType_t uxNonApplicationTasks = 1;
        eSleepModeStatus eReturn = eStandardSleep;

        taskENTER_CRITICAL();
        if( listCURRENT_LIST_LENGTH( &xPendingReadyList[xPortGetCoreID()] ) != 0 )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPending[xPortGetCoreID()] != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else
        {
            /* If all the tasks are in the suspended list (which might mean they
             * have an infinite block time rather than actually being suspended)
             * then it is safe to turn all clocks off and just wait for external
             * interrupts. */
            if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                eReturn = eNoTasksWaitingTimeout;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        return eReturn;
    }

#endif /* configUSE_TICKLESS_IDLE */
4084 /*-----------------------------------------------------------*/
4085
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

    #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )

        /* ESP-IDF extension: store a thread-local-storage pointer together with
         * a callback that is invoked when the task is deleted (so the stored
         * value can be freed).  Out-of-range indices are silently ignored. */
        void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
        {
            TCB_t *pxTCB;

            if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
            {
                taskENTER_CRITICAL();
                /* NULL handle means the calling task (prvGetTCBFromHandle). */
                pxTCB = prvGetTCBFromHandle( xTaskToSet );
                pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
                pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
                taskEXIT_CRITICAL();
            }
        }

        /* Standard API: same as above but registers no deletion callback. */
        void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
        {
            vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
        }


    #else
        /* Store a thread-local-storage pointer for the given task (NULL handle
         * means the calling task).  Out-of-range indices are silently ignored. */
        void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
                                                BaseType_t xIndex,
                                                void * pvValue )
        {
            TCB_t * pxTCB;

            if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
            {
                taskENTER_CRITICAL();
                pxTCB = prvGetTCBFromHandle( xTaskToSet );
                pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
                taskEXIT_CRITICAL();
            }
        }
    #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
4128 /*-----------------------------------------------------------*/
4129
#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )

    /* Fetch the thread-local-storage pointer stored at xIndex for the given
     * task (a NULL handle queries the calling task).  Returns NULL when the
     * index is out of range. */
    void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
                                               BaseType_t xIndex )
    {
        void * pvResult = NULL;

        if( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS )
        {
            TCB_t * pxQueriedTCB = prvGetTCBFromHandle( xTaskToQuery );
            pvResult = pxQueriedTCB->pvThreadLocalStoragePointers[ xIndex ];
        }

        return pvResult;
    }

#endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
4152 /*-----------------------------------------------------------*/
4153
#if ( portUSING_MPU_WRAPPERS == 1 )

    /* Replace the MPU region configuration of xTaskToModify (or of the
     * calling task when NULL is passed) with xRegions.  The port layer does
     * the actual register/settings update. */
    void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
                                  const MemoryRegion_t * const xRegions )
    {
        TCB_t * pxTCB;

        /* If null is passed in here then we are modifying the MPU settings of
         * the calling task. */
        pxTCB = prvGetTCBFromHandle( xTaskToModify );

        vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
    }

#endif /* portUSING_MPU_WRAPPERS */
4169 /*-----------------------------------------------------------*/
4170
prvInitialiseTaskLists(void)4171 static void prvInitialiseTaskLists( void )
4172 {
4173 UBaseType_t uxPriority;
4174
4175 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
4176 {
4177 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
4178 }
4179
4180 vListInitialise( &xDelayedTaskList1 );
4181 vListInitialise( &xDelayedTaskList2 );
4182
4183 #if ( configNUM_CORES > 1 )
4184 for(BaseType_t i = 0; i < configNUM_CORES; i++) {
4185 vListInitialise( &xPendingReadyList[ i ] );
4186 }
4187 #else
4188 vListInitialise( &xPendingReadyList[xPortGetCoreID()] );
4189 #endif
4190
4191 #if ( INCLUDE_vTaskDelete == 1 )
4192 {
4193 vListInitialise( &xTasksWaitingTermination );
4194 }
4195 #endif /* INCLUDE_vTaskDelete */
4196
4197 #if ( INCLUDE_vTaskSuspend == 1 )
4198 {
4199 vListInitialise( &xSuspendedTaskList );
4200 }
4201 #endif /* INCLUDE_vTaskSuspend */
4202
4203 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
4204 * using list2. */
4205 pxDelayedTaskList = &xDelayedTaskList1;
4206 pxOverflowDelayedTaskList = &xDelayedTaskList2;
4207 }
4208 /*-----------------------------------------------------------*/
4209
/* Called from the idle task: free the TCB/stack (and TLS, when delete
 * callbacks are enabled) of tasks that deleted themselves.  On a dual-core
 * system each core only cleans up tasks it is allowed to (its own affinity
 * or unpinned), and never a task that is still running on either core. */
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        BaseType_t xListIsEmpty;
        BaseType_t core = xPortGetCoreID();

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            TCB_t *pxTCB = NULL;

            taskENTER_CRITICAL();
            {
                xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
                if( xListIsEmpty == pdFALSE )
                {
                    /* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
                    be called on the core the process is pinned on, if any */
                    ListItem_t *target = listGET_HEAD_ENTRY(&xTasksWaitingTermination);
                    for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){    //Walk the list
                        TCB_t *tgt_tcb = ( TCB_t * )listGET_LIST_ITEM_OWNER(target);
                        int affinity = tgt_tcb->xCoreID;
                        //Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
                        if( pxCurrentTCB[core] == tgt_tcb || (configNUM_CORES > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
                            continue;   //Can't free memory of task that is still running
                        }
                        if(affinity == core || affinity == tskNO_AFFINITY){     //Find first item not pinned to other core
                            pxTCB = tgt_tcb;
                            break;
                        }
                    }
                    if(pxTCB != NULL){
                        /* target still points at the list item found above. */
                        ( void ) uxListRemove( target );    //Remove list item from list
                        --uxCurrentNumberOfTasks;
                        --uxDeletedTasksWaitingCleanUp;
                    }
                }
            }
            taskEXIT_CRITICAL();    //Need to call deletion callbacks outside critical section

            if (pxTCB != NULL) {    //Call deletion callbacks and free TCB memory
                #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
                    prvDeleteTLS( pxTCB );
                #endif
                prvDeleteTCB( pxTCB );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
                break;  //No TCB found that could be freed by this core, break out of loop
            }
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
4269 /*-----------------------------------------------------------*/
4270
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Populate *pxTaskStatus with information about xTask (NULL queries the
     * calling task).  xGetFreeStackSpace skips the relatively slow stack
     * high-water-mark scan when pdFALSE; eState short-circuits the state
     * lookup unless it is eInvalid. */
    void vTaskGetInfo( TaskHandle_t xTask,
                       TaskStatus_t * pxTaskStatus,
                       BaseType_t xGetFreeStackSpace,
                       eTaskState eState )
    {
        TCB_t * pxTCB;

        /* xTask is NULL then get the state of the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

        #if ( configTASKLIST_INCLUDE_COREID == 1 )
            pxTaskStatus->xCoreID = pxTCB->xCoreID;
        #endif /* configTASKLIST_INCLUDE_COREID */

        #if ( configUSE_MUTEXES == 1 )
        {
            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
        }
        #else
        {
            pxTaskStatus->uxBasePriority = 0;
        }
        #endif

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
        }
        #else
        {
            pxTaskStatus->ulRunTimeCounter = 0;
        }
        #endif

        /* Obtaining the task state is a little fiddly, so is only done if the
         * value of eState passed into this function is eInvalid - otherwise the
         * state is just set to whatever is passed in. */
        if( eState != eInvalid )
        {
            if( pxTCB == pxCurrentTCB[xPortGetCoreID()] )
            {
                pxTaskStatus->eCurrentState = eRunning;
            }
            else
            {
                pxTaskStatus->eCurrentState = eState;

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* If the task is in the suspended list then there is a
                     * chance it is actually just blocked indefinitely - so really
                     * it should be reported as being in the Blocked state. */
                    if( eState == eSuspended )
                    {
                        /* ESP-IDF uses a critical section where upstream
                         * suspends the scheduler - see IDF-3755. */
                        #ifdef ESP_PLATFORM // IDF-3755
                            taskENTER_CRITICAL();
                        #else
                            vTaskSuspendAll();
                        #endif // ESP_PLATFORM
                        {
                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                            {
                                pxTaskStatus->eCurrentState = eBlocked;
                            }
                        }
                        #ifdef ESP_PLATFORM // IDF-3755
                            taskEXIT_CRITICAL();
                        #else
                            ( void ) xTaskResumeAll();
                        #endif // ESP_PLATFORM
                    }
                }
                #endif /* INCLUDE_vTaskSuspend */
            }
        }
        else
        {
            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
        }

        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
         * parameter is provided to allow it to be skipped. */
        if( xGetFreeStackSpace != pdFALSE )
        {
            #if ( portSTACK_GROWTH > 0 )
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
            }
            #else
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
            }
            #endif
        }
        else
        {
            pxTaskStatus->usStackHighWaterMark = 0;
        }
    }

#endif /* configUSE_TRACE_FACILITY */
4380 /*-----------------------------------------------------------*/
4381
xTaskGetAffinity(TaskHandle_t xTask)4382 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
4383 {
4384 TCB_t *pxTCB;
4385
4386 pxTCB = prvGetTCBFromHandle( xTask );
4387
4388 return pxTCB->xCoreID;
4389 }
4390 /*-----------------------------------------------------------*/
4391
#if ( configUSE_TRACE_FACILITY == 1 )

    /* Fill consecutive entries of pxTaskStatusArray with the status of every
     * task in pxList, reporting each as being in state eState.  Returns the
     * number of entries written.  The caller guarantees the array is large
     * enough and that the lists cannot change while iterating. */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState )
    {
        configLIST_VOLATILE TCB_t * pxNextTCB, * pxFirstTCB;
        UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first owner so the do/while below knows when the
             * circular list has been fully traversed. */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

            /* Populate an TaskStatus_t structure within the
             * pxTaskStatusArray array for each task that is referenced from
             * pxList.  See the definition of TaskStatus_t in task.h for the
             * meaning of each TaskStatus_t structure member. */
            do
            {
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }

#endif /* configUSE_TRACE_FACILITY */
4425 /*-----------------------------------------------------------*/
4426
4427 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
4428
prvTaskCheckFreeStackSpace(const uint8_t * pucStackByte)4429 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
4430 {
4431 uint32_t ulCount = 0U;
4432
4433 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
4434 {
4435 pucStackByte -= portSTACK_GROWTH;
4436 ulCount++;
4437 }
4438
4439 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
4440
4441 return ( configSTACK_DEPTH_TYPE ) ulCount;
4442 }
4443
4444 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
4445 /*-----------------------------------------------------------*/
4446
4447 #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
4448
4449 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
4450 * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
4451 * user to determine the return type. It gets around the problem of the value
4452 * overflowing on 8-bit types without breaking backward compatibility for
4453 * applications that expect an 8-bit return type. */
uxTaskGetStackHighWaterMark2(TaskHandle_t xTask)4454 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
4455 {
4456 TCB_t * pxTCB;
4457 uint8_t * pucEndOfStack;
4458 configSTACK_DEPTH_TYPE uxReturn;
4459
4460 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
4461 * the same except for their return type. Using configSTACK_DEPTH_TYPE
4462 * allows the user to determine the return type. It gets around the
4463 * problem of the value overflowing on 8-bit types without breaking
4464 * backward compatibility for applications that expect an 8-bit return
4465 * type. */
4466
4467 pxTCB = prvGetTCBFromHandle( xTask );
4468
4469 #if portSTACK_GROWTH < 0
4470 {
4471 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
4472 }
4473 #else
4474 {
4475 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
4476 }
4477 #endif
4478
4479 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
4480
4481 return uxReturn;
4482 }
4483
4484 #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
4485 /*-----------------------------------------------------------*/
4486
4487 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
4488
uxTaskGetStackHighWaterMark(TaskHandle_t xTask)4489 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
4490 {
4491 TCB_t * pxTCB;
4492 uint8_t * pucEndOfStack;
4493 UBaseType_t uxReturn;
4494
4495 pxTCB = prvGetTCBFromHandle( xTask );
4496
4497 #if portSTACK_GROWTH < 0
4498 {
4499 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
4500 }
4501 #else
4502 {
4503 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
4504 }
4505 #endif
4506
4507 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
4508
4509 return uxReturn;
4510 }
4511
4512 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
4513 /*-----------------------------------------------------------*/
4514 #if (INCLUDE_pxTaskGetStackStart == 1)
4515
pxTaskGetStackStart(TaskHandle_t xTask)4516 uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
4517 {
4518 TCB_t *pxTCB;
4519 uint8_t* uxReturn;
4520
4521 pxTCB = prvGetTCBFromHandle( xTask );
4522 uxReturn = (uint8_t*)pxTCB->pxStack;
4523
4524 return uxReturn;
4525 }
4526
4527 #endif /* INCLUDE_pxTaskGetStackStart */
4528
4529 #if ( INCLUDE_vTaskDelete == 1 )
4530
/* Release all kernel-owned memory associated with a task that is being
 * deleted: port-specific TCB cleanup, the newlib reentrancy structure and
 * MPU settings (when enabled), and finally the stack and TCB themselves -
 * but only the parts that were dynamically allocated. */
static void prvDeleteTCB( TCB_t * pxTCB )
{
    /* This call is required specifically for the TriCore port.  It must be
     * above the vPortFree() calls.  The call is also used by ports/demos that
     * want to allocate and clean RAM statically. */
    portCLEAN_UP_TCB( pxTCB );

    /* Free up the memory allocated by the scheduler for the task.  It is up
     * to the task to free any memory allocated at the application level.
     * See the third party link http://www.nadler.com/embedded/newlibAndFreeRTOS.html
     * for additional information. */
    #if ( configUSE_NEWLIB_REENTRANT == 1 )
    {
        _reclaim_reent( &( pxTCB->xNewLib_reent ) );
    }
    #endif /* configUSE_NEWLIB_REENTRANT */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        /* Hand any MPU regions held for this task back to the port layer. */
        vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
    #endif

    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
    {
        /* The task can only have been allocated dynamically - free both
         * the stack and TCB. */
        vPortFree( pxTCB->pxStack );
        vPortFree( pxTCB );
    }
    #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
    {
        /* The task could have been allocated statically or dynamically, so
         * check what was statically allocated before trying to free the
         * memory. */
        if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
        {
            /* Both the stack and TCB were allocated dynamically, so both
             * must be freed. */
            vPortFree( pxTCB->pxStack );
            vPortFree( pxTCB );
        }
        else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
        {
            /* Only the stack was statically allocated, so the TCB is the
             * only memory that must be freed. */
            vPortFree( pxTCB );
        }
        else
        {
            /* Neither the stack nor the TCB were allocated dynamically, so
             * nothing needs to be freed. */
            configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
4587
4588 #endif /* INCLUDE_vTaskDelete */
4589 /*-----------------------------------------------------------*/
4590
4591 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
4592
/* Invoke the registered deletion callback (if any) for every thread local
 * storage slot of the task being deleted, passing the slot index and the
 * stored pointer so the application can release whatever it stored there. */
static void prvDeleteTLS( TCB_t *pxTCB )
{
    int iSlot;

    configASSERT( pxTCB );

    for( iSlot = 0; iSlot < configNUM_THREAD_LOCAL_STORAGE_POINTERS; iSlot++ )
    {
        if( pxTCB->pvThreadLocalStoragePointersDelCallback[ iSlot ] != NULL )
        {
            pxTCB->pvThreadLocalStoragePointersDelCallback[ iSlot ]( iSlot, pxTCB->pvThreadLocalStoragePointers[ iSlot ] );
        }
    }
}
4604
4605 #endif /* ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
4606 /*-----------------------------------------------------------*/
4607
4608
prvResetNextTaskUnblockTime(void)4609 static void prvResetNextTaskUnblockTime( void )
4610 {
4611 TCB_t * pxTCB;
4612
4613 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4614 {
4615 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
4616 * the maximum possible value so it is extremely unlikely that the
4617 * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
4618 * there is an item in the delayed list. */
4619 xNextTaskUnblockTime = portMAX_DELAY;
4620 }
4621 else
4622 {
4623 /* The new current delayed list is not empty, get the value of
4624 * the item at the head of the delayed list. This is the time at
4625 * which the task at the head of the delayed list should be removed
4626 * from the Blocked state. */
4627 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
4628 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
4629 }
4630 }
4631 /*-----------------------------------------------------------*/
4632
4633 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) || (configNUM_CORES > 1) )
4634
xTaskGetCurrentTaskHandle(void)4635 TaskHandle_t xTaskGetCurrentTaskHandle( void )
4636 {
4637 TaskHandle_t xReturn;
4638 unsigned state;
4639
4640 state = portSET_INTERRUPT_MASK_FROM_ISR();
4641 xReturn = pxCurrentTCB[ xPortGetCoreID() ];
4642 portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
4643
4644 return xReturn;
4645 }
4646
xTaskGetCurrentTaskHandleForCPU(BaseType_t cpuid)4647 TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
4648 {
4649 TaskHandle_t xReturn=NULL;
4650
4651 //Xtensa-specific: the pxCurrentPCB pointer is atomic so we shouldn't need a lock.
4652 if (cpuid < configNUM_CORES) {
4653 xReturn = pxCurrentTCB[ cpuid ];
4654 }
4655
4656 return xReturn;
4657 }
4658
4659 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4660 /*-----------------------------------------------------------*/
4661
4662 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
4663
xTaskGetSchedulerState(void)4664 BaseType_t xTaskGetSchedulerState( void )
4665 {
4666 BaseType_t xReturn;
4667 unsigned state;
4668
4669 state = portSET_INTERRUPT_MASK_FROM_ISR();
4670 if( xSchedulerRunning == pdFALSE )
4671 {
4672 xReturn = taskSCHEDULER_NOT_STARTED;
4673 }
4674 else
4675 {
4676 if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
4677 {
4678 xReturn = taskSCHEDULER_RUNNING;
4679 }
4680 else
4681 {
4682 xReturn = taskSCHEDULER_SUSPENDED;
4683 }
4684 }
4685 portCLEAR_INTERRUPT_MASK_FROM_ISR(state);
4686
4687 return xReturn;
4688 }
4689
4690 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4691 /*-----------------------------------------------------------*/
4692
4693 #if ( configUSE_MUTEXES == 1 )
4694
/* Temporarily raise the priority of the task holding a mutex
 * (pxMutexHolder) to the priority of the calling task, which is about to
 * block attempting to take that mutex.  Returns pdTRUE if inheritance
 * occurred now or had already occurred for this holder, pdFALSE otherwise.
 * Called from the queue implementation while the mutex's queue is locked. */
BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
{
    TCB_t * const pxMutexHolderTCB = pxMutexHolder;
    BaseType_t xReturn = pdFALSE;

    taskENTER_CRITICAL();
    /* If the mutex was given back by an interrupt while the queue was
     * locked then the mutex holder might now be NULL. _RB_ Is this still
     * needed as interrupts can no longer use mutexes? */
    if( pxMutexHolder != NULL )
    {
        /* If the holder of the mutex has a priority below the priority of
         * the task attempting to obtain the mutex then it will temporarily
         * inherit the priority of the task attempting to obtain the mutex. */
        if( pxMutexHolderTCB->uxPriority < pxCurrentTCB[xPortGetCoreID()]->uxPriority )
        {
            /* Adjust the mutex holder state to account for its new
             * priority.  Only reset the event list item value if the value is
             * not being used for anything else. */
            if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
            {
                listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[xPortGetCoreID()]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* If the task being modified is in the ready state it will need
             * to be moved into a new list. */
            if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
            {
                if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                {
                    /* It is known that the task is in its ready list so
                     * there is no need to check again and the port level
                     * reset macro can be called directly. */
                    portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Inherit the priority before being moved into the new list. */
                pxMutexHolderTCB->uxPriority = pxCurrentTCB[xPortGetCoreID()]->uxPriority;
                prvAddTaskToReadyList( pxMutexHolderTCB );
            }
            else
            {
                /* Holder is blocked or suspended - just inherit the
                 * priority; its state list position does not depend on
                 * priority. */
                pxMutexHolderTCB->uxPriority = pxCurrentTCB[xPortGetCoreID()]->uxPriority;
            }

            traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB[xPortGetCoreID()]->uxPriority );

            /* Inheritance occurred. */
            xReturn = pdTRUE;
        }
        else
        {
            if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB[xPortGetCoreID()]->uxPriority )
            {
                /* The base priority of the mutex holder is lower than the
                 * priority of the task attempting to take the mutex, but the
                 * current priority of the mutex holder is not lower than the
                 * priority of the task attempting to take the mutex.
                 * Therefore the mutex holder must have already inherited a
                 * priority, but inheritance would have occurred if that had
                 * not been the case. */
                xReturn = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
4781
4782 #endif /* configUSE_MUTEXES */
4783 /*-----------------------------------------------------------*/
4784
4785 #if ( configUSE_MUTEXES == 1 )
4786
/* Undo priority inheritance when the holder (which must be the calling
 * task) gives a mutex back.  The priority is only restored to the base
 * priority once the last held mutex is released.  Returns pdTRUE when the
 * priority changed, meaning the caller should force a context switch. */
BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
{
    TCB_t * const pxTCB = pxMutexHolder;
    BaseType_t xReturn = pdFALSE;

    taskENTER_CRITICAL();
    if( pxMutexHolder != NULL )
    {
        /* A task can only have an inherited priority if it holds the mutex.
         * If the mutex is held by a task then it cannot be given from an
         * interrupt, and if a mutex is given by the holding task then it must
         * be the running state task. */
        configASSERT( pxTCB == pxCurrentTCB[xPortGetCoreID()] );
        configASSERT( pxTCB->uxMutexesHeld );
        ( pxTCB->uxMutexesHeld )--;

        /* Has the holder of the mutex inherited the priority of another
         * task? */
        if( pxTCB->uxPriority != pxTCB->uxBasePriority )
        {
            /* Only disinherit if no other mutexes are held. */
            if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
            {
                /* A task can only have an inherited priority if it holds
                 * the mutex.  If the mutex is held by a task then it cannot be
                 * given from an interrupt, and if a mutex is given by the
                 * holding task then it must be the running state task.  Remove
                 * the holding task from the ready list. */
                if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                {
                    taskRESET_READY_PRIORITY( pxTCB->uxPriority );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Disinherit the priority before adding the task into the
                 * new ready list. */
                traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                pxTCB->uxPriority = pxTCB->uxBasePriority;

                /* Reset the event list item value.  It cannot be in use for
                 * any other purpose if this task is running, and it must be
                 * running to give back the mutex. */
                listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                prvAddTaskToReadyList( pxTCB );

                /* Return true to indicate that a context switch is required.
                 * This is only actually required in the corner case whereby
                 * multiple mutexes were held and the mutexes were given back
                 * in an order different to that in which they were taken.
                 * If a context switch did not occur when the first mutex was
                 * returned, even if a task was waiting on it, then a context
                 * switch should occur when the last mutex is returned whether
                 * a task is waiting on it or not. */
                xReturn = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
4863
4864 #endif /* configUSE_MUTEXES */
4865 /*-----------------------------------------------------------*/
4866
4867 #if ( configUSE_MUTEXES == 1 )
4868
/* Partially undo priority inheritance when a task waiting for a mutex times
 * out.  The holder's priority is lowered to the greater of its base
 * priority and the priority of the highest-priority task still waiting for
 * the mutex (uxHighestPriorityWaitingTask) - but only if the timed-out
 * mutex is the single mutex the holder owns, because with several mutexes
 * held it is not known which one caused the inheritance. */
void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                          UBaseType_t uxHighestPriorityWaitingTask )
{
    TCB_t * const pxTCB = pxMutexHolder;
    UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
    const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

    taskENTER_CRITICAL();
    if( pxMutexHolder != NULL )
    {
        /* If pxMutexHolder is not NULL then the holder must hold at least
         * one mutex. */
        configASSERT( pxTCB->uxMutexesHeld );

        /* Determine the priority to which the priority of the task that
         * holds the mutex should be set.  This will be the greater of the
         * holding task's base priority and the priority of the highest
         * priority task that is waiting to obtain the mutex. */
        if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
        {
            uxPriorityToUse = uxHighestPriorityWaitingTask;
        }
        else
        {
            uxPriorityToUse = pxTCB->uxBasePriority;
        }

        /* Does the priority need to change? */
        if( pxTCB->uxPriority != uxPriorityToUse )
        {
            /* Only disinherit if no other mutexes are held.  This is a
             * simplification in the priority inheritance implementation.  If
             * the task that holds the mutex is also holding other mutexes then
             * the other mutexes may have caused the priority inheritance. */
            if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
            {
                /* If a task has timed out because it already holds the
                 * mutex it was trying to obtain then it cannot of inherited
                 * its own priority. */
                configASSERT( pxTCB != pxCurrentTCB[xPortGetCoreID()] );

                /* Disinherit the priority, remembering the previous
                 * priority to facilitate determining the subject task's
                 * state. */
                traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                uxPriorityUsedOnEntry = pxTCB->uxPriority;
                pxTCB->uxPriority = uxPriorityToUse;

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the running task is not the task that holds the mutex
                 * then the task that holds the mutex could be in either the
                 * Ready, Blocked or Suspended states.  Only remove the task
                 * from its current state list if it is in the Ready state as
                 * the task's priority is going to change and there is one
                 * Ready list per priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
    taskEXIT_CRITICAL();
}
4971
4972 #endif /* configUSE_MUTEXES */
4973 /*-----------------------------------------------------------*/
4974
4975 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4976
vTaskEnterCritical(void)4977 void vTaskEnterCritical( void )
4978 {
4979 portDISABLE_INTERRUPTS();
4980
4981 if( xSchedulerRunning != pdFALSE )
4982 {
4983 ( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting )++;
4984
4985 /* This is not the interrupt safe version of the enter critical
4986 * function so assert() if it is being called from an interrupt
4987 * context. Only API functions that end in "FromISR" can be used in an
4988 * interrupt. Only assert if the critical nesting count is 1 to
4989 * protect against recursive calls if the assert function also uses a
4990 * critical section. */
4991 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting == 1 )
4992 {
4993 portASSERT_IF_IN_ISR();
4994 }
4995 }
4996 else
4997 {
4998 mtCOVERAGE_TEST_MARKER();
4999 }
5000 }
5001
5002 #endif /* portCRITICAL_NESTING_IN_TCB */
5003 /*-----------------------------------------------------------*/
5004
5005 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
5006
vTaskExitCritical(void)5007 void vTaskExitCritical( void )
5008 {
5009 if( xSchedulerRunning != pdFALSE )
5010 {
5011 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting > 0U )
5012 {
5013 ( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting )--;
5014
5015 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting == 0U )
5016 {
5017 portENABLE_INTERRUPTS();
5018 }
5019 else
5020 {
5021 mtCOVERAGE_TEST_MARKER();
5022 }
5023 }
5024 else
5025 {
5026 mtCOVERAGE_TEST_MARKER();
5027 }
5028 }
5029 else
5030 {
5031 mtCOVERAGE_TEST_MARKER();
5032 }
5033 }
5034
5035 #endif /* portCRITICAL_NESTING_IN_TCB */
5036 /*-----------------------------------------------------------*/
5037
5038 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
5039
prvWriteNameToBuffer(char * pcBuffer,const char * pcTaskName)5040 static char * prvWriteNameToBuffer( char * pcBuffer,
5041 const char * pcTaskName )
5042 {
5043 size_t x;
5044
5045 /* Start by copying the entire string. */
5046 strcpy( pcBuffer, pcTaskName );
5047
5048 /* Pad the end of the string with spaces to ensure columns line up when
5049 * printed out. */
5050 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
5051 {
5052 pcBuffer[ x ] = ' ';
5053 }
5054
5055 /* Terminate. */
5056 pcBuffer[ x ] = ( char ) 0x00;
5057
5058 /* Return the new end of string. */
5059 return &( pcBuffer[ x ] );
5060 }
5061
5062 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
5063 /*-----------------------------------------------------------*/
5064
5065 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
5066
/* Format a human readable table of all tasks (name, state, priority, stack
 * high water mark, task number and - optionally - core id) into the
 * caller-supplied pcWriteBuffer.  The buffer must be large enough; no
 * bounds checking is performed (sprintf is used). */
void vTaskList( char * pcWriteBuffer )
{
    TaskStatus_t * pxTaskStatusArray;
    UBaseType_t uxArraySize, x;
    char cStatus;

    /*
     * PLEASE NOTE:
     *
     * This function is provided for convenience only, and is used by many
     * of the demo applications.  Do not consider it to be part of the
     * scheduler.
     *
     * vTaskList() calls uxTaskGetSystemState(), then formats part of the
     * uxTaskGetSystemState() output into a human readable table that
     * displays task names, states and stack usage.
     *
     * vTaskList() has a dependency on the sprintf() C library function that
     * might bloat the code size, use a lot of stack, and provide different
     * results on different platforms.  An alternative, tiny, third party,
     * and limited functionality implementation of sprintf() is provided in
     * many of the FreeRTOS/Demo sub-directories in a file called
     * printf-stdarg.c (note printf-stdarg.c does not provide a full
     * snprintf() implementation!).
     *
     * It is recommended that production systems call uxTaskGetSystemState()
     * directly to get access to raw stats data, rather than indirectly
     * through a call to vTaskList().
     */


    /* Make sure the write buffer does not contain a string. */
    *pcWriteBuffer = ( char ) 0x00;

    /* Take a snapshot of the number of tasks in case it changes while this
     * function is executing. */
    uxArraySize = uxCurrentNumberOfTasks;

    /* Allocate an array index for each task.  NOTE!  if
     * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
     * equate to NULL. */
    pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

    if( pxTaskStatusArray != NULL )
    {
        /* Generate the (binary) data. */
        uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

        /* Create a human readable table from the binary data. */
        for( x = 0; x < uxArraySize; x++ )
        {
            /* Map each task state to its single-character table symbol. */
            switch( pxTaskStatusArray[ x ].eCurrentState )
            {
                case eRunning:
                    cStatus = tskRUNNING_CHAR;
                    break;

                case eReady:
                    cStatus = tskREADY_CHAR;
                    break;

                case eBlocked:
                    cStatus = tskBLOCKED_CHAR;
                    break;

                case eSuspended:
                    cStatus = tskSUSPENDED_CHAR;
                    break;

                case eDeleted:
                    cStatus = tskDELETED_CHAR;
                    break;

                case eInvalid: /* Fall through. */
                default:       /* Should not get here, but it is included
                                * to prevent static checking errors. */
                    cStatus = ( char ) 0x00;
                    break;
            }

            /* Write the task name to the string, padding with spaces so it
             * can be printed in tabular form more easily. */
            pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

            /* Write the rest of the string.  The ESP-IDF build can
             * optionally append the core id column. */
            #if configTASKLIST_INCLUDE_COREID
                sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\t%hd\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber, ( int ) pxTaskStatusArray[ x ].xCoreID );
            #else
                sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
            #endif
            pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
        }

        /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
         * is 0 then vPortFree() will be #defined to nothing. */
        vPortFree( pxTaskStatusArray );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
5169
5170 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
5171 /*----------------------------------------------------------*/
5172
5173 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
5174
/* Format a human readable table of per-task run time statistics (absolute
 * run time counter and percentage of total run time) into the
 * caller-supplied pcWriteBuffer.  Requires configGENERATE_RUN_TIME_STATS
 * and configUSE_TRACE_FACILITY.  No bounds checking is performed on the
 * buffer (sprintf is used). */
void vTaskGetRunTimeStats( char * pcWriteBuffer )
{
    TaskStatus_t * pxTaskStatusArray;
    UBaseType_t uxArraySize, x;
    uint32_t ulTotalTime, ulStatsAsPercentage;

    #if ( configUSE_TRACE_FACILITY != 1 )
    {
        #error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
    }
    #endif

    /*
     * PLEASE NOTE:
     *
     * This function is provided for convenience only, and is used by many
     * of the demo applications.  Do not consider it to be part of the
     * scheduler.
     *
     * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
     * of the uxTaskGetSystemState() output into a human readable table that
     * displays the amount of time each task has spent in the Running state
     * in both absolute and percentage terms.
     *
     * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
     * function that might bloat the code size, use a lot of stack, and
     * provide different results on different platforms.  An alternative,
     * tiny, third party, and limited functionality implementation of
     * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
     * a file called printf-stdarg.c (note printf-stdarg.c does not provide
     * a full snprintf() implementation!).
     *
     * It is recommended that production systems call uxTaskGetSystemState()
     * directly to get access to raw stats data, rather than indirectly
     * through a call to vTaskGetRunTimeStats().
     */

    /* Make sure the write buffer does not contain a string. */
    *pcWriteBuffer = ( char ) 0x00;

    /* Take a snapshot of the number of tasks in case it changes while this
     * function is executing. */
    uxArraySize = uxCurrentNumberOfTasks;

    /* Allocate an array index for each task.  NOTE!  If
     * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
     * equate to NULL. */
    pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

    if( pxTaskStatusArray != NULL )
    {
        /* Generate the (binary) data. */
        uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

        /* For percentage calculations. */
        ulTotalTime /= 100UL;

        /* Avoid divide by zero errors. */
        if( ulTotalTime > 0UL )
        {
            /* Create a human readable table from the binary data. */
            for( x = 0; x < uxArraySize; x++ )
            {
                /* What percentage of the total run time has the task used?
                 * This will always be rounded down to the nearest integer.
                 * ulTotalRunTimeDiv100 has already been divided by 100. */
                ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

                /* Write the task name to the string, padding with
                 * spaces so it can be printed in tabular form more
                 * easily. */
                pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

                if( ulStatsAsPercentage > 0UL )
                {
                    #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                    {
                        sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
                    }
                    #else
                    {
                        /* sizeof( int ) == sizeof( long ) so a smaller
                         * printf() library can be used. */
                        sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                    }
                    #endif
                }
                else
                {
                    /* If the percentage is zero here then the task has
                     * consumed less than 1% of the total run time. */
                    #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
                    {
                        sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
                    }
                    #else
                    {
                        /* sizeof( int ) == sizeof( long ) so a smaller
                         * printf() library can be used. */
                        sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
                    }
                    #endif
                }

                pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* Free the array again.  NOTE!  If configSUPPORT_DYNAMIC_ALLOCATION
         * is 0 then vPortFree() will be #defined to nothing. */
        vPortFree( pxTaskStatusArray );
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
5296
5297 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
5298 /*-----------------------------------------------------------*/
5299
uxTaskResetEventItemValue(void)5300 TickType_t uxTaskResetEventItemValue( void )
5301 {
5302 TickType_t uxReturn;
5303
5304 taskENTER_CRITICAL();
5305 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
5306
5307 /* Reset the event list item to its normal value - so it can be used with
5308 * queues and semaphores. */
5309 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5310 taskEXIT_CRITICAL();
5311
5312 return uxReturn;
5313 }
5314 /*-----------------------------------------------------------*/
5315
5316 #if ( configUSE_MUTEXES == 1 )
5317
pvTaskIncrementMutexHeldCount(void)5318 void *pvTaskIncrementMutexHeldCount( void )
5319 {
5320 TCB_t * curTCB;
5321
5322 /* If xSemaphoreCreateMutex() is called before any tasks have been created
5323 * then pxCurrentTCB will be NULL. */
5324 taskENTER_CRITICAL();
5325 if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
5326 {
5327 ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
5328 }
5329 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
5330 taskEXIT_CRITICAL();
5331
5332 return curTCB;
5333 }
5334
5335 #endif /* configUSE_MUTEXES */
5336 /*-----------------------------------------------------------*/
5337
5338 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5339
5340 #ifdef ESP_PLATFORM // IDF-3851
5341 // included here for backward binary compatibility
5342 #undef ulTaskNotifyTake
ulTaskNotifyTake(BaseType_t xClearCountOnExit,TickType_t xTicksToWait)5343 uint32_t ulTaskNotifyTake(BaseType_t xClearCountOnExit,
5344 TickType_t xTicksToWait )
5345 {
5346 return ulTaskGenericNotifyTake(tskDEFAULT_INDEX_TO_NOTIFY, xClearCountOnExit, xTicksToWait);
5347 }
5348 #endif // ESP-PLATFORM
5349
/* Block (for up to xTicksToWait ticks) until the notification value at index
 * uxIndexToWait becomes non-zero, then consume it: either zero it
 * (xClearCountOnExit != pdFALSE, binary semaphore style) or decrement it by
 * one (counting semaphore style).  Returns the value the notification held
 * before being consumed; 0 therefore indicates the wait timed out. */
uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait,
                                  BaseType_t xClearCountOnExit,
                                  TickType_t xTicksToWait )
{
    uint32_t ulReturn;

    configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    taskENTER_CRITICAL();
    {
        /* Only block if the notification count is not already non-zero. */
        if( pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] == 0UL )
        {
            /* Mark this task as waiting for a notification. */
            pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
                traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait );

                /* All ports are written to allow a yield in a critical
                 * section (some will yield immediately, others wait until the
                 * critical section exits) - but it is not something that
                 * application code should ever do. */
                portYIELD_WITHIN_API();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL();

    /* Execution arrives here either immediately (a notification was already
     * pending, or no block time was given) or after this task has been
     * unblocked by a notification or a timeout. */
    taskENTER_CRITICAL();
    {
        traceTASK_NOTIFY_TAKE( uxIndexToWait );

        /* The notification value acts like the count of a counting
         * semaphore. */
        ulReturn = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ];

        if( ulReturn != 0UL )
        {
            if( xClearCountOnExit != pdFALSE )
            {
                /* Binary style - consume the entire count. */
                pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] = 0UL;
            }
            else
            {
                /* Counting style - consume a single count. */
                pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
    }
    taskEXIT_CRITICAL();

    return ulReturn;
}
5416
5417 #endif /* configUSE_TASK_NOTIFICATIONS */
5418 /*-----------------------------------------------------------*/
5419
5420 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5421
5422 #ifdef ESP_PLATFORM // IDF-3851
5423 // included for backward compatibility
5424 #undef xTaskNotifyWait
xTaskNotifyWait(uint32_t ulBitsToClearOnEntry,uint32_t ulBitsToClearOnExit,uint32_t * pulNotificationValue,TickType_t xTicksToWait)5425 BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry,
5426 uint32_t ulBitsToClearOnExit,
5427 uint32_t * pulNotificationValue,
5428 TickType_t xTicksToWait )
5429 {
5430 return xTaskGenericNotifyWait(tskDEFAULT_INDEX_TO_NOTIFY, ulBitsToClearOnEntry, ulBitsToClearOnExit, pulNotificationValue, xTicksToWait);
5431 }
5432 #endif // ESP-PLATFORM
5433
/* Block (for up to xTicksToWait ticks) until a notification is pending at
 * index uxIndexToWait.  ulBitsToClearOnEntry is cleared from the notification
 * value before waiting, ulBitsToClearOnExit after a successful wait.  The
 * (possibly updated) notification value is written to *pulNotificationValue
 * when that pointer is non-NULL.  Returns pdTRUE if a notification was
 * received, pdFALSE on timeout. */
BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
                                   uint32_t ulBitsToClearOnEntry,
                                   uint32_t ulBitsToClearOnExit,
                                   uint32_t * pulNotificationValue,
                                   TickType_t xTicksToWait )
{
    BaseType_t xReturn;

    configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    taskENTER_CRITICAL();
    {
        /* Only block if a notification is not already pending. */
        if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
        {
            /* Clear bits in the task's notification value as bits may get
             * set by the notifying task or interrupt. This can be used to
             * clear the value to zero. */
            pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;

            /* Mark this task as waiting for a notification. */
            pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

            if( xTicksToWait > ( TickType_t ) 0 )
            {
                prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait);
                traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait );

                /* All ports are written to allow a yield in a critical
                 * section (some will yield immediately, others wait until the
                 * critical section exits) - but it is not something that
                 * application code should ever do. */
                portYIELD_WITHIN_API();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL();

    /* Execution arrives here either immediately (a notification was already
     * pending, or no block time was given) or after this task has been
     * unblocked by a notification or a timeout. */
    taskENTER_CRITICAL();
    {
        traceTASK_NOTIFY_WAIT( uxIndexToWait );

        if( pulNotificationValue != NULL )
        {
            /* Output the current notification value, which may or may not
             * have changed. */
            *pulNotificationValue = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ];
        }

        /* If ucNotifyValue is set then either the task never entered the
         * blocked state (because a notification was already pending) or the
         * task unblocked because of a notification. Otherwise the task
         * unblocked because of a timeout. */
        if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
        {
            /* A notification was not received. */
            xReturn = pdFALSE;
        }
        else
        {
            /* A notification was already pending or a notification was
             * received while the task was waiting. */
            pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
            xReturn = pdTRUE;
        }

        pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
5514
5515 #endif /* configUSE_TASK_NOTIFICATIONS */
5516 /*-----------------------------------------------------------*/
5517
5518 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5519
/* Send a notification to xTaskToNotify at index uxIndexToNotify, updating the
 * notification value according to eAction (set bits, increment, overwrite,
 * write-if-unread, or no change).  If the target task is blocked waiting on
 * that notification it is made ready, yielding this core or signalling the
 * other core as required.  Returns pdPASS, except pdFAIL when
 * eSetValueWithoutOverwrite could not write because a notification was
 * already pending.  The pre-update value is stored to
 * *pulPreviousNotificationValue when that pointer is non-NULL. */
BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                               UBaseType_t uxIndexToNotify,
                               uint32_t ulValue,
                               eNotifyAction eAction,
                               uint32_t * pulPreviousNotificationValue )
{
    TCB_t * pxTCB;
    BaseType_t xReturn = pdPASS;
    uint8_t ucOriginalNotifyState;

    configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
    configASSERT( xTaskToNotify );
    pxTCB = xTaskToNotify;

    taskENTER_CRITICAL();
    {
        if( pulPreviousNotificationValue != NULL )
        {
            /* Capture the value before it is modified below. */
            *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
        }

        ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

        pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

        switch( eAction )
        {
            case eSetBits:
                pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                break;

            case eIncrement:
                ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                break;

            case eSetValueWithOverwrite:
                pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                break;

            case eSetValueWithoutOverwrite:

                /* Only write if the previous notification has been read. */
                if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                {
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                }
                else
                {
                    /* The value could not be written to the task. */
                    xReturn = pdFAIL;
                }

                break;

            case eNoAction:

                /* The task is being notified without its notify value being
                 * updated. */
                break;

            default:

                /* Should not get here if all enums are handled.
                 * Artificially force an assert by testing a value the
                 * compiler can't assume is const. */
                configASSERT( pxTCB->ulNotifiedValue[ uxIndexToNotify ] == ~0UL );

                break;
        }

        traceTASK_NOTIFY( uxIndexToNotify );

        /* If the task is in the blocked state specifically to wait for a
         * notification then unblock it now. */
        if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
        {
            ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
            prvAddTaskToReadyList( pxTCB );

            /* The task should not have been on an event list. */
            configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

            #if ( configUSE_TICKLESS_IDLE != 0 )
            {
                /* If a task is blocked waiting for a notification then
                 * xNextTaskUnblockTime might be set to the blocked task's time
                 * out time. If the task is unblocked for a reason other than
                 * a timeout xNextTaskUnblockTime is normally left unchanged,
                 * because it will automatically get reset to a new value when
                 * the tick count equals xNextTaskUnblockTime. However if
                 * tickless idling is used it might be more important to enter
                 * sleep mode at the earliest possible time - so reset
                 * xNextTaskUnblockTime here to ensure it is updated at the
                 * earliest possible time. */
                prvResetNextTaskUnblockTime();
            }
            #endif

            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
            {
                /* The notified task has a priority above the currently
                 * executing task so a yield is required. */
                portYIELD_WITHIN_API();
            }
            else if ( pxTCB->xCoreID != xPortGetCoreID() )
            {
                /* The unblocked task is affined to the other core - let that
                 * core decide whether to preempt. */
                taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
5641
5642 #endif /* configUSE_TASK_NOTIFICATIONS */
5643 /*-----------------------------------------------------------*/
5644
5645 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5646
/* Interrupt-safe version of xTaskGenericNotify().  Performs the same value
 * update selected by eAction, but if the scheduler is suspended the woken
 * task is parked on xPendingReadyList instead of the ready list, and instead
 * of yielding directly *pxHigherPriorityTaskWoken is set to pdTRUE so the ISR
 * can request a context switch on exit.  Returns pdPASS, or pdFAIL when
 * eSetValueWithoutOverwrite found a notification already pending. */
BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                      UBaseType_t uxIndexToNotify,
                                      uint32_t ulValue,
                                      eNotifyAction eAction,
                                      uint32_t * pulPreviousNotificationValue,
                                      BaseType_t * pxHigherPriorityTaskWoken )
{
    TCB_t * pxTCB;
    uint8_t ucOriginalNotifyState;
    BaseType_t xReturn = pdPASS;

    configASSERT( xTaskToNotify );
    configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    /* RTOS ports that support interrupt nesting have the concept of a
     * maximum system call (or maximum API call) interrupt priority.
     * Interrupts that are above the maximum system call priority are keep
     * permanently enabled, even when the RTOS kernel is in a critical section,
     * but cannot make any calls to FreeRTOS API functions. If configASSERT()
     * is defined in FreeRTOSConfig.h then
     * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has
     * been assigned a priority above the configured maximum system call
     * priority. Only FreeRTOS functions that end in FromISR can be called
     * from interrupts that have been assigned a priority at or (logically)
     * below the maximum system call interrupt priority. FreeRTOS maintains a
     * separate interrupt safe API to ensure interrupt entry is as fast and as
     * simple as possible. More information (albeit Cortex-M specific) is
     * provided on the following link:
     * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    pxTCB = xTaskToNotify;

    taskENTER_CRITICAL_ISR();
    {
        if( pulPreviousNotificationValue != NULL )
        {
            /* Capture the value before it is modified below. */
            *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
        }

        ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
        pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

        switch( eAction )
        {
            case eSetBits:
                pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                break;

            case eIncrement:
                ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                break;

            case eSetValueWithOverwrite:
                pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                break;

            case eSetValueWithoutOverwrite:

                /* Only write if the previous notification has been read. */
                if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                {
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                }
                else
                {
                    /* The value could not be written to the task. */
                    xReturn = pdFAIL;
                }

                break;

            case eNoAction:

                /* The task is being notified without its notify value being
                 * updated. */
                break;

            default:

                /* Should not get here if all enums are handled.
                 * Artificially force an assert by testing a value the
                 * compiler can't assume is const. */
                configASSERT( pxTCB->ulNotifiedValue[ uxIndexToNotify ] == ~0UL );
                break;
        }

        traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

        /* If the task is in the blocked state specifically to wait for a
         * notification then unblock it now. */
        if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
        {
            /* The task should not have been on an event list. */
            configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

            if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
            {
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );
            }
            else
            {
                /* The delayed and ready lists cannot be accessed, so hold
                 * this task pending until the scheduler is resumed. */
                vListInsertEnd( &( xPendingReadyList[xPortGetCoreID()] ), &( pxTCB->xEventListItem ) );
            }

            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
            {
                /* The notified task has a priority above the currently
                 * executing task so a yield is required. */
                if( pxHigherPriorityTaskWoken != NULL )
                {
                    *pxHigherPriorityTaskWoken = pdTRUE;
                }
            }
            else if ( pxTCB->xCoreID != xPortGetCoreID() )
            {
                /* The unblocked task is affined to the other core - let that
                 * core decide whether to preempt. */
                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    taskEXIT_CRITICAL_ISR();

    return xReturn;
}
5778
5779 #endif /* configUSE_TASK_NOTIFICATIONS */
5780 /*-----------------------------------------------------------*/
5781
5782 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5783
/* Interrupt-safe 'give': increment the notification value at uxIndexToNotify
 * (counting semaphore semantics) and unblock the task if it was waiting on
 * that notification.  If the scheduler is suspended the woken task is parked
 * on xPendingReadyList; if the woken task outranks the task running on this
 * core, *pxHigherPriorityTaskWoken is set to pdTRUE so the ISR can request a
 * context switch on exit. */
void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                    UBaseType_t uxIndexToNotify,
                                    BaseType_t * pxHigherPriorityTaskWoken )
{
    TCB_t * pxTCB;
    uint8_t ucOriginalNotifyState;


    configASSERT( xTaskToNotify );
    configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    /* RTOS ports that support interrupt nesting have the concept of a
     * maximum system call (or maximum API call) interrupt priority.
     * Interrupts that are above the maximum system call priority are keep
     * permanently enabled, even when the RTOS kernel is in a critical section,
     * but cannot make any calls to FreeRTOS API functions. If configASSERT()
     * is defined in FreeRTOSConfig.h then
     * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
     * failure if a FreeRTOS API function is called from an interrupt that has
     * been assigned a priority above the configured maximum system call
     * priority. Only FreeRTOS functions that end in FromISR can be called
     * from interrupts that have been assigned a priority at or (logically)
     * below the maximum system call interrupt priority. FreeRTOS maintains a
     * separate interrupt safe API to ensure interrupt entry is as fast and as
     * simple as possible. More information (albeit Cortex-M specific) is
     * provided on the following link:
     * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
    portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

    pxTCB = xTaskToNotify;

    taskENTER_CRITICAL_ISR();
    {
        ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
        pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

        /* 'Giving' is equivalent to incrementing a count in a counting
         * semaphore. */
        ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

        traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

        /* If the task is in the blocked state specifically to wait for a
         * notification then unblock it now. */
        if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
        {
            /* The task should not have been on an event list. */
            configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

            if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
            {
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );
            }
            else
            {
                /* The delayed and ready lists cannot be accessed, so hold
                 * this task pending until the scheduler is resumed. */
                vListInsertEnd( &( xPendingReadyList[xPortGetCoreID()] ), &( pxTCB->xEventListItem ) );
            }

            if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
            {
                /* The notified task has a priority above the currently
                 * executing task so a yield is required. */
                if( pxHigherPriorityTaskWoken != NULL )
                {
                    *pxHigherPriorityTaskWoken = pdTRUE;
                }
            }
            else if ( pxTCB->xCoreID != xPortGetCoreID() )
            {
                /* The unblocked task is affined to the other core - let that
                 * core decide whether to preempt. */
                taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    taskEXIT_CRITICAL_ISR();
}
5866
5867 #endif /* configUSE_TASK_NOTIFICATIONS */
5868 /*-----------------------------------------------------------*/
5869
5870 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5871
xTaskGenericNotifyStateClear(TaskHandle_t xTask,UBaseType_t uxIndexToClear)5872 BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
5873 UBaseType_t uxIndexToClear )
5874 {
5875 TCB_t * pxTCB;
5876 BaseType_t xReturn;
5877
5878 configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
5879
5880 /* If null is passed in here then it is the calling task that is having
5881 * its notification state cleared. */
5882 pxTCB = prvGetTCBFromHandle( xTask );
5883
5884 taskENTER_CRITICAL();
5885 {
5886 if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
5887 {
5888 pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
5889 xReturn = pdPASS;
5890 }
5891 else
5892 {
5893 xReturn = pdFAIL;
5894 }
5895 }
5896 taskEXIT_CRITICAL();
5897
5898 return xReturn;
5899 }
5900
5901 #endif /* configUSE_TASK_NOTIFICATIONS */
5902 /*-----------------------------------------------------------*/
5903
5904 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5905
/* Clear the bits of ulBitsToClear in the notification value at index
 * uxIndexToClear of the given task (or of the calling task when xTask is
 * NULL).  Returns the notification value as it was BEFORE the bits were
 * cleared. */
uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
                                        UBaseType_t uxIndexToClear,
                                        uint32_t ulBitsToClear )
{
    TCB_t * pxTCB;
    uint32_t ulReturn;

    /* Validate the index before it is used to subscript the notification
     * arrays - every sibling notification API performs this check, and an
     * out-of-range index would otherwise access memory past the arrays. */
    configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );

    /* If null is passed in here then it is the calling task that is having
     * its notification state cleared. */
    pxTCB = prvGetTCBFromHandle( xTask );

    taskENTER_CRITICAL();
    {
        /* Return the notification as it was before the bits were cleared,
         * then clear the bit mask. */
        ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
        pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
    }
    taskEXIT_CRITICAL();

    return ulReturn;
}
5928
5929 #endif /* configUSE_TASK_NOTIFICATIONS */
5930 /*-----------------------------------------------------------*/
5931
5932 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
5933
ulTaskGetIdleRunTimeCounter(void)5934 uint32_t ulTaskGetIdleRunTimeCounter( void )
5935 {
5936 taskENTER_CRITICAL();
5937 tskTCB *pxTCB = (tskTCB *)xIdleTaskHandle[xPortGetCoreID()];
5938 taskEXIT_CRITICAL();
5939
5940 return pxTCB->ulRunTimeCounter;
5941 }
5942
5943 #endif
5944 /*-----------------------------------------------------------*/
5945
/* Move the task currently running on xCoreID from its ready list to the
 * appropriate blocked list: the suspended list for an indefinite wait
 * (portMAX_DELAY with INCLUDE_vTaskSuspend), the overflow delayed list when
 * the computed wake time wrapped the tick counter, or the current delayed
 * list otherwise.  Must be called from within a critical section (or with
 * the scheduler suspended) as it manipulates the state lists directly. */
static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTicksToWait )
{
    TickType_t xTimeToWake;
    /* Snapshot the tick count so wake-time arithmetic and the overflow test
     * below use one consistent value. */
    const TickType_t xConstTickCount = xTickCount;

    if (configNUM_CORES > 1 && listIS_CONTAINED_WITHIN(&xTasksWaitingTermination, &( pxCurrentTCB[xCoreID]->xStateListItem))) {
        /* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
        so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
        be suspended permanently */
        return;
    }

    #if ( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
         * reset to pdFALSE so it can be detected as having been set to pdTRUE
         * when the task leaves the Blocked state. */
        pxCurrentTCB[xCoreID]->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
     * as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCB[xCoreID]->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
         * check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCB[xCoreID]->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB[xPortGetCoreID()] cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
             * list to ensure it is not woken by a timing event. It will block
             * indefinitely. */
            vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
             * does not occur. This may overflow but this doesn't matter, the
             * kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xCoreID]->xStateListItem ), xTimeToWake );

            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed. Place this item in the overflow
                 * list. */
                vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                 * is used. */
                vListInsert( pxDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                 * head of the list of blocked tasks then xNextTaskUnblockTime
                 * needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
         * does not occur. This may overflow but this doesn't matter, the kernel
         * will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xCoreID]->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            /* Wake time has overflowed. Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
        }
        else
        {
            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
             * list of blocked tasks then xNextTaskUnblockTime needs to be updated
             * too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
    #endif /* INCLUDE_vTaskSuspend */
}
6060
6061 /* Code below here allows additional code to be inserted into this source file,
6062 * especially where access to file scope functions and data is needed (for example
6063 * when performing module tests). */
6064
6065 #ifdef FREERTOS_MODULE_TEST
6066 #include "tasks_test_access_functions.h"
6067 #endif
6068
6069
6070 #if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
6071
6072 #include "freertos_tasks_c_additions.h"
6073
6074 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
/* One-time initialisation hook for code injected via
 * freertos_tasks_c_additions.h, giving it access to this file's
 * file-scope functions and data. */
static void freertos_tasks_c_additions_init( void )
{
    FREERTOS_TASKS_C_ADDITIONS_INIT();
}
6079 #endif
6080
6081 #endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
6082
6083 /* If timers.c is not referenced anywhere, don't create the timer task to save RAM */
/* Weak stub: when timers.c is linked its strong definition overrides this
 * one.  Returning pdPASS lets scheduler start-up succeed without creating
 * the timer service task, saving its stack RAM. */
BaseType_t __attribute__((weak)) xTimerCreateTimerTask( void )
{
    return pdPASS;
}
6088