1 /*
2 * FreeRTOS Kernel V10.2.1
3 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a copy of
6 * this software and associated documentation files (the "Software"), to deal in
7 * the Software without restriction, including without limitation the rights to
8 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 * the Software, and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in all
13 * copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * http://www.FreeRTOS.org
23 * http://aws.amazon.com/freertos
24 *
25 * 1 tab == 4 spaces!
26 */
27
28 /* Standard includes. */
29 #include <stdlib.h>
30 #include <string.h>
31
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33 all the API functions to use the MPU wrappers. That should only be done when
34 task.h is included from an application file. */
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36
37
38 /* FreeRTOS includes. */
39 #include "FreeRTOS.h"
40 #include "task.h"
41 #include "timers.h"
42 #include "stack_macros.h"
43
44 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
45 because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
46 for the header files above, but not in this file, in order to generate the
47 correct privileged Vs unprivileged linkage and placement. */
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
49
50 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
51 functions but without including stdio.h here. */
52 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
53 /* At the bottom of this file are two optional functions that can be used
54 to generate human readable text from the raw data generated by the
55 uxTaskGetSystemState() function. Note the formatting functions are provided
56 for convenience only, and are NOT considered part of the kernel. */
57 #include <stdio.h>
58 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
59
60 #if( configUSE_PREEMPTION == 0 )
61 /* If the cooperative scheduler is being used then a yield should not be
62 performed just because a higher priority task has been woken. */
63 #define taskYIELD_IF_USING_PREEMPTION()
64 #else
65 #define taskYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
66 #endif
67
68 /* Values that can be assigned to the ucNotifyState member of the TCB. */
69 #define taskNOT_WAITING_NOTIFICATION ( ( uint8_t ) 0 )
70 #define taskWAITING_NOTIFICATION ( ( uint8_t ) 1 )
71 #define taskNOTIFICATION_RECEIVED ( ( uint8_t ) 2 )
72
73 /*
74 * The value used to fill the stack of a task when the task is created. This
75 * is used purely for checking the high water mark for tasks.
76 */
77 #define tskSTACK_FILL_BYTE ( 0xa5U )
78
/* Bits used to record how a task's stack and TCB were allocated. */
80 #define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 0 )
81 #define tskSTATICALLY_ALLOCATED_STACK_ONLY ( ( uint8_t ) 1 )
82 #define tskSTATICALLY_ALLOCATED_STACK_AND_TCB ( ( uint8_t ) 2 )
83
84 /* If any of the following are set then task stacks are filled with a known
85 value so the high water mark can be determined. If none of the following are
86 set then don't fill the stack so there is no unnecessary dependency on memset. */
87 #if( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
88 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 1
89 #else
90 #define tskSET_NEW_STACKS_TO_KNOWN_VALUE 0
91 #endif
92
93 /*
 * Macros used by vTaskList to indicate which state a task is in.
95 */
96 #define tskRUNNING_CHAR ( 'X' )
97 #define tskBLOCKED_CHAR ( 'B' )
98 #define tskREADY_CHAR ( 'R' )
99 #define tskDELETED_CHAR ( 'D' )
100 #define tskSUSPENDED_CHAR ( 'S' )
101
102 /*
103 * Some kernel aware debuggers require the data the debugger needs access to be
104 * global, rather than file scope.
105 */
106 #ifdef portREMOVE_STATIC_QUALIFIER
107 #define static
108 #endif
109
110 /* The name allocated to the Idle task. This can be overridden by defining
111 configIDLE_TASK_NAME in FreeRTOSConfig.h. */
112 #ifndef configIDLE_TASK_NAME
113 #define configIDLE_TASK_NAME "IDLE"
114 #endif
115
116 #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
117
118 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
119 performed in a generic way that is not optimised to any particular
120 microcontroller architecture. */
121
122 /* uxTopReadyPriority holds the priority of the highest priority ready
123 state task. */
124 #define taskRECORD_READY_PRIORITY( uxPriority ) \
125 { \
126 if( ( uxPriority ) > uxTopReadyPriority ) \
127 { \
128 uxTopReadyPriority = ( uxPriority ); \
129 } \
130 } /* taskRECORD_READY_PRIORITY */
131
132 /*-----------------------------------------------------------*/
133
134 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
135 { \
136 UBaseType_t uxTopPriority = uxTopReadyPriority; \
137 \
138 /* Find the highest priority queue that contains ready tasks. */ \
139 while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
140 { \
141 configASSERT( uxTopPriority ); \
142 --uxTopPriority; \
143 } \
144 \
145 /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
146 the same priority get an equal share of the processor time. */ \
147 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
148 uxTopReadyPriority = uxTopPriority; \
149 } /* taskSELECT_HIGHEST_PRIORITY_TASK */
150
151 /*-----------------------------------------------------------*/
152
153 /* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
154 they are only required when a port optimised method of task selection is
155 being used. */
156 #define taskRESET_READY_PRIORITY( uxPriority )
157 #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )
158
159 #else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
160
161 /* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
162 performed in a way that is tailored to the particular microcontroller
163 architecture being used. */
164
165 /* A port optimised version is provided. Call the port defined macros. */
166 #define taskRECORD_READY_PRIORITY( uxPriority ) portRECORD_READY_PRIORITY( uxPriority, uxTopReadyPriority )
167
168 /*-----------------------------------------------------------*/
169
170 #define taskSELECT_HIGHEST_PRIORITY_TASK() \
171 { \
172 UBaseType_t uxTopPriority; \
173 \
174 /* Find the highest priority list that contains ready tasks. */ \
175 portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority ); \
176 configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
177 listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB[xPortGetCoreID()], &( pxReadyTasksLists[ uxTopPriority ] ) ); \
178 } /* taskSELECT_HIGHEST_PRIORITY_TASK() */
179
180 /*-----------------------------------------------------------*/
181
182 /* A port optimised version is provided, call it only if the TCB being reset
183 is being referenced from a ready list. If it is referenced from a delayed
184 or suspended list then it won't be in a ready list. */
185 #define taskRESET_READY_PRIORITY( uxPriority ) \
186 { \
187 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
188 { \
189 portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) ); \
190 } \
191 }
192
193 #endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
194
195 /*-----------------------------------------------------------*/
196
197 /* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
198 count overflows. */
199 #define taskSWITCH_DELAYED_LISTS() \
200 { \
201 List_t *pxTemp; \
202 \
203 /* The delayed tasks list should be empty when the lists are switched. */ \
204 configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) ); \
205 \
206 pxTemp = pxDelayedTaskList; \
207 pxDelayedTaskList = pxOverflowDelayedTaskList; \
208 pxOverflowDelayedTaskList = pxTemp; \
209 xNumOfOverflows++; \
210 prvResetNextTaskUnblockTime(); \
211 }
212
213 /*-----------------------------------------------------------*/
214
215 /*
216 * Place the task represented by pxTCB into the appropriate ready list for
217 * the task. It is inserted at the end of the list.
218 */
219 #define prvAddTaskToReadyList( pxTCB ) \
220 traceMOVED_TASK_TO_READY_STATE( pxTCB ); \
221 taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority ); \
222 vListInsertEnd( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
223 tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB )
224 /*-----------------------------------------------------------*/
225
226 #define tskCAN_RUN_HERE( cpuid ) ( cpuid==xPortGetCoreID() || cpuid==tskNO_AFFINITY )
227
228 /*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
230 * where NULL is used to indicate that the handle of the currently executing
231 * task should be used in place of the parameter. This macro simply checks to
232 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
233 */
234 #if portNUM_PROCESSORS > 1
235 /* In SMP, we need to disable interrupts if getting the current task handle outside a critical section. Calling xTaskGetCurrentTaskHandle() ensures this. */
236 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? xTaskGetCurrentTaskHandle() : ( (TaskHandle_t)pxHandle ) )
237 #else
238 #define prvGetTCBFromHandle( pxHandle ) ( ( ( pxHandle ) == NULL ) ? (TaskHandle_t) pxCurrentTCB[0] : ( (TaskHandle_t)pxHandle ) )
239 #endif
240
241 /* The item value of the event list item is normally used to hold the priority
242 of the task to which it belongs (coded to allow it to be held in reverse
243 priority order). However, it is occasionally borrowed for other purposes. It
244 is important its value is not updated due to a task priority change while it is
245 being used for another purpose. The following bit definition is used to inform
246 the scheduler that the value should not be changed - in which case it is the
247 responsibility of whichever module is using the value to ensure it gets set back
248 to its original value when it is released. */
249 #if( configUSE_16_BIT_TICKS == 1 )
250 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x8000U
251 #else
252 #define taskEVENT_LIST_ITEM_VALUE_IN_USE 0x80000000UL
253 #endif
254
255 /*
256 * Task control block. A task control block (TCB) is allocated for each task,
257 * and stores task state information, including a pointer to the task's context
258 * (the task's run time environment, including register values)
259 */
260 typedef struct tskTaskControlBlock /* The old naming convention is used to prevent breaking kernel aware debuggers. */
261 {
262 volatile StackType_t *pxTopOfStack; /*< Points to the location of the last item placed on the tasks stack. THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */
263
264 #if ( portUSING_MPU_WRAPPERS == 1 )
265 xMPU_SETTINGS xMPUSettings; /*< The MPU settings are defined as part of the port layer. THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
266 #endif
267
268 ListItem_t xStateListItem; /*< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
269 ListItem_t xEventListItem; /*< Used to reference a task from an event list. */
270 UBaseType_t uxPriority; /*< The priority of the task. 0 is the lowest priority. */
271 StackType_t *pxStack; /*< Points to the start of the stack. */
272 char pcTaskName[ configMAX_TASK_NAME_LEN ];/*< Descriptive name given to the task when created. Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
273 BaseType_t xCoreID; /*< Core this task is pinned to */
274
275 #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
276 StackType_t *pxEndOfStack; /*< Points to the highest valid address for the stack. */
277 #endif
278
279 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
280 UBaseType_t uxCriticalNesting; /*< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
281 #endif
282
283 #if ( configUSE_TRACE_FACILITY == 1 )
284 UBaseType_t uxTCBNumber; /*< Stores a number that increments each time a TCB is created. It allows debuggers to determine when a task has been deleted and then recreated. */
285 UBaseType_t uxTaskNumber; /*< Stores a number specifically for use by third party trace code. */
286 #endif
287
288 #if ( configUSE_MUTEXES == 1 )
289 UBaseType_t uxBasePriority; /*< The priority last assigned to the task - used by the priority inheritance mechanism. */
290 UBaseType_t uxMutexesHeld;
291 #endif
292
293 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
294 TaskHookFunction_t pxTaskTag;
295 #endif
296
297 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
298 void *pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
299 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
300 TlsDeleteCallbackFunction_t pvThreadLocalStoragePointersDelCallback[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ];
301 #endif
302 #endif
303
304 #if( configGENERATE_RUN_TIME_STATS == 1 )
305 uint32_t ulRunTimeCounter; /*< Stores the amount of time the task has spent in the Running state. */
306 #endif
307
308 #if ( configUSE_NEWLIB_REENTRANT == 1 )
309 /* Allocate a Newlib reent structure that is specific to this task.
310 Note Newlib support has been included by popular demand, but is not
311 used by the FreeRTOS maintainers themselves. FreeRTOS is not
312 responsible for resulting newlib operation. User must be familiar with
313 newlib and must provide system-wide implementations of the necessary
314 stubs. Be warned that (at the time of writing) the current newlib design
315 implements a system-wide malloc() that must be provided with locks. */
316 struct _reent xNewLib_reent;
317 #endif
318
319 #if( configUSE_TASK_NOTIFICATIONS == 1 )
320 volatile uint32_t ulNotifiedValue;
321 volatile uint8_t ucNotifyState;
322 #endif
323
324 /* See the comments in FreeRTOS.h with the definition of
325 tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
326 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
327 uint8_t ucStaticallyAllocated; /*< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
328 #endif
329
330 #if( INCLUDE_xTaskAbortDelay == 1 )
331 uint8_t ucDelayAborted;
332 #endif
333
334 #if( configUSE_POSIX_ERRNO == 1 )
335 int iTaskErrno;
336 #endif
337
338 } tskTCB;
339
340 /* The old tskTCB name is maintained above then typedefed to the new TCB_t name
341 below to enable the use of older kernel aware debuggers. */
342 typedef tskTCB TCB_t;
343
344 /*lint -save -e956 A manual analysis and inspection has been used to determine
345 which static variables must be declared volatile. */
346 PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB[portNUM_PROCESSORS] = {NULL};
347
348 /* Lists for ready and blocked tasks. --------------------
xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
350 doing so breaks some kernel aware debuggers and debuggers that rely on removing
351 the static qualifier. */
352 PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ];/*< Prioritised ready tasks. */
353 PRIVILEGED_DATA static List_t xDelayedTaskList1; /*< Delayed tasks. */
354 PRIVILEGED_DATA static List_t xDelayedTaskList2; /*< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
355 PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList; /*< Points to the delayed task list currently being used. */
356 PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList; /*< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
357 PRIVILEGED_DATA static List_t xPendingReadyList[ portNUM_PROCESSORS ]; /*< Tasks that have been readied while the scheduler was suspended. They will be moved to the ready list when the scheduler is resumed. */
358 PRIVILEGED_DATA static portMUX_TYPE xTaskQueueMutex = portMUX_INITIALIZER_UNLOCKED;
359
360 #if( INCLUDE_vTaskDelete == 1 )
361
362 PRIVILEGED_DATA static List_t xTasksWaitingTermination; /*< Tasks that have been deleted - but their memory not yet freed. */
363 PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;
364
365 #endif
366
367 #if ( INCLUDE_vTaskSuspend == 1 )
368
369 PRIVILEGED_DATA static List_t xSuspendedTaskList; /*< Tasks that are currently suspended. */
370
371 #endif
372
373 /* Global POSIX errno. Its value is changed upon context switching to match
374 the errno of the currently running task. */
375 #if ( configUSE_POSIX_ERRNO == 1 )
376 int FreeRTOS_errno = 0;
377 #endif
378
379 /* Other file private variables. --------------------------------*/
380 PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
381 PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;
382 PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;
383 PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
384 PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;
385 PRIVILEGED_DATA static volatile BaseType_t xYieldPending[portNUM_PROCESSORS] = {pdFALSE};
386 PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;
387 PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
388 PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U; /* Initialised to portMAX_DELAY before the scheduler starts. */
389 PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle[portNUM_PROCESSORS] = {NULL}; /*< Holds the handle of the idle task. The idle task is created automatically when the scheduler is started. */
390
391 /* Context switches are held pending while the scheduler is suspended. Also,
392 interrupts must not manipulate the xStateListItem of a TCB, or any of the
393 lists the xStateListItem can be referenced from, if the scheduler is suspended.
394 If an interrupt needs to unblock a task while the scheduler is suspended then it
395 moves the task's event list item into the xPendingReadyList, ready for the
396 kernel to move the task from the pending ready list into the real ready list
397 when the scheduler is unsuspended. The pending ready list itself can only be
398 accessed from a critical section. */
399 PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended[portNUM_PROCESSORS] = {( UBaseType_t ) pdFALSE};
400
401 #if ( configGENERATE_RUN_TIME_STATS == 1 )
402
403 /* Do not move these variables to function scope as doing so prevents the
404 code working with debuggers that need to remove the static qualifier. */
405 PRIVILEGED_DATA static uint32_t ulTaskSwitchedInTime[portNUM_PROCESSORS] = {0U}; /*< Holds the value of a timer/counter the last time a task was switched in. */
406 PRIVILEGED_DATA static uint32_t ulTotalRunTime = 0UL; /*< Holds the total amount of execution time as defined by the run time counter clock. */
407
408 #endif
409
410 // per-CPU flags indicating that we are doing context switch, it is used by apptrace and sysview modules
411 // in order to avoid calls of vPortYield from traceTASK_SWITCHED_IN/OUT when waiting
412 // for locks to be free or for host to read full trace buffer
413 PRIVILEGED_DATA static volatile BaseType_t xSwitchingContext[ portNUM_PROCESSORS ] = { pdFALSE };
414
415 /*lint -restore */
416
417 /*-----------------------------------------------------------*/
418
419 /* Callback function prototypes. --------------------------*/
420 #if( configCHECK_FOR_STACK_OVERFLOW > 0 )
421
422 extern void vApplicationStackOverflowHook( TaskHandle_t xTask, char *pcTaskName );
423
424 #endif
425
426 #if( configUSE_TICK_HOOK > 0 )
427
428 extern void vApplicationTickHook( void ); /*lint !e526 Symbol not defined as it is an application callback. */
429
430 #endif
431
432 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
433
434 extern void vApplicationGetIdleTaskMemory( StaticTask_t **ppxIdleTaskTCBBuffer, StackType_t **ppxIdleTaskStackBuffer, uint32_t *pulIdleTaskStackSize ); /*lint !e526 Symbol not defined as it is an application callback. */
435
436 #endif
437
438 /* File private functions. --------------------------------*/
439
440 /**
441 * Utility task that simply returns pdTRUE if the task referenced by xTask is
442 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
443 * is in any other state.
444 */
445 #if ( INCLUDE_vTaskSuspend == 1 )
446
447 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
448
449 #endif /* INCLUDE_vTaskSuspend */
450
451 /*
452 * Utility to ready all the lists used by the scheduler. This is called
453 * automatically upon the creation of the first task.
454 */
455 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
456
457 /*
458 * The idle task, which as all tasks is implemented as a never ending loop.
459 * The idle task is automatically created and added to the ready lists upon
460 * creation of the first user task.
461 *
462 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
463 * language extensions. The equivalent prototype for this function is:
464 *
465 * void prvIdleTask( void *pvParameters );
466 *
467 */
468 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters );
469
470 /*
471 * Utility to free all memory allocated by the scheduler to hold a TCB,
472 * including the stack pointed to by the TCB.
473 *
474 * This does not free memory allocated by the task itself (i.e. memory
475 * allocated by calls to pvPortMalloc from within the tasks application code).
476 */
477 #if ( INCLUDE_vTaskDelete == 1 )
478
479 static void prvDeleteTCB( TCB_t *pxTCB ) PRIVILEGED_FUNCTION;
480
481 #endif
482
483 /* Function to call the Thread Local Storage Pointer Deletion Callbacks. Will be
484 * called during task deletion before prvDeleteTCB is called.
485 */
486 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
487 static void prvDeleteTLS( TCB_t *pxTCB );
488 #endif
489
490 /*
491 * Used only by the idle task. This checks to see if anything has been placed
492 * in the list of tasks waiting to be deleted. If so the task is cleaned up
493 * and its TCB deleted.
494 */
495 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
496
497 /*
498 * The currently executing task is entering the Blocked state. Add the task to
499 * either the current or the overflow delayed task list.
500 */
501 static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTicksToWait ) PRIVILEGED_FUNCTION;
502
503 /*
 * Fills a TaskStatus_t structure with information on each task that is
505 * referenced from the pxList list (which may be a ready list, a delayed list,
506 * a suspended list, etc.).
507 *
508 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
509 * NORMAL APPLICATION CODE.
510 */
511 #if ( configUSE_TRACE_FACILITY == 1 )
512
513 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState ) PRIVILEGED_FUNCTION;
514
515 #endif
516
517 /*
518 * Searches pxList for a task with name pcNameToQuery - returning a handle to
519 * the task if it is found, or NULL if the task is not found.
520 */
521 #if ( INCLUDE_xTaskGetHandle == 1 )
522
523 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
524
525 #endif
526
527 /*
528 * When a task is created, the stack of the task is filled with a known value.
529 * This function determines the 'high water mark' of the task stack by
530 * determining how much of the stack remains at the original preset value.
531 */
532 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
533
534 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
535
536 #endif
537
538 /*
539 * Return the amount of time, in ticks, that will pass before the kernel will
540 * next move a task from the Blocked state to the Running state.
541 *
542 * This conditional compilation should use inequality to 0, not equality to 1.
543 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
544 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
545 * set to a value other than 1.
546 */
547 #if ( configUSE_TICKLESS_IDLE != 0 )
548
549 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
550
551 #endif
552
553 /*
554 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
555 * will exit the Blocked state.
556 */
557 static void prvResetNextTaskUnblockTime( void );
558
559 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
560
561 /*
562 * Helper function used to pad task names with spaces when printing out
563 * human readable tables of task information.
564 */
565 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName ) PRIVILEGED_FUNCTION;
566
567 #endif
568
569 /*
570 * Called after a Task_t structure has been allocated either statically or
571 * dynamically to fill in the structure's members.
572 */
573 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
574 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
575 const uint32_t ulStackDepth,
576 void * const pvParameters,
577 UBaseType_t uxPriority,
578 TaskHandle_t * const pxCreatedTask,
579 TCB_t *pxNewTCB,
580 const MemoryRegion_t * const xRegions,
581 BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
582
583 /*
584 * Called after a new task has been created and initialised to place the task
585 * under the control of the scheduler.
586 */
587 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID ) PRIVILEGED_FUNCTION;
588
589 /*
590 * freertos_tasks_c_additions_init() should only be called if the user definable
591 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
592 * called by the function.
593 */
594 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
595
596 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
597
598 #endif
599
600 /*
601 * This routine tries to send an interrupt to another core if needed to make it execute a task
602 * of higher priority. We try to figure out if needed first by inspecting the pxTCB of the
603 * other CPU first. Specifically for Xtensa, we can do this because pxTCB is an atomic pointer. It
604 * is possible that it is inaccurate because the other CPU just did a task switch, but in that case
605 * at most a superfluous interrupt is generated.
606 */
taskYIELD_OTHER_CORE(BaseType_t xCoreID,UBaseType_t uxPriority)607 void taskYIELD_OTHER_CORE( BaseType_t xCoreID, UBaseType_t uxPriority )
608 {
609 BaseType_t i;
610
611 if (xCoreID != tskNO_AFFINITY) {
612 if ( pxCurrentTCB[ xCoreID ]->uxPriority < uxPriority ) { // NOLINT(clang-analyzer-core.NullDereference) IDF-685
613 vPortYieldOtherCore( xCoreID );
614 }
615 }
616 else
617 {
618 /* The task has no affinity. See if we can find a CPU to put it on.*/
619 for (i=0; i<portNUM_PROCESSORS; i++) {
620 if (i != xPortGetCoreID() && pxCurrentTCB[ i ]->uxPriority < uxPriority)
621 {
622 vPortYieldOtherCore( i );
623 break;
624 }
625 }
626 }
627 }
628
629 /*-----------------------------------------------------------*/
630
631 #if( configSUPPORT_STATIC_ALLOCATION == 1 )
632
xTaskCreateStaticPinnedToCore(TaskFunction_t pvTaskCode,const char * const pcName,const uint32_t ulStackDepth,void * const pvParameters,UBaseType_t uxPriority,StackType_t * const pxStackBuffer,StaticTask_t * const pxTaskBuffer,const BaseType_t xCoreID)633 TaskHandle_t xTaskCreateStaticPinnedToCore( TaskFunction_t pvTaskCode,
634 const char * const pcName,
635 const uint32_t ulStackDepth,
636 void * const pvParameters,
637 UBaseType_t uxPriority,
638 StackType_t * const pxStackBuffer,
639 StaticTask_t * const pxTaskBuffer,
640 const BaseType_t xCoreID )
641 {
642 TCB_t *pxNewTCB;
643 TaskHandle_t xReturn;
644
645 configASSERT( portVALID_TCB_MEM(pxTaskBuffer) );
646 configASSERT( portVALID_STACK_MEM(pxStackBuffer) );
647 configASSERT( (xCoreID>=0 && xCoreID<portNUM_PROCESSORS) || (xCoreID==tskNO_AFFINITY) );
648
649 #if( configASSERT_DEFINED == 1 )
650 {
651 /* Sanity check that the size of the structure used to declare a
652 variable of type StaticTask_t equals the size of the real task
653 structure. */
654 volatile size_t xSize = sizeof( StaticTask_t );
655 configASSERT( xSize == sizeof( TCB_t ) );
656 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
657 }
658 #endif /* configASSERT_DEFINED */
659
660
661 if( ( pxTaskBuffer != NULL ) && ( pxStackBuffer != NULL ) )
662 {
663 /* The memory used for the task's TCB and stack are passed into this
664 function - use them. */
665 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
666 pxNewTCB->pxStack = ( StackType_t * ) pxStackBuffer;
667
668 #if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
669 {
670 /* Tasks can be created statically or dynamically, so note this
671 task was created statically in case the task is later deleted. */
672 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
673 }
674 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
675
676 prvInitialiseNewTask( pvTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL, xCoreID );
677 prvAddNewTaskToReadyList( pxNewTCB, pvTaskCode, xCoreID );
678 }
679 else
680 {
681 xReturn = NULL;
682 }
683
684 return xReturn;
685 }
686
687 #endif /* SUPPORT_STATIC_ALLOCATION */
688 /*-----------------------------------------------------------*/
689
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	/**
	 * Create an MPU "restricted" task whose stack AND TCB buffers are both
	 * supplied by the caller inside pxTaskDefinition, so nothing is allocated
	 * here.  The new task is created with no core affinity (tskNO_AFFINITY).
	 *
	 * @param pxTaskDefinition Task parameters: entry function, name, stack
	 *        depth, priority, MPU regions and the two static buffers
	 *        (puxStackBuffer / pxTaskBuffer).
	 * @param pxCreatedTask Optional out parameter receiving the task handle.
	 * @return pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
	 *         either required buffer was NULL.
	 */
	BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
		configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );

		if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;

			/* Store the stack location in the TCB. */
			pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created statically in case the task is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			/* The stack/TCB fields above must be populated before
			prvInitialiseNewTask() runs, as it fills the stack and links the
			TCB into the kernel's bookkeeping. */
			prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
									pxTaskDefinition->pcName,
									( uint32_t ) pxTaskDefinition->usStackDepth,
									pxTaskDefinition->pvParameters,
									pxTaskDefinition->uxPriority,
									pxCreatedTask, pxNewTCB,
									pxTaskDefinition->xRegions,
									tskNO_AFFINITY );

			prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY);
			xReturn = pdPASS;
		}

		return xReturn;
	}

#endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
735 /*-----------------------------------------------------------*/
736
#if( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	/**
	 * Create an MPU "restricted" task.  The stack buffer is supplied by the
	 * caller (pxTaskDefinition->puxStackBuffer) while the TCB is allocated
	 * dynamically here.  The new task is created with no core affinity.
	 *
	 * @param pxTaskDefinition Task parameters including the statically
	 *        allocated stack buffer and the MPU region definitions.
	 * @param pxCreatedTask Optional out parameter receiving the task handle.
	 * @return pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
	 *         the stack buffer was NULL or the TCB allocation failed.
	 */
	BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition, TaskHandle_t *pxCreatedTask )
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;

		configASSERT( pxTaskDefinition->puxStackBuffer );

		if( pxTaskDefinition->puxStackBuffer != NULL )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends
			on the implementation of the port malloc function and whether or
			not static allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Store the stack location in the TCB. */
				pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;

				#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
				{
					/* Tasks can be created statically or dynamically, so note
					this task had a statically allocated stack in case it is
					later deleted.  The TCB was allocated dynamically. */
					pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
				}
				#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

				prvInitialiseNewTask(	pxTaskDefinition->pvTaskCode,
										pxTaskDefinition->pcName,
										( uint32_t ) pxTaskDefinition->usStackDepth,
										pxTaskDefinition->pvParameters,
										pxTaskDefinition->uxPriority,
										pxCreatedTask, pxNewTCB,
										pxTaskDefinition->xRegions,
										tskNO_AFFINITY );

				prvAddNewTaskToReadyList( pxNewTCB, pxTaskDefinition->pvTaskCode, tskNO_AFFINITY);
				xReturn = pdPASS;
			}
		}

		return xReturn;
	}

#endif /* portUSING_MPU_WRAPPERS */
785 /*-----------------------------------------------------------*/
786
#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

	/**
	 * Create a new task with dynamically allocated stack and TCB, optionally
	 * pinned to one core (ESP-IDF SMP extension).
	 *
	 * @param pvTaskCode    Entry function for the task.
	 * @param pcName        Descriptive name (copied into the TCB, truncated to
	 *                      configMAX_TASK_NAME_LEN - 1 characters).
	 * @param usStackDepth  Stack size, in StackType_t words (not bytes).
	 * @param pvParameters  Argument passed to the task function.
	 * @param uxPriority    Priority; clamped to configMAX_PRIORITIES - 1.
	 * @param pvCreatedTask Optional out parameter receiving the task handle.
	 * @param xCoreID       Core to pin the task to, or tskNO_AFFINITY.
	 * @return pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
	 *         either allocation failed (nothing is leaked on failure).
	 */
	BaseType_t xTaskCreatePinnedToCore(	TaskFunction_t pvTaskCode,
						const char * const pcName,
						const uint32_t usStackDepth,
						void * const pvParameters,
						UBaseType_t uxPriority,
						TaskHandle_t * const pvCreatedTask,
						const BaseType_t xCoreID)
	{
	TCB_t *pxNewTCB;
	BaseType_t xReturn;

		/* If the stack grows down then allocate the stack then the TCB so the stack
		does not grow into the TCB.  Likewise if the stack grows up then allocate
		the TCB then the stack. */
		#if( portSTACK_GROWTH > 0 )
		{
			/* Allocate space for the TCB.  Where the memory comes from depends on
			the implementation of the port malloc function and whether or not static
			allocation is being used. */
			pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) );

			if( pxNewTCB != NULL )
			{
				/* Allocate space for the stack used by the task being created.
				The base of the stack memory stored in the TCB so the task can
				be deleted later if required. */
				pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

				if( pxNewTCB->pxStack == NULL )
				{
					/* Could not allocate the stack.  Delete the allocated TCB. */
					vPortFree( pxNewTCB );
					pxNewTCB = NULL;
				}
			}
		}
		#else /* portSTACK_GROWTH */
		{
		StackType_t *pxStack;

			/* Allocate space for the stack used by the task being created. */
			pxStack = pvPortMallocStackMem( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */

			if( pxStack != NULL )
			{
				/* Allocate space for the TCB. */
				pxNewTCB = ( TCB_t * ) pvPortMallocTcbMem( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */

				if( pxNewTCB != NULL )
				{
					/* Store the stack location in the TCB. */
					pxNewTCB->pxStack = pxStack;
				}
				else
				{
					/* The stack cannot be used as the TCB was not created.  Free
					it again. */
					vPortFree( pxStack );
				}
			}
			else
			{
				pxNewTCB = NULL;
			}
		}
		#endif /* portSTACK_GROWTH */

		if( pxNewTCB != NULL )
		{
			#if( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
			{
				/* Tasks can be created statically or dynamically, so note this
				task was created dynamically in case it is later deleted. */
				pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
			}
			#endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

			prvInitialiseNewTask( pvTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pvCreatedTask, pxNewTCB, NULL, xCoreID );
			prvAddNewTaskToReadyList( pxNewTCB, pvTaskCode, xCoreID);
			xReturn = pdPASS;
		}
		else
		{
			xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
		}

		return xReturn;
	}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
879 /*-----------------------------------------------------------*/
880
prvInitialiseNewTask(TaskFunction_t pxTaskCode,const char * const pcName,const uint32_t ulStackDepth,void * const pvParameters,UBaseType_t uxPriority,TaskHandle_t * const pxCreatedTask,TCB_t * pxNewTCB,const MemoryRegion_t * const xRegions,BaseType_t xCoreID)881 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
882 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
883 const uint32_t ulStackDepth,
884 void * const pvParameters,
885 UBaseType_t uxPriority,
886 TaskHandle_t * const pxCreatedTask,
887 TCB_t *pxNewTCB,
888 const MemoryRegion_t * const xRegions,
889 BaseType_t xCoreID )
890 {
891 StackType_t *pxTopOfStack;
892 UBaseType_t x;
893
894 #if (portNUM_PROCESSORS < 2)
895 xCoreID = 0;
896 #endif
897
898 #if( portUSING_MPU_WRAPPERS == 1 )
899 /* Should the task be created in privileged mode? */
900 BaseType_t xRunPrivileged;
901 if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
902 {
903 xRunPrivileged = pdTRUE;
904 }
905 else
906 {
907 xRunPrivileged = pdFALSE;
908 }
909 uxPriority &= ~portPRIVILEGE_BIT;
910 #endif /* portUSING_MPU_WRAPPERS == 1 */
911
912 /* Avoid dependency on memset() if it is not required. */
913 #if( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
914 {
915 /* Fill the stack with a known value to assist debugging. */
916 ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
917 }
918 #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */
919
920 /* Calculate the top of stack address. This depends on whether the stack
921 grows from high memory to low (as per the 80x86) or vice versa.
922 portSTACK_GROWTH is used to make the result positive or negative as required
923 by the port. */
924 #if( portSTACK_GROWTH < 0 )
925 {
926 pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
927 pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception. Avoiding casts between pointers and integers is not practical. Size differences accounted for using portPOINTER_SIZE_TYPE type. Checked by assert(). */
928
929 /* Check the alignment of the calculated top of stack is correct. */
930 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
931
932 #if( configRECORD_STACK_HIGH_ADDRESS == 1 )
933 {
934 /* Also record the stack's high address, which may assist
935 debugging. */
936 pxNewTCB->pxEndOfStack = pxTopOfStack;
937 }
938 #endif /* configRECORD_STACK_HIGH_ADDRESS */
939 }
940 #else /* portSTACK_GROWTH */
941 {
942 pxTopOfStack = pxNewTCB->pxStack;
943
944 /* Check the alignment of the stack buffer is correct. */
945 configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );
946
947 /* The other extreme of the stack space is required if stack checking is
948 performed. */
949 pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
950 }
951 #endif /* portSTACK_GROWTH */
952
953 /* Store the task name in the TCB. */
954 if( pcName != NULL )
955 {
956 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
957 {
958 pxNewTCB->pcTaskName[ x ] = pcName[ x ];
959
960 /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
961 configMAX_TASK_NAME_LEN characters just in case the memory after the
962 string is not accessible (extremely unlikely). */
963 if( pcName[ x ] == ( char ) 0x00 )
964 {
965 break;
966 }
967 else
968 {
969 mtCOVERAGE_TEST_MARKER();
970 }
971 }
972
973 /* Ensure the name string is terminated in the case that the string length
974 was greater or equal to configMAX_TASK_NAME_LEN. */
975 pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
976 }
977 else
978 {
979 /* The task has not been given a name, so just ensure there is a NULL
980 terminator when it is read out. */
981 pxNewTCB->pcTaskName[ 0 ] = 0x00;
982 }
983
984 /* This is used as an array index so must ensure it's not too large. First
985 remove the privilege bit if one is present. */
986 if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
987 {
988 uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
989 }
990 else
991 {
992 mtCOVERAGE_TEST_MARKER();
993 }
994
995 pxNewTCB->uxPriority = uxPriority;
996 pxNewTCB->xCoreID = xCoreID;
997 #if ( configUSE_MUTEXES == 1 )
998 {
999 pxNewTCB->uxBasePriority = uxPriority;
1000 pxNewTCB->uxMutexesHeld = 0;
1001 }
1002 #endif /* configUSE_MUTEXES */
1003
1004 vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
1005 vListInitialiseItem( &( pxNewTCB->xEventListItem ) );
1006
1007 /* Set the pxNewTCB as a link back from the ListItem_t. This is so we can get
1008 back to the containing TCB from a generic item in a list. */
1009 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );
1010
1011 /* Event lists are always in priority order. */
1012 listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
1013 listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );
1014
1015 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
1016 {
1017 pxNewTCB->uxCriticalNesting = ( UBaseType_t ) 0U;
1018 }
1019 #endif /* portCRITICAL_NESTING_IN_TCB */
1020
1021 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
1022 {
1023 pxNewTCB->pxTaskTag = NULL;
1024 }
1025 #endif /* configUSE_APPLICATION_TASK_TAG */
1026
1027 #if ( configGENERATE_RUN_TIME_STATS == 1 )
1028 {
1029 pxNewTCB->ulRunTimeCounter = 0UL;
1030 }
1031 #endif /* configGENERATE_RUN_TIME_STATS */
1032
1033 #if ( portUSING_MPU_WRAPPERS == 1 )
1034 {
1035 vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
1036 }
1037 #else
1038 {
1039 /* Avoid compiler warning about unreferenced parameter. */
1040 ( void ) xRegions;
1041 }
1042 #endif
1043
1044 #if( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
1045 {
1046 for( x = 0; x < ( UBaseType_t ) configNUM_THREAD_LOCAL_STORAGE_POINTERS; x++ )
1047 {
1048 pxNewTCB->pvThreadLocalStoragePointers[ x ] = NULL;
1049 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS == 1)
1050 pxNewTCB->pvThreadLocalStoragePointersDelCallback[ x ] = NULL;
1051 #endif
1052
1053 }
1054 }
1055 #endif
1056
1057 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
1058 {
1059 pxNewTCB->ulNotifiedValue = 0;
1060 pxNewTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
1061 }
1062 #endif
1063
1064 #if ( configUSE_NEWLIB_REENTRANT == 1 )
1065 {
1066 // /* Initialise this task's Newlib reent structure. */
1067 // _REENT_INIT_PTR( ( &( pxNewTCB->xNewLib_reent ) ) );
1068
1069 /* Initialise this task's Newlib reent structure. */
1070 esp_reent_init(&pxNewTCB->xNewLib_reent);
1071
1072 }
1073 #endif
1074
1075 #if( INCLUDE_xTaskAbortDelay == 1 )
1076 {
1077 pxNewTCB->ucDelayAborted = pdFALSE;
1078 }
1079 #endif
1080
1081 /* Initialize the TCB stack to look as if the task was already running,
1082 but had been interrupted by the scheduler. The return address is set
1083 to the start of the task function. Once the stack has been initialised
1084 the top of stack variable is updated. */
1085 #if( portUSING_MPU_WRAPPERS == 1 )
1086 {
1087 /* If the port has capability to detect stack overflow,
1088 pass the stack end address to the stack initialization
1089 function as well. */
1090 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1091 {
1092 #if( portSTACK_GROWTH < 0 )
1093 {
1094 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged );
1095 }
1096 #else /* portSTACK_GROWTH */
1097 {
1098 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1099 }
1100 #endif /* portSTACK_GROWTH */
1101 }
1102 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1103 {
1104 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged );
1105 }
1106 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1107 }
1108 #else /* portUSING_MPU_WRAPPERS */
1109 {
1110 /* If the port has capability to detect stack overflow,
1111 pass the stack end address to the stack initialization
1112 function as well. */
1113 #if( portHAS_STACK_OVERFLOW_CHECKING == 1 )
1114 {
1115 #if( portSTACK_GROWTH < 0 )
1116 {
1117 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
1118 }
1119 #else /* portSTACK_GROWTH */
1120 {
1121 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
1122 }
1123 #endif /* portSTACK_GROWTH */
1124 }
1125 #else /* portHAS_STACK_OVERFLOW_CHECKING */
1126 {
1127 pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
1128 }
1129 #endif /* portHAS_STACK_OVERFLOW_CHECKING */
1130 }
1131 #endif /* portUSING_MPU_WRAPPERS */
1132
1133 if( pxCreatedTask != NULL )
1134 {
1135 /* Pass the handle out in an anonymous way. The handle can be used to
1136 change the created task's priority, delete the created task, etc.*/
1137 *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
1138 }
1139 else
1140 {
1141 mtCOVERAGE_TEST_MARKER();
1142 }
1143 }
1144 /*-----------------------------------------------------------*/
1145
prvAddNewTaskToReadyList(TCB_t * pxNewTCB,TaskFunction_t pxTaskCode,BaseType_t xCoreID)1146 static void prvAddNewTaskToReadyList( TCB_t *pxNewTCB, TaskFunction_t pxTaskCode, BaseType_t xCoreID )
1147 {
1148 TCB_t *curTCB, *tcb0, *tcb1;
1149
1150 #if (portNUM_PROCESSORS < 2)
1151 xCoreID = 0;
1152 #endif
1153
1154 /* Ensure interrupts don't access the task lists while the lists are being
1155 updated. */
1156 taskENTER_CRITICAL( &xTaskQueueMutex );
1157 {
1158 uxCurrentNumberOfTasks++;
1159
1160 if ( xCoreID == tskNO_AFFINITY )
1161 {
1162 if ( portNUM_PROCESSORS == 1 )
1163 {
1164 xCoreID = 0;
1165 }
1166 else
1167 {
1168 // if the task has no affinity, put it on either core if nothing is currently scheduled there. Failing that,
1169 // put it on the core where it will preempt the lowest priority running task. If neither of these are true,
1170 // queue it on the currently running core.
1171 tcb0 = pxCurrentTCB[0];
1172 tcb1 = pxCurrentTCB[1];
1173 if ( tcb0 == NULL )
1174 {
1175 xCoreID = 0;
1176 }
1177 else if ( tcb1 == NULL )
1178 {
1179 xCoreID = 1;
1180 }
1181 else if ( tcb0->uxPriority < pxNewTCB->uxPriority && tcb0->uxPriority < tcb1->uxPriority )
1182 {
1183 xCoreID = 0;
1184 }
1185 else if ( tcb1->uxPriority < pxNewTCB->uxPriority )
1186 {
1187 xCoreID = 1;
1188 }
1189 else
1190 {
1191 xCoreID = xPortGetCoreID(); // Both CPU have higher priority tasks running on them, so this won't run yet
1192 }
1193 }
1194 }
1195
1196 if( pxCurrentTCB[xCoreID] == NULL )
1197 {
1198 /* There are no other tasks, or all the other tasks are in
1199 the suspended state - make this the current task. */
1200 pxCurrentTCB[xCoreID] = pxNewTCB;
1201
1202 if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
1203 {
1204 /* This is the first task to be created so do the preliminary
1205 initialisation required. We will not recover if this call
1206 fails, but we will report the failure. */
1207 prvInitialiseTaskLists();
1208 }
1209 else
1210 {
1211 mtCOVERAGE_TEST_MARKER();
1212 }
1213 }
1214 else
1215 {
1216 /* If the scheduler is not already running, make this task the
1217 current task if it is the highest priority task to be created
1218 so far. */
1219 if( xSchedulerRunning == pdFALSE )
1220 {
1221 if( pxCurrentTCB[xCoreID] == NULL || pxCurrentTCB[xCoreID]->uxPriority <= pxNewTCB->uxPriority )
1222 {
1223 pxCurrentTCB[xCoreID] = pxNewTCB;
1224 }
1225 else
1226 {
1227 mtCOVERAGE_TEST_MARKER();
1228 }
1229 }
1230 else
1231 {
1232 mtCOVERAGE_TEST_MARKER();
1233 }
1234 }
1235
1236 uxTaskNumber++;
1237
1238 #if ( configUSE_TRACE_FACILITY == 1 )
1239 {
1240 /* Add a counter into the TCB for tracing only. */
1241 pxNewTCB->uxTCBNumber = uxTaskNumber;
1242 }
1243 #endif /* configUSE_TRACE_FACILITY */
1244 traceTASK_CREATE( pxNewTCB );
1245
1246 prvAddTaskToReadyList( pxNewTCB );
1247
1248 portSETUP_TCB( pxNewTCB );
1249 }
1250 taskEXIT_CRITICAL( &xTaskQueueMutex );
1251
1252 if( xSchedulerRunning != pdFALSE )
1253 {
1254 /* If the created task is of a higher priority than the current task
1255 then it should run now. */
1256 taskENTER_CRITICAL(&xTaskQueueMutex);
1257
1258 curTCB = pxCurrentTCB[ xCoreID ];
1259 if( curTCB == NULL || curTCB->uxPriority < pxNewTCB->uxPriority )
1260 {
1261 if( xCoreID == xPortGetCoreID() )
1262 {
1263 taskYIELD_IF_USING_PREEMPTION();
1264 }
1265 else {
1266 taskYIELD_OTHER_CORE(xCoreID, pxNewTCB->uxPriority);
1267 }
1268 }
1269 else
1270 {
1271 mtCOVERAGE_TEST_MARKER();
1272 }
1273 taskEXIT_CRITICAL(&xTaskQueueMutex);
1274 }
1275 else
1276 {
1277 mtCOVERAGE_TEST_MARKER();
1278 }
1279 }
1280 /*-----------------------------------------------------------*/
1281
#if ( INCLUDE_vTaskDelete == 1 )

	/**
	 * Delete a task (ESP-IDF SMP variant).  If the task can be freed
	 * immediately its memory is released here; if it is the calling task, the
	 * task running on the other core, or a task pinned to the other core, it
	 * is instead placed on xTasksWaitingTermination for the idle task to
	 * clean up (FPU state must be cleaned on the task's own core).
	 *
	 * @param xTaskToDelete Handle of the task to delete; NULL deletes the
	 *        calling task.
	 */
	void vTaskDelete( TaskHandle_t xTaskToDelete )
	{
	TCB_t *pxTCB;
	TCB_t *curTCB;
	BaseType_t core;
	BaseType_t xFreeNow = 0;

		taskENTER_CRITICAL( &xTaskQueueMutex );
		{
			core = xPortGetCoreID();
			curTCB = pxCurrentTCB[core];

			/* If null is passed in here then it is the calling task that is
			being deleted. */
			pxTCB = prvGetTCBFromHandle( xTaskToDelete );

			/* Remove task from the ready/delayed list. */
			if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
			{
				taskRESET_READY_PRIORITY( pxTCB->uxPriority );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* Is the task waiting on an event also? */
			if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
			{
				( void ) uxListRemove( &( pxTCB->xEventListItem ) );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* Increment the uxTaskNumber also so kernel aware debuggers can
			detect that the task lists need re-generating.  This is done before
			portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
			not return. */
			uxTaskNumber++;

			if( pxTCB == curTCB ||
				/* in SMP, we also can't immediately delete the task active on the other core */
				(portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ]) ||
				/* ... and we can't delete a non-running task pinned to the other core, as
				   FPU cleanup has to happen on the same core */
				(portNUM_PROCESSORS > 1 && pxTCB->xCoreID == (!core)) )
			{
				/* A task is deleting itself.  This cannot complete within the
				task itself, as a context switch to another task is required.
				Place the task in the termination list.  The idle task will
				check the termination list and free up any memory allocated by
				the scheduler for the TCB and stack of the deleted task. */
				vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

				/* Increment the ucTasksDeleted variable so the idle task knows
				there is a task that has been deleted and that it should therefore
				check the xTasksWaitingTermination list. */
				++uxDeletedTasksWaitingCleanUp;

				/* The pre-delete hook is primarily for the Windows simulator,
				in which Windows specific clean up operations are performed,
				after which it is not possible to yield away from this task -
				hence xYieldPending is used to latch that a context switch is
				required. */
				portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending[core] );

				if (portNUM_PROCESSORS > 1 && pxTCB == pxCurrentTCB[ !core ])
				{
					/* SMP case of deleting a task running on a different core.  Same issue
					as a task deleting itself, but we need to send a yield to this task now
					before we release xTaskQueueMutex.

					Specifically there is a case where the other core may already be spinning on
					xTaskQueueMutex waiting to go into a blocked state.  A check is added in
					prvAddCurrentTaskToDelayedList() to prevent it from removing itself from
					xTasksWaitingTermination list in this case (instead it will immediately
					release xTaskQueueMutex again and be yielded before the FreeRTOS function
					returns.) */
					vPortYieldOtherCore( !core );
				}
			}
			else
			{
				--uxCurrentNumberOfTasks;
				xFreeNow = pdTRUE;

				/* Reset the next expected unblock time in case it referred to
				the task that has just been deleted. */
				prvResetNextTaskUnblockTime();
			}

			traceTASK_DELETE( pxTCB );
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );

		/* Free memory outside of the critical section to keep it short. */
		if(xFreeNow == pdTRUE) {
			#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
			prvDeleteTLS( pxTCB );
			#endif

			prvDeleteTCB( pxTCB );
		}

		/* Force a reschedule if it is the currently running task that has just
		been deleted. */
		if( xSchedulerRunning != pdFALSE )
		{
			if( pxTCB == curTCB )
			{
				configASSERT( xTaskGetSchedulerState() != taskSCHEDULER_SUSPENDED );
				portYIELD_WITHIN_API();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}

#endif /* INCLUDE_vTaskDelete */
1406 /*-----------------------------------------------------------*/
1407
#if ( INCLUDE_vTaskDelayUntil == 1 )

	/**
	 * Delay the calling task until an absolute wake time, giving a fixed
	 * execution period regardless of how long the task's own processing took.
	 *
	 * @param pxPreviousWakeTime In/out: the time the task last woke; updated
	 *        to the new wake time on every call (even when no delay occurs,
	 *        so a slipped deadline does not accumulate).
	 * @param xTimeIncrement Period, in ticks; must be > 0.
	 */
	void vTaskDelayUntil( TickType_t * const pxPreviousWakeTime, const TickType_t xTimeIncrement )
	{
	TickType_t xTimeToWake;
	BaseType_t xAlreadyYielded = pdFALSE, xShouldDelay = pdFALSE;

		configASSERT( pxPreviousWakeTime );
		configASSERT( ( xTimeIncrement > 0U ) );
		configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );

		taskENTER_CRITICAL( &xTaskQueueMutex );
		{
			/* Minor optimisation.  The tick count cannot change in this
			block. */
			const TickType_t xConstTickCount = xTickCount;

			/* Generate the tick time at which the task wants to wake.
			Unsigned arithmetic, so this wraps naturally on overflow. */
			xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

			if( xConstTickCount < *pxPreviousWakeTime )
			{
				/* The tick count has overflowed since this function was
				lasted called.  In this case the only time we should ever
				actually delay is if the wake time has also	overflowed,
				and the wake time is greater than the tick time.  When this
				is the case it is as if neither time had overflowed. */
				if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
				{
					xShouldDelay = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* The tick time has not overflowed.  In this case we will
				delay if either the wake time has overflowed, and/or the
				tick time is less than the wake time. */
				if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
				{
					xShouldDelay = pdTRUE;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}

			/* Update the wake time ready for the next call. */
			*pxPreviousWakeTime = xTimeToWake;

			if( xShouldDelay != pdFALSE )
			{
				traceTASK_DELAY_UNTIL();

				/* prvAddCurrentTaskToDelayedList() needs the block time, not
				the time to wake, so subtract the current tick count. */
				prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTimeToWake - xConstTickCount );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );

		/* Force a reschedule if xTaskResumeAll has not already done so, we may
		have put ourselves to sleep.
		NOTE(review): in this critical-section-based SMP port xAlreadyYielded
		is never set, so the yield below always happens - looks like a
		leftover from the upstream vTaskSuspendAll()/xTaskResumeAll() scheme;
		confirm before relying on it. */
		if( xAlreadyYielded == pdFALSE )
		{
			portYIELD_WITHIN_API();
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}

#endif /* INCLUDE_vTaskDelayUntil */
1490 /*-----------------------------------------------------------*/
1491
#if ( INCLUDE_vTaskDelay == 1 )

	/**
	 * Block the calling task for (at least) the given number of ticks.  A
	 * delay of zero ticks does not block at all - it just requests a
	 * reschedule so equal-priority tasks can run.
	 *
	 * @param xTicksToDelay Number of tick periods to remain blocked for.
	 */
	void vTaskDelay( const TickType_t xTicksToDelay )
	{
	BaseType_t xAlreadyYielded = pdFALSE;

		if( xTicksToDelay == ( TickType_t ) 0U )
		{
			/* Nothing to add to the delayed list - fall straight through to
			the reschedule below. */
			mtCOVERAGE_TEST_MARKER();
		}
		else
		{
			/* Blocking is illegal while the scheduler is suspended on this
			core. */
			configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );

			taskENTER_CRITICAL( &xTaskQueueMutex );
			{
				traceTASK_DELAY();

				/* The calling task is the running task, so it cannot already
				be on an event list.  A task removed from an event list while
				the scheduler is suspended will not get placed in the ready
				list or removed from the blocked list until the scheduler is
				resumed. */
				prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToDelay );
			}
			taskEXIT_CRITICAL( &xTaskQueueMutex );
		}

		/* Reschedule unless a yield has already been performed on our
		behalf - we may have just put ourselves to sleep. */
		if( xAlreadyYielded != pdFALSE )
		{
			mtCOVERAGE_TEST_MARKER();
		}
		else
		{
			portYIELD_WITHIN_API();
		}
	}

#endif /* INCLUDE_vTaskDelay */
1535 /*-----------------------------------------------------------*/
1536
#if( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )

	/**
	 * Return the current state (eRunning, eReady, eBlocked, eSuspended or
	 * eDeleted) of the task referenced by xTask.  The state is derived from
	 * which kernel list the task's state list item is currently on.
	 *
	 * @param xTask Handle of the task to query (must not be NULL).
	 * @return The task's state at the moment of the query.
	 */
	eTaskState eTaskGetState( TaskHandle_t xTask )
	{
	eTaskState eReturn;
	List_t const * pxStateList, *pxDelayedList, *pxOverflowedDelayedList;
	const TCB_t * const pxTCB = xTask;

		configASSERT( pxTCB );

		taskENTER_CRITICAL( &xTaskQueueMutex );     //Need critical section incase either core context switches in between
		if( pxTCB == pxCurrentTCB[xPortGetCoreID()])
		{
			/* The task calling this function is querying its own state. */
			eReturn = eRunning;
		}
		#if (portNUM_PROCESSORS > 1)
		else if (pxTCB == pxCurrentTCB[!xPortGetCoreID()])
		{
			/* The queried task is currently running on the other core. */
			eReturn = eRunning;
		}
		#endif
		else
		{
			pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
			pxDelayedList = pxDelayedTaskList;
			pxOverflowedDelayedList = pxOverflowDelayedTaskList;

			if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
			{
				/* The task being queried is referenced from one of the Blocked
				lists. */
				eReturn = eBlocked;
			}

			#if ( INCLUDE_vTaskSuspend == 1 )
				else if( pxStateList == &xSuspendedTaskList )
				{
					/* The task being queried is referenced from the suspended
					list.  Is it genuinely suspended or is it blocked
					indefinitely? */
					if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
					{
						#if( configUSE_TASK_NOTIFICATIONS == 1 )
						{
							/* The task does not appear on the event list item of
							and of the RTOS objects, but could still be in the
							blocked state if it is waiting on its notification
							rather than waiting on an object. */
							if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
							{
								eReturn = eBlocked;
							}
							else
							{
								eReturn = eSuspended;
							}
						}
						#else
						{
							eReturn = eSuspended;
						}
						#endif
					}
					else
					{
						eReturn = eBlocked;
					}
				}
			#endif

			#if ( INCLUDE_vTaskDelete == 1 )
				else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
				{
					/* The task being queried is referenced from the deleted
					tasks list, or it is not referenced from any lists at
					all. */
					eReturn = eDeleted;
				}
			#endif

			else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
			{
				/* If the task is not in any other state, it must be in the
				Ready (including pending ready) state. */
				eReturn = eReady;
			}
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );

		return eReturn;
	} /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */

#endif /* INCLUDE_eTaskGetState */
1632 /*-----------------------------------------------------------*/
1633
#if ( INCLUDE_uxTaskPriorityGet == 1 )

	/**
	 * Return the current priority of the given task.
	 *
	 * @param xTask Handle of the task to query; NULL queries the calling task.
	 * @return The task's priority at the moment of the query.
	 */
	UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
	{
	TCB_t const *pxQueriedTCB;
	UBaseType_t uxPriorityToReturn;

		/* Take the kernel lock so the TCB cannot be deleted or have its
		priority changed while it is being read. */
		taskENTER_CRITICAL( &xTaskQueueMutex );
		{
			/* A NULL handle is shorthand for "the task calling this
			function". */
			pxQueriedTCB = prvGetTCBFromHandle( xTask );
			uxPriorityToReturn = pxQueriedTCB->uxPriority;
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );

		return uxPriorityToReturn;
	}

#endif /* INCLUDE_uxTaskPriorityGet */
1654 /*-----------------------------------------------------------*/
1655
#if ( INCLUDE_uxTaskPriorityGet == 1 )

	/**
	 * Interrupt-safe version of uxTaskPriorityGet().
	 *
	 * @param xTask Handle of the task to query; NULL queries the task that
	 *        was interrupted.
	 * @return The task's priority at the moment of the query.
	 */
	UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
	{
	TCB_t const *pxTCB;
	UBaseType_t uxReturn;

		/* RTOS ports that support interrupt nesting have the concept of a
		maximum	system call (or maximum API call) interrupt priority.
		Interrupts that are	above the maximum system call priority are keep
		permanently enabled, even when the RTOS kernel is in a critical section,
		but cannot make any calls to FreeRTOS API functions.  If configASSERT()
		is defined in FreeRTOSConfig.h then
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
		failure if a FreeRTOS API function is called from an interrupt that has
		been assigned a priority above the configured maximum system call
		priority.  Only FreeRTOS functions that end in FromISR can be called
		from interrupts	that have been assigned a priority at or (logically)
		below the maximum system call interrupt priority.  FreeRTOS maintains a
		separate interrupt safe API to ensure interrupt entry is as fast and as
		simple as possible.  More information (albeit Cortex-M specific) is
		provided on the following link:
		https://www.freertos.org/RTOS-Cortex-M3-M4.html */
		portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

		portENTER_CRITICAL_ISR(&xTaskQueueMutex );
		{
			/* If null is passed in here then it is the priority of the calling
			task that is being queried. */
			pxTCB = prvGetTCBFromHandle( xTask );
			uxReturn = pxTCB->uxPriority;
		}
		portEXIT_CRITICAL_ISR(&xTaskQueueMutex);

		return uxReturn;
	}

#endif /* INCLUDE_uxTaskPriorityGet */
1694 /*-----------------------------------------------------------*/
1695
1696 #if ( INCLUDE_vTaskPrioritySet == 1 )
1697
/* Change the (base) priority of xTask, or of the calling task when xTask is
NULL.  May yield on this core, or signal the other core via
taskYIELD_OTHER_CORE(), when the change means a different task should now be
running.  Out-of-range priorities are clipped, not rejected. */
void vTaskPrioritySet( TaskHandle_t xTask, UBaseType_t uxNewPriority )
{
TCB_t *pxTCB;
UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
BaseType_t xYieldRequired = pdFALSE;

	configASSERT( ( uxNewPriority < configMAX_PRIORITIES ) );

	/* Ensure the new priority is valid. */
	if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
	{
		uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		/* If null is passed in here then it is the priority of the calling
		task that is being changed. */
		pxTCB = prvGetTCBFromHandle( xTask );

		traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

		#if ( configUSE_MUTEXES == 1 )
		{
			/* With mutexes the task may be running at a temporarily inherited
			priority, so comparisons below are made against the base
			priority. */
			uxCurrentBasePriority = pxTCB->uxBasePriority;
		}
		#else
		{
			uxCurrentBasePriority = pxTCB->uxPriority;
		}
		#endif

		if( uxCurrentBasePriority != uxNewPriority )
		{
			/* The priority change may have readied a task of higher
			priority than the calling task. */
			if( uxNewPriority > uxCurrentBasePriority )
			{
				if( pxTCB != pxCurrentTCB[xPortGetCoreID()] )
				{
					/* The priority of a task other than the currently
					running task is being raised.  Is the priority being
					raised above that of the running task?  tskCAN_RUN_HERE()
					first checks the task's core affinity. */
					if ( tskCAN_RUN_HERE(pxTCB->xCoreID) && uxNewPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
					{
						xYieldRequired = pdTRUE;
					}
					else if ( pxTCB->xCoreID != xPortGetCoreID() )
					{
						/* The task is pinned to the other core - ask that
						core to reschedule instead of yielding here. */
						taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				else
				{
					/* The priority of the running task is being raised,
					but the running task must already be the highest
					priority task able to run so no yield is required. */
				}
			}
			else if( pxTCB == pxCurrentTCB[xPortGetCoreID()] )
			{
				/* Setting the priority of the running task down means
				there may now be another task of higher priority that
				is ready to execute. */
				xYieldRequired = pdTRUE;
			}
			else if( pxTCB != pxCurrentTCB[xPortGetCoreID()] )
			{
				/* The priority of a task other than the currently
				running task is being raised. Is the priority being
				raised above that of the running task? */
				if( uxNewPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
				{
					xYieldRequired = pdTRUE;
				}
				else if ( pxTCB->xCoreID != xPortGetCoreID() ) //Need to check if not currently running on other core
				{
					taskYIELD_OTHER_CORE( pxTCB->xCoreID, uxNewPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				/* Setting the priority of any other task down does not
				require a yield as the running task must be above the
				new priority of the task being modified. */
			}

			/* Remember the ready list the task might be referenced from
			before its uxPriority member is changed so the
			taskRESET_READY_PRIORITY() macro can function correctly. */
			uxPriorityUsedOnEntry = pxTCB->uxPriority;

			#if ( configUSE_MUTEXES == 1 )
			{
				/* Only change the priority being used if the task is not
				currently using an inherited priority.  An inherited
				(boosted) priority is left alone and will be re-evaluated
				when the mutex is released. */
				if( pxTCB->uxBasePriority == pxTCB->uxPriority )
				{
					pxTCB->uxPriority = uxNewPriority;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* The base priority gets set whatever. */
				pxTCB->uxBasePriority = uxNewPriority;
			}
			#else
			{
				pxTCB->uxPriority = uxNewPriority;
			}
			#endif

			/* Only reset the event list item value if the value is not
			being used for anything else. */
			if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
			{
				listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* If the task is in the blocked or suspended list we need do
			nothing more than change its priority variable. However, if
			the task is in a ready list it needs to be removed and placed
			in the list appropriate to its new priority. */
			if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
			{
				/* The task is currently in its ready list - remove before
				adding it to it's new ready list.  As we are in a critical
				section we can do this even if the scheduler is suspended. */
				if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
				{
					/* It is known that the task is in its ready list so
					there is no need to check again and the port level
					reset macro can be called directly. */
					portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
				prvAddTaskToReadyList( pxTCB );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			if( xYieldRequired != pdFALSE )
			{
				taskYIELD_IF_USING_PREEMPTION();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* Remove compiler warning about unused variables when the port
			optimised task selection is not being used. */
			( void ) uxPriorityUsedOnEntry;
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );
}
1878
1879 #endif /* INCLUDE_vTaskPrioritySet */
1880 /*-----------------------------------------------------------*/
1881
1882 #if ( INCLUDE_vTaskSuspend == 1 )
1883
/* Move xTaskToSuspend (or the calling task when NULL) out of the ready,
delayed or event lists and into the suspended list.  If the running task
suspends itself a yield is forced; if the scheduler has not started yet,
pxCurrentTCB for this core is repointed instead. */
void vTaskSuspend( TaskHandle_t xTaskToSuspend )
{
TCB_t *pxTCB;
TCB_t *curTCB;

	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		/* If null is passed in here then it is the running task that is
		being suspended. */
		pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

		traceTASK_SUSPEND( pxTCB );

		/* Remove task from the ready/delayed list and place in the
		suspended list. */
		if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
		{
			/* That was the last task at this priority - clear the
			corresponding bit in the top-ready-priority tracking. */
			taskRESET_READY_PRIORITY( pxTCB->uxPriority );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		/* Is the task waiting on an event also? */
		if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
		{
			( void ) uxListRemove( &( pxTCB->xEventListItem ) );
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );
		/* Snapshot this core's current TCB inside the critical section; it
		is compared against pxTCB after the section is exited.  NOTE(review):
		the comparison itself happens outside the critical section - assumed
		safe because a task suspending itself cannot be switched out before
		the comparison; confirm against the port's yield semantics. */
		curTCB = pxCurrentTCB[ xPortGetCoreID() ];

		#if( configUSE_TASK_NOTIFICATIONS == 1 )
		{
			if( pxTCB->ucNotifyState == taskWAITING_NOTIFICATION )
			{
				/* The task was blocked to wait for a notification, but is
				now suspended, so no notification was received. */
				pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
			}
		}
		#endif
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	if( xSchedulerRunning != pdFALSE )
	{
		/* Reset the next expected unblock time in case it referred to the
		task that is now in the Suspended state. */
		taskENTER_CRITICAL( &xTaskQueueMutex );
		{
			prvResetNextTaskUnblockTime();
		}
		taskEXIT_CRITICAL( &xTaskQueueMutex );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	if( pxTCB == curTCB )
	{
		if( xSchedulerRunning != pdFALSE )
		{
			/* The current task has just been suspended.  It must not have
			done so with the scheduler suspended, as there would then be no
			way to switch away from it. */
			taskENTER_CRITICAL(&xTaskQueueMutex);
			BaseType_t suspended = uxSchedulerSuspended[xPortGetCoreID()];
			taskEXIT_CRITICAL(&xTaskQueueMutex);

			configASSERT( suspended == 0 );
			portYIELD_WITHIN_API();
		}
		else
		{
			/* The scheduler is not running, but the task that was pointed
			to by pxCurrentTCB has just been suspended and pxCurrentTCB
			must be adjusted to point to a different task. */
			if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
			{
				/* No other tasks are ready, so set pxCurrentTCB back to
				NULL so when the next task is created pxCurrentTCB will
				be set to point to it no matter what its relative priority
				is. */
				taskENTER_CRITICAL(&xTaskQueueMutex);
				pxCurrentTCB[ xPortGetCoreID() ] = NULL;
				taskEXIT_CRITICAL(&xTaskQueueMutex);
			}
			else
			{
				vTaskSwitchContext();
			}
		}
	}
	else
	{
		if( xSchedulerRunning != pdFALSE )
		{
			/* A task other than the currently running task was suspended,
			reset the next expected unblock time in case it referred to the
			task that is now in the Suspended state. */
			taskENTER_CRITICAL(&xTaskQueueMutex);
			{
				prvResetNextTaskUnblockTime();
			}
			taskEXIT_CRITICAL(&xTaskQueueMutex);
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
}
2001
2002 #endif /* INCLUDE_vTaskSuspend */
2003 /*-----------------------------------------------------------*/
2004
2005 #if ( INCLUDE_vTaskSuspend == 1 )
2006
prvTaskIsTaskSuspended(const TaskHandle_t xTask)2007 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
2008 {
2009 BaseType_t xReturn = pdFALSE;
2010 const TCB_t * const pxTCB = xTask;
2011
2012 /* Accesses xPendingReadyList so must be called from a critical
2013 section. */
2014
2015 /* It does not make sense to check if the calling task is suspended. */
2016 configASSERT( xTask );
2017
2018 /* Is the task being resumed actually in the suspended list? */
2019 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
2020 {
2021 /* Has the task already been resumed from within an ISR? */
2022 if( listIS_CONTAINED_WITHIN( &xPendingReadyList[xPortGetCoreID()], &( pxTCB->xEventListItem )) ||
2023 listIS_CONTAINED_WITHIN( &xPendingReadyList[!xPortGetCoreID()], &( pxTCB->xEventListItem )) == pdFALSE )
2024 {
2025 /* Is it in the suspended list because it is in the Suspended
2026 state, or because is is blocked with no timeout? */
2027 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
2028 {
2029 xReturn = pdTRUE;
2030 }
2031 else
2032 {
2033 mtCOVERAGE_TEST_MARKER();
2034 }
2035 }
2036 else
2037 {
2038 mtCOVERAGE_TEST_MARKER();
2039 }
2040 }
2041 else
2042 {
2043 mtCOVERAGE_TEST_MARKER();
2044 }
2045
2046 return xReturn;
2047 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
2048
2049 #endif /* INCLUDE_vTaskSuspend */
2050 /*-----------------------------------------------------------*/
2051
2052 #if ( INCLUDE_vTaskSuspend == 1 )
2053
/* Move a previously suspended task back into its ready list.  Yields on this
core, or signals the task's pinned core, if the resumed task should preempt
the task currently running there.  A NULL handle or the running task's own
handle is ignored. */
void vTaskResume( TaskHandle_t xTaskToResume )
{
TCB_t * const pxTCB = xTaskToResume;

	/* It does not make sense to resume the calling task. */
	configASSERT( xTaskToResume );
	taskENTER_CRITICAL( &xTaskQueueMutex );

	/* The parameter cannot be NULL as it is impossible to resume the
	currently executing task. */
	if( ( pxTCB != pxCurrentTCB[xPortGetCoreID()] ) && ( pxTCB != NULL ) )
	{
		/* Only act if the task really is suspended (and not merely blocked
		indefinitely, or already readied from an ISR). */
		if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
		{
			traceTASK_RESUME( pxTCB );

			/* The ready list can be accessed even if the scheduler is
			suspended because this is inside a critical section. */
			( void ) uxListRemove( &( pxTCB->xStateListItem ) );
			prvAddTaskToReadyList( pxTCB );

			/* We may have just resumed a higher priority task.
			tskCAN_RUN_HERE() first checks the task's core affinity. */
			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
			{
				/* This yield may not cause the task just resumed to run,
				but will leave the lists in the correct state for the
				next yield. */
				taskYIELD_IF_USING_PREEMPTION();
			}
			else if( pxTCB->xCoreID != xPortGetCoreID() )
			{
				/* The task is pinned to the other core - ask that core to
				reschedule. */
				taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );
}
2103
2104 #endif /* INCLUDE_vTaskSuspend */
2105
2106 /*-----------------------------------------------------------*/
2107
2108 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
2109
/* Interrupt-safe variant of vTaskResume().  Returns pdTRUE when the resumed
task should preempt the task interrupted on this core, so the ISR can request
a context switch on exit.  If this core's scheduler is suspended the task is
parked in the pending ready list instead of a ready list. */
BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
{
BaseType_t xYieldRequired = pdFALSE;
TCB_t * const pxTCB = xTaskToResume;

	configASSERT( xTaskToResume );

	/* RTOS ports that support interrupt nesting have the concept of a
	maximum system call (or maximum API call) interrupt priority.
	Interrupts that are above the maximum system call priority are keep
	permanently enabled, even when the RTOS kernel is in a critical section,
	but cannot make any calls to FreeRTOS API functions.  If configASSERT()
	is defined in FreeRTOSConfig.h then
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has
	been assigned a priority above the configured maximum system call
	priority.  Only FreeRTOS functions that end in FromISR can be called
	from interrupts that have been assigned a priority at or (logically)
	below the maximum system call interrupt priority.  FreeRTOS maintains a
	separate interrupt safe API to ensure interrupt entry is as fast and as
	simple as possible.  More information (albeit Cortex-M specific) is
	provided on the following link:
	https://www.freertos.org/RTOS-Cortex-M3-M4.html */
	/* NOTE(review): the upstream assertion below is deliberately commented
	out in this port - confirm whether the port supplies an equivalent
	interrupt-priority check elsewhere. */
	//portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
	{
		if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
		{
			traceTASK_RESUME_FROM_ISR( pxTCB );

			/* Check the ready lists can be accessed. */
			if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
			{

				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				prvAddTaskToReadyList( pxTCB );

				/* tskCAN_RUN_HERE() first checks the task's core affinity. */
				if( tskCAN_RUN_HERE( pxTCB->xCoreID ) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
				{
					xYieldRequired = pdTRUE;
				}
				else if ( pxTCB->xCoreID != xPortGetCoreID() )
				{
					/* The task is pinned to the other core - signal that
					core to reschedule. */
					taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

			}
			else
			{
				/* The delayed or ready lists cannot be accessed so the task
				is held in the pending ready list until the scheduler is
				unsuspended. */
				vListInsertEnd( &( xPendingReadyList[xPortGetCoreID()] ), &( pxTCB->xEventListItem ) );
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);

	return xYieldRequired;
}
2179
2180 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
2181 /*-----------------------------------------------------------*/
2182
/* Create one idle task per core (and the timer service task when enabled),
then hand control to the port layer via xPortStartScheduler().  Does not
return unless the kernel could not start or xTaskEndScheduler() is called. */
void vTaskStartScheduler( void )
{
BaseType_t xReturn;

	/* NOTE(review): this condition is always false (X==1 && X==0), so the
	static-allocation buffers are compiled out.  This mirrors the disabled
	static idle-task creation below (IDF-2243) - presumably intentional. */
	#if( configSUPPORT_STATIC_ALLOCATION == 1 && configSUPPORT_STATIC_ALLOCATION == 0 )
	StaticTask_t *pxIdleTaskTCBBuffer[portNUM_PROCESSORS] = {NULL};
	StackType_t *pxIdleTaskStackBuffer[portNUM_PROCESSORS] = {NULL};
	uint32_t ulIdleTaskStackSize;
	#endif

	/* One idle task is created for, and pinned to, each core. */
	for(BaseType_t i = 0; i < portNUM_PROCESSORS; i++)
	{
		/* Add the idle task at the lowest priority. */
		#if( 0 ) /* configSUPPORT_STATIC_ALLOCATION == 1 ) Temporarily unsupported IDF-2243 */
		{
			/* The Idle task is created using user provided RAM - obtain the
			address of the RAM then create the idle task. */
			vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer[i], &pxIdleTaskStackBuffer[i], &ulIdleTaskStackSize );
			xIdleTaskHandle[i] = xTaskCreateStaticPinnedToCore(	prvIdleTask,
								configIDLE_TASK_NAME,
								ulIdleTaskStackSize,
								( void * ) NULL, /*lint !e961.  The cast is not redundant for all compilers. */
								portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
								pxIdleTaskStackBuffer[i],
								pxIdleTaskTCBBuffer[i],
								i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */

			if( xIdleTaskHandle[i] != NULL )
			{
				xReturn = pdPASS;
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		#else
		{
			/* The Idle task is being created using dynamically allocated RAM. */
			xReturn = xTaskCreatePinnedToCore(	prvIdleTask,
								configIDLE_TASK_NAME,
								configIDLE_TASK_STACK_SIZE,
								( void * ) NULL,
								portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
								&xIdleTaskHandle[i],
								i ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */

			if( xIdleTaskHandle[i] != NULL )
			{
				xReturn = pdPASS;
			}
			else
			{
				xReturn = pdFAIL;
			}
		}
		#endif /* configSUPPORT_STATIC_ALLOCATION */
	}

	#if ( configUSE_TIMERS == 1 )
	{
		/* Only attempt to start the timer service task if all idle tasks
		were created successfully. */
		if( xReturn == pdPASS )
		{
			xReturn = xTimerCreateTimerTask();
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#endif /* configUSE_TIMERS */

	if( xReturn == pdPASS )
	{
		/* freertos_tasks_c_additions_init() should only be called if the user
		definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
		the only macro called by the function. */
		#ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
		{
			freertos_tasks_c_additions_init();
		}
		#endif

		/* Interrupts are turned off here, to ensure a tick does not occur
		before or during the call to xPortStartScheduler().  The stacks of
		the created tasks contain a status word with interrupts switched on
		so interrupts will automatically get re-enabled when the first task
		starts to run. */
		portDISABLE_INTERRUPTS();

		#if ( configUSE_NEWLIB_REENTRANT == 1 )
		{
			/* The _impure_ptr switch is disabled here; this port resolves
			the per-task reent structure via __getreent() instead. */
			// /* Switch Newlib's _impure_ptr variable to point to the _reent
			// structure specific to the task that will run first. */
			// _impure_ptr = &( pxCurrentTCB[xPortGetCoreID()]->xNewLib_reent );
		}
		#endif /* configUSE_NEWLIB_REENTRANT */

		xNextTaskUnblockTime = portMAX_DELAY;
		xSchedulerRunning = pdTRUE;
		xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

		/* If configGENERATE_RUN_TIME_STATS is defined then the following
		macro must be defined to configure the timer/counter used to generate
		the run time counter time base.  NOTE:  If configGENERATE_RUN_TIME_STATS
		is set to 0 and the following line fails to build then ensure you do not
		have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
		FreeRTOSConfig.h file. */
		portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

		traceTASK_SWITCHED_IN();

		/* Setting up the timer tick is hardware specific and thus in the
		portable interface. */
		if( xPortStartScheduler() != pdFALSE )
		{
			/* Should not reach here as if the scheduler is running the
			function will not return. */
		}
		else
		{
			/* Should only reach here if a task calls xTaskEndScheduler(). */
		}
	}
	else
	{
		/* This line will only be reached if the kernel could not be started,
		because there was not enough FreeRTOS heap to create the idle task
		or the timer task. */
		configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
	}

	/* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
	meaning xIdleTaskHandle is not used anywhere else. */
	( void ) xIdleTaskHandle[0];
}
2319 /*-----------------------------------------------------------*/
2320
vTaskEndScheduler(void)2321 void vTaskEndScheduler( void )
2322 {
2323 /* Stop the scheduler interrupts and call the portable scheduler end
2324 routine so the original ISRs can be restored if necessary. The port
2325 layer must ensure interrupts enable bit is left in the correct state. */
2326 portDISABLE_INTERRUPTS();
2327 xSchedulerRunning = pdFALSE;
2328 vPortEndScheduler();
2329 }
2330 /*----------------------------------------------------------*/
2331
2332 #if ( configUSE_NEWLIB_REENTRANT == 1 )
2333 //Return global reent struct if FreeRTOS isn't running,
__getreent(void)2334 struct _reent* __getreent(void) {
2335 //No lock needed because if this changes, we won't be running anymore.
2336 TCB_t *currTask=xTaskGetCurrentTaskHandle();
2337 if (currTask==NULL) {
2338 //No task running. Return global struct.
2339 return _GLOBAL_REENT;
2340 } else {
2341 //We have a task; return its reentrant struct.
2342 return &currTask->xNewLib_reent;
2343 }
2344 }
2345 #endif
2346
2347
vTaskSuspendAll(void)2348 void vTaskSuspendAll( void )
2349 {
2350 /* A critical section is not required as the variable is of type
2351 BaseType_t. Please read Richard Barry's reply in the following link to a
2352 post in the FreeRTOS support forum before reporting this as a bug! -
2353 http://goo.gl/wu4acr */
2354 unsigned state;
2355
2356 state = portENTER_CRITICAL_NESTED();
2357 ++uxSchedulerSuspended[ xPortGetCoreID() ];
2358 portEXIT_CRITICAL_NESTED(state);
2359 }
2360 /*----------------------------------------------------------*/
2361
2362 #if ( configUSE_TICKLESS_IDLE != 0 )
2363
2364 #if ( portNUM_PROCESSORS > 1 )
2365
xHaveReadyTasks(void)2366 static BaseType_t xHaveReadyTasks( void )
2367 {
2368 for (int i = tskIDLE_PRIORITY + 1; i < configMAX_PRIORITIES; ++i)
2369 {
2370 if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ i ] ) ) > 0 )
2371 {
2372 return pdTRUE;
2373 }
2374 else
2375 {
2376 mtCOVERAGE_TEST_MARKER();
2377 }
2378 }
2379 return pdFALSE;
2380 }
2381
2382 #endif // portNUM_PROCESSORS > 1
2383
/* Return the number of ticks for which the system can safely stay in a
tickless low-power state: zero when any task could run now, otherwise the
distance from the current tick count to the next unblock time. */
static TickType_t prvGetExpectedIdleTime( void )
{
TickType_t xReturn;


	taskENTER_CRITICAL(&xTaskQueueMutex);
	if( pxCurrentTCB[ xPortGetCoreID() ]->uxPriority > tskIDLE_PRIORITY )
	{
		/* A task above idle priority is current - cannot sleep. */
		xReturn = 0;
	}
#if portNUM_PROCESSORS > 1
	/* This function is called from Idle task; in single core case this
	 * means that no higher priority tasks are ready to run, and we can
	 * enter sleep. In SMP case, there might be ready tasks waiting for
	 * the other CPU, so need to check all ready lists.
	 */
	else if( xHaveReadyTasks() )
	{
		xReturn = 0;
	}
#endif // portNUM_PROCESSORS > 1
	else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > portNUM_PROCESSORS )
	{
		/* There are other idle priority tasks in the ready state.  If
		time slicing is used then the very next tick interrupt must be
		processed. */
		xReturn = 0;
	}
	else
	{
		/* Nothing to do before the next task's timeout expires. */
		xReturn = xNextTaskUnblockTime - xTickCount;
	}
	taskEXIT_CRITICAL(&xTaskQueueMutex);

	return xReturn;
}
2420
2421 #endif /* configUSE_TICKLESS_IDLE */
2422 /*----------------------------------------------------------*/
2423
/* Undo one vTaskSuspendAll() on this core.  When the suspension count drops
to zero: move ISR-readied tasks from the pending list to their ready lists,
replay any ticks that were pended while suspended, and yield if required.
Returns pdTRUE when a yield was already performed here. */
BaseType_t xTaskResumeAll( void )
{
TCB_t *pxTCB = NULL;
BaseType_t xAlreadyYielded = pdFALSE;
TickType_t xTicksToNextUnblockTime;

	/* If uxSchedulerSuspended[xPortGetCoreID()] is zero then this function does not match a
	previous call to taskENTER_CRITICAL( &xTaskQueueMutex ). */
	configASSERT( uxSchedulerSuspended[xPortGetCoreID()] );

	/* It is possible that an ISR caused a task to be removed from an event
	list while the scheduler was suspended.  If this was the case then the
	removed task will have been added to the xPendingReadyList.  Once the
	scheduler has been resumed it is safe to move all the pending ready
	tasks from this list into their appropriate ready list. */
	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		--uxSchedulerSuspended[xPortGetCoreID()];

		/* Nested suspensions: only act when the outermost one is undone. */
		if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
		{
			if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
			{
				/* Move any readied tasks from the pending list into the
				appropriate ready list. */
				while( listLIST_IS_EMPTY( &xPendingReadyList[xPortGetCoreID()] ) == pdFALSE )
				{
					pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList[xPortGetCoreID()] ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
					( void ) uxListRemove( &( pxTCB->xEventListItem ) );
					( void ) uxListRemove( &( pxTCB->xStateListItem ) );
					prvAddTaskToReadyList( pxTCB );

					/* If the moved task has a priority higher than the current
					task then a yield must be performed.  tskCAN_RUN_HERE()
					first checks the task's core affinity. */
					if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
					{
						xYieldPending[xPortGetCoreID()] = pdTRUE;
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}

				if( pxTCB != NULL )
				{
					/* A task was unblocked while the scheduler was suspended,
					which may have prevented the next unblock time from being
					re-calculated, in which case re-calculate it now.  Mainly
					important for low power tickless implementations, where
					this can prevent an unnecessary exit from low power
					state. */
					prvResetNextTaskUnblockTime();
				}

				/* If any ticks occurred while the scheduler was suspended then
				they should be processed now.  This ensures the tick count does
				not	slip, and that any delayed tasks are resumed at the correct
				time. */
				while( xPendedTicks > ( TickType_t ) 0 )
				{
					/* Calculate how far into the future the next task will
					leave the Blocked state because its timeout expired.  If
					there are no tasks due to leave the blocked state between
					the time now and the time at which the tick count overflows
					then xNextTaskUnblockTime will the tick overflow time.
					This means xNextTaskUnblockTime can never be less than
					xTickCount, and the following can therefore not
					underflow. */
					configASSERT( xNextTaskUnblockTime >= xTickCount );
					xTicksToNextUnblockTime = xNextTaskUnblockTime - xTickCount;

					/* Don't want to move the tick count more than the number
					of ticks that are pending, so cap if necessary. */
					if( xTicksToNextUnblockTime > xPendedTicks )
					{
						xTicksToNextUnblockTime = xPendedTicks;
					}

					if( xTicksToNextUnblockTime == 0 )
					{
						/* xTicksToNextUnblockTime could be zero if the tick
						count is about to overflow and xTicksToNetUnblockTime
						holds the time at which the tick count will overflow
						(rather than the time at which the next task will
						unblock).  Set to 1 otherwise xPendedTicks won't be
						decremented below. */
						xTicksToNextUnblockTime = ( TickType_t ) 1;
					}
					else if( xTicksToNextUnblockTime > ( TickType_t ) 1)
					{
						/* Move the tick count one short of the next unblock
						time, then call xTaskIncrementTick() to move the tick
						count up to the next unblock time to unblock the task,
						if any.  This will also swap the blocked task and
						overflow blocked task lists if necessary. */
						xTickCount += ( xTicksToNextUnblockTime - ( TickType_t ) 1 );
					}
					xYieldPending[xPortGetCoreID()] |= xTaskIncrementTick();

					/* Adjust for the number of ticks just added to
					xTickCount and go around the loop again if
					xTicksToCatchUp is still greater than 0. */
					xPendedTicks -= xTicksToNextUnblockTime;
				}

				if( xYieldPending[xPortGetCoreID()] != pdFALSE )
				{
					#if( configUSE_PREEMPTION != 0 )
					{
						xAlreadyYielded = pdTRUE;
					}
					#endif
					taskYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	return xAlreadyYielded;
}
2554 /*-----------------------------------------------------------*/
2555
xTaskGetTickCount(void)2556 TickType_t xTaskGetTickCount( void )
2557 {
2558 TickType_t xTicks;
2559
2560 xTicks = xTickCount;
2561
2562 return xTicks;
2563 }
2564 /*-----------------------------------------------------------*/
2565
xTaskGetTickCountFromISR(void)2566 TickType_t xTaskGetTickCountFromISR( void )
2567 {
2568 TickType_t xReturn;
2569 UBaseType_t uxSavedInterruptStatus;
2570
2571 /* RTOS ports that support interrupt nesting have the concept of a maximum
2572 system call (or maximum API call) interrupt priority. Interrupts that are
2573 above the maximum system call priority are kept permanently enabled, even
2574 when the RTOS kernel is in a critical section, but cannot make any calls to
2575 FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2576 then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2577 failure if a FreeRTOS API function is called from an interrupt that has been
2578 assigned a priority above the configured maximum system call priority.
2579 Only FreeRTOS functions that end in FromISR can be called from interrupts
2580 that have been assigned a priority at or (logically) below the maximum
2581 system call interrupt priority. FreeRTOS maintains a separate interrupt
2582 safe API to ensure interrupt entry is as fast and as simple as possible.
2583 More information (albeit Cortex-M specific) is provided on the following
2584 link: https://www.freertos.org/RTOS-Cortex-M3-M4.html */
2585 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2586
2587 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2588 {
2589 xReturn = xTickCount;
2590 }
2591 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2592
2593 return xReturn;
2594 }
2595 /*-----------------------------------------------------------*/
2596
uxTaskGetNumberOfTasks(void)2597 UBaseType_t uxTaskGetNumberOfTasks( void )
2598 {
2599 /* A critical section is not required because the variables are of type
2600 BaseType_t. */
2601 return uxCurrentNumberOfTasks;
2602 }
2603 /*-----------------------------------------------------------*/
2604
char *pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
TCB_t *pxQueriedTCB;

    /* A NULL handle means "the calling task". */
    pxQueriedTCB = prvGetTCBFromHandle( xTaskToQuery );
    configASSERT( pxQueriedTCB );

    /* The name is stored inside the TCB; return a pointer to it. */
    return &( pxQueriedTCB->pcTaskName[ 0 ] );
}
2615 /*-----------------------------------------------------------*/
2616
2617 #if ( INCLUDE_xTaskGetHandle == 1 )
2618
prvSearchForNameWithinSingleList(List_t * pxList,const char pcNameToQuery[])2619 static TCB_t *prvSearchForNameWithinSingleList( List_t *pxList, const char pcNameToQuery[] )
2620 {
2621 TCB_t *pxNextTCB, *pxFirstTCB, *pxReturn = NULL;
2622 UBaseType_t x;
2623 char cNextChar;
2624 BaseType_t xBreakLoop;
2625
2626 /* This function is called with the scheduler suspended. */
2627
2628 if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
2629 {
2630 listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2631
2632 do
2633 {
2634 listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
2635
2636 /* Check each character in the name looking for a match or
2637 mismatch. */
2638 xBreakLoop = pdFALSE;
2639 for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
2640 {
2641 cNextChar = pxNextTCB->pcTaskName[ x ];
2642
2643 if( cNextChar != pcNameToQuery[ x ] )
2644 {
2645 /* Characters didn't match. */
2646 xBreakLoop = pdTRUE;
2647 }
2648 else if( cNextChar == ( char ) 0x00 )
2649 {
2650 /* Both strings terminated, a match must have been
2651 found. */
2652 pxReturn = pxNextTCB;
2653 xBreakLoop = pdTRUE;
2654 }
2655 else
2656 {
2657 mtCOVERAGE_TEST_MARKER();
2658 }
2659
2660 if( xBreakLoop != pdFALSE )
2661 {
2662 break;
2663 }
2664 }
2665
2666 if( pxReturn != NULL )
2667 {
2668 /* The handle has been found. */
2669 break;
2670 }
2671
2672 } while( pxNextTCB != pxFirstTCB );
2673 }
2674 else
2675 {
2676 mtCOVERAGE_TEST_MARKER();
2677 }
2678
2679 return pxReturn;
2680 }
2681
2682 #endif /* INCLUDE_xTaskGetHandle */
2683 /*-----------------------------------------------------------*/
2684
2685 #if ( INCLUDE_xTaskGetHandle == 1 )
2686
xTaskGetHandle(const char * pcNameToQuery)2687 TaskHandle_t xTaskGetHandle( const char *pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2688 {
2689 UBaseType_t uxQueue = configMAX_PRIORITIES;
2690 TCB_t* pxTCB;
2691
2692 /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
2693 configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );
2694
2695 taskENTER_CRITICAL( &xTaskQueueMutex );
2696 {
2697 /* Search the ready lists. */
2698 do
2699 {
2700 uxQueue--;
2701 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );
2702
2703 if( pxTCB != NULL )
2704 {
2705 /* Found the handle. */
2706 break;
2707 }
2708
2709 } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
2710
2711 /* Search the delayed lists. */
2712 if( pxTCB == NULL )
2713 {
2714 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
2715 }
2716
2717 if( pxTCB == NULL )
2718 {
2719 pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
2720 }
2721
2722 #if ( INCLUDE_vTaskSuspend == 1 )
2723 {
2724 if( pxTCB == NULL )
2725 {
2726 /* Search the suspended list. */
2727 pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
2728 }
2729 }
2730 #endif
2731
2732 #if( INCLUDE_vTaskDelete == 1 )
2733 {
2734 if( pxTCB == NULL )
2735 {
2736 /* Search the deleted list. */
2737 pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
2738 }
2739 }
2740 #endif
2741 }
2742 taskEXIT_CRITICAL( &xTaskQueueMutex );
2743
2744 return pxTCB;
2745 }
2746
2747 #endif /* INCLUDE_xTaskGetHandle */
2748 /*-----------------------------------------------------------*/
2749
2750 #if ( configUSE_TRACE_FACILITY == 1 )
2751
/*
 * Fill pxTaskStatusArray with one TaskStatus_t entry per task in the system:
 * ready, blocked (both delayed lists), deleted-but-not-cleaned-up and
 * suspended tasks.
 *
 * pxTaskStatusArray  Caller supplied array; must have room for at least
 *                    uxCurrentNumberOfTasks entries, otherwise nothing is
 *                    written and 0 is returned.
 * uxArraySize        Capacity of pxTaskStatusArray, in entries.
 * pulTotalRunTime    Optional out parameter: receives the total run time
 *                    counter when run time stats are enabled, 0 otherwise.
 *                    May be NULL.
 *
 * Returns the number of TaskStatus_t entries populated.
 */
UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray, const UBaseType_t uxArraySize, uint32_t * const pulTotalRunTime )
{
UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

    taskENTER_CRITICAL( &xTaskQueueMutex );
    {
        /* Is there a space in the array for each task in the system? */
        if( uxArraySize >= uxCurrentNumberOfTasks )
        {
            /* Fill in an TaskStatus_t structure with information on each
            task in the Ready state, walking priorities from highest to
            lowest (uxQueue is pre-decremented, so the idle priority list is
            included). */
            do
            {
                uxQueue--;
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );

            } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

            /* Fill in an TaskStatus_t structure with information on each
            task in the Blocked state (both the current and the overflow
            delayed lists). */
            uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
            uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );

            #if( INCLUDE_vTaskDelete == 1 )
            {
                /* Fill in an TaskStatus_t structure with information on
                each task that has been deleted but not yet cleaned up. */
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
            }
            #endif

            #if ( INCLUDE_vTaskSuspend == 1 )
            {
                /* Fill in an TaskStatus_t structure with information on
                each task in the Suspended state. */
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
            }
            #endif

            #if ( configGENERATE_RUN_TIME_STATS == 1)
            {
                if( pulTotalRunTime != NULL )
                {
                    /* Some ports provide an alternative macro that writes
                    through its argument instead of returning a value. */
                    #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                        portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                    #else
                        *pulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
                    #endif
                }
            }
            #else
            {
                /* Run time stats disabled - report 0 rather than leaving the
                caller's variable uninitialised. */
                if( pulTotalRunTime != NULL )
                {
                    *pulTotalRunTime = 0;
                }
            }
            #endif
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL( &xTaskQueueMutex );

    return uxTask;
}
2820
2821 #endif /* configUSE_TRACE_FACILITY */
2822 /*----------------------------------------------------------*/
2823
2824 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2825
xTaskGetIdleTaskHandle(void)2826 TaskHandle_t xTaskGetIdleTaskHandle( void )
2827 {
2828 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2829 started, then xIdleTaskHandle will be NULL. */
2830 configASSERT( ( xIdleTaskHandle[xPortGetCoreID()] != NULL ) );
2831 return xIdleTaskHandle[xPortGetCoreID()];
2832 }
2833
xTaskGetIdleTaskHandleForCPU(UBaseType_t cpuid)2834 TaskHandle_t xTaskGetIdleTaskHandleForCPU( UBaseType_t cpuid )
2835 {
2836 configASSERT( cpuid < portNUM_PROCESSORS );
2837 configASSERT( ( xIdleTaskHandle[cpuid] != NULL ) );
2838 return xIdleTaskHandle[cpuid];
2839 }
2840 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2841 /*----------------------------------------------------------*/
2842
2843 /* This conditional compilation should use inequality to 0, not equality to 1.
2844 This is to ensure vTaskStepTick() is available when user defined low power mode
2845 implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2846 1. */
2847 #if ( configUSE_TICKLESS_IDLE != 0 )
2848
vTaskStepTick(const TickType_t xTicksToJump)2849 void vTaskStepTick( const TickType_t xTicksToJump )
2850 {
2851 /* Correct the tick count value after a period during which the tick
2852 was suppressed. Note this does *not* call the tick hook function for
2853 each stepped tick. */
2854 taskENTER_CRITICAL(&xTaskQueueMutex);
2855 configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );
2856 xTickCount += xTicksToJump;
2857 traceINCREASE_TICK_COUNT( xTicksToJump );
2858 taskEXIT_CRITICAL(&xTaskQueueMutex);
2859 }
2860
2861 #endif /* configUSE_TICKLESS_IDLE */
2862 /*----------------------------------------------------------*/
2863
xTaskCatchUpTicks(TickType_t xTicksToCatchUp)2864 BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
2865 {
2866 BaseType_t xYieldRequired = pdFALSE;
2867
2868 /* Must not be called with the scheduler suspended as the implementation
2869 relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
2870 configASSERT( uxSchedulerSuspended[xPortGetCoreID()] == 0 );
2871
2872 /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occuring when
2873 the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
2874 taskENTER_CRITICAL( &xTaskQueueMutex );
2875 xPendedTicks += xTicksToCatchUp;
2876 taskEXIT_CRITICAL( &xTaskQueueMutex );
2877
2878 return xYieldRequired;
2879 }
2880 /*----------------------------------------------------------*/
2881
2882 #if ( INCLUDE_xTaskAbortDelay == 1 )
2883
/*
 * Force a task out of the Blocked state before its block time expires.
 *
 * xTask  Handle of the task to unblock; must not be NULL.
 *
 * Returns pdPASS if the task was Blocked and has been moved to the Ready
 * state, pdFAIL if the task was not in the Blocked state.
 */
BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
{
TCB_t *pxTCB = xTask;
BaseType_t xReturn;

    configASSERT( pxTCB );

    taskENTER_CRITICAL( &xTaskQueueMutex );
    {
        /* A task can only be prematurely removed from the Blocked state if
        it is actually in the Blocked state. */
        if( eTaskGetState( xTask ) == eBlocked )
        {
            xReturn = pdPASS;

            /* Remove the reference to the task from the blocked list. An
            interrupt won't touch the xStateListItem because the
            scheduler is suspended. */
            ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

            /* Is the task waiting on an event also? If so remove it from
            the event list too. Interrupts can touch the event list item,
            even though the scheduler is suspended, so a critical section
            is used. */
            /* NOTE(review): xTaskQueueMutex is taken again here while already
            held above - assumes the port's critical sections nest; confirm
            against the port layer. */
            taskENTER_CRITICAL( &xTaskQueueMutex );
            {
                if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                {
                    ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
                    /* Flag that the delay was aborted rather than timing out
                    - presumably read by the blocking API on return; verify
                    against the ucDelayAborted consumers. */
                    pxTCB->ucDelayAborted = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL( &xTaskQueueMutex );

            /* Place the unblocked task into the appropriate ready list. */
            prvAddTaskToReadyList( pxTCB );

            /* A task being unblocked cannot cause an immediate context
            switch if preemption is turned off. */
            #if ( configUSE_PREEMPTION == 1 )
            {
                /* Preemption is on, but a context switch should only be
                performed if the unblocked task has a priority that is
                equal to or higher than the currently executing task. */
                if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
                {
                    /* Pend the yield to be performed when the scheduler
                    is unsuspended. */
                    xYieldPending[xPortGetCoreID()] = pdTRUE;
                }
                else if ( pxTCB->xCoreID != xPortGetCoreID() )
                {
                    /* The task is pinned to another core - request a yield
                    there instead. */
                    taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority);
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            #endif /* configUSE_PREEMPTION */
        }
        else
        {
            xReturn = pdFAIL;
        }
    }
    taskEXIT_CRITICAL( &xTaskQueueMutex );

    return xReturn;
}
2958
2959 #endif /* INCLUDE_xTaskAbortDelay */
2960 /*----------------------------------------------------------*/
2961
/*
 * Called by the portable layer on each tick interrupt (and from
 * xTaskResumeAll() to unwind pended ticks).  Increments the tick count,
 * moves tasks whose block time has expired from the delayed lists to the
 * ready lists, and returns pdTRUE when a context switch should be performed
 * on the calling core.
 */
BaseType_t xTaskIncrementTick( void )
{
TCB_t * pxTCB;
TickType_t xItemValue;
BaseType_t xSwitchRequired = pdFALSE;

    /* Only allow core 0 increase the tick count in the case of xPortSysTickHandler processing. */
    /* And allow core 0 and core 1 to unwind uxPendedTicks during xTaskResumeAll. */

    if (xPortInIsrContext())
    {
        /* Tick hooks run on every core's tick interrupt... */
        #if ( configUSE_TICK_HOOK == 1 )
        vApplicationTickHook();
        #endif /* configUSE_TICK_HOOK */
        #if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
        esp_vApplicationTickHook();
        #endif /* CONFIG_FREERTOS_LEGACY_HOOKS */

        /* ...but only core 0 advances the shared tick count; other cores
        just request a switch so time slicing still happens there. */
        if (xPortGetCoreID() != 0 )
        {
            return pdTRUE;
        }
    }

    /* Called by the portable layer each time a tick interrupt occurs.
    Increments the tick then checks to see if the new tick value will cause any
    tasks to be unblocked. */
    traceTASK_INCREMENT_TICK( xTickCount );
    if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
    {
        taskENTER_CRITICAL_ISR( &xTaskQueueMutex );
        /* Minor optimisation. The tick count cannot change in this
        block. */
        const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;

        /* Increment the RTOS tick, switching the delayed and overflowed
        delayed lists if it wraps to 0. */
        xTickCount = xConstTickCount;

        if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
        {
            taskSWITCH_DELAYED_LISTS();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* See if this tick has made a timeout expire. Tasks are stored in
        the queue in the order of their wake time - meaning once one task
        has been found whose block time has not expired there is no need to
        look any further down the list. */
        if( xConstTickCount >= xNextTaskUnblockTime )
        {
            for( ;; )
            {
                if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
                {
                    /* The delayed list is empty. Set xNextTaskUnblockTime
                    to the maximum possible value so it is extremely
                    unlikely that the
                    if( xTickCount >= xNextTaskUnblockTime ) test will pass
                    next time through. */
                    xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    break;
                }
                else
                {
                    /* The delayed list is not empty, get the value of the
                    item at the head of the delayed list. This is the time
                    at which the task at the head of the delayed list must
                    be removed from the Blocked state. */
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );

                    if( xConstTickCount < xItemValue )
                    {
                        /* It is not time to unblock this item yet, but the
                        item value is the time at which the task at the head
                        of the blocked list must be removed from the Blocked
                        state - so record the item value in
                        xNextTaskUnblockTime. */
                        xNextTaskUnblockTime = xItemValue;
                        break; /*lint !e9011 Code structure here is deedmed easier to understand with multiple breaks. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* It is time to remove the item from the Blocked state. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                    /* Is the task waiting on an event also? If so remove
                    it from the event list. */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Place the unblocked task into the appropriate ready
                    list. */
                    prvAddTaskToReadyList( pxTCB );

                    /* A task being unblocked cannot cause an immediate
                    context switch if preemption is turned off. */
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Preemption is on, but a context switch should
                        only be performed if the unblocked task has a
                        priority that is equal to or higher than the
                        currently executing task. */
                        if( pxTCB->uxPriority >= pxCurrentTCB[xPortGetCoreID()]->uxPriority )
                        {
                            xSwitchRequired = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_PREEMPTION */
                }
            }
        }

        /* Tasks of equal priority to the currently running task will share
        processing time (time slice) if preemption is on, and the application
        writer has not explicitly turned time slicing off. */
        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
        {
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB[xPortGetCoreID()]->uxPriority ] ) ) > ( UBaseType_t ) 1 )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */
        taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
    }
    else
    {
        /* The scheduler is suspended on this core - remember the tick so it
        can be processed later in xTaskResumeAll(). */
        ++xPendedTicks;
    }

    #if ( configUSE_PREEMPTION == 1 )
    {
        /* A yield may already have been pended on this core (e.g. by an
        ISR); honour it now. */
        if( xYieldPending[xPortGetCoreID()] != pdFALSE )
        {
            xSwitchRequired = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_PREEMPTION */

    return xSwitchRequired;
}
3129 /*-----------------------------------------------------------*/
3130
3131 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3132
void vTaskSetApplicationTaskTag( TaskHandle_t xTask, TaskHookFunction_t pxHookFunction )
{
TCB_t *pxTargetTCB;

    /* A NULL handle selects the calling task. */
    if( xTask == NULL )
    {
        pxTargetTCB = ( TCB_t * ) pxCurrentTCB[ xPortGetCoreID() ];
    }
    else
    {
        pxTargetTCB = xTask;
    }

    /* The tag can be read from an interrupt, so the write is performed
    inside a critical section. */
    taskENTER_CRITICAL( &xTaskQueueMutex );
    {
        pxTargetTCB->pxTaskTag = pxHookFunction;
    }
    taskEXIT_CRITICAL( &xTaskQueueMutex );
}
3156
3157 #endif /* configUSE_APPLICATION_TASK_TAG */
3158 /*-----------------------------------------------------------*/
3159
3160 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3161
xTaskGetApplicationTaskTag(TaskHandle_t xTask)3162 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
3163 {
3164 TCB_t *pxTCB;
3165 TaskHookFunction_t xReturn;
3166
3167 /* If xTask is NULL then set the calling task's hook. */
3168 pxTCB = prvGetTCBFromHandle( xTask );
3169
3170 /* Save the hook function in the TCB. A critical section is required as
3171 the value can be accessed from an interrupt. */
3172 taskENTER_CRITICAL( &xTaskQueueMutex );
3173 {
3174 xReturn = pxTCB->pxTaskTag;
3175 }
3176 taskEXIT_CRITICAL( &xTaskQueueMutex );
3177
3178 return xReturn;
3179 }
3180
3181 #endif /* configUSE_APPLICATION_TASK_TAG */
3182 /*-----------------------------------------------------------*/
3183
3184 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3185
xTaskGetApplicationTaskTagFromISR(TaskHandle_t xTask)3186 TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
3187 {
3188 TCB_t *pxTCB;
3189 TaskHookFunction_t xReturn;
3190 UBaseType_t uxSavedInterruptStatus;
3191
3192 /* If xTask is NULL then set the calling task's hook. */
3193 pxTCB = prvGetTCBFromHandle( xTask );
3194
3195 /* Save the hook function in the TCB. A critical section is required as
3196 the value can be accessed from an interrupt. */
3197 portENTER_CRITICAL_ISR(&xTaskQueueMutex);
3198 {
3199 xReturn = pxTCB->pxTaskTag;
3200 }
3201 portEXIT_CRITICAL_ISR(&xTaskQueueMutex);
3202
3203 return xReturn;
3204 }
3205
3206 #endif /* configUSE_APPLICATION_TASK_TAG */
3207 /*-----------------------------------------------------------*/
3208
3209 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3210
xTaskCallApplicationTaskHook(TaskHandle_t xTask,void * pvParameter)3211 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask, void *pvParameter )
3212 {
3213 TCB_t *xTCB;
3214 BaseType_t xReturn;
3215
3216 /* If xTask is NULL then we are calling our own task hook. */
3217 if( xTask == NULL )
3218 {
3219 xTCB = xTaskGetCurrentTaskHandle();
3220 }
3221 else
3222 {
3223 xTCB = xTask;
3224 }
3225
3226 if( xTCB->pxTaskTag != NULL )
3227 {
3228 xReturn = xTCB->pxTaskTag( pvParameter );
3229 }
3230 else
3231 {
3232 xReturn = pdFAIL;
3233 }
3234
3235 return xReturn;
3236 }
3237
3238 #endif /* configUSE_APPLICATION_TASK_TAG */
3239 /*-----------------------------------------------------------*/
3240
/*
 * Select the next task to run on the calling core and make it the current
 * task (pxCurrentTCB[core]).  If the scheduler is suspended on this core the
 * switch is not performed; instead a yield is pended for when the scheduler
 * is resumed.
 */
void vTaskSwitchContext( void )
{
    //Theoretically, this is only called from either the tick interrupt or the crosscore interrupt, so disabling
    //interrupts shouldn't be necessary anymore. Still, for safety we'll leave it in for now.
    int irqstate=portENTER_CRITICAL_NESTED();

    if( uxSchedulerSuspended[ xPortGetCoreID() ] != ( UBaseType_t ) pdFALSE )
    {
        /* The scheduler is currently suspended - do not allow a context
        switch. */
        xYieldPending[ xPortGetCoreID() ] = pdTRUE;
    }
    else
    {
        xYieldPending[ xPortGetCoreID() ] = pdFALSE;
        xSwitchingContext[ xPortGetCoreID() ] = pdTRUE;
        traceTASK_SWITCHED_OUT();

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
            #else
                ulTotalRunTime = portGET_RUN_TIME_COUNTER_VALUE();
            #endif

            /* Add the amount of time the task has been running to the
            accumulated time so far. The time the task started running was
            stored in ulTaskSwitchedInTime. Note that there is no overflow
            protection here so count values are only valid until the timer
            overflows. The guard against negative values is to protect
            against suspect run time stat counter implementations - which
            are provided by the application, not the kernel. */
            taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
            if( ulTotalRunTime > ulTaskSwitchedInTime[ xPortGetCoreID() ] )
            {
                pxCurrentTCB[ xPortGetCoreID() ]->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime[ xPortGetCoreID() ] );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
            taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
            ulTaskSwitchedInTime[ xPortGetCoreID() ] = ulTotalRunTime;
        }
        #endif /* configGENERATE_RUN_TIME_STATS */

        /* Check for stack overflow, if configured. */
        taskFIRST_CHECK_FOR_STACK_OVERFLOW();
        taskSECOND_CHECK_FOR_STACK_OVERFLOW();

        /* Select a new task to run */

        /*
         We cannot do taskENTER_CRITICAL_ISR(&xTaskQueueMutex); here because it saves the interrupt context to the task tcb, and we're
         swapping that out here. Instead, we're going to do the work here ourselves. Because interrupts are already disabled, we only
         need to acquire the mutex.
        */
        vPortCPUAcquireMutex( &xTaskQueueMutex );

        #if !configUSE_PORT_OPTIMISED_TASK_SELECTION
        unsigned portBASE_TYPE foundNonExecutingWaiter = pdFALSE, ableToSchedule = pdFALSE, resetListHead;
        unsigned portBASE_TYPE holdTop=pdFALSE;
        tskTCB * pxTCB;

        portBASE_TYPE uxDynamicTopReady = uxTopReadyPriority;
        /*
         * ToDo: This scheduler doesn't correctly implement the round-robin scheduling as done in the single-core
         * FreeRTOS stack when multiple tasks have the same priority and are all ready; it just keeps grabbing the
         * first one. ToDo: fix this.
         * (Is this still true? if any, there's the issue with one core skipping over the processes for the other
         * core, potentially not giving the skipped-over processes any time.)
         */

        /* Walk the ready lists from the highest ready priority downwards
        until a task that this core may run is found. */
        while ( ableToSchedule == pdFALSE && uxDynamicTopReady >= 0 )
        {
            resetListHead = pdFALSE;
            // Nothing to do for empty lists
            if (!listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxDynamicTopReady ] ) )) {

                ableToSchedule = pdFALSE;
                tskTCB * pxRefTCB;

                /* Remember the current list item so that we
                can detect if all items have been inspected.
                Once this happens, we move on to a lower
                priority list (assuming nothing is suitable
                for scheduling). Note: This can return NULL if
                the list index is at the listItem */
                pxRefTCB = pxReadyTasksLists[ uxDynamicTopReady ].pxIndex->pvOwner;

                if ((void*)pxReadyTasksLists[ uxDynamicTopReady ].pxIndex==(void*)&pxReadyTasksLists[ uxDynamicTopReady ].xListEnd) {
                    //pxIndex points to the list end marker. Skip that and just get the next item.
                    listGET_OWNER_OF_NEXT_ENTRY( pxRefTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                }

                do {
                    listGET_OWNER_OF_NEXT_ENTRY( pxTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                    /* Find out if the next task in the list is
                    already being executed by another core */
                    foundNonExecutingWaiter = pdTRUE;
                    portBASE_TYPE i = 0;
                    for ( i=0; i<portNUM_PROCESSORS; i++ ) {
                        if (i == xPortGetCoreID()) {
                            continue;
                        } else if (pxCurrentTCB[i] == pxTCB) {
                            holdTop=pdTRUE; //keep this as the top prio, for the other CPU
                            foundNonExecutingWaiter = pdFALSE;
                            break;
                        }
                    }

                    if (foundNonExecutingWaiter == pdTRUE) {
                        /* If the task is not being executed
                        by another core and its affinity is
                        compatible with the current one,
                        prepare it to be swapped in */
                        if (pxTCB->xCoreID == tskNO_AFFINITY) {
                            pxCurrentTCB[xPortGetCoreID()] = pxTCB;
                            ableToSchedule = pdTRUE;
                        } else if (pxTCB->xCoreID == xPortGetCoreID()) {
                            pxCurrentTCB[xPortGetCoreID()] = pxTCB;
                            ableToSchedule = pdTRUE;
                        } else {
                            ableToSchedule = pdFALSE;
                            holdTop=pdTRUE; //keep this as the top prio, for the other CPU
                        }
                    } else {
                        ableToSchedule = pdFALSE;
                    }

                    if (ableToSchedule == pdFALSE) {
                        resetListHead = pdTRUE;
                    } else if ((ableToSchedule == pdTRUE) && (resetListHead == pdTRUE)) {
                        /* A task was skipped earlier in this lap: rewind the
                        list index back to the reference entry so the skipped
                        tasks are not starved of their list position. */
                        tskTCB * pxResetTCB;
                        do {
                            listGET_OWNER_OF_NEXT_ENTRY( pxResetTCB, &( pxReadyTasksLists[ uxDynamicTopReady ] ) );
                        } while(pxResetTCB != pxRefTCB);
                    }
                } while ((ableToSchedule == pdFALSE) && (pxTCB != pxRefTCB));
            } else {
                /* Empty list: lower the cached top priority unless a
                higher-priority task is being held for the other core. */
                if (!holdTop) --uxTopReadyPriority;
            }
            --uxDynamicTopReady;
        }

        #else
        //For Unicore targets we can keep the current FreeRTOS O(1)
        //Scheduler. I hope to optimize better the scheduler for
        //Multicore settings -- This will involve to create a per
        //affinity ready task list which will impact hugely on
        //tasks module
        taskSELECT_HIGHEST_PRIORITY_TASK();
        #endif

        traceTASK_SWITCHED_IN();
        xSwitchingContext[ xPortGetCoreID() ] = pdFALSE;

        //Exit critical region manually as well: release the mux now, interrupts will be re-enabled when we
        //exit the function.
        vPortCPUReleaseMutex( &xTaskQueueMutex );

        #if CONFIG_FREERTOS_WATCHPOINT_END_OF_STACK
        /* Re-arm the stack watchpoint for the incoming task's stack. */
        vPortSetStackWatchpoint(pxCurrentTCB[xPortGetCoreID()]->pxStack);
        #endif

    }
    portEXIT_CRITICAL_NESTED(irqstate);
}
3410 /*-----------------------------------------------------------*/
3411
void vTaskPlaceOnEventList( List_t * const pxEventList, const TickType_t xTicksToWait )
{
    /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
    SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */
    configASSERT( pxEventList );
    taskENTER_CRITICAL(&xTaskQueueMutex);
    {
        /* Queue the calling task on the event list in priority order, so the
        highest priority waiter is the first to be woken.  The queue that
        owns the event list is locked, preventing simultaneous access from
        interrupts. */
        vListInsert( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

        /* Block the calling task until the event arrives or the timeout
        expires. */
        prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
    }
    taskEXIT_CRITICAL(&xTaskQueueMutex);
}
3429 /*-----------------------------------------------------------*/
3430
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList, const TickType_t xItemValue, const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );
    taskENTER_CRITICAL(&xTaskQueueMutex);
    {
        /* Record the item value in the event list item.  Safe to write here:
        interrupts do not access the event list item of a task that is not
        in the Blocked state. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

        /* Append (rather than priority-insert) the calling task's event list
        item.  This list belongs to an event group implementation, and
        interrupts never touch event groups directly - they pend function
        calls to task level instead - so end-insertion is safe. */
        vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

        /* Block the calling task until the bits are set or the timeout
        expires. */
        prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
    }
    taskEXIT_CRITICAL(&xTaskQueueMutex);
}
3451 /*-----------------------------------------------------------*/
3452
3453 #if( configUSE_TIMERS == 1 )
3454
void vTaskPlaceOnEventListRestricted( List_t * const pxEventList, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
{
    taskENTER_CRITICAL(&xTaskQueueMutex);
    configASSERT( pxEventList );

    /* Kernel-internal helper - hence 'Restricted' - with special calling
    requirements: it must be called with the scheduler suspended and is not
    part of the public API. */

    /* The caller is assumed to be the only task that will ever wait on
    pxEventList, so the faster vListInsertEnd() can be used instead of
    vListInsert(). */
    vListInsertEnd( pxEventList, &( pxCurrentTCB[xPortGetCoreID()]->xEventListItem ) );

    /* An indefinite block is signalled to prvAddCurrentTaskToDelayedList()
    by a block time of portMAX_DELAY. */
    if( xWaitIndefinitely != pdFALSE )
    {
        xTicksToWait = portMAX_DELAY;
    }

    traceTASK_DELAY_UNTIL( );
    prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
    taskEXIT_CRITICAL(&xTaskQueueMutex);
}
3484
3485 #endif /* configUSE_TIMERS */
3486 /*-----------------------------------------------------------*/
3487
/*
 * Unblocks the highest priority task waiting on pxEventList (the list is
 * priority ordered, so the head entry) and returns pdTRUE when the woken
 * task should pre-empt the calling task on this core.
 *
 * SMP specifics: the woken task's core affinity decides where it may run.
 * If the scheduler is suspended on every core the task could run on, the
 * ready lists cannot be touched, so the task is parked on that core's
 * pending-ready list until the scheduler resumes there.
 */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
TCB_t *pxUnblockedTCB;
BaseType_t xReturn;
BaseType_t xTaskCanBeReady;
UBaseType_t i, uxTargetCPU;

	taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
	/* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION. It can also be
	called from a critical section within an ISR. */

	/* The event list is sorted in priority order, so the first in the list can
	be removed as it is known to be the highest priority. Remove the TCB from
	the delayed list, and add it to the ready list.

	If an event is for a queue that is locked then this function will never
	get called - the lock count on the queue will get modified instead. This
	means exclusive access to the event list is guaranteed here.

	This function assumes that a check has already been made to ensure that
	pxEventList is not empty. */
	if ( ( listLIST_IS_EMPTY( pxEventList ) ) == pdFALSE )
	{
		pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
		configASSERT( pxUnblockedTCB );
		( void ) uxListRemove( &( pxUnblockedTCB->xEventListItem ) );
	}
	else
	{
		/* Defensive: nothing to unblock - leave the critical section and
		report that no context switch is required. */
		taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
		return pdFALSE;
	}

	/* Determine whether any core that is allowed to run the woken task has
	its scheduler running, and which core's pending-ready list to use if
	not. */
	xTaskCanBeReady = pdFALSE;
	if ( pxUnblockedTCB->xCoreID == tskNO_AFFINITY )
	{
		/* Unpinned task: any core with a running scheduler will do; fall
		back to parking it on this core otherwise. */
		uxTargetCPU = xPortGetCoreID();
		for (i = 0; i < portNUM_PROCESSORS; i++)
		{
			if ( uxSchedulerSuspended[ i ] == ( UBaseType_t ) pdFALSE )
			{
				xTaskCanBeReady = pdTRUE;
				break;
			}
		}
	}
	else
	{
		/* Pinned task: only its own core's scheduler state matters. */
		uxTargetCPU = pxUnblockedTCB->xCoreID;
		xTaskCanBeReady = uxSchedulerSuspended[ uxTargetCPU ] == ( UBaseType_t ) pdFALSE;
	}

	if( xTaskCanBeReady )
	{
		( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
		prvAddTaskToReadyList( pxUnblockedTCB );
	}
	else
	{
		/* The delayed and ready lists cannot be accessed, so hold this task
		pending until the scheduler is resumed on this CPU. */
		vListInsertEnd( &( xPendingReadyList[ uxTargetCPU ] ), &( pxUnblockedTCB->xEventListItem ) );
	}

	if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
	{
		/* Return true if the task removed from the event list has a higher
		priority than the calling task. This allows the calling task to know if
		it should force a context switch now. */
		xReturn = pdTRUE;

		/* Mark that a yield is pending in case the user is not using the
		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
		xYieldPending[ xPortGetCoreID() ] = pdTRUE;
	}
	else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
	{
		/* The task is pinned to (or last ran on) the other core - ask that
		core to yield if the woken task outranks what it is running. */
		taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
		xReturn = pdFALSE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	#if( configUSE_TICKLESS_IDLE != 0 )
	{
		/* If a task is blocked on a kernel object then xNextTaskUnblockTime
		might be set to the blocked task's time out time. If the task is
		unblocked for a reason other than a timeout xNextTaskUnblockTime is
		normally left unchanged, because it is automatically reset to a new
		value when the tick count equals xNextTaskUnblockTime. However if
		tickless idling is used it might be more important to enter sleep mode
		at the earliest possible time - so reset xNextTaskUnblockTime here to
		ensure it is updated at the earliest possible time. */
		prvResetNextTaskUnblockTime();
	}
	#endif

	taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
	return xReturn;
}
3590 /*-----------------------------------------------------------*/
3591
/*
 * Removes the task that owns pxEventListItem from the unordered event list
 * used by the event group implementation, stores xItemValue (tagged with
 * taskEVENT_LIST_ITEM_VALUE_IN_USE) in the item, and moves the task to the
 * ready list.  Returns pdTRUE when the woken task should pre-empt the
 * caller on this core.
 *
 * Must be called with the kernel lock obtainable (a critical section is
 * taken here); interrupts never access event groups directly.
 */
BaseType_t xTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem, const TickType_t xItemValue )
{
TCB_t *pxUnblockedTCB;
BaseType_t xReturn;

	taskENTER_CRITICAL(&xTaskQueueMutex);

	/* Store the new item value in the event list. */
	listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

	/* Remove the event list item from the event flag. Interrupts do not
	access event flags. */
	pxUnblockedTCB = ( TCB_t * ) listGET_LIST_ITEM_OWNER( pxEventListItem );
	configASSERT( pxUnblockedTCB );
	( void ) uxListRemove( pxEventListItem );

	/* Remove the task from the delayed list and add it to the ready list. The
	scheduler is suspended so interrupts will not be accessing the ready
	lists. */
	( void ) uxListRemove( &( pxUnblockedTCB->xStateListItem ) );
	prvAddTaskToReadyList( pxUnblockedTCB );

	if ( tskCAN_RUN_HERE(pxUnblockedTCB->xCoreID) && pxUnblockedTCB->uxPriority >= pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
	{
		/* Return true if the task removed from the event list has
		a higher priority than the calling task. This allows
		the calling task to know if it should force a context
		switch now. */
		xReturn = pdTRUE;

		/* Mark that a yield is pending in case the user is not using the
		"xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
		xYieldPending[ xPortGetCoreID() ] = pdTRUE;
	}
	else if ( pxUnblockedTCB->xCoreID != xPortGetCoreID() )
	{
		/* The woken task belongs to the other core - let that core decide
		whether it needs to yield. */
		taskYIELD_OTHER_CORE( pxUnblockedTCB->xCoreID, pxUnblockedTCB->uxPriority );
		xReturn = pdFALSE;
	}
	else
	{
		xReturn = pdFALSE;
	}

	taskEXIT_CRITICAL(&xTaskQueueMutex);
	return xReturn;
}
3639 /*-----------------------------------------------------------*/
3640
/*
 * Public API: captures the current time reference (tick count plus tick
 * overflow count) in pxTimeOut so that xTaskCheckForTimeOut() can later
 * decide whether a block time has expired.
 */
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{
	configASSERT( pxTimeOut );

	/* Take the kernel lock so the overflow count and the tick count are
	sampled as one atomic snapshot, then delegate to the lock-free internal
	helper that performs the actual capture. */
	taskENTER_CRITICAL( &xTaskQueueMutex );
	vTaskInternalSetTimeOutState( pxTimeOut );
	taskEXIT_CRITICAL( &xTaskQueueMutex );
}
3651 /*-----------------------------------------------------------*/
3652
/*
 * Records the current tick count and tick-overflow count in pxTimeOut.
 * Kernel-internal counterpart of vTaskSetTimeOutState(): no critical
 * section is taken here, so the caller must already guarantee atomicity.
 */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
	/* For internal use only as it does not use a critical section. */
	pxTimeOut->xOverflowCount = xNumOfOverflows;
	pxTimeOut->xTimeOnEntering = xTickCount;
}
3659 /*-----------------------------------------------------------*/
3660
/*
 * Determines whether the block time has expired since pxTimeOut was
 * captured by vTaskSetTimeOutState().  Returns pdTRUE when the timeout has
 * expired (or the delay was aborted); returns pdFALSE and decrements
 * *pxTicksToWait by the elapsed time otherwise, re-arming pxTimeOut so the
 * caller can continue waiting for the remainder.
 */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut, TickType_t * const pxTicksToWait )
{
BaseType_t xReturn;

	configASSERT( pxTimeOut );
	configASSERT( pxTicksToWait );

	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		/* Minor optimisation. The tick count cannot change in this block. */
		const TickType_t xConstTickCount = xTickCount;
		const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

		#if( INCLUDE_xTaskAbortDelay == 1 )
			if( pxCurrentTCB[xPortGetCoreID()]->ucDelayAborted != ( uint8_t ) pdFALSE )
			{
				/* The delay was aborted, which is not the same as a time out,
				but has the same result. */
				pxCurrentTCB[xPortGetCoreID()]->ucDelayAborted = pdFALSE;
				xReturn = pdTRUE;
			}
			else
		#endif

		#if ( INCLUDE_vTaskSuspend == 1 )
			if( *pxTicksToWait == portMAX_DELAY )
			{
				/* If INCLUDE_vTaskSuspend is set to 1 and the block time
				specified is the maximum block time then the task should block
				indefinitely, and therefore never time out. */
				xReturn = pdFALSE;
			}
			else
		#endif

		if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
		{
			/* The tick count has overflowed since vTaskSetTimeOutState() was
			called AND is greater than the recorded entry time - it must have
			wrapped all the way around and passed the entry time again, so
			more than one full tick period has certainly elapsed and the
			timeout has expired. */
			xReturn = pdTRUE;
		}
		else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
		{
			/* Not a genuine timeout. Adjust parameters for time remaining. */
			*pxTicksToWait -= xElapsedTime;
			vTaskInternalSetTimeOutState( pxTimeOut );
			xReturn = pdFALSE;
		}
		else
		{
			/* The full block time has elapsed. */
			*pxTicksToWait = 0;
			xReturn = pdTRUE;
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	return xReturn;
}
3722 /*-----------------------------------------------------------*/
3723
/*
 * Records that a required yield on the current core was not performed, so
 * the scheduler can action it later (the pending flag is per core).
 */
void vTaskMissedYield( void )
{
	xYieldPending[xPortGetCoreID()] = pdTRUE;
}
3728 /*-----------------------------------------------------------*/
3729
3730 #if ( configUSE_TRACE_FACILITY == 1 )
3731
uxTaskGetTaskNumber(TaskHandle_t xTask)3732 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3733 {
3734 UBaseType_t uxReturn;
3735 TCB_t const *pxTCB;
3736
3737 if( xTask != NULL )
3738 {
3739 pxTCB = xTask;
3740 uxReturn = pxTCB->uxTaskNumber;
3741 }
3742 else
3743 {
3744 uxReturn = 0U;
3745 }
3746
3747 return uxReturn;
3748 }
3749
3750 #endif /* configUSE_TRACE_FACILITY */
3751 /*-----------------------------------------------------------*/
3752
3753 #if ( configUSE_TRACE_FACILITY == 1 )
3754
/*
 * Trace facility accessor: assigns uxHandle as the task number of xTask.
 * A NULL handle is ignored.
 */
void vTaskSetTaskNumber( TaskHandle_t xTask, const UBaseType_t uxHandle )
{
	/* Nothing to tag when no task was supplied. */
	if( xTask == NULL )
	{
		return;
	}

	( ( TCB_t * ) xTask )->uxTaskNumber = uxHandle;
}
3765
3766 #endif /* configUSE_TRACE_FACILITY */
3767
3768 /*
3769 * -----------------------------------------------------------
3770 * The Idle task.
3771 * ----------------------------------------------------------
3772 *
3773 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3774 * language extensions. The equivalent prototype for this function is:
3775 *
3776 * void prvIdleTask( void *pvParameters );
3777 *
3778 */
/*
 * The idle task body.  One idle task per core is created automatically when
 * the scheduler starts.  Responsibilities: reap self-deleted tasks, yield
 * to equal-priority tasks when configured, run the application/ESP-IDF idle
 * hooks, and drive tickless idle when enabled.  The idle task must never
 * block.
 */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
	/* Stop warnings. */
	( void ) pvParameters;

	/** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
	SCHEDULER IS STARTED. **/

	/* In case a task that has a secure context deletes itself, in which case
	the idle task is responsible for deleting the task's secure context, if
	any. */
	portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

	for( ;; )
	{
		/* See if any tasks have deleted themselves - if so then the idle task
		is responsible for freeing the deleted task's TCB and stack. */
		prvCheckTasksWaitingTermination();

		#if ( configUSE_PREEMPTION == 0 )
		{
			/* If we are not using preemption we keep forcing a task switch to
			see if any other task has become available. If we are using
			preemption we don't need to do this as any task becoming available
			will automatically get the processor anyway. */
			taskYIELD();
		}
		#endif /* configUSE_PREEMPTION */

		#if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
		{
			/* When using preemption tasks of equal priority will be
			timesliced. If a task that is sharing the idle priority is ready
			to run then the idle task should yield before the end of the
			timeslice.

			A critical region is not required here as we are just reading from
			the list, and an occasional incorrect value will not matter. If
			the ready list at the idle priority contains more than one task
			then a task other than the idle task is ready to execute. */
			if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
			{
				taskYIELD();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

		#if ( configUSE_IDLE_HOOK == 1 )
		{
			extern void vApplicationIdleHook( void );

			/* Call the user defined function from within the idle task. This
			allows the application designer to add background functionality
			without the overhead of a separate task.
			NOTE: vApplicationIdleHook() MUST NOT, UNDER ANY CIRCUMSTANCES,
			CALL A FUNCTION THAT MIGHT BLOCK. */
			vApplicationIdleHook();
		}
		#endif /* configUSE_IDLE_HOOK */
		#if ( CONFIG_FREERTOS_LEGACY_HOOKS == 1 )
		{
			/* Call the esp-idf hook system */
			esp_vApplicationIdleHook();
		}
		#endif /* CONFIG_FREERTOS_LEGACY_HOOKS */

		/* This conditional compilation should use inequality to 0, not equality
		to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
		user defined low power mode implementations require
		configUSE_TICKLESS_IDLE to be set to a value other than 1. */
		#if ( configUSE_TICKLESS_IDLE != 0 )
		{
		TickType_t xExpectedIdleTime;

			/* It is not desirable to suspend then resume the scheduler on
			each iteration of the idle task. Therefore, a preliminary
			test of the expected idle time is performed without the
			scheduler suspended. The result here is not necessarily
			valid. */
			xExpectedIdleTime = prvGetExpectedIdleTime();

			if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
			{
				/* NOTE(review): this SMP port uses a critical section here
				where the single-core kernel suspends the scheduler. */
				taskENTER_CRITICAL( &xTaskQueueMutex );
				{
					/* Now the scheduler is suspended, the expected idle
					time can be sampled again, and this time its value can
					be used. */
					configASSERT( xNextTaskUnblockTime >= xTickCount );
					xExpectedIdleTime = prvGetExpectedIdleTime();

					/* Define the following macro to set xExpectedIdleTime to 0
					if the application does not want
					portSUPPRESS_TICKS_AND_SLEEP() to be called. */
					configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

					if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
					{
						traceLOW_POWER_IDLE_BEGIN();
						portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
						traceLOW_POWER_IDLE_END();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				taskEXIT_CRITICAL( &xTaskQueueMutex );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		#endif /* configUSE_TICKLESS_IDLE */
	}
}
3900 /*-----------------------------------------------------------*/
3901
3902 #if( configUSE_TICKLESS_IDLE != 0 )
3903
eTaskConfirmSleepModeStatus(void)3904 eSleepModeStatus eTaskConfirmSleepModeStatus( void )
3905 {
3906 /* The idle task exists in addition to the application tasks. */
3907 const UBaseType_t uxNonApplicationTasks = 1;
3908 eSleepModeStatus eReturn = eStandardSleep;
3909
3910 taskEXIT_CRITICAL(&xTaskQueueMutex);
3911 if( listCURRENT_LIST_LENGTH( &xPendingReadyList[xPortGetCoreID()] ) != 0 )
3912 {
3913 /* A task was made ready while the scheduler was suspended. */
3914 eReturn = eAbortSleep;
3915 }
3916 else if( xYieldPending[xPortGetCoreID()] != pdFALSE )
3917 {
3918 /* A yield was pended while the scheduler was suspended. */
3919 eReturn = eAbortSleep;
3920 }
3921 else
3922 {
3923 /* If all the tasks are in the suspended list (which might mean they
3924 have an infinite block time rather than actually being suspended)
3925 then it is safe to turn all clocks off and just wait for external
3926 interrupts. */
3927 if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
3928 {
3929 eReturn = eNoTasksWaitingTimeout;
3930 }
3931 else
3932 {
3933 mtCOVERAGE_TEST_MARKER();
3934 }
3935 }
3936 taskEXIT_CRITICAL(&xTaskQueueMutex);
3937
3938 return eReturn;
3939 }
3940
3941 #endif /* configUSE_TICKLESS_IDLE */
3942 /*-----------------------------------------------------------*/
3943 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3944
3945 #if ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
3946
/*
 * Stores pvValue in slot xIndex of xTaskToSet's thread local storage array
 * and registers xDelCallback to be invoked for that slot when the task is
 * deleted (pass NULL for no callback).  A NULL task handle means the
 * calling task.  Out-of-range indices are silently ignored.
 */
void vTaskSetThreadLocalStoragePointerAndDelCallback( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue , TlsDeleteCallbackFunction_t xDelCallback)
{
TCB_t *pxTCB;

	/* xIndex is signed - reject negative values as well as too-large ones,
	otherwise a negative index would write before the start of the TLS
	arrays in the TCB. */
	if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
	{
		taskENTER_CRITICAL(&xTaskQueueMutex);
		pxTCB = prvGetTCBFromHandle( xTaskToSet );
		pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
		pxTCB->pvThreadLocalStoragePointersDelCallback[ xIndex ] = xDelCallback;
		taskEXIT_CRITICAL(&xTaskQueueMutex);
	}
}
3960
/*
 * Stores pvValue in slot xIndex of xTaskToSet's thread local storage
 * array.  Thin wrapper around the AndDelCallback variant with no deletion
 * callback registered.
 */
void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
{
	vTaskSetThreadLocalStoragePointerAndDelCallback( xTaskToSet, xIndex, pvValue, (TlsDeleteCallbackFunction_t)NULL );
}
3965
3966
3967 #else
/*
 * Stores pvValue in slot xIndex of xTaskToSet's thread local storage
 * array.  A NULL task handle means the calling task.  Out-of-range indices
 * are silently ignored.
 */
void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet, BaseType_t xIndex, void *pvValue )
{
TCB_t *pxTCB;

	/* xIndex is signed - reject negative values as well as too-large ones,
	otherwise a negative index would write before the start of the TLS
	array in the TCB. */
	if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
	{
		taskENTER_CRITICAL(&xTaskQueueMutex);
		pxTCB = prvGetTCBFromHandle( xTaskToSet );
		pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
		taskEXIT_CRITICAL(&xTaskQueueMutex);
	}
}
3980 #endif /* configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS */
3981
3982 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3983 /*-----------------------------------------------------------*/
3984
3985 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3986
/*
 * Returns the value stored in slot xIndex of xTaskToQuery's thread local
 * storage array, or NULL when xIndex is out of range.  A NULL task handle
 * means the calling task.
 */
void *pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery, BaseType_t xIndex )
{
void *pvReturn = NULL;
TCB_t *pxTCB;

	/* xIndex is signed - reject negative values as well as too-large ones,
	otherwise a negative index would read before the start of the TLS
	array in the TCB. */
	if( ( xIndex >= 0 ) && ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
	{
		pxTCB = prvGetTCBFromHandle( xTaskToQuery );
		pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
	}
	else
	{
		pvReturn = NULL;
	}

	return pvReturn;
}
4004
4005 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
4006 /*-----------------------------------------------------------*/
4007
4008 #if ( portUSING_MPU_WRAPPERS == 1 )
4009
/*
 * Replaces the MPU region configuration of xTaskToModify with xRegions.
 * A NULL task handle means the calling task.  The actual register-level
 * work is delegated to the port layer.
 */
void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify, const MemoryRegion_t * const xRegions )
{
TCB_t *pxTCB;

	/* If null is passed in here then we are modifying the MPU settings of
	the calling task. */
	pxTCB = prvGetTCBFromHandle( xTaskToModify );

	vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
}
4020
4021 #endif /* portUSING_MPU_WRAPPERS */
4022 /*-----------------------------------------------------------*/
4023
/*
 * Initialises every kernel task list: one ready list per priority, the two
 * delayed lists (current and overflow), the per-core pending-ready lists,
 * and - when the corresponding features are enabled - the termination and
 * suspended lists.  Called once before the scheduler is started.
 */
static void prvInitialiseTaskLists( void )
{
UBaseType_t uxPriority;

	for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
	{
		vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
	}

	vListInitialise( &xDelayedTaskList1 );
	vListInitialise( &xDelayedTaskList2 );

	#if ( portNUM_PROCESSORS > 1 )
	/* One pending-ready list per core. */
	for(BaseType_t i = 0; i < portNUM_PROCESSORS; i++) {
		vListInitialise( &xPendingReadyList[ i ] );
	}
	#else
	/* Single core: xPortGetCoreID() always indexes the one list. */
	vListInitialise( &xPendingReadyList[xPortGetCoreID()] );
	#endif

	#if ( INCLUDE_vTaskDelete == 1 )
	{
		vListInitialise( &xTasksWaitingTermination );
	}
	#endif /* INCLUDE_vTaskDelete */

	#if ( INCLUDE_vTaskSuspend == 1 )
	{
		vListInitialise( &xSuspendedTaskList );
	}
	#endif /* INCLUDE_vTaskSuspend */

	/* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
	using list2. */
	pxDelayedTaskList = &xDelayedTaskList1;
	pxOverflowDelayedTaskList = &xDelayedTaskList2;
}
4061 /*-----------------------------------------------------------*/
4062
/*
 * Reaps tasks that deleted themselves.  Called only from the idle task.
 * Repeatedly picks one deletable task from xTasksWaitingTermination (one
 * that last ran on - or may run on - this core and is not currently
 * executing on either core), removes it inside the critical section, then
 * frees its TLS callbacks and TCB/stack outside the critical section.
 */
static void prvCheckTasksWaitingTermination( void )
{

	/** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

	#if ( INCLUDE_vTaskDelete == 1 )
	{
	BaseType_t xListIsEmpty;
	BaseType_t core = xPortGetCoreID();

		/* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL( &xTaskQueueMutex )
		being called too often in the idle task. */
		while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
		{
			TCB_t *pxTCB = NULL;

			taskENTER_CRITICAL(&xTaskQueueMutex);
			{
				xListIsEmpty = listLIST_IS_EMPTY( &xTasksWaitingTermination );
				if( xListIsEmpty == pdFALSE )
				{
					/* We only want to kill tasks that ran on this core because e.g. _xt_coproc_release needs to
					be called on the core the process is pinned on, if any */
					ListItem_t *target = listGET_HEAD_ENTRY(&xTasksWaitingTermination);
					for( ; target != listGET_END_MARKER(&xTasksWaitingTermination); target = listGET_NEXT(target) ){	//Walk the list
						TCB_t *tgt_tcb = ( TCB_t * )listGET_LIST_ITEM_OWNER(target);
						int affinity = tgt_tcb->xCoreID;
						//Self deleting tasks are added to Termination List before they switch context. Ensure they aren't still currently running
						if( pxCurrentTCB[core] == tgt_tcb || (portNUM_PROCESSORS > 1 && pxCurrentTCB[!core] == tgt_tcb) ){
							continue;	//Can't free memory of task that is still running
						}
						if(affinity == core || affinity == tskNO_AFFINITY){	//Find first item not pinned to other core
							pxTCB = tgt_tcb;
							break;
						}
					}
					if(pxTCB != NULL){
						( void ) uxListRemove( target );	//Remove list item from list
						--uxCurrentNumberOfTasks;
						--uxDeletedTasksWaitingCleanUp;
					}
				}
			}
			taskEXIT_CRITICAL(&xTaskQueueMutex);	//Need to call deletion callbacks outside critical section

			if (pxTCB != NULL) {	//Call deletion callbacks and free TCB memory
				#if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
				prvDeleteTLS( pxTCB );
				#endif
				prvDeleteTCB( pxTCB );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
				break;	//No TCB found that could be freed by this core, break out of loop
			}
		}

	}
	#endif /* INCLUDE_vTaskDelete */
}
4124 /*-----------------------------------------------------------*/
4125
4126 #if( configUSE_TRACE_FACILITY == 1 )
4127
/*
 * Fills *pxTaskStatus with information about xTask (NULL = calling task).
 *
 * xGetFreeStackSpace - pdTRUE to also compute the stack high water mark,
 *                      which is comparatively slow; pdFALSE stores 0.
 * eState             - the task's state if already known by the caller;
 *                      pass eInvalid to have this function determine it.
 */
void vTaskGetInfo( TaskHandle_t xTask, TaskStatus_t *pxTaskStatus, BaseType_t xGetFreeStackSpace, eTaskState eState )
{
TCB_t *pxTCB;

	/* xTask is NULL then get the state of the calling task. */
	pxTCB = prvGetTCBFromHandle( xTask );

	pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
	pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName [ 0 ] );
	pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
	pxTaskStatus->pxStackBase = pxTCB->pxStack;
	pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

	#if ( configTASKLIST_INCLUDE_COREID == 1 )
	pxTaskStatus->xCoreID = pxTCB->xCoreID;
	#endif /* configTASKLIST_INCLUDE_COREID */

	#if ( configUSE_MUTEXES == 1 )
	{
		pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
	}
	#else
	{
		pxTaskStatus->uxBasePriority = 0;
	}
	#endif

	#if ( configGENERATE_RUN_TIME_STATS == 1 )
	{
		pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
	}
	#else
	{
		pxTaskStatus->ulRunTimeCounter = 0;
	}
	#endif

	/* Obtaining the task state is a little fiddly, so is only done if the
	value of eState passed into this function is eInvalid - otherwise the
	state is just set to whatever is passed in. */
	if( eState != eInvalid )
	{
		if( pxTCB == pxCurrentTCB[xPortGetCoreID()] )
		{
			pxTaskStatus->eCurrentState = eRunning;
		}
		else
		{
			pxTaskStatus->eCurrentState = eState;

			#if ( INCLUDE_vTaskSuspend == 1 )
			{
				/* If the task is in the suspended list then there is a
				chance it is actually just blocked indefinitely - so really
				it should be reported as being in the Blocked state. */
				if( eState == eSuspended )
				{
					taskENTER_CRITICAL( &xTaskQueueMutex );
					{
						/* A non-NULL event list container means the task is
						waiting on an event, i.e. blocked, not suspended. */
						if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
						{
							pxTaskStatus->eCurrentState = eBlocked;
						}
					}
					taskEXIT_CRITICAL( &xTaskQueueMutex );
				}
			}
			#endif /* INCLUDE_vTaskSuspend */
		}
	}
	else
	{
		pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
	}

	/* Obtaining the stack space takes some time, so the xGetFreeStackSpace
	parameter is provided to allow it to be skipped. */
	if( xGetFreeStackSpace != pdFALSE )
	{
		#if ( portSTACK_GROWTH > 0 )
		{
			pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
		}
		#else
		{
			pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
		}
		#endif
	}
	else
	{
		pxTaskStatus->usStackHighWaterMark = 0;
	}
}
4222
4223 #endif /* configUSE_TRACE_FACILITY */
4224 /*-----------------------------------------------------------*/
4225
xTaskGetAffinity(TaskHandle_t xTask)4226 BaseType_t xTaskGetAffinity( TaskHandle_t xTask )
4227 {
4228 TCB_t *pxTCB;
4229
4230 pxTCB = prvGetTCBFromHandle( xTask );
4231
4232 return pxTCB->xCoreID;
4233 }
4234 /*-----------------------------------------------------------*/
4235
4236 #if ( configUSE_TRACE_FACILITY == 1 )
4237
/*
 * Helper for uxTaskGetSystemState(): fills consecutive entries of
 * pxTaskStatusArray with the status of every task on pxList, reporting
 * each as being in state eState.  Returns the number of entries written.
 * The caller must size the array and hold whatever locking the list needs.
 */
static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t *pxTaskStatusArray, List_t *pxList, eTaskState eState )
{
configLIST_VOLATILE TCB_t *pxNextTCB, *pxFirstTCB;
UBaseType_t uxTask = 0;

	if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
	{
		/* Remember the first owner returned so the do/while below can tell
		when the walk has wrapped back to the start of the list. */
		listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

		/* Populate an TaskStatus_t structure within the
		pxTaskStatusArray array for each task that is referenced from
		pxList. See the definition of TaskStatus_t in task.h for the
		meaning of each TaskStatus_t structure member. */
		do
		{
			listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
			vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
			uxTask++;
		} while( pxNextTCB != pxFirstTCB );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	return uxTask;
}
4265
4266 #endif /* configUSE_TRACE_FACILITY */
4267 /*-----------------------------------------------------------*/
4268
4269 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
4270
prvTaskCheckFreeStackSpace(const uint8_t * pucStackByte)4271 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
4272 {
4273 uint32_t ulCount = 0U;
4274
4275 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
4276 {
4277 pucStackByte -= portSTACK_GROWTH;
4278 ulCount++;
4279 }
4280
4281 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
4282
4283 return ( configSTACK_DEPTH_TYPE ) ulCount;
4284 }
4285
4286 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
4287 /*-----------------------------------------------------------*/
4288
4289 #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
4290
4291 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
4292 same except for their return type. Using configSTACK_DEPTH_TYPE allows the
4293 user to determine the return type. It gets around the problem of the value
4294 overflowing on 8-bit types without breaking backward compatibility for
4295 applications that expect an 8-bit return type. */
uxTaskGetStackHighWaterMark2(TaskHandle_t xTask)4296 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
4297 {
4298 TCB_t *pxTCB;
4299 uint8_t *pucEndOfStack;
4300 configSTACK_DEPTH_TYPE uxReturn;
4301
4302 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
4303 the same except for their return type. Using configSTACK_DEPTH_TYPE
4304 allows the user to determine the return type. It gets around the
4305 problem of the value overflowing on 8-bit types without breaking
4306 backward compatibility for applications that expect an 8-bit return
4307 type. */
4308
4309 pxTCB = prvGetTCBFromHandle( xTask );
4310
4311 #if portSTACK_GROWTH < 0
4312 {
4313 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
4314 }
4315 #else
4316 {
4317 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
4318 }
4319 #endif
4320
4321 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
4322
4323 return uxReturn;
4324 }
4325
4326 #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
4327 /*-----------------------------------------------------------*/
4328
4329 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
4330
uxTaskGetStackHighWaterMark(TaskHandle_t xTask)4331 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
4332 {
4333 TCB_t *pxTCB;
4334 uint8_t *pucEndOfStack;
4335 UBaseType_t uxReturn;
4336
4337 pxTCB = prvGetTCBFromHandle( xTask );
4338
4339 #if portSTACK_GROWTH < 0
4340 {
4341 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
4342 }
4343 #else
4344 {
4345 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
4346 }
4347 #endif
4348
4349 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
4350
4351 return uxReturn;
4352 }
4353
4354 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
4355 /*-----------------------------------------------------------*/
4356 #if (INCLUDE_pxTaskGetStackStart == 1)
4357
pxTaskGetStackStart(TaskHandle_t xTask)4358 uint8_t* pxTaskGetStackStart( TaskHandle_t xTask)
4359 {
4360 TCB_t *pxTCB;
4361 uint8_t* uxReturn;
4362
4363 pxTCB = prvGetTCBFromHandle( xTask );
4364 uxReturn = (uint8_t*)pxTCB->pxStack;
4365
4366 return uxReturn;
4367 }
4368
4369 #endif /* INCLUDE_pxTaskGetStackStart */
4370
4371 #if ( INCLUDE_vTaskDelete == 1 )
4372
/*
 * Frees all kernel-owned memory belonging to a deleted task: newlib
 * reentrancy data, port MPU settings, and - depending on how the task was
 * created - its stack and TCB.  The caller must have already removed the
 * task from every kernel list.
 */
static void prvDeleteTCB( TCB_t *pxTCB )
{
	/* This call is required specifically for the TriCore port. It must be
	above the vPortFree() calls. The call is also used by ports/demos that
	want to allocate and clean RAM statically. */
	portCLEAN_UP_TCB( pxTCB );

	/* Free up the memory allocated by the scheduler for the task. It is up
	to the task to free any memory allocated at the application level. */
	#if ( configUSE_NEWLIB_REENTRANT == 1 )
	{
		_reclaim_reent( &( pxTCB->xNewLib_reent ) );
	}
	#endif /* configUSE_NEWLIB_REENTRANT */

	#if ( portUSING_MPU_WRAPPERS == 1 )
	vPortReleaseTaskMPUSettings( &( pxTCB->xMPUSettings) );
	#endif

	#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
	{
		/* The task can only have been allocated dynamically - free both
		the stack and TCB. */
		vPortFree( pxTCB->pxStack );
		vPortFree( pxTCB );
	}
	#elif( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
	{
		/* The task could have been allocated statically or dynamically, so
		check what was statically allocated before trying to free the
		memory. */
		if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
		{
			/* Both the stack and TCB were allocated dynamically, so both
			must be freed. */
			vPortFree( pxTCB->pxStack );
			vPortFree( pxTCB );
		}
		else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
		{
			/* Only the stack was statically allocated, so the TCB is the
			only memory that must be freed. */
			vPortFree( pxTCB );
		}
		else
		{
			/* Neither the stack nor the TCB were allocated dynamically, so
			nothing needs to be freed. */
			configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
			mtCOVERAGE_TEST_MARKER();
		}
	}
	#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
}
4427
4428 #endif /* INCLUDE_vTaskDelete */
4429 /*-----------------------------------------------------------*/
4430
4431 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS )
4432
static void prvDeleteTLS( TCB_t *pxTCB )
{
	configASSERT( pxTCB );

	/* Run the registered deletion callback, if any, for each
	thread-local-storage slot.  The callback receives the slot index and
	the stored pointer so the application can release the associated
	resource. */
	for( int i = 0; i < configNUM_THREAD_LOCAL_STORAGE_POINTERS; i++ )
	{
		if( pxTCB->pvThreadLocalStoragePointersDelCallback[ i ] == NULL )
		{
			/* No deletion callback registered for this slot. */
			continue;
		}

		pxTCB->pvThreadLocalStoragePointersDelCallback[ i ]( i, pxTCB->pvThreadLocalStoragePointers[ i ] );
	}
}
4444
4445 #endif /* ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 ) && ( configTHREAD_LOCAL_STORAGE_DELETE_CALLBACKS ) */
4446 /*-----------------------------------------------------------*/
4447
4448
prvResetNextTaskUnblockTime(void)4449 static void prvResetNextTaskUnblockTime( void )
4450 {
4451 TCB_t *pxTCB;
4452
4453 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4454 {
4455 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
4456 the maximum possible value so it is extremely unlikely that the
4457 if( xTickCount >= xNextTaskUnblockTime ) test will pass until
4458 there is an item in the delayed list. */
4459 xNextTaskUnblockTime = portMAX_DELAY;
4460 }
4461 else
4462 {
4463 /* The new current delayed list is not empty, get the value of
4464 the item at the head of the delayed list. This is the time at
4465 which the task at the head of the delayed list should be removed
4466 from the Blocked state. */
4467 ( pxTCB ) = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
4468 xNextTaskUnblockTime = listGET_LIST_ITEM_VALUE( &( ( pxTCB )->xStateListItem ) );
4469 }
4470 }
4471 /*-----------------------------------------------------------*/
4472
4473 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) || (portNUM_PROCESSORS > 1) )
4474
xTaskGetCurrentTaskHandle(void)4475 TaskHandle_t xTaskGetCurrentTaskHandle( void )
4476 {
4477 TaskHandle_t xReturn;
4478 unsigned state;
4479
4480 state = portENTER_CRITICAL_NESTED();
4481 xReturn = pxCurrentTCB[ xPortGetCoreID() ];
4482 portEXIT_CRITICAL_NESTED(state);
4483
4484 return xReturn;
4485 }
4486
xTaskGetCurrentTaskHandleForCPU(BaseType_t cpuid)4487 TaskHandle_t xTaskGetCurrentTaskHandleForCPU( BaseType_t cpuid )
4488 {
4489 TaskHandle_t xReturn=NULL;
4490
4491 //Xtensa-specific: the pxCurrentPCB pointer is atomic so we shouldn't need a lock.
4492 if (cpuid < portNUM_PROCESSORS) {
4493 xReturn = pxCurrentTCB[ cpuid ];
4494 }
4495
4496 return xReturn;
4497 }
4498
4499 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4500 /*-----------------------------------------------------------*/
4501
4502 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
4503
xTaskGetSchedulerState(void)4504 BaseType_t xTaskGetSchedulerState( void )
4505 {
4506 BaseType_t xReturn;
4507
4508 if( xSchedulerRunning == pdFALSE )
4509 {
4510 xReturn = taskSCHEDULER_NOT_STARTED;
4511 }
4512 else
4513 {
4514 if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
4515 {
4516 xReturn = taskSCHEDULER_RUNNING;
4517 }
4518 else
4519 {
4520 xReturn = taskSCHEDULER_SUSPENDED;
4521 }
4522 }
4523
4524 return xReturn;
4525 }
4526
4527 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4528 /*-----------------------------------------------------------*/
4529
4530 #if ( configUSE_MUTEXES == 1 )
4531
BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
{
TCB_t * const pxMutexHolderTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;

	/* Called from the queue implementation when the current task fails to
	obtain a mutex.  Temporarily raises the mutex holder's priority to
	that of the current task when the holder's priority is lower.
	Returns pdTRUE if inheritance occurred now or had already occurred,
	otherwise pdFALSE. */
	taskENTER_CRITICAL(&xTaskQueueMutex);
	/* If the mutex was given back by an interrupt while the queue was
	locked then the mutex holder might now be NULL. _RB_ Is this still
	needed as interrupts can no longer use mutexes? */
	if( pxMutexHolder != NULL )
	{
		/* If the holder of the mutex has a priority below the priority of
		the task attempting to obtain the mutex then it will temporarily
		inherit the priority of the task attempting to obtain the mutex. */
		if( pxMutexHolderTCB->uxPriority < pxCurrentTCB[xPortGetCoreID()]->uxPriority )
		{
			/* Adjust the mutex holder state to account for its new
			priority. Only reset the event list item value if the value is
			not being used for anything else. */
			if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
			{
				listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[xPortGetCoreID()]->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			/* If the task being modified is in the ready state it will need
			to be moved into a new list. */
			if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
			{
				if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
				{
					/* It is known that the task is in its ready list so
					there is no need to check again and the port level
					reset macro can be called directly. */
					portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* Inherit the priority before being moved into the new list. */
				pxMutexHolderTCB->uxPriority = pxCurrentTCB[xPortGetCoreID()]->uxPriority;
				prvAddTaskToReadyList( pxMutexHolderTCB );
			}
			else
			{
				/* Just inherit the priority. */
				pxMutexHolderTCB->uxPriority = pxCurrentTCB[xPortGetCoreID()]->uxPriority;
			}

			traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB[xPortGetCoreID()]->uxPriority );

			/* Inheritance occurred. */
			xReturn = pdTRUE;
		}
		else
		{
			if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB[xPortGetCoreID()]->uxPriority )
			{
				/* The base priority of the mutex holder is lower than the
				priority of the task attempting to take the mutex, but the
				current priority of the mutex holder is not lower than the
				priority of the task attempting to take the mutex.
				Therefore the mutex holder must have already inherited a
				priority, but inheritance would have occurred if that had
				not been the case. */
				xReturn = pdTRUE;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
	taskEXIT_CRITICAL(&xTaskQueueMutex);

	return xReturn;
}
4618
4619 #endif /* configUSE_MUTEXES */
4620 /*-----------------------------------------------------------*/
4621
4622 #if ( configUSE_MUTEXES == 1 )
4623
BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
{
TCB_t * const pxTCB = pxMutexHolder;
BaseType_t xReturn = pdFALSE;

	/* Called from the queue implementation when a mutex is given back.
	Decrements the holder's held-mutex count and, if this was the last
	mutex held and a priority had been inherited, restores the holder's
	base priority.  Returns pdTRUE when a context switch is required. */
	taskENTER_CRITICAL(&xTaskQueueMutex);
	if( pxMutexHolder != NULL )
	{
		/* A task can only have an inherited priority if it holds the mutex.
		If the mutex is held by a task then it cannot be given from an
		interrupt, and if a mutex is given by the holding task then it must
		be the running state task. */
		configASSERT( pxTCB == pxCurrentTCB[xPortGetCoreID()] );
		configASSERT( pxTCB->uxMutexesHeld );
		( pxTCB->uxMutexesHeld )--;

		/* Has the holder of the mutex inherited the priority of another
		task? */
		if( pxTCB->uxPriority != pxTCB->uxBasePriority )
		{
			/* Only disinherit if no other mutexes are held. */
			if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
			{
				/* A task can only have an inherited priority if it holds
				the mutex. If the mutex is held by a task then it cannot be
				given from an interrupt, and if a mutex is given by the
				holding task then it must be the running state task. Remove
				the holding task from the ready/delayed list. */
				if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
				{
					taskRESET_READY_PRIORITY( pxTCB->uxPriority );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* Disinherit the priority before adding the task into the
				new ready list. */
				traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
				pxTCB->uxPriority = pxTCB->uxBasePriority;

				/* Reset the event list item value. It cannot be in use for
				any other purpose if this task is running, and it must be
				running to give back the mutex. */
				listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
				prvAddTaskToReadyList( pxTCB );

				/* Return true to indicate that a context switch is required.
				This is only actually required in the corner case whereby
				multiple mutexes were held and the mutexes were given back
				in an order different to that in which they were taken.
				If a context switch did not occur when the first mutex was
				returned, even if a task was waiting on it, then a context
				switch should occur when the last mutex is returned whether
				a task is waiting on it or not. */
				xReturn = pdTRUE;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
	taskEXIT_CRITICAL(&xTaskQueueMutex);

	return xReturn;
}
4700
4701 #endif /* configUSE_MUTEXES */
4702 /*-----------------------------------------------------------*/
4703
4704 #if ( configUSE_MUTEXES == 1 )
4705
void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder, UBaseType_t uxHighestPriorityWaitingTask )
{
TCB_t * const pxTCB = pxMutexHolder;
UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

	/* Called when a task times out while waiting for a mutex.  Lowers the
	mutex holder's priority to the greater of its base priority and the
	priority of the highest priority task still waiting for the mutex, so
	the holder does not keep an inherited priority it no longer needs. */
	taskENTER_CRITICAL(&xTaskQueueMutex);
	if( pxMutexHolder != NULL )
	{
		/* If pxMutexHolder is not NULL then the holder must hold at least
		one mutex. */
		configASSERT( pxTCB->uxMutexesHeld );

		/* Determine the priority to which the priority of the task that
		holds the mutex should be set. This will be the greater of the
		holding task's base priority and the priority of the highest
		priority task that is waiting to obtain the mutex. */
		if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
		{
			uxPriorityToUse = uxHighestPriorityWaitingTask;
		}
		else
		{
			uxPriorityToUse = pxTCB->uxBasePriority;
		}

		/* Does the priority need to change? */
		if( pxTCB->uxPriority != uxPriorityToUse )
		{
			/* Only disinherit if no other mutexes are held. This is a
			simplification in the priority inheritance implementation. If
			the task that holds the mutex is also holding other mutexes then
			the other mutexes may have caused the priority inheritance. */
			if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
			{
				/* If a task has timed out because it already holds the
				mutex it was trying to obtain then it cannot have inherited
				its own priority. */
				configASSERT( pxTCB != pxCurrentTCB[xPortGetCoreID()] );

				/* Disinherit the priority, remembering the previous
				priority to facilitate determining the subject task's
				state. */
				traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
				uxPriorityUsedOnEntry = pxTCB->uxPriority;
				pxTCB->uxPriority = uxPriorityToUse;

				/* Only reset the event list item value if the value is not
				being used for anything else. */
				if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
				{
					listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}

				/* If the running task is not the task that holds the mutex
				then the task that holds the mutex could be in either the
				Ready, Blocked or Suspended states. Only remove the task
				from its current state list if it is in the Ready state as
				the task's priority is going to change and there is one
				Ready list per priority. */
				if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
				{
					if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
					{
						/* It is known that the task is in its ready list so
						there is no need to check again and the port level
						reset macro can be called directly. */
						portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}

					prvAddTaskToReadyList( pxTCB );
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
	taskEXIT_CRITICAL(&xTaskQueueMutex);
}
4807
4808 #endif /* configUSE_MUTEXES */
4809 /*-----------------------------------------------------------*/
4810
4811 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4812
vTaskEnterCritical(void)4813 void vTaskEnterCritical( void )
4814 {
4815 portDISABLE_INTERRUPTS();
4816
4817 if( xSchedulerRunning != pdFALSE )
4818 {
4819 ( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting )++;
4820
4821 /* This is not the interrupt safe version of the enter critical
4822 function so assert() if it is being called from an interrupt
4823 context. Only API functions that end in "FromISR" can be used in an
4824 interrupt. Only assert if the critical nesting count is 1 to
4825 protect against recursive calls if the assert function also uses a
4826 critical section. */
4827 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting == 1 )
4828 {
4829 portASSERT_IF_IN_ISR();
4830 }
4831 }
4832 else
4833 {
4834 mtCOVERAGE_TEST_MARKER();
4835 }
4836 }
4837
4838 #endif /* portCRITICAL_NESTING_IN_TCB */
4839 /*-----------------------------------------------------------*/
4840
4841 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4842
vTaskExitCritical(void)4843 void vTaskExitCritical( void )
4844 {
4845 if( xSchedulerRunning != pdFALSE )
4846 {
4847 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting > 0U )
4848 {
4849 ( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting )--;
4850
4851 if( pxCurrentTCB[xPortGetCoreID()]->uxCriticalNesting == 0U )
4852 {
4853 portENABLE_INTERRUPTS();
4854 }
4855 else
4856 {
4857 mtCOVERAGE_TEST_MARKER();
4858 }
4859 }
4860 else
4861 {
4862 mtCOVERAGE_TEST_MARKER();
4863 }
4864 }
4865 else
4866 {
4867 mtCOVERAGE_TEST_MARKER();
4868 }
4869 }
4870
4871 #endif /* portCRITICAL_NESTING_IN_TCB */
4872 /*-----------------------------------------------------------*/
4873
4874 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4875
prvWriteNameToBuffer(char * pcBuffer,const char * pcTaskName)4876 static char *prvWriteNameToBuffer( char *pcBuffer, const char *pcTaskName )
4877 {
4878 size_t x;
4879
4880 /* Start by copying the entire string. */
4881 strcpy( pcBuffer, pcTaskName );
4882
4883 /* Pad the end of the string with spaces to ensure columns line up when
4884 printed out. */
4885 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4886 {
4887 pcBuffer[ x ] = ' ';
4888 }
4889
4890 /* Terminate. */
4891 pcBuffer[ x ] = ( char ) 0x00;
4892
4893 /* Return the new end of string. */
4894 return &( pcBuffer[ x ] );
4895 }
4896
4897 #endif /* ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4898 /*-----------------------------------------------------------*/
4899
4900 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
4901
void vTaskList( char * pcWriteBuffer )
{
TaskStatus_t *pxTaskStatusArray;
UBaseType_t uxArraySize, x;
char cStatus;

	/*
	 * PLEASE NOTE:
	 *
	 * This function is provided for convenience only, and is used by many
	 * of the demo applications. Do not consider it to be part of the
	 * scheduler.
	 *
	 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
	 * uxTaskGetSystemState() output into a human readable table that
	 * displays task names, states and stack usage.
	 *
	 * vTaskList() has a dependency on the sprintf() C library function that
	 * might bloat the code size, use a lot of stack, and provide different
	 * results on different platforms. An alternative, tiny, third party,
	 * and limited functionality implementation of sprintf() is provided in
	 * many of the FreeRTOS/Demo sub-directories in a file called
	 * printf-stdarg.c (note printf-stdarg.c does not provide a full
	 * snprintf() implementation!).
	 *
	 * It is recommended that production systems call uxTaskGetSystemState()
	 * directly to get access to raw stats data, rather than indirectly
	 * through a call to vTaskList().
	 */


	/* Make sure the write buffer does not contain a string. */
	*pcWriteBuffer = ( char ) 0x00;

	/* Take a snapshot of the number of tasks in case it changes while this
	function is executing. */
	uxArraySize = uxCurrentNumberOfTasks;

	/* Allocate an array index for each task. NOTE! if
	configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
	equate to NULL. */
	pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

	if( pxTaskStatusArray != NULL )
	{
		/* Generate the (binary) data. */
		uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );

		/* Create a human readable table from the binary data. */
		for( x = 0; x < uxArraySize; x++ )
		{
			/* Map the task state enum onto a single display character. */
			switch( pxTaskStatusArray[ x ].eCurrentState )
			{
				case eRunning:		cStatus = tskRUNNING_CHAR;
									break;

				case eReady:		cStatus = tskREADY_CHAR;
									break;

				case eBlocked:		cStatus = tskBLOCKED_CHAR;
									break;

				case eSuspended:	cStatus = tskSUSPENDED_CHAR;
									break;

				case eDeleted:		cStatus = tskDELETED_CHAR;
									break;

				case eInvalid:		/* Fall through. */
				default:			/* Should not get here, but it is included
									to prevent static checking errors. */
									cStatus = ( char ) 0x00;
									break;
			}

			/* Write the task name to the string, padding with spaces so it
			can be printed in tabular form more easily. */
			pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

			/* Write the rest of the string. */
			/* NOTE(review): sprintf() writes are unbounded - pcWriteBuffer
			is assumed to be large enough for one line per task; confirm at
			the call sites. */
			#if configTASKLIST_INCLUDE_COREID
			sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\t%hd\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber, ( int ) pxTaskStatusArray[ x ].xCoreID );
			#else
			sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
			#endif
			pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
		}

		/* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
		is 0 then vPortFree() will be #defined to nothing. */
		vPortFree( pxTaskStatusArray );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
}
4999
5000 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
5001 /*----------------------------------------------------------*/
5002
5003 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
5004
void vTaskGetRunTimeStats( char *pcWriteBuffer )
{
TaskStatus_t *pxTaskStatusArray;
UBaseType_t uxArraySize, x;
uint32_t ulTotalTime, ulStatsAsPercentage;

	#if( configUSE_TRACE_FACILITY != 1 )
	{
		#error configUSE_TRACE_FACILITY must also be set to 1 in FreeRTOSConfig.h to use vTaskGetRunTimeStats().
	}
	#endif

	/*
	 * PLEASE NOTE:
	 *
	 * This function is provided for convenience only, and is used by many
	 * of the demo applications. Do not consider it to be part of the
	 * scheduler.
	 *
	 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
	 * of the uxTaskGetSystemState() output into a human readable table that
	 * displays the amount of time each task has spent in the Running state
	 * in both absolute and percentage terms.
	 *
	 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
	 * function that might bloat the code size, use a lot of stack, and
	 * provide different results on different platforms. An alternative,
	 * tiny, third party, and limited functionality implementation of
	 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
	 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
	 * a full snprintf() implementation!).
	 *
	 * It is recommended that production systems call uxTaskGetSystemState()
	 * directly to get access to raw stats data, rather than indirectly
	 * through a call to vTaskGetRunTimeStats().
	 */

	/* Make sure the write buffer does not contain a string. */
	*pcWriteBuffer = ( char ) 0x00;

	/* Take a snapshot of the number of tasks in case it changes while this
	function is executing. */
	uxArraySize = uxCurrentNumberOfTasks;

	/* Allocate an array index for each task. NOTE! If
	configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
	equate to NULL. */
	pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */

	if( pxTaskStatusArray != NULL )
	{
		/* Generate the (binary) data. */
		uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );

		/* For percentage calculations. */
		ulTotalTime /= 100UL;

		/* Avoid divide by zero errors. */
		if( ulTotalTime > 0UL )
		{
			/* Create a human readable table from the binary data. */
			for( x = 0; x < uxArraySize; x++ )
			{
				/* What percentage of the total run time has the task used?
				This will always be rounded down to the nearest integer.
				ulTotalRunTimeDiv100 has already been divided by 100. */
				ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;

				/* Write the task name to the string, padding with
				spaces so it can be printed in tabular form more
				easily. */
				pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );

				/* NOTE(review): sprintf() writes below are unbounded -
				pcWriteBuffer is assumed to be large enough for one line per
				task; confirm at the call sites. */
				if( ulStatsAsPercentage > 0UL )
				{
					#ifdef portLU_PRINTF_SPECIFIER_REQUIRED
					{
						sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
					}
					#else
					{
						/* sizeof( int ) == sizeof( long ) so a smaller
						printf() library can be used. */
						sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
					}
					#endif
				}
				else
				{
					/* If the percentage is zero here then the task has
					consumed less than 1% of the total run time. */
					#ifdef portLU_PRINTF_SPECIFIER_REQUIRED
					{
						sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
					}
					#else
					{
						/* sizeof( int ) == sizeof( long ) so a smaller
						printf() library can be used. */
						sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
					}
					#endif
				}

				pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		/* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
		is 0 then vPortFree() will be #defined to nothing. */
		vPortFree( pxTaskStatusArray );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
}
5126
5127 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
5128 /*-----------------------------------------------------------*/
5129
uxTaskResetEventItemValue(void)5130 TickType_t uxTaskResetEventItemValue( void )
5131 {
5132 TickType_t uxReturn;
5133
5134 taskENTER_CRITICAL(&xTaskQueueMutex);
5135 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ) );
5136
5137 /* Reset the event list item to its normal value - so it can be used with
5138 queues and semaphores. */
5139 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[ xPortGetCoreID() ]->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB[ xPortGetCoreID() ]->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
5140 taskEXIT_CRITICAL(&xTaskQueueMutex);
5141
5142 return uxReturn;
5143 }
5144 /*-----------------------------------------------------------*/
5145
5146 #if ( configUSE_MUTEXES == 1 )
5147
pvTaskIncrementMutexHeldCount(void)5148 void *pvTaskIncrementMutexHeldCount( void )
5149 {
5150 TCB_t *curTCB;
5151
5152 /* If xSemaphoreCreateMutex() is called before any tasks have been created
5153 then pxCurrentTCB will be NULL. */
5154 taskENTER_CRITICAL(&xTaskQueueMutex);
5155 if( pxCurrentTCB[ xPortGetCoreID() ] != NULL )
5156 {
5157 ( pxCurrentTCB[ xPortGetCoreID() ]->uxMutexesHeld )++;
5158 }
5159 curTCB = pxCurrentTCB[ xPortGetCoreID() ];
5160 taskEXIT_CRITICAL(&xTaskQueueMutex);
5161
5162 return curTCB;
5163 }
5164
5165 #endif /* configUSE_MUTEXES */
5166 /*-----------------------------------------------------------*/
5167
5168 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5169
uint32_t ulTaskNotifyTake( BaseType_t xClearCountOnExit, TickType_t xTicksToWait )
{
uint32_t ulReturn;

	/* Use the task's notification value as a counting (or, with
	xClearCountOnExit set, binary) semaphore: block for up to
	xTicksToWait ticks until the value is non-zero, then either clear it
	or decrement it, and return the value as it was on exit from the
	Blocked state. */
	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		/* Only block if the notification count is not already non-zero. */
		if( pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue == 0UL )
		{
			/* Mark this task as waiting for a notification. */
			pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskWAITING_NOTIFICATION;

			if( xTicksToWait > ( TickType_t ) 0 )
			{
				prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait );
				traceTASK_NOTIFY_TAKE_BLOCK();

				/* All ports are written to allow a yield in a critical
				section (some will yield immediately, others wait until the
				critical section exits) - but it is not something that
				application code should ever do. */
				portYIELD_WITHIN_API();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	/* Execution resumes here after the task is notified or the block
	time expires. */
	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		traceTASK_NOTIFY_TAKE();
		ulReturn = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue;

		if( ulReturn != 0UL )
		{
			if( xClearCountOnExit != pdFALSE )
			{
				pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue = 0UL;
			}
			else
			{
				/* Consume a single count. */
				pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue = ulReturn - ( uint32_t ) 1;
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}

		pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	return ulReturn;
}
5232
5233 #endif /* configUSE_TASK_NOTIFICATIONS */
5234 /*-----------------------------------------------------------*/
5235
5236 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5237
/*
 * Wait for a notification to be pending on the calling task.  Bits in
 * ulBitsToClearOnEntry are cleared from the notification value before any
 * block; bits in ulBitsToClearOnExit are cleared after a notification has
 * been received.  The (possibly updated) notification value is output via
 * pulNotificationValue when it is non-NULL.  Returns pdTRUE if a
 * notification was received, pdFALSE if the call timed out.
 */
BaseType_t xTaskNotifyWait( uint32_t ulBitsToClearOnEntry, uint32_t ulBitsToClearOnExit, uint32_t *pulNotificationValue, TickType_t xTicksToWait )
{
BaseType_t xReturn;

	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		/* Only block if a notification is not already pending. */
		if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState != taskNOTIFICATION_RECEIVED )
		{
			/* Clear bits in the task's notification value as bits may get
			set by the notifying task or interrupt.  This can be used to
			clear the value to zero. */
			pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue &= ~ulBitsToClearOnEntry;

			/* Mark this task as waiting for a notification. */
			pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskWAITING_NOTIFICATION;

			if( xTicksToWait > ( TickType_t ) 0 )
			{
				prvAddCurrentTaskToDelayedList( xPortGetCoreID(), xTicksToWait);
				traceTASK_NOTIFY_WAIT_BLOCK();

				/* All ports are written to allow a yield in a critical
				section (some will yield immediately, others wait until the
				critical section exits) - but it is not something that
				application code should ever do. */
				portYIELD_WITHIN_API();
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	/* The task has either been notified or the block time expired. */
	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		traceTASK_NOTIFY_WAIT();

		if( pulNotificationValue != NULL )
		{
			/* Output the current notification value, which may or may not
			have changed. */
			*pulNotificationValue = pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue;
		}

		/* If ucNotifyValue is set then either the task never entered the
		blocked state (because a notification was already pending) or the
		task unblocked because of a notification.  Otherwise the task
		unblocked because of a timeout. */
		if( pxCurrentTCB[xPortGetCoreID()]->ucNotifyState != taskNOTIFICATION_RECEIVED )
		{
			/* A notification was not received. */
			xReturn = pdFALSE;
		}
		else
		{
			/* A notification was already pending or a notification was
			received while the task was waiting. */
			pxCurrentTCB[xPortGetCoreID()]->ulNotifiedValue &= ~ulBitsToClearOnExit;
			xReturn = pdTRUE;
		}

		pxCurrentTCB[xPortGetCoreID()]->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	return xReturn;
}
5312
5313 #endif /* configUSE_TASK_NOTIFICATIONS */
5314 /*-----------------------------------------------------------*/
5315
5316 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5317
/*
 * Send a notification to xTaskToNotify, updating its notification value
 * according to eAction (set bits, increment, overwrite, write-only-if-empty,
 * or no change).  When pulPreviousNotificationValue is non-NULL the value as
 * it was before the update is returned through it.  If the target task is
 * blocked waiting for a notification it is moved to the ready list, and a
 * yield is requested where appropriate.  Returns pdPASS, except when
 * eSetValueWithoutOverwrite finds a notification already pending, in which
 * case pdFAIL is returned and the value is left unchanged.
 */
BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue )
{
TCB_t * pxTCB;
BaseType_t xReturn = pdPASS;
uint8_t ucOriginalNotifyState;

	configASSERT( xTaskToNotify );
	pxTCB = xTaskToNotify;

	taskENTER_CRITICAL( &xTaskQueueMutex );
	{
		if( pulPreviousNotificationValue != NULL )
		{
			*pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
		}

		ucOriginalNotifyState = pxTCB->ucNotifyState;

		pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

		switch( eAction )
		{
			case eSetBits	:
				pxTCB->ulNotifiedValue |= ulValue;
				break;

			case eIncrement	:
				( pxTCB->ulNotifiedValue )++;
				break;

			case eSetValueWithOverwrite	:
				pxTCB->ulNotifiedValue = ulValue;
				break;

			case eSetValueWithoutOverwrite :
				if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
				{
					pxTCB->ulNotifiedValue = ulValue;
				}
				else
				{
					/* The value could not be written to the task. */
					xReturn = pdFAIL;
				}
				break;

			case eNoAction:
				/* The task is being notified without its notify value being
				updated. */
				break;

			default:
				/* Should not get here if all enums are handled.
				Artificially force an assert by testing a value the
				compiler can't assume is const. */
				configASSERT( pxTCB->ulNotifiedValue == ~0UL );

				break;
		}

		traceTASK_NOTIFY();

		/* If the task is in the blocked state specifically to wait for a
		notification then unblock it now. */
		if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
		{
			( void ) uxListRemove( &( pxTCB->xStateListItem ) );
			prvAddTaskToReadyList( pxTCB );

			/* The task should not have been on an event list. */
			configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

			#if( configUSE_TICKLESS_IDLE != 0 )
			{
				/* If a task is blocked waiting for a notification then
				xNextTaskUnblockTime might be set to the blocked task's time
				out time.  If the task is unblocked for a reason other than
				a timeout xNextTaskUnblockTime is normally left unchanged,
				because it will automatically get reset to a new value when
				the tick count equals xNextTaskUnblockTime.  However if
				tickless idling is used it might be more important to enter
				sleep mode at the earliest possible time - so reset
				xNextTaskUnblockTime here to ensure it is updated at the
				earliest possible time. */
				prvResetNextTaskUnblockTime();
			}
			#endif

			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
			{
				/* The notified task has a priority above the currently
				executing task so a yield is required. */
				portYIELD_WITHIN_API();
			}
			else if ( pxTCB->xCoreID != xPortGetCoreID() )
			{
				/* SMP: the woken task cannot run on this core - request a
				yield on its own core instead. */
				taskYIELD_OTHER_CORE(pxTCB->xCoreID, pxTCB->uxPriority);
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			mtCOVERAGE_TEST_MARKER();
		}
	}
	taskEXIT_CRITICAL( &xTaskQueueMutex );

	return xReturn;
}
5430
5431 #endif /* configUSE_TASK_NOTIFICATIONS */
5432 /*-----------------------------------------------------------*/
5433
5434 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5435
/*
 * Interrupt-safe version of xTaskGenericNotify().  Updates the target task's
 * notification value per eAction and unblocks the task if it was waiting for
 * a notification.  Because the scheduler may be suspended, a woken task is
 * placed on the pending-ready list rather than the ready list in that case.
 * Instead of yielding directly, *pxHigherPriorityTaskWoken is set to pdTRUE
 * when a context switch should be requested on exit from the ISR.
 */
BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify, uint32_t ulValue, eNotifyAction eAction, uint32_t *pulPreviousNotificationValue, BaseType_t *pxHigherPriorityTaskWoken )
{
TCB_t * pxTCB;
uint8_t ucOriginalNotifyState;
BaseType_t xReturn = pdPASS;

	configASSERT( xTaskToNotify );

	/* RTOS ports that support interrupt nesting have the concept of a
	maximum	system call (or maximum API call) interrupt priority.
	Interrupts that are	above the maximum system call priority are keep
	permanently enabled, even when the RTOS kernel is in a critical section,
	but cannot make any calls to FreeRTOS API functions.  If configASSERT()
	is defined in FreeRTOSConfig.h then
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has
	been assigned a priority above the configured maximum system call
	priority.  Only FreeRTOS functions that end in FromISR can be called
	from interrupts	that have been assigned a priority at or (logically)
	below the maximum system call interrupt priority.  FreeRTOS maintains a
	separate interrupt safe API to ensure interrupt entry is as fast and as
	simple as possible.  More information (albeit Cortex-M specific) is
	provided on the following link:
	http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	pxTCB = xTaskToNotify;

	taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
	{
		if( pulPreviousNotificationValue != NULL )
		{
			*pulPreviousNotificationValue = pxTCB->ulNotifiedValue;
		}

		ucOriginalNotifyState = pxTCB->ucNotifyState;
		pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

		switch( eAction )
		{
			case eSetBits	:
				pxTCB->ulNotifiedValue |= ulValue;
				break;

			case eIncrement	:
				( pxTCB->ulNotifiedValue )++;
				break;

			case eSetValueWithOverwrite	:
				pxTCB->ulNotifiedValue = ulValue;
				break;

			case eSetValueWithoutOverwrite :
				if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
				{
					pxTCB->ulNotifiedValue = ulValue;
				}
				else
				{
					/* The value could not be written to the task. */
					xReturn = pdFAIL;
				}
				break;

			case eNoAction :
				/* The task is being notified without its notify value being
				updated. */
				break;

			default:
				/* Should not get here if all enums are handled.
				Artificially force an assert by testing a value the
				compiler can't assume is const. */
				configASSERT( pxTCB->ulNotifiedValue == ~0UL );
				break;
		}

		traceTASK_NOTIFY_FROM_ISR();

		/* If the task is in the blocked state specifically to wait for a
		notification then unblock it now. */
		if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
		{
			/* The task should not have been on an event list. */
			configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

			if( uxSchedulerSuspended[xPortGetCoreID()] == ( UBaseType_t ) pdFALSE )
			{
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				prvAddTaskToReadyList( pxTCB );
			}
			else
			{
				/* The delayed and ready lists cannot be accessed, so hold
				this task pending until the scheduler is resumed. */
				vListInsertEnd( &( xPendingReadyList[xPortGetCoreID()] ), &( pxTCB->xEventListItem ) );
			}

			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
			{
				/* The notified task has a priority above the currently
				executing task so a yield is required. */
				if( pxHigherPriorityTaskWoken != NULL )
				{
					*pxHigherPriorityTaskWoken = pdTRUE;
				}
			}
			else if ( pxTCB->xCoreID != xPortGetCoreID() )
			{
				/* SMP: the woken task cannot run on this core - request a
				yield on its own core instead. */
				taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

		}
	}
	taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);

	return xReturn;
}
5558
5559 #endif /* configUSE_TASK_NOTIFICATIONS */
5560 /*-----------------------------------------------------------*/
5561
5562 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5563
/*
 * Interrupt-safe "give": increments the target task's notification value,
 * treating it as a counting semaphore, and unblocks the task if it was
 * waiting for a notification.  When the woken task should preempt,
 * *pxHigherPriorityTaskWoken is set to pdTRUE so the ISR can request a
 * context switch on exit.
 */
void vTaskNotifyGiveFromISR( TaskHandle_t xTaskToNotify, BaseType_t *pxHigherPriorityTaskWoken )
{
TCB_t * pxTCB;
uint8_t ucOriginalNotifyState;


	configASSERT( xTaskToNotify );

	/* RTOS ports that support interrupt nesting have the concept of a
	maximum	system call (or maximum API call) interrupt priority.
	Interrupts that are	above the maximum system call priority are keep
	permanently enabled, even when the RTOS kernel is in a critical section,
	but cannot make any calls to FreeRTOS API functions.  If configASSERT()
	is defined in FreeRTOSConfig.h then
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has
	been assigned a priority above the configured maximum system call
	priority.  Only FreeRTOS functions that end in FromISR can be called
	from interrupts	that have been assigned a priority at or (logically)
	below the maximum system call interrupt priority.  FreeRTOS maintains a
	separate interrupt safe API to ensure interrupt entry is as fast and as
	simple as possible.  More information (albeit Cortex-M specific) is
	provided on the following link:
	http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	pxTCB = xTaskToNotify;

	taskENTER_CRITICAL_ISR(&xTaskQueueMutex);
	{
		ucOriginalNotifyState = pxTCB->ucNotifyState;
		pxTCB->ucNotifyState = taskNOTIFICATION_RECEIVED;

		/* 'Giving' is equivalent to incrementing a count in a counting
		semaphore. */
		( pxTCB->ulNotifiedValue )++;

		traceTASK_NOTIFY_GIVE_FROM_ISR();

		/* If the task is in the blocked state specifically to wait for a
		notification then unblock it now. */
		if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
		{
			/* The task should not have been on an event list. */
			configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

			if( uxSchedulerSuspended[ xPortGetCoreID() ] == ( UBaseType_t ) pdFALSE )
			{
				( void ) uxListRemove( &( pxTCB->xStateListItem ) );
				prvAddTaskToReadyList( pxTCB );
			}
			else
			{
				/* The delayed and ready lists cannot be accessed, so hold
				this task pending until the scheduler is resumed. */
				vListInsertEnd( &( xPendingReadyList[xPortGetCoreID()] ), &( pxTCB->xEventListItem ) );
			}


			if( tskCAN_RUN_HERE(pxTCB->xCoreID) && pxTCB->uxPriority > pxCurrentTCB[ xPortGetCoreID() ]->uxPriority )
			{
				/* The notified task has a priority above the currently
				executing task so a yield is required. */
				if( pxHigherPriorityTaskWoken != NULL )
				{
					*pxHigherPriorityTaskWoken = pdTRUE;
				}
			}
			else if ( pxTCB->xCoreID != xPortGetCoreID() )
			{
				/* SMP: the woken task cannot run on this core - request a
				yield on its own core instead. */
				taskYIELD_OTHER_CORE( pxTCB->xCoreID, pxTCB->uxPriority );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

		}
	}
	taskEXIT_CRITICAL_ISR(&xTaskQueueMutex);
}
5645
5646 #endif /* configUSE_TASK_NOTIFICATIONS */
5647
5648 /*-----------------------------------------------------------*/
5649
5650 #if( configUSE_TASK_NOTIFICATIONS == 1 )
5651
xTaskNotifyStateClear(TaskHandle_t xTask)5652 BaseType_t xTaskNotifyStateClear( TaskHandle_t xTask )
5653 {
5654 TCB_t *pxTCB;
5655 BaseType_t xReturn;
5656
5657 /* If null is passed in here then it is the calling task that is having
5658 its notification state cleared. */
5659 pxTCB = prvGetTCBFromHandle( xTask );
5660
5661 taskENTER_CRITICAL( &xTaskQueueMutex );
5662 {
5663 if( pxTCB->ucNotifyState == taskNOTIFICATION_RECEIVED )
5664 {
5665 pxTCB->ucNotifyState = taskNOT_WAITING_NOTIFICATION;
5666 xReturn = pdPASS;
5667 }
5668 else
5669 {
5670 xReturn = pdFAIL;
5671 }
5672 }
5673 taskEXIT_CRITICAL( &xTaskQueueMutex );
5674
5675 return xReturn;
5676 }
5677
5678 #endif /* configUSE_TASK_NOTIFICATIONS */
5679 /*-----------------------------------------------------------*/
5680
5681 #if( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( INCLUDE_xTaskGetIdleTaskHandle == 1 ) )
5682
ulTaskGetIdleRunTimeCounter(void)5683 uint32_t ulTaskGetIdleRunTimeCounter( void )
5684 {
5685 taskENTER_CRITICAL(&xTaskQueueMutex);
5686 tskTCB *pxTCB = (tskTCB *)xIdleTaskHandle[xPortGetCoreID()];
5687 taskEXIT_CRITICAL(&xTaskQueueMutex);
5688
5689 return pxTCB->ulRunTimeCounter;
5690 }
5691
5692 #endif
5693 /*-----------------------------------------------------------*/
5694
/*
 * Move the task currently running on xCoreID from the ready list to the
 * appropriate blocked list: the suspended list for an indefinite block
 * ( portMAX_DELAY, when INCLUDE_vTaskSuspend is enabled ), the overflow
 * delayed list when the wake time wraps the tick counter, or the current
 * delayed list otherwise.  Must be called with xTaskQueueMutex held.
 */
static void prvAddCurrentTaskToDelayedList( const portBASE_TYPE xCoreID, const TickType_t xTicksToWait )
{
TickType_t xTimeToWake;
const TickType_t xConstTickCount = xTickCount;

	if (portNUM_PROCESSORS > 1 && listIS_CONTAINED_WITHIN(&xTasksWaitingTermination, &( pxCurrentTCB[xCoreID]->xStateListItem))) {
		/* vTaskDelete() has been called to delete this task. This would have happened from the other core while this task was spinning on xTaskQueueMutex,
		   so don't move the running task to the delayed list - as soon as this core re-enables interrupts this task will
		   be suspended permanently */
		return;
	}

	#if( INCLUDE_xTaskAbortDelay == 1 )
	{
		/* About to enter a delayed list, so ensure the ucDelayAborted flag is
		reset to pdFALSE so it can be detected as having been set to pdTRUE
		when the task leaves the Blocked state. */
		pxCurrentTCB[xCoreID]->ucDelayAborted = pdFALSE;
	}
	#endif

	/* Remove the task from the ready list before adding it to the blocked list
	as the same list item is used for both lists. */
	if( uxListRemove( &( pxCurrentTCB[xCoreID]->xStateListItem ) ) == ( UBaseType_t ) 0 )
	{
		/* The current task must be in a ready list, so there is no need to
		check, and the port reset macro can be called directly. */
		portRESET_READY_PRIORITY( pxCurrentTCB[xCoreID]->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB[xPortGetCoreID()] cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}

	#if ( INCLUDE_vTaskSuspend == 1 )
	{
		if( ( xTicksToWait == portMAX_DELAY ) )
		{
			/* Add the task to the suspended task list instead of a delayed task
			list to ensure it is not woken by a timing event.  It will block
			indefinitely. */
			vListInsertEnd( &xSuspendedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
		}
		else
		{
			/* Calculate the time at which the task should be woken if the event
			does not occur.  This may overflow but this doesn't matter, the
			kernel will manage it correctly. */
			xTimeToWake = xConstTickCount + xTicksToWait;

			/* The list item will be inserted in wake time order. */
			listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xCoreID]->xStateListItem ), xTimeToWake );

			if( xTimeToWake < xConstTickCount )
			{
				/* Wake time has overflowed.  Place this item in the overflow
				list. */
				vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
			}
			else
			{
				/* The wake time has not overflowed, so the current block list
				is used. */
				vListInsert( pxDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );

				/* If the task entering the blocked state was placed at the
				head of the list of blocked tasks then xNextTaskUnblockTime
				needs to be updated too. */
				if( xTimeToWake < xNextTaskUnblockTime )
				{
					xNextTaskUnblockTime = xTimeToWake;
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
	}
	#else /* INCLUDE_vTaskSuspend */
	{
		/* Calculate the time at which the task should be woken if the event
		does not occur.  This may overflow but this doesn't matter, the kernel
		will manage it correctly. */
		xTimeToWake = xConstTickCount + xTicksToWait;

		/* The list item will be inserted in wake time order. */
		listSET_LIST_ITEM_VALUE( &( pxCurrentTCB[xCoreID]->xStateListItem ), xTimeToWake );

		if( xTimeToWake < xConstTickCount )
		{
			/* Wake time has overflowed.  Place this item in the overflow list. */
			vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );
		}
		else
		{
			/* The wake time has not overflowed, so the current block list is used. */
			vListInsert( pxDelayedTaskList, &( pxCurrentTCB[xCoreID]->xStateListItem ) );

			/* If the task entering the blocked state was placed at the head of the
			list of blocked tasks then xNextTaskUnblockTime needs to be updated
			too. */
			if( xTimeToWake < xNextTaskUnblockTime )
			{
				xNextTaskUnblockTime = xTimeToWake;
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
	}
	#endif /* INCLUDE_vTaskSuspend */
}
5809
5810 #if ( configENABLE_TASK_SNAPSHOT == 1 )
/* Record a single task's snapshot (TCB pointer plus stack bounds) into the
caller's array and advance the caller's index.  A NULL TCB is ignored.  The
caller is responsible for ensuring *uxTask is within the array bounds. */
static void prvTaskGetSnapshot( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, TCB_t *pxTCB )
{
	TaskSnapshot_t *pxSnapshot;

	if( pxTCB == NULL )
	{
		return;
	}

	pxSnapshot = &( pxTaskSnapshotArray[ *uxTask ] );
	pxSnapshot->pxTCB = pxTCB;
	pxSnapshot->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
#if( portSTACK_GROWTH < 0 )
	/* Stack grows down: the "end" is the highest address. */
	pxSnapshot->pxEndOfStack = pxTCB->pxEndOfStack;
#else
	/* Stack grows up: the "end" is the stack's base allocation. */
	pxSnapshot->pxEndOfStack = pxTCB->pxStack;
#endif
	( *uxTask )++;
}
5829
/* Walk every task on pxList, appending a snapshot of each to
pxTaskSnapshotArray via prvTaskGetSnapshot() until the list is exhausted or
the array (uxArraySize entries) is full.  *uxTask is the running fill index
and is advanced by the helper. */
static void prvTaskGetSnapshotsFromList( TaskSnapshot_t *pxTaskSnapshotArray, UBaseType_t *uxTask, const UBaseType_t uxArraySize, List_t *pxList )
{
TCB_t *pxNextTCB, *pxFirstTCB;

	if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
	{
		/* Remember the first owner so the circular walk knows when it has
		come back around to the start. */
		listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList );
		do
		{
			/* Stop once the caller's array is full. */
			if( *uxTask >= uxArraySize )
				break;

			listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList );
			prvTaskGetSnapshot( pxTaskSnapshotArray, uxTask, pxNextTCB );
		} while( pxNextTCB != pxFirstTCB );
	}
	else
	{
		mtCOVERAGE_TEST_MARKER();
	}
}
5851
/*
 * Fill pxTaskSnapshotArray (capacity uxArraySize) with a snapshot of every
 * task in the system - ready, delayed, pending-ready, waiting termination and
 * suspended - and write sizeof(TCB_t) to *pxTcbSz.  Returns the number of
 * snapshots written.  NOTE(review): no locking is performed here, so this
 * appears intended for contexts where the scheduler is already halted (e.g.
 * a panic/core-dump handler) - confirm against callers.
 */
UBaseType_t uxTaskGetSnapshotAll( TaskSnapshot_t * const pxTaskSnapshotArray, const UBaseType_t uxArraySize, UBaseType_t * const pxTcbSz )
{
UBaseType_t uxTask = 0, i = 0;


	*pxTcbSz = sizeof(TCB_t);
	/* Fill in an TaskStatus_t structure with information on each
	task in the Ready state. */
	i = configMAX_PRIORITIES;
	do
	{
		i--;
		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( pxReadyTasksLists[ i ] ) );
	} while( i > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

	/* Fill in an TaskStatus_t structure with information on each
	task in the Blocked state. */
	prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxDelayedTaskList );
	prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, ( List_t * ) pxOverflowDelayedTaskList );
	/* Tasks waiting on each core's pending-ready list. */
	for (i = 0; i < portNUM_PROCESSORS; i++) {
		if( uxTask >= uxArraySize )
			break;
		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &( xPendingReadyList[i]) );
	}

	#if( INCLUDE_vTaskDelete == 1 )
	{
		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xTasksWaitingTermination );
	}
	#endif

	#if ( INCLUDE_vTaskSuspend == 1 )
	{
		prvTaskGetSnapshotsFromList( pxTaskSnapshotArray, &uxTask, uxArraySize, &xSuspendedTaskList );
	}
	#endif
	return uxTask;
}
5890
prvFirstTaskGet(List_t * pxList)5891 static TCB_t *prvFirstTaskGet( List_t *pxList )
5892 {
5893 ListItem_t *pxListItem = listGET_HEAD_ENTRY( pxList );
5894 if( pxListItem != listGET_END_MARKER( pxList ) ) {
5895 return listGET_LIST_ITEM_OWNER( pxListItem );
5896 }
5897 return NULL;
5898 }
5899
/* Return the TCB that follows pxTCB within the state list containing it, or
NULL when pxTCB is the last task on that list (the next item is the end
marker). */
static TCB_t *prvNextTaskGet( TCB_t *pxTCB )
{
	List_t *pxContainer = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
	ListItem_t *pxNext = listGET_NEXT( &( pxTCB->xStateListItem ) );

	if( pxNext == listGET_END_MARKER( pxContainer ) )
	{
		return NULL;
	}

	return listGET_LIST_ITEM_OWNER( pxNext );
}
5909
/* Capture a snapshot (TCB pointer and stack bounds) of a single task into
*pxTaskSnapshot.  Asserts that the handle points at valid TCB memory and that
the output pointer is non-NULL. */
inline void vTaskGetSnapshot( TaskHandle_t pxTask, TaskSnapshot_t *pxTaskSnapshot )
{
	TCB_t *pxTCB = ( TCB_t * ) pxTask;

	configASSERT( portVALID_TCB_MEM(pxTask) );
	configASSERT( pxTaskSnapshot != NULL );

	pxTaskSnapshot->pxTCB = pxTCB;
	pxTaskSnapshot->pxTopOfStack = ( StackType_t * ) pxTCB->pxTopOfStack;
	pxTaskSnapshot->pxEndOfStack = pxTCB->pxEndOfStack;
}
5918
/*
 * Iterator over every task in the system, in a fixed list order: the ready
 * lists (highest priority first), each core's pending-ready list, then the
 * delayed/overflow/terminating/suspended lists.  Pass NULL to get the first
 * task; pass a previously returned handle to get the one after it.  Returns
 * NULL when the iteration is exhausted or when a handle fails the
 * portVALID_TCB_MEM check.  Like the snapshot functions above, this performs
 * no locking - NOTE(review): presumably only safe with the system halted.
 */
TaskHandle_t pxTaskGetNext( TaskHandle_t pxTask )
{
	TCB_t *pxTCB = pxTask;
	/* The list the current task's state item lives on (NULL when starting
	from scratch). */
	List_t *pxTaskList = NULL;
	UBaseType_t i = configMAX_PRIORITIES;
	UBaseType_t bCurTaskListFound = pdFALSE;
	/* The non-ready lists, visited in this order after the ready and
	pending-ready lists. */
	List_t *task_lists[] = {
		pxDelayedTaskList,
		pxOverflowDelayedTaskList,
	#if( INCLUDE_vTaskDelete == 1 )
		&xTasksWaitingTermination,
	#endif
	#if( INCLUDE_vTaskSuspend == 1 )
		&xSuspendedTaskList
	#endif
	};

	if( pxTask != NULL && !portVALID_TCB_MEM(pxTask) ) {
		return NULL;
	}

	if( pxTCB != NULL ) {
		/* First try the next task on the same list as the given one. */
		pxTCB = prvNextTaskGet( pxTCB );
		if( pxTCB != NULL ) {
			// take care not to return garbage
			return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
		}
		/* End of that list reached - resume scanning from the list that
		follows the one containing the given task. */
		pxTaskList = listLIST_ITEM_CONTAINER( &( ((TCB_t *)pxTask)->xStateListItem ) );
	}
	/* ready tasks lists */
	do
	{
		i--;
		List_t *pxList = &( pxReadyTasksLists[ i ] );
		if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
			/* need to find list the current task item from */
			if( pxTaskList == pxList ) {
				bCurTaskListFound = pdTRUE;
			}
			continue; /* go to the next 'ready list' */
		}
		pxTCB = prvFirstTaskGet( pxList );
		if( pxTCB != NULL ) {
			// take care not to return garbage
			return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
		}
	}
	while( i > tskIDLE_PRIORITY );
	/* pending ready tasks lists */
	for (i = 0; i < portNUM_PROCESSORS; i++) {
		List_t *pxList = &( xPendingReadyList[ i ] );
		if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
			/* need to find list the current task item from */
			if( pxTaskList == pxList ) {
				bCurTaskListFound = pdTRUE;
			}
			continue; /* go to the next 'ready list' */
		}
		pxTCB = prvFirstTaskGet( pxList );
		if( pxTCB != NULL ) {
			// take care not to return garbage
			return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
		}
	}
	/* other tasks lists */
	for (i = 0; i < sizeof(task_lists)/sizeof(task_lists[0]); i++) {
		List_t *pxList = task_lists[ i ];
		if( bCurTaskListFound == pdFALSE && pxTaskList != NULL ) {
			/* need to find list the current task item from */
			if( pxTaskList == pxList ) {
				bCurTaskListFound = pdTRUE;
			}
			continue; /* go to the next 'ready list' */
		}
		pxTCB = prvFirstTaskGet( pxList );
		if( pxTCB != NULL ) {
			// take care not to return garbage
			return portVALID_TCB_MEM(pxTCB) ? pxTCB : NULL;
		}
	}

	/* All lists after the given task are empty - iteration complete. */
	return NULL;
}
6002
6003 #endif
6004
6005 /* Code below here allows additional code to be inserted into this source file,
6006 especially where access to file scope functions and data is needed (for example
6007 when performing module tests). */
6008
6009 #ifdef FREERTOS_MODULE_TEST
6010 #include "tasks_test_access_functions.h"
6011 #endif
6012
6013 #if( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
6014
6015 #include "freertos_tasks_c_additions.h"
6016
6017 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
/* One-time initialisation hook for code injected via
freertos_tasks_c_additions.h, giving it access to this file's private
(file-scope) functions and data. */
static void freertos_tasks_c_additions_init( void )
{
	FREERTOS_TASKS_C_ADDITIONS_INIT();
}
6022 #endif
6023
6024 #endif
6025
6026 /* If timers.c is not referenced anywhere, don't create the timer task to save RAM */
BaseType_t __attribute__((weak)) xTimerCreateTimerTask( void )
{
	/* Weak stub: the real implementation in timers.c overrides this when the
	timers module is linked in.  Otherwise no timer task is created (saving
	RAM) and the scheduler start-up path still sees success. */
	return pdPASS;
}
6031