1 /*
2 * FreeRTOS Kernel V10.6.2
3 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 *
5 * SPDX-License-Identifier: MIT
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy of
8 * this software and associated documentation files (the "Software"), to deal in
9 * the Software without restriction, including without limitation the rights to
10 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11 * the Software, and to permit persons to whom the Software is furnished to do so,
12 * subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in all
15 * copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * https://www.FreeRTOS.org
25 * https://github.com/FreeRTOS
26 *
27 */
28
29 /* Standard includes. */
30 #include <stdlib.h>
31 #include <string.h>
32
33 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
34 * all the API functions to use the MPU wrappers. That should only be done when
35 * task.h is included from an application file. */
36 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
37
38 /* FreeRTOS includes. */
39 #include "FreeRTOS.h"
40 #include "task.h"
41 #include "timers.h"
42 #include "stack_macros.h"
43
44 /* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
45 * because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
46 * for the header files above, but not in this file, in order to generate the
47 * correct privileged Vs unprivileged linkage and placement. */
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */
49
50 /* Set configUSE_STATS_FORMATTING_FUNCTIONS to 2 to include the stats formatting
51 * functions but without including stdio.h here. */
52 #if ( configUSE_STATS_FORMATTING_FUNCTIONS == 1 )
53
54 /* At the bottom of this file are two optional functions that can be used
55 * to generate human readable text from the raw data generated by the
56 * uxTaskGetSystemState() function. Note the formatting functions are provided
57 * for convenience only, and are NOT considered part of the kernel. */
58 #include <stdio.h>
59 #endif /* configUSE_STATS_FORMATTING_FUNCTIONS == 1 ) */
60
#if ( configUSE_PREEMPTION == 0 )

/* If the cooperative scheduler is being used then a yield should not be
 * performed just because a higher priority task has been woken. */
    #define taskYIELD_IF_USING_PREEMPTION()
#else
    #define taskYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
#endif

/* Values that can be assigned to the ucNotifyState member of the TCB. */
#define taskNOT_WAITING_NOTIFICATION              ( ( uint8_t ) 0 ) /* Must be zero as it is the initialised value. */
#define taskWAITING_NOTIFICATION                  ( ( uint8_t ) 1 )
#define taskNOTIFICATION_RECEIVED                 ( ( uint8_t ) 2 )

/*
 * The value used to fill the stack of a task when the task is created.  This
 * is used purely for checking the high water mark for tasks.
 */
#define tskSTACK_FILL_BYTE                        ( 0xa5U )

/* Bits used to record how a task's stack and TCB were allocated.  One of
 * these values is stored in the ucStaticallyAllocated member of the TCB so
 * task deletion knows which (if any) of the two allocations it must free. */
#define tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB    ( ( uint8_t ) 0 )
#define tskSTATICALLY_ALLOCATED_STACK_ONLY        ( ( uint8_t ) 1 )
#define tskSTATICALLY_ALLOCATED_STACK_AND_TCB     ( ( uint8_t ) 2 )

/* If any of the following are set then task stacks are filled with a known
 * value so the high water mark can be determined.  If none of the following are
 * set then don't fill the stack so there is no unnecessary dependency on memset. */
#if ( ( configCHECK_FOR_STACK_OVERFLOW > 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    1
#else
    #define tskSET_NEW_STACKS_TO_KNOWN_VALUE    0
#endif

/*
 * Macros used by vListTask to indicate which state a task is in.
 */
#define tskRUNNING_CHAR      ( 'X' )
#define tskBLOCKED_CHAR      ( 'B' )
#define tskREADY_CHAR        ( 'R' )
#define tskDELETED_CHAR      ( 'D' )
#define tskSUSPENDED_CHAR    ( 'S' )

/*
 * Some kernel aware debuggers require the data the debugger needs access to to
 * be global, rather than file scope.  Defining static away makes every
 * file-scope variable and function below externally visible.
 */
#ifdef portREMOVE_STATIC_QUALIFIER
    #define static
#endif

/* The name allocated to the Idle task.  This can be overridden by defining
 * configIDLE_TASK_NAME in FreeRTOSConfig.h. */
#ifndef configIDLE_TASK_NAME
    #define configIDLE_TASK_NAME    "IDLE"
#endif
117
#if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 0 then task selection is
 * performed in a generic way that is not optimised to any particular
 * microcontroller architecture. */

/* uxTopReadyPriority holds the priority of the highest priority ready
 * state task. */
    #define taskRECORD_READY_PRIORITY( uxPriority ) \
    do {                                            \
        if( ( uxPriority ) > uxTopReadyPriority )   \
        {                                           \
            uxTopReadyPriority = ( uxPriority );    \
        }                                           \
    } while( 0 ) /* taskRECORD_READY_PRIORITY */

/*-----------------------------------------------------------*/

/* Walks down from uxTopReadyPriority to find the highest priority non-empty
 * ready list, makes the next task in that list the current task, and records
 * the (possibly lower) priority found so the next search starts there.  The
 * configASSERT fires if uxTopPriority would underflow below priority 0, which
 * should never happen because the idle task is always ready. */
    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                \
    do {                                                                      \
        UBaseType_t uxTopPriority = uxTopReadyPriority;                       \
                                                                              \
        /* Find the highest priority queue that contains ready tasks. */      \
        while( listLIST_IS_EMPTY( &( pxReadyTasksLists[ uxTopPriority ] ) ) ) \
        {                                                                     \
            configASSERT( uxTopPriority );                                    \
            --uxTopPriority;                                                  \
        }                                                                     \
                                                                              \
        /* listGET_OWNER_OF_NEXT_ENTRY indexes through the list, so the tasks of \
         * the same priority get an equal share of the processor time. */        \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) ); \
        uxTopReadyPriority = uxTopPriority;                                   \
    } while( 0 ) /* taskSELECT_HIGHEST_PRIORITY_TASK */

/*-----------------------------------------------------------*/

/* Define away taskRESET_READY_PRIORITY() and portRESET_READY_PRIORITY() as
 * they are only required when a port optimised method of task selection is
 * being used. */
    #define taskRESET_READY_PRIORITY( uxPriority )
    #define portRESET_READY_PRIORITY( uxPriority, uxTopReadyPriority )

#else /* configUSE_PORT_OPTIMISED_TASK_SELECTION */

/* If configUSE_PORT_OPTIMISED_TASK_SELECTION is 1 then task selection is
 * performed in a way that is tailored to the particular microcontroller
 * architecture being used. */

/* A port optimised version is provided.  Call the port defined macros. */
    #define taskRECORD_READY_PRIORITY( uxPriority )    portRECORD_READY_PRIORITY( ( uxPriority ), uxTopReadyPriority )

/*-----------------------------------------------------------*/

/* Uses the port supplied portGET_HIGHEST_PRIORITY() (typically a count
 * leading zeros instruction) instead of the generic linear search above. */
    #define taskSELECT_HIGHEST_PRIORITY_TASK()                                                  \
    do {                                                                                        \
        UBaseType_t uxTopPriority;                                                              \
                                                                                                \
        /* Find the highest priority list that contains ready tasks. */                         \
        portGET_HIGHEST_PRIORITY( uxTopPriority, uxTopReadyPriority );                          \
        configASSERT( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ uxTopPriority ] ) ) > 0 ); \
        listGET_OWNER_OF_NEXT_ENTRY( pxCurrentTCB, &( pxReadyTasksLists[ uxTopPriority ] ) );   \
    } while( 0 )

/*-----------------------------------------------------------*/

/* A port optimised version is provided, call it only if the TCB being reset
 * is being referenced from a ready list.  If it is referenced from a delayed
 * or suspended list then it won't be in a ready list. */
    #define taskRESET_READY_PRIORITY( uxPriority )                                                     \
    do {                                                                                               \
        if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ ( uxPriority ) ] ) ) == ( UBaseType_t ) 0 ) \
        {                                                                                              \
            portRESET_READY_PRIORITY( ( uxPriority ), ( uxTopReadyPriority ) );                        \
        }                                                                                              \
    } while( 0 )

#endif /* configUSE_PORT_OPTIMISED_TASK_SELECTION */
196
197 /*-----------------------------------------------------------*/
198
/* pxDelayedTaskList and pxOverflowDelayedTaskList are switched when the tick
 * count overflows.  Swapping the two list pointers (rather than moving list
 * items) makes the overflow handling O(1). */
#define taskSWITCH_DELAYED_LISTS()                                                \
    do {                                                                          \
        List_t * pxTemp;                                                          \
                                                                                  \
        /* The delayed tasks list should be empty when the lists are switched. */ \
        configASSERT( ( listLIST_IS_EMPTY( pxDelayedTaskList ) ) );               \
                                                                                  \
        pxTemp = pxDelayedTaskList;                                               \
        pxDelayedTaskList = pxOverflowDelayedTaskList;                            \
        pxOverflowDelayedTaskList = pxTemp;                                       \
        xNumOfOverflows++;                                                        \
        prvResetNextTaskUnblockTime();                                            \
    } while( 0 )

/*-----------------------------------------------------------*/

/*
 * Place the task represented by pxTCB into the appropriate ready list for
 * the task.  It is inserted at the end of the list.
 */
#define prvAddTaskToReadyList( pxTCB )                                                                 \
    do {                                                                                               \
        traceMOVED_TASK_TO_READY_STATE( pxTCB );                                                       \
        taskRECORD_READY_PRIORITY( ( pxTCB )->uxPriority );                                            \
        listINSERT_END( &( pxReadyTasksLists[ ( pxTCB )->uxPriority ] ), &( ( pxTCB )->xStateListItem ) ); \
        tracePOST_MOVED_TASK_TO_READY_STATE( pxTCB );                                                  \
    } while( 0 )
/*-----------------------------------------------------------*/

/*
 * Several functions take a TaskHandle_t parameter that can optionally be NULL,
 * where NULL is used to indicate that the handle of the currently executing
 * task should be used in place of the parameter.  This macro simply checks to
 * see if the parameter is NULL and returns a pointer to the appropriate TCB.
 */
#define prvGetTCBFromHandle( pxHandle )    ( ( ( pxHandle ) == NULL ) ? pxCurrentTCB : ( pxHandle ) )

/* The item value of the event list item is normally used to hold the priority
 * of the task to which it belongs (coded to allow it to be held in reverse
 * priority order).  However, it is occasionally borrowed for other purposes.  It
 * is important its value is not updated due to a task priority change while it is
 * being used for another purpose.  The following bit definition is used to inform
 * the scheduler that the value should not be changed - in which case it is the
 * responsibility of whichever module is using the value to ensure it gets set back
 * to its original value when it is released.
 *
 * Each definition below sets the most significant bit of the configured
 * (16, 32 or 64 bit) tick type. */
#if ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_16_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x8000U
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_32_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x80000000UL
#elif ( configTICK_TYPE_WIDTH_IN_BITS == TICK_TYPE_WIDTH_64_BITS )
    #define taskEVENT_LIST_ITEM_VALUE_IN_USE    0x8000000000000000ULL
#endif
253
254 /*
255 * Task control block. A task control block (TCB) is allocated for each task,
256 * and stores task state information, including a pointer to the task's context
257 * (the task's run time environment, including register values)
258 */
/*
 * Task control block.  A task control block (TCB) is allocated for each task,
 * and stores task state information, including a pointer to the task's context
 * (the task's run time environment, including register values)
 */
typedef struct tskTaskControlBlock       /* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
    volatile StackType_t * pxTopOfStack; /**< Points to the location of the last item placed on the tasks stack.  THIS MUST BE THE FIRST MEMBER OF THE TCB STRUCT. */

    #if ( portUSING_MPU_WRAPPERS == 1 )
        xMPU_SETTINGS xMPUSettings; /**< The MPU settings are defined as part of the port layer.  THIS MUST BE THE SECOND MEMBER OF THE TCB STRUCT. */
    #endif

    ListItem_t xStateListItem;                  /**< The list that the state list item of a task is reference from denotes the state of that task (Ready, Blocked, Suspended ). */
    ListItem_t xEventListItem;                  /**< Used to reference a task from an event list. */
    UBaseType_t uxPriority;                     /**< The priority of the task.  0 is the lowest priority. */
    StackType_t * pxStack;                      /**< Points to the start of the stack. */
    char pcTaskName[ configMAX_TASK_NAME_LEN ]; /**< Descriptive name given to the task when created.  Facilitates debugging only. */ /*lint !e971 Unqualified char types are allowed for strings and single characters only. */

    #if ( ( portSTACK_GROWTH > 0 ) || ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
        StackType_t * pxEndOfStack; /**< Points to the highest valid address for the stack. */
    #endif

    #if ( portCRITICAL_NESTING_IN_TCB == 1 )
        UBaseType_t uxCriticalNesting; /**< Holds the critical section nesting depth for ports that do not maintain their own count in the port layer. */
    #endif

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxTCBNumber;  /**< Stores a number that increments each time a TCB is created.  It allows debuggers to determine when a task has been deleted and then recreated. */
        UBaseType_t uxTaskNumber; /**< Stores a number specifically for use by third party trace code. */
    #endif

    #if ( configUSE_MUTEXES == 1 )
        UBaseType_t uxBasePriority; /**< The priority last assigned to the task - used by the priority inheritance mechanism. */
        UBaseType_t uxMutexesHeld;  /**< The number of mutexes the task currently holds - presumably used to know when uxBasePriority can be restored; confirm against the mutex code. */
    #endif

    #if ( configUSE_APPLICATION_TASK_TAG == 1 )
        TaskHookFunction_t pxTaskTag; /**< Application supplied hook function of type TaskHookFunction_t ('task tag'). */
    #endif

    #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS > 0 )
        void * pvThreadLocalStoragePointers[ configNUM_THREAD_LOCAL_STORAGE_POINTERS ]; /**< Per-task storage slots for use by the application. */
    #endif

    #if ( configGENERATE_RUN_TIME_STATS == 1 )
        configRUN_TIME_COUNTER_TYPE ulRunTimeCounter; /**< Stores the amount of time the task has spent in the Running state. */
    #endif

    #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        configTLS_BLOCK_TYPE xTLSBlock; /**< Memory block used as Thread Local Storage (TLS) Block for the task. */
    #endif

    #if ( configUSE_TASK_NOTIFICATIONS == 1 )
        volatile uint32_t ulNotifiedValue[ configTASK_NOTIFICATION_ARRAY_ENTRIES ]; /**< The notification values - one per notification array entry. */
        volatile uint8_t ucNotifyState[ configTASK_NOTIFICATION_ARRAY_ENTRIES ];    /**< One of the taskNOT_WAITING_NOTIFICATION / taskWAITING_NOTIFICATION / taskNOTIFICATION_RECEIVED values per entry. */
    #endif

    /* See the comments in FreeRTOS.h with the definition of
     * tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE. */
    #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the task is a statically allocated to ensure no attempt is made to free the memory. */
    #endif

    #if ( INCLUDE_xTaskAbortDelay == 1 )
        uint8_t ucDelayAborted; /**< Flag noting that the task's blocked-state delay was cut short (see xTaskAbortDelay()). */
    #endif

    #if ( configUSE_POSIX_ERRNO == 1 )
        int iTaskErrno; /**< Per-task errno value - swapped with the global FreeRTOS_errno on context switches. */
    #endif
} tskTCB;

/* The old tskTCB name is maintained above then typedefed to the new TCB_t name
 * below to enable the use of older kernel aware debuggers. */
typedef tskTCB TCB_t;
330
/*lint -save -e956 A manual analysis and inspection has been used to determine
 * which static variables must be declared volatile. */
portDONT_DISCARD PRIVILEGED_DATA TCB_t * volatile pxCurrentTCB = NULL;

/* Lists for ready and blocked tasks. --------------------
 * xDelayedTaskList1 and xDelayedTaskList2 could be moved to function scope but
 * doing so breaks some kernel aware debuggers and debuggers that rely on removing
 * the static qualifier. */
PRIVILEGED_DATA static List_t pxReadyTasksLists[ configMAX_PRIORITIES ]; /**< Prioritised ready tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList1;                         /**< Delayed tasks. */
PRIVILEGED_DATA static List_t xDelayedTaskList2;                         /**< Delayed tasks (two lists are used - one for delays that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t * volatile pxDelayedTaskList;              /**< Points to the delayed task list currently being used. */
PRIVILEGED_DATA static List_t * volatile pxOverflowDelayedTaskList;      /**< Points to the delayed task list currently being used to hold tasks that have overflowed the current tick count. */
PRIVILEGED_DATA static List_t xPendingReadyList;                         /**< Tasks that have been readied while the scheduler was suspended.  They will be moved to the ready list when the scheduler is resumed. */

#if ( INCLUDE_vTaskDelete == 1 )

    PRIVILEGED_DATA static List_t xTasksWaitingTermination; /**< Tasks that have been deleted - but their memory not yet freed. */
    PRIVILEGED_DATA static volatile UBaseType_t uxDeletedTasksWaitingCleanUp = ( UBaseType_t ) 0U;

#endif

#if ( INCLUDE_vTaskSuspend == 1 )

    PRIVILEGED_DATA static List_t xSuspendedTaskList; /**< Tasks that are currently suspended. */

#endif

/* Global POSIX errno.  Its value is changed upon context switching to match
 * the errno of the currently running task. */
#if ( configUSE_POSIX_ERRNO == 1 )
    int FreeRTOS_errno = 0;
#endif

/* Other file private variables. --------------------------------*/
PRIVILEGED_DATA static volatile UBaseType_t uxCurrentNumberOfTasks = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT; /**< The kernel's tick count - starts from configINITIAL_TICK_COUNT rather than zero. */
PRIVILEGED_DATA static volatile UBaseType_t uxTopReadyPriority = tskIDLE_PRIORITY;               /**< See taskRECORD_READY_PRIORITY() - tracks the highest priority with a ready task. */
PRIVILEGED_DATA static volatile BaseType_t xSchedulerRunning = pdFALSE;
PRIVILEGED_DATA static volatile TickType_t xPendedTicks = ( TickType_t ) 0U;                     /**< Ticks that could not be processed immediately - presumably drained when the scheduler is resumed; confirm against xTaskResumeAll(). */
PRIVILEGED_DATA static volatile BaseType_t xYieldPending = pdFALSE;
PRIVILEGED_DATA static volatile BaseType_t xNumOfOverflows = ( BaseType_t ) 0;                   /**< Incremented by taskSWITCH_DELAYED_LISTS() each time the tick count overflows. */
PRIVILEGED_DATA static UBaseType_t uxTaskNumber = ( UBaseType_t ) 0U;
PRIVILEGED_DATA static volatile TickType_t xNextTaskUnblockTime = ( TickType_t ) 0U;             /* Initialised to portMAX_DELAY before the scheduler starts. */
PRIVILEGED_DATA static TaskHandle_t xIdleTaskHandle = NULL;                                      /**< Holds the handle of the idle task.  The idle task is created automatically when the scheduler is started. */

/* Improve support for OpenOCD. The kernel tracks Ready tasks via priority lists.
 * For tracking the state of remote threads, OpenOCD uses uxTopUsedPriority
 * to determine the number of priority lists to read back from the remote target. */
const volatile UBaseType_t uxTopUsedPriority = configMAX_PRIORITIES - 1U;

/* Context switches are held pending while the scheduler is suspended.  Also,
 * interrupts must not manipulate the xStateListItem of a TCB, or any of the
 * lists the xStateListItem can be referenced from, if the scheduler is suspended.
 * If an interrupt needs to unblock a task while the scheduler is suspended then it
 * moves the task's event list item into the xPendingReadyList, ready for the
 * kernel to move the task from the pending ready list into the real ready list
 * when the scheduler is unsuspended.  The pending ready list itself can only be
 * accessed from a critical section. */
PRIVILEGED_DATA static volatile UBaseType_t uxSchedulerSuspended = ( UBaseType_t ) 0U;

#if ( configGENERATE_RUN_TIME_STATS == 1 )

/* Do not move these variables to function scope as doing so prevents the
 * code working with debuggers that need to remove the static qualifier. */
    PRIVILEGED_DATA static configRUN_TIME_COUNTER_TYPE ulTaskSwitchedInTime = 0UL; /**< Holds the value of a timer/counter the last time a task was switched in. */
    PRIVILEGED_DATA static volatile configRUN_TIME_COUNTER_TYPE ulTotalRunTime = 0UL; /**< Holds the total amount of execution time as defined by the run time counter clock. */

#endif

/*lint -restore */
402
403 /*-----------------------------------------------------------*/
404
405 /* File private functions. --------------------------------*/
406
407 /**
408 * Utility task that simply returns pdTRUE if the task referenced by xTask is
409 * currently in the Suspended state, or pdFALSE if the task referenced by xTask
410 * is in any other state.
411 */
412 #if ( INCLUDE_vTaskSuspend == 1 )
413
414 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask ) PRIVILEGED_FUNCTION;
415
416 #endif /* INCLUDE_vTaskSuspend */
417
418 /*
419 * Utility to ready all the lists used by the scheduler. This is called
420 * automatically upon the creation of the first task.
421 */
422 static void prvInitialiseTaskLists( void ) PRIVILEGED_FUNCTION;
423
424 /*
425 * The idle task, which as all tasks is implemented as a never ending loop.
426 * The idle task is automatically created and added to the ready lists upon
427 * creation of the first user task.
428 *
429 * The portTASK_FUNCTION_PROTO() macro is used to allow port/compiler specific
430 * language extensions. The equivalent prototype for this function is:
431 *
432 * void prvIdleTask( void *pvParameters );
433 *
434 */
435 static portTASK_FUNCTION_PROTO( prvIdleTask, pvParameters ) PRIVILEGED_FUNCTION;
436
437 /*
438 * Utility to free all memory allocated by the scheduler to hold a TCB,
439 * including the stack pointed to by the TCB.
440 *
441 * This does not free memory allocated by the task itself (i.e. memory
442 * allocated by calls to pvPortMalloc from within the tasks application code).
443 */
444 #if ( INCLUDE_vTaskDelete == 1 )
445
446 static void prvDeleteTCB( TCB_t * pxTCB ) PRIVILEGED_FUNCTION;
447
448 #endif
449
450 /*
451 * Used only by the idle task. This checks to see if anything has been placed
452 * in the list of tasks waiting to be deleted. If so the task is cleaned up
453 * and its TCB deleted.
454 */
455 static void prvCheckTasksWaitingTermination( void ) PRIVILEGED_FUNCTION;
456
457 /*
458 * The currently executing task is entering the Blocked state. Add the task to
459 * either the current or the overflow delayed task list.
460 */
461 static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
462 const BaseType_t xCanBlockIndefinitely ) PRIVILEGED_FUNCTION;
463
464 /*
465 * Fills an TaskStatus_t structure with information on each task that is
466 * referenced from the pxList list (which may be a ready list, a delayed list,
467 * a suspended list, etc.).
468 *
469 * THIS FUNCTION IS INTENDED FOR DEBUGGING ONLY, AND SHOULD NOT BE CALLED FROM
470 * NORMAL APPLICATION CODE.
471 */
472 #if ( configUSE_TRACE_FACILITY == 1 )
473
474 static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
475 List_t * pxList,
476 eTaskState eState ) PRIVILEGED_FUNCTION;
477
478 #endif
479
480 /*
481 * Searches pxList for a task with name pcNameToQuery - returning a handle to
482 * the task if it is found, or NULL if the task is not found.
483 */
484 #if ( INCLUDE_xTaskGetHandle == 1 )
485
486 static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
487 const char pcNameToQuery[] ) PRIVILEGED_FUNCTION;
488
489 #endif
490
491 /*
492 * When a task is created, the stack of the task is filled with a known value.
493 * This function determines the 'high water mark' of the task stack by
494 * determining how much of the stack remains at the original preset value.
495 */
496 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
497
498 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte ) PRIVILEGED_FUNCTION;
499
500 #endif
501
502 /*
503 * Return the amount of time, in ticks, that will pass before the kernel will
504 * next move a task from the Blocked state to the Running state.
505 *
506 * This conditional compilation should use inequality to 0, not equality to 1.
507 * This is to ensure portSUPPRESS_TICKS_AND_SLEEP() can be called when user
508 * defined low power mode implementations require configUSE_TICKLESS_IDLE to be
509 * set to a value other than 1.
510 */
511 #if ( configUSE_TICKLESS_IDLE != 0 )
512
513 static TickType_t prvGetExpectedIdleTime( void ) PRIVILEGED_FUNCTION;
514
515 #endif
516
517 /*
518 * Set xNextTaskUnblockTime to the time at which the next Blocked state task
519 * will exit the Blocked state.
520 */
521 static void prvResetNextTaskUnblockTime( void ) PRIVILEGED_FUNCTION;
522
523 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
524
525 /*
526 * Helper function used to pad task names with spaces when printing out
527 * human readable tables of task information.
528 */
529 static char * prvWriteNameToBuffer( char * pcBuffer,
530 const char * pcTaskName ) PRIVILEGED_FUNCTION;
531
532 #endif
533
534 /*
535 * Called after a Task_t structure has been allocated either statically or
536 * dynamically to fill in the structure's members.
537 */
538 static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
539 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
540 const uint32_t ulStackDepth,
541 void * const pvParameters,
542 UBaseType_t uxPriority,
543 TaskHandle_t * const pxCreatedTask,
544 TCB_t * pxNewTCB,
545 const MemoryRegion_t * const xRegions ) PRIVILEGED_FUNCTION;
546
547 /*
548 * Called after a new task has been created and initialised to place the task
549 * under the control of the scheduler.
550 */
551 static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
552
553 /*
554 * freertos_tasks_c_additions_init() should only be called if the user definable
555 * macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is the only macro
556 * called by the function.
557 */
558 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
559
560 static void freertos_tasks_c_additions_init( void ) PRIVILEGED_FUNCTION;
561
562 #endif
563
564 /*-----------------------------------------------------------*/
565
566 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
567
xTaskCreateStatic(TaskFunction_t pxTaskCode,const char * const pcName,const uint32_t ulStackDepth,void * const pvParameters,UBaseType_t uxPriority,StackType_t * const puxStackBuffer,StaticTask_t * const pxTaskBuffer)568 TaskHandle_t xTaskCreateStatic( TaskFunction_t pxTaskCode,
569 const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
570 const uint32_t ulStackDepth,
571 void * const pvParameters,
572 UBaseType_t uxPriority,
573 StackType_t * const puxStackBuffer,
574 StaticTask_t * const pxTaskBuffer )
575 {
576 TCB_t * pxNewTCB;
577 TaskHandle_t xReturn;
578
579 configASSERT( puxStackBuffer != NULL );
580 configASSERT( pxTaskBuffer != NULL );
581
582 #if ( configASSERT_DEFINED == 1 )
583 {
584 /* Sanity check that the size of the structure used to declare a
585 * variable of type StaticTask_t equals the size of the real task
586 * structure. */
587 volatile size_t xSize = sizeof( StaticTask_t );
588 configASSERT( xSize == sizeof( TCB_t ) );
589 ( void ) xSize; /* Prevent lint warning when configASSERT() is not used. */
590 }
591 #endif /* configASSERT_DEFINED */
592
593 if( ( pxTaskBuffer != NULL ) && ( puxStackBuffer != NULL ) )
594 {
595 /* The memory used for the task's TCB and stack are passed into this
596 * function - use them. */
597 pxNewTCB = ( TCB_t * ) pxTaskBuffer; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */
598 memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
599 pxNewTCB->pxStack = ( StackType_t * ) puxStackBuffer;
600
601 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
602 {
603 /* Tasks can be created statically or dynamically, so note this
604 * task was created statically in case the task is later deleted. */
605 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
606 }
607 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
608
609 prvInitialiseNewTask( pxTaskCode, pcName, ulStackDepth, pvParameters, uxPriority, &xReturn, pxNewTCB, NULL );
610 prvAddNewTaskToReadyList( pxNewTCB );
611 }
612 else
613 {
614 xReturn = NULL;
615 }
616
617 return xReturn;
618 }
619
620 #endif /* SUPPORT_STATIC_ALLOCATION */
621 /*-----------------------------------------------------------*/
622
623 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
624
xTaskCreateRestrictedStatic(const TaskParameters_t * const pxTaskDefinition,TaskHandle_t * pxCreatedTask)625 BaseType_t xTaskCreateRestrictedStatic( const TaskParameters_t * const pxTaskDefinition,
626 TaskHandle_t * pxCreatedTask )
627 {
628 TCB_t * pxNewTCB;
629 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
630
631 configASSERT( pxTaskDefinition->puxStackBuffer != NULL );
632 configASSERT( pxTaskDefinition->pxTaskBuffer != NULL );
633
634 if( ( pxTaskDefinition->puxStackBuffer != NULL ) && ( pxTaskDefinition->pxTaskBuffer != NULL ) )
635 {
636 /* Allocate space for the TCB. Where the memory comes from depends
637 * on the implementation of the port malloc function and whether or
638 * not static allocation is being used. */
639 pxNewTCB = ( TCB_t * ) pxTaskDefinition->pxTaskBuffer;
640 memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
641
642 /* Store the stack location in the TCB. */
643 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
644
645 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
646 {
647 /* Tasks can be created statically or dynamically, so note this
648 * task was created statically in case the task is later deleted. */
649 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_AND_TCB;
650 }
651 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
652
653 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
654 pxTaskDefinition->pcName,
655 ( uint32_t ) pxTaskDefinition->usStackDepth,
656 pxTaskDefinition->pvParameters,
657 pxTaskDefinition->uxPriority,
658 pxCreatedTask, pxNewTCB,
659 pxTaskDefinition->xRegions );
660
661 prvAddNewTaskToReadyList( pxNewTCB );
662 xReturn = pdPASS;
663 }
664
665 return xReturn;
666 }
667
668 #endif /* ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
669 /*-----------------------------------------------------------*/
670
671 #if ( ( portUSING_MPU_WRAPPERS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
672
xTaskCreateRestricted(const TaskParameters_t * const pxTaskDefinition,TaskHandle_t * pxCreatedTask)673 BaseType_t xTaskCreateRestricted( const TaskParameters_t * const pxTaskDefinition,
674 TaskHandle_t * pxCreatedTask )
675 {
676 TCB_t * pxNewTCB;
677 BaseType_t xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
678
679 configASSERT( pxTaskDefinition->puxStackBuffer );
680
681 if( pxTaskDefinition->puxStackBuffer != NULL )
682 {
683 /* Allocate space for the TCB. Where the memory comes from depends
684 * on the implementation of the port malloc function and whether or
685 * not static allocation is being used. */
686 pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );
687
688 if( pxNewTCB != NULL )
689 {
690 memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );
691
692 /* Store the stack location in the TCB. */
693 pxNewTCB->pxStack = pxTaskDefinition->puxStackBuffer;
694
695 #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 )
696 {
697 /* Tasks can be created statically or dynamically, so note
698 * this task had a statically allocated stack in case it is
699 * later deleted. The TCB was allocated dynamically. */
700 pxNewTCB->ucStaticallyAllocated = tskSTATICALLY_ALLOCATED_STACK_ONLY;
701 }
702 #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */
703
704 prvInitialiseNewTask( pxTaskDefinition->pvTaskCode,
705 pxTaskDefinition->pcName,
706 ( uint32_t ) pxTaskDefinition->usStackDepth,
707 pxTaskDefinition->pvParameters,
708 pxTaskDefinition->uxPriority,
709 pxCreatedTask, pxNewTCB,
710 pxTaskDefinition->xRegions );
711
712 prvAddNewTaskToReadyList( pxNewTCB );
713 xReturn = pdPASS;
714 }
715 }
716
717 return xReturn;
718 }
719
720 #endif /* portUSING_MPU_WRAPPERS */
721 /*-----------------------------------------------------------*/
722
723 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
724
    /*
     * Create a new task, allocating both the stack and the Task Control Block
     * (TCB) from the FreeRTOS heap.
     *
     * pxTaskCode    - Entry point the task will execute.
     * pcName        - Descriptive name copied into the TCB (truncated to
     *                 configMAX_TASK_NAME_LEN - 1 characters by the copy in
     *                 prvInitialiseNewTask()).
     * usStackDepth  - Stack size in StackType_t words, not bytes.
     * pvParameters  - Argument passed unmodified to pxTaskCode.
     * uxPriority    - Priority the task will run at.
     * pxCreatedTask - Optional out parameter receiving the task handle; may
     *                 be NULL.
     *
     * Returns pdPASS on success, or errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY if
     * either allocation fails.  On a partial failure the successfully
     * allocated object is freed again, so nothing is leaked.
     */
    BaseType_t xTaskCreate( TaskFunction_t pxTaskCode,
                            const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                            const configSTACK_DEPTH_TYPE usStackDepth,
                            void * const pvParameters,
                            UBaseType_t uxPriority,
                            TaskHandle_t * const pxCreatedTask )
    {
        TCB_t * pxNewTCB;
        BaseType_t xReturn;

        /* If the stack grows down then allocate the stack then the TCB so the stack
         * does not grow into the TCB.  Likewise if the stack grows up then allocate
         * the TCB then the stack. */
        #if ( portSTACK_GROWTH > 0 )
        {
            /* Allocate space for the TCB.  Where the memory comes from depends on
             * the implementation of the port malloc function and whether or not static
             * allocation is being used. */
            pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) );

            if( pxNewTCB != NULL )
            {
                memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                /* Allocate space for the stack used by the task being created.
                 * The base of the stack memory stored in the TCB so the task can
                 * be deleted later if required. */
                pxNewTCB->pxStack = ( StackType_t * ) pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

                if( pxNewTCB->pxStack == NULL )
                {
                    /* Could not allocate the stack.  Delete the allocated TCB. */
                    vPortFree( pxNewTCB );
                    pxNewTCB = NULL;
                }
            }
        }
        #else /* portSTACK_GROWTH */
        {
            StackType_t * pxStack;

            /* Allocate space for the stack used by the task being created. */
            pxStack = pvPortMallocStack( ( ( ( size_t ) usStackDepth ) * sizeof( StackType_t ) ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation is the stack. */

            if( pxStack != NULL )
            {
                /* Allocate space for the TCB. */
                pxNewTCB = ( TCB_t * ) pvPortMalloc( sizeof( TCB_t ) ); /*lint !e9087 !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack, and the first member of TCB_t is always a pointer to the task's stack. */

                if( pxNewTCB != NULL )
                {
                    memset( ( void * ) pxNewTCB, 0x00, sizeof( TCB_t ) );

                    /* Store the stack location in the TCB. */
                    pxNewTCB->pxStack = pxStack;
                }
                else
                {
                    /* The stack cannot be used as the TCB was not created.  Free
                     * it again. */
                    vPortFreeStack( pxStack );
                }
            }
            else
            {
                pxNewTCB = NULL;
            }
        }
        #endif /* portSTACK_GROWTH */

        if( pxNewTCB != NULL )
        {
            #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e9029 !e731 Macro has been consolidated for readability reasons. */
            {
                /* Tasks can be created statically or dynamically, so note this
                 * task was created dynamically in case it is later deleted. */
                pxNewTCB->ucStaticallyAllocated = tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB;
            }
            #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE */

            prvInitialiseNewTask( pxTaskCode, pcName, ( uint32_t ) usStackDepth, pvParameters, uxPriority, pxCreatedTask, pxNewTCB, NULL );
            prvAddNewTaskToReadyList( pxNewTCB );
            xReturn = pdPASS;
        }
        else
        {
            xReturn = errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY;
        }

        return xReturn;
    }
816
817 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
818 /*-----------------------------------------------------------*/
819
/*
 * Complete the initialisation of a newly allocated TCB and its stack, common
 * to every flavour of task creation (dynamic, static, restricted).
 *
 * pxTaskCode    - Task entry point.
 * pcName        - Name to copy into the TCB (may be NULL; truncated to
 *                 configMAX_TASK_NAME_LEN - 1 characters).
 * ulStackDepth  - Stack size in StackType_t words.
 * pvParameters  - Argument passed to the task function.
 * uxPriority    - Requested priority; clamped below configMAX_PRIORITIES.
 *                 On MPU ports the portPRIVILEGE_BIT flag is extracted and
 *                 stripped here.
 * pxCreatedTask - Optional out parameter receiving the handle; may be NULL.
 * pxNewTCB      - The (already zeroed) TCB, with pxStack already assigned.
 * xRegions      - MPU region definitions, or NULL (ignored on non-MPU ports).
 *
 * Does not link the task into any scheduler list - that is done afterwards
 * by prvAddNewTaskToReadyList().
 */
static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                                  const char * const pcName, /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
                                  const uint32_t ulStackDepth,
                                  void * const pvParameters,
                                  UBaseType_t uxPriority,
                                  TaskHandle_t * const pxCreatedTask,
                                  TCB_t * pxNewTCB,
                                  const MemoryRegion_t * const xRegions )
{
    StackType_t * pxTopOfStack;
    UBaseType_t x;

    #if ( portUSING_MPU_WRAPPERS == 1 )
        /* Should the task be created in privileged mode? */
        BaseType_t xRunPrivileged;

        if( ( uxPriority & portPRIVILEGE_BIT ) != 0U )
        {
            xRunPrivileged = pdTRUE;
        }
        else
        {
            xRunPrivileged = pdFALSE;
        }
        /* The privilege request flag is not part of the numeric priority. */
        uxPriority &= ~portPRIVILEGE_BIT;
    #endif /* portUSING_MPU_WRAPPERS == 1 */

    /* Avoid dependency on memset() if it is not required. */
    #if ( tskSET_NEW_STACKS_TO_KNOWN_VALUE == 1 )
    {
        /* Fill the stack with a known value to assist debugging. */
        ( void ) memset( pxNewTCB->pxStack, ( int ) tskSTACK_FILL_BYTE, ( size_t ) ulStackDepth * sizeof( StackType_t ) );
    }
    #endif /* tskSET_NEW_STACKS_TO_KNOWN_VALUE */

    /* Calculate the top of stack address.  This depends on whether the stack
     * grows from high memory to low (as per the 80x86) or vice versa.
     * portSTACK_GROWTH is used to make the result positive or negative as required
     * by the port. */
    #if ( portSTACK_GROWTH < 0 )
    {
        pxTopOfStack = &( pxNewTCB->pxStack[ ulStackDepth - ( uint32_t ) 1 ] );
        /* Round the top of stack DOWN to the port's required alignment. */
        pxTopOfStack = ( StackType_t * ) ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack ) & ( ~( ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) ) ); /*lint !e923 !e9033 !e9078 MISRA exception.  Avoiding casts between pointers and integers is not practical.  Size differences accounted for using portPOINTER_SIZE_TYPE type.  Checked by assert(). */

        /* Check the alignment of the calculated top of stack is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxTopOfStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        #if ( configRECORD_STACK_HIGH_ADDRESS == 1 )
        {
            /* Also record the stack's high address, which may assist
             * debugging. */
            pxNewTCB->pxEndOfStack = pxTopOfStack;
        }
        #endif /* configRECORD_STACK_HIGH_ADDRESS */
    }
    #else /* portSTACK_GROWTH */
    {
        pxTopOfStack = pxNewTCB->pxStack;

        /* Check the alignment of the stack buffer is correct. */
        configASSERT( ( ( ( portPOINTER_SIZE_TYPE ) pxNewTCB->pxStack & ( portPOINTER_SIZE_TYPE ) portBYTE_ALIGNMENT_MASK ) == 0UL ) );

        /* The other extreme of the stack space is required if stack checking is
         * performed. */
        pxNewTCB->pxEndOfStack = pxNewTCB->pxStack + ( ulStackDepth - ( uint32_t ) 1 );
    }
    #endif /* portSTACK_GROWTH */

    /* Store the task name in the TCB. */
    if( pcName != NULL )
    {
        for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
        {
            pxNewTCB->pcTaskName[ x ] = pcName[ x ];

            /* Don't copy all configMAX_TASK_NAME_LEN if the string is shorter than
             * configMAX_TASK_NAME_LEN characters just in case the memory after the
             * string is not accessible (extremely unlikely). */
            if( pcName[ x ] == ( char ) 0x00 )
            {
                break;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Ensure the name string is terminated in the case that the string length
         * was greater or equal to configMAX_TASK_NAME_LEN. */
        pxNewTCB->pcTaskName[ configMAX_TASK_NAME_LEN - 1 ] = '\0';
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    /* This is used as an array index so must ensure it's not too large. */
    configASSERT( uxPriority < configMAX_PRIORITIES );

    /* Silently clamp out-of-range priorities when configASSERT() is a no-op. */
    if( uxPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
    {
        uxPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    pxNewTCB->uxPriority = uxPriority;
    #if ( configUSE_MUTEXES == 1 )
    {
        /* The base priority is restored when an inherited priority is
         * disinherited. */
        pxNewTCB->uxBasePriority = uxPriority;
    }
    #endif /* configUSE_MUTEXES */

    vListInitialiseItem( &( pxNewTCB->xStateListItem ) );
    vListInitialiseItem( &( pxNewTCB->xEventListItem ) );

    /* Set the pxNewTCB as a link back from the ListItem_t.  This is so we can get
     * back to the containing TCB from a generic item in a list. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xStateListItem ), pxNewTCB );

    /* Event lists are always in priority order.  The item value is inverted
     * (configMAX_PRIORITIES - priority) because lists are sorted low to high. */
    listSET_LIST_ITEM_VALUE( &( pxNewTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
    listSET_LIST_ITEM_OWNER( &( pxNewTCB->xEventListItem ), pxNewTCB );

    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        vPortStoreTaskMPUSettings( &( pxNewTCB->xMPUSettings ), xRegions, pxNewTCB->pxStack, ulStackDepth );
    }
    #else
    {
        /* Avoid compiler warning about unreferenced parameter. */
        ( void ) xRegions;
    }
    #endif

    #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
    {
        /* Allocate and initialize memory for the task's TLS Block. */
        configINIT_TLS_BLOCK( pxNewTCB->xTLSBlock, pxTopOfStack );
    }
    #endif

    /* Initialize the TCB stack to look as if the task was already running,
     * but had been interrupted by the scheduler.  The return address is set
     * to the start of the task function.  Once the stack has been initialised
     * the top of stack variable is updated. */
    #if ( portUSING_MPU_WRAPPERS == 1 )
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters, xRunPrivileged, &( pxNewTCB->xMPUSettings ) );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #else /* portUSING_MPU_WRAPPERS */
    {
        /* If the port has capability to detect stack overflow,
         * pass the stack end address to the stack initialization
         * function as well. */
        #if ( portHAS_STACK_OVERFLOW_CHECKING == 1 )
        {
            #if ( portSTACK_GROWTH < 0 )
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxStack, pxTaskCode, pvParameters );
            }
            #else /* portSTACK_GROWTH */
            {
                pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxNewTCB->pxEndOfStack, pxTaskCode, pvParameters );
            }
            #endif /* portSTACK_GROWTH */
        }
        #else /* portHAS_STACK_OVERFLOW_CHECKING */
        {
            pxNewTCB->pxTopOfStack = pxPortInitialiseStack( pxTopOfStack, pxTaskCode, pvParameters );
        }
        #endif /* portHAS_STACK_OVERFLOW_CHECKING */
    }
    #endif /* portUSING_MPU_WRAPPERS */

    if( pxCreatedTask != NULL )
    {
        /* Pass the handle out in an anonymous way.  The handle can be used to
         * change the created task's priority, delete the created task, etc.*/
        *pxCreatedTask = ( TaskHandle_t ) pxNewTCB;
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
1028 /*-----------------------------------------------------------*/
1029
/*
 * Place a freshly initialised task on the ready list and, if necessary,
 * make it the task that will run first (scheduler not yet started) or
 * trigger an immediate preemption (scheduler running and the new task has
 * a higher priority than the caller).  Also performs the one-time list
 * initialisation when the very first task is created.
 */
static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB )
{
    /* Ensure interrupts don't access the task lists while the lists are being
     * updated. */
    taskENTER_CRITICAL();
    {
        uxCurrentNumberOfTasks++;

        if( pxCurrentTCB == NULL )
        {
            /* There are no other tasks, or all the other tasks are in
             * the suspended state - make this the current task. */
            pxCurrentTCB = pxNewTCB;

            if( uxCurrentNumberOfTasks == ( UBaseType_t ) 1 )
            {
                /* This is the first task to be created so do the preliminary
                 * initialisation required.  We will not recover if this call
                 * fails, but we will report the failure. */
                prvInitialiseTaskLists();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            /* If the scheduler is not already running, make this task the
             * current task if it is the highest priority task to be created
             * so far. */
            if( xSchedulerRunning == pdFALSE )
            {
                if( pxCurrentTCB->uxPriority <= pxNewTCB->uxPriority )
                {
                    pxCurrentTCB = pxNewTCB;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Bumped on every create/delete so kernel aware debuggers know the
         * task lists have changed. */
        uxTaskNumber++;

        #if ( configUSE_TRACE_FACILITY == 1 )
        {
            /* Add a counter into the TCB for tracing only. */
            pxNewTCB->uxTCBNumber = uxTaskNumber;
        }
        #endif /* configUSE_TRACE_FACILITY */
        traceTASK_CREATE( pxNewTCB );

        prvAddTaskToReadyList( pxNewTCB );

        portSETUP_TCB( pxNewTCB );
    }
    taskEXIT_CRITICAL();

    if( xSchedulerRunning != pdFALSE )
    {
        /* If the created task is of a higher priority than the current task
         * then it should run now. */
        if( pxCurrentTCB->uxPriority < pxNewTCB->uxPriority )
        {
            taskYIELD_IF_USING_PREEMPTION();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }
}
1112 /*-----------------------------------------------------------*/
1113
1114 #if ( INCLUDE_vTaskDelete == 1 )
1115
    /*
     * Remove a task from all kernel lists and free its resources.
     *
     * xTaskToDelete - Handle of the task to delete, or NULL to delete the
     *                 calling task.
     *
     * A task cannot free its own TCB/stack while still running on that
     * stack, so a self-deleting task is parked on xTasksWaitingTermination
     * and the actual clean-up is deferred to the idle task.  Deleting
     * another task frees its memory immediately (outside the critical
     * section) via prvDeleteTCB().
     */
    void vTaskDelete( TaskHandle_t xTaskToDelete )
    {
        TCB_t * pxTCB;

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the calling task that is
             * being deleted. */
            pxTCB = prvGetTCBFromHandle( xTaskToDelete );

            /* Remove task from the ready/delayed list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* That was the last ready task at this priority - clear the
                 * priority's bit in the ready bitmap. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Increment the uxTaskNumber also so kernel aware debuggers can
             * detect that the task lists need re-generating.  This is done before
             * portPRE_TASK_DELETE_HOOK() as in the Windows port that macro will
             * not return. */
            uxTaskNumber++;

            if( pxTCB == pxCurrentTCB )
            {
                /* A task is deleting itself.  This cannot complete within the
                 * task itself, as a context switch to another task is required.
                 * Place the task in the termination list.  The idle task will
                 * check the termination list and free up any memory allocated by
                 * the scheduler for the TCB and stack of the deleted task. */
                vListInsertEnd( &xTasksWaitingTermination, &( pxTCB->xStateListItem ) );

                /* Increment the ucTasksDeleted variable so the idle task knows
                 * there is a task that has been deleted and that it should therefore
                 * check the xTasksWaitingTermination list. */
                ++uxDeletedTasksWaitingCleanUp;

                /* Call the delete hook before portPRE_TASK_DELETE_HOOK() as
                 * portPRE_TASK_DELETE_HOOK() does not return in the Win32 port. */
                traceTASK_DELETE( pxTCB );

                /* The pre-delete hook is primarily for the Windows simulator,
                 * in which Windows specific clean up operations are performed,
                 * after which it is not possible to yield away from this task -
                 * hence xYieldPending is used to latch that a context switch is
                 * required. */
                portPRE_TASK_DELETE_HOOK( pxTCB, &xYieldPending );
            }
            else
            {
                --uxCurrentNumberOfTasks;
                traceTASK_DELETE( pxTCB );

                /* Reset the next expected unblock time in case it referred to
                 * the task that has just been deleted. */
                prvResetNextTaskUnblockTime();
            }
        }
        taskEXIT_CRITICAL();

        /* If the task is not deleting itself, call prvDeleteTCB from outside of
         * critical section.  If a task deletes itself, prvDeleteTCB is called
         * from prvCheckTasksWaitingTermination which is called from Idle task. */
        if( pxTCB != pxCurrentTCB )
        {
            prvDeleteTCB( pxTCB );
        }

        /* Force a reschedule if it is the currently running task that has just
         * been deleted. */
        if( xSchedulerRunning != pdFALSE )
        {
            if( pxTCB == pxCurrentTCB )
            {
                configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
                portYIELD_WITHIN_API();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
    }
1212
1213 #endif /* INCLUDE_vTaskDelete */
1214 /*-----------------------------------------------------------*/
1215
1216 #if ( INCLUDE_xTaskDelayUntil == 1 )
1217
    /*
     * Block the calling task until an absolute wake time, giving a fixed
     * execution period that does not drift (unlike vTaskDelay(), which is
     * relative to the time of the call).
     *
     * pxPreviousWakeTime - In/out: the time the task last woke.  Must be
     *                      initialised with the current tick count before the
     *                      first call; updated here for the next cycle.
     * xTimeIncrement     - Desired period in ticks; must be > 0.
     *
     * Returns pdTRUE if the task actually delayed, pdFALSE if the wake time
     * had already passed (i.e. the cycle overran).  Tick counter wrap-around
     * is handled explicitly below.
     */
    BaseType_t xTaskDelayUntil( TickType_t * const pxPreviousWakeTime,
                                const TickType_t xTimeIncrement )
    {
        TickType_t xTimeToWake;
        BaseType_t xAlreadyYielded, xShouldDelay = pdFALSE;

        configASSERT( pxPreviousWakeTime );
        configASSERT( ( xTimeIncrement > 0U ) );
        configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );

        vTaskSuspendAll();
        {
            /* Minor optimisation.  The tick count cannot change in this
             * block. */
            const TickType_t xConstTickCount = xTickCount;

            /* Generate the tick time at which the task wants to wake.
             * Unsigned arithmetic - may intentionally wrap. */
            xTimeToWake = *pxPreviousWakeTime + xTimeIncrement;

            if( xConstTickCount < *pxPreviousWakeTime )
            {
                /* The tick count has overflowed since this function was
                 * last called.  In this case the only time we should ever
                 * actually delay is if the wake time has also overflowed,
                 * and the wake time is greater than the tick time.  When this
                 * is the case it is as if neither time had overflowed. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) && ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                /* The tick time has not overflowed.  In this case we will
                 * delay if either the wake time has overflowed, and/or the
                 * tick time is less than the wake time. */
                if( ( xTimeToWake < *pxPreviousWakeTime ) || ( xTimeToWake > xConstTickCount ) )
                {
                    xShouldDelay = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }

            /* Update the wake time ready for the next call. */
            *pxPreviousWakeTime = xTimeToWake;

            if( xShouldDelay != pdFALSE )
            {
                traceTASK_DELAY_UNTIL( xTimeToWake );

                /* prvAddCurrentTaskToDelayedList() needs the block time, not
                 * the time to wake, so subtract the current tick count. */
                prvAddCurrentTaskToDelayedList( xTimeToWake - xConstTickCount, pdFALSE );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        xAlreadyYielded = xTaskResumeAll();

        /* Force a reschedule if xTaskResumeAll has not already done so, we may
         * have put ourselves to sleep. */
        if( xAlreadyYielded == pdFALSE )
        {
            portYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xShouldDelay;
    }
1299
1300 #endif /* INCLUDE_xTaskDelayUntil */
1301 /*-----------------------------------------------------------*/
1302
1303 #if ( INCLUDE_vTaskDelay == 1 )
1304
vTaskDelay(const TickType_t xTicksToDelay)1305 void vTaskDelay( const TickType_t xTicksToDelay )
1306 {
1307 BaseType_t xAlreadyYielded = pdFALSE;
1308
1309 /* A delay time of zero just forces a reschedule. */
1310 if( xTicksToDelay > ( TickType_t ) 0U )
1311 {
1312 configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
1313 vTaskSuspendAll();
1314 {
1315 traceTASK_DELAY();
1316
1317 /* A task that is removed from the event list while the
1318 * scheduler is suspended will not get placed in the ready
1319 * list or removed from the blocked list until the scheduler
1320 * is resumed.
1321 *
1322 * This task cannot be in an event list as it is the currently
1323 * executing task. */
1324 prvAddCurrentTaskToDelayedList( xTicksToDelay, pdFALSE );
1325 }
1326 xAlreadyYielded = xTaskResumeAll();
1327 }
1328 else
1329 {
1330 mtCOVERAGE_TEST_MARKER();
1331 }
1332
1333 /* Force a reschedule if xTaskResumeAll has not already done so, we may
1334 * have put ourselves to sleep. */
1335 if( xAlreadyYielded == pdFALSE )
1336 {
1337 portYIELD_WITHIN_API();
1338 }
1339 else
1340 {
1341 mtCOVERAGE_TEST_MARKER();
1342 }
1343 }
1344
1345 #endif /* INCLUDE_vTaskDelay */
1346 /*-----------------------------------------------------------*/
1347
1348 #if ( ( INCLUDE_eTaskGetState == 1 ) || ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_xTaskAbortDelay == 1 ) )
1349
    /*
     * Return the current state (eRunning, eReady, eBlocked, eSuspended or
     * eDeleted) of the task referenced by xTask.  The state is deduced from
     * which kernel list the task's state/event list items are on, sampled
     * inside a critical section so the two reads are consistent.
     */
    eTaskState eTaskGetState( TaskHandle_t xTask )
    {
        eTaskState eReturn;
        List_t const * pxStateList;
        List_t const * pxEventList;
        List_t const * pxDelayedList;
        List_t const * pxOverflowedDelayedList;
        const TCB_t * const pxTCB = xTask;

        configASSERT( pxTCB );

        if( pxTCB == pxCurrentTCB )
        {
            /* The task calling this function is querying its own state. */
            eReturn = eRunning;
        }
        else
        {
            taskENTER_CRITICAL();
            {
                pxStateList = listLIST_ITEM_CONTAINER( &( pxTCB->xStateListItem ) );
                pxEventList = listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) );
                pxDelayedList = pxDelayedTaskList;
                pxOverflowedDelayedList = pxOverflowDelayedTaskList;
            }
            taskEXIT_CRITICAL();

            if( pxEventList == &xPendingReadyList )
            {
                /* The task has been placed on the pending ready list, so its
                 * state is eReady regardless of what list the task's state list
                 * item is currently placed on. */
                eReturn = eReady;
            }
            else if( ( pxStateList == pxDelayedList ) || ( pxStateList == pxOverflowedDelayedList ) )
            {
                /* The task being queried is referenced from one of the Blocked
                 * lists. */
                eReturn = eBlocked;
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
                else if( pxStateList == &xSuspendedTaskList )
                {
                    /* The task being queried is referenced from the suspended
                     * list.  Is it genuinely suspended or is it blocked
                     * indefinitely? */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL )
                    {
                        #if ( configUSE_TASK_NOTIFICATIONS == 1 )
                        {
                            BaseType_t x;

                            /* The task does not appear on the event list of
                             * any of the RTOS objects, but could still be in the
                             * blocked state if it is waiting on its notification
                             * rather than waiting on an object.  If not, it is
                             * suspended. */
                            eReturn = eSuspended;

                            for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                            {
                                if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                                {
                                    eReturn = eBlocked;
                                    break;
                                }
                            }
                        }
                        #else /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                        {
                            eReturn = eSuspended;
                        }
                        #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
                    }
                    else
                    {
                        eReturn = eBlocked;
                    }
                }
            #endif /* if ( INCLUDE_vTaskSuspend == 1 ) */

            #if ( INCLUDE_vTaskDelete == 1 )
                else if( ( pxStateList == &xTasksWaitingTermination ) || ( pxStateList == NULL ) )
                {
                    /* The task being queried is referenced from the deleted
                     * tasks list, or it is not referenced from any lists at
                     * all. */
                    eReturn = eDeleted;
                }
            #endif

            else /*lint !e525 Negative indentation is intended to make use of pre-processor clearer. */
            {
                /* If the task is not in any other state, it must be in the
                 * Ready (including pending ready) state. */
                eReturn = eReady;
            }
        }

        return eReturn;
    } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1452
1453 #endif /* INCLUDE_eTaskGetState */
1454 /*-----------------------------------------------------------*/
1455
1456 #if ( INCLUDE_uxTaskPriorityGet == 1 )
1457
uxTaskPriorityGet(const TaskHandle_t xTask)1458 UBaseType_t uxTaskPriorityGet( const TaskHandle_t xTask )
1459 {
1460 TCB_t const * pxTCB;
1461 UBaseType_t uxReturn;
1462
1463 taskENTER_CRITICAL();
1464 {
1465 /* If null is passed in here then it is the priority of the task
1466 * that called uxTaskPriorityGet() that is being queried. */
1467 pxTCB = prvGetTCBFromHandle( xTask );
1468 uxReturn = pxTCB->uxPriority;
1469 }
1470 taskEXIT_CRITICAL();
1471
1472 return uxReturn;
1473 }
1474
1475 #endif /* INCLUDE_uxTaskPriorityGet */
1476 /*-----------------------------------------------------------*/
1477
1478 #if ( INCLUDE_uxTaskPriorityGet == 1 )
1479
uxTaskPriorityGetFromISR(const TaskHandle_t xTask)1480 UBaseType_t uxTaskPriorityGetFromISR( const TaskHandle_t xTask )
1481 {
1482 TCB_t const * pxTCB;
1483 UBaseType_t uxReturn;
1484 UBaseType_t uxSavedInterruptState;
1485
1486 /* RTOS ports that support interrupt nesting have the concept of a
1487 * maximum system call (or maximum API call) interrupt priority.
1488 * Interrupts that are above the maximum system call priority are keep
1489 * permanently enabled, even when the RTOS kernel is in a critical section,
1490 * but cannot make any calls to FreeRTOS API functions. If configASSERT()
1491 * is defined in FreeRTOSConfig.h then
1492 * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1493 * failure if a FreeRTOS API function is called from an interrupt that has
1494 * been assigned a priority above the configured maximum system call
1495 * priority. Only FreeRTOS functions that end in FromISR can be called
1496 * from interrupts that have been assigned a priority at or (logically)
1497 * below the maximum system call interrupt priority. FreeRTOS maintains a
1498 * separate interrupt safe API to ensure interrupt entry is as fast and as
1499 * simple as possible. More information (albeit Cortex-M specific) is
1500 * provided on the following link:
1501 * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1502 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1503
1504 uxSavedInterruptState = portSET_INTERRUPT_MASK_FROM_ISR();
1505 {
1506 /* If null is passed in here then it is the priority of the calling
1507 * task that is being queried. */
1508 pxTCB = prvGetTCBFromHandle( xTask );
1509 uxReturn = pxTCB->uxPriority;
1510 }
1511 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptState );
1512
1513 return uxReturn;
1514 }
1515
1516 #endif /* INCLUDE_uxTaskPriorityGet */
1517 /*-----------------------------------------------------------*/
1518
1519 #if ( INCLUDE_vTaskPrioritySet == 1 )
1520
    /*
     * Change the priority of a task.
     *
     * xTask         - Handle of the task to modify, or NULL to modify the
     *                 calling task.
     * uxNewPriority - The new priority; clamped to configMAX_PRIORITIES - 1.
     *
     * When mutexes are in use the *base* priority is always updated, but the
     * effective priority is left alone if the task is currently running with
     * an inherited (boosted) priority - disinheritance restores it later.
     * A yield is requested if the change could have made a different task
     * the highest priority ready task.
     */
    void vTaskPrioritySet( TaskHandle_t xTask,
                           UBaseType_t uxNewPriority )
    {
        TCB_t * pxTCB;
        UBaseType_t uxCurrentBasePriority, uxPriorityUsedOnEntry;
        BaseType_t xYieldRequired = pdFALSE;

        configASSERT( uxNewPriority < configMAX_PRIORITIES );

        /* Ensure the new priority is valid. */
        if( uxNewPriority >= ( UBaseType_t ) configMAX_PRIORITIES )
        {
            uxNewPriority = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) 1U;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the priority of the calling
             * task that is being changed. */
            pxTCB = prvGetTCBFromHandle( xTask );

            traceTASK_PRIORITY_SET( pxTCB, uxNewPriority );

            #if ( configUSE_MUTEXES == 1 )
            {
                /* Compare against the base priority - the effective priority
                 * may be temporarily boosted by priority inheritance. */
                uxCurrentBasePriority = pxTCB->uxBasePriority;
            }
            #else
            {
                uxCurrentBasePriority = pxTCB->uxPriority;
            }
            #endif

            if( uxCurrentBasePriority != uxNewPriority )
            {
                /* The priority change may have readied a task of higher
                 * priority than the calling task. */
                if( uxNewPriority > uxCurrentBasePriority )
                {
                    if( pxTCB != pxCurrentTCB )
                    {
                        /* The priority of a task other than the currently
                         * running task is being raised.  Is the priority being
                         * raised above that of the running task? */
                        if( uxNewPriority > pxCurrentTCB->uxPriority )
                        {
                            xYieldRequired = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    else
                    {
                        /* The priority of the running task is being raised,
                         * but the running task must already be the highest
                         * priority task able to run so no yield is required. */
                    }
                }
                else if( pxTCB == pxCurrentTCB )
                {
                    /* Setting the priority of the running task down means
                     * there may now be another task of higher priority that
                     * is ready to execute. */
                    xYieldRequired = pdTRUE;
                }
                else
                {
                    /* Setting the priority of any other task down does not
                     * require a yield as the running task must be above the
                     * new priority of the task being modified. */
                }

                /* Remember the ready list the task might be referenced from
                 * before its uxPriority member is changed so the
                 * taskRESET_READY_PRIORITY() macro can function correctly. */
                uxPriorityUsedOnEntry = pxTCB->uxPriority;

                #if ( configUSE_MUTEXES == 1 )
                {
                    /* Only change the priority being used if the task is not
                     * currently using an inherited priority. */
                    if( pxTCB->uxBasePriority == pxTCB->uxPriority )
                    {
                        pxTCB->uxPriority = uxNewPriority;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* The base priority gets set whatever. */
                    pxTCB->uxBasePriority = uxNewPriority;
                }
                #else /* if ( configUSE_MUTEXES == 1 ) */
                {
                    pxTCB->uxPriority = uxNewPriority;
                }
                #endif /* if ( configUSE_MUTEXES == 1 ) */

                /* Only reset the event list item value if the value is not
                 * being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxNewPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task is in the blocked or suspended list we need do
                 * nothing more than change its priority variable. However, if
                 * the task is in a ready list it needs to be removed and placed
                 * in the list appropriate to its new priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                {
                    /* The task is currently in its ready list - remove before
                     * adding it to its new ready list.  As we are in a critical
                     * section we can do this even if the scheduler is suspended. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( uxPriorityUsedOnEntry, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                if( xYieldRequired != pdFALSE )
                {
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Remove compiler warning about unused variables when the port
                 * optimised task selection is not being used. */
                ( void ) uxPriorityUsedOnEntry;
            }
        }
        taskEXIT_CRITICAL();
    }
1681
1682 #endif /* INCLUDE_vTaskPrioritySet */
1683 /*-----------------------------------------------------------*/
1684
1685 #if ( INCLUDE_vTaskSuspend == 1 )
1686
    /* Place the task referenced by xTaskToSuspend (or the calling task when
     * NULL is passed) into the Suspended state.  The task is removed from the
     * ready/delayed lists and from any event list it is waiting on, then
     * appended to xSuspendedTaskList.  If the running task suspends itself a
     * yield is forced; if the scheduler has not yet started, pxCurrentTCB is
     * re-evaluated instead. */
    void vTaskSuspend( TaskHandle_t xTaskToSuspend )
    {
        TCB_t * pxTCB;

        taskENTER_CRITICAL();
        {
            /* If null is passed in here then it is the running task that is
             * being suspended. */
            pxTCB = prvGetTCBFromHandle( xTaskToSuspend );

            traceTASK_SUSPEND( pxTCB );

            /* Remove task from the ready/delayed list and place in the
             * suspended list. */
            if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
            {
                /* uxListRemove() returned 0 so the task was the last one on
                 * its list - clear the ready-priority tracking for that
                 * (now empty) priority. */
                taskRESET_READY_PRIORITY( pxTCB->uxPriority );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Is the task waiting on an event also? */
            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
            {
                ( void ) uxListRemove( &( pxTCB->xEventListItem ) );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            vListInsertEnd( &xSuspendedTaskList, &( pxTCB->xStateListItem ) );

            #if ( configUSE_TASK_NOTIFICATIONS == 1 )
            {
                BaseType_t x;

                for( x = 0; x < configTASK_NOTIFICATION_ARRAY_ENTRIES; x++ )
                {
                    if( pxTCB->ucNotifyState[ x ] == taskWAITING_NOTIFICATION )
                    {
                        /* The task was blocked to wait for a notification, but is
                         * now suspended, so no notification was received. */
                        pxTCB->ucNotifyState[ x ] = taskNOT_WAITING_NOTIFICATION;
                    }
                }
            }
            #endif /* if ( configUSE_TASK_NOTIFICATIONS == 1 ) */
        }
        taskEXIT_CRITICAL();

        if( xSchedulerRunning != pdFALSE )
        {
            /* Reset the next expected unblock time in case it referred to the
             * task that is now in the Suspended state. */
            taskENTER_CRITICAL();
            {
                prvResetNextTaskUnblockTime();
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        if( pxTCB == pxCurrentTCB )
        {
            if( xSchedulerRunning != pdFALSE )
            {
                /* The current task has just been suspended, so force a context
                 * switch.  Suspending the running task while the scheduler is
                 * suspended would be an error - hence the assert. */
                configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );
                portYIELD_WITHIN_API();
            }
            else
            {
                /* The scheduler is not running, but the task that was pointed
                 * to by pxCurrentTCB has just been suspended and pxCurrentTCB
                 * must be adjusted to point to a different task. */
                if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == uxCurrentNumberOfTasks ) /*lint !e931 Right has no side effect, just volatile. */
                {
                    /* No other tasks are ready, so set pxCurrentTCB back to
                     * NULL so when the next task is created pxCurrentTCB will
                     * be set to point to it no matter what its relative priority
                     * is. */
                    pxCurrentTCB = NULL;
                }
                else
                {
                    vTaskSwitchContext();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
1787
1788 #endif /* INCLUDE_vTaskSuspend */
1789 /*-----------------------------------------------------------*/
1790
1791 #if ( INCLUDE_vTaskSuspend == 1 )
1792
prvTaskIsTaskSuspended(const TaskHandle_t xTask)1793 static BaseType_t prvTaskIsTaskSuspended( const TaskHandle_t xTask )
1794 {
1795 BaseType_t xReturn = pdFALSE;
1796 const TCB_t * const pxTCB = xTask;
1797
1798 /* Accesses xPendingReadyList so must be called from a critical
1799 * section. */
1800
1801 /* It does not make sense to check if the calling task is suspended. */
1802 configASSERT( xTask );
1803
1804 /* Is the task being resumed actually in the suspended list? */
1805 if( listIS_CONTAINED_WITHIN( &xSuspendedTaskList, &( pxTCB->xStateListItem ) ) != pdFALSE )
1806 {
1807 /* Has the task already been resumed from within an ISR? */
1808 if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) == pdFALSE )
1809 {
1810 /* Is it in the suspended list because it is in the Suspended
1811 * state, or because is is blocked with no timeout? */
1812 if( listIS_CONTAINED_WITHIN( NULL, &( pxTCB->xEventListItem ) ) != pdFALSE ) /*lint !e961. The cast is only redundant when NULL is used. */
1813 {
1814 xReturn = pdTRUE;
1815 }
1816 else
1817 {
1818 mtCOVERAGE_TEST_MARKER();
1819 }
1820 }
1821 else
1822 {
1823 mtCOVERAGE_TEST_MARKER();
1824 }
1825 }
1826 else
1827 {
1828 mtCOVERAGE_TEST_MARKER();
1829 }
1830
1831 return xReturn;
1832 } /*lint !e818 xTask cannot be a pointer to const because it is a typedef. */
1833
1834 #endif /* INCLUDE_vTaskSuspend */
1835 /*-----------------------------------------------------------*/
1836
1837 #if ( INCLUDE_vTaskSuspend == 1 )
1838
    /* Move a task out of the Suspended state and back onto the ready list,
     * yielding if the resumed task has a higher priority than the caller.
     * Resuming a task that is not suspended (or the running task) is a
     * harmless no-op. */
    void vTaskResume( TaskHandle_t xTaskToResume )
    {
        TCB_t * const pxTCB = xTaskToResume;

        /* It does not make sense to resume the calling task. */
        configASSERT( xTaskToResume );

        /* The parameter cannot be NULL as it is impossible to resume the
         * currently executing task. */
        if( ( pxTCB != pxCurrentTCB ) && ( pxTCB != NULL ) )
        {
            taskENTER_CRITICAL();
            {
                if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
                {
                    traceTASK_RESUME( pxTCB );

                    /* The ready list can be accessed even if the scheduler is
                     * suspended because this is inside a critical section. */
                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* A higher priority task may have just been resumed. */
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* This yield may not cause the task just resumed to run,
                         * but will leave the lists in the correct state for the
                         * next yield. */
                        taskYIELD_IF_USING_PREEMPTION();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
1886
1887 #endif /* INCLUDE_vTaskSuspend */
1888
1889 /*-----------------------------------------------------------*/
1890
1891 #if ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) )
1892
    /* Interrupt-safe version of vTaskResume().  Returns pdTRUE if resuming
     * the task should cause a context switch (the caller is then expected to
     * request one, e.g. via portYIELD_FROM_ISR()).  If the scheduler is
     * suspended the task is parked on xPendingReadyList until
     * xTaskResumeAll() runs. */
    BaseType_t xTaskResumeFromISR( TaskHandle_t xTaskToResume )
    {
        BaseType_t xYieldRequired = pdFALSE;
        TCB_t * const pxTCB = xTaskToResume;
        UBaseType_t uxSavedInterruptStatus;

        configASSERT( xTaskToResume );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum system call (or maximum API call) interrupt priority.
         * Interrupts that are above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts that have been assigned a priority at or (logically)
         * below the maximum system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
        {
            if( prvTaskIsTaskSuspended( pxTCB ) != pdFALSE )
            {
                traceTASK_RESUME_FROM_ISR( pxTCB );

                /* Check the ready lists can be accessed. */
                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    /* Ready lists can be accessed so move the task from the
                     * suspended list to the ready list directly. */
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        xYieldRequired = pdTRUE;

                        /* Mark that a yield is pending in case the user is not
                         * using the return value to initiate a context switch
                         * from the ISR using portYIELD_FROM_ISR. */
                        xYieldPending = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed or ready lists cannot be accessed so the task
                     * is held in the pending ready list until the scheduler is
                     * unsuspended. */
                    vListInsertEnd( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

        return xYieldRequired;
    }
1964
1965 #endif /* ( ( INCLUDE_xTaskResumeFromISR == 1 ) && ( INCLUDE_vTaskSuspend == 1 ) ) */
1966 /*-----------------------------------------------------------*/
1967
/* Create the idle task (and, when configUSE_TIMERS is set, the timer service
 * task), then hand control to the port layer via xPortStartScheduler().  In
 * the normal case this function does not return - the scheduler takes over
 * and runs the created tasks. */
void vTaskStartScheduler( void )
{
    BaseType_t xReturn;

    /* Add the idle task at the lowest priority. */
    #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
    {
        StaticTask_t * pxIdleTaskTCBBuffer = NULL;
        StackType_t * pxIdleTaskStackBuffer = NULL;
        uint32_t ulIdleTaskStackSize;

        /* The Idle task is created using user provided RAM - obtain the
         * address of the RAM then create the idle task. */
        vApplicationGetIdleTaskMemory( &pxIdleTaskTCBBuffer, &pxIdleTaskStackBuffer, &ulIdleTaskStackSize );
        xIdleTaskHandle = xTaskCreateStatic( prvIdleTask,
                                             configIDLE_TASK_NAME,
                                             ulIdleTaskStackSize,
                                             ( void * ) NULL, /*lint !e961.  The cast is not redundant for all compilers. */
                                             portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                                             pxIdleTaskStackBuffer,
                                             pxIdleTaskTCBBuffer ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */

        if( xIdleTaskHandle != NULL )
        {
            xReturn = pdPASS;
        }
        else
        {
            xReturn = pdFAIL;
        }
    }
    #else /* if ( configSUPPORT_STATIC_ALLOCATION == 1 ) */
    {
        /* The Idle task is being created using dynamically allocated RAM. */
        xReturn = xTaskCreate( prvIdleTask,
                               configIDLE_TASK_NAME,
                               configMINIMAL_STACK_SIZE,
                               ( void * ) NULL,
                               portPRIVILEGE_BIT, /* In effect ( tskIDLE_PRIORITY | portPRIVILEGE_BIT ), but tskIDLE_PRIORITY is zero. */
                               &xIdleTaskHandle ); /*lint !e961 MISRA exception, justified as it is not a redundant explicit cast to all supported compilers. */
    }
    #endif /* configSUPPORT_STATIC_ALLOCATION */

    #if ( configUSE_TIMERS == 1 )
    {
        /* Only attempt to create the timer service task if the idle task was
         * created successfully. */
        if( xReturn == pdPASS )
        {
            xReturn = xTimerCreateTimerTask();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configUSE_TIMERS */

    if( xReturn == pdPASS )
    {
        /* freertos_tasks_c_additions_init() should only be called if the user
         * definable macro FREERTOS_TASKS_C_ADDITIONS_INIT() is defined, as that is
         * the only macro called by the function. */
        #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        {
            freertos_tasks_c_additions_init();
        }
        #endif

        /* Interrupts are turned off here, to ensure a tick does not occur
         * before or during the call to xPortStartScheduler().  The stacks of
         * the created tasks contain a status word with interrupts switched on
         * so interrupts will automatically get re-enabled when the first task
         * starts to run. */
        portDISABLE_INTERRUPTS();

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Switch C-Runtime's TLS Block to point to the TLS
             * block specific to the task that will run first. */
            configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
        }
        #endif

        xNextTaskUnblockTime = portMAX_DELAY;
        xSchedulerRunning = pdTRUE;
        xTickCount = ( TickType_t ) configINITIAL_TICK_COUNT;

        /* If configGENERATE_RUN_TIME_STATS is defined then the following
         * macro must be defined to configure the timer/counter used to generate
         * the run time counter time base.  NOTE:  If configGENERATE_RUN_TIME_STATS
         * is set to 0 and the following line fails to build then ensure you do not
         * have portCONFIGURE_TIMER_FOR_RUN_TIME_STATS() defined in your
         * FreeRTOSConfig.h file. */
        portCONFIGURE_TIMER_FOR_RUN_TIME_STATS();

        traceTASK_SWITCHED_IN();

        /* Setting up the timer tick is hardware specific and thus in the
         * portable interface. */
        xPortStartScheduler();

        /* In most cases, xPortStartScheduler() will not return.  If it
         * returns pdTRUE then there was not enough heap memory available
         * to create either the Idle or the Timer task.  If it returned
         * pdFALSE, then the application called xTaskEndScheduler().
         * Most ports don't implement xTaskEndScheduler() as there is
         * nothing to return to. */
    }
    else
    {
        /* This line will only be reached if the kernel could not be started,
         * because there was not enough FreeRTOS heap to create the idle task
         * or the timer task. */
        configASSERT( xReturn != errCOULD_NOT_ALLOCATE_REQUIRED_MEMORY );
    }

    /* Prevent compiler warnings if INCLUDE_xTaskGetIdleTaskHandle is set to 0,
     * meaning xIdleTaskHandle is not used anywhere else. */
    ( void ) xIdleTaskHandle;

    /* OpenOCD makes use of uxTopUsedPriority for thread debugging.  Prevent uxTopUsedPriority
     * from getting optimized out as it is no longer used by the kernel. */
    ( void ) uxTopUsedPriority;
}
2091 /*-----------------------------------------------------------*/
2092
/* Stop the scheduler.  Only meaningful on ports that implement
 * vPortEndScheduler() - most embedded ports have nothing to return to. */
void vTaskEndScheduler( void )
{
    /* Stop the scheduler interrupts and call the portable scheduler end
     * routine so the original ISRs can be restored if necessary.  The port
     * layer must ensure interrupts enable bit is left in the correct state.
     * Interrupts are disabled before xSchedulerRunning is cleared so no tick
     * can be processed while the kernel is shutting down. */
    portDISABLE_INTERRUPTS();
    xSchedulerRunning = pdFALSE;
    vPortEndScheduler();
}
2102 /*----------------------------------------------------------*/
2103
/* Suspend the scheduler (prevent context switches without disabling
 * interrupts).  Calls nest; each call must be balanced by a call to
 * xTaskResumeAll(). */
void vTaskSuspendAll( void )
{
    /* A critical section is not required as the variable is of type
     * BaseType_t.  Please read Richard Barry's reply in the following link to a
     * post in the FreeRTOS support forum before reporting this as a bug! -
     * https://goo.gl/wu4acr */

    /* portSOFTWARE_BARRIER() is only implemented for emulated/simulated ports that
     * do not otherwise exhibit real time behaviour. */
    portSOFTWARE_BARRIER();

    /* The scheduler is suspended if uxSchedulerSuspended is non-zero.  An increment
     * is used to allow calls to vTaskSuspendAll() to nest. */
    ++uxSchedulerSuspended;

    /* Enforces ordering for ports and optimised compilers that may otherwise place
     * the above increment elsewhere. */
    portMEMORY_BARRIER();
}
2123 /*----------------------------------------------------------*/
2124
2125 #if ( configUSE_TICKLESS_IDLE != 0 )
2126
    /* Return the number of ticks for which the tick interrupt could be
     * suppressed while idling: zero when any other task could run (a higher
     * priority ready task, or another ready task at the idle priority), or
     * the time until the next task is due to unblock otherwise.  Used by the
     * tickless idle implementation. */
    static TickType_t prvGetExpectedIdleTime( void )
    {
        TickType_t xReturn;
        UBaseType_t uxHigherPriorityReadyTasks = pdFALSE;

        /* uxHigherPriorityReadyTasks takes care of the case where
         * configUSE_PREEMPTION is 0, so there may be tasks above the idle priority
         * task that are in the Ready state, even though the idle task is
         * running. */
        #if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 )
        {
            if( uxTopReadyPriority > tskIDLE_PRIORITY )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #else
        {
            const UBaseType_t uxLeastSignificantBit = ( UBaseType_t ) 0x01;

            /* When port optimised task selection is used the uxTopReadyPriority
             * variable is used as a bit map.  If bits other than the least
             * significant bit are set then there are tasks that have a priority
             * above the idle priority that are in the Ready state.  This takes
             * care of the case where the co-operative scheduler is in use. */
            if( uxTopReadyPriority > uxLeastSignificantBit )
            {
                uxHigherPriorityReadyTasks = pdTRUE;
            }
        }
        #endif /* if ( configUSE_PORT_OPTIMISED_TASK_SELECTION == 0 ) */

        if( pxCurrentTCB->uxPriority > tskIDLE_PRIORITY )
        {
            /* A task above idle priority is running - no idle time expected. */
            xReturn = 0;
        }
        else if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > 1 )
        {
            /* There are other idle priority tasks in the ready state.  If
             * time slicing is used then the very next tick interrupt must be
             * processed. */
            xReturn = 0;
        }
        else if( uxHigherPriorityReadyTasks != pdFALSE )
        {
            /* There are tasks in the Ready state that have a priority above the
             * idle priority.  This path can only be reached if
             * configUSE_PREEMPTION is 0. */
            xReturn = 0;
        }
        else
        {
            xReturn = xNextTaskUnblockTime - xTickCount;
        }

        return xReturn;
    }
2184
2185 #endif /* configUSE_TICKLESS_IDLE */
2186 /*----------------------------------------------------------*/
2187
/* Undo one level of vTaskSuspendAll() nesting.  When the nesting count hits
 * zero: move tasks readied from ISRs (parked on xPendingReadyList) onto the
 * ready lists, replay any tick interrupts that occurred while the scheduler
 * was suspended, and yield if required.  Returns pdTRUE if a yield was
 * performed here (so the caller need not yield again), otherwise pdFALSE. */
BaseType_t xTaskResumeAll( void )
{
    TCB_t * pxTCB = NULL;
    BaseType_t xAlreadyYielded = pdFALSE;

    /* If uxSchedulerSuspended is zero then this function does not match a
     * previous call to vTaskSuspendAll(). */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* It is possible that an ISR caused a task to be removed from an event
     * list while the scheduler was suspended.  If this was the case then the
     * removed task will have been added to the xPendingReadyList.  Once the
     * scheduler has been resumed it is safe to move all the pending ready
     * tasks from this list into their appropriate ready list. */
    taskENTER_CRITICAL();
    {
        --uxSchedulerSuspended;

        if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
        {
            if( uxCurrentNumberOfTasks > ( UBaseType_t ) 0U )
            {
                /* Move any readied tasks from the pending list into the
                 * appropriate ready list. */
                while( listLIST_IS_EMPTY( &xPendingReadyList ) == pdFALSE )
                {
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xPendingReadyList ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                    portMEMORY_BARRIER();
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );

                    /* If the moved task has a priority higher than the current
                     * task then a yield must be performed. */
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        xYieldPending = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                if( pxTCB != NULL )
                {
                    /* A task was unblocked while the scheduler was suspended,
                     * which may have prevented the next unblock time from being
                     * re-calculated, in which case re-calculate it now.  Mainly
                     * important for low power tickless implementations, where
                     * this can prevent an unnecessary exit from low power
                     * state. */
                    prvResetNextTaskUnblockTime();
                }

                /* If any ticks occurred while the scheduler was suspended then
                 * they should be processed now.  This ensures the tick count does
                 * not slip, and that any delayed tasks are resumed at the correct
                 * time. */
                {
                    TickType_t xPendedCounts = xPendedTicks; /* Non-volatile copy. */

                    if( xPendedCounts > ( TickType_t ) 0U )
                    {
                        do
                        {
                            /* xTaskIncrementTick() returns pdTRUE when a
                             * context switch is required. */
                            if( xTaskIncrementTick() != pdFALSE )
                            {
                                xYieldPending = pdTRUE;
                            }
                            else
                            {
                                mtCOVERAGE_TEST_MARKER();
                            }

                            --xPendedCounts;
                        } while( xPendedCounts > ( TickType_t ) 0U );

                        xPendedTicks = 0;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }

                if( xYieldPending != pdFALSE )
                {
                    #if ( configUSE_PREEMPTION != 0 )
                    {
                        xAlreadyYielded = pdTRUE;
                    }
                    #endif
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    taskEXIT_CRITICAL();

    return xAlreadyYielded;
}
2298 /*-----------------------------------------------------------*/
2299
xTaskGetTickCount(void)2300 TickType_t xTaskGetTickCount( void )
2301 {
2302 TickType_t xTicks;
2303
2304 /* Critical section required if running on a 16 bit processor. */
2305 portTICK_TYPE_ENTER_CRITICAL();
2306 {
2307 xTicks = xTickCount;
2308 }
2309 portTICK_TYPE_EXIT_CRITICAL();
2310
2311 return xTicks;
2312 }
2313 /*-----------------------------------------------------------*/
2314
xTaskGetTickCountFromISR(void)2315 TickType_t xTaskGetTickCountFromISR( void )
2316 {
2317 TickType_t xReturn;
2318 UBaseType_t uxSavedInterruptStatus;
2319
2320 /* RTOS ports that support interrupt nesting have the concept of a maximum
2321 * system call (or maximum API call) interrupt priority. Interrupts that are
2322 * above the maximum system call priority are kept permanently enabled, even
2323 * when the RTOS kernel is in a critical section, but cannot make any calls to
2324 * FreeRTOS API functions. If configASSERT() is defined in FreeRTOSConfig.h
2325 * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2326 * failure if a FreeRTOS API function is called from an interrupt that has been
2327 * assigned a priority above the configured maximum system call priority.
2328 * Only FreeRTOS functions that end in FromISR can be called from interrupts
2329 * that have been assigned a priority at or (logically) below the maximum
2330 * system call interrupt priority. FreeRTOS maintains a separate interrupt
2331 * safe API to ensure interrupt entry is as fast and as simple as possible.
2332 * More information (albeit Cortex-M specific) is provided on the following
2333 * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2334 portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2335
2336 uxSavedInterruptStatus = portTICK_TYPE_SET_INTERRUPT_MASK_FROM_ISR();
2337 {
2338 xReturn = xTickCount;
2339 }
2340 portTICK_TYPE_CLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
2341
2342 return xReturn;
2343 }
2344 /*-----------------------------------------------------------*/
2345
uxTaskGetNumberOfTasks(void)2346 UBaseType_t uxTaskGetNumberOfTasks( void )
2347 {
2348 /* A critical section is not required because the variables are of type
2349 * BaseType_t. */
2350 return uxCurrentNumberOfTasks;
2351 }
2352 /*-----------------------------------------------------------*/
2353
/* Return a pointer to the human readable name of the task referenced by
 * xTaskToQuery.  Passing NULL queries the name of the calling task.  The
 * returned pointer refers to storage inside the task's TCB. */
char * pcTaskGetName( TaskHandle_t xTaskToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
{
    /* A NULL handle resolves to the currently running task. */
    TCB_t * const pxTCB = prvGetTCBFromHandle( xTaskToQuery );

    configASSERT( pxTCB );

    return &( pxTCB->pcTaskName[ 0 ] );
}
2364 /*-----------------------------------------------------------*/
2365
2366 #if ( INCLUDE_xTaskGetHandle == 1 )
2367
    /* Walk a single task list looking for a task whose name exactly matches
     * pcNameToQuery (character by character, up to configMAX_TASK_NAME_LEN).
     * Returns the matching TCB, or NULL if no task in the list has that name.
     * This function is called with the scheduler suspended. */
    static TCB_t * prvSearchForNameWithinSingleList( List_t * pxList,
                                                     const char pcNameToQuery[] )
    {
        TCB_t * pxNextTCB;
        TCB_t * pxFirstTCB;
        TCB_t * pxReturn = NULL;
        UBaseType_t x;
        char cNextChar;
        BaseType_t xBreakLoop;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first entry so the do/while below knows when it has
             * gone all the way round the (circular) list.  Note
             * listGET_OWNER_OF_NEXT_ENTRY advances the list's internal index
             * as a side effect. */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

            do
            {
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

                /* Check each character in the name looking for a match or
                 * mismatch. */
                xBreakLoop = pdFALSE;

                for( x = ( UBaseType_t ) 0; x < ( UBaseType_t ) configMAX_TASK_NAME_LEN; x++ )
                {
                    cNextChar = pxNextTCB->pcTaskName[ x ];

                    if( cNextChar != pcNameToQuery[ x ] )
                    {
                        /* Characters didn't match. */
                        xBreakLoop = pdTRUE;
                    }
                    else if( cNextChar == ( char ) 0x00 )
                    {
                        /* Both strings terminated, a match must have been
                         * found. */
                        pxReturn = pxNextTCB;
                        xBreakLoop = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    if( xBreakLoop != pdFALSE )
                    {
                        break;
                    }
                }

                if( pxReturn != NULL )
                {
                    /* The handle has been found. */
                    break;
                }
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return pxReturn;
    }
2433
2434 #endif /* INCLUDE_xTaskGetHandle */
2435 /*-----------------------------------------------------------*/
2436
2437 #if ( INCLUDE_xTaskGetHandle == 1 )
2438
    /* Look up a task by name.  Searches, in order: every ready list (highest
     * priority first), both delayed lists, then (when the relevant features
     * are compiled in) the suspended list and the list of tasks awaiting
     * deletion.  Returns NULL if no task with that name exists.  The search
     * runs with the scheduler suspended, so this can take a relatively long
     * time. */
    TaskHandle_t xTaskGetHandle( const char * pcNameToQuery ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
    {
        UBaseType_t uxQueue = configMAX_PRIORITIES;
        TCB_t * pxTCB;

        /* Task names will be truncated to configMAX_TASK_NAME_LEN - 1 bytes. */
        configASSERT( strlen( pcNameToQuery ) < configMAX_TASK_NAME_LEN );

        vTaskSuspendAll();
        {
            /* Search the ready lists. */
            do
            {
                uxQueue--;
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) &( pxReadyTasksLists[ uxQueue ] ), pcNameToQuery );

                if( pxTCB != NULL )
                {
                    /* Found the handle. */
                    break;
                }
            } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

            /* Search the delayed lists. */
            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxDelayedTaskList, pcNameToQuery );
            }

            if( pxTCB == NULL )
            {
                pxTCB = prvSearchForNameWithinSingleList( ( List_t * ) pxOverflowDelayedTaskList, pcNameToQuery );
            }

            #if ( INCLUDE_vTaskSuspend == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the suspended list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xSuspendedTaskList, pcNameToQuery );
                }
            }
            #endif

            #if ( INCLUDE_vTaskDelete == 1 )
            {
                if( pxTCB == NULL )
                {
                    /* Search the deleted list. */
                    pxTCB = prvSearchForNameWithinSingleList( &xTasksWaitingTermination, pcNameToQuery );
                }
            }
            #endif
        }
        ( void ) xTaskResumeAll();

        return pxTCB;
    }
2497
2498 #endif /* INCLUDE_xTaskGetHandle */
2499 /*-----------------------------------------------------------*/
2500
2501 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
2502
    /* Retrieve pointers to a statically created task's data structure buffer
     * and stack buffer (the buffers the application passed to
     * xTaskCreateStatic() or xTaskCreateRestrictedStatic()).  Returns pdTRUE
     * if the buffers were retrieved, pdFALSE if the task was not statically
     * allocated.  *ppxTaskBuffer is set to NULL when only the stack was
     * statically allocated. */
    BaseType_t xTaskGetStaticBuffers( TaskHandle_t xTask,
                                      StackType_t ** ppuxStackBuffer,
                                      StaticTask_t ** ppxTaskBuffer )
    {
        BaseType_t xReturn;
        TCB_t * pxTCB;

        configASSERT( ppuxStackBuffer != NULL );
        configASSERT( ppxTaskBuffer != NULL );

        /* A NULL handle resolves to the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        #if ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 )
        {
            /* Both allocation schemes are compiled in, so the TCB records how
             * this particular task was allocated. */
            if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB )
            {
                *ppuxStackBuffer = pxTCB->pxStack;
                *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
                xReturn = pdTRUE;
            }
            else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
            {
                *ppuxStackBuffer = pxTCB->pxStack;
                *ppxTaskBuffer = NULL;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */
        {
            /* Only static allocation is possible, so every task's buffers are
             * statically allocated by definition. */
            *ppuxStackBuffer = pxTCB->pxStack;
            *ppxTaskBuffer = ( StaticTask_t * ) pxTCB;
            xReturn = pdTRUE;
        }
        #endif /* tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE == 1 */

        return xReturn;
    }
2544
2545 #endif /* configSUPPORT_STATIC_ALLOCATION */
2546 /*-----------------------------------------------------------*/
2547
2548 #if ( configUSE_TRACE_FACILITY == 1 )
2549
    /* Populate pxTaskStatusArray with a TaskStatus_t entry for every task in
     * the system (ready, blocked, deleted-pending-cleanup and suspended).
     * Returns the number of entries filled in, which is zero when uxArraySize
     * is too small to hold every task.  Optionally writes the total run time
     * counter through pulTotalRunTime.  Runs with the scheduler suspended, so
     * this is intended for debug/statistics use only. */
    UBaseType_t uxTaskGetSystemState( TaskStatus_t * const pxTaskStatusArray,
                                      const UBaseType_t uxArraySize,
                                      configRUN_TIME_COUNTER_TYPE * const pulTotalRunTime )
    {
        UBaseType_t uxTask = 0, uxQueue = configMAX_PRIORITIES;

        vTaskSuspendAll();
        {
            /* Is there a space in the array for each task in the system? */
            if( uxArraySize >= uxCurrentNumberOfTasks )
            {
                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Ready state, highest priority list first. */
                do
                {
                    uxQueue--;
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &( pxReadyTasksLists[ uxQueue ] ), eReady );
                } while( uxQueue > ( UBaseType_t ) tskIDLE_PRIORITY ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */

                /* Fill in an TaskStatus_t structure with information on each
                 * task in the Blocked state. */
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxDelayedTaskList, eBlocked );
                uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), ( List_t * ) pxOverflowDelayedTaskList, eBlocked );

                #if ( INCLUDE_vTaskDelete == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task that has been deleted but not yet cleaned up. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xTasksWaitingTermination, eDeleted );
                }
                #endif

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* Fill in an TaskStatus_t structure with information on
                     * each task in the Suspended state. */
                    uxTask += prvListTasksWithinSingleList( &( pxTaskStatusArray[ uxTask ] ), &xSuspendedTaskList, eSuspended );
                }
                #endif

                #if ( configGENERATE_RUN_TIME_STATS == 1 )
                {
                    if( pulTotalRunTime != NULL )
                    {
                        #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                            portALT_GET_RUN_TIME_COUNTER_VALUE( ( *pulTotalRunTime ) );
                        #else
                            *pulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
                        #endif
                    }
                }
                #else /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
                {
                    if( pulTotalRunTime != NULL )
                    {
                        *pulTotalRunTime = 0;
                    }
                }
                #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        ( void ) xTaskResumeAll();

        return uxTask;
    }
2619
2620 #endif /* configUSE_TRACE_FACILITY */
2621 /*----------------------------------------------------------*/
2622
2623 #if ( INCLUDE_xTaskGetIdleTaskHandle == 1 )
2624
xTaskGetIdleTaskHandle(void)2625 TaskHandle_t xTaskGetIdleTaskHandle( void )
2626 {
2627 /* If xTaskGetIdleTaskHandle() is called before the scheduler has been
2628 * started, then xIdleTaskHandle will be NULL. */
2629 configASSERT( ( xIdleTaskHandle != NULL ) );
2630 return xIdleTaskHandle;
2631 }
2632
2633 #endif /* INCLUDE_xTaskGetIdleTaskHandle */
2634 /*----------------------------------------------------------*/
2635
2636 /* This conditional compilation should use inequality to 0, not equality to 1.
2637 * This is to ensure vTaskStepTick() is available when user defined low power mode
2638 * implementations require configUSE_TICKLESS_IDLE to be set to a value other than
2639 * 1. */
2640 #if ( configUSE_TICKLESS_IDLE != 0 )
2641
    /* Wind the tick count forward by xTicksToJump in a single step - called by
     * a port's tickless idle implementation after a period during which the
     * tick interrupt was suppressed.  Must be called with the scheduler
     * suspended.  Note this does *not* call the tick hook for each stepped
     * tick, and it must never jump past xNextTaskUnblockTime (asserted
     * below). */
    void vTaskStepTick( TickType_t xTicksToJump )
    {
        /* Correct the tick count value after a period during which the tick
         * was suppressed.  Note this does *not* call the tick hook function for
         * each stepped tick. */
        configASSERT( ( xTickCount + xTicksToJump ) <= xNextTaskUnblockTime );

        if( ( xTickCount + xTicksToJump ) == xNextTaskUnblockTime )
        {
            /* Arrange for xTickCount to reach xNextTaskUnblockTime in
             * xTaskIncrementTick() when the scheduler resumes.  This ensures
             * that any delayed tasks are resumed at the correct time. */
            configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );
            configASSERT( xTicksToJump != ( TickType_t ) 0 );

            /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
            taskENTER_CRITICAL();
            {
                /* The final tick is left pending so xTaskResumeAll() processes
                 * it and unblocks any task waiting on xNextTaskUnblockTime. */
                xPendedTicks++;
            }
            taskEXIT_CRITICAL();
            xTicksToJump--;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        xTickCount += xTicksToJump;
        traceINCREASE_TICK_COUNT( xTicksToJump );
    }
2673
2674 #endif /* configUSE_TICKLESS_IDLE */
2675 /*----------------------------------------------------------*/
2676
/* Advance the tick count by xTicksToCatchUp ticks, processing each skipped
 * tick in full (tasks whose timeout expires are unblocked) - used after a
 * period during which the application kept interrupts disabled.  Returns
 * pdTRUE if winding the ticks forward caused a context switch. */
BaseType_t xTaskCatchUpTicks( TickType_t xTicksToCatchUp )
{
    BaseType_t xYieldOccurred;

    /* Must not be called with the scheduler suspended as the implementation
     * relies on xPendedTicks being wound down to 0 in xTaskResumeAll(). */
    configASSERT( uxSchedulerSuspended == ( UBaseType_t ) 0U );

    /* Use xPendedTicks to mimic xTicksToCatchUp number of ticks occurring when
     * the scheduler is suspended so the ticks are executed in xTaskResumeAll(). */
    vTaskSuspendAll();

    /* Prevent the tick interrupt modifying xPendedTicks simultaneously. */
    taskENTER_CRITICAL();
    {
        xPendedTicks += xTicksToCatchUp;
    }
    taskEXIT_CRITICAL();

    /* Resuming the scheduler unwinds xPendedTicks, which is where the caught
     * up ticks are actually processed. */
    xYieldOccurred = xTaskResumeAll();

    return xYieldOccurred;
}
2699 /*----------------------------------------------------------*/
2700
2701 #if ( INCLUDE_xTaskAbortDelay == 1 )
2702
    /* Force xTask out of the Blocked state before its block time expires.
     * Returns pdPASS if the task was in the Blocked state and has been moved
     * to the Ready state, pdFAIL if the task was not blocked.  May pend a
     * yield if the unblocked task outranks the caller and preemption is on. */
    BaseType_t xTaskAbortDelay( TaskHandle_t xTask )
    {
        TCB_t * pxTCB = xTask;
        BaseType_t xReturn;

        configASSERT( pxTCB );

        vTaskSuspendAll();
        {
            /* A task can only be prematurely removed from the Blocked state if
             * it is actually in the Blocked state. */
            if( eTaskGetState( xTask ) == eBlocked )
            {
                xReturn = pdPASS;

                /* Remove the reference to the task from the blocked list.  An
                 * interrupt won't touch the xStateListItem because the
                 * scheduler is suspended. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );

                /* Is the task waiting on an event also?  If so remove it from
                 * the event list too.  Interrupts can touch the event list item,
                 * even though the scheduler is suspended, so a critical section
                 * is used. */
                taskENTER_CRITICAL();
                {
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        ( void ) uxListRemove( &( pxTCB->xEventListItem ) );

                        /* This lets the task know it was forcibly removed from the
                         * blocked state so it should not re-evaluate its block time and
                         * then block again. */
                        pxTCB->ucDelayAborted = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                taskEXIT_CRITICAL();

                /* Place the unblocked task into the appropriate ready list. */
                prvAddTaskToReadyList( pxTCB );

                /* A task being unblocked cannot cause an immediate context
                 * switch if preemption is turned off. */
                #if ( configUSE_PREEMPTION == 1 )
                {
                    /* Preemption is on, but a context switch should only be
                     * performed if the unblocked task has a priority that is
                     * higher than the currently executing task. */
                    if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                    {
                        /* Pend the yield to be performed when the scheduler
                         * is unsuspended. */
                        xYieldPending = pdTRUE;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                #endif /* configUSE_PREEMPTION */
            }
            else
            {
                xReturn = pdFAIL;
            }
        }
        ( void ) xTaskResumeAll();

        return xReturn;
    }
2777
2778 #endif /* INCLUDE_xTaskAbortDelay */
2779 /*----------------------------------------------------------*/
2780
/* Called from the tick interrupt (via the port layer) on every tick.
 * Increments the tick count, moves any tasks whose block time has expired to
 * the Ready state, and returns pdTRUE if a context switch should be performed
 * before exiting the interrupt.  If the scheduler is suspended the tick is
 * held in xPendedTicks and processed later by xTaskResumeAll(). */
BaseType_t xTaskIncrementTick( void )
{
    TCB_t * pxTCB;
    TickType_t xItemValue;
    BaseType_t xSwitchRequired = pdFALSE;

    /* Called by the portable layer each time a tick interrupt occurs.
     * Increments the tick then checks to see if the new tick value will cause any
     * tasks to be unblocked. */
    traceTASK_INCREMENT_TICK( xTickCount );

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        /* Minor optimisation.  The tick count cannot change in this
         * block. */
        const TickType_t xConstTickCount = xTickCount + ( TickType_t ) 1;

        /* Increment the RTOS tick, switching the delayed and overflowed
         * delayed lists if it wraps to 0. */
        xTickCount = xConstTickCount;

        if( xConstTickCount == ( TickType_t ) 0U ) /*lint !e774 'if' does not always evaluate to false as it is looking for an overflow. */
        {
            taskSWITCH_DELAYED_LISTS();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* See if this tick has made a timeout expire.  Tasks are stored in
         * the queue in the order of their wake time - meaning once one task
         * has been found whose block time has not expired there is no need to
         * look any further down the list. */
        if( xConstTickCount >= xNextTaskUnblockTime )
        {
            for( ; ; )
            {
                if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
                {
                    /* The delayed list is empty.  Set xNextTaskUnblockTime
                     * to the maximum possible value so it is extremely
                     * unlikely that the
                     * if( xTickCount >= xNextTaskUnblockTime ) test will pass
                     * next time through. */
                    xNextTaskUnblockTime = portMAX_DELAY; /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    break;
                }
                else
                {
                    /* The delayed list is not empty, get the value of the
                     * item at the head of the delayed list.  This is the time
                     * at which the task at the head of the delayed list must
                     * be removed from the Blocked state. */
                    pxTCB = listGET_OWNER_OF_HEAD_ENTRY( pxDelayedTaskList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                    xItemValue = listGET_LIST_ITEM_VALUE( &( pxTCB->xStateListItem ) );

                    if( xConstTickCount < xItemValue )
                    {
                        /* It is not time to unblock this item yet, but the
                         * item value is the time at which the task at the head
                         * of the blocked list must be removed from the Blocked
                         * state - so record the item value in
                         * xNextTaskUnblockTime. */
                        xNextTaskUnblockTime = xItemValue;
                        break; /*lint !e9011 Code structure here is deemed easier to understand with multiple breaks. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* It is time to remove the item from the Blocked state. */
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );

                    /* Is the task waiting on an event also?  If so remove
                     * it from the event list. */
                    if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                    {
                        listREMOVE_ITEM( &( pxTCB->xEventListItem ) );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Place the unblocked task into the appropriate ready
                     * list. */
                    prvAddTaskToReadyList( pxTCB );

                    /* A task being unblocked cannot cause an immediate
                     * context switch if preemption is turned off. */
                    #if ( configUSE_PREEMPTION == 1 )
                    {
                        /* Preemption is on, but a context switch should
                         * only be performed if the unblocked task's
                         * priority is higher than the currently executing
                         * task.
                         * The case of equal priority tasks sharing
                         * processing time (which happens when both
                         * preemption and time slicing are on) is
                         * handled below.*/
                        if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                        {
                            xSwitchRequired = pdTRUE;
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }
                    }
                    #endif /* configUSE_PREEMPTION */
                }
            }
        }

        /* Tasks of equal priority to the currently running task will share
         * processing time (time slice) if preemption is on, and the application
         * writer has not explicitly turned time slicing off. */
        #if ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) )
        {
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ pxCurrentTCB->uxPriority ] ) ) > ( UBaseType_t ) 1 )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configUSE_TIME_SLICING == 1 ) ) */

        #if ( configUSE_TICK_HOOK == 1 )
        {
            /* Guard against the tick hook being called when the pended tick
             * count is being unwound (when the scheduler is being unlocked). */
            if( xPendedTicks == ( TickType_t ) 0 )
            {
                vApplicationTickHook();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICK_HOOK */

        #if ( configUSE_PREEMPTION == 1 )
        {
            if( xYieldPending != pdFALSE )
            {
                xSwitchRequired = pdTRUE;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_PREEMPTION */
    }
    else
    {
        /* The scheduler is suspended - hold the tick so it can be processed
         * later by xTaskResumeAll(). */
        ++xPendedTicks;

        /* The tick hook gets called at regular intervals, even if the
         * scheduler is locked. */
        #if ( configUSE_TICK_HOOK == 1 )
        {
            vApplicationTickHook();
        }
        #endif
    }

    return xSwitchRequired;
}
2956 /*-----------------------------------------------------------*/
2957
2958 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
2959
vTaskSetApplicationTaskTag(TaskHandle_t xTask,TaskHookFunction_t pxHookFunction)2960 void vTaskSetApplicationTaskTag( TaskHandle_t xTask,
2961 TaskHookFunction_t pxHookFunction )
2962 {
2963 TCB_t * xTCB;
2964
2965 /* If xTask is NULL then it is the task hook of the calling task that is
2966 * getting set. */
2967 if( xTask == NULL )
2968 {
2969 xTCB = ( TCB_t * ) pxCurrentTCB;
2970 }
2971 else
2972 {
2973 xTCB = xTask;
2974 }
2975
2976 /* Save the hook function in the TCB. A critical section is required as
2977 * the value can be accessed from an interrupt. */
2978 taskENTER_CRITICAL();
2979 {
2980 xTCB->pxTaskTag = pxHookFunction;
2981 }
2982 taskEXIT_CRITICAL();
2983 }
2984
2985 #endif /* configUSE_APPLICATION_TASK_TAG */
2986 /*-----------------------------------------------------------*/
2987
2988 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
2989
xTaskGetApplicationTaskTag(TaskHandle_t xTask)2990 TaskHookFunction_t xTaskGetApplicationTaskTag( TaskHandle_t xTask )
2991 {
2992 TCB_t * pxTCB;
2993 TaskHookFunction_t xReturn;
2994
2995 /* If xTask is NULL then set the calling task's hook. */
2996 pxTCB = prvGetTCBFromHandle( xTask );
2997
2998 /* Save the hook function in the TCB. A critical section is required as
2999 * the value can be accessed from an interrupt. */
3000 taskENTER_CRITICAL();
3001 {
3002 xReturn = pxTCB->pxTaskTag;
3003 }
3004 taskEXIT_CRITICAL();
3005
3006 return xReturn;
3007 }
3008
3009 #endif /* configUSE_APPLICATION_TASK_TAG */
3010 /*-----------------------------------------------------------*/
3011
3012 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3013
    /* Interrupt-safe version of xTaskGetApplicationTaskTag().  Uses the
     * FROM_ISR interrupt mask macros instead of a task-level critical section
     * so it can be called from an ISR.  A NULL xTask means the tag of the
     * task that was interrupted. */
    TaskHookFunction_t xTaskGetApplicationTaskTagFromISR( TaskHandle_t xTask )
    {
        TCB_t * pxTCB;
        TaskHookFunction_t xReturn;
        UBaseType_t uxSavedInterruptStatus;

        /* If xTask is NULL then set the calling task's hook. */
        pxTCB = prvGetTCBFromHandle( xTask );

        /* Read the hook function from the TCB.  Interrupts are masked as the
         * value can be written from another context. */
        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
        {
            xReturn = pxTCB->pxTaskTag;
        }
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

        return xReturn;
    }
3033
3034 #endif /* configUSE_APPLICATION_TASK_TAG */
3035 /*-----------------------------------------------------------*/
3036
3037 #if ( configUSE_APPLICATION_TASK_TAG == 1 )
3038
xTaskCallApplicationTaskHook(TaskHandle_t xTask,void * pvParameter)3039 BaseType_t xTaskCallApplicationTaskHook( TaskHandle_t xTask,
3040 void * pvParameter )
3041 {
3042 TCB_t * xTCB;
3043 BaseType_t xReturn;
3044
3045 /* If xTask is NULL then we are calling our own task hook. */
3046 if( xTask == NULL )
3047 {
3048 xTCB = pxCurrentTCB;
3049 }
3050 else
3051 {
3052 xTCB = xTask;
3053 }
3054
3055 if( xTCB->pxTaskTag != NULL )
3056 {
3057 xReturn = xTCB->pxTaskTag( pvParameter );
3058 }
3059 else
3060 {
3061 xReturn = pdFAIL;
3062 }
3063
3064 return xReturn;
3065 }
3066
3067 #endif /* configUSE_APPLICATION_TASK_TAG */
3068 /*-----------------------------------------------------------*/
3069
/* Select the highest priority Ready state task as the task to run next,
 * updating pxCurrentTCB.  Called from the port layer's context switch code.
 * If the scheduler is suspended the switch is not performed - it is pended
 * via xYieldPending and happens when the scheduler is resumed. */
void vTaskSwitchContext( void )
{
    if( uxSchedulerSuspended != ( UBaseType_t ) 0U )
    {
        /* The scheduler is currently suspended - do not allow a context
         * switch. */
        xYieldPending = pdTRUE;
    }
    else
    {
        xYieldPending = pdFALSE;
        traceTASK_SWITCHED_OUT();

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            #ifdef portALT_GET_RUN_TIME_COUNTER_VALUE
                portALT_GET_RUN_TIME_COUNTER_VALUE( ulTotalRunTime );
            #else
                ulTotalRunTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
            #endif

            /* Add the amount of time the task has been running to the
             * accumulated time so far.  The time the task started running was
             * stored in ulTaskSwitchedInTime.  Note that there is no overflow
             * protection here so count values are only valid until the timer
             * overflows.  The guard against negative values is to protect
             * against suspect run time stat counter implementations - which
             * are provided by the application, not the kernel. */
            if( ulTotalRunTime > ulTaskSwitchedInTime )
            {
                pxCurrentTCB->ulRunTimeCounter += ( ulTotalRunTime - ulTaskSwitchedInTime );
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            ulTaskSwitchedInTime = ulTotalRunTime;
        }
        #endif /* configGENERATE_RUN_TIME_STATS */

        /* Check for stack overflow, if configured. */
        taskCHECK_FOR_STACK_OVERFLOW();

        /* Before the currently running task is switched out, save its errno. */
        #if ( configUSE_POSIX_ERRNO == 1 )
        {
            pxCurrentTCB->iTaskErrno = FreeRTOS_errno;
        }
        #endif

        /* Select a new task to run using either the generic C or port
         * optimised asm code. */
        taskSELECT_HIGHEST_PRIORITY_TASK(); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
        traceTASK_SWITCHED_IN();

        /* After the new task is switched in, update the global errno. */
        #if ( configUSE_POSIX_ERRNO == 1 )
        {
            FreeRTOS_errno = pxCurrentTCB->iTaskErrno;
        }
        #endif

        #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
        {
            /* Switch C-Runtime's TLS Block to point to the TLS
             * Block specific to this task. */
            configSET_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
        }
        #endif
    }
}
3142 /*-----------------------------------------------------------*/
3143
/* Place the calling task on pxEventList (in priority order) and move it to
 * the Blocked state for up to xTicksToWait ticks.  Used by the queue
 * implementation; must be called with interrupts disabled or with the
 * scheduler suspended and the owning queue locked. */
void vTaskPlaceOnEventList( List_t * const pxEventList,
                            const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );

    /* THIS FUNCTION MUST BE CALLED WITH EITHER INTERRUPTS DISABLED OR THE
     * SCHEDULER SUSPENDED AND THE QUEUE BEING ACCESSED LOCKED. */

    /* Place the event list item of the TCB in the appropriate event list.
     * This is placed in the list in priority order so the highest priority task
     * is the first to be woken by the event.
     *
     * Note: Lists are sorted in ascending order by ListItem_t.xItemValue.
     * Normally, the xItemValue of a TCB's ListItem_t members is:
     * xItemValue = ( configMAX_PRIORITIES - uxPriority )
     * Therefore, the event list is sorted in descending priority order.
     *
     * The queue that contains the event list is locked, preventing
     * simultaneous access from interrupts. */
    vListInsert( pxEventList, &( pxCurrentTCB->xEventListItem ) );

    /* pdTRUE - the block time may be adjusted if the delay is aborted. */
    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
3167 /*-----------------------------------------------------------*/
3168
/* Place the calling task at the end of pxEventList (no priority ordering)
 * with xItemValue stored in its event list item, then block it for up to
 * xTicksToWait ticks.  Used by the event groups implementation; must be
 * called with the scheduler suspended. */
void vTaskPlaceOnUnorderedEventList( List_t * pxEventList,
                                     const TickType_t xItemValue,
                                     const TickType_t xTicksToWait )
{
    configASSERT( pxEventList );

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
     * the event groups implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the item value in the event list item.  It is safe to access the
     * event list item here as interrupts won't access the event list item of a
     * task that is not in the Blocked state. */
    listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Place the event list item of the TCB at the end of the appropriate event
     * list.  It is safe to access the event list here because it is part of an
     * event group implementation - and interrupts don't access event groups
     * directly (instead they access them indirectly by pending function calls to
     * the task level). */
    listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );

    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
}
3193 /*-----------------------------------------------------------*/
3194
3195 #if ( configUSE_TIMERS == 1 )
3196
    /* Kernel-internal variant of vTaskPlaceOnEventList() used by the timer
     * service - assumes the caller is the only task that will wait on the
     * list, so the faster unordered insert can be used.  Must be called with
     * the scheduler suspended.  If xWaitIndefinitely is pdTRUE, xTicksToWait
     * is ignored and the task blocks forever. */
    void vTaskPlaceOnEventListRestricted( List_t * const pxEventList,
                                          TickType_t xTicksToWait,
                                          const BaseType_t xWaitIndefinitely )
    {
        configASSERT( pxEventList );

        /* This function should not be called by application code hence the
         * 'Restricted' in its name.  It is not part of the public API.  It is
         * designed for use by kernel code, and has special calling requirements -
         * it should be called with the scheduler suspended. */


        /* Place the event list item of the TCB in the appropriate event list.
         * In this case it is assume that this is the only task that is going to
         * be waiting on this event list, so the faster vListInsertEnd() function
         * can be used in place of vListInsert. */
        listINSERT_END( pxEventList, &( pxCurrentTCB->xEventListItem ) );

        /* If the task should block indefinitely then set the block time to a
         * value that will be recognised as an indefinite delay inside the
         * prvAddCurrentTaskToDelayedList() function. */
        if( xWaitIndefinitely != pdFALSE )
        {
            xTicksToWait = portMAX_DELAY;
        }

        traceTASK_DELAY_UNTIL( ( xTickCount + xTicksToWait ) );
        prvAddCurrentTaskToDelayedList( xTicksToWait, xWaitIndefinitely );
    }
3226
3227 #endif /* configUSE_TIMERS */
3228 /*-----------------------------------------------------------*/
3229
/* Remove the highest priority task waiting on pxEventList and make it ready
 * to run (or park it on xPendingReadyList if the scheduler is suspended).
 * Returns pdTRUE if the unblocked task has a higher priority than the
 * calling task, in which case the caller should request a context switch.
 * The caller must have already verified that pxEventList is not empty. */
BaseType_t xTaskRemoveFromEventList( const List_t * const pxEventList )
{
    TCB_t * pxUnblockedTCB;
    BaseType_t xReturn;

    /* THIS FUNCTION MUST BE CALLED FROM A CRITICAL SECTION.  It can also be
     * called from a critical section within an ISR. */

    /* The event list is sorted in priority order, so the first in the list can
     * be removed as it is known to be the highest priority.  Remove the TCB from
     * the delayed list, and add it to the ready list.
     *
     * If an event is for a queue that is locked then this function will never
     * get called - the lock count on the queue will get modified instead.  This
     * means exclusive access to the event list is guaranteed here.
     *
     * This function assumes that a check has already been made to ensure that
     * pxEventList is not empty. */
    pxUnblockedTCB = listGET_OWNER_OF_HEAD_ENTRY( pxEventList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( &( pxUnblockedTCB->xEventListItem ) );

    if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
    {
        listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
        prvAddTaskToReadyList( pxUnblockedTCB );

        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            /* If a task is blocked on a kernel object then xNextTaskUnblockTime
             * might be set to the blocked task's time out time.  If the task is
             * unblocked for a reason other than a timeout xNextTaskUnblockTime is
             * normally left unchanged, because it is automatically reset to a new
             * value when the tick count equals xNextTaskUnblockTime.  However if
             * tickless idling is used it might be more important to enter sleep mode
             * at the earliest possible time - so reset xNextTaskUnblockTime here to
             * ensure it is updated at the earliest possible time. */
            prvResetNextTaskUnblockTime();
        }
        #endif
    }
    else
    {
        /* The delayed and ready lists cannot be accessed, so hold this task
         * pending until the scheduler is resumed. */
        listINSERT_END( &( xPendingReadyList ), &( pxUnblockedTCB->xEventListItem ) );
    }

    if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
    {
        /* Return true if the task removed from the event list has a higher
         * priority than the calling task.  This allows the calling task to know if
         * it should force a context switch now. */
        xReturn = pdTRUE;

        /* Mark that a yield is pending in case the user is not using the
         * "xHigherPriorityTaskWoken" parameter to an ISR safe FreeRTOS function. */
        xYieldPending = pdTRUE;
    }
    else
    {
        xReturn = pdFALSE;
    }

    return xReturn;
}
3296 /*-----------------------------------------------------------*/
3297
/* Unblock the task that owns pxEventListItem, storing xItemValue in the item
 * before removal.  Used by the event groups implementation; must be called
 * with the scheduler suspended.  Pends a yield (via xYieldPending) if the
 * unblocked task outranks the calling task. */
void vTaskRemoveFromUnorderedEventList( ListItem_t * pxEventListItem,
                                        const TickType_t xItemValue )
{
    TCB_t * pxUnblockedTCB;

    /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED.  It is used by
     * the event flags implementation. */
    configASSERT( uxSchedulerSuspended != ( UBaseType_t ) 0U );

    /* Store the new item value in the event list. */
    listSET_LIST_ITEM_VALUE( pxEventListItem, xItemValue | taskEVENT_LIST_ITEM_VALUE_IN_USE );

    /* Remove the event list item from the event flag.  Interrupts do not
     * access event flags. */
    pxUnblockedTCB = listGET_LIST_ITEM_OWNER( pxEventListItem ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too.  Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
    configASSERT( pxUnblockedTCB );
    listREMOVE_ITEM( pxEventListItem );

    #if ( configUSE_TICKLESS_IDLE != 0 )
    {
        /* If a task is blocked on a kernel object then xNextTaskUnblockTime
         * might be set to the blocked task's time out time.  If the task is
         * unblocked for a reason other than a timeout xNextTaskUnblockTime is
         * normally left unchanged, because it is automatically reset to a new
         * value when the tick count equals xNextTaskUnblockTime.  However if
         * tickless idling is used it might be more important to enter sleep mode
         * at the earliest possible time - so reset xNextTaskUnblockTime here to
         * ensure it is updated at the earliest possible time. */
        prvResetNextTaskUnblockTime();
    }
    #endif

    /* Remove the task from the delayed list and add it to the ready list.  The
     * scheduler is suspended so interrupts will not be accessing the ready
     * lists. */
    listREMOVE_ITEM( &( pxUnblockedTCB->xStateListItem ) );
    prvAddTaskToReadyList( pxUnblockedTCB );

    if( pxUnblockedTCB->uxPriority > pxCurrentTCB->uxPriority )
    {
        /* The unblocked task has a priority above that of the calling task, so
         * a context switch is required.  This function is called with the
         * scheduler suspended so xYieldPending is set so the context switch
         * occurs immediately that the scheduler is resumed (unsuspended). */
        xYieldPending = pdTRUE;
    }
}
3345 /*-----------------------------------------------------------*/
3346
/* Capture the current tick count and overflow count into pxTimeOut so a
 * later call to xTaskCheckForTimeOut() can tell whether a block time has
 * expired.  A critical section is used as both values can change in the
 * tick interrupt. */
void vTaskSetTimeOutState( TimeOut_t * const pxTimeOut )
{
    configASSERT( pxTimeOut );
    taskENTER_CRITICAL();
    {
        pxTimeOut->xOverflowCount = xNumOfOverflows;
        pxTimeOut->xTimeOnEntering = xTickCount;
    }
    taskEXIT_CRITICAL();
}
3357 /*-----------------------------------------------------------*/
3358
/* As vTaskSetTimeOutState(), but without the critical section - for kernel
 * internal use only, where the caller already holds a critical section or
 * interrupts are otherwise prevented from changing the tick state. */
void vTaskInternalSetTimeOutState( TimeOut_t * const pxTimeOut )
{
    /* For internal use only as it does not use a critical section. */
    pxTimeOut->xOverflowCount = xNumOfOverflows;
    pxTimeOut->xTimeOnEntering = xTickCount;
}
3365 /*-----------------------------------------------------------*/
3366
/* Determine whether the block time captured in pxTimeOut has expired.
 * Returns pdTRUE on timeout (or aborted delay), with *pxTicksToWait set to
 * zero; otherwise returns pdFALSE and reduces *pxTicksToWait by the time
 * already elapsed, restarting the timeout from now. */
BaseType_t xTaskCheckForTimeOut( TimeOut_t * const pxTimeOut,
                                 TickType_t * const pxTicksToWait )
{
    BaseType_t xReturn;

    configASSERT( pxTimeOut );
    configASSERT( pxTicksToWait );

    taskENTER_CRITICAL();
    {
        /* Minor optimisation.  The tick count cannot change in this block. */
        const TickType_t xConstTickCount = xTickCount;
        const TickType_t xElapsedTime = xConstTickCount - pxTimeOut->xTimeOnEntering;

        #if ( INCLUDE_xTaskAbortDelay == 1 )
            if( pxCurrentTCB->ucDelayAborted != ( uint8_t ) pdFALSE )
            {
                /* The delay was aborted, which is not the same as a time out,
                 * but has the same result. */
                pxCurrentTCB->ucDelayAborted = pdFALSE;
                xReturn = pdTRUE;
            }
            else
        #endif

        #if ( INCLUDE_vTaskSuspend == 1 )
            if( *pxTicksToWait == portMAX_DELAY )
            {
                /* If INCLUDE_vTaskSuspend is set to 1 and the block time
                 * specified is the maximum block time then the task should block
                 * indefinitely, and therefore never time out. */
                xReturn = pdFALSE;
            }
            else
        #endif

        if( ( xNumOfOverflows != pxTimeOut->xOverflowCount ) && ( xConstTickCount >= pxTimeOut->xTimeOnEntering ) ) /*lint !e525 Indentation preferred as is to make code within pre-processor directives clearer. */
        {
            /* The tick count has overflowed since vTaskSetTimeOut() was
             * called, yet is still at or past the recorded entry time, so it
             * must have wrapped all the way around and gone past the entry
             * time again - more than a full tick period has elapsed, which is
             * necessarily a timeout. */
            xReturn = pdTRUE;
            *pxTicksToWait = ( TickType_t ) 0;
        }
        else if( xElapsedTime < *pxTicksToWait ) /*lint !e961 Explicit casting is only redundant with some compilers, whereas others require it to prevent integer conversion errors. */
        {
            /* Not a genuine timeout.  Adjust parameters for time remaining. */
            *pxTicksToWait -= xElapsedTime;
            vTaskInternalSetTimeOutState( pxTimeOut );
            xReturn = pdFALSE;
        }
        else
        {
            *pxTicksToWait = ( TickType_t ) 0;
            xReturn = pdTRUE;
        }
    }
    taskEXIT_CRITICAL();

    return xReturn;
}
3430 /*-----------------------------------------------------------*/
3431
/* Record that a yield was required but could not be performed immediately
 * (for example because it was requested while a queue was locked).  The
 * pended yield is actioned when the scheduler is next resumed. */
void vTaskMissedYield( void )
{
    xYieldPending = pdTRUE;
}
3436 /*-----------------------------------------------------------*/
3437
3438 #if ( configUSE_TRACE_FACILITY == 1 )
3439
uxTaskGetTaskNumber(TaskHandle_t xTask)3440 UBaseType_t uxTaskGetTaskNumber( TaskHandle_t xTask )
3441 {
3442 UBaseType_t uxReturn;
3443 TCB_t const * pxTCB;
3444
3445 if( xTask != NULL )
3446 {
3447 pxTCB = xTask;
3448 uxReturn = pxTCB->uxTaskNumber;
3449 }
3450 else
3451 {
3452 uxReturn = 0U;
3453 }
3454
3455 return uxReturn;
3456 }
3457
3458 #endif /* configUSE_TRACE_FACILITY */
3459 /*-----------------------------------------------------------*/
3460
3461 #if ( configUSE_TRACE_FACILITY == 1 )
3462
vTaskSetTaskNumber(TaskHandle_t xTask,const UBaseType_t uxHandle)3463 void vTaskSetTaskNumber( TaskHandle_t xTask,
3464 const UBaseType_t uxHandle )
3465 {
3466 TCB_t * pxTCB;
3467
3468 if( xTask != NULL )
3469 {
3470 pxTCB = xTask;
3471 pxTCB->uxTaskNumber = uxHandle;
3472 }
3473 }
3474
3475 #endif /* configUSE_TRACE_FACILITY */
3476
3477 /*
3478 * -----------------------------------------------------------
3479 * The Idle task.
3480 * ----------------------------------------------------------
3481 *
3482 * The portTASK_FUNCTION() macro is used to allow port/compiler specific
3483 * language extensions. The equivalent prototype for this function is:
3484 *
3485 * void prvIdleTask( void *pvParameters );
3486 *
3487 */
3488
/*
 * The RTOS idle task.  It is created automatically when the scheduler is
 * started and runs at the lowest priority.  Its duties, as visible below:
 * free the resources of tasks that deleted themselves, optionally yield to
 * other tasks sharing the idle priority, call the application idle hook,
 * and enter tickless low power mode when so configured.
 */
static portTASK_FUNCTION( prvIdleTask, pvParameters )
{
    /* Stop warnings. */
    ( void ) pvParameters;

    /** THIS IS THE RTOS IDLE TASK - WHICH IS CREATED AUTOMATICALLY WHEN THE
     * SCHEDULER IS STARTED. **/

    /* In case a task that has a secure context deletes itself, in which case
     * the idle task is responsible for deleting the task's secure context, if
     * any. */
    portALLOCATE_SECURE_CONTEXT( configMINIMAL_SECURE_STACK_SIZE );

    for( ; ; )
    {
        /* See if any tasks have deleted themselves - if so then the idle task
         * is responsible for freeing the deleted task's TCB and stack. */
        prvCheckTasksWaitingTermination();

        #if ( configUSE_PREEMPTION == 0 )
        {
            /* If we are not using preemption we keep forcing a task switch to
             * see if any other task has become available. If we are using
             * preemption we don't need to do this as any task becoming available
             * will automatically get the processor anyway. */
            taskYIELD();
        }
        #endif /* configUSE_PREEMPTION */

        #if ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) )
        {
            /* When using preemption tasks of equal priority will be
             * timesliced. If a task that is sharing the idle priority is ready
             * to run then the idle task should yield before the end of the
             * timeslice.
             *
             * A critical region is not required here as we are just reading from
             * the list, and an occasional incorrect value will not matter. If
             * the ready list at the idle priority contains more than one task
             * then a task other than the idle task is ready to execute. */
            if( listCURRENT_LIST_LENGTH( &( pxReadyTasksLists[ tskIDLE_PRIORITY ] ) ) > ( UBaseType_t ) 1 )
            {
                taskYIELD();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* ( ( configUSE_PREEMPTION == 1 ) && ( configIDLE_SHOULD_YIELD == 1 ) ) */

        #if ( configUSE_IDLE_HOOK == 1 )
        {
            /* Call the user defined function from within the idle task.  The
             * hook must not block - the idle task must always remain ready to
             * run. */
            vApplicationIdleHook();
        }
        #endif /* configUSE_IDLE_HOOK */

        /* This conditional compilation should use inequality to 0, not equality
         * to 1. This is to ensure portSUPPRESS_TICKS_AND_SLEEP() is called when
         * user defined low power mode implementations require
         * configUSE_TICKLESS_IDLE to be set to a value other than 1. */
        #if ( configUSE_TICKLESS_IDLE != 0 )
        {
            TickType_t xExpectedIdleTime;

            /* It is not desirable to suspend then resume the scheduler on
             * each iteration of the idle task. Therefore, a preliminary
             * test of the expected idle time is performed without the
             * scheduler suspended. The result here is not necessarily
             * valid. */
            xExpectedIdleTime = prvGetExpectedIdleTime();

            if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
            {
                vTaskSuspendAll();
                {
                    /* Now the scheduler is suspended, the expected idle
                     * time can be sampled again, and this time its value can
                     * be used. */
                    configASSERT( xNextTaskUnblockTime >= xTickCount );
                    xExpectedIdleTime = prvGetExpectedIdleTime();

                    /* Define the following macro to set xExpectedIdleTime to 0
                     * if the application does not want
                     * portSUPPRESS_TICKS_AND_SLEEP() to be called. */
                    configPRE_SUPPRESS_TICKS_AND_SLEEP_PROCESSING( xExpectedIdleTime );

                    if( xExpectedIdleTime >= configEXPECTED_IDLE_TIME_BEFORE_SLEEP )
                    {
                        traceLOW_POWER_IDLE_BEGIN();
                        portSUPPRESS_TICKS_AND_SLEEP( xExpectedIdleTime );
                        traceLOW_POWER_IDLE_END();
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                ( void ) xTaskResumeAll();
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        #endif /* configUSE_TICKLESS_IDLE */
    }
}
3598 /*-----------------------------------------------------------*/
3599
3600 #if ( configUSE_TICKLESS_IDLE != 0 )
3601
    /*
     * Called (from within a critical section) by a port's tickless idle
     * implementation to confirm it is still safe to enter a low power state.
     * Returns eAbortSleep if anything became ready or pended while the
     * scheduler was suspended, eNoTasksWaitingTimeout if every application
     * task is blocked indefinitely or suspended, otherwise eStandardSleep.
     */
    eSleepModeStatus eTaskConfirmSleepModeStatus( void )
    {
        #if ( INCLUDE_vTaskSuspend == 1 )
            /* The idle task exists in addition to the application tasks. */
            const UBaseType_t uxNonApplicationTasks = 1;
        #endif /* INCLUDE_vTaskSuspend */

        eSleepModeStatus eReturn = eStandardSleep;

        /* This function must be called from a critical section. */

        if( listCURRENT_LIST_LENGTH( &xPendingReadyList ) != 0 )
        {
            /* A task was made ready while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xYieldPending != pdFALSE )
        {
            /* A yield was pended while the scheduler was suspended. */
            eReturn = eAbortSleep;
        }
        else if( xPendedTicks != 0 )
        {
            /* A tick interrupt has already occurred but was held pending
             * because the scheduler is suspended. */
            eReturn = eAbortSleep;
        }

        #if ( INCLUDE_vTaskSuspend == 1 )
            else if( listCURRENT_LIST_LENGTH( &xSuspendedTaskList ) == ( uxCurrentNumberOfTasks - uxNonApplicationTasks ) )
            {
                /* If all the tasks are in the suspended list (which might mean they
                 * have an infinite block time rather than actually being suspended)
                 * then it is safe to turn all clocks off and just wait for external
                 * interrupts. */
                eReturn = eNoTasksWaitingTimeout;
            }
        #endif /* INCLUDE_vTaskSuspend */
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return eReturn;
    }
3647
3648 #endif /* configUSE_TICKLESS_IDLE */
3649 /*-----------------------------------------------------------*/
3650
3651 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3652
vTaskSetThreadLocalStoragePointer(TaskHandle_t xTaskToSet,BaseType_t xIndex,void * pvValue)3653 void vTaskSetThreadLocalStoragePointer( TaskHandle_t xTaskToSet,
3654 BaseType_t xIndex,
3655 void * pvValue )
3656 {
3657 TCB_t * pxTCB;
3658
3659 if( ( xIndex >= 0 ) &&
3660 ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
3661 {
3662 pxTCB = prvGetTCBFromHandle( xTaskToSet );
3663 configASSERT( pxTCB != NULL );
3664 pxTCB->pvThreadLocalStoragePointers[ xIndex ] = pvValue;
3665 }
3666 }
3667
3668 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3669 /*-----------------------------------------------------------*/
3670
3671 #if ( configNUM_THREAD_LOCAL_STORAGE_POINTERS != 0 )
3672
pvTaskGetThreadLocalStoragePointer(TaskHandle_t xTaskToQuery,BaseType_t xIndex)3673 void * pvTaskGetThreadLocalStoragePointer( TaskHandle_t xTaskToQuery,
3674 BaseType_t xIndex )
3675 {
3676 void * pvReturn = NULL;
3677 TCB_t * pxTCB;
3678
3679 if( ( xIndex >= 0 ) &&
3680 ( xIndex < configNUM_THREAD_LOCAL_STORAGE_POINTERS ) )
3681 {
3682 pxTCB = prvGetTCBFromHandle( xTaskToQuery );
3683 pvReturn = pxTCB->pvThreadLocalStoragePointers[ xIndex ];
3684 }
3685 else
3686 {
3687 pvReturn = NULL;
3688 }
3689
3690 return pvReturn;
3691 }
3692
3693 #endif /* configNUM_THREAD_LOCAL_STORAGE_POINTERS */
3694 /*-----------------------------------------------------------*/
3695
3696 #if ( portUSING_MPU_WRAPPERS == 1 )
3697
vTaskAllocateMPURegions(TaskHandle_t xTaskToModify,const MemoryRegion_t * const xRegions)3698 void vTaskAllocateMPURegions( TaskHandle_t xTaskToModify,
3699 const MemoryRegion_t * const xRegions )
3700 {
3701 TCB_t * pxTCB;
3702
3703 /* If null is passed in here then we are modifying the MPU settings of
3704 * the calling task. */
3705 pxTCB = prvGetTCBFromHandle( xTaskToModify );
3706
3707 vPortStoreTaskMPUSettings( &( pxTCB->xMPUSettings ), xRegions, NULL, 0 );
3708 }
3709
3710 #endif /* portUSING_MPU_WRAPPERS */
3711 /*-----------------------------------------------------------*/
3712
prvInitialiseTaskLists(void)3713 static void prvInitialiseTaskLists( void )
3714 {
3715 UBaseType_t uxPriority;
3716
3717 for( uxPriority = ( UBaseType_t ) 0U; uxPriority < ( UBaseType_t ) configMAX_PRIORITIES; uxPriority++ )
3718 {
3719 vListInitialise( &( pxReadyTasksLists[ uxPriority ] ) );
3720 }
3721
3722 vListInitialise( &xDelayedTaskList1 );
3723 vListInitialise( &xDelayedTaskList2 );
3724 vListInitialise( &xPendingReadyList );
3725
3726 #if ( INCLUDE_vTaskDelete == 1 )
3727 {
3728 vListInitialise( &xTasksWaitingTermination );
3729 }
3730 #endif /* INCLUDE_vTaskDelete */
3731
3732 #if ( INCLUDE_vTaskSuspend == 1 )
3733 {
3734 vListInitialise( &xSuspendedTaskList );
3735 }
3736 #endif /* INCLUDE_vTaskSuspend */
3737
3738 /* Start with pxDelayedTaskList using list1 and the pxOverflowDelayedTaskList
3739 * using list2. */
3740 pxDelayedTaskList = &xDelayedTaskList1;
3741 pxOverflowDelayedTaskList = &xDelayedTaskList2;
3742 }
3743 /*-----------------------------------------------------------*/
3744
/*
 * Free the TCB and stack of any task that has deleted itself.  Called only
 * from the idle task on each pass through its loop.
 */
static void prvCheckTasksWaitingTermination( void )
{
    /** THIS FUNCTION IS CALLED FROM THE RTOS IDLE TASK **/

    #if ( INCLUDE_vTaskDelete == 1 )
    {
        TCB_t * pxTCB;

        /* uxDeletedTasksWaitingCleanUp is used to prevent taskENTER_CRITICAL()
         * being called too often in the idle task - the count is deliberately
         * read outside the critical section so the common "nothing to do"
         * case costs no interrupt masking. */
        while( uxDeletedTasksWaitingCleanUp > ( UBaseType_t ) 0U )
        {
            taskENTER_CRITICAL();
            {
                /* Detach the head of the termination list and update the
                 * bookkeeping counts atomically with respect to tasks that
                 * may be deleting themselves concurrently. */
                pxTCB = listGET_OWNER_OF_HEAD_ENTRY( ( &xTasksWaitingTermination ) ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                ( void ) uxListRemove( &( pxTCB->xStateListItem ) );
                --uxCurrentNumberOfTasks;
                --uxDeletedTasksWaitingCleanUp;
            }
            taskEXIT_CRITICAL();

            /* The memory is freed outside the critical section. */
            prvDeleteTCB( pxTCB );
        }
    }
    #endif /* INCLUDE_vTaskDelete */
}
3771 /*-----------------------------------------------------------*/
3772
3773 #if ( configUSE_TRACE_FACILITY == 1 )
3774
    /*
     * Populate *pxTaskStatus with information about xTask (NULL = the calling
     * task).  If eState is eInvalid the task's state is determined here
     * (relatively expensive); otherwise the caller-supplied state is used,
     * corrected for the suspended-vs-blocked and pending-ready cases.  Pass
     * xGetFreeStackSpace as pdFALSE to skip the slow stack high water mark
     * calculation (the field is then reported as 0).
     */
    void vTaskGetInfo( TaskHandle_t xTask,
                       TaskStatus_t * pxTaskStatus,
                       BaseType_t xGetFreeStackSpace,
                       eTaskState eState )
    {
        TCB_t * pxTCB;

        /* If xTask is NULL then get the state of the calling task. */
        pxTCB = prvGetTCBFromHandle( xTask );

        pxTaskStatus->xHandle = ( TaskHandle_t ) pxTCB;
        pxTaskStatus->pcTaskName = ( const char * ) &( pxTCB->pcTaskName[ 0 ] );
        pxTaskStatus->uxCurrentPriority = pxTCB->uxPriority;
        pxTaskStatus->pxStackBase = pxTCB->pxStack;
        #if ( ( portSTACK_GROWTH > 0 ) && ( configRECORD_STACK_HIGH_ADDRESS == 1 ) )
            pxTaskStatus->pxTopOfStack = pxTCB->pxTopOfStack;
            pxTaskStatus->pxEndOfStack = pxTCB->pxEndOfStack;
        #endif
        pxTaskStatus->xTaskNumber = pxTCB->uxTCBNumber;

        #if ( configUSE_MUTEXES == 1 )
        {
            pxTaskStatus->uxBasePriority = pxTCB->uxBasePriority;
        }
        #else
        {
            pxTaskStatus->uxBasePriority = 0;
        }
        #endif

        #if ( configGENERATE_RUN_TIME_STATS == 1 )
        {
            pxTaskStatus->ulRunTimeCounter = pxTCB->ulRunTimeCounter;
        }
        #else
        {
            pxTaskStatus->ulRunTimeCounter = ( configRUN_TIME_COUNTER_TYPE ) 0;
        }
        #endif

        /* Obtaining the task state is a little fiddly, so is only done if the
         * value of eState passed into this function is eInvalid - otherwise the
         * state is just set to whatever is passed in. */
        if( eState != eInvalid )
        {
            if( pxTCB == pxCurrentTCB )
            {
                pxTaskStatus->eCurrentState = eRunning;
            }
            else
            {
                pxTaskStatus->eCurrentState = eState;

                #if ( INCLUDE_vTaskSuspend == 1 )
                {
                    /* If the task is in the suspended list then there is a
                     * chance it is actually just blocked indefinitely - so really
                     * it should be reported as being in the Blocked state.  A
                     * non-NULL event list container indicates the task is also
                     * waiting on an event, i.e. blocked rather than suspended. */
                    if( eState == eSuspended )
                    {
                        vTaskSuspendAll();
                        {
                            if( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) != NULL )
                            {
                                pxTaskStatus->eCurrentState = eBlocked;
                            }
                        }
                        ( void ) xTaskResumeAll();
                    }
                }
                #endif /* INCLUDE_vTaskSuspend */

                /* Tasks can be in pending ready list and other state list at the
                 * same time. These tasks are in ready state no matter what state
                 * list the task is in. */
                taskENTER_CRITICAL();
                {
                    if( listIS_CONTAINED_WITHIN( &xPendingReadyList, &( pxTCB->xEventListItem ) ) != pdFALSE )
                    {
                        pxTaskStatus->eCurrentState = eReady;
                    }
                }
                taskEXIT_CRITICAL();
            }
        }
        else
        {
            pxTaskStatus->eCurrentState = eTaskGetState( pxTCB );
        }

        /* Obtaining the stack space takes some time, so the xGetFreeStackSpace
         * parameter is provided to allow it to be skipped. */
        if( xGetFreeStackSpace != pdFALSE )
        {
            #if ( portSTACK_GROWTH > 0 )
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxEndOfStack );
            }
            #else
            {
                pxTaskStatus->usStackHighWaterMark = prvTaskCheckFreeStackSpace( ( uint8_t * ) pxTCB->pxStack );
            }
            #endif
        }
        else
        {
            pxTaskStatus->usStackHighWaterMark = 0;
        }
    }
3884
3885 #endif /* configUSE_TRACE_FACILITY */
3886 /*-----------------------------------------------------------*/
3887
3888 #if ( configUSE_TRACE_FACILITY == 1 )
3889
    /*
     * Fill consecutive entries of pxTaskStatusArray with status information
     * for every task in pxList, reporting each as being in state eState.
     * Returns the number of entries written.  The caller guarantees the
     * array is large enough for every task in the list.
     */
    static UBaseType_t prvListTasksWithinSingleList( TaskStatus_t * pxTaskStatusArray,
                                                     List_t * pxList,
                                                     eTaskState eState )
    {
        configLIST_VOLATILE TCB_t * pxNextTCB;
        configLIST_VOLATILE TCB_t * pxFirstTCB;
        UBaseType_t uxTask = 0;

        if( listCURRENT_LIST_LENGTH( pxList ) > ( UBaseType_t ) 0 )
        {
            /* Remember the first task so the do/while loop below knows when
             * the list walk has wrapped back to its starting point.  Note
             * listGET_OWNER_OF_NEXT_ENTRY advances the list's internal index
             * as a side effect. */
            listGET_OWNER_OF_NEXT_ENTRY( pxFirstTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */

            /* Populate a TaskStatus_t structure within the
             * pxTaskStatusArray array for each task that is referenced from
             * pxList. See the definition of TaskStatus_t in task.h for the
             * meaning of each TaskStatus_t structure member. */
            do
            {
                listGET_OWNER_OF_NEXT_ENTRY( pxNextTCB, pxList ); /*lint !e9079 void * is used as this macro is used with timers and co-routines too. Alignment is known to be fine as the type of the pointer stored and retrieved is the same. */
                vTaskGetInfo( ( TaskHandle_t ) pxNextTCB, &( pxTaskStatusArray[ uxTask ] ), pdTRUE, eState );
                uxTask++;
            } while( pxNextTCB != pxFirstTCB );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return uxTask;
    }
3920
3921 #endif /* configUSE_TRACE_FACILITY */
3922 /*-----------------------------------------------------------*/
3923
3924 #if ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) )
3925
prvTaskCheckFreeStackSpace(const uint8_t * pucStackByte)3926 static configSTACK_DEPTH_TYPE prvTaskCheckFreeStackSpace( const uint8_t * pucStackByte )
3927 {
3928 uint32_t ulCount = 0U;
3929
3930 while( *pucStackByte == ( uint8_t ) tskSTACK_FILL_BYTE )
3931 {
3932 pucStackByte -= portSTACK_GROWTH;
3933 ulCount++;
3934 }
3935
3936 ulCount /= ( uint32_t ) sizeof( StackType_t ); /*lint !e961 Casting is not redundant on smaller architectures. */
3937
3938 return ( configSTACK_DEPTH_TYPE ) ulCount;
3939 }
3940
3941 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark == 1 ) || ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 ) ) */
3942 /*-----------------------------------------------------------*/
3943
3944 #if ( INCLUDE_uxTaskGetStackHighWaterMark2 == 1 )
3945
3946 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are the
3947 * same except for their return type. Using configSTACK_DEPTH_TYPE allows the
3948 * user to determine the return type. It gets around the problem of the value
3949 * overflowing on 8-bit types without breaking backward compatibility for
3950 * applications that expect an 8-bit return type. */
uxTaskGetStackHighWaterMark2(TaskHandle_t xTask)3951 configSTACK_DEPTH_TYPE uxTaskGetStackHighWaterMark2( TaskHandle_t xTask )
3952 {
3953 TCB_t * pxTCB;
3954 uint8_t * pucEndOfStack;
3955 configSTACK_DEPTH_TYPE uxReturn;
3956
3957 /* uxTaskGetStackHighWaterMark() and uxTaskGetStackHighWaterMark2() are
3958 * the same except for their return type. Using configSTACK_DEPTH_TYPE
3959 * allows the user to determine the return type. It gets around the
3960 * problem of the value overflowing on 8-bit types without breaking
3961 * backward compatibility for applications that expect an 8-bit return
3962 * type. */
3963
3964 pxTCB = prvGetTCBFromHandle( xTask );
3965
3966 #if portSTACK_GROWTH < 0
3967 {
3968 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3969 }
3970 #else
3971 {
3972 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
3973 }
3974 #endif
3975
3976 uxReturn = prvTaskCheckFreeStackSpace( pucEndOfStack );
3977
3978 return uxReturn;
3979 }
3980
3981 #endif /* INCLUDE_uxTaskGetStackHighWaterMark2 */
3982 /*-----------------------------------------------------------*/
3983
3984 #if ( INCLUDE_uxTaskGetStackHighWaterMark == 1 )
3985
uxTaskGetStackHighWaterMark(TaskHandle_t xTask)3986 UBaseType_t uxTaskGetStackHighWaterMark( TaskHandle_t xTask )
3987 {
3988 TCB_t * pxTCB;
3989 uint8_t * pucEndOfStack;
3990 UBaseType_t uxReturn;
3991
3992 pxTCB = prvGetTCBFromHandle( xTask );
3993
3994 #if portSTACK_GROWTH < 0
3995 {
3996 pucEndOfStack = ( uint8_t * ) pxTCB->pxStack;
3997 }
3998 #else
3999 {
4000 pucEndOfStack = ( uint8_t * ) pxTCB->pxEndOfStack;
4001 }
4002 #endif
4003
4004 uxReturn = ( UBaseType_t ) prvTaskCheckFreeStackSpace( pucEndOfStack );
4005
4006 return uxReturn;
4007 }
4008
4009 #endif /* INCLUDE_uxTaskGetStackHighWaterMark */
4010 /*-----------------------------------------------------------*/
4011
4012 #if ( INCLUDE_vTaskDelete == 1 )
4013
prvDeleteTCB(TCB_t * pxTCB)4014 static void prvDeleteTCB( TCB_t * pxTCB )
4015 {
4016 /* This call is required specifically for the TriCore port. It must be
4017 * above the vPortFree() calls. The call is also used by ports/demos that
4018 * want to allocate and clean RAM statically. */
4019 portCLEAN_UP_TCB( pxTCB );
4020
4021 #if ( configUSE_C_RUNTIME_TLS_SUPPORT == 1 )
4022 {
4023 /* Free up the memory allocated for the task's TLS Block. */
4024 configDEINIT_TLS_BLOCK( pxCurrentTCB->xTLSBlock );
4025 }
4026 #endif
4027
4028 #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) && ( portUSING_MPU_WRAPPERS == 0 ) )
4029 {
4030 /* The task can only have been allocated dynamically - free both
4031 * the stack and TCB. */
4032 vPortFreeStack( pxTCB->pxStack );
4033 vPortFree( pxTCB );
4034 }
4035 #elif ( tskSTATIC_AND_DYNAMIC_ALLOCATION_POSSIBLE != 0 ) /*lint !e731 !e9029 Macro has been consolidated for readability reasons. */
4036 {
4037 /* The task could have been allocated statically or dynamically, so
4038 * check what was statically allocated before trying to free the
4039 * memory. */
4040 if( pxTCB->ucStaticallyAllocated == tskDYNAMICALLY_ALLOCATED_STACK_AND_TCB )
4041 {
4042 /* Both the stack and TCB were allocated dynamically, so both
4043 * must be freed. */
4044 vPortFreeStack( pxTCB->pxStack );
4045 vPortFree( pxTCB );
4046 }
4047 else if( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_ONLY )
4048 {
4049 /* Only the stack was statically allocated, so the TCB is the
4050 * only memory that must be freed. */
4051 vPortFree( pxTCB );
4052 }
4053 else
4054 {
4055 /* Neither the stack nor the TCB were allocated dynamically, so
4056 * nothing needs to be freed. */
4057 configASSERT( pxTCB->ucStaticallyAllocated == tskSTATICALLY_ALLOCATED_STACK_AND_TCB );
4058 mtCOVERAGE_TEST_MARKER();
4059 }
4060 }
4061 #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
4062 }
4063
4064 #endif /* INCLUDE_vTaskDelete */
4065 /*-----------------------------------------------------------*/
4066
prvResetNextTaskUnblockTime(void)4067 static void prvResetNextTaskUnblockTime( void )
4068 {
4069 if( listLIST_IS_EMPTY( pxDelayedTaskList ) != pdFALSE )
4070 {
4071 /* The new current delayed list is empty. Set xNextTaskUnblockTime to
4072 * the maximum possible value so it is extremely unlikely that the
4073 * if( xTickCount >= xNextTaskUnblockTime ) test will pass until
4074 * there is an item in the delayed list. */
4075 xNextTaskUnblockTime = portMAX_DELAY;
4076 }
4077 else
4078 {
4079 /* The new current delayed list is not empty, get the value of
4080 * the item at the head of the delayed list. This is the time at
4081 * which the task at the head of the delayed list should be removed
4082 * from the Blocked state. */
4083 xNextTaskUnblockTime = listGET_ITEM_VALUE_OF_HEAD_ENTRY( pxDelayedTaskList );
4084 }
4085 }
4086 /*-----------------------------------------------------------*/
4087
4088 #if ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) )
4089
xTaskGetCurrentTaskHandle(void)4090 TaskHandle_t xTaskGetCurrentTaskHandle( void )
4091 {
4092 TaskHandle_t xReturn;
4093
4094 /* A critical section is not required as this is not called from
4095 * an interrupt and the current TCB will always be the same for any
4096 * individual execution thread. */
4097 xReturn = pxCurrentTCB;
4098
4099 return xReturn;
4100 }
4101
4102 #endif /* ( ( INCLUDE_xTaskGetCurrentTaskHandle == 1 ) || ( configUSE_MUTEXES == 1 ) ) */
4103 /*-----------------------------------------------------------*/
4104
4105 #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
4106
xTaskGetSchedulerState(void)4107 BaseType_t xTaskGetSchedulerState( void )
4108 {
4109 BaseType_t xReturn;
4110
4111 if( xSchedulerRunning == pdFALSE )
4112 {
4113 xReturn = taskSCHEDULER_NOT_STARTED;
4114 }
4115 else
4116 {
4117 if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
4118 {
4119 xReturn = taskSCHEDULER_RUNNING;
4120 }
4121 else
4122 {
4123 xReturn = taskSCHEDULER_SUSPENDED;
4124 }
4125 }
4126
4127 return xReturn;
4128 }
4129
4130 #endif /* ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) ) */
4131 /*-----------------------------------------------------------*/
4132
4133 #if ( configUSE_MUTEXES == 1 )
4134
    /*
     * Raise the priority of the task holding a mutex to that of the (higher
     * priority) calling task that is attempting to take the mutex.  Returns
     * pdTRUE if priority inheritance is in effect (either applied here or
     * already applied previously), otherwise pdFALSE.  Called with the
     * mutex's queue locked.
     */
    BaseType_t xTaskPriorityInherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxMutexHolderTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        /* If the mutex was given back by an interrupt while the queue was
         * locked then the mutex holder might now be NULL. _RB_ Is this still
         * needed as interrupts can no longer use mutexes? */
        if( pxMutexHolder != NULL )
        {
            /* If the holder of the mutex has a priority below the priority of
             * the task attempting to obtain the mutex then it will temporarily
             * inherit the priority of the task attempting to obtain the mutex. */
            if( pxMutexHolderTCB->uxPriority < pxCurrentTCB->uxPriority )
            {
                /* Adjust the mutex holder state to account for its new
                 * priority. Only reset the event list item value if the value is
                 * not being used for anything else. */
                if( ( listGET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                {
                    listSET_LIST_ITEM_VALUE( &( pxMutexHolderTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* If the task being modified is in the ready state it will need
                 * to be moved into a new list - there is one ready list per
                 * priority. */
                if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ pxMutexHolderTCB->uxPriority ] ), &( pxMutexHolderTCB->xStateListItem ) ) != pdFALSE )
                {
                    if( uxListRemove( &( pxMutexHolderTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        /* It is known that the task is in its ready list so
                         * there is no need to check again and the port level
                         * reset macro can be called directly. */
                        portRESET_READY_PRIORITY( pxMutexHolderTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Inherit the priority before being moved into the new list. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                    prvAddTaskToReadyList( pxMutexHolderTCB );
                }
                else
                {
                    /* Just inherit the priority. */
                    pxMutexHolderTCB->uxPriority = pxCurrentTCB->uxPriority;
                }

                traceTASK_PRIORITY_INHERIT( pxMutexHolderTCB, pxCurrentTCB->uxPriority );

                /* Inheritance occurred. */
                xReturn = pdTRUE;
            }
            else
            {
                if( pxMutexHolderTCB->uxBasePriority < pxCurrentTCB->uxPriority )
                {
                    /* The base priority of the mutex holder is lower than the
                     * priority of the task attempting to take the mutex, but the
                     * current priority of the mutex holder is not lower than the
                     * priority of the task attempting to take the mutex.
                     * Therefore the mutex holder must have already inherited a
                     * priority, but inheritance would have occurred if that had
                     * not been the case. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }
4219
4220 #endif /* configUSE_MUTEXES */
4221 /*-----------------------------------------------------------*/
4222
4223 #if ( configUSE_MUTEXES == 1 )
4224
    /*
     * Restore the base priority of a task that is giving back a mutex while
     * holding an inherited priority.  Disinheritance only happens when the
     * last held mutex is released.  Returns pdTRUE if a context switch is
     * required as a result.  Must be called by the mutex holder itself.
     */
    BaseType_t xTaskPriorityDisinherit( TaskHandle_t const pxMutexHolder )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        BaseType_t xReturn = pdFALSE;

        if( pxMutexHolder != NULL )
        {
            /* A task can only have an inherited priority if it holds the mutex.
             * If the mutex is held by a task then it cannot be given from an
             * interrupt, and if a mutex is given by the holding task then it must
             * be the running state task. */
            configASSERT( pxTCB == pxCurrentTCB );
            configASSERT( pxTCB->uxMutexesHeld );
            ( pxTCB->uxMutexesHeld )--;

            /* Has the holder of the mutex inherited the priority of another
             * task? */
            if( pxTCB->uxPriority != pxTCB->uxBasePriority )
            {
                /* Only disinherit if no other mutexes are held. */
                if( pxTCB->uxMutexesHeld == ( UBaseType_t ) 0 )
                {
                    /* A task can only have an inherited priority if it holds
                     * the mutex. If the mutex is held by a task then it cannot be
                     * given from an interrupt, and if a mutex is given by the
                     * holding task then it must be the running state task. Remove
                     * the holding task from the ready list. */
                    if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                    {
                        portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* Disinherit the priority before adding the task into the
                     * new ready list. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, pxTCB->uxBasePriority );
                    pxTCB->uxPriority = pxTCB->uxBasePriority;

                    /* Reset the event list item value. It cannot be in use for
                     * any other purpose if this task is running, and it must be
                     * running to give back the mutex. */
                    listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxTCB->uxPriority ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    prvAddTaskToReadyList( pxTCB );

                    /* Return true to indicate that a context switch is required.
                     * This is only actually required in the corner case whereby
                     * multiple mutexes were held and the mutexes were given back
                     * in an order different to that in which they were taken.
                     * If a context switch did not occur when the first mutex was
                     * returned, even if a task was waiting on it, then a context
                     * switch should occur when the last mutex is returned whether
                     * a task is waiting on it or not. */
                    xReturn = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        return xReturn;
    }
4299
4300 #endif /* configUSE_MUTEXES */
4301 /*-----------------------------------------------------------*/
4302
4303 #if ( configUSE_MUTEXES == 1 )
4304
    /*
     * Adjust the priority of a mutex holder after a task waiting for the
     * mutex timed out.  The holder's priority is lowered to the greater of
     * its own base priority and the priority of the highest priority task
     * still waiting for the mutex.  Unlike xTaskPriorityDisinherit(), the
     * holder is NOT the running task here, so its state list membership must
     * be checked before it is moved.
     */
    void vTaskPriorityDisinheritAfterTimeout( TaskHandle_t const pxMutexHolder,
                                              UBaseType_t uxHighestPriorityWaitingTask )
    {
        TCB_t * const pxTCB = pxMutexHolder;
        UBaseType_t uxPriorityUsedOnEntry, uxPriorityToUse;
        const UBaseType_t uxOnlyOneMutexHeld = ( UBaseType_t ) 1;

        if( pxMutexHolder != NULL )
        {
            /* If pxMutexHolder is not NULL then the holder must hold at least
             * one mutex. */
            configASSERT( pxTCB->uxMutexesHeld );

            /* Determine the priority to which the priority of the task that
             * holds the mutex should be set. This will be the greater of the
             * holding task's base priority and the priority of the highest
             * priority task that is waiting to obtain the mutex. */
            if( pxTCB->uxBasePriority < uxHighestPriorityWaitingTask )
            {
                uxPriorityToUse = uxHighestPriorityWaitingTask;
            }
            else
            {
                uxPriorityToUse = pxTCB->uxBasePriority;
            }

            /* Does the priority need to change? */
            if( pxTCB->uxPriority != uxPriorityToUse )
            {
                /* Only disinherit if no other mutexes are held. This is a
                 * simplification in the priority inheritance implementation. If
                 * the task that holds the mutex is also holding other mutexes then
                 * the other mutexes may have caused the priority inheritance. */
                if( pxTCB->uxMutexesHeld == uxOnlyOneMutexHeld )
                {
                    /* If a task has timed out because it already holds the
                     * mutex it was trying to obtain then it cannot have inherited
                     * its own priority. */
                    configASSERT( pxTCB != pxCurrentTCB );

                    /* Disinherit the priority, remembering the previous
                     * priority to facilitate determining the subject task's
                     * state. */
                    traceTASK_PRIORITY_DISINHERIT( pxTCB, uxPriorityToUse );
                    uxPriorityUsedOnEntry = pxTCB->uxPriority;
                    pxTCB->uxPriority = uxPriorityToUse;

                    /* Only reset the event list item value if the value is not
                     * being used for anything else. */
                    if( ( listGET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ) ) & taskEVENT_LIST_ITEM_VALUE_IN_USE ) == 0UL )
                    {
                        listSET_LIST_ITEM_VALUE( &( pxTCB->xEventListItem ), ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) uxPriorityToUse ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }

                    /* If the running task is not the task that holds the mutex
                     * then the task that holds the mutex could be in either the
                     * Ready, Blocked or Suspended states. Only remove the task
                     * from its current state list if it is in the Ready state as
                     * the task's priority is going to change and there is one
                     * Ready list per priority. */
                    if( listIS_CONTAINED_WITHIN( &( pxReadyTasksLists[ uxPriorityUsedOnEntry ] ), &( pxTCB->xStateListItem ) ) != pdFALSE )
                    {
                        if( uxListRemove( &( pxTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
                        {
                            /* It is known that the task is in its ready list so
                             * there is no need to check again and the port level
                             * reset macro can be called directly. */
                            portRESET_READY_PRIORITY( pxTCB->uxPriority, uxTopReadyPriority );
                        }
                        else
                        {
                            mtCOVERAGE_TEST_MARKER();
                        }

                        prvAddTaskToReadyList( pxTCB );
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
4405
4406 #endif /* configUSE_MUTEXES */
4407 /*-----------------------------------------------------------*/
4408
4409 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4410
    /*
     * Enter a critical section, keeping the nesting count in the current
     * task's TCB.  Interrupts are disabled first so the increment of the
     * nesting count cannot be interrupted.  Before the scheduler is started
     * no count is kept (there is no current TCB to keep it in) - interrupts
     * simply stay disabled.
     */
    void vTaskEnterCritical( void )
    {
        portDISABLE_INTERRUPTS();

        if( xSchedulerRunning != pdFALSE )
        {
            ( pxCurrentTCB->uxCriticalNesting )++;

            /* This is not the interrupt safe version of the enter critical
             * function so assert() if it is being called from an interrupt
             * context. Only API functions that end in "FromISR" can be used in an
             * interrupt. Only assert if the critical nesting count is 1 to
             * protect against recursive calls if the assert function also uses a
             * critical section. */
            if( pxCurrentTCB->uxCriticalNesting == 1 )
            {
                portASSERT_IF_IN_ISR();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
4435
4436 #endif /* portCRITICAL_NESTING_IN_TCB */
4437 /*-----------------------------------------------------------*/
4438
4439 #if ( portCRITICAL_NESTING_IN_TCB == 1 )
4440
    /*
     * Exit a critical section previously entered with vTaskEnterCritical() on
     * ports that keep the critical nesting count in the TCB.  The nesting
     * count is decremented and interrupts are only re-enabled when the count
     * reaches zero - so nested critical sections unwind safely.  Calls made
     * before the scheduler is started are ignored.
     */
    void vTaskExitCritical( void )
    {
        if( xSchedulerRunning != pdFALSE )
        {
            /* Guard against unmatched exit calls - only decrement if the task
             * is actually inside a critical section. */
            if( pxCurrentTCB->uxCriticalNesting > 0U )
            {
                ( pxCurrentTCB->uxCriticalNesting )--;

                /* Interrupts are re-enabled only when the outermost critical
                 * section is exited. */
                if( pxCurrentTCB->uxCriticalNesting == 0U )
                {
                    portENABLE_INTERRUPTS();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
4468
4469 #endif /* portCRITICAL_NESTING_IN_TCB */
4470 /*-----------------------------------------------------------*/
4471
4472 #if ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 )
4473
prvWriteNameToBuffer(char * pcBuffer,const char * pcTaskName)4474 static char * prvWriteNameToBuffer( char * pcBuffer,
4475 const char * pcTaskName )
4476 {
4477 size_t x;
4478
4479 /* Start by copying the entire string. */
4480 strcpy( pcBuffer, pcTaskName );
4481
4482 /* Pad the end of the string with spaces to ensure columns line up when
4483 * printed out. */
4484 for( x = strlen( pcBuffer ); x < ( size_t ) ( configMAX_TASK_NAME_LEN - 1 ); x++ )
4485 {
4486 pcBuffer[ x ] = ' ';
4487 }
4488
4489 /* Terminate. */
4490 pcBuffer[ x ] = ( char ) 0x00;
4491
4492 /* Return the new end of string. */
4493 return &( pcBuffer[ x ] );
4494 }
4495
4496 #endif /* ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) */
4497 /*-----------------------------------------------------------*/
4498
4499 #if ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) )
4500
vTaskList(char * pcWriteBuffer)4501 void vTaskList( char * pcWriteBuffer )
4502 {
4503 TaskStatus_t * pxTaskStatusArray;
4504 UBaseType_t uxArraySize, x;
4505 char cStatus;
4506
4507 /*
4508 * PLEASE NOTE:
4509 *
4510 * This function is provided for convenience only, and is used by many
4511 * of the demo applications. Do not consider it to be part of the
4512 * scheduler.
4513 *
4514 * vTaskList() calls uxTaskGetSystemState(), then formats part of the
4515 * uxTaskGetSystemState() output into a human readable table that
4516 * displays task: names, states, priority, stack usage and task number.
4517 * Stack usage specified as the number of unused StackType_t words stack can hold
4518 * on top of stack - not the number of bytes.
4519 *
4520 * vTaskList() has a dependency on the sprintf() C library function that
4521 * might bloat the code size, use a lot of stack, and provide different
4522 * results on different platforms. An alternative, tiny, third party,
4523 * and limited functionality implementation of sprintf() is provided in
4524 * many of the FreeRTOS/Demo sub-directories in a file called
4525 * printf-stdarg.c (note printf-stdarg.c does not provide a full
4526 * snprintf() implementation!).
4527 *
4528 * It is recommended that production systems call uxTaskGetSystemState()
4529 * directly to get access to raw stats data, rather than indirectly
4530 * through a call to vTaskList().
4531 */
4532
4533
4534 /* Make sure the write buffer does not contain a string. */
4535 *pcWriteBuffer = ( char ) 0x00;
4536
4537 /* Take a snapshot of the number of tasks in case it changes while this
4538 * function is executing. */
4539 uxArraySize = uxCurrentNumberOfTasks;
4540
4541 /* Allocate an array index for each task. NOTE! if
4542 * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4543 * equate to NULL. */
4544 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4545
4546 if( pxTaskStatusArray != NULL )
4547 {
4548 /* Generate the (binary) data. */
4549 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, NULL );
4550
4551 /* Create a human readable table from the binary data. */
4552 for( x = 0; x < uxArraySize; x++ )
4553 {
4554 switch( pxTaskStatusArray[ x ].eCurrentState )
4555 {
4556 case eRunning:
4557 cStatus = tskRUNNING_CHAR;
4558 break;
4559
4560 case eReady:
4561 cStatus = tskREADY_CHAR;
4562 break;
4563
4564 case eBlocked:
4565 cStatus = tskBLOCKED_CHAR;
4566 break;
4567
4568 case eSuspended:
4569 cStatus = tskSUSPENDED_CHAR;
4570 break;
4571
4572 case eDeleted:
4573 cStatus = tskDELETED_CHAR;
4574 break;
4575
4576 case eInvalid: /* Fall through. */
4577 default: /* Should not get here, but it is included
4578 * to prevent static checking errors. */
4579 cStatus = ( char ) 0x00;
4580 break;
4581 }
4582
4583 /* Write the task name to the string, padding with spaces so it
4584 * can be printed in tabular form more easily. */
4585 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4586
4587 /* Write the rest of the string. */
4588 sprintf( pcWriteBuffer, "\t%c\t%u\t%u\t%u\r\n", cStatus, ( unsigned int ) pxTaskStatusArray[ x ].uxCurrentPriority, ( unsigned int ) pxTaskStatusArray[ x ].usStackHighWaterMark, ( unsigned int ) pxTaskStatusArray[ x ].xTaskNumber ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4589 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4590 }
4591
4592 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4593 * is 0 then vPortFree() will be #defined to nothing. */
4594 vPortFree( pxTaskStatusArray );
4595 }
4596 else
4597 {
4598 mtCOVERAGE_TEST_MARKER();
4599 }
4600 }
4601
4602 #endif /* ( ( configUSE_TRACE_FACILITY == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4603 /*----------------------------------------------------------*/
4604
4605 #if ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) && ( configUSE_TRACE_FACILITY == 1 ) )
4606
vTaskGetRunTimeStats(char * pcWriteBuffer)4607 void vTaskGetRunTimeStats( char * pcWriteBuffer )
4608 {
4609 TaskStatus_t * pxTaskStatusArray;
4610 UBaseType_t uxArraySize, x;
4611 configRUN_TIME_COUNTER_TYPE ulTotalTime, ulStatsAsPercentage;
4612
4613 /*
4614 * PLEASE NOTE:
4615 *
4616 * This function is provided for convenience only, and is used by many
4617 * of the demo applications. Do not consider it to be part of the
4618 * scheduler.
4619 *
4620 * vTaskGetRunTimeStats() calls uxTaskGetSystemState(), then formats part
4621 * of the uxTaskGetSystemState() output into a human readable table that
4622 * displays the amount of time each task has spent in the Running state
4623 * in both absolute and percentage terms.
4624 *
4625 * vTaskGetRunTimeStats() has a dependency on the sprintf() C library
4626 * function that might bloat the code size, use a lot of stack, and
4627 * provide different results on different platforms. An alternative,
4628 * tiny, third party, and limited functionality implementation of
4629 * sprintf() is provided in many of the FreeRTOS/Demo sub-directories in
4630 * a file called printf-stdarg.c (note printf-stdarg.c does not provide
4631 * a full snprintf() implementation!).
4632 *
4633 * It is recommended that production systems call uxTaskGetSystemState()
4634 * directly to get access to raw stats data, rather than indirectly
4635 * through a call to vTaskGetRunTimeStats().
4636 */
4637
4638 /* Make sure the write buffer does not contain a string. */
4639 *pcWriteBuffer = ( char ) 0x00;
4640
4641 /* Take a snapshot of the number of tasks in case it changes while this
4642 * function is executing. */
4643 uxArraySize = uxCurrentNumberOfTasks;
4644
4645 /* Allocate an array index for each task. NOTE! If
4646 * configSUPPORT_DYNAMIC_ALLOCATION is set to 0 then pvPortMalloc() will
4647 * equate to NULL. */
4648 pxTaskStatusArray = pvPortMalloc( uxCurrentNumberOfTasks * sizeof( TaskStatus_t ) ); /*lint !e9079 All values returned by pvPortMalloc() have at least the alignment required by the MCU's stack and this allocation allocates a struct that has the alignment requirements of a pointer. */
4649
4650 if( pxTaskStatusArray != NULL )
4651 {
4652 /* Generate the (binary) data. */
4653 uxArraySize = uxTaskGetSystemState( pxTaskStatusArray, uxArraySize, &ulTotalTime );
4654
4655 /* For percentage calculations. */
4656 ulTotalTime /= 100UL;
4657
4658 /* Avoid divide by zero errors. */
4659 if( ulTotalTime > 0UL )
4660 {
4661 /* Create a human readable table from the binary data. */
4662 for( x = 0; x < uxArraySize; x++ )
4663 {
4664 /* What percentage of the total run time has the task used?
4665 * This will always be rounded down to the nearest integer.
4666 * ulTotalRunTime has already been divided by 100. */
4667 ulStatsAsPercentage = pxTaskStatusArray[ x ].ulRunTimeCounter / ulTotalTime;
4668
4669 /* Write the task name to the string, padding with
4670 * spaces so it can be printed in tabular form more
4671 * easily. */
4672 pcWriteBuffer = prvWriteNameToBuffer( pcWriteBuffer, pxTaskStatusArray[ x ].pcTaskName );
4673
4674 if( ulStatsAsPercentage > 0UL )
4675 {
4676 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4677 {
4678 sprintf( pcWriteBuffer, "\t%lu\t\t%lu%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter, ulStatsAsPercentage );
4679 }
4680 #else
4681 {
4682 /* sizeof( int ) == sizeof( long ) so a smaller
4683 * printf() library can be used. */
4684 sprintf( pcWriteBuffer, "\t%u\t\t%u%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter, ( unsigned int ) ulStatsAsPercentage ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4685 }
4686 #endif
4687 }
4688 else
4689 {
4690 /* If the percentage is zero here then the task has
4691 * consumed less than 1% of the total run time. */
4692 #ifdef portLU_PRINTF_SPECIFIER_REQUIRED
4693 {
4694 sprintf( pcWriteBuffer, "\t%lu\t\t<1%%\r\n", pxTaskStatusArray[ x ].ulRunTimeCounter );
4695 }
4696 #else
4697 {
4698 /* sizeof( int ) == sizeof( long ) so a smaller
4699 * printf() library can be used. */
4700 sprintf( pcWriteBuffer, "\t%u\t\t<1%%\r\n", ( unsigned int ) pxTaskStatusArray[ x ].ulRunTimeCounter ); /*lint !e586 sprintf() allowed as this is compiled with many compilers and this is a utility function only - not part of the core kernel implementation. */
4701 }
4702 #endif
4703 }
4704
4705 pcWriteBuffer += strlen( pcWriteBuffer ); /*lint !e9016 Pointer arithmetic ok on char pointers especially as in this case where it best denotes the intent of the code. */
4706 }
4707 }
4708 else
4709 {
4710 mtCOVERAGE_TEST_MARKER();
4711 }
4712
4713 /* Free the array again. NOTE! If configSUPPORT_DYNAMIC_ALLOCATION
4714 * is 0 then vPortFree() will be #defined to nothing. */
4715 vPortFree( pxTaskStatusArray );
4716 }
4717 else
4718 {
4719 mtCOVERAGE_TEST_MARKER();
4720 }
4721 }
4722
4723 #endif /* ( ( configGENERATE_RUN_TIME_STATS == 1 ) && ( configUSE_STATS_FORMATTING_FUNCTIONS > 0 ) ) */
4724 /*-----------------------------------------------------------*/
4725
uxTaskResetEventItemValue(void)4726 TickType_t uxTaskResetEventItemValue( void )
4727 {
4728 TickType_t uxReturn;
4729
4730 uxReturn = listGET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ) );
4731
4732 /* Reset the event list item to its normal value - so it can be used with
4733 * queues and semaphores. */
4734 listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xEventListItem ), ( ( TickType_t ) configMAX_PRIORITIES - ( TickType_t ) pxCurrentTCB->uxPriority ) ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
4735
4736 return uxReturn;
4737 }
4738 /*-----------------------------------------------------------*/
4739
4740 #if ( configUSE_MUTEXES == 1 )
4741
pvTaskIncrementMutexHeldCount(void)4742 TaskHandle_t pvTaskIncrementMutexHeldCount( void )
4743 {
4744 /* If xSemaphoreCreateMutex() is called before any tasks have been created
4745 * then pxCurrentTCB will be NULL. */
4746 if( pxCurrentTCB != NULL )
4747 {
4748 ( pxCurrentTCB->uxMutexesHeld )++;
4749 }
4750
4751 return pxCurrentTCB;
4752 }
4753
4754 #endif /* configUSE_MUTEXES */
4755 /*-----------------------------------------------------------*/
4756
4757 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
4758
    /*
     * Wait for the notification at array index uxIndexToWait to become
     * non-zero, using the notification value as a counting semaphore's count.
     * If the value is already non-zero the task does not block; otherwise it
     * blocks for up to xTicksToWait ticks.  On exit a non-zero value is
     * either cleared to zero (xClearCountOnExit != pdFALSE, binary semaphore
     * style) or decremented by one (counting semaphore style).  Returns the
     * notification value as it was before being cleared/decremented - so a
     * return of zero means the wait timed out.
     */
    uint32_t ulTaskGenericNotifyTake( UBaseType_t uxIndexToWait,
                                      BaseType_t xClearCountOnExit,
                                      TickType_t xTicksToWait )
    {
        uint32_t ulReturn;

        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();
        {
            /* Only block if the notification count is not already non-zero. */
            if( pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] == 0UL )
            {
                /* Mark this task as waiting for a notification. */
                pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                    traceTASK_NOTIFY_TAKE_BLOCK( uxIndexToWait );

                    /* All ports are written to allow a yield in a critical
                     * section (some will yield immediately, others wait until the
                     * critical section exits) - but it is not something that
                     * application code should ever do. */
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        /* Execution reaches here either immediately (a count was already
         * pending or no block time was given) or after the task was notified
         * or the block time expired. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_TAKE( uxIndexToWait );
            ulReturn = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];

            if( ulReturn != 0UL )
            {
                if( xClearCountOnExit != pdFALSE )
                {
                    /* Binary semaphore behaviour - consume the whole count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = 0UL;
                }
                else
                {
                    /* Counting semaphore behaviour - consume a single count. */
                    pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] = ulReturn - ( uint32_t ) 1;
                }
            }
            else
            {
                /* Zero here means the block time expired without a
                 * notification arriving. */
                mtCOVERAGE_TEST_MARKER();
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        return ulReturn;
    }
4825
4826 #endif /* configUSE_TASK_NOTIFICATIONS */
4827 /*-----------------------------------------------------------*/
4828
4829 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
4830
    /*
     * Wait for a notification to be pending at array index uxIndexToWait.
     * ulBitsToClearOnEntry is cleared from the notification value before
     * waiting; ulBitsToClearOnExit is cleared after a notification has been
     * received.  If pulNotificationValue is non-NULL it receives the
     * notification value as it was before the exit bits were cleared.
     * Returns pdTRUE if a notification was received (either already pending
     * or while waiting), pdFALSE if the xTicksToWait block time expired.
     */
    BaseType_t xTaskGenericNotifyWait( UBaseType_t uxIndexToWait,
                                       uint32_t ulBitsToClearOnEntry,
                                       uint32_t ulBitsToClearOnExit,
                                       uint32_t * pulNotificationValue,
                                       TickType_t xTicksToWait )
    {
        BaseType_t xReturn;

        configASSERT( uxIndexToWait < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        taskENTER_CRITICAL();
        {
            /* Only block if a notification is not already pending. */
            if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
            {
                /* Clear bits in the task's notification value as bits may get
                 * set by the notifying task or interrupt.  This can be used to
                 * clear the value to zero. */
                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnEntry;

                /* Mark this task as waiting for a notification. */
                pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskWAITING_NOTIFICATION;

                if( xTicksToWait > ( TickType_t ) 0 )
                {
                    prvAddCurrentTaskToDelayedList( xTicksToWait, pdTRUE );
                    traceTASK_NOTIFY_WAIT_BLOCK( uxIndexToWait );

                    /* All ports are written to allow a yield in a critical
                     * section (some will yield immediately, others wait until the
                     * critical section exits) - but it is not something that
                     * application code should ever do. */
                    portYIELD_WITHIN_API();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        /* Execution reaches here either immediately (a notification was
         * already pending or no block time was given) or after the task was
         * notified or the block time expired. */
        taskENTER_CRITICAL();
        {
            traceTASK_NOTIFY_WAIT( uxIndexToWait );

            if( pulNotificationValue != NULL )
            {
                /* Output the current notification value, which may or may not
                 * have changed. */
                *pulNotificationValue = pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ];
            }

            /* If ucNotifyValue is set then either the task never entered the
             * blocked state (because a notification was already pending) or the
             * task unblocked because of a notification.  Otherwise the task
             * unblocked because of a timeout. */
            if( pxCurrentTCB->ucNotifyState[ uxIndexToWait ] != taskNOTIFICATION_RECEIVED )
            {
                /* A notification was not received. */
                xReturn = pdFALSE;
            }
            else
            {
                /* A notification was already pending or a notification was
                 * received while the task was waiting. */
                pxCurrentTCB->ulNotifiedValue[ uxIndexToWait ] &= ~ulBitsToClearOnExit;
                xReturn = pdTRUE;
            }

            pxCurrentTCB->ucNotifyState[ uxIndexToWait ] = taskNOT_WAITING_NOTIFICATION;
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }
4911
4912 #endif /* configUSE_TASK_NOTIFICATIONS */
4913 /*-----------------------------------------------------------*/
4914
4915 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
4916
    /*
     * Send a notification to xTaskToNotify at array index uxIndexToNotify.
     * eAction selects how ulValue is applied to the target's notification
     * value: OR in bits, increment, overwrite, write only if no notification
     * is pending, or leave the value untouched.  If pulPreviousNotificationValue
     * is non-NULL it receives the value as it was before this call modified
     * it.  A target task that was blocked waiting on this index is made Ready
     * and a yield is requested if it has the higher priority.  Returns pdPASS,
     * except when eAction is eSetValueWithoutOverwrite and a notification was
     * already pending, in which case pdFAIL is returned.
     */
    BaseType_t xTaskGenericNotify( TaskHandle_t xTaskToNotify,
                                   UBaseType_t uxIndexToNotify,
                                   uint32_t ulValue,
                                   eNotifyAction eAction,
                                   uint32_t * pulPreviousNotificationValue )
    {
        TCB_t * pxTCB;
        BaseType_t xReturn = pdPASS;
        uint8_t ucOriginalNotifyState;

        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );
        configASSERT( xTaskToNotify );
        pxTCB = xTaskToNotify;

        taskENTER_CRITICAL();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Report the value before this notification modifies it. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* The original state is needed below both to decide whether
             * eSetValueWithoutOverwrite may write and whether the target task
             * must be unblocked. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];

            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );

                    break;
            }

            traceTASK_NOTIFY( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                prvAddTaskToReadyList( pxTCB );

                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                #if ( configUSE_TICKLESS_IDLE != 0 )
                {
                    /* If a task is blocked waiting for a notification then
                     * xNextTaskUnblockTime might be set to the blocked task's time
                     * out time.  If the task is unblocked for a reason other than
                     * a timeout xNextTaskUnblockTime is normally left unchanged,
                     * because it will automatically get reset to a new value when
                     * the tick count equals xNextTaskUnblockTime.  However if
                     * tickless idling is used it might be more important to enter
                     * sleep mode at the earliest possible time - so reset
                     * xNextTaskUnblockTime here to ensure it is updated at the
                     * earliest possible time. */
                    prvResetNextTaskUnblockTime();
                }
                #endif

                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                {
                    /* The notified task has a priority above the currently
                     * executing task so a yield is required. */
                    taskYIELD_IF_USING_PREEMPTION();
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        taskEXIT_CRITICAL();

        return xReturn;
    }
5034
5035 #endif /* configUSE_TASK_NOTIFICATIONS */
5036 /*-----------------------------------------------------------*/
5037
5038 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5039
    /*
     * Interrupt safe version of xTaskGenericNotify().  Performs the same
     * notification actions but from an ISR: interrupts are masked with
     * portSET_INTERRUPT_MASK_FROM_ISR() instead of entering a critical
     * section, and if the scheduler is suspended the unblocked task is held
     * on xPendingReadyList until the scheduler resumes.  If the notified
     * task has a higher priority than the interrupted task a context switch
     * is requested via *pxHigherPriorityTaskWoken (when non-NULL) and
     * xYieldPending.  Return value semantics match xTaskGenericNotify().
     */
    BaseType_t xTaskGenericNotifyFromISR( TaskHandle_t xTaskToNotify,
                                          UBaseType_t uxIndexToNotify,
                                          uint32_t ulValue,
                                          eNotifyAction eAction,
                                          uint32_t * pulPreviousNotificationValue,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        BaseType_t xReturn = pdPASS;
        UBaseType_t uxSavedInterruptStatus;

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum  system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt  safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following  link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
        {
            if( pulPreviousNotificationValue != NULL )
            {
                /* Report the value before this notification modifies it. */
                *pulPreviousNotificationValue = pxTCB->ulNotifiedValue[ uxIndexToNotify ];
            }

            /* The original state is needed below both to decide whether
             * eSetValueWithoutOverwrite may write and whether the target task
             * must be unblocked. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            switch( eAction )
            {
                case eSetBits:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] |= ulValue;
                    break;

                case eIncrement:
                    ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;
                    break;

                case eSetValueWithOverwrite:
                    pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    break;

                case eSetValueWithoutOverwrite:

                    if( ucOriginalNotifyState != taskNOTIFICATION_RECEIVED )
                    {
                        pxTCB->ulNotifiedValue[ uxIndexToNotify ] = ulValue;
                    }
                    else
                    {
                        /* The value could not be written to the task. */
                        xReturn = pdFAIL;
                    }

                    break;

                case eNoAction:

                    /* The task is being notified without its notify value being
                     * updated. */
                    break;

                default:

                    /* Should not get here if all enums are handled.
                     * Artificially force an assert by testing a value the
                     * compiler can't assume is const. */
                    configASSERT( xTickCount == ( TickType_t ) 0 );
                    break;
            }

            traceTASK_NOTIFY_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                {
                    /* The notified task has a priority above the currently
                     * executing task so a yield is required. */
                    if( pxHigherPriorityTaskWoken != NULL )
                    {
                        *pxHigherPriorityTaskWoken = pdTRUE;
                    }

                    /* Mark that a yield is pending in case the user is not
                     * using the "xHigherPriorityTaskWoken" parameter to an ISR
                     * safe FreeRTOS function. */
                    xYieldPending = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

        return xReturn;
    }
5173
5174 #endif /* configUSE_TASK_NOTIFICATIONS */
5175 /*-----------------------------------------------------------*/
5176
5177 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5178
    /*
     * Interrupt safe "give": increment the notification value of
     * xTaskToNotify at array index uxIndexToNotify, equivalent to giving a
     * counting semaphore.  If the target task was blocked waiting on this
     * index it is made Ready (or held on xPendingReadyList while the
     * scheduler is suspended), and *pxHigherPriorityTaskWoken (when
     * non-NULL) plus xYieldPending are set if the target has a higher
     * priority than the interrupted task.
     */
    void vTaskGenericNotifyGiveFromISR( TaskHandle_t xTaskToNotify,
                                        UBaseType_t uxIndexToNotify,
                                        BaseType_t * pxHigherPriorityTaskWoken )
    {
        TCB_t * pxTCB;
        uint8_t ucOriginalNotifyState;
        UBaseType_t uxSavedInterruptStatus;

        configASSERT( xTaskToNotify );
        configASSERT( uxIndexToNotify < configTASK_NOTIFICATION_ARRAY_ENTRIES );

        /* RTOS ports that support interrupt nesting have the concept of a
         * maximum  system call (or maximum API call) interrupt priority.
         * Interrupts that are  above the maximum system call priority are keep
         * permanently enabled, even when the RTOS kernel is in a critical section,
         * but cannot make any calls to FreeRTOS API functions.  If configASSERT()
         * is defined in FreeRTOSConfig.h then
         * portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
         * failure if a FreeRTOS API function is called from an interrupt that has
         * been assigned a priority above the configured maximum system call
         * priority.  Only FreeRTOS functions that end in FromISR can be called
         * from interrupts  that have been assigned a priority at or (logically)
         * below the maximum  system call interrupt priority.  FreeRTOS maintains a
         * separate interrupt  safe API to ensure interrupt entry is as fast and as
         * simple as possible.  More information (albeit Cortex-M specific) is
         * provided on the following  link:
         * https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
        portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

        pxTCB = xTaskToNotify;

        uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
        {
            /* The original state is needed below to decide whether the target
             * task must be unblocked. */
            ucOriginalNotifyState = pxTCB->ucNotifyState[ uxIndexToNotify ];
            pxTCB->ucNotifyState[ uxIndexToNotify ] = taskNOTIFICATION_RECEIVED;

            /* 'Giving' is equivalent to incrementing a count in a counting
             * semaphore. */
            ( pxTCB->ulNotifiedValue[ uxIndexToNotify ] )++;

            traceTASK_NOTIFY_GIVE_FROM_ISR( uxIndexToNotify );

            /* If the task is in the blocked state specifically to wait for a
             * notification then unblock it now. */
            if( ucOriginalNotifyState == taskWAITING_NOTIFICATION )
            {
                /* The task should not have been on an event list. */
                configASSERT( listLIST_ITEM_CONTAINER( &( pxTCB->xEventListItem ) ) == NULL );

                if( uxSchedulerSuspended == ( UBaseType_t ) 0U )
                {
                    listREMOVE_ITEM( &( pxTCB->xStateListItem ) );
                    prvAddTaskToReadyList( pxTCB );
                }
                else
                {
                    /* The delayed and ready lists cannot be accessed, so hold
                     * this task pending until the scheduler is resumed. */
                    listINSERT_END( &( xPendingReadyList ), &( pxTCB->xEventListItem ) );
                }

                if( pxTCB->uxPriority > pxCurrentTCB->uxPriority )
                {
                    /* The notified task has a priority above the currently
                     * executing task so a yield is required. */
                    if( pxHigherPriorityTaskWoken != NULL )
                    {
                        *pxHigherPriorityTaskWoken = pdTRUE;
                    }

                    /* Mark that a yield is pending in case the user is not
                     * using the "xHigherPriorityTaskWoken" parameter in an ISR
                     * safe FreeRTOS function. */
                    xYieldPending = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
        portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
    }
5262
5263 #endif /* configUSE_TASK_NOTIFICATIONS */
5264 /*-----------------------------------------------------------*/
5265
5266 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5267
xTaskGenericNotifyStateClear(TaskHandle_t xTask,UBaseType_t uxIndexToClear)5268 BaseType_t xTaskGenericNotifyStateClear( TaskHandle_t xTask,
5269 UBaseType_t uxIndexToClear )
5270 {
5271 TCB_t * pxTCB;
5272 BaseType_t xReturn;
5273
5274 configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
5275
5276 /* If null is passed in here then it is the calling task that is having
5277 * its notification state cleared. */
5278 pxTCB = prvGetTCBFromHandle( xTask );
5279
5280 taskENTER_CRITICAL();
5281 {
5282 if( pxTCB->ucNotifyState[ uxIndexToClear ] == taskNOTIFICATION_RECEIVED )
5283 {
5284 pxTCB->ucNotifyState[ uxIndexToClear ] = taskNOT_WAITING_NOTIFICATION;
5285 xReturn = pdPASS;
5286 }
5287 else
5288 {
5289 xReturn = pdFAIL;
5290 }
5291 }
5292 taskEXIT_CRITICAL();
5293
5294 return xReturn;
5295 }
5296
5297 #endif /* configUSE_TASK_NOTIFICATIONS */
5298 /*-----------------------------------------------------------*/
5299
5300 #if ( configUSE_TASK_NOTIFICATIONS == 1 )
5301
ulTaskGenericNotifyValueClear(TaskHandle_t xTask,UBaseType_t uxIndexToClear,uint32_t ulBitsToClear)5302 uint32_t ulTaskGenericNotifyValueClear( TaskHandle_t xTask,
5303 UBaseType_t uxIndexToClear,
5304 uint32_t ulBitsToClear )
5305 {
5306 TCB_t * pxTCB;
5307 uint32_t ulReturn;
5308
5309 configASSERT( uxIndexToClear < configTASK_NOTIFICATION_ARRAY_ENTRIES );
5310
5311 /* If null is passed in here then it is the calling task that is having
5312 * its notification state cleared. */
5313 pxTCB = prvGetTCBFromHandle( xTask );
5314
5315 taskENTER_CRITICAL();
5316 {
5317 /* Return the notification as it was before the bits were cleared,
5318 * then clear the bit mask. */
5319 ulReturn = pxTCB->ulNotifiedValue[ uxIndexToClear ];
5320 pxTCB->ulNotifiedValue[ uxIndexToClear ] &= ~ulBitsToClear;
5321 }
5322 taskEXIT_CRITICAL();
5323
5324 return ulReturn;
5325 }
5326
5327 #endif /* configUSE_TASK_NOTIFICATIONS */
5328 /*-----------------------------------------------------------*/
5329
5330 #if ( configGENERATE_RUN_TIME_STATS == 1 )
5331
ulTaskGetRunTimeCounter(const TaskHandle_t xTask)5332 configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimeCounter( const TaskHandle_t xTask )
5333 {
5334 return xTask->ulRunTimeCounter;
5335 }
5336
5337 #endif
5338 /*-----------------------------------------------------------*/
5339
5340 #if ( configGENERATE_RUN_TIME_STATS == 1 )
5341
ulTaskGetRunTimePercent(const TaskHandle_t xTask)5342 configRUN_TIME_COUNTER_TYPE ulTaskGetRunTimePercent( const TaskHandle_t xTask )
5343 {
5344 configRUN_TIME_COUNTER_TYPE ulTotalTime, ulReturn;
5345
5346 ulTotalTime = ( configRUN_TIME_COUNTER_TYPE ) portGET_RUN_TIME_COUNTER_VALUE();
5347
5348 /* For percentage calculations. */
5349 ulTotalTime /= ( configRUN_TIME_COUNTER_TYPE ) 100;
5350
5351 /* Avoid divide by zero errors. */
5352 if( ulTotalTime > ( configRUN_TIME_COUNTER_TYPE ) 0 )
5353 {
5354 ulReturn = xTask->ulRunTimeCounter / ulTotalTime;
5355 }
5356 else
5357 {
5358 ulReturn = 0;
5359 }
5360
5361 return ulReturn;
5362 }
5363
5364 #endif /* if ( configGENERATE_RUN_TIME_STATS == 1 ) */
5365 /*-----------------------------------------------------------*/
5366
5367 #if ( configGENERATE_RUN_TIME_STATS == 1 )
5368
ulTaskGetIdleRunTimeCounter(void)5369 configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimeCounter( void )
5370 {
5371 return ulTaskGetRunTimeCounter( xIdleTaskHandle );
5372 }
5373
5374 #endif
5375 /*-----------------------------------------------------------*/
5376
5377 #if ( configGENERATE_RUN_TIME_STATS == 1 )
5378
ulTaskGetIdleRunTimePercent(void)5379 configRUN_TIME_COUNTER_TYPE ulTaskGetIdleRunTimePercent( void )
5380 {
5381 return ulTaskGetRunTimePercent( xIdleTaskHandle );
5382 }
5383
5384 #endif
5385 /*-----------------------------------------------------------*/
5386
/* Move the currently executing task (pxCurrentTCB) from its ready list onto
 * the appropriate blocked list for up to xTicksToWait ticks.  Per the lint
 * note below, this is only called with the scheduler suspended or from within
 * a critical section, so pxCurrentTCB and the priority bookkeeping cannot
 * change underneath it.
 *
 * xTicksToWait           Number of ticks the task should remain blocked.
 * xCanBlockIndefinitely  When INCLUDE_vTaskSuspend is 1 and xTicksToWait is
 *                        portMAX_DELAY, a non-pdFALSE value places the task on
 *                        the suspended list so no timing event will wake it;
 *                        ignored otherwise. */
static void prvAddCurrentTaskToDelayedList( TickType_t xTicksToWait,
                                            const BaseType_t xCanBlockIndefinitely )
{
    TickType_t xTimeToWake;
    /* Sample the tick count once so all calculations below use a consistent
     * value. */
    const TickType_t xConstTickCount = xTickCount;

    #if ( INCLUDE_xTaskAbortDelay == 1 )
    {
        /* About to enter a delayed list, so ensure the ucDelayAborted flag is
         * reset to pdFALSE so it can be detected as having been set to pdTRUE
         * when the task leaves the Blocked state. */
        pxCurrentTCB->ucDelayAborted = pdFALSE;
    }
    #endif

    /* Remove the task from the ready list before adding it to the blocked list
     * as the same list item is used for both lists. */
    if( uxListRemove( &( pxCurrentTCB->xStateListItem ) ) == ( UBaseType_t ) 0 )
    {
        /* The current task must be in a ready list, so there is no need to
         * check, and the port reset macro can be called directly. */
        portRESET_READY_PRIORITY( pxCurrentTCB->uxPriority, uxTopReadyPriority ); /*lint !e931 pxCurrentTCB cannot change as it is the calling task. pxCurrentTCB->uxPriority and uxTopReadyPriority cannot change as called with scheduler suspended or in a critical section. */
    }
    else
    {
        mtCOVERAGE_TEST_MARKER();
    }

    #if ( INCLUDE_vTaskSuspend == 1 )
    {
        if( ( xTicksToWait == portMAX_DELAY ) && ( xCanBlockIndefinitely != pdFALSE ) )
        {
            /* Add the task to the suspended task list instead of a delayed task
             * list to ensure it is not woken by a timing event. It will block
             * indefinitely. */
            listINSERT_END( &xSuspendedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* Calculate the time at which the task should be woken if the event
             * does not occur. This may overflow but this doesn't matter, the
             * kernel will manage it correctly. */
            xTimeToWake = xConstTickCount + xTicksToWait;

            /* The list item will be inserted in wake time order. */
            listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

            /* xTimeToWake having wrapped below the current tick count is how a
             * tick-count overflow is detected. */
            if( xTimeToWake < xConstTickCount )
            {
                /* Wake time has overflowed. Place this item in the overflow
                 * list. */
                vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
            }
            else
            {
                /* The wake time has not overflowed, so the current block list
                 * is used. */
                vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

                /* If the task entering the blocked state was placed at the
                 * head of the list of blocked tasks then xNextTaskUnblockTime
                 * needs to be updated too. */
                if( xTimeToWake < xNextTaskUnblockTime )
                {
                    xNextTaskUnblockTime = xTimeToWake;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
        }
    }
    #else /* INCLUDE_vTaskSuspend */
    {
        /* Calculate the time at which the task should be woken if the event
         * does not occur. This may overflow but this doesn't matter, the kernel
         * will manage it correctly. */
        xTimeToWake = xConstTickCount + xTicksToWait;

        /* The list item will be inserted in wake time order. */
        listSET_LIST_ITEM_VALUE( &( pxCurrentTCB->xStateListItem ), xTimeToWake );

        if( xTimeToWake < xConstTickCount )
        {
            /* Wake time has overflowed. Place this item in the overflow list. */
            vListInsert( pxOverflowDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );
        }
        else
        {
            /* The wake time has not overflowed, so the current block list is used. */
            vListInsert( pxDelayedTaskList, &( pxCurrentTCB->xStateListItem ) );

            /* If the task entering the blocked state was placed at the head of the
             * list of blocked tasks then xNextTaskUnblockTime needs to be updated
             * too. */
            if( xTimeToWake < xNextTaskUnblockTime )
            {
                xNextTaskUnblockTime = xTimeToWake;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }

        /* Avoid compiler warning when INCLUDE_vTaskSuspend is not 1. */
        ( void ) xCanBlockIndefinitely;
    }
    #endif /* INCLUDE_vTaskSuspend */
}
5498 /*-----------------------------------------------------------*/
5499
5500 #if ( portUSING_MPU_WRAPPERS == 1 )
5501
xTaskGetMPUSettings(TaskHandle_t xTask)5502 xMPU_SETTINGS * xTaskGetMPUSettings( TaskHandle_t xTask )
5503 {
5504 TCB_t * pxTCB;
5505
5506 pxTCB = prvGetTCBFromHandle( xTask );
5507
5508 return &( pxTCB->xMPUSettings );
5509 }
5510
5511 #endif /* portUSING_MPU_WRAPPERS */
5512 /*-----------------------------------------------------------*/
5513
5514 /* Code below here allows additional code to be inserted into this source file,
5515 * especially where access to file scope functions and data is needed (for example
5516 * when performing module tests). */
5517
5518 #ifdef FREERTOS_MODULE_TEST
5519 #include "tasks_test_access_functions.h"
5520 #endif
5521
5522
5523 #if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 )
5524
5525 #include "freertos_tasks_c_additions.h"
5526
5527 #ifdef FREERTOS_TASKS_C_ADDITIONS_INIT
        /* One-time initialisation hook for code pulled in via
         * freertos_tasks_c_additions.h - simply expands the user-supplied
         * FREERTOS_TASKS_C_ADDITIONS_INIT() macro. */
        static void freertos_tasks_c_additions_init( void )
        {
            FREERTOS_TASKS_C_ADDITIONS_INIT();
        }
5532 #endif
5533
5534 #endif /* if ( configINCLUDE_FREERTOS_TASK_C_ADDITIONS_H == 1 ) */
5535