1 /*
2  * FreeRTOS Kernel V11.1.0
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28 
29 #include <stdlib.h>
30 #include <string.h>
31 
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33  * all the API functions to use the MPU wrappers.  That should only be done when
34  * task.h is included from an application file. */
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36 
37 #include "FreeRTOS.h"
38 #include "task.h"
39 #include "queue.h"
40 
41 #if ( configUSE_CO_ROUTINES == 1 )
42     #include "croutine.h"
43 #endif
44 
45 /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
46  * for the header files above, but not in this file, in order to generate the
47  * correct privileged Vs unprivileged linkage and placement. */
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
49 
50 
51 /* Constants used with the cRxLock and cTxLock structure members. */
52 #define queueUNLOCKED             ( ( int8_t ) -1 )
53 #define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
54 #define queueINT8_MAX             ( ( int8_t ) 127 )
55 
56 /* When the Queue_t structure is used to represent a base queue its pcHead and
57  * pcTail members are used as pointers into the queue storage area.  When the
58  * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
59  * not necessary, and the pcHead pointer is set to NULL to indicate that the
60  * structure instead holds a pointer to the mutex holder (if any).  Map alternative
61  * names to the pcHead and structure member to ensure the readability of the code
62  * is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
63  * a union as their usage is mutually exclusive dependent on what the queue is
64  * being used for. */
65 #define uxQueueType               pcHead
66 #define queueQUEUE_IS_MUTEX       NULL
67 
68 typedef struct QueuePointers
69 {
70     int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
71     int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
72 } QueuePointers_t;
73 
74 typedef struct SemaphoreData
75 {
76     TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
77     UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
78 } SemaphoreData_t;
79 
80 /* Semaphores do not actually store or copy data, so have an item size of
81  * zero. */
82 #define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
83 #define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
84 
85 #if ( configUSE_PREEMPTION == 0 )
86 
87 /* If the cooperative scheduler is being used then a yield should not be
88  * performed just because a higher priority task has been woken. */
89     #define queueYIELD_IF_USING_PREEMPTION()
90 #else
91     #if ( configNUMBER_OF_CORES == 1 )
92         #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
93     #else /* #if ( configNUMBER_OF_CORES == 1 ) */
94         #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
95     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
96 #endif
97 
98 /*
99  * Definition of the queue used by the scheduler.
100  * Items are queued by copy, not reference.  See the following link for the
101  * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
102  */
103 typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
104 {
105     int8_t * pcHead;           /**< Points to the beginning of the queue storage area. */
106     int8_t * pcWriteTo;        /**< Points to the free next place in the storage area. */
107 
108     union
109     {
110         QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
111         SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
112     } u;
113 
114     List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
115     List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */
116 
117     volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
118     UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
119     UBaseType_t uxItemSize;                 /**< The size of each items that the queue will hold. */
120 
121     volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
122     volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
123 
124     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
125         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
126     #endif
127 
128     #if ( configUSE_QUEUE_SETS == 1 )
129         struct QueueDefinition * pxQueueSetContainer;
130     #endif
131 
132     #if ( configUSE_TRACE_FACILITY == 1 )
133         UBaseType_t uxQueueNumber;
134         uint8_t ucQueueType;
135     #endif
136 } xQUEUE;
137 
138 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
139  * name below to enable the use of older kernel aware debuggers. */
140 typedef xQUEUE Queue_t;
141 
142 /*-----------------------------------------------------------*/
143 
144 /*
145  * The queue registry is just a means for kernel aware debuggers to locate
146  * queue structures.  It has no other purpose so is an optional component.
147  */
148 #if ( configQUEUE_REGISTRY_SIZE > 0 )
149 
150 /* The type stored within the queue registry array.  This allows a name
151  * to be assigned to each queue making kernel aware debugging a little
152  * more user friendly. */
153     typedef struct QUEUE_REGISTRY_ITEM
154     {
155         const char * pcQueueName;
156         QueueHandle_t xHandle;
157     } xQueueRegistryItem;
158 
159 /* The old xQueueRegistryItem name is maintained above then typedefed to the
160  * new xQueueRegistryItem name below to enable the use of older kernel aware
161  * debuggers. */
162     typedef xQueueRegistryItem QueueRegistryItem_t;
163 
164 /* The queue registry is simply an array of QueueRegistryItem_t structures.
165  * The pcQueueName member of a structure being NULL is indicative of the
166  * array position being vacant. */
167 
168 /* MISRA Ref 8.4.2 [Declaration shall be visible] */
169 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
170 /* coverity[misra_c_2012_rule_8_4_violation] */
171     PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
172 
173 #endif /* configQUEUE_REGISTRY_SIZE */
174 
175 /*
176  * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
177  * prevent an ISR from adding or removing items to the queue, but does prevent
178  * an ISR from removing tasks from the queue event lists.  If an ISR finds a
179  * queue is locked it will instead increment the appropriate queue lock count
180  * to indicate that a task may require unblocking.  When the queue in unlocked
181  * these lock counts are inspected, and the appropriate action taken.
182  */
183 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
184 
185 /*
186  * Uses a critical section to determine if there is any data in a queue.
187  *
188  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
189  */
190 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
191 
192 /*
193  * Uses a critical section to determine if there is any space in a queue.
194  *
195  * @return pdTRUE if there is no space, otherwise pdFALSE;
196  */
197 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
198 
199 /*
200  * Copies an item into the queue, either at the front of the queue or the
201  * back of the queue.
202  */
203 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
204                                       const void * pvItemToQueue,
205                                       const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
206 
207 /*
208  * Copies an item out of a queue.
209  */
210 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
211                                   void * const pvBuffer ) PRIVILEGED_FUNCTION;
212 
213 #if ( configUSE_QUEUE_SETS == 1 )
214 
215 /*
216  * Checks to see if a queue is a member of a queue set, and if so, notifies
217  * the queue set that the queue contains data.
218  */
219     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
220 #endif
221 
222 /*
223  * Called after a Queue_t structure has been allocated either statically or
224  * dynamically to fill in the structure's members.
225  */
226 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
227                                    const UBaseType_t uxItemSize,
228                                    uint8_t * pucQueueStorage,
229                                    const uint8_t ucQueueType,
230                                    Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
231 
232 /*
233  * Mutexes are a special type of queue.  When a mutex is created, first the
234  * queue is created, then prvInitialiseMutex() is called to configure the queue
235  * as a mutex.
236  */
237 #if ( configUSE_MUTEXES == 1 )
238     static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
239 #endif
240 
241 #if ( configUSE_MUTEXES == 1 )
242 
243 /*
244  * If a task waiting for a mutex causes the mutex holder to inherit a
245  * priority, but the waiting task times out, then the holder should
246  * disinherit the priority - but only down to the highest priority of any
247  * other tasks that are waiting for the same mutex.  This function returns
248  * that priority.
249  */
250     static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
251 #endif
252 /*-----------------------------------------------------------*/
253 
254 /*
255  * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
256  * accessing the queue event lists.
257  */
258 #define prvLockQueue( pxQueue )                            \
259     taskENTER_CRITICAL();                                  \
260     {                                                      \
261         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
262         {                                                  \
263             ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
264         }                                                  \
265         if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
266         {                                                  \
267             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
268         }                                                  \
269     }                                                      \
270     taskEXIT_CRITICAL()
271 
272 /*
273  * Macro to increment cTxLock member of the queue data structure. It is
274  * capped at the number of tasks in the system as we cannot unblock more
275  * tasks than the number of tasks in the system.
276  */
277 #define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
278     do {                                                                      \
279         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
280         if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
281         {                                                                     \
282             configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
283             ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
284         }                                                                     \
285     } while( 0 )
286 
287 /*
288  * Macro to increment cRxLock member of the queue data structure. It is
289  * capped at the number of tasks in the system as we cannot unblock more
290  * tasks than the number of tasks in the system.
291  */
292 #define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
293     do {                                                                      \
294         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
295         if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
296         {                                                                     \
297             configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
298             ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
299         }                                                                     \
300     } while( 0 )
301 /*-----------------------------------------------------------*/
302 
xQueueGenericReset(QueueHandle_t xQueue,BaseType_t xNewQueue)303 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
304                                BaseType_t xNewQueue )
305 {
306     BaseType_t xReturn = pdPASS;
307     Queue_t * const pxQueue = xQueue;
308 
309     traceENTER_xQueueGenericReset( xQueue, xNewQueue );
310 
311     configASSERT( pxQueue );
312 
313     if( ( pxQueue != NULL ) &&
314         ( pxQueue->uxLength >= 1U ) &&
315         /* Check for multiplication overflow. */
316         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
317     {
318         taskENTER_CRITICAL();
319         {
320             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
321             pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
322             pxQueue->pcWriteTo = pxQueue->pcHead;
323             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize );
324             pxQueue->cRxLock = queueUNLOCKED;
325             pxQueue->cTxLock = queueUNLOCKED;
326 
327             if( xNewQueue == pdFALSE )
328             {
329                 /* If there are tasks blocked waiting to read from the queue, then
330                  * the tasks will remain blocked as after this function exits the queue
331                  * will still be empty.  If there are tasks blocked waiting to write to
332                  * the queue, then one should be unblocked as after this function exits
333                  * it will be possible to write to it. */
334                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
335                 {
336                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
337                     {
338                         queueYIELD_IF_USING_PREEMPTION();
339                     }
340                     else
341                     {
342                         mtCOVERAGE_TEST_MARKER();
343                     }
344                 }
345                 else
346                 {
347                     mtCOVERAGE_TEST_MARKER();
348                 }
349             }
350             else
351             {
352                 /* Ensure the event queues start in the correct state. */
353                 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
354                 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
355             }
356         }
357         taskEXIT_CRITICAL();
358     }
359     else
360     {
361         xReturn = pdFAIL;
362     }
363 
364     configASSERT( xReturn != pdFAIL );
365 
366     /* A value is returned for calling semantic consistency with previous
367      * versions. */
368     traceRETURN_xQueueGenericReset( xReturn );
369 
370     return xReturn;
371 }
372 /*-----------------------------------------------------------*/
373 
374 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
375 
xQueueGenericCreateStatic(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,uint8_t * pucQueueStorage,StaticQueue_t * pxStaticQueue,const uint8_t ucQueueType)376     QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
377                                              const UBaseType_t uxItemSize,
378                                              uint8_t * pucQueueStorage,
379                                              StaticQueue_t * pxStaticQueue,
380                                              const uint8_t ucQueueType )
381     {
382         Queue_t * pxNewQueue = NULL;
383 
384         traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );
385 
386         /* The StaticQueue_t structure and the queue storage area must be
387          * supplied. */
388         configASSERT( pxStaticQueue );
389 
390         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
391             ( pxStaticQueue != NULL ) &&
392 
393             /* A queue storage area should be provided if the item size is not 0, and
394              * should not be provided if the item size is 0. */
395             ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
396             ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
397         {
398             #if ( configASSERT_DEFINED == 1 )
399             {
400                 /* Sanity check that the size of the structure used to declare a
401                  * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
402                  * the real queue and semaphore structures. */
403                 volatile size_t xSize = sizeof( StaticQueue_t );
404 
405                 /* This assertion cannot be branch covered in unit tests */
406                 configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
407                 ( void ) xSize;                             /* Prevent unused variable warning when configASSERT() is not defined. */
408             }
409             #endif /* configASSERT_DEFINED */
410 
411             /* The address of a statically allocated queue was passed in, use it.
412              * The address of a statically allocated storage area was also passed in
413              * but is already set. */
414             /* MISRA Ref 11.3.1 [Misaligned access] */
415             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
416             /* coverity[misra_c_2012_rule_11_3_violation] */
417             pxNewQueue = ( Queue_t * ) pxStaticQueue;
418 
419             #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
420             {
421                 /* Queues can be allocated wither statically or dynamically, so
422                  * note this queue was allocated statically in case the queue is
423                  * later deleted. */
424                 pxNewQueue->ucStaticallyAllocated = pdTRUE;
425             }
426             #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
427 
428             prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
429         }
430         else
431         {
432             configASSERT( pxNewQueue );
433             mtCOVERAGE_TEST_MARKER();
434         }
435 
436         traceRETURN_xQueueGenericCreateStatic( pxNewQueue );
437 
438         return pxNewQueue;
439     }
440 
441 #endif /* configSUPPORT_STATIC_ALLOCATION */
442 /*-----------------------------------------------------------*/
443 
444 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
445 
xQueueGenericGetStaticBuffers(QueueHandle_t xQueue,uint8_t ** ppucQueueStorage,StaticQueue_t ** ppxStaticQueue)446     BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
447                                               uint8_t ** ppucQueueStorage,
448                                               StaticQueue_t ** ppxStaticQueue )
449     {
450         BaseType_t xReturn;
451         Queue_t * const pxQueue = xQueue;
452 
453         traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );
454 
455         configASSERT( pxQueue );
456         configASSERT( ppxStaticQueue );
457 
458         #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
459         {
460             /* Check if the queue was statically allocated. */
461             if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
462             {
463                 if( ppucQueueStorage != NULL )
464                 {
465                     *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
466                 }
467 
468                 /* MISRA Ref 11.3.1 [Misaligned access] */
469                 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
470                 /* coverity[misra_c_2012_rule_11_3_violation] */
471                 *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
472                 xReturn = pdTRUE;
473             }
474             else
475             {
476                 xReturn = pdFALSE;
477             }
478         }
479         #else /* configSUPPORT_DYNAMIC_ALLOCATION */
480         {
481             /* Queue must have been statically allocated. */
482             if( ppucQueueStorage != NULL )
483             {
484                 *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
485             }
486 
487             *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
488             xReturn = pdTRUE;
489         }
490         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
491 
492         traceRETURN_xQueueGenericGetStaticBuffers( xReturn );
493 
494         return xReturn;
495     }
496 
497 #endif /* configSUPPORT_STATIC_ALLOCATION */
498 /*-----------------------------------------------------------*/
499 
500 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
501 
xQueueGenericCreate(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,const uint8_t ucQueueType)502     QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
503                                        const UBaseType_t uxItemSize,
504                                        const uint8_t ucQueueType )
505     {
506         Queue_t * pxNewQueue = NULL;
507         size_t xQueueSizeInBytes;
508         uint8_t * pucQueueStorage;
509 
510         traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
511 
512         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
513             /* Check for multiplication overflow. */
514             ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
515             /* Check for addition overflow. */
516             ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
517         {
518             /* Allocate enough space to hold the maximum number of items that
519              * can be in the queue at any time.  It is valid for uxItemSize to be
520              * zero in the case the queue is used as a semaphore. */
521             xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );
522 
523             /* MISRA Ref 11.5.1 [Malloc memory assignment] */
524             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
525             /* coverity[misra_c_2012_rule_11_5_violation] */
526             pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
527 
528             if( pxNewQueue != NULL )
529             {
530                 /* Jump past the queue structure to find the location of the queue
531                  * storage area. */
532                 pucQueueStorage = ( uint8_t * ) pxNewQueue;
533                 pucQueueStorage += sizeof( Queue_t );
534 
535                 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
536                 {
537                     /* Queues can be created either statically or dynamically, so
538                      * note this task was created dynamically in case it is later
539                      * deleted. */
540                     pxNewQueue->ucStaticallyAllocated = pdFALSE;
541                 }
542                 #endif /* configSUPPORT_STATIC_ALLOCATION */
543 
544                 prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
545             }
546             else
547             {
548                 traceQUEUE_CREATE_FAILED( ucQueueType );
549                 mtCOVERAGE_TEST_MARKER();
550             }
551         }
552         else
553         {
554             configASSERT( pxNewQueue );
555             mtCOVERAGE_TEST_MARKER();
556         }
557 
558         traceRETURN_xQueueGenericCreate( pxNewQueue );
559 
560         return pxNewQueue;
561     }
562 
563 #endif /* configSUPPORT_STATIC_ALLOCATION */
564 /*-----------------------------------------------------------*/
565 
prvInitialiseNewQueue(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,uint8_t * pucQueueStorage,const uint8_t ucQueueType,Queue_t * pxNewQueue)566 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
567                                    const UBaseType_t uxItemSize,
568                                    uint8_t * pucQueueStorage,
569                                    const uint8_t ucQueueType,
570                                    Queue_t * pxNewQueue )
571 {
572     /* Remove compiler warnings about unused parameters should
573      * configUSE_TRACE_FACILITY not be set to 1. */
574     ( void ) ucQueueType;
575 
576     if( uxItemSize == ( UBaseType_t ) 0 )
577     {
578         /* No RAM was allocated for the queue storage area, but PC head cannot
579          * be set to NULL because NULL is used as a key to say the queue is used as
580          * a mutex.  Therefore just set pcHead to point to the queue as a benign
581          * value that is known to be within the memory map. */
582         pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
583     }
584     else
585     {
586         /* Set the head to the start of the queue storage area. */
587         pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
588     }
589 
590     /* Initialise the queue members as described where the queue type is
591      * defined. */
592     pxNewQueue->uxLength = uxQueueLength;
593     pxNewQueue->uxItemSize = uxItemSize;
594     ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
595 
596     #if ( configUSE_TRACE_FACILITY == 1 )
597     {
598         pxNewQueue->ucQueueType = ucQueueType;
599     }
600     #endif /* configUSE_TRACE_FACILITY */
601 
602     #if ( configUSE_QUEUE_SETS == 1 )
603     {
604         pxNewQueue->pxQueueSetContainer = NULL;
605     }
606     #endif /* configUSE_QUEUE_SETS */
607 
608     traceQUEUE_CREATE( pxNewQueue );
609 }
610 /*-----------------------------------------------------------*/
611 
612 #if ( configUSE_MUTEXES == 1 )
613 
prvInitialiseMutex(Queue_t * pxNewQueue)614     static void prvInitialiseMutex( Queue_t * pxNewQueue )
615     {
616         if( pxNewQueue != NULL )
617         {
618             /* The queue create function will set all the queue structure members
619             * correctly for a generic queue, but this function is creating a
620             * mutex.  Overwrite those members that need to be set differently -
621             * in particular the information required for priority inheritance. */
622             pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
623             pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
624 
625             /* In case this is a recursive mutex. */
626             pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
627 
628             traceCREATE_MUTEX( pxNewQueue );
629 
630             /* Start with the semaphore in the expected state. */
631             ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
632         }
633         else
634         {
635             traceCREATE_MUTEX_FAILED();
636         }
637     }
638 
639 #endif /* configUSE_MUTEXES */
640 /*-----------------------------------------------------------*/
641 
642 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
643 
xQueueCreateMutex(const uint8_t ucQueueType)644     QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
645     {
646         QueueHandle_t xNewQueue;
647         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
648 
649         traceENTER_xQueueCreateMutex( ucQueueType );
650 
651         xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
652         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
653 
654         traceRETURN_xQueueCreateMutex( xNewQueue );
655 
656         return xNewQueue;
657     }
658 
659 #endif /* configUSE_MUTEXES */
660 /*-----------------------------------------------------------*/
661 
662 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
663 
xQueueCreateMutexStatic(const uint8_t ucQueueType,StaticQueue_t * pxStaticQueue)664     QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
665                                            StaticQueue_t * pxStaticQueue )
666     {
667         QueueHandle_t xNewQueue;
668         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
669 
670         traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );
671 
672         /* Prevent compiler warnings about unused parameters if
673          * configUSE_TRACE_FACILITY does not equal 1. */
674         ( void ) ucQueueType;
675 
676         xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
677         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
678 
679         traceRETURN_xQueueCreateMutexStatic( xNewQueue );
680 
681         return xNewQueue;
682     }
683 
684 #endif /* configUSE_MUTEXES */
685 /*-----------------------------------------------------------*/
686 
687 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
688 
xQueueGetMutexHolder(QueueHandle_t xSemaphore)689     TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
690     {
691         TaskHandle_t pxReturn;
692         Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;
693 
694         traceENTER_xQueueGetMutexHolder( xSemaphore );
695 
696         configASSERT( xSemaphore );
697 
698         /* This function is called by xSemaphoreGetMutexHolder(), and should not
699          * be called directly.  Note:  This is a good way of determining if the
700          * calling task is the mutex holder, but not a good way of determining the
701          * identity of the mutex holder, as the holder may change between the
702          * following critical section exiting and the function returning. */
703         taskENTER_CRITICAL();
704         {
705             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
706             {
707                 pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
708             }
709             else
710             {
711                 pxReturn = NULL;
712             }
713         }
714         taskEXIT_CRITICAL();
715 
716         traceRETURN_xQueueGetMutexHolder( pxReturn );
717 
718         return pxReturn;
719     }
720 
721 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
722 /*-----------------------------------------------------------*/
723 
724 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
725 
xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore)726     TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
727     {
728         TaskHandle_t pxReturn;
729 
730         traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );
731 
732         configASSERT( xSemaphore );
733 
734         /* Mutexes cannot be used in interrupt service routines, so the mutex
735          * holder should not change in an ISR, and therefore a critical section is
736          * not required here. */
737         if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
738         {
739             pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
740         }
741         else
742         {
743             pxReturn = NULL;
744         }
745 
746         traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );
747 
748         return pxReturn;
749     }
750 
751 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
752 /*-----------------------------------------------------------*/
753 
754 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
755 
xQueueGiveMutexRecursive(QueueHandle_t xMutex)756     BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
757     {
758         BaseType_t xReturn;
759         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
760 
761         traceENTER_xQueueGiveMutexRecursive( xMutex );
762 
763         configASSERT( pxMutex );
764 
765         /* If this is the task that holds the mutex then xMutexHolder will not
766          * change outside of this task.  If this task does not hold the mutex then
767          * pxMutexHolder can never coincidentally equal the tasks handle, and as
768          * this is the only condition we are interested in it does not matter if
769          * pxMutexHolder is accessed simultaneously by another task.  Therefore no
770          * mutual exclusion is required to test the pxMutexHolder variable. */
771         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
772         {
773             traceGIVE_MUTEX_RECURSIVE( pxMutex );
774 
775             /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
776              * the task handle, therefore no underflow check is required.  Also,
777              * uxRecursiveCallCount is only modified by the mutex holder, and as
778              * there can only be one, no mutual exclusion is required to modify the
779              * uxRecursiveCallCount member. */
780             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;
781 
782             /* Has the recursive call count unwound to 0? */
783             if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
784             {
785                 /* Return the mutex.  This will automatically unblock any other
786                  * task that might be waiting to access the mutex. */
787                 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
788             }
789             else
790             {
791                 mtCOVERAGE_TEST_MARKER();
792             }
793 
794             xReturn = pdPASS;
795         }
796         else
797         {
798             /* The mutex cannot be given because the calling task is not the
799              * holder. */
800             xReturn = pdFAIL;
801 
802             traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
803         }
804 
805         traceRETURN_xQueueGiveMutexRecursive( xReturn );
806 
807         return xReturn;
808     }
809 
810 #endif /* configUSE_RECURSIVE_MUTEXES */
811 /*-----------------------------------------------------------*/
812 
813 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
814 
xQueueTakeMutexRecursive(QueueHandle_t xMutex,TickType_t xTicksToWait)815     BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
816                                          TickType_t xTicksToWait )
817     {
818         BaseType_t xReturn;
819         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
820 
821         traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );
822 
823         configASSERT( pxMutex );
824 
825         /* Comments regarding mutual exclusion as per those within
826          * xQueueGiveMutexRecursive(). */
827 
828         traceTAKE_MUTEX_RECURSIVE( pxMutex );
829 
830         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
831         {
832             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
833             xReturn = pdPASS;
834         }
835         else
836         {
837             xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );
838 
839             /* pdPASS will only be returned if the mutex was successfully
840              * obtained.  The calling task may have entered the Blocked state
841              * before reaching here. */
842             if( xReturn != pdFAIL )
843             {
844                 ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
845             }
846             else
847             {
848                 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
849             }
850         }
851 
852         traceRETURN_xQueueTakeMutexRecursive( xReturn );
853 
854         return xReturn;
855     }
856 
857 #endif /* configUSE_RECURSIVE_MUTEXES */
858 /*-----------------------------------------------------------*/
859 
860 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
861 
xQueueCreateCountingSemaphoreStatic(const UBaseType_t uxMaxCount,const UBaseType_t uxInitialCount,StaticQueue_t * pxStaticQueue)862     QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
863                                                        const UBaseType_t uxInitialCount,
864                                                        StaticQueue_t * pxStaticQueue )
865     {
866         QueueHandle_t xHandle = NULL;
867 
868         traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );
869 
870         if( ( uxMaxCount != 0U ) &&
871             ( uxInitialCount <= uxMaxCount ) )
872         {
873             xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
874 
875             if( xHandle != NULL )
876             {
877                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
878 
879                 traceCREATE_COUNTING_SEMAPHORE();
880             }
881             else
882             {
883                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
884             }
885         }
886         else
887         {
888             configASSERT( xHandle );
889             mtCOVERAGE_TEST_MARKER();
890         }
891 
892         traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );
893 
894         return xHandle;
895     }
896 
897 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
898 /*-----------------------------------------------------------*/
899 
900 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
901 
xQueueCreateCountingSemaphore(const UBaseType_t uxMaxCount,const UBaseType_t uxInitialCount)902     QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
903                                                  const UBaseType_t uxInitialCount )
904     {
905         QueueHandle_t xHandle = NULL;
906 
907         traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );
908 
909         if( ( uxMaxCount != 0U ) &&
910             ( uxInitialCount <= uxMaxCount ) )
911         {
912             xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
913 
914             if( xHandle != NULL )
915             {
916                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
917 
918                 traceCREATE_COUNTING_SEMAPHORE();
919             }
920             else
921             {
922                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
923             }
924         }
925         else
926         {
927             configASSERT( xHandle );
928             mtCOVERAGE_TEST_MARKER();
929         }
930 
931         traceRETURN_xQueueCreateCountingSemaphore( xHandle );
932 
933         return xHandle;
934     }
935 
936 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
937 /*-----------------------------------------------------------*/
938 
xQueueGenericSend(QueueHandle_t xQueue,const void * const pvItemToQueue,TickType_t xTicksToWait,const BaseType_t xCopyPosition)939 BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
940                               const void * const pvItemToQueue,
941                               TickType_t xTicksToWait,
942                               const BaseType_t xCopyPosition )
943 {
944     BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
945     TimeOut_t xTimeOut;
946     Queue_t * const pxQueue = xQueue;
947 
948     traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
949 
950     configASSERT( pxQueue );
951     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
952     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
953     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
954     {
955         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
956     }
957     #endif
958 
959     for( ; ; )
960     {
961         taskENTER_CRITICAL();
962         {
963             /* Is there room on the queue now?  The running task must be the
964              * highest priority task wanting to access the queue.  If the head item
965              * in the queue is to be overwritten then it does not matter if the
966              * queue is full. */
967             if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
968             {
969                 traceQUEUE_SEND( pxQueue );
970 
971                 #if ( configUSE_QUEUE_SETS == 1 )
972                 {
973                     const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
974 
975                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
976 
977                     if( pxQueue->pxQueueSetContainer != NULL )
978                     {
979                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
980                         {
981                             /* Do not notify the queue set as an existing item
982                              * was overwritten in the queue so the number of items
983                              * in the queue has not changed. */
984                             mtCOVERAGE_TEST_MARKER();
985                         }
986                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
987                         {
988                             /* The queue is a member of a queue set, and posting
989                              * to the queue set caused a higher priority task to
990                              * unblock. A context switch is required. */
991                             queueYIELD_IF_USING_PREEMPTION();
992                         }
993                         else
994                         {
995                             mtCOVERAGE_TEST_MARKER();
996                         }
997                     }
998                     else
999                     {
1000                         /* If there was a task waiting for data to arrive on the
1001                          * queue then unblock it now. */
1002                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1003                         {
1004                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1005                             {
1006                                 /* The unblocked task has a priority higher than
1007                                  * our own so yield immediately.  Yes it is ok to
1008                                  * do this from within the critical section - the
1009                                  * kernel takes care of that. */
1010                                 queueYIELD_IF_USING_PREEMPTION();
1011                             }
1012                             else
1013                             {
1014                                 mtCOVERAGE_TEST_MARKER();
1015                             }
1016                         }
1017                         else if( xYieldRequired != pdFALSE )
1018                         {
1019                             /* This path is a special case that will only get
1020                              * executed if the task was holding multiple mutexes
1021                              * and the mutexes were given back in an order that is
1022                              * different to that in which they were taken. */
1023                             queueYIELD_IF_USING_PREEMPTION();
1024                         }
1025                         else
1026                         {
1027                             mtCOVERAGE_TEST_MARKER();
1028                         }
1029                     }
1030                 }
1031                 #else /* configUSE_QUEUE_SETS */
1032                 {
1033                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1034 
1035                     /* If there was a task waiting for data to arrive on the
1036                      * queue then unblock it now. */
1037                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1038                     {
1039                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1040                         {
1041                             /* The unblocked task has a priority higher than
1042                              * our own so yield immediately.  Yes it is ok to do
1043                              * this from within the critical section - the kernel
1044                              * takes care of that. */
1045                             queueYIELD_IF_USING_PREEMPTION();
1046                         }
1047                         else
1048                         {
1049                             mtCOVERAGE_TEST_MARKER();
1050                         }
1051                     }
1052                     else if( xYieldRequired != pdFALSE )
1053                     {
1054                         /* This path is a special case that will only get
1055                          * executed if the task was holding multiple mutexes and
1056                          * the mutexes were given back in an order that is
1057                          * different to that in which they were taken. */
1058                         queueYIELD_IF_USING_PREEMPTION();
1059                     }
1060                     else
1061                     {
1062                         mtCOVERAGE_TEST_MARKER();
1063                     }
1064                 }
1065                 #endif /* configUSE_QUEUE_SETS */
1066 
1067                 taskEXIT_CRITICAL();
1068 
1069                 traceRETURN_xQueueGenericSend( pdPASS );
1070 
1071                 return pdPASS;
1072             }
1073             else
1074             {
1075                 if( xTicksToWait == ( TickType_t ) 0 )
1076                 {
1077                     /* The queue was full and no block time is specified (or
1078                      * the block time has expired) so leave now. */
1079                     taskEXIT_CRITICAL();
1080 
1081                     /* Return to the original privilege level before exiting
1082                      * the function. */
1083                     traceQUEUE_SEND_FAILED( pxQueue );
1084                     traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1085 
1086                     return errQUEUE_FULL;
1087                 }
1088                 else if( xEntryTimeSet == pdFALSE )
1089                 {
1090                     /* The queue was full and a block time was specified so
1091                      * configure the timeout structure. */
1092                     vTaskInternalSetTimeOutState( &xTimeOut );
1093                     xEntryTimeSet = pdTRUE;
1094                 }
1095                 else
1096                 {
1097                     /* Entry time was already set. */
1098                     mtCOVERAGE_TEST_MARKER();
1099                 }
1100             }
1101         }
1102         taskEXIT_CRITICAL();
1103 
1104         /* Interrupts and other tasks can send to and receive from the queue
1105          * now the critical section has been exited. */
1106 
1107         vTaskSuspendAll();
1108         prvLockQueue( pxQueue );
1109 
1110         /* Update the timeout state to see if it has expired yet. */
1111         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1112         {
1113             if( prvIsQueueFull( pxQueue ) != pdFALSE )
1114             {
1115                 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
1116                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
1117 
1118                 /* Unlocking the queue means queue events can effect the
1119                  * event list. It is possible that interrupts occurring now
1120                  * remove this task from the event list again - but as the
1121                  * scheduler is suspended the task will go onto the pending
1122                  * ready list instead of the actual ready list. */
1123                 prvUnlockQueue( pxQueue );
1124 
1125                 /* Resuming the scheduler will move tasks from the pending
1126                  * ready list into the ready list - so it is feasible that this
1127                  * task is already in the ready list before it yields - in which
1128                  * case the yield will not cause a context switch unless there
1129                  * is also a higher priority task in the pending ready list. */
1130                 if( xTaskResumeAll() == pdFALSE )
1131                 {
1132                     taskYIELD_WITHIN_API();
1133                 }
1134             }
1135             else
1136             {
1137                 /* Try again. */
1138                 prvUnlockQueue( pxQueue );
1139                 ( void ) xTaskResumeAll();
1140             }
1141         }
1142         else
1143         {
1144             /* The timeout has expired. */
1145             prvUnlockQueue( pxQueue );
1146             ( void ) xTaskResumeAll();
1147 
1148             traceQUEUE_SEND_FAILED( pxQueue );
1149             traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1150 
1151             return errQUEUE_FULL;
1152         }
1153     }
1154 }
1155 /*-----------------------------------------------------------*/
1156 
xQueueGenericSendFromISR(QueueHandle_t xQueue,const void * const pvItemToQueue,BaseType_t * const pxHigherPriorityTaskWoken,const BaseType_t xCopyPosition)1157 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
1158                                      const void * const pvItemToQueue,
1159                                      BaseType_t * const pxHigherPriorityTaskWoken,
1160                                      const BaseType_t xCopyPosition )
1161 {
1162     BaseType_t xReturn;
1163     UBaseType_t uxSavedInterruptStatus;
1164     Queue_t * const pxQueue = xQueue;
1165 
1166     traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );
1167 
1168     configASSERT( pxQueue );
1169     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1170     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
1171 
1172     /* RTOS ports that support interrupt nesting have the concept of a maximum
1173      * system call (or maximum API call) interrupt priority.  Interrupts that are
1174      * above the maximum system call priority are kept permanently enabled, even
1175      * when the RTOS kernel is in a critical section, but cannot make any calls to
1176      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1177      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1178      * failure if a FreeRTOS API function is called from an interrupt that has been
1179      * assigned a priority above the configured maximum system call priority.
1180      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1181      * that have been assigned a priority at or (logically) below the maximum
1182      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1183      * safe API to ensure interrupt entry is as fast and as simple as possible.
1184      * More information (albeit Cortex-M specific) is provided on the following
1185      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1186     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1187 
1188     /* Similar to xQueueGenericSend, except without blocking if there is no room
1189      * in the queue.  Also don't directly wake a task that was blocked on a queue
1190      * read, instead return a flag to say whether a context switch is required or
1191      * not (i.e. has a task with a higher priority than us been woken by this
1192      * post). */
1193     /* MISRA Ref 4.7.1 [Return value shall be checked] */
1194     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
1195     /* coverity[misra_c_2012_directive_4_7_violation] */
1196     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
1197     {
1198         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
1199         {
1200             const int8_t cTxLock = pxQueue->cTxLock;
1201             const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
1202 
1203             traceQUEUE_SEND_FROM_ISR( pxQueue );
1204 
1205             /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
1206              *  semaphore or mutex.  That means prvCopyDataToQueue() cannot result
1207              *  in a task disinheriting a priority and prvCopyDataToQueue() can be
1208              *  called here even though the disinherit function does not check if
1209              *  the scheduler is suspended before accessing the ready lists. */
1210             ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1211 
1212             /* The event list is not altered if the queue is locked.  This will
1213              * be done when the queue is unlocked later. */
1214             if( cTxLock == queueUNLOCKED )
1215             {
1216                 #if ( configUSE_QUEUE_SETS == 1 )
1217                 {
1218                     if( pxQueue->pxQueueSetContainer != NULL )
1219                     {
1220                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
1221                         {
1222                             /* Do not notify the queue set as an existing item
1223                              * was overwritten in the queue so the number of items
1224                              * in the queue has not changed. */
1225                             mtCOVERAGE_TEST_MARKER();
1226                         }
1227                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1228                         {
1229                             /* The queue is a member of a queue set, and posting
1230                              * to the queue set caused a higher priority task to
1231                              * unblock.  A context switch is required. */
1232                             if( pxHigherPriorityTaskWoken != NULL )
1233                             {
1234                                 *pxHigherPriorityTaskWoken = pdTRUE;
1235                             }
1236                             else
1237                             {
1238                                 mtCOVERAGE_TEST_MARKER();
1239                             }
1240                         }
1241                         else
1242                         {
1243                             mtCOVERAGE_TEST_MARKER();
1244                         }
1245                     }
1246                     else
1247                     {
1248                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1249                         {
1250                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1251                             {
1252                                 /* The task waiting has a higher priority so
1253                                  *  record that a context switch is required. */
1254                                 if( pxHigherPriorityTaskWoken != NULL )
1255                                 {
1256                                     *pxHigherPriorityTaskWoken = pdTRUE;
1257                                 }
1258                                 else
1259                                 {
1260                                     mtCOVERAGE_TEST_MARKER();
1261                                 }
1262                             }
1263                             else
1264                             {
1265                                 mtCOVERAGE_TEST_MARKER();
1266                             }
1267                         }
1268                         else
1269                         {
1270                             mtCOVERAGE_TEST_MARKER();
1271                         }
1272                     }
1273                 }
1274                 #else /* configUSE_QUEUE_SETS */
1275                 {
1276                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1277                     {
1278                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1279                         {
1280                             /* The task waiting has a higher priority so record that a
1281                              * context switch is required. */
1282                             if( pxHigherPriorityTaskWoken != NULL )
1283                             {
1284                                 *pxHigherPriorityTaskWoken = pdTRUE;
1285                             }
1286                             else
1287                             {
1288                                 mtCOVERAGE_TEST_MARKER();
1289                             }
1290                         }
1291                         else
1292                         {
1293                             mtCOVERAGE_TEST_MARKER();
1294                         }
1295                     }
1296                     else
1297                     {
1298                         mtCOVERAGE_TEST_MARKER();
1299                     }
1300 
1301                     /* Not used in this path. */
1302                     ( void ) uxPreviousMessagesWaiting;
1303                 }
1304                 #endif /* configUSE_QUEUE_SETS */
1305             }
1306             else
1307             {
1308                 /* Increment the lock count so the task that unlocks the queue
1309                  * knows that data was posted while it was locked. */
1310                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1311             }
1312 
1313             xReturn = pdPASS;
1314         }
1315         else
1316         {
1317             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1318             xReturn = errQUEUE_FULL;
1319         }
1320     }
1321     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1322 
1323     traceRETURN_xQueueGenericSendFromISR( xReturn );
1324 
1325     return xReturn;
1326 }
1327 /*-----------------------------------------------------------*/
1328 
xQueueGiveFromISR(QueueHandle_t xQueue,BaseType_t * const pxHigherPriorityTaskWoken)1329 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
1330                               BaseType_t * const pxHigherPriorityTaskWoken )
1331 {
1332     BaseType_t xReturn;
1333     UBaseType_t uxSavedInterruptStatus;
1334     Queue_t * const pxQueue = xQueue;
1335 
1336     traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );
1337 
1338     /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1339      * item size is 0.  Don't directly wake a task that was blocked on a queue
1340      * read, instead return a flag to say whether a context switch is required or
1341      * not (i.e. has a task with a higher priority than us been woken by this
1342      * post). */
1343 
1344     configASSERT( pxQueue );
1345 
1346     /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1347      * if the item size is not 0. */
1348     configASSERT( pxQueue->uxItemSize == 0 );
1349 
1350     /* Normally a mutex would not be given from an interrupt, especially if
1351      * there is a mutex holder, as priority inheritance makes no sense for an
1352      * interrupts, only tasks. */
1353     configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );
1354 
1355     /* RTOS ports that support interrupt nesting have the concept of a maximum
1356      * system call (or maximum API call) interrupt priority.  Interrupts that are
1357      * above the maximum system call priority are kept permanently enabled, even
1358      * when the RTOS kernel is in a critical section, but cannot make any calls to
1359      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1360      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1361      * failure if a FreeRTOS API function is called from an interrupt that has been
1362      * assigned a priority above the configured maximum system call priority.
1363      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1364      * that have been assigned a priority at or (logically) below the maximum
1365      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1366      * safe API to ensure interrupt entry is as fast and as simple as possible.
1367      * More information (albeit Cortex-M specific) is provided on the following
1368      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1369     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1370 
1371     /* MISRA Ref 4.7.1 [Return value shall be checked] */
1372     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
1373     /* coverity[misra_c_2012_directive_4_7_violation] */
1374     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
1375     {
1376         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1377 
1378         /* When the queue is used to implement a semaphore no data is ever
1379          * moved through the queue but it is still valid to see if the queue 'has
1380          * space'. */
1381         if( uxMessagesWaiting < pxQueue->uxLength )
1382         {
1383             const int8_t cTxLock = pxQueue->cTxLock;
1384 
1385             traceQUEUE_SEND_FROM_ISR( pxQueue );
1386 
1387             /* A task can only have an inherited priority if it is a mutex
1388              * holder - and if there is a mutex holder then the mutex cannot be
1389              * given from an ISR.  As this is the ISR version of the function it
1390              * can be assumed there is no mutex holder and no need to determine if
1391              * priority disinheritance is needed.  Simply increase the count of
1392              * messages (semaphores) available. */
1393             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
1394 
1395             /* The event list is not altered if the queue is locked.  This will
1396              * be done when the queue is unlocked later. */
1397             if( cTxLock == queueUNLOCKED )
1398             {
1399                 #if ( configUSE_QUEUE_SETS == 1 )
1400                 {
1401                     if( pxQueue->pxQueueSetContainer != NULL )
1402                     {
1403                         if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1404                         {
1405                             /* The semaphore is a member of a queue set, and
1406                              * posting to the queue set caused a higher priority
1407                              * task to unblock.  A context switch is required. */
1408                             if( pxHigherPriorityTaskWoken != NULL )
1409                             {
1410                                 *pxHigherPriorityTaskWoken = pdTRUE;
1411                             }
1412                             else
1413                             {
1414                                 mtCOVERAGE_TEST_MARKER();
1415                             }
1416                         }
1417                         else
1418                         {
1419                             mtCOVERAGE_TEST_MARKER();
1420                         }
1421                     }
1422                     else
1423                     {
1424                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1425                         {
1426                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1427                             {
1428                                 /* The task waiting has a higher priority so
1429                                  *  record that a context switch is required. */
1430                                 if( pxHigherPriorityTaskWoken != NULL )
1431                                 {
1432                                     *pxHigherPriorityTaskWoken = pdTRUE;
1433                                 }
1434                                 else
1435                                 {
1436                                     mtCOVERAGE_TEST_MARKER();
1437                                 }
1438                             }
1439                             else
1440                             {
1441                                 mtCOVERAGE_TEST_MARKER();
1442                             }
1443                         }
1444                         else
1445                         {
1446                             mtCOVERAGE_TEST_MARKER();
1447                         }
1448                     }
1449                 }
1450                 #else /* configUSE_QUEUE_SETS */
1451                 {
1452                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1453                     {
1454                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1455                         {
1456                             /* The task waiting has a higher priority so record that a
1457                              * context switch is required. */
1458                             if( pxHigherPriorityTaskWoken != NULL )
1459                             {
1460                                 *pxHigherPriorityTaskWoken = pdTRUE;
1461                             }
1462                             else
1463                             {
1464                                 mtCOVERAGE_TEST_MARKER();
1465                             }
1466                         }
1467                         else
1468                         {
1469                             mtCOVERAGE_TEST_MARKER();
1470                         }
1471                     }
1472                     else
1473                     {
1474                         mtCOVERAGE_TEST_MARKER();
1475                     }
1476                 }
1477                 #endif /* configUSE_QUEUE_SETS */
1478             }
1479             else
1480             {
1481                 /* Increment the lock count so the task that unlocks the queue
1482                  * knows that data was posted while it was locked. */
1483                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1484             }
1485 
1486             xReturn = pdPASS;
1487         }
1488         else
1489         {
1490             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1491             xReturn = errQUEUE_FULL;
1492         }
1493     }
1494     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1495 
1496     traceRETURN_xQueueGiveFromISR( xReturn );
1497 
1498     return xReturn;
1499 }
1500 /*-----------------------------------------------------------*/
1501 
xQueueReceive(QueueHandle_t xQueue,void * const pvBuffer,TickType_t xTicksToWait)1502 BaseType_t xQueueReceive( QueueHandle_t xQueue,
1503                           void * const pvBuffer,
1504                           TickType_t xTicksToWait )
1505 {
1506     BaseType_t xEntryTimeSet = pdFALSE;
1507     TimeOut_t xTimeOut;
1508     Queue_t * const pxQueue = xQueue;
1509 
1510     traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );
1511 
1512     /* Check the pointer is not NULL. */
1513     configASSERT( ( pxQueue ) );
1514 
1515     /* The buffer into which data is received can only be NULL if the data size
1516      * is zero (so no data is copied into the buffer). */
1517     configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1518 
1519     /* Cannot block if the scheduler is suspended. */
1520     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1521     {
1522         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1523     }
1524     #endif
1525 
1526     for( ; ; )
1527     {
1528         taskENTER_CRITICAL();
1529         {
1530             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1531 
1532             /* Is there data in the queue now?  To be running the calling task
1533              * must be the highest priority task wanting to access the queue. */
1534             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1535             {
1536                 /* Data available, remove one item. */
1537                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1538                 traceQUEUE_RECEIVE( pxQueue );
1539                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
1540 
1541                 /* There is now space in the queue, were any tasks waiting to
1542                  * post to the queue?  If so, unblock the highest priority waiting
1543                  * task. */
1544                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1545                 {
1546                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1547                     {
1548                         queueYIELD_IF_USING_PREEMPTION();
1549                     }
1550                     else
1551                     {
1552                         mtCOVERAGE_TEST_MARKER();
1553                     }
1554                 }
1555                 else
1556                 {
1557                     mtCOVERAGE_TEST_MARKER();
1558                 }
1559 
1560                 taskEXIT_CRITICAL();
1561 
1562                 traceRETURN_xQueueReceive( pdPASS );
1563 
1564                 return pdPASS;
1565             }
1566             else
1567             {
1568                 if( xTicksToWait == ( TickType_t ) 0 )
1569                 {
1570                     /* The queue was empty and no block time is specified (or
1571                      * the block time has expired) so leave now. */
1572                     taskEXIT_CRITICAL();
1573 
1574                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1575                     traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1576 
1577                     return errQUEUE_EMPTY;
1578                 }
1579                 else if( xEntryTimeSet == pdFALSE )
1580                 {
1581                     /* The queue was empty and a block time was specified so
1582                      * configure the timeout structure. */
1583                     vTaskInternalSetTimeOutState( &xTimeOut );
1584                     xEntryTimeSet = pdTRUE;
1585                 }
1586                 else
1587                 {
1588                     /* Entry time was already set. */
1589                     mtCOVERAGE_TEST_MARKER();
1590                 }
1591             }
1592         }
1593         taskEXIT_CRITICAL();
1594 
1595         /* Interrupts and other tasks can send to and receive from the queue
1596          * now the critical section has been exited. */
1597 
1598         vTaskSuspendAll();
1599         prvLockQueue( pxQueue );
1600 
1601         /* Update the timeout state to see if it has expired yet. */
1602         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1603         {
1604             /* The timeout has not expired.  If the queue is still empty place
1605              * the task on the list of tasks waiting to receive from the queue. */
1606             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1607             {
1608                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1609                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1610                 prvUnlockQueue( pxQueue );
1611 
1612                 if( xTaskResumeAll() == pdFALSE )
1613                 {
1614                     taskYIELD_WITHIN_API();
1615                 }
1616                 else
1617                 {
1618                     mtCOVERAGE_TEST_MARKER();
1619                 }
1620             }
1621             else
1622             {
1623                 /* The queue contains data again.  Loop back to try and read the
1624                  * data. */
1625                 prvUnlockQueue( pxQueue );
1626                 ( void ) xTaskResumeAll();
1627             }
1628         }
1629         else
1630         {
1631             /* Timed out.  If there is no data in the queue exit, otherwise loop
1632              * back and attempt to read the data. */
1633             prvUnlockQueue( pxQueue );
1634             ( void ) xTaskResumeAll();
1635 
1636             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1637             {
1638                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1639                 traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1640 
1641                 return errQUEUE_EMPTY;
1642             }
1643             else
1644             {
1645                 mtCOVERAGE_TEST_MARKER();
1646             }
1647         }
1648     }
1649 }
1650 /*-----------------------------------------------------------*/
1651 
xQueueSemaphoreTake(QueueHandle_t xQueue,TickType_t xTicksToWait)1652 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1653                                 TickType_t xTicksToWait )
1654 {
1655     BaseType_t xEntryTimeSet = pdFALSE;
1656     TimeOut_t xTimeOut;
1657     Queue_t * const pxQueue = xQueue;
1658 
1659     #if ( configUSE_MUTEXES == 1 )
1660         BaseType_t xInheritanceOccurred = pdFALSE;
1661     #endif
1662 
1663     traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1664 
1665     /* Check the queue pointer is not NULL. */
1666     configASSERT( ( pxQueue ) );
1667 
1668     /* Check this really is a semaphore, in which case the item size will be
1669      * 0. */
1670     configASSERT( pxQueue->uxItemSize == 0 );
1671 
1672     /* Cannot block if the scheduler is suspended. */
1673     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1674     {
1675         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1676     }
1677     #endif
1678 
1679     for( ; ; )
1680     {
1681         taskENTER_CRITICAL();
1682         {
1683             /* Semaphores are queues with an item size of 0, and where the
1684              * number of messages in the queue is the semaphore's count value. */
1685             const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1686 
1687             /* Is there data in the queue now?  To be running the calling task
1688              * must be the highest priority task wanting to access the queue. */
1689             if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1690             {
1691                 traceQUEUE_RECEIVE( pxQueue );
1692 
1693                 /* Semaphores are queues with a data size of zero and where the
1694                  * messages waiting is the semaphore's count.  Reduce the count. */
1695                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1696 
1697                 #if ( configUSE_MUTEXES == 1 )
1698                 {
1699                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1700                     {
1701                         /* Record the information required to implement
1702                          * priority inheritance should it become necessary. */
1703                         pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1704                     }
1705                     else
1706                     {
1707                         mtCOVERAGE_TEST_MARKER();
1708                     }
1709                 }
1710                 #endif /* configUSE_MUTEXES */
1711 
1712                 /* Check to see if other tasks are blocked waiting to give the
1713                  * semaphore, and if so, unblock the highest priority such task. */
1714                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1715                 {
1716                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1717                     {
1718                         queueYIELD_IF_USING_PREEMPTION();
1719                     }
1720                     else
1721                     {
1722                         mtCOVERAGE_TEST_MARKER();
1723                     }
1724                 }
1725                 else
1726                 {
1727                     mtCOVERAGE_TEST_MARKER();
1728                 }
1729 
1730                 taskEXIT_CRITICAL();
1731 
1732                 traceRETURN_xQueueSemaphoreTake( pdPASS );
1733 
1734                 return pdPASS;
1735             }
1736             else
1737             {
1738                 if( xTicksToWait == ( TickType_t ) 0 )
1739                 {
1740                     /* The semaphore count was 0 and no block time is specified
1741                      * (or the block time has expired) so exit now. */
1742                     taskEXIT_CRITICAL();
1743 
1744                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1745                     traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1746 
1747                     return errQUEUE_EMPTY;
1748                 }
1749                 else if( xEntryTimeSet == pdFALSE )
1750                 {
1751                     /* The semaphore count was 0 and a block time was specified
1752                      * so configure the timeout structure ready to block. */
1753                     vTaskInternalSetTimeOutState( &xTimeOut );
1754                     xEntryTimeSet = pdTRUE;
1755                 }
1756                 else
1757                 {
1758                     /* Entry time was already set. */
1759                     mtCOVERAGE_TEST_MARKER();
1760                 }
1761             }
1762         }
1763         taskEXIT_CRITICAL();
1764 
1765         /* Interrupts and other tasks can give to and take from the semaphore
1766          * now the critical section has been exited. */
1767 
1768         vTaskSuspendAll();
1769         prvLockQueue( pxQueue );
1770 
1771         /* Update the timeout state to see if it has expired yet. */
1772         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1773         {
1774             /* A block time is specified and not expired.  If the semaphore
1775              * count is 0 then enter the Blocked state to wait for a semaphore to
1776              * become available.  As semaphores are implemented with queues the
1777              * queue being empty is equivalent to the semaphore count being 0. */
1778             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1779             {
1780                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1781 
1782                 #if ( configUSE_MUTEXES == 1 )
1783                 {
1784                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1785                     {
1786                         taskENTER_CRITICAL();
1787                         {
1788                             xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1789                         }
1790                         taskEXIT_CRITICAL();
1791                     }
1792                     else
1793                     {
1794                         mtCOVERAGE_TEST_MARKER();
1795                     }
1796                 }
1797                 #endif /* if ( configUSE_MUTEXES == 1 ) */
1798 
1799                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1800                 prvUnlockQueue( pxQueue );
1801 
1802                 if( xTaskResumeAll() == pdFALSE )
1803                 {
1804                     taskYIELD_WITHIN_API();
1805                 }
1806                 else
1807                 {
1808                     mtCOVERAGE_TEST_MARKER();
1809                 }
1810             }
1811             else
1812             {
1813                 /* There was no timeout and the semaphore count was not 0, so
1814                  * attempt to take the semaphore again. */
1815                 prvUnlockQueue( pxQueue );
1816                 ( void ) xTaskResumeAll();
1817             }
1818         }
1819         else
1820         {
1821             /* Timed out. */
1822             prvUnlockQueue( pxQueue );
1823             ( void ) xTaskResumeAll();
1824 
1825             /* If the semaphore count is 0 exit now as the timeout has
1826              * expired.  Otherwise return to attempt to take the semaphore that is
1827              * known to be available.  As semaphores are implemented by queues the
1828              * queue being empty is equivalent to the semaphore count being 0. */
1829             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1830             {
1831                 #if ( configUSE_MUTEXES == 1 )
1832                 {
1833                     /* xInheritanceOccurred could only have be set if
1834                      * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1835                      * test the mutex type again to check it is actually a mutex. */
1836                     if( xInheritanceOccurred != pdFALSE )
1837                     {
1838                         taskENTER_CRITICAL();
1839                         {
1840                             UBaseType_t uxHighestWaitingPriority;
1841 
1842                             /* This task blocking on the mutex caused another
1843                              * task to inherit this task's priority.  Now this task
1844                              * has timed out the priority should be disinherited
1845                              * again, but only as low as the next highest priority
1846                              * task that is waiting for the same mutex. */
1847                             uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1848 
1849                             /* vTaskPriorityDisinheritAfterTimeout uses the uxHighestWaitingPriority
1850                              * parameter to index pxReadyTasksLists when adding the task holding
1851                              * mutex to the ready list for its new priority. Coverity thinks that
1852                              * it can result in out-of-bounds access which is not true because
1853                              * uxHighestWaitingPriority, as returned by prvGetDisinheritPriorityAfterTimeout,
1854                              * is capped at ( configMAX_PRIORITIES - 1 ). */
1855                             /* coverity[overrun] */
1856                             vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1857                         }
1858                         taskEXIT_CRITICAL();
1859                     }
1860                 }
1861                 #endif /* configUSE_MUTEXES */
1862 
1863                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1864                 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1865 
1866                 return errQUEUE_EMPTY;
1867             }
1868             else
1869             {
1870                 mtCOVERAGE_TEST_MARKER();
1871             }
1872         }
1873     }
1874 }
1875 /*-----------------------------------------------------------*/
1876 
xQueuePeek(QueueHandle_t xQueue,void * const pvBuffer,TickType_t xTicksToWait)1877 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1878                        void * const pvBuffer,
1879                        TickType_t xTicksToWait )
1880 {
1881     BaseType_t xEntryTimeSet = pdFALSE;
1882     TimeOut_t xTimeOut;
1883     int8_t * pcOriginalReadPosition;
1884     Queue_t * const pxQueue = xQueue;
1885 
1886     traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1887 
1888     /* Check the pointer is not NULL. */
1889     configASSERT( ( pxQueue ) );
1890 
1891     /* The buffer into which data is received can only be NULL if the data size
1892      * is zero (so no data is copied into the buffer. */
1893     configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1894 
1895     /* Cannot block if the scheduler is suspended. */
1896     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1897     {
1898         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1899     }
1900     #endif
1901 
1902     for( ; ; )
1903     {
1904         taskENTER_CRITICAL();
1905         {
1906             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1907 
1908             /* Is there data in the queue now?  To be running the calling task
1909              * must be the highest priority task wanting to access the queue. */
1910             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1911             {
1912                 /* Remember the read position so it can be reset after the data
1913                  * is read from the queue as this function is only peeking the
1914                  * data, not removing it. */
1915                 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1916 
1917                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1918                 traceQUEUE_PEEK( pxQueue );
1919 
1920                 /* The data is not being removed, so reset the read pointer. */
1921                 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1922 
1923                 /* The data is being left in the queue, so see if there are
1924                  * any other tasks waiting for the data. */
1925                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1926                 {
1927                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1928                     {
1929                         /* The task waiting has a higher priority than this task. */
1930                         queueYIELD_IF_USING_PREEMPTION();
1931                     }
1932                     else
1933                     {
1934                         mtCOVERAGE_TEST_MARKER();
1935                     }
1936                 }
1937                 else
1938                 {
1939                     mtCOVERAGE_TEST_MARKER();
1940                 }
1941 
1942                 taskEXIT_CRITICAL();
1943 
1944                 traceRETURN_xQueuePeek( pdPASS );
1945 
1946                 return pdPASS;
1947             }
1948             else
1949             {
1950                 if( xTicksToWait == ( TickType_t ) 0 )
1951                 {
1952                     /* The queue was empty and no block time is specified (or
1953                      * the block time has expired) so leave now. */
1954                     taskEXIT_CRITICAL();
1955 
1956                     traceQUEUE_PEEK_FAILED( pxQueue );
1957                     traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1958 
1959                     return errQUEUE_EMPTY;
1960                 }
1961                 else if( xEntryTimeSet == pdFALSE )
1962                 {
1963                     /* The queue was empty and a block time was specified so
1964                      * configure the timeout structure ready to enter the blocked
1965                      * state. */
1966                     vTaskInternalSetTimeOutState( &xTimeOut );
1967                     xEntryTimeSet = pdTRUE;
1968                 }
1969                 else
1970                 {
1971                     /* Entry time was already set. */
1972                     mtCOVERAGE_TEST_MARKER();
1973                 }
1974             }
1975         }
1976         taskEXIT_CRITICAL();
1977 
1978         /* Interrupts and other tasks can send to and receive from the queue
1979          * now that the critical section has been exited. */
1980 
1981         vTaskSuspendAll();
1982         prvLockQueue( pxQueue );
1983 
1984         /* Update the timeout state to see if it has expired yet. */
1985         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1986         {
1987             /* Timeout has not expired yet, check to see if there is data in the
1988             * queue now, and if not enter the Blocked state to wait for data. */
1989             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1990             {
1991                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1992                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1993                 prvUnlockQueue( pxQueue );
1994 
1995                 if( xTaskResumeAll() == pdFALSE )
1996                 {
1997                     taskYIELD_WITHIN_API();
1998                 }
1999                 else
2000                 {
2001                     mtCOVERAGE_TEST_MARKER();
2002                 }
2003             }
2004             else
2005             {
2006                 /* There is data in the queue now, so don't enter the blocked
2007                  * state, instead return to try and obtain the data. */
2008                 prvUnlockQueue( pxQueue );
2009                 ( void ) xTaskResumeAll();
2010             }
2011         }
2012         else
2013         {
2014             /* The timeout has expired.  If there is still no data in the queue
2015              * exit, otherwise go back and try to read the data again. */
2016             prvUnlockQueue( pxQueue );
2017             ( void ) xTaskResumeAll();
2018 
2019             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2020             {
2021                 traceQUEUE_PEEK_FAILED( pxQueue );
2022                 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2023 
2024                 return errQUEUE_EMPTY;
2025             }
2026             else
2027             {
2028                 mtCOVERAGE_TEST_MARKER();
2029             }
2030         }
2031     }
2032 }
2033 /*-----------------------------------------------------------*/
2034 
xQueueReceiveFromISR(QueueHandle_t xQueue,void * const pvBuffer,BaseType_t * const pxHigherPriorityTaskWoken)2035 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2036                                  void * const pvBuffer,
2037                                  BaseType_t * const pxHigherPriorityTaskWoken )
2038 {
2039     BaseType_t xReturn;
2040     UBaseType_t uxSavedInterruptStatus;
2041     Queue_t * const pxQueue = xQueue;
2042 
2043     traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2044 
2045     configASSERT( pxQueue );
2046     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2047 
2048     /* RTOS ports that support interrupt nesting have the concept of a maximum
2049      * system call (or maximum API call) interrupt priority.  Interrupts that are
2050      * above the maximum system call priority are kept permanently enabled, even
2051      * when the RTOS kernel is in a critical section, but cannot make any calls to
2052      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2053      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2054      * failure if a FreeRTOS API function is called from an interrupt that has been
2055      * assigned a priority above the configured maximum system call priority.
2056      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2057      * that have been assigned a priority at or (logically) below the maximum
2058      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2059      * safe API to ensure interrupt entry is as fast and as simple as possible.
2060      * More information (albeit Cortex-M specific) is provided on the following
2061      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2062     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2063 
2064     /* MISRA Ref 4.7.1 [Return value shall be checked] */
2065     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
2066     /* coverity[misra_c_2012_directive_4_7_violation] */
2067     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
2068     {
2069         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2070 
2071         /* Cannot block in an ISR, so check there is data available. */
2072         if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2073         {
2074             const int8_t cRxLock = pxQueue->cRxLock;
2075 
2076             traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2077 
2078             prvCopyDataFromQueue( pxQueue, pvBuffer );
2079             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2080 
2081             /* If the queue is locked the event list will not be modified.
2082              * Instead update the lock count so the task that unlocks the queue
2083              * will know that an ISR has removed data while the queue was
2084              * locked. */
2085             if( cRxLock == queueUNLOCKED )
2086             {
2087                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2088                 {
2089                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2090                     {
2091                         /* The task waiting has a higher priority than us so
2092                          * force a context switch. */
2093                         if( pxHigherPriorityTaskWoken != NULL )
2094                         {
2095                             *pxHigherPriorityTaskWoken = pdTRUE;
2096                         }
2097                         else
2098                         {
2099                             mtCOVERAGE_TEST_MARKER();
2100                         }
2101                     }
2102                     else
2103                     {
2104                         mtCOVERAGE_TEST_MARKER();
2105                     }
2106                 }
2107                 else
2108                 {
2109                     mtCOVERAGE_TEST_MARKER();
2110                 }
2111             }
2112             else
2113             {
2114                 /* Increment the lock count so the task that unlocks the queue
2115                  * knows that data was removed while it was locked. */
2116                 prvIncrementQueueRxLock( pxQueue, cRxLock );
2117             }
2118 
2119             xReturn = pdPASS;
2120         }
2121         else
2122         {
2123             xReturn = pdFAIL;
2124             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2125         }
2126     }
2127     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2128 
2129     traceRETURN_xQueueReceiveFromISR( xReturn );
2130 
2131     return xReturn;
2132 }
2133 /*-----------------------------------------------------------*/
2134 
xQueuePeekFromISR(QueueHandle_t xQueue,void * const pvBuffer)2135 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2136                               void * const pvBuffer )
2137 {
2138     BaseType_t xReturn;
2139     UBaseType_t uxSavedInterruptStatus;
2140     int8_t * pcOriginalReadPosition;
2141     Queue_t * const pxQueue = xQueue;
2142 
2143     traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2144 
2145     configASSERT( pxQueue );
2146     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2147     configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2148 
2149     /* RTOS ports that support interrupt nesting have the concept of a maximum
2150      * system call (or maximum API call) interrupt priority.  Interrupts that are
2151      * above the maximum system call priority are kept permanently enabled, even
2152      * when the RTOS kernel is in a critical section, but cannot make any calls to
2153      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2154      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2155      * failure if a FreeRTOS API function is called from an interrupt that has been
2156      * assigned a priority above the configured maximum system call priority.
2157      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2158      * that have been assigned a priority at or (logically) below the maximum
2159      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2160      * safe API to ensure interrupt entry is as fast and as simple as possible.
2161      * More information (albeit Cortex-M specific) is provided on the following
2162      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2163     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2164 
2165     /* MISRA Ref 4.7.1 [Return value shall be checked] */
2166     /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#dir-47 */
2167     /* coverity[misra_c_2012_directive_4_7_violation] */
2168     uxSavedInterruptStatus = ( UBaseType_t ) taskENTER_CRITICAL_FROM_ISR();
2169     {
2170         /* Cannot block in an ISR, so check there is data available. */
2171         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2172         {
2173             traceQUEUE_PEEK_FROM_ISR( pxQueue );
2174 
2175             /* Remember the read position so it can be reset as nothing is
2176              * actually being removed from the queue. */
2177             pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2178             prvCopyDataFromQueue( pxQueue, pvBuffer );
2179             pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2180 
2181             xReturn = pdPASS;
2182         }
2183         else
2184         {
2185             xReturn = pdFAIL;
2186             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2187         }
2188     }
2189     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2190 
2191     traceRETURN_xQueuePeekFromISR( xReturn );
2192 
2193     return xReturn;
2194 }
2195 /*-----------------------------------------------------------*/
2196 
uxQueueMessagesWaiting(const QueueHandle_t xQueue)2197 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2198 {
2199     UBaseType_t uxReturn;
2200 
2201     traceENTER_uxQueueMessagesWaiting( xQueue );
2202 
2203     configASSERT( xQueue );
2204 
2205     taskENTER_CRITICAL();
2206     {
2207         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2208     }
2209     taskEXIT_CRITICAL();
2210 
2211     traceRETURN_uxQueueMessagesWaiting( uxReturn );
2212 
2213     return uxReturn;
2214 }
2215 /*-----------------------------------------------------------*/
2216 
uxQueueSpacesAvailable(const QueueHandle_t xQueue)2217 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2218 {
2219     UBaseType_t uxReturn;
2220     Queue_t * const pxQueue = xQueue;
2221 
2222     traceENTER_uxQueueSpacesAvailable( xQueue );
2223 
2224     configASSERT( pxQueue );
2225 
2226     taskENTER_CRITICAL();
2227     {
2228         uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2229     }
2230     taskEXIT_CRITICAL();
2231 
2232     traceRETURN_uxQueueSpacesAvailable( uxReturn );
2233 
2234     return uxReturn;
2235 }
2236 /*-----------------------------------------------------------*/
2237 
uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)2238 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2239 {
2240     UBaseType_t uxReturn;
2241     Queue_t * const pxQueue = xQueue;
2242 
2243     traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2244 
2245     configASSERT( pxQueue );
2246     uxReturn = pxQueue->uxMessagesWaiting;
2247 
2248     traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2249 
2250     return uxReturn;
2251 }
2252 /*-----------------------------------------------------------*/
2253 
vQueueDelete(QueueHandle_t xQueue)2254 void vQueueDelete( QueueHandle_t xQueue )
2255 {
2256     Queue_t * const pxQueue = xQueue;
2257 
2258     traceENTER_vQueueDelete( xQueue );
2259 
2260     configASSERT( pxQueue );
2261     traceQUEUE_DELETE( pxQueue );
2262 
2263     #if ( configQUEUE_REGISTRY_SIZE > 0 )
2264     {
2265         vQueueUnregisterQueue( pxQueue );
2266     }
2267     #endif
2268 
2269     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2270     {
2271         /* The queue can only have been allocated dynamically - free it
2272          * again. */
2273         vPortFree( pxQueue );
2274     }
2275     #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2276     {
2277         /* The queue could have been allocated statically or dynamically, so
2278          * check before attempting to free the memory. */
2279         if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2280         {
2281             vPortFree( pxQueue );
2282         }
2283         else
2284         {
2285             mtCOVERAGE_TEST_MARKER();
2286         }
2287     }
2288     #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2289     {
2290         /* The queue must have been statically allocated, so is not going to be
2291          * deleted.  Avoid compiler warnings about the unused parameter. */
2292         ( void ) pxQueue;
2293     }
2294     #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2295 
2296     traceRETURN_vQueueDelete();
2297 }
2298 /*-----------------------------------------------------------*/
2299 
2300 #if ( configUSE_TRACE_FACILITY == 1 )
2301 
uxQueueGetQueueNumber(QueueHandle_t xQueue)2302     UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2303     {
2304         traceENTER_uxQueueGetQueueNumber( xQueue );
2305 
2306         traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2307 
2308         return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2309     }
2310 
2311 #endif /* configUSE_TRACE_FACILITY */
2312 /*-----------------------------------------------------------*/
2313 
2314 #if ( configUSE_TRACE_FACILITY == 1 )
2315 
vQueueSetQueueNumber(QueueHandle_t xQueue,UBaseType_t uxQueueNumber)2316     void vQueueSetQueueNumber( QueueHandle_t xQueue,
2317                                UBaseType_t uxQueueNumber )
2318     {
2319         traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2320 
2321         ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2322 
2323         traceRETURN_vQueueSetQueueNumber();
2324     }
2325 
2326 #endif /* configUSE_TRACE_FACILITY */
2327 /*-----------------------------------------------------------*/
2328 
2329 #if ( configUSE_TRACE_FACILITY == 1 )
2330 
ucQueueGetQueueType(QueueHandle_t xQueue)2331     uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2332     {
2333         traceENTER_ucQueueGetQueueType( xQueue );
2334 
2335         traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2336 
2337         return ( ( Queue_t * ) xQueue )->ucQueueType;
2338     }
2339 
2340 #endif /* configUSE_TRACE_FACILITY */
2341 /*-----------------------------------------------------------*/
2342 
uxQueueGetQueueItemSize(QueueHandle_t xQueue)2343 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2344 {
2345     traceENTER_uxQueueGetQueueItemSize( xQueue );
2346 
2347     traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2348 
2349     return ( ( Queue_t * ) xQueue )->uxItemSize;
2350 }
2351 /*-----------------------------------------------------------*/
2352 
uxQueueGetQueueLength(QueueHandle_t xQueue)2353 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2354 {
2355     traceENTER_uxQueueGetQueueLength( xQueue );
2356 
2357     traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2358 
2359     return ( ( Queue_t * ) xQueue )->uxLength;
2360 }
2361 /*-----------------------------------------------------------*/
2362 
2363 #if ( configUSE_MUTEXES == 1 )
2364 
prvGetDisinheritPriorityAfterTimeout(const Queue_t * const pxQueue)2365     static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2366     {
2367         UBaseType_t uxHighestPriorityOfWaitingTasks;
2368 
2369         /* If a task waiting for a mutex causes the mutex holder to inherit a
2370          * priority, but the waiting task times out, then the holder should
2371          * disinherit the priority - but only down to the highest priority of any
2372          * other tasks that are waiting for the same mutex.  For this purpose,
2373          * return the priority of the highest priority task that is waiting for the
2374          * mutex. */
2375         if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2376         {
2377             uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2378         }
2379         else
2380         {
2381             uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2382         }
2383 
2384         return uxHighestPriorityOfWaitingTasks;
2385     }
2386 
2387 #endif /* configUSE_MUTEXES */
2388 /*-----------------------------------------------------------*/
2389 
prvCopyDataToQueue(Queue_t * const pxQueue,const void * pvItemToQueue,const BaseType_t xPosition)2390 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2391                                       const void * pvItemToQueue,
2392                                       const BaseType_t xPosition )
2393 {
2394     BaseType_t xReturn = pdFALSE;
2395     UBaseType_t uxMessagesWaiting;
2396 
2397     /* This function is called from a critical section. */
2398 
2399     uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2400 
2401     if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2402     {
2403         #if ( configUSE_MUTEXES == 1 )
2404         {
2405             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2406             {
2407                 /* The mutex is no longer being held. */
2408                 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2409                 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2410             }
2411             else
2412             {
2413                 mtCOVERAGE_TEST_MARKER();
2414             }
2415         }
2416         #endif /* configUSE_MUTEXES */
2417     }
2418     else if( xPosition == queueSEND_TO_BACK )
2419     {
2420         ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2421         pxQueue->pcWriteTo += pxQueue->uxItemSize;
2422 
2423         if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail )
2424         {
2425             pxQueue->pcWriteTo = pxQueue->pcHead;
2426         }
2427         else
2428         {
2429             mtCOVERAGE_TEST_MARKER();
2430         }
2431     }
2432     else
2433     {
2434         ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2435         pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2436 
2437         if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead )
2438         {
2439             pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2440         }
2441         else
2442         {
2443             mtCOVERAGE_TEST_MARKER();
2444         }
2445 
2446         if( xPosition == queueOVERWRITE )
2447         {
2448             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2449             {
2450                 /* An item is not being added but overwritten, so subtract
2451                  * one from the recorded number of items in the queue so when
2452                  * one is added again below the number of recorded items remains
2453                  * correct. */
2454                 --uxMessagesWaiting;
2455             }
2456             else
2457             {
2458                 mtCOVERAGE_TEST_MARKER();
2459             }
2460         }
2461         else
2462         {
2463             mtCOVERAGE_TEST_MARKER();
2464         }
2465     }
2466 
2467     pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
2468 
2469     return xReturn;
2470 }
2471 /*-----------------------------------------------------------*/
2472 
prvCopyDataFromQueue(Queue_t * const pxQueue,void * const pvBuffer)2473 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2474                                   void * const pvBuffer )
2475 {
2476     if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2477     {
2478         pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2479 
2480         if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2481         {
2482             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2483         }
2484         else
2485         {
2486             mtCOVERAGE_TEST_MARKER();
2487         }
2488 
2489         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
2490     }
2491 }
2492 /*-----------------------------------------------------------*/
2493 
prvUnlockQueue(Queue_t * const pxQueue)2494 static void prvUnlockQueue( Queue_t * const pxQueue )
2495 {
2496     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2497 
2498     /* The lock counts contains the number of extra data items placed or
2499      * removed from the queue while the queue was locked.  When a queue is
2500      * locked items can be added or removed, but the event lists cannot be
2501      * updated. */
2502     taskENTER_CRITICAL();
2503     {
2504         int8_t cTxLock = pxQueue->cTxLock;
2505 
2506         /* See if data was added to the queue while it was locked. */
2507         while( cTxLock > queueLOCKED_UNMODIFIED )
2508         {
2509             /* Data was posted while the queue was locked.  Are any tasks
2510              * blocked waiting for data to become available? */
2511             #if ( configUSE_QUEUE_SETS == 1 )
2512             {
2513                 if( pxQueue->pxQueueSetContainer != NULL )
2514                 {
2515                     if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2516                     {
2517                         /* The queue is a member of a queue set, and posting to
2518                          * the queue set caused a higher priority task to unblock.
2519                          * A context switch is required. */
2520                         vTaskMissedYield();
2521                     }
2522                     else
2523                     {
2524                         mtCOVERAGE_TEST_MARKER();
2525                     }
2526                 }
2527                 else
2528                 {
2529                     /* Tasks that are removed from the event list will get
2530                      * added to the pending ready list as the scheduler is still
2531                      * suspended. */
2532                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2533                     {
2534                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2535                         {
2536                             /* The task waiting has a higher priority so record that a
2537                              * context switch is required. */
2538                             vTaskMissedYield();
2539                         }
2540                         else
2541                         {
2542                             mtCOVERAGE_TEST_MARKER();
2543                         }
2544                     }
2545                     else
2546                     {
2547                         break;
2548                     }
2549                 }
2550             }
2551             #else /* configUSE_QUEUE_SETS */
2552             {
2553                 /* Tasks that are removed from the event list will get added to
2554                  * the pending ready list as the scheduler is still suspended. */
2555                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2556                 {
2557                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2558                     {
2559                         /* The task waiting has a higher priority so record that
2560                          * a context switch is required. */
2561                         vTaskMissedYield();
2562                     }
2563                     else
2564                     {
2565                         mtCOVERAGE_TEST_MARKER();
2566                     }
2567                 }
2568                 else
2569                 {
2570                     break;
2571                 }
2572             }
2573             #endif /* configUSE_QUEUE_SETS */
2574 
2575             --cTxLock;
2576         }
2577 
2578         pxQueue->cTxLock = queueUNLOCKED;
2579     }
2580     taskEXIT_CRITICAL();
2581 
2582     /* Do the same for the Rx lock. */
2583     taskENTER_CRITICAL();
2584     {
2585         int8_t cRxLock = pxQueue->cRxLock;
2586 
2587         while( cRxLock > queueLOCKED_UNMODIFIED )
2588         {
2589             if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2590             {
2591                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2592                 {
2593                     vTaskMissedYield();
2594                 }
2595                 else
2596                 {
2597                     mtCOVERAGE_TEST_MARKER();
2598                 }
2599 
2600                 --cRxLock;
2601             }
2602             else
2603             {
2604                 break;
2605             }
2606         }
2607 
2608         pxQueue->cRxLock = queueUNLOCKED;
2609     }
2610     taskEXIT_CRITICAL();
2611 }
2612 /*-----------------------------------------------------------*/
2613 
prvIsQueueEmpty(const Queue_t * pxQueue)2614 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2615 {
2616     BaseType_t xReturn;
2617 
2618     taskENTER_CRITICAL();
2619     {
2620         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2621         {
2622             xReturn = pdTRUE;
2623         }
2624         else
2625         {
2626             xReturn = pdFALSE;
2627         }
2628     }
2629     taskEXIT_CRITICAL();
2630 
2631     return xReturn;
2632 }
2633 /*-----------------------------------------------------------*/
2634 
xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)2635 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2636 {
2637     BaseType_t xReturn;
2638     Queue_t * const pxQueue = xQueue;
2639 
2640     traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2641 
2642     configASSERT( pxQueue );
2643 
2644     if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2645     {
2646         xReturn = pdTRUE;
2647     }
2648     else
2649     {
2650         xReturn = pdFALSE;
2651     }
2652 
2653     traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2654 
2655     return xReturn;
2656 }
2657 /*-----------------------------------------------------------*/
2658 
prvIsQueueFull(const Queue_t * pxQueue)2659 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2660 {
2661     BaseType_t xReturn;
2662 
2663     taskENTER_CRITICAL();
2664     {
2665         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2666         {
2667             xReturn = pdTRUE;
2668         }
2669         else
2670         {
2671             xReturn = pdFALSE;
2672         }
2673     }
2674     taskEXIT_CRITICAL();
2675 
2676     return xReturn;
2677 }
2678 /*-----------------------------------------------------------*/
2679 
xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)2680 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2681 {
2682     BaseType_t xReturn;
2683     Queue_t * const pxQueue = xQueue;
2684 
2685     traceENTER_xQueueIsQueueFullFromISR( xQueue );
2686 
2687     configASSERT( pxQueue );
2688 
2689     if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2690     {
2691         xReturn = pdTRUE;
2692     }
2693     else
2694     {
2695         xReturn = pdFALSE;
2696     }
2697 
2698     traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2699 
2700     return xReturn;
2701 }
2702 /*-----------------------------------------------------------*/
2703 
2704 #if ( configUSE_CO_ROUTINES == 1 )
2705 
xQueueCRSend(QueueHandle_t xQueue,const void * pvItemToQueue,TickType_t xTicksToWait)2706     BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2707                              const void * pvItemToQueue,
2708                              TickType_t xTicksToWait )
2709     {
2710         BaseType_t xReturn;
2711         Queue_t * const pxQueue = xQueue;
2712 
2713         traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2714 
2715         /* If the queue is already full we may have to block.  A critical section
2716          * is required to prevent an interrupt removing something from the queue
2717          * between the check to see if the queue is full and blocking on the queue. */
2718         portDISABLE_INTERRUPTS();
2719         {
2720             if( prvIsQueueFull( pxQueue ) != pdFALSE )
2721             {
2722                 /* The queue is full - do we want to block or just leave without
2723                  * posting? */
2724                 if( xTicksToWait > ( TickType_t ) 0 )
2725                 {
2726                     /* As this is called from a coroutine we cannot block directly, but
2727                      * return indicating that we need to block. */
2728                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2729                     portENABLE_INTERRUPTS();
2730                     return errQUEUE_BLOCKED;
2731                 }
2732                 else
2733                 {
2734                     portENABLE_INTERRUPTS();
2735                     return errQUEUE_FULL;
2736                 }
2737             }
2738         }
2739         portENABLE_INTERRUPTS();
2740 
2741         portDISABLE_INTERRUPTS();
2742         {
2743             if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2744             {
2745                 /* There is room in the queue, copy the data into the queue. */
2746                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2747                 xReturn = pdPASS;
2748 
2749                 /* Were any co-routines waiting for data to become available? */
2750                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2751                 {
2752                     /* In this instance the co-routine could be placed directly
2753                      * into the ready list as we are within a critical section.
2754                      * Instead the same pending ready list mechanism is used as if
2755                      * the event were caused from within an interrupt. */
2756                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2757                     {
2758                         /* The co-routine waiting has a higher priority so record
2759                          * that a yield might be appropriate. */
2760                         xReturn = errQUEUE_YIELD;
2761                     }
2762                     else
2763                     {
2764                         mtCOVERAGE_TEST_MARKER();
2765                     }
2766                 }
2767                 else
2768                 {
2769                     mtCOVERAGE_TEST_MARKER();
2770                 }
2771             }
2772             else
2773             {
2774                 xReturn = errQUEUE_FULL;
2775             }
2776         }
2777         portENABLE_INTERRUPTS();
2778 
2779         traceRETURN_xQueueCRSend( xReturn );
2780 
2781         return xReturn;
2782     }
2783 
2784 #endif /* configUSE_CO_ROUTINES */
2785 /*-----------------------------------------------------------*/
2786 
2787 #if ( configUSE_CO_ROUTINES == 1 )
2788 
xQueueCRReceive(QueueHandle_t xQueue,void * pvBuffer,TickType_t xTicksToWait)2789     BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2790                                 void * pvBuffer,
2791                                 TickType_t xTicksToWait )
2792     {
2793         BaseType_t xReturn;
2794         Queue_t * const pxQueue = xQueue;
2795 
2796         traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2797 
2798         /* If the queue is already empty we may have to block.  A critical section
2799          * is required to prevent an interrupt adding something to the queue
2800          * between the check to see if the queue is empty and blocking on the queue. */
2801         portDISABLE_INTERRUPTS();
2802         {
2803             if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2804             {
2805                 /* There are no messages in the queue, do we want to block or just
2806                  * leave with nothing? */
2807                 if( xTicksToWait > ( TickType_t ) 0 )
2808                 {
2809                     /* As this is a co-routine we cannot block directly, but return
2810                      * indicating that we need to block. */
2811                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2812                     portENABLE_INTERRUPTS();
2813                     return errQUEUE_BLOCKED;
2814                 }
2815                 else
2816                 {
2817                     portENABLE_INTERRUPTS();
2818                     return errQUEUE_FULL;
2819                 }
2820             }
2821             else
2822             {
2823                 mtCOVERAGE_TEST_MARKER();
2824             }
2825         }
2826         portENABLE_INTERRUPTS();
2827 
2828         portDISABLE_INTERRUPTS();
2829         {
2830             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2831             {
2832                 /* Data is available from the queue. */
2833                 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2834 
2835                 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2836                 {
2837                     pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2838                 }
2839                 else
2840                 {
2841                     mtCOVERAGE_TEST_MARKER();
2842                 }
2843 
2844                 --( pxQueue->uxMessagesWaiting );
2845                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2846 
2847                 xReturn = pdPASS;
2848 
2849                 /* Were any co-routines waiting for space to become available? */
2850                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2851                 {
2852                     /* In this instance the co-routine could be placed directly
2853                      * into the ready list as we are within a critical section.
2854                      * Instead the same pending ready list mechanism is used as if
2855                      * the event were caused from within an interrupt. */
2856                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2857                     {
2858                         xReturn = errQUEUE_YIELD;
2859                     }
2860                     else
2861                     {
2862                         mtCOVERAGE_TEST_MARKER();
2863                     }
2864                 }
2865                 else
2866                 {
2867                     mtCOVERAGE_TEST_MARKER();
2868                 }
2869             }
2870             else
2871             {
2872                 xReturn = pdFAIL;
2873             }
2874         }
2875         portENABLE_INTERRUPTS();
2876 
2877         traceRETURN_xQueueCRReceive( xReturn );
2878 
2879         return xReturn;
2880     }
2881 
2882 #endif /* configUSE_CO_ROUTINES */
2883 /*-----------------------------------------------------------*/
2884 
2885 #if ( configUSE_CO_ROUTINES == 1 )
2886 
xQueueCRSendFromISR(QueueHandle_t xQueue,const void * pvItemToQueue,BaseType_t xCoRoutinePreviouslyWoken)2887     BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2888                                     const void * pvItemToQueue,
2889                                     BaseType_t xCoRoutinePreviouslyWoken )
2890     {
2891         Queue_t * const pxQueue = xQueue;
2892 
2893         traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2894 
2895         /* Cannot block within an ISR so if there is no space on the queue then
2896          * exit without doing anything. */
2897         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2898         {
2899             prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2900 
2901             /* We only want to wake one co-routine per ISR, so check that a
2902              * co-routine has not already been woken. */
2903             if( xCoRoutinePreviouslyWoken == pdFALSE )
2904             {
2905                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2906                 {
2907                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2908                     {
2909                         return pdTRUE;
2910                     }
2911                     else
2912                     {
2913                         mtCOVERAGE_TEST_MARKER();
2914                     }
2915                 }
2916                 else
2917                 {
2918                     mtCOVERAGE_TEST_MARKER();
2919                 }
2920             }
2921             else
2922             {
2923                 mtCOVERAGE_TEST_MARKER();
2924             }
2925         }
2926         else
2927         {
2928             mtCOVERAGE_TEST_MARKER();
2929         }
2930 
2931         traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2932 
2933         return xCoRoutinePreviouslyWoken;
2934     }
2935 
2936 #endif /* configUSE_CO_ROUTINES */
2937 /*-----------------------------------------------------------*/
2938 
2939 #if ( configUSE_CO_ROUTINES == 1 )
2940 
xQueueCRReceiveFromISR(QueueHandle_t xQueue,void * pvBuffer,BaseType_t * pxCoRoutineWoken)2941     BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2942                                        void * pvBuffer,
2943                                        BaseType_t * pxCoRoutineWoken )
2944     {
2945         BaseType_t xReturn;
2946         Queue_t * const pxQueue = xQueue;
2947 
2948         traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2949 
2950         /* We cannot block from an ISR, so check there is data available. If
2951          * not then just leave without doing anything. */
2952         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2953         {
2954             /* Copy the data from the queue. */
2955             pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2956 
2957             if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2958             {
2959                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2960             }
2961             else
2962             {
2963                 mtCOVERAGE_TEST_MARKER();
2964             }
2965 
2966             --( pxQueue->uxMessagesWaiting );
2967             ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2968 
2969             if( ( *pxCoRoutineWoken ) == pdFALSE )
2970             {
2971                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2972                 {
2973                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2974                     {
2975                         *pxCoRoutineWoken = pdTRUE;
2976                     }
2977                     else
2978                     {
2979                         mtCOVERAGE_TEST_MARKER();
2980                     }
2981                 }
2982                 else
2983                 {
2984                     mtCOVERAGE_TEST_MARKER();
2985                 }
2986             }
2987             else
2988             {
2989                 mtCOVERAGE_TEST_MARKER();
2990             }
2991 
2992             xReturn = pdPASS;
2993         }
2994         else
2995         {
2996             xReturn = pdFAIL;
2997         }
2998 
2999         traceRETURN_xQueueCRReceiveFromISR( xReturn );
3000 
3001         return xReturn;
3002     }
3003 
3004 #endif /* configUSE_CO_ROUTINES */
3005 /*-----------------------------------------------------------*/
3006 
3007 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3008 
vQueueAddToRegistry(QueueHandle_t xQueue,const char * pcQueueName)3009     void vQueueAddToRegistry( QueueHandle_t xQueue,
3010                               const char * pcQueueName )
3011     {
3012         UBaseType_t ux;
3013         QueueRegistryItem_t * pxEntryToWrite = NULL;
3014 
3015         traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3016 
3017         configASSERT( xQueue );
3018 
3019         if( pcQueueName != NULL )
3020         {
3021             /* See if there is an empty space in the registry.  A NULL name denotes
3022              * a free slot. */
3023             for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3024             {
3025                 /* Replace an existing entry if the queue is already in the registry. */
3026                 if( xQueue == xQueueRegistry[ ux ].xHandle )
3027                 {
3028                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3029                     break;
3030                 }
3031                 /* Otherwise, store in the next empty location */
3032                 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3033                 {
3034                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3035                 }
3036                 else
3037                 {
3038                     mtCOVERAGE_TEST_MARKER();
3039                 }
3040             }
3041         }
3042 
3043         if( pxEntryToWrite != NULL )
3044         {
3045             /* Store the information on this queue. */
3046             pxEntryToWrite->pcQueueName = pcQueueName;
3047             pxEntryToWrite->xHandle = xQueue;
3048 
3049             traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3050         }
3051 
3052         traceRETURN_vQueueAddToRegistry();
3053     }
3054 
3055 #endif /* configQUEUE_REGISTRY_SIZE */
3056 /*-----------------------------------------------------------*/
3057 
3058 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3059 
pcQueueGetName(QueueHandle_t xQueue)3060     const char * pcQueueGetName( QueueHandle_t xQueue )
3061     {
3062         UBaseType_t ux;
3063         const char * pcReturn = NULL;
3064 
3065         traceENTER_pcQueueGetName( xQueue );
3066 
3067         configASSERT( xQueue );
3068 
3069         /* Note there is nothing here to protect against another task adding or
3070          * removing entries from the registry while it is being searched. */
3071 
3072         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3073         {
3074             if( xQueueRegistry[ ux ].xHandle == xQueue )
3075             {
3076                 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3077                 break;
3078             }
3079             else
3080             {
3081                 mtCOVERAGE_TEST_MARKER();
3082             }
3083         }
3084 
3085         traceRETURN_pcQueueGetName( pcReturn );
3086 
3087         return pcReturn;
3088     }
3089 
3090 #endif /* configQUEUE_REGISTRY_SIZE */
3091 /*-----------------------------------------------------------*/
3092 
3093 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3094 
vQueueUnregisterQueue(QueueHandle_t xQueue)3095     void vQueueUnregisterQueue( QueueHandle_t xQueue )
3096     {
3097         UBaseType_t ux;
3098 
3099         traceENTER_vQueueUnregisterQueue( xQueue );
3100 
3101         configASSERT( xQueue );
3102 
3103         /* See if the handle of the queue being unregistered in actually in the
3104          * registry. */
3105         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3106         {
3107             if( xQueueRegistry[ ux ].xHandle == xQueue )
3108             {
3109                 /* Set the name to NULL to show that this slot if free again. */
3110                 xQueueRegistry[ ux ].pcQueueName = NULL;
3111 
3112                 /* Set the handle to NULL to ensure the same queue handle cannot
3113                  * appear in the registry twice if it is added, removed, then
3114                  * added again. */
3115                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3116                 break;
3117             }
3118             else
3119             {
3120                 mtCOVERAGE_TEST_MARKER();
3121             }
3122         }
3123 
3124         traceRETURN_vQueueUnregisterQueue();
3125     }
3126 
3127 #endif /* configQUEUE_REGISTRY_SIZE */
3128 /*-----------------------------------------------------------*/
3129 
3130 #if ( configUSE_TIMERS == 1 )
3131 
vQueueWaitForMessageRestricted(QueueHandle_t xQueue,TickType_t xTicksToWait,const BaseType_t xWaitIndefinitely)3132     void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3133                                          TickType_t xTicksToWait,
3134                                          const BaseType_t xWaitIndefinitely )
3135     {
3136         Queue_t * const pxQueue = xQueue;
3137 
3138         traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3139 
3140         /* This function should not be called by application code hence the
3141          * 'Restricted' in its name.  It is not part of the public API.  It is
3142          * designed for use by kernel code, and has special calling requirements.
3143          * It can result in vListInsert() being called on a list that can only
3144          * possibly ever have one item in it, so the list will be fast, but even
3145          * so it should be called with the scheduler locked and not from a critical
3146          * section. */
3147 
3148         /* Only do anything if there are no messages in the queue.  This function
3149          *  will not actually cause the task to block, just place it on a blocked
3150          *  list.  It will not block until the scheduler is unlocked - at which
3151          *  time a yield will be performed.  If an item is added to the queue while
3152          *  the queue is locked, and the calling task blocks on the queue, then the
3153          *  calling task will be immediately unblocked when the queue is unlocked. */
3154         prvLockQueue( pxQueue );
3155 
3156         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3157         {
3158             /* There is nothing in the queue, block for the specified period. */
3159             vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3160         }
3161         else
3162         {
3163             mtCOVERAGE_TEST_MARKER();
3164         }
3165 
3166         prvUnlockQueue( pxQueue );
3167 
3168         traceRETURN_vQueueWaitForMessageRestricted();
3169     }
3170 
3171 #endif /* configUSE_TIMERS */
3172 /*-----------------------------------------------------------*/
3173 
3174 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3175 
xQueueCreateSet(const UBaseType_t uxEventQueueLength)3176     QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3177     {
3178         QueueSetHandle_t pxQueue;
3179 
3180         traceENTER_xQueueCreateSet( uxEventQueueLength );
3181 
3182         pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3183 
3184         traceRETURN_xQueueCreateSet( pxQueue );
3185 
3186         return pxQueue;
3187     }
3188 
3189 #endif /* configUSE_QUEUE_SETS */
3190 /*-----------------------------------------------------------*/
3191 
3192 #if ( configUSE_QUEUE_SETS == 1 )
3193 
xQueueAddToSet(QueueSetMemberHandle_t xQueueOrSemaphore,QueueSetHandle_t xQueueSet)3194     BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3195                                QueueSetHandle_t xQueueSet )
3196     {
3197         BaseType_t xReturn;
3198 
3199         traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3200 
3201         taskENTER_CRITICAL();
3202         {
3203             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3204             {
3205                 /* Cannot add a queue/semaphore to more than one queue set. */
3206                 xReturn = pdFAIL;
3207             }
3208             else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3209             {
3210                 /* Cannot add a queue/semaphore to a queue set if there are already
3211                  * items in the queue/semaphore. */
3212                 xReturn = pdFAIL;
3213             }
3214             else
3215             {
3216                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3217                 xReturn = pdPASS;
3218             }
3219         }
3220         taskEXIT_CRITICAL();
3221 
3222         traceRETURN_xQueueAddToSet( xReturn );
3223 
3224         return xReturn;
3225     }
3226 
3227 #endif /* configUSE_QUEUE_SETS */
3228 /*-----------------------------------------------------------*/
3229 
3230 #if ( configUSE_QUEUE_SETS == 1 )
3231 
xQueueRemoveFromSet(QueueSetMemberHandle_t xQueueOrSemaphore,QueueSetHandle_t xQueueSet)3232     BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3233                                     QueueSetHandle_t xQueueSet )
3234     {
3235         BaseType_t xReturn;
3236         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3237 
3238         traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3239 
3240         if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3241         {
3242             /* The queue was not a member of the set. */
3243             xReturn = pdFAIL;
3244         }
3245         else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3246         {
3247             /* It is dangerous to remove a queue from a set when the queue is
3248              * not empty because the queue set will still hold pending events for
3249              * the queue. */
3250             xReturn = pdFAIL;
3251         }
3252         else
3253         {
3254             taskENTER_CRITICAL();
3255             {
3256                 /* The queue is no longer contained in the set. */
3257                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3258             }
3259             taskEXIT_CRITICAL();
3260             xReturn = pdPASS;
3261         }
3262 
3263         traceRETURN_xQueueRemoveFromSet( xReturn );
3264 
3265         return xReturn;
3266     }
3267 
3268 #endif /* configUSE_QUEUE_SETS */
3269 /*-----------------------------------------------------------*/
3270 
3271 #if ( configUSE_QUEUE_SETS == 1 )
3272 
xQueueSelectFromSet(QueueSetHandle_t xQueueSet,TickType_t const xTicksToWait)3273     QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3274                                                 TickType_t const xTicksToWait )
3275     {
3276         QueueSetMemberHandle_t xReturn = NULL;
3277 
3278         traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3279 
3280         ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait );
3281 
3282         traceRETURN_xQueueSelectFromSet( xReturn );
3283 
3284         return xReturn;
3285     }
3286 
3287 #endif /* configUSE_QUEUE_SETS */
3288 /*-----------------------------------------------------------*/
3289 
3290 #if ( configUSE_QUEUE_SETS == 1 )
3291 
xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)3292     QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3293     {
3294         QueueSetMemberHandle_t xReturn = NULL;
3295 
3296         traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3297 
3298         ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL );
3299 
3300         traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3301 
3302         return xReturn;
3303     }
3304 
3305 #endif /* configUSE_QUEUE_SETS */
3306 /*-----------------------------------------------------------*/
3307 
3308 #if ( configUSE_QUEUE_SETS == 1 )
3309 
prvNotifyQueueSetContainer(const Queue_t * const pxQueue)3310     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3311     {
3312         Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3313         BaseType_t xReturn = pdFALSE;
3314 
3315         /* This function must be called form a critical section. */
3316 
3317         /* The following line is not reachable in unit tests because every call
3318          * to prvNotifyQueueSetContainer is preceded by a check that
3319          * pxQueueSetContainer != NULL */
3320         configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3321         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3322 
3323         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3324         {
3325             const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3326 
3327             traceQUEUE_SET_SEND( pxQueueSetContainer );
3328 
3329             /* The data copied is the handle of the queue that contains data. */
3330             xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3331 
3332             if( cTxLock == queueUNLOCKED )
3333             {
3334                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3335                 {
3336                     if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3337                     {
3338                         /* The task waiting has a higher priority. */
3339                         xReturn = pdTRUE;
3340                     }
3341                     else
3342                     {
3343                         mtCOVERAGE_TEST_MARKER();
3344                     }
3345                 }
3346                 else
3347                 {
3348                     mtCOVERAGE_TEST_MARKER();
3349                 }
3350             }
3351             else
3352             {
3353                 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3354             }
3355         }
3356         else
3357         {
3358             mtCOVERAGE_TEST_MARKER();
3359         }
3360 
3361         return xReturn;
3362     }
3363 
3364 #endif /* configUSE_QUEUE_SETS */
3365