1 /*
2  * FreeRTOS Kernel V11.0.1
3  * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
4  *
5  * SPDX-License-Identifier: MIT
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a copy of
8  * this software and associated documentation files (the "Software"), to deal in
9  * the Software without restriction, including without limitation the rights to
10  * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
11  * the Software, and to permit persons to whom the Software is furnished to do so,
12  * subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be included in all
15  * copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
19  * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
20  * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
21  * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
22  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * https://www.FreeRTOS.org
25  * https://github.com/FreeRTOS
26  *
27  */
28 
29 #include <stdlib.h>
30 #include <string.h>
31 
32 /* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
33  * all the API functions to use the MPU wrappers.  That should only be done when
34  * task.h is included from an application file. */
35 #define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
36 
37 #include "FreeRTOS.h"
38 #include "task.h"
39 #include "queue.h"
40 
41 #if ( configUSE_CO_ROUTINES == 1 )
42     #include "croutine.h"
43 #endif
44 
45 /* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
46  * for the header files above, but not in this file, in order to generate the
47  * correct privileged Vs unprivileged linkage and placement. */
48 #undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
49 
50 
51 /* Constants used with the cRxLock and cTxLock structure members. */
52 #define queueUNLOCKED             ( ( int8_t ) -1 )
53 #define queueLOCKED_UNMODIFIED    ( ( int8_t ) 0 )
54 #define queueINT8_MAX             ( ( int8_t ) 127 )
55 
56 /* When the Queue_t structure is used to represent a base queue its pcHead and
57  * pcTail members are used as pointers into the queue storage area.  When the
58  * Queue_t structure is used to represent a mutex pcHead and pcTail pointers are
59  * not necessary, and the pcHead pointer is set to NULL to indicate that the
60  * structure instead holds a pointer to the mutex holder (if any).  Map alternative
61  * names to the pcHead and structure member to ensure the readability of the code
62  * is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
63  * a union as their usage is mutually exclusive dependent on what the queue is
64  * being used for. */
65 #define uxQueueType               pcHead
66 #define queueQUEUE_IS_MUTEX       NULL
67 
68 typedef struct QueuePointers
69 {
70     int8_t * pcTail;     /**< Points to the byte at the end of the queue storage area.  Once more byte is allocated than necessary to store the queue items, this is used as a marker. */
71     int8_t * pcReadFrom; /**< Points to the last place that a queued item was read from when the structure is used as a queue. */
72 } QueuePointers_t;
73 
74 typedef struct SemaphoreData
75 {
76     TaskHandle_t xMutexHolder;        /**< The handle of the task that holds the mutex. */
77     UBaseType_t uxRecursiveCallCount; /**< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
78 } SemaphoreData_t;
79 
80 /* Semaphores do not actually store or copy data, so have an item size of
81  * zero. */
82 #define queueSEMAPHORE_QUEUE_ITEM_LENGTH    ( ( UBaseType_t ) 0 )
83 #define queueMUTEX_GIVE_BLOCK_TIME          ( ( TickType_t ) 0U )
84 
85 #if ( configUSE_PREEMPTION == 0 )
86 
87 /* If the cooperative scheduler is being used then a yield should not be
88  * performed just because a higher priority task has been woken. */
89     #define queueYIELD_IF_USING_PREEMPTION()
90 #else
91     #if ( configNUMBER_OF_CORES == 1 )
92         #define queueYIELD_IF_USING_PREEMPTION()    portYIELD_WITHIN_API()
93     #else /* #if ( configNUMBER_OF_CORES == 1 ) */
94         #define queueYIELD_IF_USING_PREEMPTION()    vTaskYieldWithinAPI()
95     #endif /* #if ( configNUMBER_OF_CORES == 1 ) */
96 #endif
97 
98 /*
99  * Definition of the queue used by the scheduler.
100  * Items are queued by copy, not reference.  See the following link for the
101  * rationale: https://www.FreeRTOS.org/Embedded-RTOS-Queues.html
102  */
103 typedef struct QueueDefinition /* The old naming convention is used to prevent breaking kernel aware debuggers. */
104 {
105     int8_t * pcHead;           /**< Points to the beginning of the queue storage area. */
106     int8_t * pcWriteTo;        /**< Points to the free next place in the storage area. */
107 
108     union
109     {
110         QueuePointers_t xQueue;     /**< Data required exclusively when this structure is used as a queue. */
111         SemaphoreData_t xSemaphore; /**< Data required exclusively when this structure is used as a semaphore. */
112     } u;
113 
114     List_t xTasksWaitingToSend;             /**< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
115     List_t xTasksWaitingToReceive;          /**< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */
116 
117     volatile UBaseType_t uxMessagesWaiting; /**< The number of items currently in the queue. */
118     UBaseType_t uxLength;                   /**< The length of the queue defined as the number of items it will hold, not the number of bytes. */
119     UBaseType_t uxItemSize;                 /**< The size of each items that the queue will hold. */
120 
121     volatile int8_t cRxLock;                /**< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
122     volatile int8_t cTxLock;                /**< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
123 
124     #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
125         uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
126     #endif
127 
128     #if ( configUSE_QUEUE_SETS == 1 )
129         struct QueueDefinition * pxQueueSetContainer;
130     #endif
131 
132     #if ( configUSE_TRACE_FACILITY == 1 )
133         UBaseType_t uxQueueNumber;
134         uint8_t ucQueueType;
135     #endif
136 } xQUEUE;
137 
138 /* The old xQUEUE name is maintained above then typedefed to the new Queue_t
139  * name below to enable the use of older kernel aware debuggers. */
140 typedef xQUEUE Queue_t;
141 
142 /*-----------------------------------------------------------*/
143 
144 /*
145  * The queue registry is just a means for kernel aware debuggers to locate
146  * queue structures.  It has no other purpose so is an optional component.
147  */
148 #if ( configQUEUE_REGISTRY_SIZE > 0 )
149 
150 /* The type stored within the queue registry array.  This allows a name
151  * to be assigned to each queue making kernel aware debugging a little
152  * more user friendly. */
153     typedef struct QUEUE_REGISTRY_ITEM
154     {
155         const char * pcQueueName;
156         QueueHandle_t xHandle;
157     } xQueueRegistryItem;
158 
159 /* The old xQueueRegistryItem name is maintained above then typedefed to the
160  * new xQueueRegistryItem name below to enable the use of older kernel aware
161  * debuggers. */
162     typedef xQueueRegistryItem QueueRegistryItem_t;
163 
164 /* The queue registry is simply an array of QueueRegistryItem_t structures.
165  * The pcQueueName member of a structure being NULL is indicative of the
166  * array position being vacant. */
167 
168 /* MISRA Ref 8.4.2 [Declaration shall be visible] */
169 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-84 */
170 /* coverity[misra_c_2012_rule_8_4_violation] */
171     PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];
172 
173 #endif /* configQUEUE_REGISTRY_SIZE */
174 
175 /*
176  * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
177  * prevent an ISR from adding or removing items to the queue, but does prevent
178  * an ISR from removing tasks from the queue event lists.  If an ISR finds a
179  * queue is locked it will instead increment the appropriate queue lock count
180  * to indicate that a task may require unblocking.  When the queue in unlocked
181  * these lock counts are inspected, and the appropriate action taken.
182  */
183 static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
184 
185 /*
186  * Uses a critical section to determine if there is any data in a queue.
187  *
188  * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
189  */
190 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
191 
192 /*
193  * Uses a critical section to determine if there is any space in a queue.
194  *
195  * @return pdTRUE if there is no space, otherwise pdFALSE;
196  */
197 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue ) PRIVILEGED_FUNCTION;
198 
199 /*
200  * Copies an item into the queue, either at the front of the queue or the
201  * back of the queue.
202  */
203 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
204                                       const void * pvItemToQueue,
205                                       const BaseType_t xPosition ) PRIVILEGED_FUNCTION;
206 
207 /*
208  * Copies an item out of a queue.
209  */
210 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
211                                   void * const pvBuffer ) PRIVILEGED_FUNCTION;
212 
213 #if ( configUSE_QUEUE_SETS == 1 )
214 
215 /*
216  * Checks to see if a queue is a member of a queue set, and if so, notifies
217  * the queue set that the queue contains data.
218  */
219     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
220 #endif
221 
222 /*
223  * Called after a Queue_t structure has been allocated either statically or
224  * dynamically to fill in the structure's members.
225  */
226 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
227                                    const UBaseType_t uxItemSize,
228                                    uint8_t * pucQueueStorage,
229                                    const uint8_t ucQueueType,
230                                    Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
231 
232 /*
233  * Mutexes are a special type of queue.  When a mutex is created, first the
234  * queue is created, then prvInitialiseMutex() is called to configure the queue
235  * as a mutex.
236  */
237 #if ( configUSE_MUTEXES == 1 )
238     static void prvInitialiseMutex( Queue_t * pxNewQueue ) PRIVILEGED_FUNCTION;
239 #endif
240 
241 #if ( configUSE_MUTEXES == 1 )
242 
243 /*
244  * If a task waiting for a mutex causes the mutex holder to inherit a
245  * priority, but the waiting task times out, then the holder should
246  * disinherit the priority - but only down to the highest priority of any
247  * other tasks that are waiting for the same mutex.  This function returns
248  * that priority.
249  */
250     static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
251 #endif
252 /*-----------------------------------------------------------*/
253 
254 /*
255  * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
256  * accessing the queue event lists.
257  */
258 #define prvLockQueue( pxQueue )                            \
259     taskENTER_CRITICAL();                                  \
260     {                                                      \
261         if( ( pxQueue )->cRxLock == queueUNLOCKED )        \
262         {                                                  \
263             ( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED; \
264         }                                                  \
265         if( ( pxQueue )->cTxLock == queueUNLOCKED )        \
266         {                                                  \
267             ( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED; \
268         }                                                  \
269     }                                                      \
270     taskEXIT_CRITICAL()
271 
272 /*
273  * Macro to increment cTxLock member of the queue data structure. It is
274  * capped at the number of tasks in the system as we cannot unblock more
275  * tasks than the number of tasks in the system.
276  */
277 #define prvIncrementQueueTxLock( pxQueue, cTxLock )                           \
278     do {                                                                      \
279         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
280         if( ( UBaseType_t ) ( cTxLock ) < uxNumberOfTasks )                   \
281         {                                                                     \
282             configASSERT( ( cTxLock ) != queueINT8_MAX );                     \
283             ( pxQueue )->cTxLock = ( int8_t ) ( ( cTxLock ) + ( int8_t ) 1 ); \
284         }                                                                     \
285     } while( 0 )
286 
287 /*
288  * Macro to increment cRxLock member of the queue data structure. It is
289  * capped at the number of tasks in the system as we cannot unblock more
290  * tasks than the number of tasks in the system.
291  */
292 #define prvIncrementQueueRxLock( pxQueue, cRxLock )                           \
293     do {                                                                      \
294         const UBaseType_t uxNumberOfTasks = uxTaskGetNumberOfTasks();         \
295         if( ( UBaseType_t ) ( cRxLock ) < uxNumberOfTasks )                   \
296         {                                                                     \
297             configASSERT( ( cRxLock ) != queueINT8_MAX );                     \
298             ( pxQueue )->cRxLock = ( int8_t ) ( ( cRxLock ) + ( int8_t ) 1 ); \
299         }                                                                     \
300     } while( 0 )
301 /*-----------------------------------------------------------*/
302 
xQueueGenericReset(QueueHandle_t xQueue,BaseType_t xNewQueue)303 BaseType_t xQueueGenericReset( QueueHandle_t xQueue,
304                                BaseType_t xNewQueue )
305 {
306     BaseType_t xReturn = pdPASS;
307     Queue_t * const pxQueue = xQueue;
308 
309     traceENTER_xQueueGenericReset( xQueue, xNewQueue );
310 
311     configASSERT( pxQueue );
312 
313     if( ( pxQueue != NULL ) &&
314         ( pxQueue->uxLength >= 1U ) &&
315         /* Check for multiplication overflow. */
316         ( ( SIZE_MAX / pxQueue->uxLength ) >= pxQueue->uxItemSize ) )
317     {
318         taskENTER_CRITICAL();
319         {
320             pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize );
321             pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
322             pxQueue->pcWriteTo = pxQueue->pcHead;
323             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize );
324             pxQueue->cRxLock = queueUNLOCKED;
325             pxQueue->cTxLock = queueUNLOCKED;
326 
327             if( xNewQueue == pdFALSE )
328             {
329                 /* If there are tasks blocked waiting to read from the queue, then
330                  * the tasks will remain blocked as after this function exits the queue
331                  * will still be empty.  If there are tasks blocked waiting to write to
332                  * the queue, then one should be unblocked as after this function exits
333                  * it will be possible to write to it. */
334                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
335                 {
336                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
337                     {
338                         queueYIELD_IF_USING_PREEMPTION();
339                     }
340                     else
341                     {
342                         mtCOVERAGE_TEST_MARKER();
343                     }
344                 }
345                 else
346                 {
347                     mtCOVERAGE_TEST_MARKER();
348                 }
349             }
350             else
351             {
352                 /* Ensure the event queues start in the correct state. */
353                 vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
354                 vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
355             }
356         }
357         taskEXIT_CRITICAL();
358     }
359     else
360     {
361         xReturn = pdFAIL;
362     }
363 
364     configASSERT( xReturn != pdFAIL );
365 
366     /* A value is returned for calling semantic consistency with previous
367      * versions. */
368     traceRETURN_xQueueGenericReset( xReturn );
369 
370     return xReturn;
371 }
372 /*-----------------------------------------------------------*/
373 
374 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
375 
xQueueGenericCreateStatic(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,uint8_t * pucQueueStorage,StaticQueue_t * pxStaticQueue,const uint8_t ucQueueType)376     QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength,
377                                              const UBaseType_t uxItemSize,
378                                              uint8_t * pucQueueStorage,
379                                              StaticQueue_t * pxStaticQueue,
380                                              const uint8_t ucQueueType )
381     {
382         Queue_t * pxNewQueue = NULL;
383 
384         traceENTER_xQueueGenericCreateStatic( uxQueueLength, uxItemSize, pucQueueStorage, pxStaticQueue, ucQueueType );
385 
386         /* The StaticQueue_t structure and the queue storage area must be
387          * supplied. */
388         configASSERT( pxStaticQueue );
389 
390         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
391             ( pxStaticQueue != NULL ) &&
392 
393             /* A queue storage area should be provided if the item size is not 0, and
394              * should not be provided if the item size is 0. */
395             ( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0U ) ) ) &&
396             ( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0U ) ) ) )
397         {
398             #if ( configASSERT_DEFINED == 1 )
399             {
400                 /* Sanity check that the size of the structure used to declare a
401                  * variable of type StaticQueue_t or StaticSemaphore_t equals the size of
402                  * the real queue and semaphore structures. */
403                 volatile size_t xSize = sizeof( StaticQueue_t );
404 
405                 /* This assertion cannot be branch covered in unit tests */
406                 configASSERT( xSize == sizeof( Queue_t ) ); /* LCOV_EXCL_BR_LINE */
407                 ( void ) xSize;                             /* Prevent unused variable warning when configASSERT() is not defined. */
408             }
409             #endif /* configASSERT_DEFINED */
410 
411             /* The address of a statically allocated queue was passed in, use it.
412              * The address of a statically allocated storage area was also passed in
413              * but is already set. */
414             /* MISRA Ref 11.3.1 [Misaligned access] */
415             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
416             /* coverity[misra_c_2012_rule_11_3_violation] */
417             pxNewQueue = ( Queue_t * ) pxStaticQueue;
418 
419             #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
420             {
421                 /* Queues can be allocated wither statically or dynamically, so
422                  * note this queue was allocated statically in case the queue is
423                  * later deleted. */
424                 pxNewQueue->ucStaticallyAllocated = pdTRUE;
425             }
426             #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
427 
428             prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
429         }
430         else
431         {
432             configASSERT( pxNewQueue );
433             mtCOVERAGE_TEST_MARKER();
434         }
435 
436         traceRETURN_xQueueGenericCreateStatic( pxNewQueue );
437 
438         return pxNewQueue;
439     }
440 
441 #endif /* configSUPPORT_STATIC_ALLOCATION */
442 /*-----------------------------------------------------------*/
443 
444 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
445 
xQueueGenericGetStaticBuffers(QueueHandle_t xQueue,uint8_t ** ppucQueueStorage,StaticQueue_t ** ppxStaticQueue)446     BaseType_t xQueueGenericGetStaticBuffers( QueueHandle_t xQueue,
447                                               uint8_t ** ppucQueueStorage,
448                                               StaticQueue_t ** ppxStaticQueue )
449     {
450         BaseType_t xReturn;
451         Queue_t * const pxQueue = xQueue;
452 
453         traceENTER_xQueueGenericGetStaticBuffers( xQueue, ppucQueueStorage, ppxStaticQueue );
454 
455         configASSERT( pxQueue );
456         configASSERT( ppxStaticQueue );
457 
458         #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
459         {
460             /* Check if the queue was statically allocated. */
461             if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
462             {
463                 if( ppucQueueStorage != NULL )
464                 {
465                     *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
466                 }
467 
468                 /* MISRA Ref 11.3.1 [Misaligned access] */
469                 /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
470                 /* coverity[misra_c_2012_rule_11_3_violation] */
471                 *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
472                 xReturn = pdTRUE;
473             }
474             else
475             {
476                 xReturn = pdFALSE;
477             }
478         }
479         #else /* configSUPPORT_DYNAMIC_ALLOCATION */
480         {
481             /* Queue must have been statically allocated. */
482             if( ppucQueueStorage != NULL )
483             {
484                 *ppucQueueStorage = ( uint8_t * ) pxQueue->pcHead;
485             }
486 
487             *ppxStaticQueue = ( StaticQueue_t * ) pxQueue;
488             xReturn = pdTRUE;
489         }
490         #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
491 
492         traceRETURN_xQueueGenericGetStaticBuffers( xReturn );
493 
494         return xReturn;
495     }
496 
497 #endif /* configSUPPORT_STATIC_ALLOCATION */
498 /*-----------------------------------------------------------*/
499 
500 #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
501 
xQueueGenericCreate(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,const uint8_t ucQueueType)502     QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength,
503                                        const UBaseType_t uxItemSize,
504                                        const uint8_t ucQueueType )
505     {
506         Queue_t * pxNewQueue = NULL;
507         size_t xQueueSizeInBytes;
508         uint8_t * pucQueueStorage;
509 
510         traceENTER_xQueueGenericCreate( uxQueueLength, uxItemSize, ucQueueType );
511 
512         if( ( uxQueueLength > ( UBaseType_t ) 0 ) &&
513             /* Check for multiplication overflow. */
514             ( ( SIZE_MAX / uxQueueLength ) >= uxItemSize ) &&
515             /* Check for addition overflow. */
516             ( ( UBaseType_t ) ( SIZE_MAX - sizeof( Queue_t ) ) >= ( uxQueueLength * uxItemSize ) ) )
517         {
518             /* Allocate enough space to hold the maximum number of items that
519              * can be in the queue at any time.  It is valid for uxItemSize to be
520              * zero in the case the queue is used as a semaphore. */
521             xQueueSizeInBytes = ( size_t ) ( ( size_t ) uxQueueLength * ( size_t ) uxItemSize );
522 
523             /* MISRA Ref 11.5.1 [Malloc memory assignment] */
524             /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
525             /* coverity[misra_c_2012_rule_11_5_violation] */
526             pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes );
527 
528             if( pxNewQueue != NULL )
529             {
530                 /* Jump past the queue structure to find the location of the queue
531                  * storage area. */
532                 pucQueueStorage = ( uint8_t * ) pxNewQueue;
533                 pucQueueStorage += sizeof( Queue_t );
534 
535                 #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
536                 {
537                     /* Queues can be created either statically or dynamically, so
538                      * note this task was created dynamically in case it is later
539                      * deleted. */
540                     pxNewQueue->ucStaticallyAllocated = pdFALSE;
541                 }
542                 #endif /* configSUPPORT_STATIC_ALLOCATION */
543 
544                 prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
545             }
546             else
547             {
548                 traceQUEUE_CREATE_FAILED( ucQueueType );
549                 mtCOVERAGE_TEST_MARKER();
550             }
551         }
552         else
553         {
554             configASSERT( pxNewQueue );
555             mtCOVERAGE_TEST_MARKER();
556         }
557 
558         traceRETURN_xQueueGenericCreate( pxNewQueue );
559 
560         return pxNewQueue;
561     }
562 
563 #endif /* configSUPPORT_STATIC_ALLOCATION */
564 /*-----------------------------------------------------------*/
565 
prvInitialiseNewQueue(const UBaseType_t uxQueueLength,const UBaseType_t uxItemSize,uint8_t * pucQueueStorage,const uint8_t ucQueueType,Queue_t * pxNewQueue)566 static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
567                                    const UBaseType_t uxItemSize,
568                                    uint8_t * pucQueueStorage,
569                                    const uint8_t ucQueueType,
570                                    Queue_t * pxNewQueue )
571 {
572     /* Remove compiler warnings about unused parameters should
573      * configUSE_TRACE_FACILITY not be set to 1. */
574     ( void ) ucQueueType;
575 
576     if( uxItemSize == ( UBaseType_t ) 0 )
577     {
578         /* No RAM was allocated for the queue storage area, but PC head cannot
579          * be set to NULL because NULL is used as a key to say the queue is used as
580          * a mutex.  Therefore just set pcHead to point to the queue as a benign
581          * value that is known to be within the memory map. */
582         pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
583     }
584     else
585     {
586         /* Set the head to the start of the queue storage area. */
587         pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
588     }
589 
590     /* Initialise the queue members as described where the queue type is
591      * defined. */
592     pxNewQueue->uxLength = uxQueueLength;
593     pxNewQueue->uxItemSize = uxItemSize;
594     ( void ) xQueueGenericReset( pxNewQueue, pdTRUE );
595 
596     #if ( configUSE_TRACE_FACILITY == 1 )
597     {
598         pxNewQueue->ucQueueType = ucQueueType;
599     }
600     #endif /* configUSE_TRACE_FACILITY */
601 
602     #if ( configUSE_QUEUE_SETS == 1 )
603     {
604         pxNewQueue->pxQueueSetContainer = NULL;
605     }
606     #endif /* configUSE_QUEUE_SETS */
607 
608     traceQUEUE_CREATE( pxNewQueue );
609 }
610 /*-----------------------------------------------------------*/
611 
612 #if ( configUSE_MUTEXES == 1 )
613 
prvInitialiseMutex(Queue_t * pxNewQueue)614     static void prvInitialiseMutex( Queue_t * pxNewQueue )
615     {
616         if( pxNewQueue != NULL )
617         {
618             /* The queue create function will set all the queue structure members
619             * correctly for a generic queue, but this function is creating a
620             * mutex.  Overwrite those members that need to be set differently -
621             * in particular the information required for priority inheritance. */
622             pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
623             pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;
624 
625             /* In case this is a recursive mutex. */
626             pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
627 
628             traceCREATE_MUTEX( pxNewQueue );
629 
630             /* Start with the semaphore in the expected state. */
631             ( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
632         }
633         else
634         {
635             traceCREATE_MUTEX_FAILED();
636         }
637     }
638 
639 #endif /* configUSE_MUTEXES */
640 /*-----------------------------------------------------------*/
641 
642 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
643 
xQueueCreateMutex(const uint8_t ucQueueType)644     QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
645     {
646         QueueHandle_t xNewQueue;
647         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
648 
649         traceENTER_xQueueCreateMutex( ucQueueType );
650 
651         xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
652         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
653 
654         traceRETURN_xQueueCreateMutex( xNewQueue );
655 
656         return xNewQueue;
657     }
658 
659 #endif /* configUSE_MUTEXES */
660 /*-----------------------------------------------------------*/
661 
662 #if ( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
663 
xQueueCreateMutexStatic(const uint8_t ucQueueType,StaticQueue_t * pxStaticQueue)664     QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType,
665                                            StaticQueue_t * pxStaticQueue )
666     {
667         QueueHandle_t xNewQueue;
668         const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;
669 
670         traceENTER_xQueueCreateMutexStatic( ucQueueType, pxStaticQueue );
671 
672         /* Prevent compiler warnings about unused parameters if
673          * configUSE_TRACE_FACILITY does not equal 1. */
674         ( void ) ucQueueType;
675 
676         xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
677         prvInitialiseMutex( ( Queue_t * ) xNewQueue );
678 
679         traceRETURN_xQueueCreateMutexStatic( xNewQueue );
680 
681         return xNewQueue;
682     }
683 
684 #endif /* configUSE_MUTEXES */
685 /*-----------------------------------------------------------*/
686 
687 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
688 
xQueueGetMutexHolder(QueueHandle_t xSemaphore)689     TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
690     {
691         TaskHandle_t pxReturn;
692         Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;
693 
694         traceENTER_xQueueGetMutexHolder( xSemaphore );
695 
696         configASSERT( xSemaphore );
697 
698         /* This function is called by xSemaphoreGetMutexHolder(), and should not
699          * be called directly.  Note:  This is a good way of determining if the
700          * calling task is the mutex holder, but not a good way of determining the
701          * identity of the mutex holder, as the holder may change between the
702          * following critical section exiting and the function returning. */
703         taskENTER_CRITICAL();
704         {
705             if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
706             {
707                 pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
708             }
709             else
710             {
711                 pxReturn = NULL;
712             }
713         }
714         taskEXIT_CRITICAL();
715 
716         traceRETURN_xQueueGetMutexHolder( pxReturn );
717 
718         return pxReturn;
719     }
720 
721 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
722 /*-----------------------------------------------------------*/
723 
724 #if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )
725 
xQueueGetMutexHolderFromISR(QueueHandle_t xSemaphore)726     TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
727     {
728         TaskHandle_t pxReturn;
729 
730         traceENTER_xQueueGetMutexHolderFromISR( xSemaphore );
731 
732         configASSERT( xSemaphore );
733 
734         /* Mutexes cannot be used in interrupt service routines, so the mutex
735          * holder should not change in an ISR, and therefore a critical section is
736          * not required here. */
737         if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
738         {
739             pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
740         }
741         else
742         {
743             pxReturn = NULL;
744         }
745 
746         traceRETURN_xQueueGetMutexHolderFromISR( pxReturn );
747 
748         return pxReturn;
749     }
750 
751 #endif /* if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) ) */
752 /*-----------------------------------------------------------*/
753 
754 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
755 
xQueueGiveMutexRecursive(QueueHandle_t xMutex)756     BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
757     {
758         BaseType_t xReturn;
759         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
760 
761         traceENTER_xQueueGiveMutexRecursive( xMutex );
762 
763         configASSERT( pxMutex );
764 
765         /* If this is the task that holds the mutex then xMutexHolder will not
766          * change outside of this task.  If this task does not hold the mutex then
767          * pxMutexHolder can never coincidentally equal the tasks handle, and as
768          * this is the only condition we are interested in it does not matter if
769          * pxMutexHolder is accessed simultaneously by another task.  Therefore no
770          * mutual exclusion is required to test the pxMutexHolder variable. */
771         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
772         {
773             traceGIVE_MUTEX_RECURSIVE( pxMutex );
774 
775             /* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
776              * the task handle, therefore no underflow check is required.  Also,
777              * uxRecursiveCallCount is only modified by the mutex holder, and as
778              * there can only be one, no mutual exclusion is required to modify the
779              * uxRecursiveCallCount member. */
780             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;
781 
782             /* Has the recursive call count unwound to 0? */
783             if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
784             {
785                 /* Return the mutex.  This will automatically unblock any other
786                  * task that might be waiting to access the mutex. */
787                 ( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
788             }
789             else
790             {
791                 mtCOVERAGE_TEST_MARKER();
792             }
793 
794             xReturn = pdPASS;
795         }
796         else
797         {
798             /* The mutex cannot be given because the calling task is not the
799              * holder. */
800             xReturn = pdFAIL;
801 
802             traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
803         }
804 
805         traceRETURN_xQueueGiveMutexRecursive( xReturn );
806 
807         return xReturn;
808     }
809 
810 #endif /* configUSE_RECURSIVE_MUTEXES */
811 /*-----------------------------------------------------------*/
812 
813 #if ( configUSE_RECURSIVE_MUTEXES == 1 )
814 
xQueueTakeMutexRecursive(QueueHandle_t xMutex,TickType_t xTicksToWait)815     BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex,
816                                          TickType_t xTicksToWait )
817     {
818         BaseType_t xReturn;
819         Queue_t * const pxMutex = ( Queue_t * ) xMutex;
820 
821         traceENTER_xQueueTakeMutexRecursive( xMutex, xTicksToWait );
822 
823         configASSERT( pxMutex );
824 
825         /* Comments regarding mutual exclusion as per those within
826          * xQueueGiveMutexRecursive(). */
827 
828         traceTAKE_MUTEX_RECURSIVE( pxMutex );
829 
830         if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
831         {
832             ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
833             xReturn = pdPASS;
834         }
835         else
836         {
837             xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );
838 
839             /* pdPASS will only be returned if the mutex was successfully
840              * obtained.  The calling task may have entered the Blocked state
841              * before reaching here. */
842             if( xReturn != pdFAIL )
843             {
844                 ( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
845             }
846             else
847             {
848                 traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
849             }
850         }
851 
852         traceRETURN_xQueueTakeMutexRecursive( xReturn );
853 
854         return xReturn;
855     }
856 
857 #endif /* configUSE_RECURSIVE_MUTEXES */
858 /*-----------------------------------------------------------*/
859 
860 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
861 
xQueueCreateCountingSemaphoreStatic(const UBaseType_t uxMaxCount,const UBaseType_t uxInitialCount,StaticQueue_t * pxStaticQueue)862     QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount,
863                                                        const UBaseType_t uxInitialCount,
864                                                        StaticQueue_t * pxStaticQueue )
865     {
866         QueueHandle_t xHandle = NULL;
867 
868         traceENTER_xQueueCreateCountingSemaphoreStatic( uxMaxCount, uxInitialCount, pxStaticQueue );
869 
870         if( ( uxMaxCount != 0U ) &&
871             ( uxInitialCount <= uxMaxCount ) )
872         {
873             xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
874 
875             if( xHandle != NULL )
876             {
877                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
878 
879                 traceCREATE_COUNTING_SEMAPHORE();
880             }
881             else
882             {
883                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
884             }
885         }
886         else
887         {
888             configASSERT( xHandle );
889             mtCOVERAGE_TEST_MARKER();
890         }
891 
892         traceRETURN_xQueueCreateCountingSemaphoreStatic( xHandle );
893 
894         return xHandle;
895     }
896 
897 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
898 /*-----------------------------------------------------------*/
899 
900 #if ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
901 
xQueueCreateCountingSemaphore(const UBaseType_t uxMaxCount,const UBaseType_t uxInitialCount)902     QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount,
903                                                  const UBaseType_t uxInitialCount )
904     {
905         QueueHandle_t xHandle = NULL;
906 
907         traceENTER_xQueueCreateCountingSemaphore( uxMaxCount, uxInitialCount );
908 
909         if( ( uxMaxCount != 0U ) &&
910             ( uxInitialCount <= uxMaxCount ) )
911         {
912             xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );
913 
914             if( xHandle != NULL )
915             {
916                 ( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;
917 
918                 traceCREATE_COUNTING_SEMAPHORE();
919             }
920             else
921             {
922                 traceCREATE_COUNTING_SEMAPHORE_FAILED();
923             }
924         }
925         else
926         {
927             configASSERT( xHandle );
928             mtCOVERAGE_TEST_MARKER();
929         }
930 
931         traceRETURN_xQueueCreateCountingSemaphore( xHandle );
932 
933         return xHandle;
934     }
935 
936 #endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
937 /*-----------------------------------------------------------*/
938 
xQueueGenericSend(QueueHandle_t xQueue,const void * const pvItemToQueue,TickType_t xTicksToWait,const BaseType_t xCopyPosition)939 BaseType_t xQueueGenericSend( QueueHandle_t xQueue,
940                               const void * const pvItemToQueue,
941                               TickType_t xTicksToWait,
942                               const BaseType_t xCopyPosition )
943 {
944     BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
945     TimeOut_t xTimeOut;
946     Queue_t * const pxQueue = xQueue;
947 
948     traceENTER_xQueueGenericSend( xQueue, pvItemToQueue, xTicksToWait, xCopyPosition );
949 
950     configASSERT( pxQueue );
951     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
952     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
953     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
954     {
955         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
956     }
957     #endif
958 
959     for( ; ; )
960     {
961         taskENTER_CRITICAL();
962         {
963             /* Is there room on the queue now?  The running task must be the
964              * highest priority task wanting to access the queue.  If the head item
965              * in the queue is to be overwritten then it does not matter if the
966              * queue is full. */
967             if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
968             {
969                 traceQUEUE_SEND( pxQueue );
970 
971                 #if ( configUSE_QUEUE_SETS == 1 )
972                 {
973                     const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
974 
975                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
976 
977                     if( pxQueue->pxQueueSetContainer != NULL )
978                     {
979                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
980                         {
981                             /* Do not notify the queue set as an existing item
982                              * was overwritten in the queue so the number of items
983                              * in the queue has not changed. */
984                             mtCOVERAGE_TEST_MARKER();
985                         }
986                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
987                         {
988                             /* The queue is a member of a queue set, and posting
989                              * to the queue set caused a higher priority task to
990                              * unblock. A context switch is required. */
991                             queueYIELD_IF_USING_PREEMPTION();
992                         }
993                         else
994                         {
995                             mtCOVERAGE_TEST_MARKER();
996                         }
997                     }
998                     else
999                     {
1000                         /* If there was a task waiting for data to arrive on the
1001                          * queue then unblock it now. */
1002                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1003                         {
1004                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1005                             {
1006                                 /* The unblocked task has a priority higher than
1007                                  * our own so yield immediately.  Yes it is ok to
1008                                  * do this from within the critical section - the
1009                                  * kernel takes care of that. */
1010                                 queueYIELD_IF_USING_PREEMPTION();
1011                             }
1012                             else
1013                             {
1014                                 mtCOVERAGE_TEST_MARKER();
1015                             }
1016                         }
1017                         else if( xYieldRequired != pdFALSE )
1018                         {
1019                             /* This path is a special case that will only get
1020                              * executed if the task was holding multiple mutexes
1021                              * and the mutexes were given back in an order that is
1022                              * different to that in which they were taken. */
1023                             queueYIELD_IF_USING_PREEMPTION();
1024                         }
1025                         else
1026                         {
1027                             mtCOVERAGE_TEST_MARKER();
1028                         }
1029                     }
1030                 }
1031                 #else /* configUSE_QUEUE_SETS */
1032                 {
1033                     xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1034 
1035                     /* If there was a task waiting for data to arrive on the
1036                      * queue then unblock it now. */
1037                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1038                     {
1039                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1040                         {
1041                             /* The unblocked task has a priority higher than
1042                              * our own so yield immediately.  Yes it is ok to do
1043                              * this from within the critical section - the kernel
1044                              * takes care of that. */
1045                             queueYIELD_IF_USING_PREEMPTION();
1046                         }
1047                         else
1048                         {
1049                             mtCOVERAGE_TEST_MARKER();
1050                         }
1051                     }
1052                     else if( xYieldRequired != pdFALSE )
1053                     {
1054                         /* This path is a special case that will only get
1055                          * executed if the task was holding multiple mutexes and
1056                          * the mutexes were given back in an order that is
1057                          * different to that in which they were taken. */
1058                         queueYIELD_IF_USING_PREEMPTION();
1059                     }
1060                     else
1061                     {
1062                         mtCOVERAGE_TEST_MARKER();
1063                     }
1064                 }
1065                 #endif /* configUSE_QUEUE_SETS */
1066 
1067                 taskEXIT_CRITICAL();
1068 
1069                 traceRETURN_xQueueGenericSend( pdPASS );
1070 
1071                 return pdPASS;
1072             }
1073             else
1074             {
1075                 if( xTicksToWait == ( TickType_t ) 0 )
1076                 {
1077                     /* The queue was full and no block time is specified (or
1078                      * the block time has expired) so leave now. */
1079                     taskEXIT_CRITICAL();
1080 
1081                     /* Return to the original privilege level before exiting
1082                      * the function. */
1083                     traceQUEUE_SEND_FAILED( pxQueue );
1084                     traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1085 
1086                     return errQUEUE_FULL;
1087                 }
1088                 else if( xEntryTimeSet == pdFALSE )
1089                 {
1090                     /* The queue was full and a block time was specified so
1091                      * configure the timeout structure. */
1092                     vTaskInternalSetTimeOutState( &xTimeOut );
1093                     xEntryTimeSet = pdTRUE;
1094                 }
1095                 else
1096                 {
1097                     /* Entry time was already set. */
1098                     mtCOVERAGE_TEST_MARKER();
1099                 }
1100             }
1101         }
1102         taskEXIT_CRITICAL();
1103 
1104         /* Interrupts and other tasks can send to and receive from the queue
1105          * now the critical section has been exited. */
1106 
1107         vTaskSuspendAll();
1108         prvLockQueue( pxQueue );
1109 
1110         /* Update the timeout state to see if it has expired yet. */
1111         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1112         {
1113             if( prvIsQueueFull( pxQueue ) != pdFALSE )
1114             {
1115                 traceBLOCKING_ON_QUEUE_SEND( pxQueue );
1116                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );
1117 
1118                 /* Unlocking the queue means queue events can effect the
1119                  * event list. It is possible that interrupts occurring now
1120                  * remove this task from the event list again - but as the
1121                  * scheduler is suspended the task will go onto the pending
1122                  * ready list instead of the actual ready list. */
1123                 prvUnlockQueue( pxQueue );
1124 
1125                 /* Resuming the scheduler will move tasks from the pending
1126                  * ready list into the ready list - so it is feasible that this
1127                  * task is already in the ready list before it yields - in which
1128                  * case the yield will not cause a context switch unless there
1129                  * is also a higher priority task in the pending ready list. */
1130                 if( xTaskResumeAll() == pdFALSE )
1131                 {
1132                     taskYIELD_WITHIN_API();
1133                 }
1134             }
1135             else
1136             {
1137                 /* Try again. */
1138                 prvUnlockQueue( pxQueue );
1139                 ( void ) xTaskResumeAll();
1140             }
1141         }
1142         else
1143         {
1144             /* The timeout has expired. */
1145             prvUnlockQueue( pxQueue );
1146             ( void ) xTaskResumeAll();
1147 
1148             traceQUEUE_SEND_FAILED( pxQueue );
1149             traceRETURN_xQueueGenericSend( errQUEUE_FULL );
1150 
1151             return errQUEUE_FULL;
1152         }
1153     }
1154 }
1155 /*-----------------------------------------------------------*/
1156 
xQueueGenericSendFromISR(QueueHandle_t xQueue,const void * const pvItemToQueue,BaseType_t * const pxHigherPriorityTaskWoken,const BaseType_t xCopyPosition)1157 BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue,
1158                                      const void * const pvItemToQueue,
1159                                      BaseType_t * const pxHigherPriorityTaskWoken,
1160                                      const BaseType_t xCopyPosition )
1161 {
1162     BaseType_t xReturn;
1163     UBaseType_t uxSavedInterruptStatus;
1164     Queue_t * const pxQueue = xQueue;
1165 
1166     traceENTER_xQueueGenericSendFromISR( xQueue, pvItemToQueue, pxHigherPriorityTaskWoken, xCopyPosition );
1167 
1168     configASSERT( pxQueue );
1169     configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1170     configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
1171 
1172     /* RTOS ports that support interrupt nesting have the concept of a maximum
1173      * system call (or maximum API call) interrupt priority.  Interrupts that are
1174      * above the maximum system call priority are kept permanently enabled, even
1175      * when the RTOS kernel is in a critical section, but cannot make any calls to
1176      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1177      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1178      * failure if a FreeRTOS API function is called from an interrupt that has been
1179      * assigned a priority above the configured maximum system call priority.
1180      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1181      * that have been assigned a priority at or (logically) below the maximum
1182      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1183      * safe API to ensure interrupt entry is as fast and as simple as possible.
1184      * More information (albeit Cortex-M specific) is provided on the following
1185      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1186     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1187 
1188     /* Similar to xQueueGenericSend, except without blocking if there is no room
1189      * in the queue.  Also don't directly wake a task that was blocked on a queue
1190      * read, instead return a flag to say whether a context switch is required or
1191      * not (i.e. has a task with a higher priority than us been woken by this
1192      * post). */
1193     uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
1194     {
1195         if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
1196         {
1197             const int8_t cTxLock = pxQueue->cTxLock;
1198             const UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;
1199 
1200             traceQUEUE_SEND_FROM_ISR( pxQueue );
1201 
1202             /* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
1203              *  semaphore or mutex.  That means prvCopyDataToQueue() cannot result
1204              *  in a task disinheriting a priority and prvCopyDataToQueue() can be
1205              *  called here even though the disinherit function does not check if
1206              *  the scheduler is suspended before accessing the ready lists. */
1207             ( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );
1208 
1209             /* The event list is not altered if the queue is locked.  This will
1210              * be done when the queue is unlocked later. */
1211             if( cTxLock == queueUNLOCKED )
1212             {
1213                 #if ( configUSE_QUEUE_SETS == 1 )
1214                 {
1215                     if( pxQueue->pxQueueSetContainer != NULL )
1216                     {
1217                         if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
1218                         {
1219                             /* Do not notify the queue set as an existing item
1220                              * was overwritten in the queue so the number of items
1221                              * in the queue has not changed. */
1222                             mtCOVERAGE_TEST_MARKER();
1223                         }
1224                         else if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1225                         {
1226                             /* The queue is a member of a queue set, and posting
1227                              * to the queue set caused a higher priority task to
1228                              * unblock.  A context switch is required. */
1229                             if( pxHigherPriorityTaskWoken != NULL )
1230                             {
1231                                 *pxHigherPriorityTaskWoken = pdTRUE;
1232                             }
1233                             else
1234                             {
1235                                 mtCOVERAGE_TEST_MARKER();
1236                             }
1237                         }
1238                         else
1239                         {
1240                             mtCOVERAGE_TEST_MARKER();
1241                         }
1242                     }
1243                     else
1244                     {
1245                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1246                         {
1247                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1248                             {
1249                                 /* The task waiting has a higher priority so
1250                                  *  record that a context switch is required. */
1251                                 if( pxHigherPriorityTaskWoken != NULL )
1252                                 {
1253                                     *pxHigherPriorityTaskWoken = pdTRUE;
1254                                 }
1255                                 else
1256                                 {
1257                                     mtCOVERAGE_TEST_MARKER();
1258                                 }
1259                             }
1260                             else
1261                             {
1262                                 mtCOVERAGE_TEST_MARKER();
1263                             }
1264                         }
1265                         else
1266                         {
1267                             mtCOVERAGE_TEST_MARKER();
1268                         }
1269                     }
1270                 }
1271                 #else /* configUSE_QUEUE_SETS */
1272                 {
1273                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1274                     {
1275                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1276                         {
1277                             /* The task waiting has a higher priority so record that a
1278                              * context switch is required. */
1279                             if( pxHigherPriorityTaskWoken != NULL )
1280                             {
1281                                 *pxHigherPriorityTaskWoken = pdTRUE;
1282                             }
1283                             else
1284                             {
1285                                 mtCOVERAGE_TEST_MARKER();
1286                             }
1287                         }
1288                         else
1289                         {
1290                             mtCOVERAGE_TEST_MARKER();
1291                         }
1292                     }
1293                     else
1294                     {
1295                         mtCOVERAGE_TEST_MARKER();
1296                     }
1297 
1298                     /* Not used in this path. */
1299                     ( void ) uxPreviousMessagesWaiting;
1300                 }
1301                 #endif /* configUSE_QUEUE_SETS */
1302             }
1303             else
1304             {
1305                 /* Increment the lock count so the task that unlocks the queue
1306                  * knows that data was posted while it was locked. */
1307                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1308             }
1309 
1310             xReturn = pdPASS;
1311         }
1312         else
1313         {
1314             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1315             xReturn = errQUEUE_FULL;
1316         }
1317     }
1318     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1319 
1320     traceRETURN_xQueueGenericSendFromISR( xReturn );
1321 
1322     return xReturn;
1323 }
1324 /*-----------------------------------------------------------*/
1325 
xQueueGiveFromISR(QueueHandle_t xQueue,BaseType_t * const pxHigherPriorityTaskWoken)1326 BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
1327                               BaseType_t * const pxHigherPriorityTaskWoken )
1328 {
1329     BaseType_t xReturn;
1330     UBaseType_t uxSavedInterruptStatus;
1331     Queue_t * const pxQueue = xQueue;
1332 
1333     traceENTER_xQueueGiveFromISR( xQueue, pxHigherPriorityTaskWoken );
1334 
1335     /* Similar to xQueueGenericSendFromISR() but used with semaphores where the
1336      * item size is 0.  Don't directly wake a task that was blocked on a queue
1337      * read, instead return a flag to say whether a context switch is required or
1338      * not (i.e. has a task with a higher priority than us been woken by this
1339      * post). */
1340 
1341     configASSERT( pxQueue );
1342 
1343     /* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
1344      * if the item size is not 0. */
1345     configASSERT( pxQueue->uxItemSize == 0 );
1346 
1347     /* Normally a mutex would not be given from an interrupt, especially if
1348      * there is a mutex holder, as priority inheritance makes no sense for an
1349      * interrupts, only tasks. */
1350     configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );
1351 
1352     /* RTOS ports that support interrupt nesting have the concept of a maximum
1353      * system call (or maximum API call) interrupt priority.  Interrupts that are
1354      * above the maximum system call priority are kept permanently enabled, even
1355      * when the RTOS kernel is in a critical section, but cannot make any calls to
1356      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1357      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1358      * failure if a FreeRTOS API function is called from an interrupt that has been
1359      * assigned a priority above the configured maximum system call priority.
1360      * Only FreeRTOS functions that end in FromISR can be called from interrupts
1361      * that have been assigned a priority at or (logically) below the maximum
1362      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
1363      * safe API to ensure interrupt entry is as fast and as simple as possible.
1364      * More information (albeit Cortex-M specific) is provided on the following
1365      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
1366     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1367 
1368     uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
1369     {
1370         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1371 
1372         /* When the queue is used to implement a semaphore no data is ever
1373          * moved through the queue but it is still valid to see if the queue 'has
1374          * space'. */
1375         if( uxMessagesWaiting < pxQueue->uxLength )
1376         {
1377             const int8_t cTxLock = pxQueue->cTxLock;
1378 
1379             traceQUEUE_SEND_FROM_ISR( pxQueue );
1380 
1381             /* A task can only have an inherited priority if it is a mutex
1382              * holder - and if there is a mutex holder then the mutex cannot be
1383              * given from an ISR.  As this is the ISR version of the function it
1384              * can be assumed there is no mutex holder and no need to determine if
1385              * priority disinheritance is needed.  Simply increase the count of
1386              * messages (semaphores) available. */
1387             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
1388 
1389             /* The event list is not altered if the queue is locked.  This will
1390              * be done when the queue is unlocked later. */
1391             if( cTxLock == queueUNLOCKED )
1392             {
1393                 #if ( configUSE_QUEUE_SETS == 1 )
1394                 {
1395                     if( pxQueue->pxQueueSetContainer != NULL )
1396                     {
1397                         if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
1398                         {
1399                             /* The semaphore is a member of a queue set, and
1400                              * posting to the queue set caused a higher priority
1401                              * task to unblock.  A context switch is required. */
1402                             if( pxHigherPriorityTaskWoken != NULL )
1403                             {
1404                                 *pxHigherPriorityTaskWoken = pdTRUE;
1405                             }
1406                             else
1407                             {
1408                                 mtCOVERAGE_TEST_MARKER();
1409                             }
1410                         }
1411                         else
1412                         {
1413                             mtCOVERAGE_TEST_MARKER();
1414                         }
1415                     }
1416                     else
1417                     {
1418                         if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1419                         {
1420                             if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1421                             {
1422                                 /* The task waiting has a higher priority so
1423                                  *  record that a context switch is required. */
1424                                 if( pxHigherPriorityTaskWoken != NULL )
1425                                 {
1426                                     *pxHigherPriorityTaskWoken = pdTRUE;
1427                                 }
1428                                 else
1429                                 {
1430                                     mtCOVERAGE_TEST_MARKER();
1431                                 }
1432                             }
1433                             else
1434                             {
1435                                 mtCOVERAGE_TEST_MARKER();
1436                             }
1437                         }
1438                         else
1439                         {
1440                             mtCOVERAGE_TEST_MARKER();
1441                         }
1442                     }
1443                 }
1444                 #else /* configUSE_QUEUE_SETS */
1445                 {
1446                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1447                     {
1448                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1449                         {
1450                             /* The task waiting has a higher priority so record that a
1451                              * context switch is required. */
1452                             if( pxHigherPriorityTaskWoken != NULL )
1453                             {
1454                                 *pxHigherPriorityTaskWoken = pdTRUE;
1455                             }
1456                             else
1457                             {
1458                                 mtCOVERAGE_TEST_MARKER();
1459                             }
1460                         }
1461                         else
1462                         {
1463                             mtCOVERAGE_TEST_MARKER();
1464                         }
1465                     }
1466                     else
1467                     {
1468                         mtCOVERAGE_TEST_MARKER();
1469                     }
1470                 }
1471                 #endif /* configUSE_QUEUE_SETS */
1472             }
1473             else
1474             {
1475                 /* Increment the lock count so the task that unlocks the queue
1476                  * knows that data was posted while it was locked. */
1477                 prvIncrementQueueTxLock( pxQueue, cTxLock );
1478             }
1479 
1480             xReturn = pdPASS;
1481         }
1482         else
1483         {
1484             traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
1485             xReturn = errQUEUE_FULL;
1486         }
1487     }
1488     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
1489 
1490     traceRETURN_xQueueGiveFromISR( xReturn );
1491 
1492     return xReturn;
1493 }
1494 /*-----------------------------------------------------------*/
1495 
xQueueReceive(QueueHandle_t xQueue,void * const pvBuffer,TickType_t xTicksToWait)1496 BaseType_t xQueueReceive( QueueHandle_t xQueue,
1497                           void * const pvBuffer,
1498                           TickType_t xTicksToWait )
1499 {
1500     BaseType_t xEntryTimeSet = pdFALSE;
1501     TimeOut_t xTimeOut;
1502     Queue_t * const pxQueue = xQueue;
1503 
1504     traceENTER_xQueueReceive( xQueue, pvBuffer, xTicksToWait );
1505 
1506     /* Check the pointer is not NULL. */
1507     configASSERT( ( pxQueue ) );
1508 
1509     /* The buffer into which data is received can only be NULL if the data size
1510      * is zero (so no data is copied into the buffer). */
1511     configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1512 
1513     /* Cannot block if the scheduler is suspended. */
1514     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1515     {
1516         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1517     }
1518     #endif
1519 
1520     for( ; ; )
1521     {
1522         taskENTER_CRITICAL();
1523         {
1524             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1525 
1526             /* Is there data in the queue now?  To be running the calling task
1527              * must be the highest priority task wanting to access the queue. */
1528             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1529             {
1530                 /* Data available, remove one item. */
1531                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1532                 traceQUEUE_RECEIVE( pxQueue );
1533                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
1534 
1535                 /* There is now space in the queue, were any tasks waiting to
1536                  * post to the queue?  If so, unblock the highest priority waiting
1537                  * task. */
1538                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1539                 {
1540                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1541                     {
1542                         queueYIELD_IF_USING_PREEMPTION();
1543                     }
1544                     else
1545                     {
1546                         mtCOVERAGE_TEST_MARKER();
1547                     }
1548                 }
1549                 else
1550                 {
1551                     mtCOVERAGE_TEST_MARKER();
1552                 }
1553 
1554                 taskEXIT_CRITICAL();
1555 
1556                 traceRETURN_xQueueReceive( pdPASS );
1557 
1558                 return pdPASS;
1559             }
1560             else
1561             {
1562                 if( xTicksToWait == ( TickType_t ) 0 )
1563                 {
1564                     /* The queue was empty and no block time is specified (or
1565                      * the block time has expired) so leave now. */
1566                     taskEXIT_CRITICAL();
1567 
1568                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1569                     traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1570 
1571                     return errQUEUE_EMPTY;
1572                 }
1573                 else if( xEntryTimeSet == pdFALSE )
1574                 {
1575                     /* The queue was empty and a block time was specified so
1576                      * configure the timeout structure. */
1577                     vTaskInternalSetTimeOutState( &xTimeOut );
1578                     xEntryTimeSet = pdTRUE;
1579                 }
1580                 else
1581                 {
1582                     /* Entry time was already set. */
1583                     mtCOVERAGE_TEST_MARKER();
1584                 }
1585             }
1586         }
1587         taskEXIT_CRITICAL();
1588 
1589         /* Interrupts and other tasks can send to and receive from the queue
1590          * now the critical section has been exited. */
1591 
1592         vTaskSuspendAll();
1593         prvLockQueue( pxQueue );
1594 
1595         /* Update the timeout state to see if it has expired yet. */
1596         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1597         {
1598             /* The timeout has not expired.  If the queue is still empty place
1599              * the task on the list of tasks waiting to receive from the queue. */
1600             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1601             {
1602                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1603                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1604                 prvUnlockQueue( pxQueue );
1605 
1606                 if( xTaskResumeAll() == pdFALSE )
1607                 {
1608                     taskYIELD_WITHIN_API();
1609                 }
1610                 else
1611                 {
1612                     mtCOVERAGE_TEST_MARKER();
1613                 }
1614             }
1615             else
1616             {
1617                 /* The queue contains data again.  Loop back to try and read the
1618                  * data. */
1619                 prvUnlockQueue( pxQueue );
1620                 ( void ) xTaskResumeAll();
1621             }
1622         }
1623         else
1624         {
1625             /* Timed out.  If there is no data in the queue exit, otherwise loop
1626              * back and attempt to read the data. */
1627             prvUnlockQueue( pxQueue );
1628             ( void ) xTaskResumeAll();
1629 
1630             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1631             {
1632                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1633                 traceRETURN_xQueueReceive( errQUEUE_EMPTY );
1634 
1635                 return errQUEUE_EMPTY;
1636             }
1637             else
1638             {
1639                 mtCOVERAGE_TEST_MARKER();
1640             }
1641         }
1642     }
1643 }
1644 /*-----------------------------------------------------------*/
1645 
xQueueSemaphoreTake(QueueHandle_t xQueue,TickType_t xTicksToWait)1646 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue,
1647                                 TickType_t xTicksToWait )
1648 {
1649     BaseType_t xEntryTimeSet = pdFALSE;
1650     TimeOut_t xTimeOut;
1651     Queue_t * const pxQueue = xQueue;
1652 
1653     #if ( configUSE_MUTEXES == 1 )
1654         BaseType_t xInheritanceOccurred = pdFALSE;
1655     #endif
1656 
1657     traceENTER_xQueueSemaphoreTake( xQueue, xTicksToWait );
1658 
1659     /* Check the queue pointer is not NULL. */
1660     configASSERT( ( pxQueue ) );
1661 
1662     /* Check this really is a semaphore, in which case the item size will be
1663      * 0. */
1664     configASSERT( pxQueue->uxItemSize == 0 );
1665 
1666     /* Cannot block if the scheduler is suspended. */
1667     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1668     {
1669         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1670     }
1671     #endif
1672 
1673     for( ; ; )
1674     {
1675         taskENTER_CRITICAL();
1676         {
1677             /* Semaphores are queues with an item size of 0, and where the
1678              * number of messages in the queue is the semaphore's count value. */
1679             const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1680 
1681             /* Is there data in the queue now?  To be running the calling task
1682              * must be the highest priority task wanting to access the queue. */
1683             if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1684             {
1685                 traceQUEUE_RECEIVE( pxQueue );
1686 
1687                 /* Semaphores are queues with a data size of zero and where the
1688                  * messages waiting is the semaphore's count.  Reduce the count. */
1689                 pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxSemaphoreCount - ( UBaseType_t ) 1 );
1690 
1691                 #if ( configUSE_MUTEXES == 1 )
1692                 {
1693                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1694                     {
1695                         /* Record the information required to implement
1696                          * priority inheritance should it become necessary. */
1697                         pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1698                     }
1699                     else
1700                     {
1701                         mtCOVERAGE_TEST_MARKER();
1702                     }
1703                 }
1704                 #endif /* configUSE_MUTEXES */
1705 
1706                 /* Check to see if other tasks are blocked waiting to give the
1707                  * semaphore, and if so, unblock the highest priority such task. */
1708                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1709                 {
1710                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1711                     {
1712                         queueYIELD_IF_USING_PREEMPTION();
1713                     }
1714                     else
1715                     {
1716                         mtCOVERAGE_TEST_MARKER();
1717                     }
1718                 }
1719                 else
1720                 {
1721                     mtCOVERAGE_TEST_MARKER();
1722                 }
1723 
1724                 taskEXIT_CRITICAL();
1725 
1726                 traceRETURN_xQueueSemaphoreTake( pdPASS );
1727 
1728                 return pdPASS;
1729             }
1730             else
1731             {
1732                 if( xTicksToWait == ( TickType_t ) 0 )
1733                 {
1734                     /* The semaphore count was 0 and no block time is specified
1735                      * (or the block time has expired) so exit now. */
1736                     taskEXIT_CRITICAL();
1737 
1738                     traceQUEUE_RECEIVE_FAILED( pxQueue );
1739                     traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1740 
1741                     return errQUEUE_EMPTY;
1742                 }
1743                 else if( xEntryTimeSet == pdFALSE )
1744                 {
1745                     /* The semaphore count was 0 and a block time was specified
1746                      * so configure the timeout structure ready to block. */
1747                     vTaskInternalSetTimeOutState( &xTimeOut );
1748                     xEntryTimeSet = pdTRUE;
1749                 }
1750                 else
1751                 {
1752                     /* Entry time was already set. */
1753                     mtCOVERAGE_TEST_MARKER();
1754                 }
1755             }
1756         }
1757         taskEXIT_CRITICAL();
1758 
1759         /* Interrupts and other tasks can give to and take from the semaphore
1760          * now the critical section has been exited. */
1761 
1762         vTaskSuspendAll();
1763         prvLockQueue( pxQueue );
1764 
1765         /* Update the timeout state to see if it has expired yet. */
1766         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1767         {
1768             /* A block time is specified and not expired.  If the semaphore
1769              * count is 0 then enter the Blocked state to wait for a semaphore to
1770              * become available.  As semaphores are implemented with queues the
1771              * queue being empty is equivalent to the semaphore count being 0. */
1772             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1773             {
1774                 traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1775 
1776                 #if ( configUSE_MUTEXES == 1 )
1777                 {
1778                     if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1779                     {
1780                         taskENTER_CRITICAL();
1781                         {
1782                             xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1783                         }
1784                         taskEXIT_CRITICAL();
1785                     }
1786                     else
1787                     {
1788                         mtCOVERAGE_TEST_MARKER();
1789                     }
1790                 }
1791                 #endif /* if ( configUSE_MUTEXES == 1 ) */
1792 
1793                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1794                 prvUnlockQueue( pxQueue );
1795 
1796                 if( xTaskResumeAll() == pdFALSE )
1797                 {
1798                     taskYIELD_WITHIN_API();
1799                 }
1800                 else
1801                 {
1802                     mtCOVERAGE_TEST_MARKER();
1803                 }
1804             }
1805             else
1806             {
1807                 /* There was no timeout and the semaphore count was not 0, so
1808                  * attempt to take the semaphore again. */
1809                 prvUnlockQueue( pxQueue );
1810                 ( void ) xTaskResumeAll();
1811             }
1812         }
1813         else
1814         {
1815             /* Timed out. */
1816             prvUnlockQueue( pxQueue );
1817             ( void ) xTaskResumeAll();
1818 
1819             /* If the semaphore count is 0 exit now as the timeout has
1820              * expired.  Otherwise return to attempt to take the semaphore that is
1821              * known to be available.  As semaphores are implemented by queues the
1822              * queue being empty is equivalent to the semaphore count being 0. */
1823             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1824             {
1825                 #if ( configUSE_MUTEXES == 1 )
1826                 {
1827                     /* xInheritanceOccurred could only have be set if
1828                      * pxQueue->uxQueueType == queueQUEUE_IS_MUTEX so no need to
1829                      * test the mutex type again to check it is actually a mutex. */
1830                     if( xInheritanceOccurred != pdFALSE )
1831                     {
1832                         taskENTER_CRITICAL();
1833                         {
1834                             UBaseType_t uxHighestWaitingPriority;
1835 
1836                             /* This task blocking on the mutex caused another
1837                              * task to inherit this task's priority.  Now this task
1838                              * has timed out the priority should be disinherited
1839                              * again, but only as low as the next highest priority
1840                              * task that is waiting for the same mutex. */
1841                             uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1842 
1843                             /* vTaskPriorityDisinheritAfterTimeout uses the uxHighestWaitingPriority
1844                              * parameter to index pxReadyTasksLists when adding the task holding
1845                              * mutex to the ready list for its new priority. Coverity thinks that
1846                              * it can result in out-of-bounds access which is not true because
1847                              * uxHighestWaitingPriority, as returned by prvGetDisinheritPriorityAfterTimeout,
1848                              * is capped at ( configMAX_PRIORITIES - 1 ). */
1849                             /* coverity[overrun] */
1850                             vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1851                         }
1852                         taskEXIT_CRITICAL();
1853                     }
1854                 }
1855                 #endif /* configUSE_MUTEXES */
1856 
1857                 traceQUEUE_RECEIVE_FAILED( pxQueue );
1858                 traceRETURN_xQueueSemaphoreTake( errQUEUE_EMPTY );
1859 
1860                 return errQUEUE_EMPTY;
1861             }
1862             else
1863             {
1864                 mtCOVERAGE_TEST_MARKER();
1865             }
1866         }
1867     }
1868 }
1869 /*-----------------------------------------------------------*/
1870 
xQueuePeek(QueueHandle_t xQueue,void * const pvBuffer,TickType_t xTicksToWait)1871 BaseType_t xQueuePeek( QueueHandle_t xQueue,
1872                        void * const pvBuffer,
1873                        TickType_t xTicksToWait )
1874 {
1875     BaseType_t xEntryTimeSet = pdFALSE;
1876     TimeOut_t xTimeOut;
1877     int8_t * pcOriginalReadPosition;
1878     Queue_t * const pxQueue = xQueue;
1879 
1880     traceENTER_xQueuePeek( xQueue, pvBuffer, xTicksToWait );
1881 
1882     /* Check the pointer is not NULL. */
1883     configASSERT( ( pxQueue ) );
1884 
1885     /* The buffer into which data is received can only be NULL if the data size
1886      * is zero (so no data is copied into the buffer. */
1887     configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1888 
1889     /* Cannot block if the scheduler is suspended. */
1890     #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1891     {
1892         configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1893     }
1894     #endif
1895 
1896     for( ; ; )
1897     {
1898         taskENTER_CRITICAL();
1899         {
1900             const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1901 
1902             /* Is there data in the queue now?  To be running the calling task
1903              * must be the highest priority task wanting to access the queue. */
1904             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1905             {
1906                 /* Remember the read position so it can be reset after the data
1907                  * is read from the queue as this function is only peeking the
1908                  * data, not removing it. */
1909                 pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1910 
1911                 prvCopyDataFromQueue( pxQueue, pvBuffer );
1912                 traceQUEUE_PEEK( pxQueue );
1913 
1914                 /* The data is not being removed, so reset the read pointer. */
1915                 pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1916 
1917                 /* The data is being left in the queue, so see if there are
1918                  * any other tasks waiting for the data. */
1919                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1920                 {
1921                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1922                     {
1923                         /* The task waiting has a higher priority than this task. */
1924                         queueYIELD_IF_USING_PREEMPTION();
1925                     }
1926                     else
1927                     {
1928                         mtCOVERAGE_TEST_MARKER();
1929                     }
1930                 }
1931                 else
1932                 {
1933                     mtCOVERAGE_TEST_MARKER();
1934                 }
1935 
1936                 taskEXIT_CRITICAL();
1937 
1938                 traceRETURN_xQueuePeek( pdPASS );
1939 
1940                 return pdPASS;
1941             }
1942             else
1943             {
1944                 if( xTicksToWait == ( TickType_t ) 0 )
1945                 {
1946                     /* The queue was empty and no block time is specified (or
1947                      * the block time has expired) so leave now. */
1948                     taskEXIT_CRITICAL();
1949 
1950                     traceQUEUE_PEEK_FAILED( pxQueue );
1951                     traceRETURN_xQueuePeek( errQUEUE_EMPTY );
1952 
1953                     return errQUEUE_EMPTY;
1954                 }
1955                 else if( xEntryTimeSet == pdFALSE )
1956                 {
1957                     /* The queue was empty and a block time was specified so
1958                      * configure the timeout structure ready to enter the blocked
1959                      * state. */
1960                     vTaskInternalSetTimeOutState( &xTimeOut );
1961                     xEntryTimeSet = pdTRUE;
1962                 }
1963                 else
1964                 {
1965                     /* Entry time was already set. */
1966                     mtCOVERAGE_TEST_MARKER();
1967                 }
1968             }
1969         }
1970         taskEXIT_CRITICAL();
1971 
1972         /* Interrupts and other tasks can send to and receive from the queue
1973          * now that the critical section has been exited. */
1974 
1975         vTaskSuspendAll();
1976         prvLockQueue( pxQueue );
1977 
1978         /* Update the timeout state to see if it has expired yet. */
1979         if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1980         {
1981             /* Timeout has not expired yet, check to see if there is data in the
1982             * queue now, and if not enter the Blocked state to wait for data. */
1983             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1984             {
1985                 traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1986                 vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1987                 prvUnlockQueue( pxQueue );
1988 
1989                 if( xTaskResumeAll() == pdFALSE )
1990                 {
1991                     taskYIELD_WITHIN_API();
1992                 }
1993                 else
1994                 {
1995                     mtCOVERAGE_TEST_MARKER();
1996                 }
1997             }
1998             else
1999             {
2000                 /* There is data in the queue now, so don't enter the blocked
2001                  * state, instead return to try and obtain the data. */
2002                 prvUnlockQueue( pxQueue );
2003                 ( void ) xTaskResumeAll();
2004             }
2005         }
2006         else
2007         {
2008             /* The timeout has expired.  If there is still no data in the queue
2009              * exit, otherwise go back and try to read the data again. */
2010             prvUnlockQueue( pxQueue );
2011             ( void ) xTaskResumeAll();
2012 
2013             if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
2014             {
2015                 traceQUEUE_PEEK_FAILED( pxQueue );
2016                 traceRETURN_xQueuePeek( errQUEUE_EMPTY );
2017 
2018                 return errQUEUE_EMPTY;
2019             }
2020             else
2021             {
2022                 mtCOVERAGE_TEST_MARKER();
2023             }
2024         }
2025     }
2026 }
2027 /*-----------------------------------------------------------*/
2028 
xQueueReceiveFromISR(QueueHandle_t xQueue,void * const pvBuffer,BaseType_t * const pxHigherPriorityTaskWoken)2029 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue,
2030                                  void * const pvBuffer,
2031                                  BaseType_t * const pxHigherPriorityTaskWoken )
2032 {
2033     BaseType_t xReturn;
2034     UBaseType_t uxSavedInterruptStatus;
2035     Queue_t * const pxQueue = xQueue;
2036 
2037     traceENTER_xQueueReceiveFromISR( xQueue, pvBuffer, pxHigherPriorityTaskWoken );
2038 
2039     configASSERT( pxQueue );
2040     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2041 
2042     /* RTOS ports that support interrupt nesting have the concept of a maximum
2043      * system call (or maximum API call) interrupt priority.  Interrupts that are
2044      * above the maximum system call priority are kept permanently enabled, even
2045      * when the RTOS kernel is in a critical section, but cannot make any calls to
2046      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2047      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2048      * failure if a FreeRTOS API function is called from an interrupt that has been
2049      * assigned a priority above the configured maximum system call priority.
2050      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2051      * that have been assigned a priority at or (logically) below the maximum
2052      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2053      * safe API to ensure interrupt entry is as fast and as simple as possible.
2054      * More information (albeit Cortex-M specific) is provided on the following
2055      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2056     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2057 
2058     uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2059     {
2060         const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2061 
2062         /* Cannot block in an ISR, so check there is data available. */
2063         if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2064         {
2065             const int8_t cRxLock = pxQueue->cRxLock;
2066 
2067             traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
2068 
2069             prvCopyDataFromQueue( pxQueue, pvBuffer );
2070             pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting - ( UBaseType_t ) 1 );
2071 
2072             /* If the queue is locked the event list will not be modified.
2073              * Instead update the lock count so the task that unlocks the queue
2074              * will know that an ISR has removed data while the queue was
2075              * locked. */
2076             if( cRxLock == queueUNLOCKED )
2077             {
2078                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2079                 {
2080                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2081                     {
2082                         /* The task waiting has a higher priority than us so
2083                          * force a context switch. */
2084                         if( pxHigherPriorityTaskWoken != NULL )
2085                         {
2086                             *pxHigherPriorityTaskWoken = pdTRUE;
2087                         }
2088                         else
2089                         {
2090                             mtCOVERAGE_TEST_MARKER();
2091                         }
2092                     }
2093                     else
2094                     {
2095                         mtCOVERAGE_TEST_MARKER();
2096                     }
2097                 }
2098                 else
2099                 {
2100                     mtCOVERAGE_TEST_MARKER();
2101                 }
2102             }
2103             else
2104             {
2105                 /* Increment the lock count so the task that unlocks the queue
2106                  * knows that data was removed while it was locked. */
2107                 prvIncrementQueueRxLock( pxQueue, cRxLock );
2108             }
2109 
2110             xReturn = pdPASS;
2111         }
2112         else
2113         {
2114             xReturn = pdFAIL;
2115             traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
2116         }
2117     }
2118     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2119 
2120     traceRETURN_xQueueReceiveFromISR( xReturn );
2121 
2122     return xReturn;
2123 }
2124 /*-----------------------------------------------------------*/
2125 
xQueuePeekFromISR(QueueHandle_t xQueue,void * const pvBuffer)2126 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue,
2127                               void * const pvBuffer )
2128 {
2129     BaseType_t xReturn;
2130     UBaseType_t uxSavedInterruptStatus;
2131     int8_t * pcOriginalReadPosition;
2132     Queue_t * const pxQueue = xQueue;
2133 
2134     traceENTER_xQueuePeekFromISR( xQueue, pvBuffer );
2135 
2136     configASSERT( pxQueue );
2137     configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
2138     configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
2139 
2140     /* RTOS ports that support interrupt nesting have the concept of a maximum
2141      * system call (or maximum API call) interrupt priority.  Interrupts that are
2142      * above the maximum system call priority are kept permanently enabled, even
2143      * when the RTOS kernel is in a critical section, but cannot make any calls to
2144      * FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
2145      * then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
2146      * failure if a FreeRTOS API function is called from an interrupt that has been
2147      * assigned a priority above the configured maximum system call priority.
2148      * Only FreeRTOS functions that end in FromISR can be called from interrupts
2149      * that have been assigned a priority at or (logically) below the maximum
2150      * system call interrupt priority.  FreeRTOS maintains a separate interrupt
2151      * safe API to ensure interrupt entry is as fast and as simple as possible.
2152      * More information (albeit Cortex-M specific) is provided on the following
2153      * link: https://www.FreeRTOS.org/RTOS-Cortex-M3-M4.html */
2154     portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
2155 
2156     uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
2157     {
2158         /* Cannot block in an ISR, so check there is data available. */
2159         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2160         {
2161             traceQUEUE_PEEK_FROM_ISR( pxQueue );
2162 
2163             /* Remember the read position so it can be reset as nothing is
2164              * actually being removed from the queue. */
2165             pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
2166             prvCopyDataFromQueue( pxQueue, pvBuffer );
2167             pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
2168 
2169             xReturn = pdPASS;
2170         }
2171         else
2172         {
2173             xReturn = pdFAIL;
2174             traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
2175         }
2176     }
2177     taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
2178 
2179     traceRETURN_xQueuePeekFromISR( xReturn );
2180 
2181     return xReturn;
2182 }
2183 /*-----------------------------------------------------------*/
2184 
uxQueueMessagesWaiting(const QueueHandle_t xQueue)2185 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
2186 {
2187     UBaseType_t uxReturn;
2188 
2189     traceENTER_uxQueueMessagesWaiting( xQueue );
2190 
2191     configASSERT( xQueue );
2192 
2193     taskENTER_CRITICAL();
2194     {
2195         uxReturn = ( ( Queue_t * ) xQueue )->uxMessagesWaiting;
2196     }
2197     taskEXIT_CRITICAL();
2198 
2199     traceRETURN_uxQueueMessagesWaiting( uxReturn );
2200 
2201     return uxReturn;
2202 }
2203 /*-----------------------------------------------------------*/
2204 
uxQueueSpacesAvailable(const QueueHandle_t xQueue)2205 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
2206 {
2207     UBaseType_t uxReturn;
2208     Queue_t * const pxQueue = xQueue;
2209 
2210     traceENTER_uxQueueSpacesAvailable( xQueue );
2211 
2212     configASSERT( pxQueue );
2213 
2214     taskENTER_CRITICAL();
2215     {
2216         uxReturn = ( UBaseType_t ) ( pxQueue->uxLength - pxQueue->uxMessagesWaiting );
2217     }
2218     taskEXIT_CRITICAL();
2219 
2220     traceRETURN_uxQueueSpacesAvailable( uxReturn );
2221 
2222     return uxReturn;
2223 }
2224 /*-----------------------------------------------------------*/
2225 
uxQueueMessagesWaitingFromISR(const QueueHandle_t xQueue)2226 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
2227 {
2228     UBaseType_t uxReturn;
2229     Queue_t * const pxQueue = xQueue;
2230 
2231     traceENTER_uxQueueMessagesWaitingFromISR( xQueue );
2232 
2233     configASSERT( pxQueue );
2234     uxReturn = pxQueue->uxMessagesWaiting;
2235 
2236     traceRETURN_uxQueueMessagesWaitingFromISR( uxReturn );
2237 
2238     return uxReturn;
2239 }
2240 /*-----------------------------------------------------------*/
2241 
vQueueDelete(QueueHandle_t xQueue)2242 void vQueueDelete( QueueHandle_t xQueue )
2243 {
2244     Queue_t * const pxQueue = xQueue;
2245 
2246     traceENTER_vQueueDelete( xQueue );
2247 
2248     configASSERT( pxQueue );
2249     traceQUEUE_DELETE( pxQueue );
2250 
2251     #if ( configQUEUE_REGISTRY_SIZE > 0 )
2252     {
2253         vQueueUnregisterQueue( pxQueue );
2254     }
2255     #endif
2256 
2257     #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2258     {
2259         /* The queue can only have been allocated dynamically - free it
2260          * again. */
2261         vPortFree( pxQueue );
2262     }
2263     #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2264     {
2265         /* The queue could have been allocated statically or dynamically, so
2266          * check before attempting to free the memory. */
2267         if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2268         {
2269             vPortFree( pxQueue );
2270         }
2271         else
2272         {
2273             mtCOVERAGE_TEST_MARKER();
2274         }
2275     }
2276     #else /* if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) ) */
2277     {
2278         /* The queue must have been statically allocated, so is not going to be
2279          * deleted.  Avoid compiler warnings about the unused parameter. */
2280         ( void ) pxQueue;
2281     }
2282     #endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2283 
2284     traceRETURN_vQueueDelete();
2285 }
2286 /*-----------------------------------------------------------*/
2287 
2288 #if ( configUSE_TRACE_FACILITY == 1 )
2289 
uxQueueGetQueueNumber(QueueHandle_t xQueue)2290     UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2291     {
2292         traceENTER_uxQueueGetQueueNumber( xQueue );
2293 
2294         traceRETURN_uxQueueGetQueueNumber( ( ( Queue_t * ) xQueue )->uxQueueNumber );
2295 
2296         return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2297     }
2298 
2299 #endif /* configUSE_TRACE_FACILITY */
2300 /*-----------------------------------------------------------*/
2301 
2302 #if ( configUSE_TRACE_FACILITY == 1 )
2303 
vQueueSetQueueNumber(QueueHandle_t xQueue,UBaseType_t uxQueueNumber)2304     void vQueueSetQueueNumber( QueueHandle_t xQueue,
2305                                UBaseType_t uxQueueNumber )
2306     {
2307         traceENTER_vQueueSetQueueNumber( xQueue, uxQueueNumber );
2308 
2309         ( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2310 
2311         traceRETURN_vQueueSetQueueNumber();
2312     }
2313 
2314 #endif /* configUSE_TRACE_FACILITY */
2315 /*-----------------------------------------------------------*/
2316 
2317 #if ( configUSE_TRACE_FACILITY == 1 )
2318 
ucQueueGetQueueType(QueueHandle_t xQueue)2319     uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2320     {
2321         traceENTER_ucQueueGetQueueType( xQueue );
2322 
2323         traceRETURN_ucQueueGetQueueType( ( ( Queue_t * ) xQueue )->ucQueueType );
2324 
2325         return ( ( Queue_t * ) xQueue )->ucQueueType;
2326     }
2327 
2328 #endif /* configUSE_TRACE_FACILITY */
2329 /*-----------------------------------------------------------*/
2330 
uxQueueGetQueueItemSize(QueueHandle_t xQueue)2331 UBaseType_t uxQueueGetQueueItemSize( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2332 {
2333     traceENTER_uxQueueGetQueueItemSize( xQueue );
2334 
2335     traceRETURN_uxQueueGetQueueItemSize( ( ( Queue_t * ) xQueue )->uxItemSize );
2336 
2337     return ( ( Queue_t * ) xQueue )->uxItemSize;
2338 }
2339 /*-----------------------------------------------------------*/
2340 
uxQueueGetQueueLength(QueueHandle_t xQueue)2341 UBaseType_t uxQueueGetQueueLength( QueueHandle_t xQueue ) /* PRIVILEGED_FUNCTION */
2342 {
2343     traceENTER_uxQueueGetQueueLength( xQueue );
2344 
2345     traceRETURN_uxQueueGetQueueLength( ( ( Queue_t * ) xQueue )->uxLength );
2346 
2347     return ( ( Queue_t * ) xQueue )->uxLength;
2348 }
2349 /*-----------------------------------------------------------*/
2350 
2351 #if ( configUSE_MUTEXES == 1 )
2352 
prvGetDisinheritPriorityAfterTimeout(const Queue_t * const pxQueue)2353     static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2354     {
2355         UBaseType_t uxHighestPriorityOfWaitingTasks;
2356 
2357         /* If a task waiting for a mutex causes the mutex holder to inherit a
2358          * priority, but the waiting task times out, then the holder should
2359          * disinherit the priority - but only down to the highest priority of any
2360          * other tasks that are waiting for the same mutex.  For this purpose,
2361          * return the priority of the highest priority task that is waiting for the
2362          * mutex. */
2363         if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2364         {
2365             uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) ( ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) ) );
2366         }
2367         else
2368         {
2369             uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2370         }
2371 
2372         return uxHighestPriorityOfWaitingTasks;
2373     }
2374 
2375 #endif /* configUSE_MUTEXES */
2376 /*-----------------------------------------------------------*/
2377 
prvCopyDataToQueue(Queue_t * const pxQueue,const void * pvItemToQueue,const BaseType_t xPosition)2378 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue,
2379                                       const void * pvItemToQueue,
2380                                       const BaseType_t xPosition )
2381 {
2382     BaseType_t xReturn = pdFALSE;
2383     UBaseType_t uxMessagesWaiting;
2384 
2385     /* This function is called from a critical section. */
2386 
2387     uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2388 
2389     if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2390     {
2391         #if ( configUSE_MUTEXES == 1 )
2392         {
2393             if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2394             {
2395                 /* The mutex is no longer being held. */
2396                 xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2397                 pxQueue->u.xSemaphore.xMutexHolder = NULL;
2398             }
2399             else
2400             {
2401                 mtCOVERAGE_TEST_MARKER();
2402             }
2403         }
2404         #endif /* configUSE_MUTEXES */
2405     }
2406     else if( xPosition == queueSEND_TO_BACK )
2407     {
2408         ( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2409         pxQueue->pcWriteTo += pxQueue->uxItemSize;
2410 
2411         if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail )
2412         {
2413             pxQueue->pcWriteTo = pxQueue->pcHead;
2414         }
2415         else
2416         {
2417             mtCOVERAGE_TEST_MARKER();
2418         }
2419     }
2420     else
2421     {
2422         ( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize );
2423         pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2424 
2425         if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead )
2426         {
2427             pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2428         }
2429         else
2430         {
2431             mtCOVERAGE_TEST_MARKER();
2432         }
2433 
2434         if( xPosition == queueOVERWRITE )
2435         {
2436             if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2437             {
2438                 /* An item is not being added but overwritten, so subtract
2439                  * one from the recorded number of items in the queue so when
2440                  * one is added again below the number of recorded items remains
2441                  * correct. */
2442                 --uxMessagesWaiting;
2443             }
2444             else
2445             {
2446                 mtCOVERAGE_TEST_MARKER();
2447             }
2448         }
2449         else
2450         {
2451             mtCOVERAGE_TEST_MARKER();
2452         }
2453     }
2454 
2455     pxQueue->uxMessagesWaiting = ( UBaseType_t ) ( uxMessagesWaiting + ( UBaseType_t ) 1 );
2456 
2457     return xReturn;
2458 }
2459 /*-----------------------------------------------------------*/
2460 
prvCopyDataFromQueue(Queue_t * const pxQueue,void * const pvBuffer)2461 static void prvCopyDataFromQueue( Queue_t * const pxQueue,
2462                                   void * const pvBuffer )
2463 {
2464     if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2465     {
2466         pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2467 
2468         if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2469         {
2470             pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2471         }
2472         else
2473         {
2474             mtCOVERAGE_TEST_MARKER();
2475         }
2476 
2477         ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize );
2478     }
2479 }
2480 /*-----------------------------------------------------------*/
2481 
prvUnlockQueue(Queue_t * const pxQueue)2482 static void prvUnlockQueue( Queue_t * const pxQueue )
2483 {
2484     /* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2485 
2486     /* The lock counts contains the number of extra data items placed or
2487      * removed from the queue while the queue was locked.  When a queue is
2488      * locked items can be added or removed, but the event lists cannot be
2489      * updated. */
2490     taskENTER_CRITICAL();
2491     {
2492         int8_t cTxLock = pxQueue->cTxLock;
2493 
2494         /* See if data was added to the queue while it was locked. */
2495         while( cTxLock > queueLOCKED_UNMODIFIED )
2496         {
2497             /* Data was posted while the queue was locked.  Are any tasks
2498              * blocked waiting for data to become available? */
2499             #if ( configUSE_QUEUE_SETS == 1 )
2500             {
2501                 if( pxQueue->pxQueueSetContainer != NULL )
2502                 {
2503                     if( prvNotifyQueueSetContainer( pxQueue ) != pdFALSE )
2504                     {
2505                         /* The queue is a member of a queue set, and posting to
2506                          * the queue set caused a higher priority task to unblock.
2507                          * A context switch is required. */
2508                         vTaskMissedYield();
2509                     }
2510                     else
2511                     {
2512                         mtCOVERAGE_TEST_MARKER();
2513                     }
2514                 }
2515                 else
2516                 {
2517                     /* Tasks that are removed from the event list will get
2518                      * added to the pending ready list as the scheduler is still
2519                      * suspended. */
2520                     if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2521                     {
2522                         if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2523                         {
2524                             /* The task waiting has a higher priority so record that a
2525                              * context switch is required. */
2526                             vTaskMissedYield();
2527                         }
2528                         else
2529                         {
2530                             mtCOVERAGE_TEST_MARKER();
2531                         }
2532                     }
2533                     else
2534                     {
2535                         break;
2536                     }
2537                 }
2538             }
2539             #else /* configUSE_QUEUE_SETS */
2540             {
2541                 /* Tasks that are removed from the event list will get added to
2542                  * the pending ready list as the scheduler is still suspended. */
2543                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2544                 {
2545                     if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2546                     {
2547                         /* The task waiting has a higher priority so record that
2548                          * a context switch is required. */
2549                         vTaskMissedYield();
2550                     }
2551                     else
2552                     {
2553                         mtCOVERAGE_TEST_MARKER();
2554                     }
2555                 }
2556                 else
2557                 {
2558                     break;
2559                 }
2560             }
2561             #endif /* configUSE_QUEUE_SETS */
2562 
2563             --cTxLock;
2564         }
2565 
2566         pxQueue->cTxLock = queueUNLOCKED;
2567     }
2568     taskEXIT_CRITICAL();
2569 
2570     /* Do the same for the Rx lock. */
2571     taskENTER_CRITICAL();
2572     {
2573         int8_t cRxLock = pxQueue->cRxLock;
2574 
2575         while( cRxLock > queueLOCKED_UNMODIFIED )
2576         {
2577             if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2578             {
2579                 if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2580                 {
2581                     vTaskMissedYield();
2582                 }
2583                 else
2584                 {
2585                     mtCOVERAGE_TEST_MARKER();
2586                 }
2587 
2588                 --cRxLock;
2589             }
2590             else
2591             {
2592                 break;
2593             }
2594         }
2595 
2596         pxQueue->cRxLock = queueUNLOCKED;
2597     }
2598     taskEXIT_CRITICAL();
2599 }
2600 /*-----------------------------------------------------------*/
2601 
prvIsQueueEmpty(const Queue_t * pxQueue)2602 static BaseType_t prvIsQueueEmpty( const Queue_t * pxQueue )
2603 {
2604     BaseType_t xReturn;
2605 
2606     taskENTER_CRITICAL();
2607     {
2608         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2609         {
2610             xReturn = pdTRUE;
2611         }
2612         else
2613         {
2614             xReturn = pdFALSE;
2615         }
2616     }
2617     taskEXIT_CRITICAL();
2618 
2619     return xReturn;
2620 }
2621 /*-----------------------------------------------------------*/
2622 
xQueueIsQueueEmptyFromISR(const QueueHandle_t xQueue)2623 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2624 {
2625     BaseType_t xReturn;
2626     Queue_t * const pxQueue = xQueue;
2627 
2628     traceENTER_xQueueIsQueueEmptyFromISR( xQueue );
2629 
2630     configASSERT( pxQueue );
2631 
2632     if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2633     {
2634         xReturn = pdTRUE;
2635     }
2636     else
2637     {
2638         xReturn = pdFALSE;
2639     }
2640 
2641     traceRETURN_xQueueIsQueueEmptyFromISR( xReturn );
2642 
2643     return xReturn;
2644 }
2645 /*-----------------------------------------------------------*/
2646 
prvIsQueueFull(const Queue_t * pxQueue)2647 static BaseType_t prvIsQueueFull( const Queue_t * pxQueue )
2648 {
2649     BaseType_t xReturn;
2650 
2651     taskENTER_CRITICAL();
2652     {
2653         if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2654         {
2655             xReturn = pdTRUE;
2656         }
2657         else
2658         {
2659             xReturn = pdFALSE;
2660         }
2661     }
2662     taskEXIT_CRITICAL();
2663 
2664     return xReturn;
2665 }
2666 /*-----------------------------------------------------------*/
2667 
xQueueIsQueueFullFromISR(const QueueHandle_t xQueue)2668 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2669 {
2670     BaseType_t xReturn;
2671     Queue_t * const pxQueue = xQueue;
2672 
2673     traceENTER_xQueueIsQueueFullFromISR( xQueue );
2674 
2675     configASSERT( pxQueue );
2676 
2677     if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2678     {
2679         xReturn = pdTRUE;
2680     }
2681     else
2682     {
2683         xReturn = pdFALSE;
2684     }
2685 
2686     traceRETURN_xQueueIsQueueFullFromISR( xReturn );
2687 
2688     return xReturn;
2689 }
2690 /*-----------------------------------------------------------*/
2691 
2692 #if ( configUSE_CO_ROUTINES == 1 )
2693 
xQueueCRSend(QueueHandle_t xQueue,const void * pvItemToQueue,TickType_t xTicksToWait)2694     BaseType_t xQueueCRSend( QueueHandle_t xQueue,
2695                              const void * pvItemToQueue,
2696                              TickType_t xTicksToWait )
2697     {
2698         BaseType_t xReturn;
2699         Queue_t * const pxQueue = xQueue;
2700 
2701         traceENTER_xQueueCRSend( xQueue, pvItemToQueue, xTicksToWait );
2702 
2703         /* If the queue is already full we may have to block.  A critical section
2704          * is required to prevent an interrupt removing something from the queue
2705          * between the check to see if the queue is full and blocking on the queue. */
2706         portDISABLE_INTERRUPTS();
2707         {
2708             if( prvIsQueueFull( pxQueue ) != pdFALSE )
2709             {
2710                 /* The queue is full - do we want to block or just leave without
2711                  * posting? */
2712                 if( xTicksToWait > ( TickType_t ) 0 )
2713                 {
2714                     /* As this is called from a coroutine we cannot block directly, but
2715                      * return indicating that we need to block. */
2716                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2717                     portENABLE_INTERRUPTS();
2718                     return errQUEUE_BLOCKED;
2719                 }
2720                 else
2721                 {
2722                     portENABLE_INTERRUPTS();
2723                     return errQUEUE_FULL;
2724                 }
2725             }
2726         }
2727         portENABLE_INTERRUPTS();
2728 
2729         portDISABLE_INTERRUPTS();
2730         {
2731             if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2732             {
2733                 /* There is room in the queue, copy the data into the queue. */
2734                 prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2735                 xReturn = pdPASS;
2736 
2737                 /* Were any co-routines waiting for data to become available? */
2738                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2739                 {
2740                     /* In this instance the co-routine could be placed directly
2741                      * into the ready list as we are within a critical section.
2742                      * Instead the same pending ready list mechanism is used as if
2743                      * the event were caused from within an interrupt. */
2744                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2745                     {
2746                         /* The co-routine waiting has a higher priority so record
2747                          * that a yield might be appropriate. */
2748                         xReturn = errQUEUE_YIELD;
2749                     }
2750                     else
2751                     {
2752                         mtCOVERAGE_TEST_MARKER();
2753                     }
2754                 }
2755                 else
2756                 {
2757                     mtCOVERAGE_TEST_MARKER();
2758                 }
2759             }
2760             else
2761             {
2762                 xReturn = errQUEUE_FULL;
2763             }
2764         }
2765         portENABLE_INTERRUPTS();
2766 
2767         traceRETURN_xQueueCRSend( xReturn );
2768 
2769         return xReturn;
2770     }
2771 
2772 #endif /* configUSE_CO_ROUTINES */
2773 /*-----------------------------------------------------------*/
2774 
2775 #if ( configUSE_CO_ROUTINES == 1 )
2776 
xQueueCRReceive(QueueHandle_t xQueue,void * pvBuffer,TickType_t xTicksToWait)2777     BaseType_t xQueueCRReceive( QueueHandle_t xQueue,
2778                                 void * pvBuffer,
2779                                 TickType_t xTicksToWait )
2780     {
2781         BaseType_t xReturn;
2782         Queue_t * const pxQueue = xQueue;
2783 
2784         traceENTER_xQueueCRReceive( xQueue, pvBuffer, xTicksToWait );
2785 
2786         /* If the queue is already empty we may have to block.  A critical section
2787          * is required to prevent an interrupt adding something to the queue
2788          * between the check to see if the queue is empty and blocking on the queue. */
2789         portDISABLE_INTERRUPTS();
2790         {
2791             if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2792             {
2793                 /* There are no messages in the queue, do we want to block or just
2794                  * leave with nothing? */
2795                 if( xTicksToWait > ( TickType_t ) 0 )
2796                 {
2797                     /* As this is a co-routine we cannot block directly, but return
2798                      * indicating that we need to block. */
2799                     vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2800                     portENABLE_INTERRUPTS();
2801                     return errQUEUE_BLOCKED;
2802                 }
2803                 else
2804                 {
2805                     portENABLE_INTERRUPTS();
2806                     return errQUEUE_FULL;
2807                 }
2808             }
2809             else
2810             {
2811                 mtCOVERAGE_TEST_MARKER();
2812             }
2813         }
2814         portENABLE_INTERRUPTS();
2815 
2816         portDISABLE_INTERRUPTS();
2817         {
2818             if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2819             {
2820                 /* Data is available from the queue. */
2821                 pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2822 
2823                 if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2824                 {
2825                     pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2826                 }
2827                 else
2828                 {
2829                     mtCOVERAGE_TEST_MARKER();
2830                 }
2831 
2832                 --( pxQueue->uxMessagesWaiting );
2833                 ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2834 
2835                 xReturn = pdPASS;
2836 
2837                 /* Were any co-routines waiting for space to become available? */
2838                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2839                 {
2840                     /* In this instance the co-routine could be placed directly
2841                      * into the ready list as we are within a critical section.
2842                      * Instead the same pending ready list mechanism is used as if
2843                      * the event were caused from within an interrupt. */
2844                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2845                     {
2846                         xReturn = errQUEUE_YIELD;
2847                     }
2848                     else
2849                     {
2850                         mtCOVERAGE_TEST_MARKER();
2851                     }
2852                 }
2853                 else
2854                 {
2855                     mtCOVERAGE_TEST_MARKER();
2856                 }
2857             }
2858             else
2859             {
2860                 xReturn = pdFAIL;
2861             }
2862         }
2863         portENABLE_INTERRUPTS();
2864 
2865         traceRETURN_xQueueCRReceive( xReturn );
2866 
2867         return xReturn;
2868     }
2869 
2870 #endif /* configUSE_CO_ROUTINES */
2871 /*-----------------------------------------------------------*/
2872 
2873 #if ( configUSE_CO_ROUTINES == 1 )
2874 
xQueueCRSendFromISR(QueueHandle_t xQueue,const void * pvItemToQueue,BaseType_t xCoRoutinePreviouslyWoken)2875     BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue,
2876                                     const void * pvItemToQueue,
2877                                     BaseType_t xCoRoutinePreviouslyWoken )
2878     {
2879         Queue_t * const pxQueue = xQueue;
2880 
2881         traceENTER_xQueueCRSendFromISR( xQueue, pvItemToQueue, xCoRoutinePreviouslyWoken );
2882 
2883         /* Cannot block within an ISR so if there is no space on the queue then
2884          * exit without doing anything. */
2885         if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2886         {
2887             prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2888 
2889             /* We only want to wake one co-routine per ISR, so check that a
2890              * co-routine has not already been woken. */
2891             if( xCoRoutinePreviouslyWoken == pdFALSE )
2892             {
2893                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2894                 {
2895                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2896                     {
2897                         return pdTRUE;
2898                     }
2899                     else
2900                     {
2901                         mtCOVERAGE_TEST_MARKER();
2902                     }
2903                 }
2904                 else
2905                 {
2906                     mtCOVERAGE_TEST_MARKER();
2907                 }
2908             }
2909             else
2910             {
2911                 mtCOVERAGE_TEST_MARKER();
2912             }
2913         }
2914         else
2915         {
2916             mtCOVERAGE_TEST_MARKER();
2917         }
2918 
2919         traceRETURN_xQueueCRSendFromISR( xCoRoutinePreviouslyWoken );
2920 
2921         return xCoRoutinePreviouslyWoken;
2922     }
2923 
2924 #endif /* configUSE_CO_ROUTINES */
2925 /*-----------------------------------------------------------*/
2926 
2927 #if ( configUSE_CO_ROUTINES == 1 )
2928 
xQueueCRReceiveFromISR(QueueHandle_t xQueue,void * pvBuffer,BaseType_t * pxCoRoutineWoken)2929     BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue,
2930                                        void * pvBuffer,
2931                                        BaseType_t * pxCoRoutineWoken )
2932     {
2933         BaseType_t xReturn;
2934         Queue_t * const pxQueue = xQueue;
2935 
2936         traceENTER_xQueueCRReceiveFromISR( xQueue, pvBuffer, pxCoRoutineWoken );
2937 
2938         /* We cannot block from an ISR, so check there is data available. If
2939          * not then just leave without doing anything. */
2940         if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2941         {
2942             /* Copy the data from the queue. */
2943             pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2944 
2945             if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2946             {
2947                 pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2948             }
2949             else
2950             {
2951                 mtCOVERAGE_TEST_MARKER();
2952             }
2953 
2954             --( pxQueue->uxMessagesWaiting );
2955             ( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2956 
2957             if( ( *pxCoRoutineWoken ) == pdFALSE )
2958             {
2959                 if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2960                 {
2961                     if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2962                     {
2963                         *pxCoRoutineWoken = pdTRUE;
2964                     }
2965                     else
2966                     {
2967                         mtCOVERAGE_TEST_MARKER();
2968                     }
2969                 }
2970                 else
2971                 {
2972                     mtCOVERAGE_TEST_MARKER();
2973                 }
2974             }
2975             else
2976             {
2977                 mtCOVERAGE_TEST_MARKER();
2978             }
2979 
2980             xReturn = pdPASS;
2981         }
2982         else
2983         {
2984             xReturn = pdFAIL;
2985         }
2986 
2987         traceRETURN_xQueueCRReceiveFromISR( xReturn );
2988 
2989         return xReturn;
2990     }
2991 
2992 #endif /* configUSE_CO_ROUTINES */
2993 /*-----------------------------------------------------------*/
2994 
2995 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2996 
vQueueAddToRegistry(QueueHandle_t xQueue,const char * pcQueueName)2997     void vQueueAddToRegistry( QueueHandle_t xQueue,
2998                               const char * pcQueueName )
2999     {
3000         UBaseType_t ux;
3001         QueueRegistryItem_t * pxEntryToWrite = NULL;
3002 
3003         traceENTER_vQueueAddToRegistry( xQueue, pcQueueName );
3004 
3005         configASSERT( xQueue );
3006 
3007         if( pcQueueName != NULL )
3008         {
3009             /* See if there is an empty space in the registry.  A NULL name denotes
3010              * a free slot. */
3011             for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3012             {
3013                 /* Replace an existing entry if the queue is already in the registry. */
3014                 if( xQueue == xQueueRegistry[ ux ].xHandle )
3015                 {
3016                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3017                     break;
3018                 }
3019                 /* Otherwise, store in the next empty location */
3020                 else if( ( pxEntryToWrite == NULL ) && ( xQueueRegistry[ ux ].pcQueueName == NULL ) )
3021                 {
3022                     pxEntryToWrite = &( xQueueRegistry[ ux ] );
3023                 }
3024                 else
3025                 {
3026                     mtCOVERAGE_TEST_MARKER();
3027                 }
3028             }
3029         }
3030 
3031         if( pxEntryToWrite != NULL )
3032         {
3033             /* Store the information on this queue. */
3034             pxEntryToWrite->pcQueueName = pcQueueName;
3035             pxEntryToWrite->xHandle = xQueue;
3036 
3037             traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
3038         }
3039 
3040         traceRETURN_vQueueAddToRegistry();
3041     }
3042 
3043 #endif /* configQUEUE_REGISTRY_SIZE */
3044 /*-----------------------------------------------------------*/
3045 
3046 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3047 
pcQueueGetName(QueueHandle_t xQueue)3048     const char * pcQueueGetName( QueueHandle_t xQueue )
3049     {
3050         UBaseType_t ux;
3051         const char * pcReturn = NULL;
3052 
3053         traceENTER_pcQueueGetName( xQueue );
3054 
3055         configASSERT( xQueue );
3056 
3057         /* Note there is nothing here to protect against another task adding or
3058          * removing entries from the registry while it is being searched. */
3059 
3060         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3061         {
3062             if( xQueueRegistry[ ux ].xHandle == xQueue )
3063             {
3064                 pcReturn = xQueueRegistry[ ux ].pcQueueName;
3065                 break;
3066             }
3067             else
3068             {
3069                 mtCOVERAGE_TEST_MARKER();
3070             }
3071         }
3072 
3073         traceRETURN_pcQueueGetName( pcReturn );
3074 
3075         return pcReturn;
3076     }
3077 
3078 #endif /* configQUEUE_REGISTRY_SIZE */
3079 /*-----------------------------------------------------------*/
3080 
3081 #if ( configQUEUE_REGISTRY_SIZE > 0 )
3082 
vQueueUnregisterQueue(QueueHandle_t xQueue)3083     void vQueueUnregisterQueue( QueueHandle_t xQueue )
3084     {
3085         UBaseType_t ux;
3086 
3087         traceENTER_vQueueUnregisterQueue( xQueue );
3088 
3089         configASSERT( xQueue );
3090 
3091         /* See if the handle of the queue being unregistered in actually in the
3092          * registry. */
3093         for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
3094         {
3095             if( xQueueRegistry[ ux ].xHandle == xQueue )
3096             {
3097                 /* Set the name to NULL to show that this slot if free again. */
3098                 xQueueRegistry[ ux ].pcQueueName = NULL;
3099 
3100                 /* Set the handle to NULL to ensure the same queue handle cannot
3101                  * appear in the registry twice if it is added, removed, then
3102                  * added again. */
3103                 xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
3104                 break;
3105             }
3106             else
3107             {
3108                 mtCOVERAGE_TEST_MARKER();
3109             }
3110         }
3111 
3112         traceRETURN_vQueueUnregisterQueue();
3113     }
3114 
3115 #endif /* configQUEUE_REGISTRY_SIZE */
3116 /*-----------------------------------------------------------*/
3117 
3118 #if ( configUSE_TIMERS == 1 )
3119 
vQueueWaitForMessageRestricted(QueueHandle_t xQueue,TickType_t xTicksToWait,const BaseType_t xWaitIndefinitely)3120     void vQueueWaitForMessageRestricted( QueueHandle_t xQueue,
3121                                          TickType_t xTicksToWait,
3122                                          const BaseType_t xWaitIndefinitely )
3123     {
3124         Queue_t * const pxQueue = xQueue;
3125 
3126         traceENTER_vQueueWaitForMessageRestricted( xQueue, xTicksToWait, xWaitIndefinitely );
3127 
3128         /* This function should not be called by application code hence the
3129          * 'Restricted' in its name.  It is not part of the public API.  It is
3130          * designed for use by kernel code, and has special calling requirements.
3131          * It can result in vListInsert() being called on a list that can only
3132          * possibly ever have one item in it, so the list will be fast, but even
3133          * so it should be called with the scheduler locked and not from a critical
3134          * section. */
3135 
3136         /* Only do anything if there are no messages in the queue.  This function
3137          *  will not actually cause the task to block, just place it on a blocked
3138          *  list.  It will not block until the scheduler is unlocked - at which
3139          *  time a yield will be performed.  If an item is added to the queue while
3140          *  the queue is locked, and the calling task blocks on the queue, then the
3141          *  calling task will be immediately unblocked when the queue is unlocked. */
3142         prvLockQueue( pxQueue );
3143 
3144         if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
3145         {
3146             /* There is nothing in the queue, block for the specified period. */
3147             vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
3148         }
3149         else
3150         {
3151             mtCOVERAGE_TEST_MARKER();
3152         }
3153 
3154         prvUnlockQueue( pxQueue );
3155 
3156         traceRETURN_vQueueWaitForMessageRestricted();
3157     }
3158 
3159 #endif /* configUSE_TIMERS */
3160 /*-----------------------------------------------------------*/
3161 
3162 #if ( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
3163 
xQueueCreateSet(const UBaseType_t uxEventQueueLength)3164     QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
3165     {
3166         QueueSetHandle_t pxQueue;
3167 
3168         traceENTER_xQueueCreateSet( uxEventQueueLength );
3169 
3170         pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
3171 
3172         traceRETURN_xQueueCreateSet( pxQueue );
3173 
3174         return pxQueue;
3175     }
3176 
3177 #endif /* configUSE_QUEUE_SETS */
3178 /*-----------------------------------------------------------*/
3179 
3180 #if ( configUSE_QUEUE_SETS == 1 )
3181 
xQueueAddToSet(QueueSetMemberHandle_t xQueueOrSemaphore,QueueSetHandle_t xQueueSet)3182     BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3183                                QueueSetHandle_t xQueueSet )
3184     {
3185         BaseType_t xReturn;
3186 
3187         traceENTER_xQueueAddToSet( xQueueOrSemaphore, xQueueSet );
3188 
3189         taskENTER_CRITICAL();
3190         {
3191             if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
3192             {
3193                 /* Cannot add a queue/semaphore to more than one queue set. */
3194                 xReturn = pdFAIL;
3195             }
3196             else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
3197             {
3198                 /* Cannot add a queue/semaphore to a queue set if there are already
3199                  * items in the queue/semaphore. */
3200                 xReturn = pdFAIL;
3201             }
3202             else
3203             {
3204                 ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
3205                 xReturn = pdPASS;
3206             }
3207         }
3208         taskEXIT_CRITICAL();
3209 
3210         traceRETURN_xQueueAddToSet( xReturn );
3211 
3212         return xReturn;
3213     }
3214 
3215 #endif /* configUSE_QUEUE_SETS */
3216 /*-----------------------------------------------------------*/
3217 
3218 #if ( configUSE_QUEUE_SETS == 1 )
3219 
xQueueRemoveFromSet(QueueSetMemberHandle_t xQueueOrSemaphore,QueueSetHandle_t xQueueSet)3220     BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore,
3221                                     QueueSetHandle_t xQueueSet )
3222     {
3223         BaseType_t xReturn;
3224         Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
3225 
3226         traceENTER_xQueueRemoveFromSet( xQueueOrSemaphore, xQueueSet );
3227 
3228         if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
3229         {
3230             /* The queue was not a member of the set. */
3231             xReturn = pdFAIL;
3232         }
3233         else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
3234         {
3235             /* It is dangerous to remove a queue from a set when the queue is
3236              * not empty because the queue set will still hold pending events for
3237              * the queue. */
3238             xReturn = pdFAIL;
3239         }
3240         else
3241         {
3242             taskENTER_CRITICAL();
3243             {
3244                 /* The queue is no longer contained in the set. */
3245                 pxQueueOrSemaphore->pxQueueSetContainer = NULL;
3246             }
3247             taskEXIT_CRITICAL();
3248             xReturn = pdPASS;
3249         }
3250 
3251         traceRETURN_xQueueRemoveFromSet( xReturn );
3252 
3253         return xReturn;
3254     }
3255 
3256 #endif /* configUSE_QUEUE_SETS */
3257 /*-----------------------------------------------------------*/
3258 
3259 #if ( configUSE_QUEUE_SETS == 1 )
3260 
xQueueSelectFromSet(QueueSetHandle_t xQueueSet,TickType_t const xTicksToWait)3261     QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet,
3262                                                 TickType_t const xTicksToWait )
3263     {
3264         QueueSetMemberHandle_t xReturn = NULL;
3265 
3266         traceENTER_xQueueSelectFromSet( xQueueSet, xTicksToWait );
3267 
3268         ( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait );
3269 
3270         traceRETURN_xQueueSelectFromSet( xReturn );
3271 
3272         return xReturn;
3273     }
3274 
3275 #endif /* configUSE_QUEUE_SETS */
3276 /*-----------------------------------------------------------*/
3277 
3278 #if ( configUSE_QUEUE_SETS == 1 )
3279 
xQueueSelectFromSetFromISR(QueueSetHandle_t xQueueSet)3280     QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
3281     {
3282         QueueSetMemberHandle_t xReturn = NULL;
3283 
3284         traceENTER_xQueueSelectFromSetFromISR( xQueueSet );
3285 
3286         ( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL );
3287 
3288         traceRETURN_xQueueSelectFromSetFromISR( xReturn );
3289 
3290         return xReturn;
3291     }
3292 
3293 #endif /* configUSE_QUEUE_SETS */
3294 /*-----------------------------------------------------------*/
3295 
3296 #if ( configUSE_QUEUE_SETS == 1 )
3297 
prvNotifyQueueSetContainer(const Queue_t * const pxQueue)3298     static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue )
3299     {
3300         Queue_t * pxQueueSetContainer = pxQueue->pxQueueSetContainer;
3301         BaseType_t xReturn = pdFALSE;
3302 
3303         /* This function must be called form a critical section. */
3304 
3305         /* The following line is not reachable in unit tests because every call
3306          * to prvNotifyQueueSetContainer is preceded by a check that
3307          * pxQueueSetContainer != NULL */
3308         configASSERT( pxQueueSetContainer ); /* LCOV_EXCL_BR_LINE */
3309         configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
3310 
3311         if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
3312         {
3313             const int8_t cTxLock = pxQueueSetContainer->cTxLock;
3314 
3315             traceQUEUE_SET_SEND( pxQueueSetContainer );
3316 
3317             /* The data copied is the handle of the queue that contains data. */
3318             xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, queueSEND_TO_BACK );
3319 
3320             if( cTxLock == queueUNLOCKED )
3321             {
3322                 if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
3323                 {
3324                     if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
3325                     {
3326                         /* The task waiting has a higher priority. */
3327                         xReturn = pdTRUE;
3328                     }
3329                     else
3330                     {
3331                         mtCOVERAGE_TEST_MARKER();
3332                     }
3333                 }
3334                 else
3335                 {
3336                     mtCOVERAGE_TEST_MARKER();
3337                 }
3338             }
3339             else
3340             {
3341                 prvIncrementQueueTxLock( pxQueueSetContainer, cTxLock );
3342             }
3343         }
3344         else
3345         {
3346             mtCOVERAGE_TEST_MARKER();
3347         }
3348 
3349         return xReturn;
3350     }
3351 
3352 #endif /* configUSE_QUEUE_SETS */
3353