/*
 * FreeRTOS Kernel V10.2.1
 * Copyright (C) 2019 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * http://www.FreeRTOS.org
 * http://aws.amazon.com/freertos
 *
 * 1 tab == 4 spaces!
 */

#include <stdlib.h>
#include <string.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers.  That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

#include "FreeRTOS.h"
#include "task.h"
#include "queue.h"

#if ( configUSE_CO_ROUTINES == 1 )
	#include "croutine.h"
#endif

/* Lint e9021, e961 and e750 are suppressed as a MISRA exception justified
because the MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
for the header files above, but not in this file, in order to generate the
correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE /*lint !e961 !e750 !e9021. */


/* Constants used with the cRxLock and cTxLock structure members. */
#define queueUNLOCKED					( ( int8_t ) -1 )
#define queueLOCKED_UNMODIFIED			( ( int8_t ) 0 )

/* When the Queue_t structure is used to represent a base queue its pcHead and
pcTail members are used as pointers into the queue storage area.  When the
Queue_t structure is used to represent a mutex the pcHead and pcTail pointers
are not necessary, and the pcHead pointer is set to NULL to indicate that the
structure instead holds a pointer to the mutex holder (if any).  Map an
alternative name to the pcHead structure member so the readability of the code
is maintained.  The QueuePointers_t and SemaphoreData_t types are used to form
a union as their usage is mutually exclusive, dependent on what the queue is
being used for. */
#define uxQueueType						pcHead
#define queueQUEUE_IS_MUTEX				NULL
typedef struct QueuePointers
{
	int8_t *pcTail;					/*< Points to the byte at the end of the queue storage area.  One more byte is allocated than necessary to store the queue items, and this is used as a marker. */
	int8_t *pcReadFrom;				/*< Points to the last place that a queued item was read from when the structure is used as a queue. */
} QueuePointers_t;

typedef struct SemaphoreData
{
	TaskHandle_t xMutexHolder;		 /*< The handle of the task that holds the mutex. */
	UBaseType_t uxRecursiveCallCount;/*< Maintains a count of the number of times a recursive mutex has been recursively 'taken' when the structure is used as a mutex. */
} SemaphoreData_t;

/* Semaphores do not actually store or copy data, so have an item size of
zero. */
#define queueSEMAPHORE_QUEUE_ITEM_LENGTH ( ( UBaseType_t ) 0 )
#define queueMUTEX_GIVE_BLOCK_TIME		 ( ( TickType_t ) 0U )

#if( configUSE_PREEMPTION == 0 )
	/* If the cooperative scheduler is being used then a yield should not be
	performed just because a higher priority task has been woken. */
	#define queueYIELD_IF_USING_PREEMPTION()
#else
	#define queueYIELD_IF_USING_PREEMPTION() portYIELD_WITHIN_API()
#endif

/*
 * Definition of the queue used by the scheduler.
 * Items are queued by copy, not reference.  See the following link for the
 * rationale: https://www.freertos.org/Embedded-RTOS-Queues.html
 */
typedef struct QueueDefinition 		/* The old naming convention is used to prevent breaking kernel aware debuggers. */
{
	int8_t *pcHead;					/*< Points to the beginning of the queue storage area. */
	int8_t *pcWriteTo;				/*< Points to the next free place in the storage area. */

	union
	{
		QueuePointers_t xQueue;		/*< Data required exclusively when this structure is used as a queue. */
		SemaphoreData_t xSemaphore; /*< Data required exclusively when this structure is used as a semaphore. */
	} u;

	List_t xTasksWaitingToSend;		/*< List of tasks that are blocked waiting to post onto this queue.  Stored in priority order. */
	List_t xTasksWaitingToReceive;	/*< List of tasks that are blocked waiting to read from this queue.  Stored in priority order. */

	volatile UBaseType_t uxMessagesWaiting;/*< The number of items currently in the queue. */
	UBaseType_t uxLength;			/*< The length of the queue defined as the number of items it will hold, not the number of bytes. */
	UBaseType_t uxItemSize;			/*< The size of each item that the queue will hold. */

	volatile int8_t cRxLock;		/*< Stores the number of items received from the queue (removed from the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */
	volatile int8_t cTxLock;		/*< Stores the number of items transmitted to the queue (added to the queue) while the queue was locked.  Set to queueUNLOCKED when the queue is not locked. */

	#if( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
		uint8_t ucStaticallyAllocated;	/*< Set to pdTRUE if the memory used by the queue was statically allocated to ensure no attempt is made to free the memory. */
	#endif

	#if ( configUSE_QUEUE_SETS == 1 )
		struct QueueDefinition *pxQueueSetContainer;
	#endif

	#if ( configUSE_TRACE_FACILITY == 1 )
		UBaseType_t uxQueueNumber;
		uint8_t ucQueueType;
	#endif

	portMUX_TYPE mux;		/*< Mutex (spinlock) required due to SMP. */

} xQUEUE;

/* The old xQUEUE name is maintained above then typedefed to the new Queue_t
name below to enable the use of older kernel aware debuggers. */
typedef xQUEUE Queue_t;

/*-----------------------------------------------------------*/

/*
 * The queue registry is just a means for kernel aware debuggers to locate
 * queue structures.  It has no other purpose so is an optional component.
 */
#if ( configQUEUE_REGISTRY_SIZE > 0 )

	/* The type stored within the queue registry array.  This allows a name
	to be assigned to each queue making kernel aware debugging a little
	more user friendly. */
	typedef struct QUEUE_REGISTRY_ITEM
	{
		const char *pcQueueName; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
		QueueHandle_t xHandle;
	} xQueueRegistryItem;

	/* The old xQueueRegistryItem name is maintained above then typedefed to
	the new QueueRegistryItem_t name below to enable the use of older kernel
	aware debuggers. */
	typedef xQueueRegistryItem QueueRegistryItem_t;

	/* The queue registry is simply an array of QueueRegistryItem_t structures.
	The pcQueueName member of a structure being NULL is indicative of the
	array position being vacant. */
	PRIVILEGED_DATA QueueRegistryItem_t xQueueRegistry[ configQUEUE_REGISTRY_SIZE ];

	/* Spinlock used to protect the queue registry against simultaneous
	access. */
	static portMUX_TYPE queue_registry_spinlock = portMUX_INITIALIZER_UNLOCKED;

#endif /* configQUEUE_REGISTRY_SIZE */
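
/*
 * Illustrative usage sketch (assumes configQUEUE_REGISTRY_SIZE > 0): a queue
 * is typically added to the registry straight after creation so a kernel
 * aware debugger can display it by name.  The handle and name below are
 * hypothetical.
 *
 *	QueueHandle_t xRxQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *	if( xRxQueue != NULL )
 *	{
 *		vQueueAddToRegistry( xRxQueue, "RxQueue" );
 *	}
 */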

/*
 * Unlocks a queue locked by a call to prvLockQueue.  Locking a queue does not
 * prevent an ISR from adding or removing items to the queue, but does prevent
 * an ISR from removing tasks from the queue event lists.  If an ISR finds a
 * queue is locked it will instead increment the appropriate queue lock count
 * to indicate that a task may require unblocking.  When the queue is unlocked
 * these lock counts are inspected, and the appropriate action taken.
 */
static void prvUnlockQueue( Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any data in a queue.
 *
 * @return pdTRUE if the queue contains no items, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Uses a critical section to determine if there is any space in a queue.
 *
 * @return pdTRUE if there is no space, otherwise pdFALSE.
 */
static BaseType_t prvIsQueueFull( const Queue_t *pxQueue ) PRIVILEGED_FUNCTION;

/*
 * Copies an item into the queue, either at the front of the queue or the
 * back of the queue.
 */
static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition ) PRIVILEGED_FUNCTION;

/*
 * Copies an item out of a queue.
 */
static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer ) PRIVILEGED_FUNCTION;

#if ( configUSE_QUEUE_SETS == 1 )
	/*
	 * Checks to see if a queue is a member of a queue set, and if so, notifies
	 * the queue set that the queue contains data.
	 */
	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition ) PRIVILEGED_FUNCTION;
#endif

/*
 * Called after a Queue_t structure has been allocated either statically or
 * dynamically to fill in the structure's members.
 */
static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;

/*
 * Mutexes are a special type of queue.  When a mutex is created, first the
 * queue is created, then prvInitialiseMutex() is called to configure the queue
 * as a mutex.
 */
#if( configUSE_MUTEXES == 1 )
	static void prvInitialiseMutex( Queue_t *pxNewQueue ) PRIVILEGED_FUNCTION;
#endif

#if( configUSE_MUTEXES == 1 )
	/*
	 * If a task waiting for a mutex causes the mutex holder to inherit a
	 * priority, but the waiting task times out, then the holder should
	 * disinherit the priority - but only down to the highest priority of any
	 * other tasks that are waiting for the same mutex.  This function returns
	 * that priority.
	 */
	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue ) PRIVILEGED_FUNCTION;
#endif
/*-----------------------------------------------------------*/

/*
 * Macro to mark a queue as locked.  Locking a queue prevents an ISR from
 * accessing the queue event lists.
 */
#define prvLockQueue( pxQueue )								\
	taskENTER_CRITICAL( &( ( pxQueue )->mux ) );			\
	{														\
		if( ( pxQueue )->cRxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->cRxLock = queueLOCKED_UNMODIFIED;	\
		}													\
		if( ( pxQueue )->cTxLock == queueUNLOCKED )			\
		{													\
			( pxQueue )->cTxLock = queueLOCKED_UNMODIFIED;	\
		}													\
	}														\
	taskEXIT_CRITICAL( &( ( pxQueue )->mux ) )
/*-----------------------------------------------------------*/

BaseType_t xQueueGenericReset( QueueHandle_t xQueue, BaseType_t xNewQueue )
{
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );

	if( xNewQueue == pdTRUE )
	{
		vPortCPUInitializeMutex( &pxQueue->mux );
	}

	taskENTER_CRITICAL( &pxQueue->mux );
	{
		pxQueue->u.xQueue.pcTail = pxQueue->pcHead + ( pxQueue->uxLength * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
		pxQueue->uxMessagesWaiting = ( UBaseType_t ) 0U;
		pxQueue->pcWriteTo = pxQueue->pcHead;
		pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead + ( ( pxQueue->uxLength - 1U ) * pxQueue->uxItemSize ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */
		pxQueue->cRxLock = queueUNLOCKED;
		pxQueue->cTxLock = queueUNLOCKED;

		if( xNewQueue == pdFALSE )
		{
			/* If there are tasks blocked waiting to read from the queue, then
			the tasks will remain blocked as after this function exits the queue
			will still be empty.  If there are tasks blocked waiting to write to
			the queue, then one should be unblocked as after this function exits
			it will be possible to write to it. */
			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
			{
				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
				{
					queueYIELD_IF_USING_PREEMPTION();
				}
				else
				{
					mtCOVERAGE_TEST_MARKER();
				}
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}
		}
		else
		{
			/* Ensure the event queues start in the correct state. */
			vListInitialise( &( pxQueue->xTasksWaitingToSend ) );
			vListInitialise( &( pxQueue->xTasksWaitingToReceive ) );
		}
	}
	taskEXIT_CRITICAL( &pxQueue->mux );

	/* A value is returned for calling semantic consistency with previous
	versions. */
	return pdPASS;
}
/*-----------------------------------------------------------*/
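
/*
 * Illustrative usage sketch: application code does not normally call
 * xQueueGenericReset() directly, but uses the xQueueReset() macro from
 * queue.h, which maps onto this function with xNewQueue set to pdFALSE.
 * The handle below is hypothetical.
 *
 *	( void ) xQueueReset( xRxQueue );	// Empties the queue; always returns pdPASS.
 */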

#if( configSUPPORT_STATIC_ALLOCATION == 1 )

	QueueHandle_t xQueueGenericCreateStatic( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, StaticQueue_t *pxStaticQueue, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		/* The StaticQueue_t structure and the queue storage area must be
		supplied. */
		configASSERT( pxStaticQueue != NULL );

		/* A queue storage area should be provided if the item size is not 0,
		and should not be provided if the item size is 0. */
		configASSERT( !( ( pucQueueStorage != NULL ) && ( uxItemSize == 0 ) ) );
		configASSERT( !( ( pucQueueStorage == NULL ) && ( uxItemSize != 0 ) ) );

		#if( configASSERT_DEFINED == 1 )
		{
			/* Sanity check that the size of the structure used to declare a
			variable of type StaticQueue_t or StaticSemaphore_t equals the size of
			the real queue and semaphore structures. */
			volatile size_t xSize = sizeof( StaticQueue_t );
			configASSERT( xSize == sizeof( Queue_t ) );
			( void ) xSize; /* Keeps lint quiet when configASSERT() is not defined. */
		}
		#endif /* configASSERT_DEFINED */

		/* The address of a statically allocated queue was passed in, use it.
		The address of a statically allocated storage area was also passed in
		but is already set. */
		pxNewQueue = ( Queue_t * ) pxStaticQueue; /*lint !e740 !e9087 Unusual cast is ok as the structures are designed to have the same alignment, and the size is checked by an assert. */

		if( pxNewQueue != NULL )
		{
			#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
			{
				/* Queues can be allocated either statically or dynamically, so
				note this queue was allocated statically in case the queue is
				later deleted. */
				pxNewQueue->ucStaticallyAllocated = pdTRUE;
			}
			#endif /* configSUPPORT_DYNAMIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
			mtCOVERAGE_TEST_MARKER();
		}

		return pxNewQueue;
	}

#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/
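
/*
 * Illustrative usage sketch: creating a queue on statically allocated memory
 * via the xQueueCreateStatic() macro from queue.h, which maps onto
 * xQueueGenericCreateStatic().  The length and item type are hypothetical.
 *
 *	#define QUEUE_LENGTH	10
 *	#define ITEM_SIZE		sizeof( uint32_t )
 *
 *	static StaticQueue_t xQueueBuffer;
 *	static uint8_t ucQueueStorage[ QUEUE_LENGTH * ITEM_SIZE ];
 *
 *	QueueHandle_t xQueue = xQueueCreateStatic( QUEUE_LENGTH,
 *											   ITEM_SIZE,
 *											   ucQueueStorage,
 *											   &xQueueBuffer );
 *
 *	// xQueue cannot be NULL here as no dynamic allocation was performed.
 */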

#if( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

	QueueHandle_t xQueueGenericCreate( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, const uint8_t ucQueueType )
	{
	Queue_t *pxNewQueue;
	size_t xQueueSizeInBytes;
	uint8_t *pucQueueStorage;

		configASSERT( uxQueueLength > ( UBaseType_t ) 0 );

		if( uxItemSize == ( UBaseType_t ) 0 )
		{
			/* There is not going to be a queue storage area. */
			xQueueSizeInBytes = ( size_t ) 0;
		}
		else
		{
			/* Allocate enough space to hold the maximum number of items that
			can be in the queue at any time. */
			xQueueSizeInBytes = ( size_t ) ( uxQueueLength * uxItemSize ); /*lint !e961 MISRA exception as the casts are only redundant for some ports. */
		}

		/* Check for multiplication overflow. */
		configASSERT( ( uxItemSize == 0 ) || ( uxQueueLength == ( xQueueSizeInBytes / uxItemSize ) ) );

		/* Check for addition overflow. */
		configASSERT( ( sizeof( Queue_t ) + xQueueSizeInBytes ) > xQueueSizeInBytes );

		/* Allocate the queue and storage area.  Justification for MISRA
		deviation as follows:  pvPortMalloc() always ensures returned memory
		blocks are aligned per the requirements of the MCU stack.  In this case
		pvPortMalloc() must return a pointer that is guaranteed to meet the
		alignment requirements of the Queue_t structure - which in this case
		is an int8_t *.  Therefore, whenever the stack alignment requirements
		are greater than or equal to the pointer to char requirements the cast
		is safe.  In other cases alignment requirements are not strict (one or
		two bytes). */
		pxNewQueue = ( Queue_t * ) pvPortMalloc( sizeof( Queue_t ) + xQueueSizeInBytes ); /*lint !e9087 !e9079 see comment above. */

		if( pxNewQueue != NULL )
		{
			/* Jump past the queue structure to find the location of the queue
			storage area. */
			pucQueueStorage = ( uint8_t * ) pxNewQueue;
			pucQueueStorage += sizeof( Queue_t ); /*lint !e9016 Pointer arithmetic allowed on char types, especially when it assists conveying intent. */

			#if( configSUPPORT_STATIC_ALLOCATION == 1 )
			{
				/* Queues can be created either statically or dynamically, so
				note this queue was created dynamically in case it is later
				deleted. */
				pxNewQueue->ucStaticallyAllocated = pdFALSE;
			}
			#endif /* configSUPPORT_STATIC_ALLOCATION */

			prvInitialiseNewQueue( uxQueueLength, uxItemSize, pucQueueStorage, ucQueueType, pxNewQueue );
		}
		else
		{
			traceQUEUE_CREATE_FAILED( ucQueueType );
			mtCOVERAGE_TEST_MARKER();
		}

		return pxNewQueue;
	}

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
/*-----------------------------------------------------------*/
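
/*
 * Illustrative usage sketch: creating a queue on heap memory via the
 * xQueueCreate() macro from queue.h, which maps onto xQueueGenericCreate().
 * The length and item type are hypothetical.
 *
 *	QueueHandle_t xQueue = xQueueCreate( 10, sizeof( uint32_t ) );
 *	if( xQueue == NULL )
 *	{
 *		// There was not enough heap memory available to create the queue.
 *	}
 */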

static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength, const UBaseType_t uxItemSize, uint8_t *pucQueueStorage, const uint8_t ucQueueType, Queue_t *pxNewQueue )
{
	/* Remove compiler warnings about unused parameters should
	configUSE_TRACE_FACILITY not be set to 1. */
	( void ) ucQueueType;

	if( uxItemSize == ( UBaseType_t ) 0 )
	{
		/* No RAM was allocated for the queue storage area, but pcHead cannot
		be set to NULL because NULL is used as a key to say the queue is used as
		a mutex.  Therefore just set pcHead to point to the queue as a benign
		value that is known to be within the memory map. */
		pxNewQueue->pcHead = ( int8_t * ) pxNewQueue;
	}
	else
	{
		/* Set the head to the start of the queue storage area. */
		pxNewQueue->pcHead = ( int8_t * ) pucQueueStorage;
	}

	/* Initialise the queue members as described where the queue type is
	defined. */
	pxNewQueue->uxLength = uxQueueLength;
	pxNewQueue->uxItemSize = uxItemSize;
	( void ) xQueueGenericReset( pxNewQueue, pdTRUE );

	#if ( configUSE_TRACE_FACILITY == 1 )
	{
		pxNewQueue->ucQueueType = ucQueueType;
	}
	#endif /* configUSE_TRACE_FACILITY */

	#if( configUSE_QUEUE_SETS == 1 )
	{
		pxNewQueue->pxQueueSetContainer = NULL;
	}
	#endif /* configUSE_QUEUE_SETS */

	traceQUEUE_CREATE( pxNewQueue );
}
/*-----------------------------------------------------------*/

#if( configUSE_MUTEXES == 1 )

	static void prvInitialiseMutex( Queue_t *pxNewQueue )
	{
		if( pxNewQueue != NULL )
		{
			/* The queue create function will set all the queue structure members
			correctly for a generic queue, but this function is creating a
			mutex.  Overwrite those members that need to be set differently -
			in particular the information required for priority inheritance. */
			pxNewQueue->u.xSemaphore.xMutexHolder = NULL;
			pxNewQueue->uxQueueType = queueQUEUE_IS_MUTEX;

			/* In case this is a recursive mutex. */
			pxNewQueue->u.xSemaphore.uxRecursiveCallCount = 0;
			vPortCPUInitializeMutex( &pxNewQueue->mux );

			traceCREATE_MUTEX( pxNewQueue );

			/* Start with the semaphore in the expected state. */
			( void ) xQueueGenericSend( pxNewQueue, NULL, ( TickType_t ) 0U, queueSEND_TO_BACK );
		}
		else
		{
			traceCREATE_MUTEX_FAILED();
		}
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateMutex( const uint8_t ucQueueType )
	{
	QueueHandle_t xNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		xNewQueue = xQueueGenericCreate( uxMutexLength, uxMutexSize, ucQueueType );
		prvInitialiseMutex( ( Queue_t * ) xNewQueue );

		return xNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/
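
/*
 * Illustrative usage sketch: application code creates and uses a mutex
 * through the semphr.h macros rather than calling xQueueCreateMutex()
 * directly.  The resource being guarded is hypothetical.
 *
 *	SemaphoreHandle_t xMutex = xSemaphoreCreateMutex();
 *	if( xMutex != NULL )
 *	{
 *		if( xSemaphoreTake( xMutex, pdMS_TO_TICKS( 10 ) ) == pdTRUE )
 *		{
 *			// Access the shared resource here...
 *			( void ) xSemaphoreGive( xMutex );
 *		}
 *	}
 */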

#if( ( configUSE_MUTEXES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateMutexStatic( const uint8_t ucQueueType, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xNewQueue;
	const UBaseType_t uxMutexLength = ( UBaseType_t ) 1, uxMutexSize = ( UBaseType_t ) 0;

		/* Prevent compiler warnings about unused parameters if
		configUSE_TRACE_FACILITY does not equal 1. */
		( void ) ucQueueType;

		xNewQueue = xQueueGenericCreateStatic( uxMutexLength, uxMutexSize, NULL, pxStaticQueue, ucQueueType );
		prvInitialiseMutex( ( Queue_t * ) xNewQueue );

		return xNewQueue;
	}

#endif /* configUSE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	TaskHandle_t xQueueGetMutexHolder( QueueHandle_t xSemaphore )
	{
	TaskHandle_t pxReturn;
	Queue_t * const pxSemaphore = ( Queue_t * ) xSemaphore;

		/* This function is called by xSemaphoreGetMutexHolder(), and should not
		be called directly.  Note:  This is a good way of determining if the
		calling task is the mutex holder, but not a good way of determining the
		identity of the mutex holder, as the holder may change between the
		following critical section exiting and the function returning. */
		taskENTER_CRITICAL( &pxSemaphore->mux );
		{
			if( pxSemaphore->uxQueueType == queueQUEUE_IS_MUTEX )
			{
				pxReturn = pxSemaphore->u.xSemaphore.xMutexHolder;
			}
			else
			{
				pxReturn = NULL;
			}
		}
		taskEXIT_CRITICAL( &pxSemaphore->mux );

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( ( configUSE_MUTEXES == 1 ) && ( INCLUDE_xSemaphoreGetMutexHolder == 1 ) )

	TaskHandle_t xQueueGetMutexHolderFromISR( QueueHandle_t xSemaphore )
	{
	TaskHandle_t pxReturn;

		configASSERT( xSemaphore );

		/* Mutexes cannot be used in interrupt service routines, so the mutex
		holder should not change in an ISR, and therefore a critical section is
		not required here. */
		if( ( ( Queue_t * ) xSemaphore )->uxQueueType == queueQUEUE_IS_MUTEX )
		{
			pxReturn = ( ( Queue_t * ) xSemaphore )->u.xSemaphore.xMutexHolder;
		}
		else
		{
			pxReturn = NULL;
		}

		return pxReturn;
	} /*lint !e818 xSemaphore cannot be a pointer to const because it is a typedef. */

#endif
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueGiveMutexRecursive( QueueHandle_t xMutex )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* If this is the task that holds the mutex then xMutexHolder will not
		change outside of this task.  If this task does not hold the mutex then
		xMutexHolder can never coincidentally equal the task's handle, and as
		this is the only condition we are interested in it does not matter if
		xMutexHolder is accessed simultaneously by another task.  Therefore no
		mutual exclusion is required to test the xMutexHolder variable. */
		if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
		{
			traceGIVE_MUTEX_RECURSIVE( pxMutex );

			/* uxRecursiveCallCount cannot be zero if xMutexHolder is equal to
			the task handle, therefore no underflow check is required.  Also,
			uxRecursiveCallCount is only modified by the mutex holder, and as
			there can only be one, no mutual exclusion is required to modify the
			uxRecursiveCallCount member. */
			( pxMutex->u.xSemaphore.uxRecursiveCallCount )--;

			/* Has the recursive call count unwound to 0? */
			if( pxMutex->u.xSemaphore.uxRecursiveCallCount == ( UBaseType_t ) 0 )
			{
				/* Return the mutex.  This will automatically unblock any other
				task that might be waiting to access the mutex. */
				( void ) xQueueGenericSend( pxMutex, NULL, queueMUTEX_GIVE_BLOCK_TIME, queueSEND_TO_BACK );
			}
			else
			{
				mtCOVERAGE_TEST_MARKER();
			}

			xReturn = pdPASS;
		}
		else
		{
			/* The mutex cannot be given because the calling task is not the
			holder. */
			xReturn = pdFAIL;

			traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex );
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/

#if ( configUSE_RECURSIVE_MUTEXES == 1 )

	BaseType_t xQueueTakeMutexRecursive( QueueHandle_t xMutex, TickType_t xTicksToWait )
	{
	BaseType_t xReturn;
	Queue_t * const pxMutex = ( Queue_t * ) xMutex;

		configASSERT( pxMutex );

		/* Comments regarding mutual exclusion as per those within
		xQueueGiveMutexRecursive(). */

		traceTAKE_MUTEX_RECURSIVE( pxMutex );

		if( pxMutex->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() )
		{
			( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
			xReturn = pdPASS;
		}
		else
		{
			xReturn = xQueueSemaphoreTake( pxMutex, xTicksToWait );

			/* pdPASS will only be returned if the mutex was successfully
			obtained.  The calling task may have entered the Blocked state
			before reaching here. */
			if( xReturn != pdFAIL )
			{
				( pxMutex->u.xSemaphore.uxRecursiveCallCount )++;
			}
			else
			{
				traceTAKE_MUTEX_RECURSIVE_FAILED( pxMutex );
			}
		}

		return xReturn;
	}

#endif /* configUSE_RECURSIVE_MUTEXES */
/*-----------------------------------------------------------*/
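
/*
 * Illustrative usage sketch: recursive take/give through the semphr.h
 * macros.  Each successful take must eventually be balanced by a give
 * before the mutex becomes available to other tasks.
 *
 *	SemaphoreHandle_t xRecMutex = xSemaphoreCreateRecursiveMutex();
 *
 *	if( xSemaphoreTakeRecursive( xRecMutex, portMAX_DELAY ) == pdPASS )
 *	{
 *		// A nested take from the holding task succeeds immediately.
 *		( void ) xSemaphoreTakeRecursive( xRecMutex, 0 );
 *		( void ) xSemaphoreGiveRecursive( xRecMutex );
 *		( void ) xSemaphoreGiveRecursive( xRecMutex );
 *	}
 */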

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateCountingSemaphoreStatic( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount, StaticQueue_t *pxStaticQueue )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreateStatic( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, NULL, pxStaticQueue, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/

#if( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )

	QueueHandle_t xQueueCreateCountingSemaphore( const UBaseType_t uxMaxCount, const UBaseType_t uxInitialCount )
	{
	QueueHandle_t xHandle;

		configASSERT( uxMaxCount != 0 );
		configASSERT( uxInitialCount <= uxMaxCount );

		xHandle = xQueueGenericCreate( uxMaxCount, queueSEMAPHORE_QUEUE_ITEM_LENGTH, queueQUEUE_TYPE_COUNTING_SEMAPHORE );

		if( xHandle != NULL )
		{
			( ( Queue_t * ) xHandle )->uxMessagesWaiting = uxInitialCount;

			traceCREATE_COUNTING_SEMAPHORE();
		}
		else
		{
			traceCREATE_COUNTING_SEMAPHORE_FAILED();
		}

		return xHandle;
	}

#endif /* ( ( configUSE_COUNTING_SEMAPHORES == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) ) */
/*-----------------------------------------------------------*/
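
/*
 * Illustrative usage sketch: a counting semaphore created via the semphr.h
 * macro, here with a hypothetical maximum count of 5 and an initial count of
 * 0 (e.g. to count pending events).
 *
 *	SemaphoreHandle_t xEvents = xSemaphoreCreateCounting( 5, 0 );
 *	if( xEvents != NULL )
 *	{
 *		( void ) xSemaphoreGive( xEvents );					// Count becomes 1.
 *		( void ) xSemaphoreTake( xEvents, portMAX_DELAY );	// Count back to 0.
 *	}
 */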

BaseType_t xQueueGenericSend( QueueHandle_t xQueue, const void * const pvItemToQueue, TickType_t xTicksToWait, const BaseType_t xCopyPosition )
{
BaseType_t xEntryTimeSet = pdFALSE, xYieldRequired;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

	#if( ( configUSE_MUTEXES == 1 ) && ( configCHECK_MUTEX_GIVEN_BY_OWNER == 1 ) )
		/* A mutex should only be given back by the task that holds it. */
		configASSERT( ( pxQueue->uxQueueType != queueQUEUE_IS_MUTEX )
					  || ( pxQueue->u.xSemaphore.xMutexHolder == NULL )
					  || ( pxQueue->u.xSemaphore.xMutexHolder == xTaskGetCurrentTaskHandle() ) );
	#endif

	/*lint -save -e904 This function relaxes the coding standard somewhat to
	allow return statements within the function itself.  This is done in the
	interest of execution time efficiency. */
	for( ;; )
	{
		taskENTER_CRITICAL( &pxQueue->mux );
		{
			/* Is there room on the queue now?  The running task must be the
			highest priority task wanting to access the queue.  If the head item
			in the queue is to be overwritten then it does not matter if the
			queue is full. */
			if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
			{
				traceQUEUE_SEND( pxQueue );

				#if ( configUSE_QUEUE_SETS == 1 )
				{
				UBaseType_t uxPreviousMessagesWaiting = pxQueue->uxMessagesWaiting;

					xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( ( xCopyPosition == queueOVERWRITE ) && ( uxPreviousMessagesWaiting != ( UBaseType_t ) 0 ) )
						{
							/* Do not notify the queue set as an existing item
							was overwritten in the queue so the number of items
							in the queue has not changed. */
							mtCOVERAGE_TEST_MARKER();
						}
						else if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock. A context switch is required. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						/* If there was a task waiting for data to arrive on the
						queue then unblock it now. */
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The unblocked task has a priority higher than
								our own so yield immediately.  Yes it is ok to
								do this from within the critical section - the
								kernel takes care of that. */
								queueYIELD_IF_USING_PREEMPTION();
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else if( xYieldRequired != pdFALSE )
						{
							/* This path is a special case that will only get
							executed if the task was holding multiple mutexes
							and the mutexes were given back in an order that is
							different to that in which they were taken. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					xYieldRequired = prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

					/* If there was a task waiting for data to arrive on the
					queue then unblock it now. */
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The unblocked task has a priority higher than
							our own so yield immediately.  Yes it is ok to do
							this from within the critical section - the kernel
							takes care of that. */
							queueYIELD_IF_USING_PREEMPTION();
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else if( xYieldRequired != pdFALSE )
					{
						/* This path is a special case that will only get
						executed if the task was holding multiple mutexes and
						the mutexes were given back in an order that is
						different to that in which they were taken. */
						queueYIELD_IF_USING_PREEMPTION();
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */

				taskEXIT_CRITICAL( &pxQueue->mux );
				return pdPASS;
			}
			else
			{
				if( xTicksToWait == ( TickType_t ) 0 )
				{
					/* The queue was full and no block time is specified (or
					the block time has expired) so leave now. */
					taskEXIT_CRITICAL( &pxQueue->mux );

					/* Return to the original privilege level before exiting
					the function. */
					traceQUEUE_SEND_FAILED( pxQueue );
					return errQUEUE_FULL;
				}
				else if( xEntryTimeSet == pdFALSE )
				{
					/* The queue was full and a block time was specified so
					configure the timeout structure. */
					vTaskInternalSetTimeOutState( &xTimeOut );
					xEntryTimeSet = pdTRUE;
				}
				else
				{
					/* Entry time was already set. */
					mtCOVERAGE_TEST_MARKER();
				}
			}
		}
		taskEXIT_CRITICAL( &pxQueue->mux );

		/* Interrupts and other tasks can send to and receive from the queue
		now the critical section has been exited. */

		taskENTER_CRITICAL( &pxQueue->mux );
		prvLockQueue( pxQueue );

		/* Update the timeout state to see if it has expired yet. */
		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
		{
			if( prvIsQueueFull( pxQueue ) != pdFALSE )
			{
				traceBLOCKING_ON_QUEUE_SEND( pxQueue );
				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToSend ), xTicksToWait );

				/* Unlocking the queue means queue events can affect the
				event list.  It is possible that interrupts occurring now
				remove this task from the event list again - but as the
				scheduler is suspended the task will go onto the pending
				ready list instead of the actual ready list. */
				prvUnlockQueue( pxQueue );

				/* Resuming the scheduler will move tasks from the pending
				ready list into the ready list - so it is feasible that this
				task is already in a ready list before it yields - in which
				case the yield will not cause a context switch unless there
				is also a higher priority task in the pending ready list. */
				taskEXIT_CRITICAL( &pxQueue->mux );
				portYIELD_WITHIN_API();
			}
			else
			{
				/* Try again. */
				prvUnlockQueue( pxQueue );
				taskEXIT_CRITICAL( &pxQueue->mux );
			}
		}
		else
		{
			/* The timeout has expired. */
			prvUnlockQueue( pxQueue );
			taskEXIT_CRITICAL( &pxQueue->mux );

			traceQUEUE_SEND_FAILED( pxQueue );
			return errQUEUE_FULL;
		}
	} /*lint -restore */
}
/*-----------------------------------------------------------*/
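
/*
 * Illustrative usage sketch: application code normally posts through the
 * xQueueSend(), xQueueSendToFront(), xQueueSendToBack() or xQueueOverwrite()
 * macros, all of which map onto xQueueGenericSend() with a different
 * xCopyPosition argument.  The value and timeout below are hypothetical.
 *
 *	uint32_t ulValue = 10;
 *
 *	// Block for up to 100ms waiting for space to become available.
 *	if( xQueueSend( xQueue, &ulValue, pdMS_TO_TICKS( 100 ) ) != pdPASS )
 *	{
 *		// The item could not be posted before the timeout expired.
 *	}
 */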

BaseType_t xQueueGenericSendFromISR( QueueHandle_t xQueue, const void * const pvItemToQueue, BaseType_t * const pxHigherPriorityTaskWoken, const BaseType_t xCopyPosition )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

	configASSERT( pxQueue );
	configASSERT( !( ( pvItemToQueue == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
	configASSERT( !( ( xCopyPosition == queueOVERWRITE ) && ( pxQueue->uxLength != 1 ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	/* Similar to xQueueGenericSend, except without blocking if there is no room
	in the queue.  Also don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */
	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		taskENTER_CRITICAL_ISR( &pxQueue->mux );

		if( ( pxQueue->uxMessagesWaiting < pxQueue->uxLength ) || ( xCopyPosition == queueOVERWRITE ) )
		{
			const int8_t cTxLock = pxQueue->cTxLock;

			traceQUEUE_SEND_FROM_ISR( pxQueue );

			/* Semaphores use xQueueGiveFromISR(), so pxQueue will not be a
			semaphore or mutex.  That means prvCopyDataToQueue() cannot result
			in a task disinheriting a priority and prvCopyDataToQueue() can be
			called here even though the disinherit function does not check if
			the scheduler is suspended before accessing the ready lists. */
			( void ) prvCopyDataToQueue( pxQueue, pvItemToQueue, xCopyPosition );

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( cTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, xCopyPosition ) != pdFALSE )
						{
							/* The queue is a member of a queue set, and posting
							to the queue set caused a higher priority task to
							unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record
							that a context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}

		taskEXIT_CRITICAL_ISR( &pxQueue->mux );
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
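
/*
 * Illustrative usage sketch: posting from an ISR via the xQueueSendFromISR()
 * macro, then requesting a context switch on interrupt exit if a higher
 * priority task was woken.  The handler name and value are hypothetical, and
 * the exact yield-from-ISR idiom is port specific.
 *
 *	void vAnInterruptHandler( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *	uint32_t ulValue = 10;
 *
 *		( void ) xQueueSendFromISR( xQueue, &ulValue, &xHigherPriorityTaskWoken );
 *
 *		if( xHigherPriorityTaskWoken != pdFALSE )
 *		{
 *			portYIELD_FROM_ISR();
 *		}
 *	}
 */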

BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue, BaseType_t * const pxHigherPriorityTaskWoken )
{
BaseType_t xReturn;
UBaseType_t uxSavedInterruptStatus;
Queue_t * const pxQueue = xQueue;

	/* Similar to xQueueGenericSendFromISR() but used with semaphores where the
	item size is 0.  Don't directly wake a task that was blocked on a queue
	read, instead return a flag to say whether a context switch is required or
	not (i.e. has a task with a higher priority than us been woken by this
	post). */

	configASSERT( pxQueue );

	/* xQueueGenericSendFromISR() should be used instead of xQueueGiveFromISR()
	if the item size is not 0. */
	configASSERT( pxQueue->uxItemSize == 0 );

	/* Normally a mutex would not be given from an interrupt, especially if
	there is a mutex holder, as priority inheritance makes no sense for
	interrupts, only tasks. */
	configASSERT( !( ( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX ) && ( pxQueue->u.xSemaphore.xMutexHolder != NULL ) ) );

	/* RTOS ports that support interrupt nesting have the concept of a maximum
	system call (or maximum API call) interrupt priority.  Interrupts that are
	above the maximum system call priority are kept permanently enabled, even
	when the RTOS kernel is in a critical section, but cannot make any calls to
	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
	failure if a FreeRTOS API function is called from an interrupt that has been
	assigned a priority above the configured maximum system call priority.
	Only FreeRTOS functions that end in FromISR can be called from interrupts
	that have been assigned a priority at or (logically) below the maximum
	system call interrupt priority.  FreeRTOS maintains a separate interrupt
	safe API to ensure interrupt entry is as fast and as simple as possible.
	More information (albeit Cortex-M specific) is provided on the following
	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();

	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
	{
		taskENTER_CRITICAL_ISR( &pxQueue->mux );

		const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;

		/* When the queue is used to implement a semaphore no data is ever
		moved through the queue but it is still valid to see if the queue 'has
		space'. */
		if( uxMessagesWaiting < pxQueue->uxLength )
		{
			const int8_t cTxLock = pxQueue->cTxLock;

			traceQUEUE_GIVE_FROM_ISR( pxQueue );

			/* A task can only have an inherited priority if it is a mutex
			holder - and if there is a mutex holder then the mutex cannot be
			given from an ISR.  As this is the ISR version of the function it
			can be assumed there is no mutex holder and no need to determine if
			priority disinheritance is needed.  Simply increase the count of
			messages (semaphores) available. */
			pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;

			/* The event list is not altered if the queue is locked.  This will
			be done when the queue is unlocked later. */
			if( cTxLock == queueUNLOCKED )
			{
				#if ( configUSE_QUEUE_SETS == 1 )
				{
					if( pxQueue->pxQueueSetContainer != NULL )
					{
						if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
						{
							/* The semaphore is a member of a queue set, and
							posting to the queue set caused a higher priority
							task to unblock.  A context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
						{
							if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
							{
								/* The task waiting has a higher priority so
								record that a context switch is required. */
								if( pxHigherPriorityTaskWoken != NULL )
								{
									*pxHigherPriorityTaskWoken = pdTRUE;
								}
								else
								{
									mtCOVERAGE_TEST_MARKER();
								}
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
				}
				#else /* configUSE_QUEUE_SETS */
				{
					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
					{
						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
						{
							/* The task waiting has a higher priority so record
							that a context switch is required. */
							if( pxHigherPriorityTaskWoken != NULL )
							{
								*pxHigherPriorityTaskWoken = pdTRUE;
							}
							else
							{
								mtCOVERAGE_TEST_MARKER();
							}
						}
						else
						{
							mtCOVERAGE_TEST_MARKER();
						}
					}
					else
					{
						mtCOVERAGE_TEST_MARKER();
					}
				}
				#endif /* configUSE_QUEUE_SETS */
			}
			else
			{
				/* Increment the lock count so the task that unlocks the queue
				knows that data was posted while it was locked. */
				pxQueue->cTxLock = ( int8_t ) ( cTxLock + 1 );
			}

			xReturn = pdPASS;
		}
		else
		{
			traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue );
			xReturn = errQUEUE_FULL;
		}
		taskEXIT_CRITICAL_ISR( &pxQueue->mux );
	}
	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );

	return xReturn;
}
/*-----------------------------------------------------------*/
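
/*
 * Illustrative usage sketch: giving a binary or counting semaphore from an
 * ISR via the xSemaphoreGiveFromISR() macro, which maps onto
 * xQueueGiveFromISR().  The handler name is hypothetical, and the exact
 * yield-from-ISR idiom is port specific.
 *
 *	void vAnotherInterruptHandler( void )
 *	{
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *		( void ) xSemaphoreGiveFromISR( xSemaphore, &xHigherPriorityTaskWoken );
 *
 *		if( xHigherPriorityTaskWoken != pdFALSE )
 *		{
 *			portYIELD_FROM_ISR();
 *		}
 *	}
 */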

BaseType_t xQueueReceive( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
{
BaseType_t xEntryTimeSet = pdFALSE;
TimeOut_t xTimeOut;
Queue_t * const pxQueue = xQueue;

	/* Check the pointer is not NULL. */
	configASSERT( ( pxQueue ) );

	/* The buffer into which data is received can only be NULL if the data size
	is zero (so no data is copied into the buffer). */
	configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );

	/* Cannot block if the scheduler is suspended. */
	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
	{
		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
	}
	#endif

1322 	/*lint -save -e904  This function relaxes the coding standard somewhat to
1323 	allow return statements within the function itself.  This is done in the
1324 	interest of execution time efficiency. */
1325 	for( ;; )
1326 	{
1327 		taskENTER_CRITICAL( &pxQueue->mux);
1328 		{
1329 			const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1330 
1331 			/* Is there data in the queue now?  To be running the calling task
1332 			must be the highest priority task wanting to access the queue. */
1333 			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1334 			{
1335 				/* Data available, remove one item. */
1336 				prvCopyDataFromQueue( pxQueue, pvBuffer );
1337 				traceQUEUE_RECEIVE( pxQueue );
1338 				pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
1339 
1340 				/* There is now space in the queue, were any tasks waiting to
1341 				post to the queue?  If so, unblock the highest priority waiting
1342 				task. */
1343 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1344 				{
1345 					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1346 					{
1347 						queueYIELD_IF_USING_PREEMPTION();
1348 					}
1349 					else
1350 					{
1351 						mtCOVERAGE_TEST_MARKER();
1352 					}
1353 				}
1354 				else
1355 				{
1356 					mtCOVERAGE_TEST_MARKER();
1357 				}
1358 
1359 				taskEXIT_CRITICAL( &pxQueue->mux);
1360 				return pdPASS;
1361 			}
1362 			else
1363 			{
1364 				if( xTicksToWait == ( TickType_t ) 0 )
1365 				{
1366 					/* The queue was empty and no block time is specified (or
1367 					the block time has expired) so leave now. */
1368 					taskEXIT_CRITICAL( &pxQueue->mux);
1369 					traceQUEUE_RECEIVE_FAILED( pxQueue );
1370 					return errQUEUE_EMPTY;
1371 				}
1372 				else if( xEntryTimeSet == pdFALSE )
1373 				{
1374 					/* The queue was empty and a block time was specified so
1375 					configure the timeout structure. */
1376 					vTaskInternalSetTimeOutState( &xTimeOut );
1377 					xEntryTimeSet = pdTRUE;
1378 				}
1379 				else
1380 				{
1381 					/* Entry time was already set. */
1382 					mtCOVERAGE_TEST_MARKER();
1383 				}
1384 			}
1385 		}
1386 		taskEXIT_CRITICAL( &pxQueue->mux);
1387 
1388 		/* Interrupts and other tasks can send to and receive from the queue
1389 		now the critical section has been exited. */
1390 
1391 		taskENTER_CRITICAL( &pxQueue->mux);
1392 		prvLockQueue( pxQueue );
1393 
1394 		/* Update the timeout state to see if it has expired yet. */
1395 		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1396 		{
1397 			/* The timeout has not expired.  If the queue is still empty place
1398 			the task on the list of tasks waiting to receive from the queue. */
1399 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1400 			{
1401 				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1402 				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1403 				prvUnlockQueue( pxQueue );
1404 				taskEXIT_CRITICAL( &pxQueue->mux);
1405 				portYIELD_WITHIN_API();
1406 			}
1407 			else
1408 			{
1409 				/* The queue contains data again.  Loop back to try and read the
1410 				data. */
1411 				prvUnlockQueue( pxQueue );
1412 				taskEXIT_CRITICAL( &pxQueue->mux);
1413 			}
1414 		}
1415 		else
1416 		{
1417 			/* Timed out.  If there is no data in the queue exit, otherwise loop
1418 			back and attempt to read the data. */
1419 			prvUnlockQueue( pxQueue );
1420 			taskEXIT_CRITICAL( &pxQueue->mux);
1421 
1422 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1423 			{
1424 				traceQUEUE_RECEIVE_FAILED( pxQueue );
1425 				return errQUEUE_EMPTY;
1426 			}
1427 			else
1428 			{
1429 				mtCOVERAGE_TEST_MARKER();
1430 			}
1431 		}
1432 	} /*lint -restore */
1433 }
1434 /*-----------------------------------------------------------*/
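
/*
 * Example usage of the receive path above (a minimal sketch; xQueue and the
 * receiving task are hypothetical):
 *
 *	void vAReceivingTask( void *pvParameters )
 *	{
 *	uint32_t ulReceived;
 *
 *		for( ;; )
 *		{
 *			// Block for up to 100 ticks waiting for an item.  On success the
 *			// item is copied into ulReceived and removed from the queue.
 *			if( xQueueReceive( xQueue, &ulReceived, ( TickType_t ) 100 ) == pdPASS )
 *			{
 *				// ... process ulReceived ...
 *			}
 *		}
 *	}
 */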
1435 
1436 BaseType_t xQueueSemaphoreTake( QueueHandle_t xQueue, TickType_t xTicksToWait )
1437 {
1438 BaseType_t xEntryTimeSet = pdFALSE;
1439 TimeOut_t xTimeOut;
1440 Queue_t * const pxQueue = xQueue;
1441 
1442 #if( configUSE_MUTEXES == 1 )
1443 	BaseType_t xInheritanceOccurred = pdFALSE;
1444 #endif
1445 
1446 	/* Check the queue pointer is not NULL. */
1447 	configASSERT( ( pxQueue ) );
1448 
1449 	/* Check this really is a semaphore, in which case the item size will be
1450 	0. */
1451 	configASSERT( pxQueue->uxItemSize == 0 );
1452 
1453 	/* Cannot block if the scheduler is suspended. */
1454 	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1455 	{
1456 		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1457 	}
1458 	#endif
1459 
1460 
1461 	/*lint -save -e904 This function relaxes the coding standard somewhat to allow return
1462 	statements within the function itself.  This is done in the interest
1463 	of execution time efficiency. */
1464 	for( ;; )
1465 	{
1466 		taskENTER_CRITICAL( &pxQueue->mux);
1467 		{
1468 			/* Semaphores are queues with an item size of 0, and where the
1469 			number of messages in the queue is the semaphore's count value. */
1470 			const UBaseType_t uxSemaphoreCount = pxQueue->uxMessagesWaiting;
1471 
1472 			/* Is there data in the queue now?  To be running the calling task
1473 			must be the highest priority task wanting to access the queue. */
1474 			if( uxSemaphoreCount > ( UBaseType_t ) 0 )
1475 			{
1476 				traceQUEUE_SEMAPHORE_RECEIVE( pxQueue );
1477 
1478 				/* Semaphores are queues with a data size of zero and where the
1479 				messages waiting is the semaphore's count.  Reduce the count. */
1480 				pxQueue->uxMessagesWaiting = uxSemaphoreCount - ( UBaseType_t ) 1;
1481 
1482 				#if ( configUSE_MUTEXES == 1 )
1483 				{
1484 					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1485 					{
1486 						/* Record the information required to implement
1487 						priority inheritance should it become necessary. */
1488 						pxQueue->u.xSemaphore.xMutexHolder = pvTaskIncrementMutexHeldCount();
1489 					}
1490 					else
1491 					{
1492 						mtCOVERAGE_TEST_MARKER();
1493 					}
1494 				}
1495 				#endif /* configUSE_MUTEXES */
1496 
1497 				/* Check to see if other tasks are blocked waiting to give the
1498 				semaphore, and if so, unblock the highest priority such task. */
1499 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1500 				{
1501 					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1502 					{
1503 						queueYIELD_IF_USING_PREEMPTION();
1504 					}
1505 					else
1506 					{
1507 						mtCOVERAGE_TEST_MARKER();
1508 					}
1509 				}
1510 				else
1511 				{
1512 					mtCOVERAGE_TEST_MARKER();
1513 				}
1514 
1515 				taskEXIT_CRITICAL( &pxQueue->mux);
1516 				return pdPASS;
1517 			}
1518 			else
1519 			{
1520 				if( xTicksToWait == ( TickType_t ) 0 )
1521 				{
1522 					/* For inheritance to have occurred there must have been an
1523 					initial timeout, and an adjusted timeout cannot become 0,
1524 					because if it were 0 the function would already have exited. */
1525 					#if( configUSE_MUTEXES == 1 )
1526 					{
1527 						configASSERT( xInheritanceOccurred == pdFALSE );
1528 					}
1529 					#endif /* configUSE_MUTEXES */
1530 
1531 					/* The semaphore count was 0 and no block time is specified
1532 					(or the block time has expired) so exit now. */
1533 					taskEXIT_CRITICAL( &pxQueue->mux);
1534 					traceQUEUE_RECEIVE_FAILED( pxQueue );
1535 					return errQUEUE_EMPTY;
1536 				}
1537 				else if( xEntryTimeSet == pdFALSE )
1538 				{
1539 					/* The semaphore count was 0 and a block time was specified
1540 					so configure the timeout structure ready to block. */
1541 					vTaskInternalSetTimeOutState( &xTimeOut );
1542 					xEntryTimeSet = pdTRUE;
1543 				}
1544 				else
1545 				{
1546 					/* Entry time was already set. */
1547 					mtCOVERAGE_TEST_MARKER();
1548 				}
1549 			}
1550 		}
1551 		taskEXIT_CRITICAL( &pxQueue->mux);
1552 
1553 		/* Interrupts and other tasks can give to and take from the semaphore
1554 		now the critical section has been exited. */
1555 
1556 		taskENTER_CRITICAL( &pxQueue->mux);
1557 		prvLockQueue( pxQueue );
1558 
1559 		/* Update the timeout state to see if it has expired yet. */
1560 		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1561 		{
1562 			/* A block time is specified and not expired.  If the semaphore
1563 			count is 0 then enter the Blocked state to wait for a semaphore to
1564 			become available.  As semaphores are implemented with queues the
1565 			queue being empty is equivalent to the semaphore count being 0. */
1566 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1567 			{
1568 				traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue );
1569 
1570 				#if ( configUSE_MUTEXES == 1 )
1571 				{
1572 					if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
1573 					{
1574 						taskENTER_CRITICAL( &pxQueue->mux);
1575 						{
1576 							xInheritanceOccurred = xTaskPriorityInherit( pxQueue->u.xSemaphore.xMutexHolder );
1577 						}
1578 						taskEXIT_CRITICAL( &pxQueue->mux);
1579 					}
1580 					else
1581 					{
1582 						mtCOVERAGE_TEST_MARKER();
1583 					}
1584 				}
1585 				#endif
1586 
1587 				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1588 				prvUnlockQueue( pxQueue );
1589 				taskEXIT_CRITICAL( &pxQueue->mux);
1590 				portYIELD_WITHIN_API();
1591 			}
1592 			else
1593 			{
1594 				/* There was no timeout and the semaphore count was not 0, so
1595 				attempt to take the semaphore again. */
1596 				prvUnlockQueue( pxQueue );
1597 				taskEXIT_CRITICAL( &pxQueue->mux);
1598 			}
1599 		}
1600 		else
1601 		{
1602 			/* Timed out. */
1603 			prvUnlockQueue( pxQueue );
1604 			taskEXIT_CRITICAL( &pxQueue->mux);
1605 
1606 			/* If the semaphore count is 0 exit now as the timeout has
1607 			expired.  Otherwise return to attempt to take the semaphore that is
1608 			known to be available.  As semaphores are implemented by queues the
1609 			queue being empty is equivalent to the semaphore count being 0. */
1610 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1611 			{
1612 				#if ( configUSE_MUTEXES == 1 )
1613 				{
1614 					/* xInheritanceOccurred could only have been set if
1615 					pxQueue->uxQueueType == queueQUEUE_IS_MUTEX, so there is no
1616 					need to test the mutex type again here. */
1617 					if( xInheritanceOccurred != pdFALSE )
1618 					{
1619 						taskENTER_CRITICAL( &pxQueue->mux);
1620 						{
1621 							UBaseType_t uxHighestWaitingPriority;
1622 
1623 							/* This task blocking on the mutex caused another
1624 							task to inherit this task's priority.  Now this task
1625 							has timed out the priority should be disinherited
1626 							again, but only as low as the next highest priority
1627 							task that is waiting for the same mutex. */
1628 							uxHighestWaitingPriority = prvGetDisinheritPriorityAfterTimeout( pxQueue );
1629 							vTaskPriorityDisinheritAfterTimeout( pxQueue->u.xSemaphore.xMutexHolder, uxHighestWaitingPriority );
1630 						}
1631 						taskEXIT_CRITICAL( &pxQueue->mux);
1632 					}
1633 				}
1634 				#endif /* configUSE_MUTEXES */
1635 
1636 				traceQUEUE_RECEIVE_FAILED( pxQueue );
1637 				return errQUEUE_EMPTY;
1638 			}
1639 			else
1640 			{
1641 				mtCOVERAGE_TEST_MARKER();
1642 			}
1643 		}
1644 	} /*lint -restore */
1645 }
1646 /*-----------------------------------------------------------*/
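
/*
 * Example usage of the semaphore take path above (a minimal sketch, assuming
 * configUSE_MUTEXES is 1; xMutex and the task are hypothetical).  Application
 * code normally reaches xQueueSemaphoreTake() through the xSemaphoreTake()
 * macro in semphr.h rather than calling it directly:
 *
 *	SemaphoreHandle_t xMutex;
 *
 *	void vATask( void *pvParameters )
 *	{
 *		xMutex = xSemaphoreCreateMutex();
 *
 *		for( ;; )
 *		{
 *			// Block for up to 10 ticks waiting for the mutex.  While this
 *			// task is blocked the current holder inherits its priority, as
 *			// implemented above.
 *			if( xSemaphoreTake( xMutex, ( TickType_t ) 10 ) == pdTRUE )
 *			{
 *				// ... access the protected resource ...
 *				xSemaphoreGive( xMutex );
 *			}
 *		}
 *	}
 */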
1647 
1648 BaseType_t xQueuePeek( QueueHandle_t xQueue, void * const pvBuffer, TickType_t xTicksToWait )
1649 {
1650 BaseType_t xEntryTimeSet = pdFALSE;
1651 TimeOut_t xTimeOut;
1652 int8_t *pcOriginalReadPosition;
1653 Queue_t * const pxQueue = xQueue;
1654 
1655 	/* Check the pointer is not NULL. */
1656 	configASSERT( ( pxQueue ) );
1657 
1658 	/* The buffer into which data is received can only be NULL if the data size
1659 	is zero (so no data is copied into the buffer). */
1660 	configASSERT( !( ( ( pvBuffer ) == NULL ) && ( ( pxQueue )->uxItemSize != ( UBaseType_t ) 0U ) ) );
1661 
1662 	/* Cannot block if the scheduler is suspended. */
1663 	#if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
1664 	{
1665 		configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
1666 	}
1667 	#endif
1668 
1669 
1670 	/*lint -save -e904  This function relaxes the coding standard somewhat to
1671 	allow return statements within the function itself.  This is done in the
1672 	interest of execution time efficiency. */
1673 	for( ;; )
1674 	{
1675 		taskENTER_CRITICAL( &pxQueue->mux);
1676 		{
1677 			const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1678 
1679 			/* Is there data in the queue now?  To be running the calling task
1680 			must be the highest priority task wanting to access the queue. */
1681 			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1682 			{
1683 				/* Remember the read position so it can be reset after the data
1684 				is read from the queue as this function is only peeking the
1685 				data, not removing it. */
1686 				pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1687 
1688 				prvCopyDataFromQueue( pxQueue, pvBuffer );
1689 				traceQUEUE_PEEK( pxQueue );
1690 
1691 				/* The data is not being removed, so reset the read pointer. */
1692 				pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1693 
1694 				/* The data is being left in the queue, so see if there are
1695 				any other tasks waiting for the data. */
1696 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
1697 				{
1698 					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
1699 					{
1700 						/* The task waiting has a higher priority than this task. */
1701 						queueYIELD_IF_USING_PREEMPTION();
1702 					}
1703 					else
1704 					{
1705 						mtCOVERAGE_TEST_MARKER();
1706 					}
1707 				}
1708 				else
1709 				{
1710 					mtCOVERAGE_TEST_MARKER();
1711 				}
1712 
1713 				taskEXIT_CRITICAL( &pxQueue->mux);
1714 				return pdPASS;
1715 			}
1716 			else
1717 			{
1718 				if( xTicksToWait == ( TickType_t ) 0 )
1719 				{
1720 					/* The queue was empty and no block time is specified (or
1721 					the block time has expired) so leave now. */
1722 					taskEXIT_CRITICAL( &pxQueue->mux);
1723 					traceQUEUE_PEEK_FAILED( pxQueue );
1724 					return errQUEUE_EMPTY;
1725 				}
1726 				else if( xEntryTimeSet == pdFALSE )
1727 				{
1728 					/* The queue was empty and a block time was specified so
1729 					configure the timeout structure ready to enter the blocked
1730 					state. */
1731 					vTaskInternalSetTimeOutState( &xTimeOut );
1732 					xEntryTimeSet = pdTRUE;
1733 				}
1734 				else
1735 				{
1736 					/* Entry time was already set. */
1737 					mtCOVERAGE_TEST_MARKER();
1738 				}
1739 			}
1740 		}
1741 		taskEXIT_CRITICAL( &pxQueue->mux);
1742 
1743 		/* Interrupts and other tasks can send to and receive from the queue
1744 		now the critical section has been exited. */
1745 
1746 		taskENTER_CRITICAL( &pxQueue->mux);
1747 		prvLockQueue( pxQueue );
1748 
1749 		/* Update the timeout state to see if it has expired yet. */
1750 		if( xTaskCheckForTimeOut( &xTimeOut, &xTicksToWait ) == pdFALSE )
1751 		{
1752 			/* Timeout has not expired yet, check to see if there is data in the
1753 			queue now, and if not enter the Blocked state to wait for data. */
1754 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1755 			{
1756 				traceBLOCKING_ON_QUEUE_PEEK( pxQueue );
1757 				vTaskPlaceOnEventList( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait );
1758 				prvUnlockQueue( pxQueue );
1759 				taskEXIT_CRITICAL( &pxQueue->mux);
1760 				portYIELD_WITHIN_API();
1761 			}
1762 			else
1763 			{
1764 				/* There is data in the queue now, so don't enter the blocked
1765 				state, instead return to try and obtain the data. */
1766 				prvUnlockQueue( pxQueue );
1767 				taskEXIT_CRITICAL( &pxQueue->mux);
1768 			}
1769 		}
1770 		else
1771 		{
1772 			/* The timeout has expired.  If there is still no data in the queue
1773 			exit, otherwise go back and try to read the data again. */
1774 			prvUnlockQueue( pxQueue );
1775 			taskEXIT_CRITICAL( &pxQueue->mux);
1776 
1777 			if( prvIsQueueEmpty( pxQueue ) != pdFALSE )
1778 			{
1779 				traceQUEUE_PEEK_FAILED( pxQueue );
1780 				return errQUEUE_EMPTY;
1781 			}
1782 			else
1783 			{
1784 				mtCOVERAGE_TEST_MARKER();
1785 			}
1786 		}
1787 	} /*lint -restore */
1788 }
1789 /*-----------------------------------------------------------*/
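
/*
 * Example usage of the peek path above (a minimal sketch; xQueue and the task
 * are hypothetical):
 *
 *	void vAPeekingTask( void *pvParameters )
 *	{
 *	uint32_t ulMessage;
 *
 *		// Look at the item at the head of the queue without removing it,
 *		// blocking for up to 100 ticks if the queue is empty.  A later
 *		// xQueueReceive() call will return the same item.
 *		if( xQueuePeek( xQueue, &ulMessage, ( TickType_t ) 100 ) == pdPASS )
 *		{
 *			// ulMessage now holds a copy of the item, which remains queued.
 *		}
 *	}
 */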
1790 
1791 BaseType_t xQueueReceiveFromISR( QueueHandle_t xQueue, void * const pvBuffer, BaseType_t * const pxHigherPriorityTaskWoken )
1792 {
1793 BaseType_t xReturn;
1794 UBaseType_t uxSavedInterruptStatus;
1795 Queue_t * const pxQueue = xQueue;
1796 
1797 	configASSERT( pxQueue );
1798 	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1799 
1800 	/* RTOS ports that support interrupt nesting have the concept of a maximum
1801 	system call (or maximum API call) interrupt priority.  Interrupts that are
1802 	above the maximum system call priority are kept permanently enabled, even
1803 	when the RTOS kernel is in a critical section, but cannot make any calls to
1804 	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1805 	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1806 	failure if a FreeRTOS API function is called from an interrupt that has been
1807 	assigned a priority above the configured maximum system call priority.
1808 	Only FreeRTOS functions that end in FromISR can be called from interrupts
1809 	that have been assigned a priority at or (logically) below the maximum
1810 	system call interrupt priority.  FreeRTOS maintains a separate interrupt
1811 	safe API to ensure interrupt entry is as fast and as simple as possible.
1812 	More information (albeit Cortex-M specific) is provided on the following
1813 	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1814 	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1815 
1816 	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1817 	{
1818 		taskENTER_CRITICAL_ISR(&pxQueue->mux);
1819 
1820 		const UBaseType_t uxMessagesWaiting = pxQueue->uxMessagesWaiting;
1821 
1822 		/* Cannot block in an ISR, so check there is data available. */
1823 		if( uxMessagesWaiting > ( UBaseType_t ) 0 )
1824 		{
1825 			const int8_t cRxLock = pxQueue->cRxLock;
1826 
1827 			traceQUEUE_RECEIVE_FROM_ISR( pxQueue );
1828 
1829 			prvCopyDataFromQueue( pxQueue, pvBuffer );
1830 			pxQueue->uxMessagesWaiting = uxMessagesWaiting - ( UBaseType_t ) 1;
1831 
1832 			/* If the queue is locked the event list will not be modified.
1833 			Instead update the lock count so the task that unlocks the queue
1834 			will know that an ISR has removed data while the queue was
1835 			locked. */
1836 			if( cRxLock == queueUNLOCKED )
1837 			{
1838 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
1839 				{
1840 					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
1841 					{
1842 						/* The task waiting has a higher priority than us so
1843 						force a context switch. */
1844 						if( pxHigherPriorityTaskWoken != NULL )
1845 						{
1846 							*pxHigherPriorityTaskWoken = pdTRUE;
1847 						}
1848 						else
1849 						{
1850 							mtCOVERAGE_TEST_MARKER();
1851 						}
1852 					}
1853 					else
1854 					{
1855 						mtCOVERAGE_TEST_MARKER();
1856 					}
1857 				}
1858 				else
1859 				{
1860 					mtCOVERAGE_TEST_MARKER();
1861 				}
1862 			}
1863 			else
1864 			{
1865 				/* Increment the lock count so the task that unlocks the queue
1866 				knows that data was removed while it was locked. */
1867 				pxQueue->cRxLock = ( int8_t ) ( cRxLock + 1 );
1868 			}
1869 
1870 			xReturn = pdPASS;
1871 		}
1872 		else
1873 		{
1874 			xReturn = pdFAIL;
1875 			traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue );
1876 		}
1877 		taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1878 	}
1879 	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1880 
1881 	return xReturn;
1882 }
1883 /*-----------------------------------------------------------*/
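
/*
 * Example usage of the ISR safe receive above (a minimal sketch; the ISR and
 * xQueue are hypothetical, and the exact name of the end-of-interrupt yield
 * macro varies between ports):
 *
 *	void vAnExampleISR( void )
 *	{
 *	uint32_t ulReceived;
 *	BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *		// Drain the queue without blocking - blocking is not legal here.
 *		while( xQueueReceiveFromISR( xQueue, &ulReceived, &xHigherPriorityTaskWoken ) == pdPASS )
 *		{
 *			// ... process ulReceived ...
 *		}
 *
 *		// Request a context switch if receiving unblocked a task with a
 *		// priority above that of the interrupted task.
 *		portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *	}
 */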
1884 
1885 BaseType_t xQueuePeekFromISR( QueueHandle_t xQueue, void * const pvBuffer )
1886 {
1887 BaseType_t xReturn;
1888 UBaseType_t uxSavedInterruptStatus;
1889 int8_t *pcOriginalReadPosition;
1890 Queue_t * const pxQueue = xQueue;
1891 
1892 	configASSERT( pxQueue );
1893 	configASSERT( !( ( pvBuffer == NULL ) && ( pxQueue->uxItemSize != ( UBaseType_t ) 0U ) ) );
1894 	configASSERT( pxQueue->uxItemSize != 0 ); /* Can't peek a semaphore. */
1895 
1896 	/* RTOS ports that support interrupt nesting have the concept of a maximum
1897 	system call (or maximum API call) interrupt priority.  Interrupts that are
1898 	above the maximum system call priority are kept permanently enabled, even
1899 	when the RTOS kernel is in a critical section, but cannot make any calls to
1900 	FreeRTOS API functions.  If configASSERT() is defined in FreeRTOSConfig.h
1901 	then portASSERT_IF_INTERRUPT_PRIORITY_INVALID() will result in an assertion
1902 	failure if a FreeRTOS API function is called from an interrupt that has been
1903 	assigned a priority above the configured maximum system call priority.
1904 	Only FreeRTOS functions that end in FromISR can be called from interrupts
1905 	that have been assigned a priority at or (logically) below the maximum
1906 	system call interrupt priority.  FreeRTOS maintains a separate interrupt
1907 	safe API to ensure interrupt entry is as fast and as simple as possible.
1908 	More information (albeit Cortex-M specific) is provided on the following
1909 	link: http://www.freertos.org/RTOS-Cortex-M3-M4.html */
1910 	portASSERT_IF_INTERRUPT_PRIORITY_INVALID();
1911 
1912 	uxSavedInterruptStatus = portSET_INTERRUPT_MASK_FROM_ISR();
1913 	taskENTER_CRITICAL_ISR(&pxQueue->mux);
1914 	{
1915 		/* Cannot block in an ISR, so check there is data available. */
1916 		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
1917 		{
1918 			traceQUEUE_PEEK_FROM_ISR( pxQueue );
1919 
1920 			/* Remember the read position so it can be reset as nothing is
1921 			actually being removed from the queue. */
1922 			pcOriginalReadPosition = pxQueue->u.xQueue.pcReadFrom;
1923 			prvCopyDataFromQueue( pxQueue, pvBuffer );
1924 			pxQueue->u.xQueue.pcReadFrom = pcOriginalReadPosition;
1925 
1926 			xReturn = pdPASS;
1927 		}
1928 		else
1929 		{
1930 			xReturn = pdFAIL;
1931 			traceQUEUE_PEEK_FROM_ISR_FAILED( pxQueue );
1932 		}
1933 	}
1934 	taskEXIT_CRITICAL_ISR(&pxQueue->mux);
1935 	portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus );
1936 
1937 	return xReturn;
1938 }
1939 /*-----------------------------------------------------------*/
1940 
1941 UBaseType_t uxQueueMessagesWaiting( const QueueHandle_t xQueue )
1942 {
1943 UBaseType_t uxReturn;
1944 Queue_t * const pxQueue = ( Queue_t * ) xQueue;
1945 
1946 	configASSERT( xQueue );
1947 
1948 	taskENTER_CRITICAL( &pxQueue->mux);
1949 	{
1950 		uxReturn = pxQueue->uxMessagesWaiting;
1951 	}
1952 	taskEXIT_CRITICAL( &pxQueue->mux);
1953 
1954 	return uxReturn;
1955 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1956 /*-----------------------------------------------------------*/
1957 
1958 UBaseType_t uxQueueSpacesAvailable( const QueueHandle_t xQueue )
1959 {
1960 UBaseType_t uxReturn;
1961 Queue_t * const pxQueue = xQueue;
1962 
1963 	configASSERT( pxQueue );
1964 
1965 	taskENTER_CRITICAL( &pxQueue->mux);
1966 	{
1967 		uxReturn = pxQueue->uxLength - pxQueue->uxMessagesWaiting;
1968 	}
1969 	taskEXIT_CRITICAL( &pxQueue->mux);
1970 
1971 	return uxReturn;
1972 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1973 /*-----------------------------------------------------------*/
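
/*
 * Example usage of the two queries above (a minimal sketch; the function name
 * is hypothetical).  Each call takes its snapshot inside its own critical
 * section, so the queue may change between the two calls:
 *
 *	void vLogQueueState( QueueHandle_t xQueue )
 *	{
 *	UBaseType_t uxUsed, uxFree;
 *
 *		uxUsed = uxQueueMessagesWaiting( xQueue );
 *		uxFree = uxQueueSpacesAvailable( xQueue );
 *		// The queue may have changed between the two snapshots, so
 *		// uxUsed + uxFree is not guaranteed to equal the queue's length.
 *	}
 */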
1974 
1975 UBaseType_t uxQueueMessagesWaitingFromISR( const QueueHandle_t xQueue )
1976 {
1977 UBaseType_t uxReturn;
1978 Queue_t * const pxQueue = xQueue;
1979 
1980 	configASSERT( pxQueue );
1981 	uxReturn = pxQueue->uxMessagesWaiting;
1982 
1983 	return uxReturn;
1984 } /*lint !e818 Pointer cannot be declared const as xQueue is a typedef not pointer. */
1985 /*-----------------------------------------------------------*/
1986 
1987 void vQueueDelete( QueueHandle_t xQueue )
1988 {
1989 Queue_t * const pxQueue = xQueue;
1990 
1991 	configASSERT( pxQueue );
1992 	traceQUEUE_DELETE( pxQueue );
1993 
1994 	#if ( configQUEUE_REGISTRY_SIZE > 0 )
1995 	{
1996 		vQueueUnregisterQueue( pxQueue );
1997 	}
1998 	#endif
1999 
2000 	#if( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
2001 	{
2002 		/* The queue can only have been allocated dynamically - free it
2003 		again. */
2004 		vPortFree( pxQueue );
2005 	}
2006 	#elif( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
2007 	{
2008 		/* The queue could have been allocated statically or dynamically, so
2009 		check before attempting to free the memory. */
2010 		if( pxQueue->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
2011 		{
2012 			vPortFree( pxQueue );
2013 		}
2014 		else
2015 		{
2016 			mtCOVERAGE_TEST_MARKER();
2017 		}
2018 	}
2019 	#else
2020 	{
2021 		/* The queue must have been statically allocated, so is not going to be
2022 		deleted.  Avoid compiler warnings about the unused parameter. */
2023 		( void ) pxQueue;
2024 	}
2025 	#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
2026 }
2027 /*-----------------------------------------------------------*/
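
/*
 * Example usage of vQueueDelete() (a minimal sketch; xQueue and the function
 * are hypothetical):
 *
 *	void vTearDown( void )
 *	{
 *		// No task may be blocked on the queue at this point, and the
 *		// handle must not be used again after the call.
 *		vQueueDelete( xQueue );
 *		xQueue = NULL; // Hypothetical guard against accidental reuse.
 *	}
 */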
2028 
2029 #if ( configUSE_TRACE_FACILITY == 1 )
2030 
2031 	UBaseType_t uxQueueGetQueueNumber( QueueHandle_t xQueue )
2032 	{
2033 		return ( ( Queue_t * ) xQueue )->uxQueueNumber;
2034 	}
2035 
2036 #endif /* configUSE_TRACE_FACILITY */
2037 /*-----------------------------------------------------------*/
2038 
2039 #if ( configUSE_TRACE_FACILITY == 1 )
2040 
2041 	void vQueueSetQueueNumber( QueueHandle_t xQueue, UBaseType_t uxQueueNumber )
2042 	{
2043 		( ( Queue_t * ) xQueue )->uxQueueNumber = uxQueueNumber;
2044 	}
2045 
2046 #endif /* configUSE_TRACE_FACILITY */
2047 /*-----------------------------------------------------------*/
2048 
2049 #if ( configUSE_TRACE_FACILITY == 1 )
2050 
2051 	uint8_t ucQueueGetQueueType( QueueHandle_t xQueue )
2052 	{
2053 		return ( ( Queue_t * ) xQueue )->ucQueueType;
2054 	}
2055 
2056 #endif /* configUSE_TRACE_FACILITY */
2057 /*-----------------------------------------------------------*/
2058 
2059 #if( configUSE_MUTEXES == 1 )
2060 
2061 	static UBaseType_t prvGetDisinheritPriorityAfterTimeout( const Queue_t * const pxQueue )
2062 	{
2063 	UBaseType_t uxHighestPriorityOfWaitingTasks;
2064 
2065 		/* If a task waiting for a mutex causes the mutex holder to inherit a
2066 		priority, but the waiting task times out, then the holder should
2067 		disinherit the priority - but only down to the highest priority of any
2068 		other tasks that are waiting for the same mutex.  For this purpose,
2069 		return the priority of the highest priority task that is waiting for the
2070 		mutex. */
2071 		if( listCURRENT_LIST_LENGTH( &( pxQueue->xTasksWaitingToReceive ) ) > 0U )
2072 		{
2073 			uxHighestPriorityOfWaitingTasks = ( UBaseType_t ) configMAX_PRIORITIES - ( UBaseType_t ) listGET_ITEM_VALUE_OF_HEAD_ENTRY( &( pxQueue->xTasksWaitingToReceive ) );
2074 		}
2075 		else
2076 		{
2077 			uxHighestPriorityOfWaitingTasks = tskIDLE_PRIORITY;
2078 		}
2079 
2080 		return uxHighestPriorityOfWaitingTasks;
2081 	}
2082 
2083 #endif /* configUSE_MUTEXES */
2084 /*-----------------------------------------------------------*/
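
/*
 * A worked example of the encoding used above: event lists are ordered by
 * item value, and a task of priority P waits with an item value of
 * ( configMAX_PRIORITIES - P ).  So with configMAX_PRIORITIES set to 25, a
 * priority 5 waiter is stored with value 20, the head entry (smallest value)
 * belongs to the highest priority waiter, and the subtraction above recovers
 * that waiter's priority.
 */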
2085 
2086 static BaseType_t prvCopyDataToQueue( Queue_t * const pxQueue, const void *pvItemToQueue, const BaseType_t xPosition )
2087 {
2088 BaseType_t xReturn = pdFALSE;
2089 UBaseType_t uxMessagesWaiting;
2090 
2091 	/* This function is called from a critical section. */
2092 
2093 	uxMessagesWaiting = pxQueue->uxMessagesWaiting;
2094 
2095 	if( pxQueue->uxItemSize == ( UBaseType_t ) 0 )
2096 	{
2097 		#if ( configUSE_MUTEXES == 1 )
2098 		{
2099 			if( pxQueue->uxQueueType == queueQUEUE_IS_MUTEX )
2100 			{
2101 				/* The mutex is no longer being held. */
2102 				xReturn = xTaskPriorityDisinherit( pxQueue->u.xSemaphore.xMutexHolder );
2103 				pxQueue->u.xSemaphore.xMutexHolder = NULL;
2104 			}
2105 			else
2106 			{
2107 				mtCOVERAGE_TEST_MARKER();
2108 			}
2109 		}
2110 		#endif /* configUSE_MUTEXES */
2111 	}
2112 	else if( xPosition == queueSEND_TO_BACK )
2113 	{
2114 		( void ) memcpy( ( void * ) pxQueue->pcWriteTo, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports, plus previous logic ensures a null pointer can only be passed to memcpy() if the copy size is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2115 		pxQueue->pcWriteTo += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2116 		if( pxQueue->pcWriteTo >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2117 		{
2118 			pxQueue->pcWriteTo = pxQueue->pcHead;
2119 		}
2120 		else
2121 		{
2122 			mtCOVERAGE_TEST_MARKER();
2123 		}
2124 	}
2125 	else
2126 	{
2127 		( void ) memcpy( ( void * ) pxQueue->u.xQueue.pcReadFrom, pvItemToQueue, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e9087 !e418 MISRA exception as the casts are only redundant for some ports.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes.  Assert checks null pointer only used when length is 0. */
2128 		pxQueue->u.xQueue.pcReadFrom -= pxQueue->uxItemSize;
2129 		if( pxQueue->u.xQueue.pcReadFrom < pxQueue->pcHead ) /*lint !e946 MISRA exception justified as comparison of pointers is the cleanest solution. */
2130 		{
2131 			pxQueue->u.xQueue.pcReadFrom = ( pxQueue->u.xQueue.pcTail - pxQueue->uxItemSize );
2132 		}
2133 		else
2134 		{
2135 			mtCOVERAGE_TEST_MARKER();
2136 		}
2137 
2138 		if( xPosition == queueOVERWRITE )
2139 		{
2140 			if( uxMessagesWaiting > ( UBaseType_t ) 0 )
2141 			{
2142 				/* An item is not being added but overwritten, so subtract
2143 				one from the recorded number of items in the queue so that,
2144 				when one is added again below, the recorded number of items
2145 				remains correct. */
2146 				--uxMessagesWaiting;
2147 			}
2148 			else
2149 			{
2150 				mtCOVERAGE_TEST_MARKER();
2151 			}
2152 		}
2153 		else
2154 		{
2155 			mtCOVERAGE_TEST_MARKER();
2156 		}
2157 	}
2158 
2159 	pxQueue->uxMessagesWaiting = uxMessagesWaiting + ( UBaseType_t ) 1;
2160 
2161 	return xReturn;
2162 }
2163 /*-----------------------------------------------------------*/
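
/*
 * Example usage of the queueOVERWRITE path above (a minimal sketch; the task
 * and xMailboxQueue are hypothetical).  xQueueOverwrite() is intended for
 * queues of length one, used as a mailbox that always holds the latest value:
 *
 *	void vAMailboxWriter( void *pvParameters )
 *	{
 *	uint32_t ulValue = 10;
 *
 *		// xMailboxQueue is assumed to have been created elsewhere with
 *		// xQueueCreate( 1, sizeof( uint32_t ) ).  xQueueOverwrite() never
 *		// blocks on a length one queue - an existing value is overwritten.
 *		( void ) xQueueOverwrite( xMailboxQueue, &ulValue );
 *	}
 */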
2164 
2165 static void prvCopyDataFromQueue( Queue_t * const pxQueue, void * const pvBuffer )
2166 {
2167 	if( pxQueue->uxItemSize != ( UBaseType_t ) 0 )
2168 	{
2169 		pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize; /*lint !e9016 Pointer arithmetic on char types ok, especially in this use case where it is the clearest way of conveying intent. */
2170 		if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail ) /*lint !e946 MISRA exception justified as use of the relational operator is the cleanest solutions. */
2171 		{
2172 			pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2173 		}
2174 		else
2175 		{
2176 			mtCOVERAGE_TEST_MARKER();
2177 		}
2178 		( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( size_t ) pxQueue->uxItemSize ); /*lint !e961 !e418 !e9087 MISRA exception as the casts are only redundant for some ports.  Also previous logic ensures a null pointer can only be passed to memcpy() when the count is 0.  Cast to void required by function signature and safe as no alignment requirement and copy length specified in bytes. */
2179 	}
2180 }
2181 /*-----------------------------------------------------------*/
2182 
2183 static void prvUnlockQueue( Queue_t * const pxQueue )
2184 {
2185 	/* THIS FUNCTION MUST BE CALLED WITH THE SCHEDULER SUSPENDED. */
2186 
2187 	/* The lock counts contain the number of extra data items placed or
2188 	removed from the queue while the queue was locked.  When a queue is
2189 	locked, items can be added or removed but the event lists cannot be
2190 	updated.  For example, if an ISR posts twice while the queue is locked,
	cTxLock ends up at 2 and the loop below can unblock up to two tasks that
	were waiting for data. */
2191 	taskENTER_CRITICAL( &pxQueue->mux);
2192 	{
2193 		int8_t cTxLock = pxQueue->cTxLock;
2194 
2195 		/* See if data was added to the queue while it was locked. */
2196 		while( cTxLock > queueLOCKED_UNMODIFIED )
2197 		{
2198 			/* Data was posted while the queue was locked.  Are any tasks
2199 			blocked waiting for data to become available? */
2200 			#if ( configUSE_QUEUE_SETS == 1 )
2201 			{
2202 				if( pxQueue->pxQueueSetContainer != NULL )
2203 				{
2204 					if( prvNotifyQueueSetContainer( pxQueue, queueSEND_TO_BACK ) != pdFALSE )
2205 					{
2206 						/* The queue is a member of a queue set, and posting to
2207 						the queue set caused a higher priority task to unblock.
2208 						A context switch is required. */
2209 						vTaskMissedYield();
2210 					}
2211 					else
2212 					{
2213 						mtCOVERAGE_TEST_MARKER();
2214 					}
2215 				}
2216 				else
2217 				{
2218 					/* Tasks that are removed from the event list will get
2219 					added to the pending ready list as the scheduler is still
2220 					suspended. */
2221 					if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2222 					{
2223 						if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2224 						{
2225 							/* The task waiting has a higher priority so record that a
2226 							context	switch is required. */
2227 							vTaskMissedYield();
2228 						}
2229 						else
2230 						{
2231 							mtCOVERAGE_TEST_MARKER();
2232 						}
2233 					}
2234 					else
2235 					{
2236 						break;
2237 					}
2238 				}
2239 			}
2240 			#else /* configUSE_QUEUE_SETS */
2241 			{
2242 				/* Tasks that are removed from the event list will get added to
2243 				the pending ready list as the scheduler is still suspended. */
2244 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2245 				{
2246 					if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2247 					{
2248 						/* The task waiting has a higher priority so record that
2249 						a context switch is required. */
2250 						vTaskMissedYield();
2251 					}
2252 					else
2253 					{
2254 						mtCOVERAGE_TEST_MARKER();
2255 					}
2256 				}
2257 				else
2258 				{
2259 					break;
2260 				}
2261 			}
2262 			#endif /* configUSE_QUEUE_SETS */
2263 
2264 			--cTxLock;
2265 		}
2266 
2267 		pxQueue->cTxLock = queueUNLOCKED;
2268 	}
2269 	taskEXIT_CRITICAL( &pxQueue->mux);
2270 
2271 	/* Do the same for the Rx lock. */
2272 	taskENTER_CRITICAL( &pxQueue->mux);
2273 	{
2274 		int8_t cRxLock = pxQueue->cRxLock;
2275 
2276 		while( cRxLock > queueLOCKED_UNMODIFIED )
2277 		{
2278 			if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2279 			{
2280 				if( xTaskRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2281 				{
2282 					vTaskMissedYield();
2283 				}
2284 				else
2285 				{
2286 					mtCOVERAGE_TEST_MARKER();
2287 				}
2288 
2289 				--cRxLock;
2290 			}
2291 			else
2292 			{
2293 				break;
2294 			}
2295 		}
2296 
2297 		pxQueue->cRxLock = queueUNLOCKED;
2298 	}
2299 	taskEXIT_CRITICAL( &pxQueue->mux);
2300 }
2301 /*-----------------------------------------------------------*/
2302 
2303 static BaseType_t prvIsQueueEmpty( const Queue_t *pxQueue )
2304 {
2305 BaseType_t xReturn;
2306 Queue_t *pxQ = ( Queue_t * ) pxQueue;	/* Cast away const so the queue's spinlock can be taken. */
2307 	taskENTER_CRITICAL( &pxQ->mux );
2308 	{
2309 		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2310 		{
2311 			xReturn = pdTRUE;
2312 		}
2313 		else
2314 		{
2315 			xReturn = pdFALSE;
2316 		}
2317 	}
2318 	taskEXIT_CRITICAL( &pxQ->mux );
2319 
2320 	return xReturn;
2321 }
2322 /*-----------------------------------------------------------*/
2323 
2324 BaseType_t xQueueIsQueueEmptyFromISR( const QueueHandle_t xQueue )
2325 {
2326 BaseType_t xReturn;
2327 Queue_t * const pxQueue = xQueue;
2328 
2329 	configASSERT( pxQueue );
2330 	if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2331 	{
2332 		xReturn = pdTRUE;
2333 	}
2334 	else
2335 	{
2336 		xReturn = pdFALSE;
2337 	}
2338 
2339 	return xReturn;
2340 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2341 /*-----------------------------------------------------------*/
2342 
2343 static BaseType_t prvIsQueueFull( const Queue_t *pxQueue )
2344 {
2345 BaseType_t xReturn;
2346 
2347 	{
2348 		if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2349 		{
2350 			xReturn = pdTRUE;
2351 		}
2352 		else
2353 		{
2354 			xReturn = pdFALSE;
2355 		}
2356 	}
2357 
2358 	return xReturn;
2359 }
2360 /*-----------------------------------------------------------*/
2361 
2362 BaseType_t xQueueIsQueueFullFromISR( const QueueHandle_t xQueue )
2363 {
2364 BaseType_t xReturn;
2365 Queue_t * const pxQueue = xQueue;
2366 
2367 	configASSERT( pxQueue );
2368 	if( pxQueue->uxMessagesWaiting == pxQueue->uxLength )
2369 	{
2370 		xReturn = pdTRUE;
2371 	}
2372 	else
2373 	{
2374 		xReturn = pdFALSE;
2375 	}
2376 
2377 	return xReturn;
2378 } /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2379 /*-----------------------------------------------------------*/
2380 
2381 #if ( configUSE_CO_ROUTINES == 1 )
2382 
2383 	BaseType_t xQueueCRSend( QueueHandle_t xQueue, const void *pvItemToQueue, TickType_t xTicksToWait )
2384 	{
2385 	BaseType_t xReturn;
2386 	Queue_t * const pxQueue = xQueue;
2387 
2388 		/* If the queue is already full we may have to block.  A critical section
2389 		is required to prevent an interrupt removing something from the queue
2390 		between the check to see if the queue is full and blocking on the queue. */
2391 		portDISABLE_INTERRUPTS();
2392 		{
2393 			if( prvIsQueueFull( pxQueue ) != pdFALSE )
2394 			{
2395 				/* The queue is full - do we want to block or just leave without
2396 				posting? */
2397 				if( xTicksToWait > ( TickType_t ) 0 )
2398 				{
2399 					/* As this is called from a coroutine we cannot block directly, but
2400 					return indicating that we need to block. */
2401 					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToSend ) );
2402 					portENABLE_INTERRUPTS();
2403 					return errQUEUE_BLOCKED;
2404 				}
2405 				else
2406 				{
2407 					portENABLE_INTERRUPTS();
2408 					return errQUEUE_FULL;
2409 				}
2410 			}
2411 		}
2412 		portENABLE_INTERRUPTS();
2413 
2414 		portDISABLE_INTERRUPTS();
2415 		{
2416 			if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2417 			{
2418 				/* There is room in the queue, copy the data into the queue. */
2419 				prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2420 				xReturn = pdPASS;
2421 
2422 				/* Were any co-routines waiting for data to become available? */
2423 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2424 				{
2425 					/* In this instance the co-routine could be placed directly
2426 					into the ready list as we are within a critical section.
2427 					Instead the same pending ready list mechanism is used as if
2428 					the event were caused from within an interrupt. */
2429 					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2430 					{
2431 						/* The co-routine waiting has a higher priority so record
2432 						that a yield might be appropriate. */
2433 						xReturn = errQUEUE_YIELD;
2434 					}
2435 					else
2436 					{
2437 						mtCOVERAGE_TEST_MARKER();
2438 					}
2439 				}
2440 				else
2441 				{
2442 					mtCOVERAGE_TEST_MARKER();
2443 				}
2444 			}
2445 			else
2446 			{
2447 				xReturn = errQUEUE_FULL;
2448 			}
2449 		}
2450 		portENABLE_INTERRUPTS();
2451 
2452 		return xReturn;
2453 	}
2454 
2455 #endif /* configUSE_CO_ROUTINES */
2456 /*-----------------------------------------------------------*/
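
/*
 * Example usage of the co-routine send above (a minimal sketch; the
 * co-routine and xCoRoutineQueue are hypothetical).  Application code does
 * not call xQueueCRSend() directly, but goes through the crQUEUE_SEND()
 * macro from within a co-routine; note that co-routine locals must be static
 * so they survive the blocking points:
 *
 *	void vASendingCoRoutine( CoRoutineHandle_t xHandle, UBaseType_t uxIndex )
 *	{
 *	static BaseType_t xResult;
 *	static uint32_t ulValueToPost = 0;
 *
 *		crSTART( xHandle );
 *
 *		for( ;; )
 *		{
 *			// Post to the queue, blocking for up to 10 ticks for space.
 *			crQUEUE_SEND( xHandle, xCoRoutineQueue, &ulValueToPost, ( TickType_t ) 10, &xResult );
 *			ulValueToPost++;
 *		}
 *
 *		crEND();
 *	}
 */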
2457 
2458 #if ( configUSE_CO_ROUTINES == 1 )
2459 
2460 	BaseType_t xQueueCRReceive( QueueHandle_t xQueue, void *pvBuffer, TickType_t xTicksToWait )
2461 	{
2462 	BaseType_t xReturn;
2463 	Queue_t * const pxQueue = xQueue;
2464 
2465 		/* If the queue is already empty we may have to block.  A critical section
2466 		is required to prevent an interrupt adding something to the queue
2467 		between the check to see if the queue is empty and blocking on the queue. */
2468 		portDISABLE_INTERRUPTS();
2469 		{
2470 			if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0 )
2471 			{
2472 				/* There are no messages in the queue, do we want to block or just
2473 				leave with nothing? */
2474 				if( xTicksToWait > ( TickType_t ) 0 )
2475 				{
2476 					/* As this is a co-routine we cannot block directly, but return
2477 					indicating that we need to block. */
2478 					vCoRoutineAddToDelayedList( xTicksToWait, &( pxQueue->xTasksWaitingToReceive ) );
2479 					portENABLE_INTERRUPTS();
2480 					return errQUEUE_BLOCKED;
2481 				}
2482 				else
2483 				{
2484 					portENABLE_INTERRUPTS();
2485 					return errQUEUE_EMPTY;
2486 				}
2487 			}
2488 			else
2489 			{
2490 				mtCOVERAGE_TEST_MARKER();
2491 			}
2492 		}
2493 		portENABLE_INTERRUPTS();
2494 
2495 		portDISABLE_INTERRUPTS();
2496 		{
2497 			if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2498 			{
2499 				/* Data is available from the queue. */
2500 				pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2501 				if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2502 				{
2503 					pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2504 				}
2505 				else
2506 				{
2507 					mtCOVERAGE_TEST_MARKER();
2508 				}
2509 				--( pxQueue->uxMessagesWaiting );
2510 				( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2511 
2512 				xReturn = pdPASS;
2513 
2514 				/* Were any co-routines waiting for space to become available? */
2515 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2516 				{
2517 					/* In this instance the co-routine could be placed directly
2518 					into the ready list as we are within a critical section.
2519 					Instead the same pending ready list mechanism is used as if
2520 					the event were caused from within an interrupt. */
2521 					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2522 					{
2523 						xReturn = errQUEUE_YIELD;
2524 					}
2525 					else
2526 					{
2527 						mtCOVERAGE_TEST_MARKER();
2528 					}
2529 				}
2530 				else
2531 				{
2532 					mtCOVERAGE_TEST_MARKER();
2533 				}
2534 			}
2535 			else
2536 			{
2537 				xReturn = pdFAIL;
2538 			}
2539 		}
2540 		portENABLE_INTERRUPTS();
2541 
2542 		return xReturn;
2543 	}
2544 
2545 #endif /* configUSE_CO_ROUTINES */
2546 /*-----------------------------------------------------------*/
2547 
2548 #if ( configUSE_CO_ROUTINES == 1 )
2549 
2550 	BaseType_t xQueueCRSendFromISR( QueueHandle_t xQueue, const void *pvItemToQueue, BaseType_t xCoRoutinePreviouslyWoken )
2551 	{
2552 	Queue_t * const pxQueue = xQueue;
2553 
2554 		/* Cannot block within an ISR so if there is no space on the queue then
2555 		exit without doing anything. */
2556 		if( pxQueue->uxMessagesWaiting < pxQueue->uxLength )
2557 		{
2558 			prvCopyDataToQueue( pxQueue, pvItemToQueue, queueSEND_TO_BACK );
2559 
2560 			/* We only want to wake one co-routine per ISR, so check that a
2561 			co-routine has not already been woken. */
2562 			if( xCoRoutinePreviouslyWoken == pdFALSE )
2563 			{
2564 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToReceive ) ) == pdFALSE )
2565 				{
2566 					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToReceive ) ) != pdFALSE )
2567 					{
2568 						return pdTRUE;
2569 					}
2570 					else
2571 					{
2572 						mtCOVERAGE_TEST_MARKER();
2573 					}
2574 				}
2575 				else
2576 				{
2577 					mtCOVERAGE_TEST_MARKER();
2578 				}
2579 			}
2580 			else
2581 			{
2582 				mtCOVERAGE_TEST_MARKER();
2583 			}
2584 		}
2585 		else
2586 		{
2587 			mtCOVERAGE_TEST_MARKER();
2588 		}
2589 
2590 		return xCoRoutinePreviouslyWoken;
2591 	}
2592 
2593 #endif /* configUSE_CO_ROUTINES */
2594 /*-----------------------------------------------------------*/
2595 
2596 #if ( configUSE_CO_ROUTINES == 1 )
2597 
2598 	BaseType_t xQueueCRReceiveFromISR( QueueHandle_t xQueue, void *pvBuffer, BaseType_t *pxCoRoutineWoken )
2599 	{
2600 	BaseType_t xReturn;
2601 	Queue_t * const pxQueue = xQueue;
2602 
2603 		/* We cannot block from an ISR, so check there is data available. If
2604 		not then just leave without doing anything. */
2605 		if( pxQueue->uxMessagesWaiting > ( UBaseType_t ) 0 )
2606 		{
2607 			/* Copy the data from the queue. */
2608 			pxQueue->u.xQueue.pcReadFrom += pxQueue->uxItemSize;
2609 			if( pxQueue->u.xQueue.pcReadFrom >= pxQueue->u.xQueue.pcTail )
2610 			{
2611 				pxQueue->u.xQueue.pcReadFrom = pxQueue->pcHead;
2612 			}
2613 			else
2614 			{
2615 				mtCOVERAGE_TEST_MARKER();
2616 			}
2617 			--( pxQueue->uxMessagesWaiting );
2618 			( void ) memcpy( ( void * ) pvBuffer, ( void * ) pxQueue->u.xQueue.pcReadFrom, ( unsigned ) pxQueue->uxItemSize );
2619 
2620 			if( ( *pxCoRoutineWoken ) == pdFALSE )
2621 			{
2622 				if( listLIST_IS_EMPTY( &( pxQueue->xTasksWaitingToSend ) ) == pdFALSE )
2623 				{
2624 					if( xCoRoutineRemoveFromEventList( &( pxQueue->xTasksWaitingToSend ) ) != pdFALSE )
2625 					{
2626 						*pxCoRoutineWoken = pdTRUE;
2627 					}
2628 					else
2629 					{
2630 						mtCOVERAGE_TEST_MARKER();
2631 					}
2632 				}
2633 				else
2634 				{
2635 					mtCOVERAGE_TEST_MARKER();
2636 				}
2637 			}
2638 			else
2639 			{
2640 				mtCOVERAGE_TEST_MARKER();
2641 			}
2642 
2643 			xReturn = pdPASS;
2644 		}
2645 		else
2646 		{
2647 			xReturn = pdFAIL;
2648 		}
2649 
2650 		return xReturn;
2651 	}
2652 
2653 #endif /* configUSE_CO_ROUTINES */
2654 /*-----------------------------------------------------------*/
2655 
2656 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2657 
2658 	void vQueueAddToRegistry( QueueHandle_t xQueue, const char *pcQueueName ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2659 	{
2660 	UBaseType_t ux;
2661 
2662 		portENTER_CRITICAL(&queue_registry_spinlock);
2663 		/* See if there is an empty space in the registry.  A NULL name denotes
2664 		a free slot. */
2665 		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2666 		{
2667 			if( xQueueRegistry[ ux ].pcQueueName == NULL )
2668 			{
2669 				/* Store the information on this queue. */
2670 				xQueueRegistry[ ux ].pcQueueName = pcQueueName;
2671 				xQueueRegistry[ ux ].xHandle = xQueue;
2672 
2673 				traceQUEUE_REGISTRY_ADD( xQueue, pcQueueName );
2674 				break;
2675 			}
2676 			else
2677 			{
2678 				mtCOVERAGE_TEST_MARKER();
2679 			}
2680 		}
2681 		portEXIT_CRITICAL(&queue_registry_spinlock);
2682 	}
2683 
2684 #endif /* configQUEUE_REGISTRY_SIZE */
2685 /*-----------------------------------------------------------*/
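
/*
 * Example usage of the registry above (a minimal sketch; the handles and the
 * function are hypothetical).  The registry exists purely to make queues and
 * semaphores visible to a kernel aware debugger; the name is stored by
 * reference, so the string must remain valid while the queue is registered:
 *
 *	void vSetupDebugNames( void )
 *	{
 *		vQueueAddToRegistry( xRxQueue, "RxQueue" );
 *		vQueueAddToRegistry( xTxMutex, "TxMutex" );
 *	}
 */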
2686 
2687 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2688 
2689 	const char *pcQueueGetName( QueueHandle_t xQueue ) /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2690 	{
2691 	UBaseType_t ux;
2692 	const char *pcReturn = NULL; /*lint !e971 Unqualified char types are allowed for strings and single characters only. */
2693 
2694 		portENTER_CRITICAL(&queue_registry_spinlock);
2695 		/* Note there is nothing here to protect against another task adding or
2696 		removing entries from the registry while it is being searched. */
2697 		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2698 		{
2699 			if( xQueueRegistry[ ux ].xHandle == xQueue )
2700 			{
2701 				pcReturn = xQueueRegistry[ ux ].pcQueueName;
2702 				break;
2703 			}
2704 			else
2705 			{
2706 				mtCOVERAGE_TEST_MARKER();
2707 			}
2708 		}
2709 		portEXIT_CRITICAL(&queue_registry_spinlock);
2710 
2711 		return pcReturn;
2712 	} /*lint !e818 xQueue cannot be a pointer to const because it is a typedef. */
2713 
2714 #endif /* configQUEUE_REGISTRY_SIZE */
2715 /*-----------------------------------------------------------*/
2716 
2717 #if ( configQUEUE_REGISTRY_SIZE > 0 )
2718 
2719 	void vQueueUnregisterQueue( QueueHandle_t xQueue )
2720 	{
2721 	UBaseType_t ux;
2722 
2723 		portENTER_CRITICAL(&queue_registry_spinlock);
2724 		/* See if the handle of the queue being unregistered is actually in
2725 		the registry. */
2726 		for( ux = ( UBaseType_t ) 0U; ux < ( UBaseType_t ) configQUEUE_REGISTRY_SIZE; ux++ )
2727 		{
2728 			if( xQueueRegistry[ ux ].xHandle == xQueue )
2729 			{
2730 				/* Set the name to NULL to show that this slot is free again. */
2731 				xQueueRegistry[ ux ].pcQueueName = NULL;
2732 
2733 				/* Set the handle to NULL to ensure the same queue handle cannot
2734 				appear in the registry twice if it is added, removed, then
2735 				added again. */
2736 				xQueueRegistry[ ux ].xHandle = ( QueueHandle_t ) 0;
2737 				break;
2738 			}
2739 			else
2740 			{
2741 				mtCOVERAGE_TEST_MARKER();
2742 			}
2743 		}
2744 		portEXIT_CRITICAL(&queue_registry_spinlock);
2745 
2746 	} /*lint !e818 xQueue could not be pointer to const because it is a typedef. */
2747 
2748 #endif /* configQUEUE_REGISTRY_SIZE */
2749 /*-----------------------------------------------------------*/
2750 
2751 #if ( configUSE_TIMERS == 1 )
2752 
2753 	void vQueueWaitForMessageRestricted( QueueHandle_t xQueue, TickType_t xTicksToWait, const BaseType_t xWaitIndefinitely )
2754 	{
2755 	Queue_t * const pxQueue = xQueue;
2756 
2757 		/* This function should not be called by application code hence the
2758 		'Restricted' in its name.  It is not part of the public API.  It is
2759 		designed for use by kernel code, and has special calling requirements.
2760 		It can result in vListInsert() being called on a list that can only
2761 		possibly ever have one item in it, so the list will be fast, but even
2762 		so it should be called with the scheduler locked and not from a critical
2763 		section. */
2764 
2765 		/* Only do anything if there are no messages in the queue.  This function
2766 		will not actually cause the task to block, just place it on a blocked
2767 		list.  It will not block until the scheduler is unlocked - at which
2768 		time a yield will be performed.  If an item is added to the queue while
2769 		the queue is locked, and the calling task blocks on the queue, then the
2770 		calling task will be immediately unblocked when the queue is unlocked. */
2771 		prvLockQueue( pxQueue );
2772 		if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
2773 		{
2774 			/* There is nothing in the queue, block for the specified period. */
2775 			vTaskPlaceOnEventListRestricted( &( pxQueue->xTasksWaitingToReceive ), xTicksToWait, xWaitIndefinitely );
2776 		}
2777 		else
2778 		{
2779 			mtCOVERAGE_TEST_MARKER();
2780 		}
2781 		prvUnlockQueue( pxQueue );
2782 	}
2783 
2784 #endif /* configUSE_TIMERS */
2785 /*-----------------------------------------------------------*/
2786 
2787 #if( ( configUSE_QUEUE_SETS == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
2788 
2789 	QueueSetHandle_t xQueueCreateSet( const UBaseType_t uxEventQueueLength )
2790 	{
2791 	QueueSetHandle_t pxQueue;
2792 
2793 		pxQueue = xQueueGenericCreate( uxEventQueueLength, ( UBaseType_t ) sizeof( Queue_t * ), queueQUEUE_TYPE_SET );
2794 
2795 		return pxQueue;
2796 	}
2797 
2798 #endif /* configUSE_QUEUE_SETS */
2799 /*-----------------------------------------------------------*/
2800 
2801 #if ( configUSE_QUEUE_SETS == 1 )
2802 
2803 	BaseType_t xQueueAddToSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2804 	{
2805 	BaseType_t xReturn;
2806 
2807 		taskENTER_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
2808 		{
2809 			if( ( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer != NULL )
2810 			{
2811 				/* Cannot add a queue/semaphore to more than one queue set. */
2812 				xReturn = pdFAIL;
2813 			}
2814 			else if( ( ( Queue_t * ) xQueueOrSemaphore )->uxMessagesWaiting != ( UBaseType_t ) 0 )
2815 			{
2816 				/* Cannot add a queue/semaphore to a queue set if there are already
2817 				items in the queue/semaphore. */
2818 				xReturn = pdFAIL;
2819 			}
2820 			else
2821 			{
2822 				( ( Queue_t * ) xQueueOrSemaphore )->pxQueueSetContainer = xQueueSet;
2823 				xReturn = pdPASS;
2824 			}
2825 		}
2826 		taskEXIT_CRITICAL(&(((Queue_t * )xQueueOrSemaphore)->mux));
2827 
2828 		return xReturn;
2829 	}
2830 
2831 #endif /* configUSE_QUEUE_SETS */
2832 /*-----------------------------------------------------------*/
2833 
2834 #if ( configUSE_QUEUE_SETS == 1 )
2835 
2836 	BaseType_t xQueueRemoveFromSet( QueueSetMemberHandle_t xQueueOrSemaphore, QueueSetHandle_t xQueueSet )
2837 	{
2838 	BaseType_t xReturn;
2839 	Queue_t * const pxQueueOrSemaphore = ( Queue_t * ) xQueueOrSemaphore;
2840 
2841 		if( pxQueueOrSemaphore->pxQueueSetContainer != xQueueSet )
2842 		{
2843 			/* The queue was not a member of the set. */
2844 			xReturn = pdFAIL;
2845 		}
2846 		else if( pxQueueOrSemaphore->uxMessagesWaiting != ( UBaseType_t ) 0 )
2847 		{
2848 			/* It is dangerous to remove a queue from a set when the queue is
2849 			not empty because the queue set will still hold pending events for
2850 			the queue. */
2851 			xReturn = pdFAIL;
2852 		}
2853 		else
2854 		{
2855 			taskENTER_CRITICAL(&(pxQueueOrSemaphore->mux));
2856 			{
2857 				/* The queue is no longer contained in the set. */
2858 				pxQueueOrSemaphore->pxQueueSetContainer = NULL;
2859 			}
2860 			taskEXIT_CRITICAL(&(pxQueueOrSemaphore->mux));
2861 			xReturn = pdPASS;
2862 		}
2863 
2864 		return xReturn;
2865 	} /*lint !e818 xQueueSet could not be declared as pointing to const as it is a typedef. */
2866 
2867 #endif /* configUSE_QUEUE_SETS */
2868 /*-----------------------------------------------------------*/
2869 
2870 #if ( configUSE_QUEUE_SETS == 1 )
2871 
2872 	QueueSetMemberHandle_t xQueueSelectFromSet( QueueSetHandle_t xQueueSet, TickType_t const xTicksToWait )
2873 	{
2874 	QueueSetMemberHandle_t xReturn = NULL;
2875 
2876 		( void ) xQueueReceive( ( QueueHandle_t ) xQueueSet, &xReturn, xTicksToWait ); /*lint !e961 Casting from one typedef to another is not redundant. */
2877 		return xReturn;
2878 	}
2879 
2880 #endif /* configUSE_QUEUE_SETS */
2881 /*-----------------------------------------------------------*/
2882 
2883 #if ( configUSE_QUEUE_SETS == 1 )
2884 
2885 	QueueSetMemberHandle_t xQueueSelectFromSetFromISR( QueueSetHandle_t xQueueSet )
2886 	{
2887 	QueueSetMemberHandle_t xReturn = NULL;
2888 
2889 		( void ) xQueueReceiveFromISR( ( QueueHandle_t ) xQueueSet, &xReturn, NULL ); /*lint !e961 Casting from one typedef to another is not redundant. */
2890 		return xReturn;
2891 	}
2892 
2893 #endif /* configUSE_QUEUE_SETS */
2894 /*-----------------------------------------------------------*/
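
/*
 * Example usage of the queue set API above (a minimal sketch; the watcher
 * task and the member handles are hypothetical).  The set must be sized to
 * hold one event per item in every member - here a queue of length 5 plus a
 * binary semaphore - and members must be empty when they are added:
 *
 *	void vWatcherTask( void *pvParameters )
 *	{
 *	QueueSetHandle_t xSet;
 *	QueueSetMemberHandle_t xActivated;
 *
 *		xSet = xQueueCreateSet( 5 + 1 );
 *		xQueueAddToSet( xQueue, xSet );
 *		xQueueAddToSet( xSemaphore, xSet );
 *
 *		for( ;; )
 *		{
 *			// Block until one member contains data, then identify which.
 *			xActivated = xQueueSelectFromSet( xSet, portMAX_DELAY );
 *			if( xActivated == ( QueueSetMemberHandle_t ) xQueue )
 *			{
 *				// Reading from xQueue will not block here.
 *			}
 *		}
 *	}
 */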
2895 
2896 #if ( configUSE_QUEUE_SETS == 1 )
2897 
2898 	static BaseType_t prvNotifyQueueSetContainer( const Queue_t * const pxQueue, const BaseType_t xCopyPosition )
2899 	{
2900 	Queue_t *pxQueueSetContainer = pxQueue->pxQueueSetContainer;
2901 	BaseType_t xReturn = pdFALSE;
2902 
2903 		/* This function must be called from a critical section. */
2904 
2905 		configASSERT( pxQueueSetContainer );
2906 
2907 		/* Acquire the queue set's spinlock. */
2908 		portENTER_CRITICAL(&(pxQueueSetContainer->mux));
2909 
2910 		configASSERT( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength );
2911 
2912 		if( pxQueueSetContainer->uxMessagesWaiting < pxQueueSetContainer->uxLength )
2913 		{
2914 			const int8_t cTxLock = pxQueueSetContainer->cTxLock;
2915 
2916 			traceQUEUE_SEND( pxQueueSetContainer );
2917 
2918 			/* The data copied is the handle of the queue that contains data. */
2919 			xReturn = prvCopyDataToQueue( pxQueueSetContainer, &pxQueue, xCopyPosition );
2920 
2921 			if( cTxLock == queueUNLOCKED )
2922 			{
2923 				if( listLIST_IS_EMPTY( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) == pdFALSE )
2924 				{
2925 					if( xTaskRemoveFromEventList( &( pxQueueSetContainer->xTasksWaitingToReceive ) ) != pdFALSE )
2926 					{
2927 						/* The task waiting has a higher priority. */
2928 						xReturn = pdTRUE;
2929 					}
2930 					else
2931 					{
2932 						mtCOVERAGE_TEST_MARKER();
2933 					}
2934 				}
2935 				else
2936 				{
2937 					mtCOVERAGE_TEST_MARKER();
2938 				}
2939 			}
2940 			else
2941 			{
2942 				pxQueueSetContainer->cTxLock = ( int8_t ) ( cTxLock + 1 );
2943 			}
2944 		}
2945 		else
2946 		{
2947 			mtCOVERAGE_TEST_MARKER();
2948 		}
2949 
2950 		/* Release the queue set's spinlock. */
2951 		portEXIT_CRITICAL(&(pxQueueSetContainer->mux));
2952 
2953 		return xReturn;
2954 	}
2955 
2956 #endif /* configUSE_QUEUE_SETS */
2957