/*
 * FreeRTOS Kernel V11.0.1
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/* Standard includes. */
#include <stdlib.h>

/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
 * all the API functions to use the MPU wrappers.  That should only be done when
 * task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE

/* FreeRTOS includes. */
#include "FreeRTOS.h"
#include "task.h"
#include "timers.h"
#include "event_groups.h"

/* The MPU ports require MPU_WRAPPERS_INCLUDED_FROM_API_FILE to be defined
 * for the header files above, but not in this file, in order to generate the
 * correct privileged Vs unprivileged linkage and placement. */
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE

typedef struct EventGroupDef_t
{
    EventBits_t uxEventBits;
    List_t xTasksWaitingForBits; /**< List of tasks waiting for a bit to be set. */

    #if ( configUSE_TRACE_FACILITY == 1 )
        UBaseType_t uxEventGroupNumber;
    #endif

    #if ( ( configSUPPORT_STATIC_ALLOCATION == 1 ) && ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) )
        uint8_t ucStaticallyAllocated; /**< Set to pdTRUE if the event group is statically allocated to ensure no attempt is made to free the memory. */
    #endif
} EventGroup_t;

/*-----------------------------------------------------------*/

/*
 * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
 * The wait condition is defined by xWaitForAllBits.  If xWaitForAllBits is
 * pdTRUE then the wait condition is met if all the bits set in uxBitsToWaitFor
 * are also set in uxCurrentEventBits.  If xWaitForAllBits is pdFALSE then the
 * wait condition is met if any of the bits set in uxBitsToWaitFor are also set
 * in uxCurrentEventBits.
 */
static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                        const EventBits_t uxBitsToWaitFor,
                                        const BaseType_t xWaitForAllBits ) PRIVILEGED_FUNCTION;
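
/*
 * Illustration of the wait condition (the values below are arbitrary
 * examples, not taken from the kernel): with uxCurrentEventBits = 0x05 and
 * uxBitsToWaitFor = 0x07, the condition is met when xWaitForAllBits is
 * pdFALSE (bits 0x01 and 0x04 overlap), but not when xWaitForAllBits is
 * pdTRUE (bit 0x02 is still clear).
 */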

/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    EventGroupHandle_t xEventGroupCreateStatic( StaticEventGroup_t * pxEventGroupBuffer )
    {
        EventGroup_t * pxEventBits;

        traceENTER_xEventGroupCreateStatic( pxEventGroupBuffer );

        /* A StaticEventGroup_t object must be provided. */
        configASSERT( pxEventGroupBuffer );

        #if ( configASSERT_DEFINED == 1 )
        {
            /* Sanity check that the size of the structure used to declare a
             * variable of type StaticEventGroup_t equals the size of the real
             * event group structure. */
            volatile size_t xSize = sizeof( StaticEventGroup_t );
            configASSERT( xSize == sizeof( EventGroup_t ) );
        }
        #endif /* configASSERT_DEFINED */

        /* The user has provided a statically allocated event group - use it. */
        /* MISRA Ref 11.3.1 [Misaligned access] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
        /* coverity[misra_c_2012_rule_11_3_violation] */
        pxEventBits = ( EventGroup_t * ) pxEventGroupBuffer;

        if( pxEventBits != NULL )
        {
            pxEventBits->uxEventBits = 0;
            vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

            #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
            {
                /* Both static and dynamic allocation can be used, so note that
                 * this event group was created statically in case the event group
                 * is later deleted. */
                pxEventBits->ucStaticallyAllocated = pdTRUE;
            }
            #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

            traceEVENT_GROUP_CREATE( pxEventBits );
        }
        else
        {
            /* xEventGroupCreateStatic should only ever be called with
             * pxEventGroupBuffer pointing to a pre-allocated (compile time
             * allocated) StaticEventGroup_t variable. */
            traceEVENT_GROUP_CREATE_FAILED();
        }

        traceRETURN_xEventGroupCreateStatic( pxEventBits );

        return pxEventBits;
    }

#endif /* configSUPPORT_STATIC_ALLOCATION */
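
/*
 * Example usage of xEventGroupCreateStatic() - an illustrative sketch only,
 * the variable names below are hypothetical:
 *
 * // The memory for the event group is provided by the application, so no
 * // heap allocation takes place and the call cannot fail through memory
 * // exhaustion.
 * static StaticEventGroup_t xEventGroupBuffer;
 * EventGroupHandle_t xEventGroup;
 *
 * xEventGroup = xEventGroupCreateStatic( &xEventGroupBuffer );
 * configASSERT( xEventGroup != NULL );
 */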
/*-----------------------------------------------------------*/

#if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )

    EventGroupHandle_t xEventGroupCreate( void )
    {
        EventGroup_t * pxEventBits;

        traceENTER_xEventGroupCreate();

        /* MISRA Ref 11.5.1 [Malloc memory assignment] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        pxEventBits = ( EventGroup_t * ) pvPortMalloc( sizeof( EventGroup_t ) );

        if( pxEventBits != NULL )
        {
            pxEventBits->uxEventBits = 0;
            vListInitialise( &( pxEventBits->xTasksWaitingForBits ) );

            #if ( configSUPPORT_STATIC_ALLOCATION == 1 )
            {
                /* Both static and dynamic allocation can be used, so note that
                 * this event group was allocated dynamically in case the event
                 * group is later deleted. */
                pxEventBits->ucStaticallyAllocated = pdFALSE;
            }
            #endif /* configSUPPORT_STATIC_ALLOCATION */

            traceEVENT_GROUP_CREATE( pxEventBits );
        }
        else
        {
            traceEVENT_GROUP_CREATE_FAILED();
        }

        traceRETURN_xEventGroupCreate( pxEventBits );

        return pxEventBits;
    }

#endif /* configSUPPORT_DYNAMIC_ALLOCATION */
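
/*
 * Example usage of xEventGroupCreate() - an illustrative sketch only, the
 * names used below are hypothetical:
 *
 * EventGroupHandle_t xEventGroup;
 *
 * // Attempt to create the event group from the FreeRTOS heap.
 * xEventGroup = xEventGroupCreate();
 *
 * if( xEventGroup == NULL )
 * {
 *     // There was insufficient FreeRTOS heap available for the event group
 *     // to be created successfully.
 * }
 */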
/*-----------------------------------------------------------*/

EventBits_t xEventGroupSync( EventGroupHandle_t xEventGroup,
                             const EventBits_t uxBitsToSet,
                             const EventBits_t uxBitsToWaitFor,
                             TickType_t xTicksToWait )
{
    EventBits_t uxOriginalBitValue, uxReturn;
    EventGroup_t * pxEventBits = xEventGroup;
    BaseType_t xAlreadyYielded;
    BaseType_t xTimeoutOccurred = pdFALSE;

    traceENTER_xEventGroupSync( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTicksToWait );

    configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
    configASSERT( uxBitsToWaitFor != 0 );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    vTaskSuspendAll();
    {
        uxOriginalBitValue = pxEventBits->uxEventBits;

        ( void ) xEventGroupSetBits( xEventGroup, uxBitsToSet );

        if( ( ( uxOriginalBitValue | uxBitsToSet ) & uxBitsToWaitFor ) == uxBitsToWaitFor )
        {
            /* All the rendezvous bits are now set - no need to block. */
            uxReturn = ( uxOriginalBitValue | uxBitsToSet );

            /* A rendezvous always clears the bits.  They will have been cleared
             * already unless this is the only task in the rendezvous. */
            pxEventBits->uxEventBits &= ~uxBitsToWaitFor;

            xTicksToWait = 0;
        }
        else
        {
            if( xTicksToWait != ( TickType_t ) 0 )
            {
                traceEVENT_GROUP_SYNC_BLOCK( xEventGroup, uxBitsToSet, uxBitsToWaitFor );

                /* Store the bits that the calling task is waiting for in the
                 * task's event list item so the kernel knows when a match is
                 * found.  Then enter the blocked state. */
                vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | eventCLEAR_EVENTS_ON_EXIT_BIT | eventWAIT_FOR_ALL_BITS ), xTicksToWait );

                /* This assignment is obsolete as uxReturn will get set after
                 * the task unblocks, but some compilers mistakenly generate a
                 * warning about uxReturn being returned without being set if the
                 * assignment is omitted. */
                uxReturn = 0;
            }
            else
            {
                /* The rendezvous bits were not set, but no block time was
                 * specified - just return the current event bit value. */
                uxReturn = pxEventBits->uxEventBits;
                xTimeoutOccurred = pdTRUE;
            }
        }
    }
    xAlreadyYielded = xTaskResumeAll();

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* The task blocked to wait for its required bits to be set - at this
         * point either the required bits were set or the block time expired.  If
         * the required bits were set they will have been stored in the task's
         * event list item, and they should now be retrieved then cleared. */
        uxReturn = uxTaskResetEventItemValue();

        if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
        {
            /* The task timed out, just return the current event bit value. */
            taskENTER_CRITICAL();
            {
                uxReturn = pxEventBits->uxEventBits;

                /* Although the task got here because it timed out before the
                 * bits it was waiting for were set, it is possible that since it
                 * unblocked another task has set the bits.  If this is the case
                 * then it needs to clear the bits before exiting. */
                if( ( uxReturn & uxBitsToWaitFor ) == uxBitsToWaitFor )
                {
                    pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            taskEXIT_CRITICAL();

            xTimeoutOccurred = pdTRUE;
        }
        else
        {
            /* The task unblocked because the bits were set. */
        }

        /* Control bits, which might have been set while the task was blocked,
         * should not be returned. */
        uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
    }

    traceEVENT_GROUP_SYNC_END( xEventGroup, uxBitsToSet, uxBitsToWaitFor, xTimeoutOccurred );

    /* Prevent compiler warnings when trace macros are not used. */
    ( void ) xTimeoutOccurred;

    traceRETURN_xEventGroupSync( uxReturn );

    return uxReturn;
}
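
/*
 * Example usage of xEventGroupSync() - an illustrative sketch of a three task
 * rendezvous; the bit and function names are hypothetical:
 *
 * #define TASK_0_BIT ( 1 << 0 )
 * #define TASK_1_BIT ( 1 << 1 )
 * #define TASK_2_BIT ( 1 << 2 )
 * #define ALL_SYNC_BITS ( TASK_0_BIT | TASK_1_BIT | TASK_2_BIT )
 *
 * // Called from task 0; tasks 1 and 2 make equivalent calls with their own bit.
 * void vTask0SyncPoint( EventGroupHandle_t xEventGroup )
 * {
 *     EventBits_t uxReturn;
 *
 *     // Set this task's bit, then wait for the other two bits to be set as
 *     // well.  Wait a maximum of 100ms for the rendezvous to complete.
 *     uxReturn = xEventGroupSync( xEventGroup, TASK_0_BIT, ALL_SYNC_BITS, pdMS_TO_TICKS( 100 ) );
 *
 *     if( ( uxReturn & ALL_SYNC_BITS ) == ALL_SYNC_BITS )
 *     {
 *         // All three tasks reached the synchronisation point before the
 *         // timeout expired.
 *     }
 * }
 */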
/*-----------------------------------------------------------*/

EventBits_t xEventGroupWaitBits( EventGroupHandle_t xEventGroup,
                                 const EventBits_t uxBitsToWaitFor,
                                 const BaseType_t xClearOnExit,
                                 const BaseType_t xWaitForAllBits,
                                 TickType_t xTicksToWait )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturn, uxControlBits = 0;
    BaseType_t xWaitConditionMet, xAlreadyYielded;
    BaseType_t xTimeoutOccurred = pdFALSE;

    traceENTER_xEventGroupWaitBits( xEventGroup, uxBitsToWaitFor, xClearOnExit, xWaitForAllBits, xTicksToWait );

    /* Check the user is not attempting to wait on the bits used by the kernel
     * itself, and that at least one bit is being requested. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToWaitFor & eventEVENT_BITS_CONTROL_BYTES ) == 0 );
    configASSERT( uxBitsToWaitFor != 0 );
    #if ( ( INCLUDE_xTaskGetSchedulerState == 1 ) || ( configUSE_TIMERS == 1 ) )
    {
        configASSERT( !( ( xTaskGetSchedulerState() == taskSCHEDULER_SUSPENDED ) && ( xTicksToWait != 0 ) ) );
    }
    #endif

    vTaskSuspendAll();
    {
        const EventBits_t uxCurrentEventBits = pxEventBits->uxEventBits;

        /* Check to see if the wait condition is already met or not. */
        xWaitConditionMet = prvTestWaitCondition( uxCurrentEventBits, uxBitsToWaitFor, xWaitForAllBits );

        if( xWaitConditionMet != pdFALSE )
        {
            /* The wait condition has already been met so there is no need to
             * block. */
            uxReturn = uxCurrentEventBits;
            xTicksToWait = ( TickType_t ) 0;

            /* Clear the wait bits if requested to do so. */
            if( xClearOnExit != pdFALSE )
            {
                pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }
        }
        else if( xTicksToWait == ( TickType_t ) 0 )
        {
            /* The wait condition has not been met, but no block time was
             * specified, so just return the current value. */
            uxReturn = uxCurrentEventBits;
            xTimeoutOccurred = pdTRUE;
        }
        else
        {
            /* The task is going to block to wait for its required bits to be
             * set.  uxControlBits are used to remember the specified behaviour of
             * this call to xEventGroupWaitBits() - for use when the event bits
             * unblock the task. */
            if( xClearOnExit != pdFALSE )
            {
                uxControlBits |= eventCLEAR_EVENTS_ON_EXIT_BIT;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            if( xWaitForAllBits != pdFALSE )
            {
                uxControlBits |= eventWAIT_FOR_ALL_BITS;
            }
            else
            {
                mtCOVERAGE_TEST_MARKER();
            }

            /* Store the bits that the calling task is waiting for in the
             * task's event list item so the kernel knows when a match is
             * found.  Then enter the blocked state. */
            vTaskPlaceOnUnorderedEventList( &( pxEventBits->xTasksWaitingForBits ), ( uxBitsToWaitFor | uxControlBits ), xTicksToWait );

            /* This is obsolete as it will get set after the task unblocks, but
             * some compilers mistakenly generate a warning about the variable
             * being returned without being set if it is not done. */
            uxReturn = 0;

            traceEVENT_GROUP_WAIT_BITS_BLOCK( xEventGroup, uxBitsToWaitFor );
        }
    }
    xAlreadyYielded = xTaskResumeAll();

    if( xTicksToWait != ( TickType_t ) 0 )
    {
        if( xAlreadyYielded == pdFALSE )
        {
            taskYIELD_WITHIN_API();
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }

        /* The task blocked to wait for its required bits to be set - at this
         * point either the required bits were set or the block time expired.  If
         * the required bits were set they will have been stored in the task's
         * event list item, and they should now be retrieved then cleared. */
        uxReturn = uxTaskResetEventItemValue();

        if( ( uxReturn & eventUNBLOCKED_DUE_TO_BIT_SET ) == ( EventBits_t ) 0 )
        {
            taskENTER_CRITICAL();
            {
                /* The task timed out, just return the current event bit value. */
                uxReturn = pxEventBits->uxEventBits;

                /* It is possible that the event bits were updated between this
                 * task leaving the Blocked state and running again. */
                if( prvTestWaitCondition( uxReturn, uxBitsToWaitFor, xWaitForAllBits ) != pdFALSE )
                {
                    if( xClearOnExit != pdFALSE )
                    {
                        pxEventBits->uxEventBits &= ~uxBitsToWaitFor;
                    }
                    else
                    {
                        mtCOVERAGE_TEST_MARKER();
                    }
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                xTimeoutOccurred = pdTRUE;
            }
            taskEXIT_CRITICAL();
        }
        else
        {
            /* The task unblocked because the bits were set. */
        }

        /* The task blocked so control bits may have been set. */
        uxReturn &= ~eventEVENT_BITS_CONTROL_BYTES;
    }

    traceEVENT_GROUP_WAIT_BITS_END( xEventGroup, uxBitsToWaitFor, xTimeoutOccurred );

    /* Prevent compiler warnings when trace macros are not used. */
    ( void ) xTimeoutOccurred;

    traceRETURN_xEventGroupWaitBits( uxReturn );

    return uxReturn;
}
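
/*
 * Example usage of xEventGroupWaitBits() - an illustrative sketch only, the
 * bit names are hypothetical:
 *
 * #define BIT_0 ( 1 << 0 )
 * #define BIT_4 ( 1 << 4 )
 *
 * void vWaitFunction( EventGroupHandle_t xEventGroup )
 * {
 *     EventBits_t uxBits;
 *
 *     // Wait a maximum of 100ms for either BIT_0 or BIT_4 to be set.  The
 *     // bits are cleared before returning because xClearOnExit is pdTRUE.
 *     uxBits = xEventGroupWaitBits( xEventGroup,
 *                                   BIT_0 | BIT_4,
 *                                   pdTRUE,               // xClearOnExit
 *                                   pdFALSE,              // xWaitForAllBits
 *                                   pdMS_TO_TICKS( 100 ) );
 *
 *     if( ( uxBits & ( BIT_0 | BIT_4 ) ) != 0 )
 *     {
 *         // At least one of the two bits was set before the timeout expired.
 *     }
 * }
 */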
/*-----------------------------------------------------------*/

EventBits_t xEventGroupClearBits( EventGroupHandle_t xEventGroup,
                                  const EventBits_t uxBitsToClear )
{
    EventGroup_t * pxEventBits = xEventGroup;
    EventBits_t uxReturn;

    traceENTER_xEventGroupClearBits( xEventGroup, uxBitsToClear );

    /* Check the user is not attempting to clear the bits used by the kernel
     * itself. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToClear & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

    taskENTER_CRITICAL();
    {
        traceEVENT_GROUP_CLEAR_BITS( xEventGroup, uxBitsToClear );

        /* The value returned is the event group value prior to the bits being
         * cleared. */
        uxReturn = pxEventBits->uxEventBits;

        /* Clear the bits. */
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
    taskEXIT_CRITICAL();

    traceRETURN_xEventGroupClearBits( uxReturn );

    return uxReturn;
}
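
/*
 * Example usage of xEventGroupClearBits() - an illustrative sketch only, the
 * bit names are hypothetical:
 *
 * #define BIT_0 ( 1 << 0 )
 * #define BIT_4 ( 1 << 4 )
 *
 * void vClearFunction( EventGroupHandle_t xEventGroup )
 * {
 *     EventBits_t uxBits;
 *
 *     // Clear BIT_0 and BIT_4.  The value returned is the event group value
 *     // before the bits were cleared, so it can be tested to see which of
 *     // the two bits had been set.
 *     uxBits = xEventGroupClearBits( xEventGroup, BIT_0 | BIT_4 );
 *
 *     if( ( uxBits & BIT_0 ) != 0 )
 *     {
 *         // BIT_0 was set before it was cleared.
 *     }
 * }
 */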
/*-----------------------------------------------------------*/

#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

    BaseType_t xEventGroupClearBitsFromISR( EventGroupHandle_t xEventGroup,
                                            const EventBits_t uxBitsToClear )
    {
        BaseType_t xReturn;

        traceENTER_xEventGroupClearBitsFromISR( xEventGroup, uxBitsToClear );

        traceEVENT_GROUP_CLEAR_BITS_FROM_ISR( xEventGroup, uxBitsToClear );
        xReturn = xTimerPendFunctionCallFromISR( vEventGroupClearBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToClear, NULL );

        traceRETURN_xEventGroupClearBitsFromISR( xReturn );

        return xReturn;
    }

#endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
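
/*
 * Example usage of xEventGroupClearBitsFromISR() - an illustrative sketch
 * only.  The clear operation is not performed inside the interrupt; it is
 * deferred to the RTOS daemon (timer service) task via
 * xTimerPendFunctionCallFromISR(), so the return value only indicates whether
 * the request was queued successfully.  xEventGroup is assumed to have been
 * created elsewhere.
 *
 * #define BIT_0 ( 1 << 0 )
 *
 * void vAnInterruptHandler( void )
 * {
 *     // Request that BIT_0 be cleared by the daemon task.
 *     if( xEventGroupClearBitsFromISR( xEventGroup, BIT_0 ) == pdPASS )
 *     {
 *         // The request was posted to the timer command queue.
 *     }
 * }
 */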
/*-----------------------------------------------------------*/

EventBits_t xEventGroupGetBitsFromISR( EventGroupHandle_t xEventGroup )
{
    UBaseType_t uxSavedInterruptStatus;
    EventGroup_t const * const pxEventBits = xEventGroup;
    EventBits_t uxReturn;

    traceENTER_xEventGroupGetBitsFromISR( xEventGroup );

    uxSavedInterruptStatus = taskENTER_CRITICAL_FROM_ISR();
    {
        uxReturn = pxEventBits->uxEventBits;
    }
    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );

    traceRETURN_xEventGroupGetBitsFromISR( uxReturn );

    return uxReturn;
}
/*-----------------------------------------------------------*/

EventBits_t xEventGroupSetBits( EventGroupHandle_t xEventGroup,
                                const EventBits_t uxBitsToSet )
{
    ListItem_t * pxListItem;
    ListItem_t * pxNext;
    ListItem_t const * pxListEnd;
    List_t const * pxList;
    EventBits_t uxBitsToClear = 0, uxBitsWaitedFor, uxControlBits;
    EventGroup_t * pxEventBits = xEventGroup;
    BaseType_t xMatchFound = pdFALSE;

    traceENTER_xEventGroupSetBits( xEventGroup, uxBitsToSet );

    /* Check the user is not attempting to set the bits used by the kernel
     * itself. */
    configASSERT( xEventGroup );
    configASSERT( ( uxBitsToSet & eventEVENT_BITS_CONTROL_BYTES ) == 0 );

    pxList = &( pxEventBits->xTasksWaitingForBits );
    pxListEnd = listGET_END_MARKER( pxList );
    vTaskSuspendAll();
    {
        traceEVENT_GROUP_SET_BITS( xEventGroup, uxBitsToSet );

        pxListItem = listGET_HEAD_ENTRY( pxList );

        /* Set the bits. */
        pxEventBits->uxEventBits |= uxBitsToSet;

        /* See if the new bit value should unblock any tasks. */
        while( pxListItem != pxListEnd )
        {
            pxNext = listGET_NEXT( pxListItem );
            uxBitsWaitedFor = listGET_LIST_ITEM_VALUE( pxListItem );
            xMatchFound = pdFALSE;

            /* Split the bits waited for from the control bits. */
            uxControlBits = uxBitsWaitedFor & eventEVENT_BITS_CONTROL_BYTES;
            uxBitsWaitedFor &= ~eventEVENT_BITS_CONTROL_BYTES;

            if( ( uxControlBits & eventWAIT_FOR_ALL_BITS ) == ( EventBits_t ) 0 )
            {
                /* Just looking for a single bit being set. */
                if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) != ( EventBits_t ) 0 )
                {
                    xMatchFound = pdTRUE;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }
            }
            else if( ( uxBitsWaitedFor & pxEventBits->uxEventBits ) == uxBitsWaitedFor )
            {
                /* All bits are set. */
                xMatchFound = pdTRUE;
            }
            else
            {
                /* Need all bits to be set, but not all the bits were set. */
            }

            if( xMatchFound != pdFALSE )
            {
                /* The bits match.  Should the bits be cleared on exit? */
                if( ( uxControlBits & eventCLEAR_EVENTS_ON_EXIT_BIT ) != ( EventBits_t ) 0 )
                {
                    uxBitsToClear |= uxBitsWaitedFor;
                }
                else
                {
                    mtCOVERAGE_TEST_MARKER();
                }

                /* Store the actual event flag value in the task's event list
                 * item before removing the task from the event list.  The
                 * eventUNBLOCKED_DUE_TO_BIT_SET bit is set so the task knows
                 * that it was unblocked due to its required bits matching, rather
                 * than because it timed out. */
                vTaskRemoveFromUnorderedEventList( pxListItem, pxEventBits->uxEventBits | eventUNBLOCKED_DUE_TO_BIT_SET );
            }

            /* Move onto the next list item.  Note pxListItem->pxNext is not
             * used here as the list item may have been removed from the event list
             * and inserted into the ready/pending ready list. */
            pxListItem = pxNext;
        }

        /* Clear any bits that matched when the eventCLEAR_EVENTS_ON_EXIT_BIT
         * bit was set in the control word. */
        pxEventBits->uxEventBits &= ~uxBitsToClear;
    }
    ( void ) xTaskResumeAll();

    traceRETURN_xEventGroupSetBits( pxEventBits->uxEventBits );

    return pxEventBits->uxEventBits;
}
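
/*
 * Example usage of xEventGroupSetBits() - an illustrative sketch only, the
 * bit names are hypothetical:
 *
 * #define BIT_0 ( 1 << 0 )
 * #define BIT_4 ( 1 << 4 )
 *
 * void vSetFunction( EventGroupHandle_t xEventGroup )
 * {
 *     EventBits_t uxBits;
 *
 *     // Set BIT_0 and BIT_4, unblocking any tasks whose wait condition is
 *     // now met.  The value returned is the event group value at the time
 *     // the call returns, which may already have bits cleared again if an
 *     // unblocked task was waiting with xClearOnExit set to pdTRUE.
 *     uxBits = xEventGroupSetBits( xEventGroup, BIT_0 | BIT_4 );
 * }
 */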
/*-----------------------------------------------------------*/

void vEventGroupDelete( EventGroupHandle_t xEventGroup )
{
    EventGroup_t * pxEventBits = xEventGroup;
    const List_t * pxTasksWaitingForBits;

    traceENTER_vEventGroupDelete( xEventGroup );

    configASSERT( pxEventBits );

    pxTasksWaitingForBits = &( pxEventBits->xTasksWaitingForBits );

    vTaskSuspendAll();
    {
        traceEVENT_GROUP_DELETE( xEventGroup );

        while( listCURRENT_LIST_LENGTH( pxTasksWaitingForBits ) > ( UBaseType_t ) 0 )
        {
            /* Unblock the task, returning 0 as the event list is being deleted
             * and cannot therefore have any bits set. */
            configASSERT( pxTasksWaitingForBits->xListEnd.pxNext != ( const ListItem_t * ) &( pxTasksWaitingForBits->xListEnd ) );
            vTaskRemoveFromUnorderedEventList( pxTasksWaitingForBits->xListEnd.pxNext, eventUNBLOCKED_DUE_TO_BIT_SET );
        }
    }
    ( void ) xTaskResumeAll();

    #if ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 0 ) )
    {
        /* The event group can only have been allocated dynamically - free
         * it again. */
        vPortFree( pxEventBits );
    }
    #elif ( ( configSUPPORT_DYNAMIC_ALLOCATION == 1 ) && ( configSUPPORT_STATIC_ALLOCATION == 1 ) )
    {
        /* The event group could have been allocated statically or
         * dynamically, so check before attempting to free the memory. */
        if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdFALSE )
        {
            vPortFree( pxEventBits );
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

    traceRETURN_vEventGroupDelete();
}
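
/*
 * Example usage of vEventGroupDelete() - an illustrative sketch only.  Tasks
 * that are blocked on the event group at the time it is deleted are unblocked
 * and their xEventGroupWaitBits() or xEventGroupSync() calls return 0.
 *
 * void vTearDown( EventGroupHandle_t xEventGroup )
 * {
 *     // The handle must not be used again after this call.  A dynamically
 *     // allocated event group is returned to the FreeRTOS heap; statically
 *     // allocated event groups are not freed.
 *     vEventGroupDelete( xEventGroup );
 * }
 */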
/*-----------------------------------------------------------*/

#if ( configSUPPORT_STATIC_ALLOCATION == 1 )

    BaseType_t xEventGroupGetStaticBuffer( EventGroupHandle_t xEventGroup,
                                           StaticEventGroup_t ** ppxEventGroupBuffer )
    {
        BaseType_t xReturn;
        EventGroup_t * pxEventBits = xEventGroup;

        traceENTER_xEventGroupGetStaticBuffer( xEventGroup, ppxEventGroupBuffer );

        configASSERT( pxEventBits );
        configASSERT( ppxEventGroupBuffer );

        #if ( configSUPPORT_DYNAMIC_ALLOCATION == 1 )
        {
            /* Check if the event group was statically allocated. */
            if( pxEventBits->ucStaticallyAllocated == ( uint8_t ) pdTRUE )
            {
                /* MISRA Ref 11.3.1 [Misaligned access] */
                /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
                /* coverity[misra_c_2012_rule_11_3_violation] */
                *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits;
                xReturn = pdTRUE;
            }
            else
            {
                xReturn = pdFALSE;
            }
        }
        #else /* configSUPPORT_DYNAMIC_ALLOCATION */
        {
            /* Event group must have been statically allocated. */
            /* MISRA Ref 11.3.1 [Misaligned access] */
            /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-113 */
            /* coverity[misra_c_2012_rule_11_3_violation] */
            *ppxEventGroupBuffer = ( StaticEventGroup_t * ) pxEventBits;
            xReturn = pdTRUE;
        }
        #endif /* configSUPPORT_DYNAMIC_ALLOCATION */

        traceRETURN_xEventGroupGetStaticBuffer( xReturn );

        return xReturn;
    }
#endif /* configSUPPORT_STATIC_ALLOCATION */
/*-----------------------------------------------------------*/

/* For internal use only - execute a 'set bits' command that was pended from
 * an interrupt. */
void vEventGroupSetBitsCallback( void * pvEventGroup,
                                 uint32_t ulBitsToSet )
{
    traceENTER_vEventGroupSetBitsCallback( pvEventGroup, ulBitsToSet );

    /* MISRA Ref 11.5.4 [Callback function parameter] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    ( void ) xEventGroupSetBits( pvEventGroup, ( EventBits_t ) ulBitsToSet );

    traceRETURN_vEventGroupSetBitsCallback();
}
/*-----------------------------------------------------------*/

/* For internal use only - execute a 'clear bits' command that was pended from
 * an interrupt. */
void vEventGroupClearBitsCallback( void * pvEventGroup,
                                   uint32_t ulBitsToClear )
{
    traceENTER_vEventGroupClearBitsCallback( pvEventGroup, ulBitsToClear );

    /* MISRA Ref 11.5.4 [Callback function parameter] */
    /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
    /* coverity[misra_c_2012_rule_11_5_violation] */
    ( void ) xEventGroupClearBits( pvEventGroup, ( EventBits_t ) ulBitsToClear );

    traceRETURN_vEventGroupClearBitsCallback();
}
/*-----------------------------------------------------------*/

static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                        const EventBits_t uxBitsToWaitFor,
                                        const BaseType_t xWaitForAllBits )
{
    BaseType_t xWaitConditionMet = pdFALSE;

    if( xWaitForAllBits == pdFALSE )
    {
        /* Task only has to wait for one bit within uxBitsToWaitFor to be
         * set.  Is one already set? */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) != ( EventBits_t ) 0 )
        {
            xWaitConditionMet = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }
    else
    {
        /* Task has to wait for all the bits in uxBitsToWaitFor to be set.
         * Are they set already? */
        if( ( uxCurrentEventBits & uxBitsToWaitFor ) == uxBitsToWaitFor )
        {
            xWaitConditionMet = pdTRUE;
        }
        else
        {
            mtCOVERAGE_TEST_MARKER();
        }
    }

    return xWaitConditionMet;
}
/*-----------------------------------------------------------*/

#if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) )

    BaseType_t xEventGroupSetBitsFromISR( EventGroupHandle_t xEventGroup,
                                          const EventBits_t uxBitsToSet,
                                          BaseType_t * pxHigherPriorityTaskWoken )
    {
        BaseType_t xReturn;

        traceENTER_xEventGroupSetBitsFromISR( xEventGroup, uxBitsToSet, pxHigherPriorityTaskWoken );

        traceEVENT_GROUP_SET_BITS_FROM_ISR( xEventGroup, uxBitsToSet );
        xReturn = xTimerPendFunctionCallFromISR( vEventGroupSetBitsCallback, ( void * ) xEventGroup, ( uint32_t ) uxBitsToSet, pxHigherPriorityTaskWoken );

        traceRETURN_xEventGroupSetBitsFromISR( xReturn );

        return xReturn;
    }

#endif /* if ( ( configUSE_TRACE_FACILITY == 1 ) && ( INCLUDE_xTimerPendFunctionCall == 1 ) && ( configUSE_TIMERS == 1 ) ) */
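
/*
 * Example usage of xEventGroupSetBitsFromISR() - an illustrative sketch only.
 * As with xEventGroupClearBitsFromISR(), the operation is deferred to the
 * RTOS daemon (timer service) task, so pdPASS only means the request was
 * queued.  xEventGroup is assumed to have been created elsewhere.
 *
 * #define BIT_0 ( 1 << 0 )
 *
 * void vAnInterruptHandler( void )
 * {
 *     BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *
 *     if( xEventGroupSetBitsFromISR( xEventGroup, BIT_0, &xHigherPriorityTaskWoken ) == pdPASS )
 *     {
 *         // If posting the request unblocked the daemon task, and the daemon
 *         // task has a priority above the interrupted task, request a
 *         // context switch on exit from the interrupt.
 *         portYIELD_FROM_ISR( xHigherPriorityTaskWoken );
 *     }
 * }
 */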
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    UBaseType_t uxEventGroupGetNumber( void * xEventGroup )
    {
        UBaseType_t xReturn;

        /* MISRA Ref 11.5.2 [Opaque pointer] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        EventGroup_t const * pxEventBits = ( EventGroup_t * ) xEventGroup;

        traceENTER_uxEventGroupGetNumber( xEventGroup );

        if( xEventGroup == NULL )
        {
            xReturn = 0;
        }
        else
        {
            xReturn = pxEventBits->uxEventGroupNumber;
        }

        traceRETURN_uxEventGroupGetNumber( xReturn );

        return xReturn;
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/

#if ( configUSE_TRACE_FACILITY == 1 )

    void vEventGroupSetNumber( void * xEventGroup,
                               UBaseType_t uxEventGroupNumber )
    {
        traceENTER_vEventGroupSetNumber( xEventGroup, uxEventGroupNumber );

        /* MISRA Ref 11.5.2 [Opaque pointer] */
        /* More details at: https://github.com/FreeRTOS/FreeRTOS-Kernel/blob/main/MISRA.md#rule-115 */
        /* coverity[misra_c_2012_rule_11_5_violation] */
        ( ( EventGroup_t * ) xEventGroup )->uxEventGroupNumber = uxEventGroupNumber;

        traceRETURN_vEventGroupSetNumber();
    }

#endif /* configUSE_TRACE_FACILITY */
/*-----------------------------------------------------------*/