1 /*
2  * Trace Recorder for Tracealyzer v4.5.1
3  * Copyright 2021 Percepio AB
4  * www.percepio.com
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  *
8  * The SAFERTOS specific definitions of the trace recorder
9  */
10 
11 #ifndef TRC_KERNEL_PORT_H
12 #define TRC_KERNEL_PORT_H
13 
14 #include "SafeRTOS.h"	/* Defines configUSE_TRACE_FACILITY */
15 #include "trcPortDefines.h"
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
/* The recorder is compiled in only when the kernel's trace facility is
 * enabled (configUSE_TRACE_FACILITY comes from SafeRTOS.h, included above). */
#if ( configUSE_TRACE_FACILITY == 1 )
#define TRC_USE_TRACEALYZER_RECORDER 1
#else
#define TRC_USE_TRACEALYZER_RECORDER 0
#endif

#if (TRC_USE_TRACEALYZER_RECORDER == 1)

/* Platform configuration identifier/version reported to the Tracealyzer host
 * tool so it can select the matching platform definition. */
#define TRC_PLATFORM_CFG ""
#define TRC_PLATFORM_CFG_MAJOR 1
#define TRC_PLATFORM_CFG_MINOR 0
#define TRC_PLATFORM_CFG_PATCH 0
33 
/* Object-validity checks: handle is non-NULL AND passes the kernel's own
 * handle validation. The macro arguments are parenthesized in the NULL
 * comparison so an expression argument (e.g. "a ? b : c") cannot change the
 * intended precedence. */
#define trcIS_TASK_VALID( pxTask ) (((pxTask) != NULL) && xPortIsTaskHandleValid(pxTask))
#define trcIS_QUEUE_VALID( pxQueue ) (((pxQueue) != NULL) && prvIS_QUEUE_VALID(pxQueue))
#define trcIS_TIMER_VALID( pxTimer ) (((pxTimer) != NULL) && xTimerIsHandleValid(pxTimer))
#define trcIS_EVENTGROUP_VALID( pxEventGroup ) (((pxEventGroup) != NULL) && prvIS_EVENT_GROUP_VALID(((eventGroupType*)(pxEventGroup))))
38 
/* Returns non-zero if the SAFERTOS scheduler is currently suspended. */
unsigned char prvTraceIsSchedulerSuspended(void);

/* Returns the handle (TCB pointer) of the currently executing task.
 * Declared with an explicit (void) parameter list: an empty list in C
 * declares a function with *unspecified* arguments, defeating type checking. */
void* prvGetCurrentTaskHandle(void);

/* Accessors for the 32-bit trace number stored in each kernel object.
 * As used below, the low 16 bits hold the trace handle and the high 16 bits
 * the filter group (see the TRACE_GET_LOW16/HIGH16 macros further down). */
uint32_t prvGetTaskNumber(void* pxObject);
uint32_t prvGetQueueNumber(void* pxObject);
uint32_t prvGetTimerNumber(void* pxObject);
uint32_t prvGetEventGroupNumber(void* pxObject);

/* Returns the queue type of the handle; used as index into TraceQueueClassTable
 * to map a kernel queue type to a trace object class (queue/semaphore/mutex). */
uint8_t prvTraceGetQueueType(void* handle);
47 
48 /*******************************************************************************
49  * Note: Setting names for event groups is difficult to support, this has been
50  * excluded intentionally. This since we don't know if event_groups.c is
51  * included in the build, so referencing it from the recorder may cause errors.
52  ******************************************************************************/
53 
54 #define TRACE_KERNEL_VERSION 0x6AA6
55 #define TRACE_TICK_RATE_HZ configTICK_RATE_HZ
56 #define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ
57 #define TRACE_GET_CURRENT_TASK() prvGetCurrentTaskHandle()
58 
59 #define TRACE_GET_OS_TICKS() (uiTraceTickCount) /* Streaming only */
60 
/* Per-hardware-port critical sections used by the recorder. Each port defines
 * the ALLOC/ENTER/EXIT triple together; ALLOC declares any local state the
 * other two need.
 * NOTE(review): the identifier "__irq_status" is in the implementation's
 * reserved namespace (double underscore); kept as-is since it is this
 * recorder's established convention across ports. */
#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_ARM_Cortex_M)

	#define TRACE_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __get_PRIMASK(); __set_PRIMASK(1);} /* PRIMASK disables ALL interrupts - allows for tracing in any ISR */
	#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(__irq_status);}
#endif

#if ((TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_ARM_CORTEX_A9) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Renesas_RX600) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_MICROCHIP_PIC24_PIC32))
	/* These ports reuse the kernel's ISR-safe interrupt mask/unmask pair. */
	#define TRACE_ALLOC_CRITICAL_SECTION() int __irq_status;
	#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
	#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Win32)
    /* In the Win32 port, there are no real interrupts, so we can use the normal critical sections */
	#define TRACE_ALLOC_CRITICAL_SECTION()
	#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
	#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_TEXAS_INSTRUMENTS_TMS570_RM48)
	/* This port provides its critical section as external functions; no
	 * per-call-site local state is needed, hence the empty ALLOC macro. */
	extern void prvTraceEnterCritical( void );
	extern void prvTraceExitCritical ( void );
	#define TRACE_ALLOC_CRITICAL_SECTION()
	#define TRACE_ENTER_CRITICAL_SECTION() prvTraceEnterCritical()
	#define TRACE_EXIT_CRITICAL_SECTION() prvTraceExitCritical()
#endif

#ifndef TRACE_ENTER_CRITICAL_SECTION
	#error "This hardware port has no definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
#endif
92 
/* Current event filter state, defined in the recorder core. */
extern uint16_t CurrentFilterMask;

extern uint16_t CurrentFilterGroup;

/* Each kernel object carries a 32-bit trace number whose HIGH 16 bits hold
 * the object's filter group (the LOW 16 bits hold the trace handle, see the
 * TRACE_*_NUMBER macros below). The SET macros write the high half in place.
 * pxObject is parenthesized inside the casts so the cast binds to the whole
 * argument expression, not just its first token. */
#define TRACE_GET_TASK_FILTER(pxObject) TRACE_GET_HIGH16(prvGetTaskNumber(pxObject))
#define TRACE_SET_TASK_FILTER(pxObject, group) ((xTCB*)(pxObject))->uxTaskNumber = TRACE_SET_HIGH16(((xTCB*)(pxObject))->uxTaskNumber, group)

#define TRACE_GET_QUEUE_FILTER(pxObject) TRACE_GET_HIGH16(prvGetQueueNumber(pxObject))
#define TRACE_SET_QUEUE_FILTER(pxObject, group) ((xQUEUE*)(pxObject))->uxQueueNumber = TRACE_SET_HIGH16(((xQUEUE*)(pxObject))->uxQueueNumber, group)

#define TRACE_GET_TIMER_FILTER(pxObject) TRACE_GET_HIGH16(prvGetTimerNumber(pxObject))
#define TRACE_SET_TIMER_FILTER(pxObject, group) ((timerControlBlockType*)(pxObject))->uxTimerNumber = TRACE_SET_HIGH16(((timerControlBlockType*)(pxObject))->uxTimerNumber, group)

#define TRACE_GET_EVENTGROUP_FILTER(pxObject) TRACE_GET_HIGH16(prvGetEventGroupNumber(pxObject))
#define TRACE_SET_EVENTGROUP_FILTER(pxObject, group) ((eventGroupType*)(pxObject))->uxEventGroupNumber = TRACE_SET_HIGH16(((eventGroupType*)(pxObject))->uxEventGroupNumber, group)

/* Generic dispatchers: CLASS is a literal token (TASK, QUEUE, TIMER or
 * EVENTGROUP) pasted into the specific macro name above. */
#define TRACE_GET_OBJECT_FILTER(CLASS, pxObject) TRACE_GET_##CLASS##_FILTER(pxObject)
#define TRACE_SET_OBJECT_FILTER(CLASS, pxObject, group) TRACE_SET_##CLASS##_FILTER(pxObject, group)
111 
112 /******************************************************************************/
113 /*** Definitions for Snapshot mode ********************************************/
114 /******************************************************************************/
115 #if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT)
116 
117 /*******************************************************************************
118 * vTraceSetQueueName(void* object, const char* name)
119 *
120 * Parameter object: pointer to the Queue that shall be named
121 * Parameter name: the name to set (const string literal)
122 *
123 * Sets a name for Queue objects for display in Tracealyzer.
124 ******************************************************************************/
125 #define vTraceSetQueueName(object, name) prvTraceSetObjectName(TRACE_CLASS_QUEUE, TRACE_GET_QUEUE_NUMBER(object), name)
126 
127 /*******************************************************************************
128 * vTraceSetSemaphoreName(void* object, const char* name)
129 *
130 * Parameter object: pointer to the Semaphore that shall be named
131 * Parameter name: the name to set (const string literal)
132 *
133 * Sets a name for Semaphore objects for display in Tracealyzer.
134 ******************************************************************************/
135 #define vTraceSetSemaphoreName(object, name) prvTraceSetObjectName(TRACE_CLASS_SEMAPHORE, TRACE_GET_QUEUE_NUMBER(object), name)
136 
137 /*******************************************************************************
138 * vTraceSetMutexName(void* object, const char* name)
139 *
140 * Parameter object: pointer to the Mutex that shall be named
141 * Parameter name: the name to set (const string literal)
142 *
143 * Sets a name for Mutex objects for display in Tracealyzer.
144 ******************************************************************************/
145 #define vTraceSetMutexName(object, name) prvTraceSetObjectName(TRACE_CLASS_MUTEX, TRACE_GET_QUEUE_NUMBER(object), name)
146 
147 /*******************************************************************************
148 * vTraceSetEventGroupName(void* object, const char* name)
149 *
150 * Parameter object: pointer to the EventGroup that shall be named
151 * Parameter name: the name to set (const string literal)
152 *
153 * Sets a name for EventGroup objects for display in Tracealyzer.
154 ******************************************************************************/
155 #define vTraceSetEventGroupName(object, name) prvTraceSetObjectName(TRACE_CLASS_EVENTGROUP, TRACE_GET_EVENTGROUP_NUMBER(object), name)
156 
/*** The object classes *******************************************************/

/* Trace object classes. The numeric values are used as array indices and as
 * the low bits of event codes - do not reorder or renumber. */
#define TRACE_NCLASSES 7
#define TRACE_CLASS_QUEUE		((traceObjectClass)0)
#define TRACE_CLASS_TASK		((traceObjectClass)1)
#define TRACE_CLASS_ISR			((traceObjectClass)2)
#define TRACE_CLASS_TIMER		((traceObjectClass)3)
#define TRACE_CLASS_EVENTGROUP	((traceObjectClass)4)
#define TRACE_CLASS_SEMAPHORE	((traceObjectClass)5)
#define TRACE_CLASS_MUTEX		((traceObjectClass)6)

/*** Definitions for Object Table ********************************************/
/* Total number of trackable kernel objects; the TRC_CFG_N* limits come from
 * the recorder configuration. */
#define TRACE_KERNEL_OBJECT_COUNT (TRC_CFG_NQUEUE + TRC_CFG_NTASK + TRC_CFG_NISR + TRC_CFG_NTIMER + TRC_CFG_NEVENTGROUP + TRC_CFG_NSEMAPHORE + TRC_CFG_NMUTEX)

/* Each PropertyTableSize* below is the per-object slot size: the name buffer
 * plus a fixed number of property bytes. */

/* Queue properties (except name):	current number of message in queue */
#define PropertyTableSizeQueue		(TRC_CFG_NAME_LEN_QUEUE + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore	(TRC_CFG_NAME_LEN_SEMAPHORE + 1)

/* Mutex properties (except name):	owner (task handle, 0 = free) */
#define PropertyTableSizeMutex		(TRC_CFG_NAME_LEN_MUTEX + 1)

/* Task properties (except name):	Byte 0: Current priority
									Byte 1: state (if already active)
									Byte 2: legacy, not used
									Byte 3: legacy, not used */
#define PropertyTableSizeTask		(TRC_CFG_NAME_LEN_TASK + 4)

/* ISR properties:					Byte 0: priority
									Byte 1: state (if already active) */
#define PropertyTableSizeISR		(TRC_CFG_NAME_LEN_ISR + 2)

/* Timer properties (except name):			Byte 0: state (unused for now) */
#define PropertyTableSizeTimer		(TRC_CFG_NAME_LEN_TIMER + 1)

/* Event group properties (except name):	Byte 0-3: state (unused for now)*/
#define PropertyTableSizeEventGroup	(TRC_CFG_NAME_LEN_EVENTGROUP + 4)


/* The layout of the byte array representing the Object Property Table.
 * Each StartIndex* chains off the previous one, so the order here is
 * load-bearing: Queue, Task, ISR, Timer, EventGroup, Semaphore, Mutex. */
#define StartIndexQueue			0
#define StartIndexTask			(StartIndexQueue		+ (TRC_CFG_NQUEUE		* PropertyTableSizeQueue))
#define StartIndexISR			(StartIndexTask			+ (TRC_CFG_NTASK		* PropertyTableSizeTask))
#define StartIndexTimer			(StartIndexISR			+ (TRC_CFG_NISR			* PropertyTableSizeISR))
#define StartIndexEventGroup	(StartIndexTimer		+ (TRC_CFG_NTIMER		* PropertyTableSizeTimer))
#define StartIndexSemaphore		(StartIndexEventGroup	+ (TRC_CFG_NEVENTGROUP	* PropertyTableSizeEventGroup))
#define StartIndexMutex			(StartIndexSemaphore	+ (TRC_CFG_NSEMAPHORE	* PropertyTableSizeSemaphore))

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE	(StartIndexMutex + TRC_CFG_NMUTEX * PropertyTableSizeMutex)
208 
/* Tick events rely on the periodic OS tick; tickless idle breaks that
 * assumption, so the combination is rejected at compile time. */
#if (TRC_CFG_INCLUDE_OSTICK_EVENTS == 1 && configUSE_TICKLESS_IDLE != 0)
#error "OS Tick events can not be traced in combination with tickless idle!"
#endif

/* Flag to tell the context of tracePEND_FUNC_CALL_FROM_ISR */
extern int uiInEventGroupSetBitsFromISR;

/* Initialization of the object property table */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g, prvTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object class */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);

/******************************************************************************
 * TraceQueueClassTable
 * Translates a QueueType into trace objects classes (TRACE_CLASS_).
 * Has one entry for each QueueType, gives TRACE_CLASS ID.
 ******************************************************************************/
 extern traceObjectClass TraceQueueClassTable[3];
231 
/*** Event codes for snapshot mode - must match Tracealyzer config files ******/
/* NOTE: These event codes form a contiguous numbering scheme that the
 * Tracealyzer host tool depends on. Do not reorder, renumber or insert codes.
 * Each group base is defined relative to the previous group; the hex comment
 * at the end of each line gives the resulting value. Groups whose events
 * carry an object class reserve 8 codes (the low 3 bits hold the class). */

#define NULL_EVENT					(0x00UL)								/*0x00*/

/*******************************************************************************
 * EVENTGROUP_DIV
 *
 * Miscellaneous events.
 ******************************************************************************/
#define EVENTGROUP_DIV				(NULL_EVENT + 1UL)						/*0x01*/
#define DIV_XPS						(EVENTGROUP_DIV + 0UL)					/*0x01*/
#define DIV_TASK_READY				(EVENTGROUP_DIV + 1UL)					/*0x02*/
#define DIV_NEW_TIME				(EVENTGROUP_DIV + 2UL)					/*0x03*/

/*******************************************************************************
 * EVENTGROUP_TS
 *
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS				(EVENTGROUP_DIV + 3UL)				/*0x04*/
#define TS_ISR_BEGIN				(EVENTGROUP_TS + 0UL)					/*0x04*/
#define TS_ISR_RESUME				(EVENTGROUP_TS + 1UL)					/*0x05*/
#define TS_TASK_BEGIN				(EVENTGROUP_TS + 2UL)					/*0x06*/
#define TS_TASK_RESUME				(EVENTGROUP_TS + 3UL)					/*0x07*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS	(EVENTGROUP_TS + 4UL)			/*0x08*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying properties of deleted objects
 * The handle and object class of the closed object is not stored in this event,
 * but is assumed to be the same as in the preceding CLOSE event. Thus, these
 * two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., number of
 * buffered messages/signals).
 * When actors are closed, arg1 is priority, arg2 is handle of the "instance
 * finish" event, and arg3 is event code of the "instance finish" event.
 * In this case, the lower three bits is the object class of the instance finish
 * handle. The lower three bits are not used (always zero) when queues are
 * closed since the queue type is given in the previous OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS	(EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + 8UL)	/*0x10*/

/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log Kernel object creations.
 * The lower three bits in the event code gives the object class, i.e., type of
 * create operation (task, queue, semaphore, etc).
 ******************************************************************************/
#define EVENTGROUP_CREATE_OBJ_TRCSUCCESS	(EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + 8UL)	/*0x18*/

/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes The lower three bits in the event code gives the
 * object class, i.e., what type of object that is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_TRCSUCCESS	(EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 8UL)		/*0x20*/

/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code gives the
 * object class, i.e., what type of object that is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_TRCSUCCESS	(EVENTGROUP_SEND_TRCSUCCESS + 8UL)			/*0x28*/

/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS \
									(EVENTGROUP_RECEIVE_TRCSUCCESS + 8UL)		/*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS \
							(EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 8UL)		/*0x38*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_OBJ_TRCFAILED	(EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 8UL)	/*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_TRCFAILED		(EVENTGROUP_CREATE_OBJ_TRCFAILED + 8UL)	/*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_TRCFAILED	 (EVENTGROUP_SEND_TRCFAILED + 8UL)			/*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 8UL)	/*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED \
								 (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 8UL)	/*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_TRCBLOCK \
							(EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 8UL)		/*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_TRCBLOCK	(EVENTGROUP_RECEIVE_TRCBLOCK + 8UL)			/*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCSUCCESS	(EVENTGROUP_SEND_TRCBLOCK + 8UL)				/*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_OBJ_TRCSUCCESS	(EVENTGROUP_PEEK_TRCSUCCESS + 8UL)		/*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS	(EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 8UL)			/*0x88*/
#define TASK_DELAY_UNTIL	(EVENTGROUP_OTHERS + 0UL)						/*0x88*/
#define TASK_DELAY			(EVENTGROUP_OTHERS + 1UL)						/*0x89*/
#define TASK_SUSPEND		(EVENTGROUP_OTHERS + 2UL)						/*0x8A*/
#define TASK_RESUME			(EVENTGROUP_OTHERS + 3UL)						/*0x8B*/
#define TASK_RESUME_FROM_ISR	(EVENTGROUP_OTHERS + 4UL)					/*0x8C*/
#define TASK_PRIORITY_SET		(EVENTGROUP_OTHERS + 5UL)					/*0x8D*/
#define TASK_PRIORITY_INHERIT	(EVENTGROUP_OTHERS + 6UL)					/*0x8E*/
#define TASK_PRIORITY_DISINHERIT	(EVENTGROUP_OTHERS + 7UL)				/*0x8F*/

/* User events */
#define EVENTGROUP_USEREVENT (EVENTGROUP_OTHERS + 8UL)						/*0x90*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0UL)

/* Allow for 0-15 arguments (the number of args is added to event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15UL)						/*0x9F*/

/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
 * in the other events, depending on the bytes available in the event struct.
 * If the time since the last event (the DTS) is larger than allowed for by
 * the DTS field of the current event, an XTS event is inserted immediately
 * before the original event. The XTS event contains up to 3 additional bytes
 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
 * bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) DTS data in the original event,
 * which means a limit of 0xFF (255UL). The XTS16 is used when the original event
 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535UL).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * at most 65535. If your time base has a higher frequency, you can define
 * the TRACE
 ******************************************************************************/

#define EVENTGROUP_SYS									(USER_EVENT_LAST + 1UL)						/*0xA0*/
#define XTS8											(EVENTGROUP_SYS + 0UL)						/*0xA0*/
#define XTS16											(EVENTGROUP_SYS + 1UL)						/*0xA1*/
#define EVENT_BEING_WRITTEN								(EVENTGROUP_SYS + 2UL)						/*0xA2*/
#define RESERVED_DUMMY_CODE								(EVENTGROUP_SYS + 3UL)						/*0xA3*/
#define LOW_POWER_BEGIN									(EVENTGROUP_SYS + 4UL)						/*0xA4*/
#define LOW_POWER_END									(EVENTGROUP_SYS + 5UL)						/*0xA5*/
#define XID												(EVENTGROUP_SYS + 6UL)						/*0xA6*/
#define XTS16L											(EVENTGROUP_SYS + 7UL)						/*0xA7*/

#define TASK_INSTANCE_FINISHED_NEXT_KSE					(EVENTGROUP_SYS + 8UL)						/*0xA8*/
#define TASK_INSTANCE_FINISHED_DIRECT					(EVENTGROUP_SYS + 9UL)						/*0xA9*/

/* Timer events (including the TRCFAILED variants and the timer-task's
 * processing of deferred commands). */
#define EVENTGROUP_TIMER								(EVENTGROUP_SYS + 10UL)						/*0xAA*/
#define TIMER_CREATE									(EVENTGROUP_TIMER + 0UL)					/*0xAA*/
#define TIMER_DELETE_OBJ								(EVENTGROUP_TIMER + 1UL)					/*0xAB*/
#define TIMER_START										(EVENTGROUP_TIMER + 2UL)					/*0xAC*/
#define TIMER_STOP										(EVENTGROUP_TIMER + 3UL)					/*0xAD*/
#define TIMER_CHANGE_PERIOD								(EVENTGROUP_TIMER + 4UL)					/*0xAE*/
#define TIMER_START_FROM_ISR							(EVENTGROUP_TIMER + 5UL)					/*0xAF*/
#define TIMER_STOP_FROM_ISR								(EVENTGROUP_TIMER + 6UL)					/*0xB0*/
#define TIMER_CHANGE_PERIOD_FROM_ISR					(EVENTGROUP_TIMER + 7UL)					/*0xB1*/

#define TIMER_CREATE_TRCFAILED							(EVENTGROUP_TIMER + 8UL)					/*0xB2*/
#define TIMER_DELETE_OBJ_TRCFAILED						(EVENTGROUP_TIMER + 9UL)					/*0xB3*/
#define TIMER_START_TRCFAILED							(EVENTGROUP_TIMER + 10UL)					/*0xB4*/
#define TIMER_STOP_TRCFAILED							(EVENTGROUP_TIMER + 11UL)					/*0xB5*/
#define TIMER_CHANGE_PERIOD_TRCFAILED					(EVENTGROUP_TIMER + 12UL)					/*0xB6*/
#define TIMER_START_FROM_ISR_TRCFAILED					(EVENTGROUP_TIMER + 13UL)					/*0xB7*/
#define TIMER_STOP_FROM_ISR_TRCFAILED					(EVENTGROUP_TIMER + 14UL)					/*0xB8*/
#define TIMER_CHANGE_PERIOD_FROM_ISR_TRCFAILED			(EVENTGROUP_TIMER + 15UL)					/*0xB9*/

#define TIMER_PROCESS_START								(EVENTGROUP_TIMER + 16UL)					/*0xBA*/
#define TIMER_PROCESS_STOP								(EVENTGROUP_TIMER + 17UL)					/*0xBB*/
#define TIMER_PROCESS_CHANGE_PERIOD						(EVENTGROUP_TIMER + 18UL)					/*0xBC*/
#define TIMER_PROCESS_DELETE							(EVENTGROUP_TIMER + 19UL)					/*0xBD*/
#define TIMER_CALLBACK									(EVENTGROUP_TIMER + 20UL)					/*0xBE*/

/* Event group events. */
#define EVENTGROUP_EG									(EVENTGROUP_TIMER + 21UL)					/*0xBF*/
#define EVENT_GROUP_CREATE								(EVENTGROUP_EG + 0UL)						/*0xBF*/
#define EVENT_GROUP_CREATE_TRCFAILED					(EVENTGROUP_EG + 1UL)						/*0xC0*/
#define EVENT_GROUP_DELETE_OBJ							(EVENTGROUP_EG + 2UL)						/*0xC1*/
#define EVENT_GROUP_DELETE_OBJ_TRCFAILED				(EVENTGROUP_EG + 3UL)						/*0xC2*/
#define EVENT_GROUP_SET_BITS							(EVENTGROUP_EG + 4UL)						/*0xC3*/
#define EVENT_GROUP_SET_BITS_TRCFAILED					(EVENTGROUP_EG + 5UL)						/*0xC4*/
#define EVENT_GROUP_SET_BITS_FROM_ISR					(EVENTGROUP_EG + 6UL)						/*0xC5*/
#define EVENT_GROUP_SET_BITS_FROM_ISR_TRCFAILED			(EVENTGROUP_EG + 7UL)						/*0xC6*/
#define EVENT_GROUP_WAIT_BITS							(EVENTGROUP_EG + 8UL)						/*0xC7*/
#define EVENT_GROUP_WAIT_BITS_TRCBLOCK					(EVENTGROUP_EG + 9UL)						/*0xC8*/
#define EVENT_GROUP_WAIT_BITS_TRCFAILED					(EVENTGROUP_EG + 10UL)						/*0xC9*/
#define EVENT_GROUP_CLEAR_BITS							(EVENTGROUP_EG + 11UL)						/*0xCA*/
#define EVENT_GROUP_CLEAR_BITS_TRCFAILED				(EVENTGROUP_EG + 12UL)						/*0xCB*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR					(EVENTGROUP_EG + 13UL)						/*0xCC*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR_TRCFAILED		(EVENTGROUP_EG + 14UL)						/*0xCD*/

/* Task notification events. */
#define TRACE_TASK_NOTIFY_GROUP							(EVENTGROUP_EG + 15UL)						/*0xCE*/
#define TRACE_TASK_NOTIFY_SEND_TRCSUCCESS				(TRACE_TASK_NOTIFY_GROUP + 0UL)				/*0xCE*/
#define TRACE_TASK_NOTIFY_SEND_TRCFAILED				(TRACE_TASK_NOTIFY_GROUP + 1UL)				/*0xCF*/
#define TRACE_TASK_NOTIFY_SEND_FROM_ISR_TRCSUCCESS		(TRACE_TASK_NOTIFY_GROUP + 2UL)				/*0xD0*/
#define TRACE_TASK_NOTIFY_SEND_FROM_ISR_TRCFAILED		(TRACE_TASK_NOTIFY_GROUP + 3UL)				/*0xD1*/
#define TRACE_TASK_NOTIFY_WAIT_TRCSUCCESS				(TRACE_TASK_NOTIFY_GROUP + 4UL)				/*0xD2*/
#define TRACE_TASK_NOTIFY_WAIT_TRCBLOCK					(TRACE_TASK_NOTIFY_GROUP + 5UL)				/*0xD3*/
#define TRACE_TASK_NOTIFY_WAIT_TRCFAILED				(TRACE_TASK_NOTIFY_GROUP + 6UL)				/*0xD4*/

/* Additional queue events (peek failures and send-to-front variants). */
#define EVENTGROUP_QUEUE_EXTRAS							(TRACE_TASK_NOTIFY_GROUP + 7UL)				/*0xD5*/
#define TRACE_QUEUE_PEEK_TRCBLOCK						(EVENTGROUP_QUEUE_EXTRAS + 0UL)				/*0xD5*/
#define TRACE_QUEUE_PEEK_TRCFAILED						(EVENTGROUP_QUEUE_EXTRAS + 1UL)				/*0xD6*/
#define TRACE_QUEUE_SEND_TO_FRONT_TRCSUCCESS			(EVENTGROUP_QUEUE_EXTRAS + 2UL)				/*0xD7*/
#define TRACE_QUEUE_SEND_TO_FRONT_TRCBLOCK				(EVENTGROUP_QUEUE_EXTRAS + 3UL)				/*0xD8*/
#define TRACE_QUEUE_SEND_TO_FRONT_TRCFAILED				(EVENTGROUP_QUEUE_EXTRAS + 4UL)				/*0xD9*/
#define TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCSUCCESS	(EVENTGROUP_QUEUE_EXTRAS + 5UL)				/*0xDA*/
#define TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCFAILED	(EVENTGROUP_QUEUE_EXTRAS + 6UL)				/*0xDB*/

 /* LAST EVENT (0xDB) */
465 
466  /****************************
467  * MACROS TO GET TRACE CLASS *
468  ****************************/
469 #define TRACE_GET_TRACE_CLASS_FROM_TASK_CLASS(kernelClass) (TRACE_CLASS_TASK)
470 #define TRACE_GET_TRACE_CLASS_FROM_TASK_OBJECT(pxObject) (TRACE_CLASS_TASK)
471 
472 #define TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(kernelClass) (TraceQueueClassTable[kernelClass])
473 #define TRACE_GET_TRACE_CLASS_FROM_QUEUE_OBJECT(pxObject) (TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(prvTraceGetQueueType(pxObject)))
474 
475 #define TRACE_GET_TRACE_CLASS_FROM_TIMER_CLASS(kernelClass) (TRACE_CLASS_TIMER)
476 #define TRACE_GET_TRACE_CLASS_FROM_TIMER_OBJECT(pxObject) (TRACE_CLASS_TIMER)
477 
478 #define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_CLASS(kernelClass) (TRACE_CLASS_EVENTGROUP)
479 #define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_OBJECT(pxObject) (TRACE_CLASS_EVENTGROUP)
480 
481 /* Generic versions */
482 #define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_CLASS(kernelClass)
483 #define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_OBJECT(pxObject)
484 
485 /******************************
486 * MACROS TO GET OBJECT NUMBER *
487 ******************************/
488 #define TRACE_GET_TASK_NUMBER(pxTCB) ((traceHandle)TRACE_GET_LOW16(prvGetTaskNumber(pxTCB)))
489 #define TRACE_SET_TASK_NUMBER(pxTCB) ((xTCB*)pxTCB)->uxTaskNumber = TRACE_SET_LOW16(((xTCB*)pxTCB)->uxTaskNumber, prvTraceGetObjectHandle(TRACE_CLASS_TASK));
490 
491 #define TRACE_GET_QUEUE_NUMBER(q) ((traceHandle)TRACE_GET_LOW16(prvGetQueueNumber(q)))
492 #define TRACE_SET_QUEUE_NUMBER(q) ((xQUEUE*)q)->uxQueueNumber = TRACE_SET_LOW16(((xQUEUE*)q)->uxQueueNumber, prvTraceGetObjectHandle(TRACE_CLASS_QUEUE));
493 
494 #define TRACE_GET_TIMER_NUMBER(tmr) ((traceHandle)TRACE_GET_LOW16(prvGetTimerNumber(tmr)))
495 #define TRACE_SET_TIMER_NUMBER(tmr) ((timerControlBlockType*)tmr)->uxTimerNumber = TRACE_SET_LOW16(((timerControlBlockType*)tmr)->uxTimerNumber, prvTraceGetObjectHandle(TRACE_CLASS_TIMER));
496 
497 #define TRACE_GET_EVENTGROUP_NUMBER(eg) ((traceHandle)TRACE_GET_LOW16(prvGetEventGroupNumber(eg)))
498 #define TRACE_SET_EVENTGROUP_NUMBER(eg) ((eventGroupType*)eg)->uxEventGroupNumber = TRACE_SET_LOW16(((eventGroupType*)eg)->uxEventGroupNumber, prvTraceGetObjectHandle(TRACE_CLASS_EVENTGROUP));
499 
500 /* Generic versions */
501 #define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) TRACE_GET_##CLASS##_NUMBER(pxObject)
502 #define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) TRACE_SET_##CLASS##_NUMBER(pxObject)
503 
504 /******************************
505 * MACROS TO GET EVENT CODES   *
506 ******************************/
507 #define TRACE_GET_TASK_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(TASK, kernelClass))
508 #define TRACE_GET_QUEUE_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(QUEUE, kernelClass))
509 #define TRACE_GET_TIMER_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
510 #define TRACE_GET_EVENTGROUP_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
511 
512 #define TRACE_GET_TASK_OBJECT_EVENT_CODE(SERVICE, RESULT, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(TASK, pxTCB))
513 #define TRACE_GET_QUEUE_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxObject))
514 #define TRACE_GET_TIMER_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
515 #define TRACE_GET_EVENTGROUP_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
516 
517 /* Generic versions */
518 #define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) TRACE_GET_##CLASS##_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass)
519 #define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) TRACE_GET_##CLASS##_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject)
520 
521 /******************************
522 * SPECIAL MACROS FOR TASKS    *
523 ******************************/
524 #define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
525 #define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcNameOfTask)
526 
/*** The trace macros for snapshot mode **************************************/
/* Each hook below is #undef'd first so these definitions override any
 * defaults; the kernel invokes them at the corresponding trace points. */


/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks will appear "compressed" on the time line.*/
#define traceINCREASE_TICK_COUNT( xCount ) { TRC_DWT_CYCLES_ADDED += (xCount * (TRACE_CPU_CLOCK_HZ / TRACE_TICK_RATE_HZ)); }

/* Called for each task that becomes ready */
#if (TRC_CFG_INCLUDE_READY_EVENTS == 1)
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);
#else /* (TRC_CFG_INCLUDE_READY_EVENTS == 1) */
/* Ready events disabled: hook expands to nothing. */
#define traceMOVED_TASK_TO_READY_STATE( pxTCB )
#endif /* (TRC_CFG_INCLUDE_READY_EVENTS == 1) */

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
/* The NEW_TIME event is only stored while the scheduler is running, and the
 * tick counter is advanced unless ticks were missed during suspension. */
#undef traceTASK_INCREMENT_TICK
#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( portUnsignedBaseType ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended == ( portUnsignedBaseType ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); }

/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK());

/* Called on vTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	trcKERNEL_HOOKS_TASK_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, TASK, pxNewTCB), TASK, pxNewTCB);

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, TASK, NOT_USED), TRACE_GET_CLASS_TRACE_CLASS(TASK, NOT_USED));

/* Called on vTaskDelete */
/* Wrapped in a critical section: the DELETE and the two OBJCLOSE events must
 * be stored back-to-back (see the OBJCLOSE_PROP note earlier in this file). */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, TASK, pxTaskToDelete), pxTaskToDelete); \
	TRACE_EXIT_CRITICAL_SECTION(); }
572 
573 #if (TRC_CFG_SCHEDULING_ONLY == 0)
574 
#if defined(configUSE_TICKLESS_IDLE)
#if (configUSE_TICKLESS_IDLE != 0)

/* Stores a low-power event (argument 0 = entering) and then disables
timestamping while the system is in tickless idle. */
#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		prvTraceStoreLowPower(0); \
		trace_disable_timestamp = 1; \
	}

/* Re-enables timestamping first, then stores a low-power event (argument 1 = leaving). */
#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		trace_disable_timestamp = 0; \
		prvTraceStoreLowPower(1); \
	}

#endif /* (configUSE_TICKLESS_IDLE != 0) */
#endif /* defined(configUSE_TICKLESS_IDLE)  */
596 
/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called from the special timer-only delay case; also marks the current
task instance as finished. */
#undef traceTASK_DELAY_SUSPEND
#define traceTASK_DELAY_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelay - note the use of variable xTicksToDelay, a local of
the enclosing kernel function at the expansion site. */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB, xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelayUntil - note the use of variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB, xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
619 
/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, QUEUE, pxNewQueue), QUEUE, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails. The queueType argument
selects the trace class (queue/semaphore/mutex) for the failure event.
A default clause is provided per MISRA C:2012 Rule 16.4; unknown kinds are
intentionally not traced. */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( pcBuffer, queueType, queueLength ) \
	switch( queueType ) \
	{ \
		case queueQUEUE_IS_QUEUE: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, traceQueueType), TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
			break; \
		case queueQUEUE_IS_SEMAPHORE: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, traceSemaphoreType), TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceSemaphoreType)); \
			break; \
		case queueQUEUE_IS_MUTEX: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, traceMutexType), TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceMutexType)); \
			break; \
		default: /* unknown queue kind - nothing to trace */ \
			break; \
	}
640 
641 /***************************************************/
642 
/* Called when a message is sent to a queue */
/* NOTE(review): xPosition is assumed to be a local of the enclosing kernel send
function, selecting back/front delivery - confirm at the expansion sites. */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(xPosition == queueSEND_TO_BACK ? TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxQueue) : TRACE_QUEUE_SEND_TO_FRONT_TRCSUCCESS, QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxItemsWaiting + 1));

/* Called when a message failed to be sent to a queue (timeout). Falls back to a
class-only event when the queue handle cannot be validated. */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(xPosition == queueSEND_TO_BACK ? TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, QUEUE, pxQueue) : TRACE_QUEUE_SEND_TO_FRONT_TRCFAILED, QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(xPosition == queueSEND_TO_BACK ? TRACE_GET_CLASS_EVENT_CODE(SEND, TRCFAILED, QUEUE, traceQueueType) : TRACE_QUEUE_SEND_TO_FRONT_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
	}

/* Called when the task is blocked due to a send operation on a full queue */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(xPosition == queueSEND_TO_BACK ? TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCBLOCK, QUEUE, pxQueue) : TRACE_QUEUE_SEND_TO_FRONT_TRCBLOCK, QUEUE, pxQueue);

/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxItemsWaiting - 1));

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_GET_CLASS_EVENT_CODE(RECEIVE, TRCFAILED, QUEUE, traceQueueType), TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
	}

/* Called when the task is blocked due to a receive operation on an empty queue.
Blocking on a mutex is not treated as an instance-finish point. */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \
	if (pxQueue->uxQueueType != queueQUEUE_IS_MUTEX) \
	{ \
		trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \
	}
692 
/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue);

/* Called when the task is blocked due to a peek operation on an empty queue;
also marks the current task instance as finished */
#undef traceBLOCKING_ON_QUEUE_PEEK
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_QUEUE_PEEK_TRCBLOCK, QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called when a peek operation on a queue fails (timeout), or falls back to a
class-only event when the queue handle cannot be validated */
#undef traceQUEUE_PEEK_FAILED
#define traceQUEUE_PEEK_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_QUEUE_PEEK_TRCFAILED, QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_QUEUE_PEEK_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
	}
715 
/* ISR variants of the queue hooks: these call the _FROM_ISR hook entry points,
which must be safe to use from interrupt context. */

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(xPosition == queueSEND_TO_BACK ? TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue) : TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCSUCCESS, QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxItemsWaiting + 1));

/* Called when a message send from interrupt context fails (since the queue was full) */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(xPosition == queueSEND_TO_BACK ? TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, QUEUE, pxQueue) : TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCFAILED, QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(xPosition == queueSEND_TO_BACK ? TRACE_GET_CLASS_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, QUEUE, traceQueueType) : TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
	}

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \
	trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxItemsWaiting - 1));

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(TRACE_GET_CLASS_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, QUEUE, traceQueueType), TRACE_GET_CLASS_TRACE_CLASS(QUEUE, traceQueueType)); \
	}
751 
/* Task priority-change and resume hooks; the first argument is the event code
identifying the operation, the last is the affected task / new priority. */

/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME_FROM_ISR, pxTaskToResume);
776 
/* Called in timer.c - xTimerCreate. On success also registers the timer's name
with the recorder. NOTE(review): relies on the kernel-local variable xReturn
being in scope at the expansion site - confirm against the timer module. */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	if (pdPASS == xReturn) \
	{ \
		trcKERNEL_HOOKS_OBJECT_CREATE(TIMER_CREATE, TIMER, tmr); \
		prvTraceSetObjectName(TRACE_CLASS_TIMER, TRACE_GET_TIMER_NUMBER(tmr), (const char*)tmr->pcTimerName); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TIMER_CREATE_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
	}
789 
/* Traces the outcome of sending a command to a timer (start/stop/change
period/delete). If the timer handle is invalid, a class-only TRCFAILED event is
stored instead. Default clauses added per MISRA C:2012 Rule 16.4; other command
IDs are intentionally not traced. */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	if (trcIS_TIMER_VALID(tmr)) \
	{ \
		switch(xCommandID) \
		{ \
			case timerCOMMAND_START: \
				trcKERNEL_HOOKS_KERNEL_SERVICE((xReturn == pdPASS) ? TIMER_START : TIMER_START_TRCFAILED, TIMER, tmr); \
				break; \
			case timerCOMMAND_STOP: \
				trcKERNEL_HOOKS_KERNEL_SERVICE((xReturn == pdPASS) ? TIMER_STOP : TIMER_STOP_TRCFAILED, TIMER, tmr); \
				break; \
			case timerCOMMAND_CHANGE_PERIOD: \
				trcKERNEL_HOOKS_KERNEL_SERVICE((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD : TIMER_CHANGE_PERIOD_TRCFAILED, TIMER, tmr); \
				break; \
			case timerCOMMAND_DELETE: \
				trcKERNEL_HOOKS_KERNEL_SERVICE((xReturn == pdPASS) ? TIMER_DELETE_OBJ : TIMER_DELETE_OBJ_TRCFAILED, TIMER, tmr); \
				break; \
			default: \
				break; \
		} \
	} \
	else \
	{ \
		switch(xCommandID) \
		{ \
			case timerCOMMAND_START: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TIMER_START_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			case timerCOMMAND_STOP: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TIMER_STOP_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			case timerCOMMAND_CHANGE_PERIOD: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TIMER_CHANGE_PERIOD_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			case timerCOMMAND_DELETE: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TIMER_DELETE_OBJ_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			default: \
				break; \
		} \
	}
828 
/* ISR-context variant of traceTIMER_COMMAND_SEND. Note that delete-from-ISR is
not traced here (only start/stop/change-period on the valid-handle path).
Default clauses added per MISRA C:2012 Rule 16.4. */
#undef traceTIMER_COMMAND_SEND_FROM_ISR
#define traceTIMER_COMMAND_SEND_FROM_ISR(tmr, xCommandID, xOptionalValue, xReturn) \
	if (trcIS_TIMER_VALID(tmr)) \
	{ \
		switch (xCommandID) \
		{ \
		case timerCOMMAND_START: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR((xReturn == pdPASS) ? TIMER_START_FROM_ISR : TIMER_START_FROM_ISR_TRCFAILED, TIMER, tmr); \
			break; \
		case timerCOMMAND_STOP: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR((xReturn == pdPASS) ? TIMER_STOP_FROM_ISR : TIMER_STOP_FROM_ISR_TRCFAILED, TIMER, tmr); \
			break; \
		case timerCOMMAND_CHANGE_PERIOD: \
			trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR((xReturn == pdPASS) ? TIMER_CHANGE_PERIOD_FROM_ISR : TIMER_CHANGE_PERIOD_FROM_ISR_TRCFAILED, TIMER, tmr); \
			break; \
		default: \
			break; \
		} \
	} \
	else \
	{ \
		switch(xCommandID) \
		{ \
			case timerCOMMAND_START: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(TIMER_START_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			case timerCOMMAND_STOP: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(TIMER_STOP_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			case timerCOMMAND_CHANGE_PERIOD: \
				trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(TIMER_CHANGE_PERIOD_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TIMER, NOT_USED)); \
				break; \
			default: \
				break; \
		} \
	}
861 
/* We currently don't do anything with these timer command process events
case timerCOMMAND_START: \
trcKERNEL_HOOKS_KERNEL_SERVICE(TIMER_PROCESS_START, TIMER, tmr); \
break; \
case timerCOMMAND_STOP: \
trcKERNEL_HOOKS_KERNEL_SERVICE(TIMER_PROCESS_STOP, TIMER, tmr); \
break; \
case timerCOMMAND_CHANGE_PERIOD: \
trcKERNEL_HOOKS_KERNEL_SERVICE(TIMER_PROCESS_CHANGE_PERIOD, TIMER, tmr); \
break;*/
/* Traces the timer task's processing of timer commands. Only the delete command
is traced (see the disabled cases above); the critical section keeps the delete
and obj-close events atomic. Default clause added per MISRA C:2012 Rule 16.4. */
#undef traceTIMER_COMMAND_PROCESS
#define traceTIMER_COMMAND_PROCESS( tmr, xCommandID, xOptionalValue ) \
	switch(xCommandID) \
	{ \
		case timerCOMMAND_DELETE: \
			{ \
				TRACE_ALLOC_CRITICAL_SECTION(); \
				TRACE_ENTER_CRITICAL_SECTION(); \
				trcKERNEL_HOOKS_OBJECT_DELETE(TIMER_PROCESS_DELETE, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), TIMER, tmr); \
				TRACE_EXIT_CRITICAL_SECTION(); \
			} \
			break; \
		default: \
			break; \
	}

/* Timer callback tracing is currently disabled; the hook is kept so the
expansion site compiles but produces no event. */
#undef traceTIMER_CALLBACK
#define traceTIMER_CALLBACK( tmr, callbackFunction ) \
	/*trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TIMER_CALLBACK, TIMER, tmr, (uint32_t)callbackFunction);*/
889 
/* Called when an event group is created */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	trcKERNEL_HOOKS_OBJECT_CREATE(EVENT_GROUP_CREATE, EVENTGROUP, eg);

/* Called when event group creation fails - no valid object, so only the class is logged */
#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(EVENT_GROUP_CREATE_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED));

/* Called when an event group is deleted; the critical section keeps the delete
and obj-close events atomic */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	{ \
		TRACE_ALLOC_CRITICAL_SECTION(); \
		TRACE_ENTER_CRITICAL_SECTION(); \
		trcKERNEL_HOOKS_OBJECT_DELETE(EVENT_GROUP_DELETE_OBJ, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP, eg); \
		TRACE_EXIT_CRITICAL_SECTION(); \
	}

/* Called when event group deletion fails; (eg) is intentionally unused since the
handle was not valid */
#undef traceEVENT_GROUP_DELETE_FAILED
#define traceEVENT_GROUP_DELETE_FAILED(eg) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(EVENT_GROUP_DELETE_OBJ_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED));

/* Called when bits are set in an event group */
#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_SET_BITS, EVENTGROUP, eg);

/* Called when setting bits fails; falls back to a class-only event for invalid handles */
#undef traceEVENT_GROUP_SET_BITS_FAILED
#define traceEVENT_GROUP_SET_BITS_FAILED(eg, bitsToSet) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_SET_BITS_TRCFAILED, EVENTGROUP, eg); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(EVENT_GROUP_SET_BITS_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED)); \
	}

/* ISR-safe variant of the set-bits hook */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(EVENT_GROUP_SET_BITS_FROM_ISR, EVENTGROUP, eg);

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR_FAILED
#define traceEVENT_GROUP_SET_BITS_FROM_ISR_FAILED(eg, bitsToSet) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(EVENT_GROUP_SET_BITS_FROM_ISR_TRCFAILED, EVENTGROUP, eg); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(EVENT_GROUP_SET_BITS_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED)); \
	}
940 
/* Called when bits are cleared in an event group */
#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_CLEAR_BITS, EVENTGROUP, eg);

/* Called when clearing bits fails; falls back to a class-only event for invalid handles */
#undef traceEVENT_GROUP_CLEAR_BITS_FAILED
#define traceEVENT_GROUP_CLEAR_BITS_FAILED(eg, bitsToClear) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_CLEAR_BITS_TRCFAILED, EVENTGROUP, eg); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(EVENT_GROUP_CLEAR_BITS_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED)); \
	}

/* ISR-safe variant of the clear-bits hook */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(EVENT_GROUP_CLEAR_BITS_FROM_ISR, EVENTGROUP, eg);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR_FAILED
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR_FAILED(eg, bitsToClear) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(EVENT_GROUP_CLEAR_BITS_FROM_ISR_TRCFAILED, EVENTGROUP, eg); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(EVENT_GROUP_CLEAR_BITS_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED)); \
	}

/* Called when a wait-for-bits operation blocks; also marks the current task
instance as finished */
#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor, timeout) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_WAIT_BITS_TRCBLOCK, EVENTGROUP, eg); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called when a wait-for-bits operation succeeds */
#undef traceEVENT_GROUP_WAIT_BITS
#define traceEVENT_GROUP_WAIT_BITS(eg, bitsToWaitFor, timeout) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_WAIT_BITS, EVENTGROUP, eg);

/* Called when a wait-for-bits operation fails; falls back to a class-only event
for invalid handles */
#undef traceEVENT_GROUP_WAIT_BITS_FAILED
#define traceEVENT_GROUP_WAIT_BITS_FAILED(eg, bitsToWaitFor, timeout) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(EVENT_GROUP_WAIT_BITS_TRCFAILED, EVENTGROUP, eg); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(EVENT_GROUP_WAIT_BITS_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(EVENTGROUP, NOT_USED)); \
	}
990 
/* Task notification hooks. NOTE(review): these take no arguments and rely on
kernel-local variables at the expansion site (pxCurrentTCB, xTicksToWait,
xTaskToNotify) - confirm the names against the SafeRTOS notification code. */
#undef traceTASK_NOTIFY_WAIT
#define traceTASK_NOTIFY_WAIT() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_WAIT_TRCSUCCESS, TASK, pxCurrentTCB, xTicksToWait);

#undef traceTASK_NOTIFY_WAIT_FAILED
#define traceTASK_NOTIFY_WAIT_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TASK, pxCurrentTCB, xTicksToWait);

/* Wait blocked - also marks the current task instance as finished */
#undef traceTASK_NOTIFY_WAIT_BLOCK
#define traceTASK_NOTIFY_WAIT_BLOCK() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_WAIT_TRCBLOCK, TASK, pxCurrentTCB, xTicksToWait); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

#undef traceTASK_NOTIFY_SEND
#define traceTASK_NOTIFY_SEND() \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_TASK_NOTIFY_SEND_TRCSUCCESS, TASK, xTaskToNotify);

/* Send failed - logs a class-only event when the target task handle is invalid */
#undef traceTASK_NOTIFY_SEND_FAILED
#define traceTASK_NOTIFY_SEND_FAILED() \
	if (trcIS_TASK_VALID(xTaskToNotify)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_TASK_NOTIFY_SEND_TRCFAILED, TASK, xTaskToNotify); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT(TRACE_TASK_NOTIFY_SEND_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TASK, NOT_USED)); \
	}

/* ISR-safe variants of the notify-send hooks */
#undef traceTASK_NOTIFY_SEND_FROM_ISR
#define traceTASK_NOTIFY_SEND_FROM_ISR() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_TASK_NOTIFY_SEND_FROM_ISR_TRCSUCCESS, TASK, xTaskToNotify);

#undef traceTASK_NOTIFY_SEND_FROM_ISR_FAILED
#define traceTASK_NOTIFY_SEND_FROM_ISR_FAILED() \
	if (trcIS_TASK_VALID(xTaskToNotify)) \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_TASK_NOTIFY_SEND_FROM_ISR_TRCFAILED, TASK, xTaskToNotify); \
	} \
	else \
	{ \
		trcKERNEL_HOOKS_KERNEL_SERVICE_NULL_OBJECT_FROM_ISR(TRACE_TASK_NOTIFY_SEND_FROM_ISR_TRCFAILED, TRACE_GET_CLASS_TRACE_CLASS(TASK, NOT_USED)); \
	}
1033 
1034 #endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */
1035 
1036 #endif /*#if TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT */
1037 
1038 /******************************************************************************/
1039 /*** Definitions for Streaming mode *******************************************/
1040 /******************************************************************************/
1041 #if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)
1042 
/* In streaming mode, all queue-class objects (queues, semaphores, mutexes) and
event groups store their names through the same symbol-table entry point. */
#define vTraceSetQueueName(object, name) vTraceStoreKernelObjectName(object, name)
#define vTraceSetSemaphoreName(object, name) vTraceStoreKernelObjectName(object, name)
#define vTraceSetMutexName(object, name) vTraceStoreKernelObjectName(object, name)
#define vTraceSetEventGroupName(object, name) vTraceStoreKernelObjectName(object, name)
1047 
1048 /*******************************************************************************
1049 * vTraceStoreKernelObjectName
1050 *
1051 * Set the name for a kernel object (defined by its address).
1052 ******************************************************************************/
1053 void vTraceStoreKernelObjectName(void* object, const char* name);
1054 
1055 /*******************************************************************************
1056 * prvIsNewTCB
1057 *
1058 * Tells if this task is already executing, or if there has been a task-switch.
1059 * Assumed to be called within a trace hook in kernel context.
1060 *******************************************************************************/
1061 uint32_t prvIsNewTCB(void* pNewTCB);
1062 
1063 /*************************************************************************/
1064 /* KERNEL SPECIFIC OBJECT CONFIGURATION									 */
1065 /*************************************************************************/
1066 
1067 /*******************************************************************************
1068  * The event codes - should match the offline config file.
1069  ******************************************************************************/
1070 
1071 /*** Event codes for streaming - should match the Tracealyzer config file *****/
/* 0x00 marks a null (unused) event slot */
#define PSF_EVENT_NULL_EVENT								0x00

#define PSF_EVENT_TRACE_START								0x01
#define PSF_EVENT_TS_CONFIG									0x02
#define PSF_EVENT_OBJ_NAME									0x03
#define PSF_EVENT_OBJ_DATA									0x04
#define PSF_EVENT_DEFINE_ISR								0x05

#define PSF_EVENT_TASK_READY								0x06

#define PSF_EVENT_NEW_TIME									0x07
#define PSF_EVENT_NEW_TIME_SCHEDULER_SUSPENDED				0x08
#define PSF_EVENT_ISR_BEGIN									0x09
#define PSF_EVENT_ISR_RESUME								0x0A
#define PSF_EVENT_TS_BEGIN									0x0B
#define PSF_EVENT_TS_RESUME									0x0C
#define PSF_EVENT_TASK_ACTIVATE								0x0D

#define PSF_EVENT_LOWPOWER_BEGIN							0x0E
#define PSF_EVENT_LOWPOWER_END								0x0F

#define PSF_EVENT_IFE_NEXT									0x10
#define PSF_EVENT_IFE_DIRECT								0x11

#define PSF_EVENT_TASK_CREATE								0x12
#define PSF_EVENT_TASK_CREATE_FAILED						0x13
#define PSF_EVENT_TASK_DELETE								0x14
#define PSF_EVENT_TASK_DELAY_UNTIL							0x15
#define PSF_EVENT_TASK_DELAY								0x16
#define PSF_EVENT_TASK_SUSPEND								0x17
#define PSF_EVENT_TASK_RESUME								0x18
#define PSF_EVENT_TASK_RESUME_FROMISR						0x19
#define PSF_EVENT_TASK_PRIORITY								0x1A
#define PSF_EVENT_TASK_PRIO_INHERIT							0x1B
#define PSF_EVENT_TASK_PRIO_DISINHERIT						0x1C

#define PSF_EVENT_TASK_NOTIFY_SEND							0x1D
#define PSF_EVENT_TASK_NOTIFY_SEND_FAILED					0x1E
#define PSF_EVENT_TASK_NOTIFY_SEND_FROM_ISR					0x1F
#define PSF_EVENT_TASK_NOTIFY_SEND_FROM_ISR_FAILED			0x20
#define PSF_EVENT_TASK_NOTIFY_WAIT							0x21
#define PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK					0x22
#define PSF_EVENT_TASK_NOTIFY_WAIT_FAILED					0x23

#define PSF_EVENT_QUEUE_CREATE								0x24
#define PSF_EVENT_QUEUE_CREATE_FAILED						0x25
#define PSF_EVENT_QUEUE_SEND								0x26
#define PSF_EVENT_QUEUE_SEND_FAILED							0x27
#define PSF_EVENT_QUEUE_SEND_BLOCK							0x28
#define PSF_EVENT_QUEUE_SEND_TO_FRONT						0x29
#define PSF_EVENT_QUEUE_SEND_TO_FRONT_FAILED				0x2A
#define PSF_EVENT_QUEUE_SEND_TO_FRONT_BLOCK					0x2B
#define PSF_EVENT_QUEUE_SEND_FROMISR						0x2C
#define PSF_EVENT_QUEUE_SEND_FROMISR_FAILED					0x2D
#define PSF_EVENT_QUEUE_SEND_TO_FRONT_FROMISR				0x2E
#define PSF_EVENT_QUEUE_SEND_TO_FRONT_FROMISR_FAILED		0x2F
#define PSF_EVENT_QUEUE_RECEIVE								0x30
#define PSF_EVENT_QUEUE_RECEIVE_FAILED						0x31
#define PSF_EVENT_QUEUE_RECEIVE_BLOCK						0x32
#define PSF_EVENT_QUEUE_RECEIVE_FROMISR						0x33
#define PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED				0x34
#define PSF_EVENT_QUEUE_PEEK								0x35
#define PSF_EVENT_QUEUE_PEEK_FAILED							0x36
#define PSF_EVENT_QUEUE_PEEK_BLOCK							0x37

#define PSF_EVENT_SEMAPHORE_CREATE							0x38
#define PSF_EVENT_SEMAPHORE_CREATE_FAILED					0x39
#define PSF_EVENT_SEMAPHORE_SEND							0x3A
#define PSF_EVENT_SEMAPHORE_SEND_FAILED						0x3B
#define PSF_EVENT_SEMAPHORE_SEND_FROMISR					0x3C
#define PSF_EVENT_SEMAPHORE_SEND_FROMISR_FAILED				0x3D
#define PSF_EVENT_SEMAPHORE_RECEIVE							0x3E
#define PSF_EVENT_SEMAPHORE_RECEIVE_FAILED					0x3F
#define PSF_EVENT_SEMAPHORE_RECEIVE_BLOCK					0x40
#define PSF_EVENT_SEMAPHORE_RECEIVE_FROMISR					0x41
#define PSF_EVENT_SEMAPHORE_RECEIVE_FROMISR_FAILED			0x42

#define PSF_EVENT_MUTEX_CREATE								0x43
#define PSF_EVENT_MUTEX_CREATE_FAILED						0x44
#define PSF_EVENT_MUTEX_SEND								0x45
#define PSF_EVENT_MUTEX_SEND_FAILED							0x46
#define PSF_EVENT_MUTEX_RECEIVE								0x47
#define PSF_EVENT_MUTEX_RECEIVE_FAILED						0x48
#define PSF_EVENT_MUTEX_RECEIVE_BLOCK						0x49

#define PSF_EVENT_TIMER_CREATE								0x4A
#define PSF_EVENT_TIMER_CREATE_FAILED						0x4B
#define PSF_EVENT_TIMER_DELETE								0x4C
#define PSF_EVENT_TIMER_DELETE_FAILED						0x4D
#define PSF_EVENT_TIMER_START								0x4E
#define PSF_EVENT_TIMER_START_FAILED						0x4F
#define PSF_EVENT_TIMER_STOP								0x50
#define PSF_EVENT_TIMER_STOP_FAILED							0x51
#define PSF_EVENT_TIMER_CHANGEPERIOD						0x52
#define PSF_EVENT_TIMER_CHANGEPERIOD_FAILED					0x53
#define PSF_EVENT_TIMER_START_FROMISR						0x54
#define PSF_EVENT_TIMER_START_FROMISR_FAILED				0x55
#define PSF_EVENT_TIMER_STOP_FROMISR						0x56
#define PSF_EVENT_TIMER_STOP_FROMISR_FAILED					0x57
#define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR				0x58
#define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED			0x59
#define PSF_EVENT_TIMER_PROCESS_START						0x5A
#define PSF_EVENT_TIMER_PROCESS_STOP						0x5B
#define PSF_EVENT_TIMER_PROCESS_CHANGEPERIOD				0x5C
#define PSF_EVENT_TIMER_PROCESS_DELETE						0x5D
#define PSF_EVENT_TIMER_CALLBACK							0x5E

/* Codes 0x5F-0x63 are currently reserved so later codes stay stable: */
/*
#define UNUSED_SO_FAR										0x5F
#define UNUSED_SO_FAR										0x60
#define UNUSED_SO_FAR										0x61
#define UNUSED_SO_FAR										0x62
#define UNUSED_SO_FAR										0x63
*/

#define PSF_EVENT_EVENTGROUP_CREATE							0x64
#define PSF_EVENT_EVENTGROUP_CREATE_FAILED					0x65
#define PSF_EVENT_EVENTGROUP_DELETE							0x66
#define PSF_EVENT_EVENTGROUP_DELETE_FAILED					0x67
#define PSF_EVENT_EVENTGROUP_SETBITS						0x68
#define PSF_EVENT_EVENTGROUP_SETBITS_FAILED					0x69
#define PSF_EVENT_EVENTGROUP_SETBITS_FROMISR				0x6A
#define PSF_EVENT_EVENTGROUP_SETBITS_FROMISR_FAILED			0x6B
#define PSF_EVENT_EVENTGROUP_WAITBITS						0x6C
#define PSF_EVENT_EVENTGROUP_WAITBITS_FAILED				0x6D
#define PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK					0x6E
#define PSF_EVENT_EVENTGROUP_CLEARBITS						0x6F
#define PSF_EVENT_EVENTGROUP_CLEARBITS_FAILED				0x70
#define PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR				0x71
#define PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR_FAILED		0x72

/* LAST EVENT 0x72 (except user events from 0xF0) */

/* Placing user events near the end */
#define PSF_EVENT_USER_EVENT								0xF0
#define PSF_EVENT_USER_EVENT_0								(PSF_EVENT_USER_EVENT + 0)
#define PSF_EVENT_USER_EVENT_1								(PSF_EVENT_USER_EVENT + 1)
#define PSF_EVENT_USER_EVENT_2								(PSF_EVENT_USER_EVENT + 2)
#define PSF_EVENT_USER_EVENT_3								(PSF_EVENT_USER_EVENT + 3)
#define PSF_EVENT_USER_EVENT_4								(PSF_EVENT_USER_EVENT + 4)
#define PSF_EVENT_USER_EVENT_5								(PSF_EVENT_USER_EVENT + 5)
#define PSF_EVENT_USER_EVENT_6								(PSF_EVENT_USER_EVENT + 6)
#define PSF_EVENT_USER_EVENT_7								(PSF_EVENT_USER_EVENT + 7)
1215 
1216 /*** The trace macros for streaming ******************************************/
1217 
/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK
/* NOTE(review): relies on kernel-local uxSchedulerSuspended and uxMissedTicks at
the expansion site, like the snapshot variant. */
#define traceTASK_INCREMENT_TICK( xTickCount ) \
	if (uxSchedulerSuspended == ( portUnsignedBaseType ) pdTRUE || uxMissedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \
	if (uxSchedulerSuspended == ( portUnsignedBaseType ) pdFALSE) { prvTraceStoreEvent1(PSF_EVENT_NEW_TIME, (xTickCount + 1)); }

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
/* Note: uiTraceTickCount is a 32-bit counter, so this can handle time adjustments of
at most 2^32 ticks; longer tick-less idle periods wrap and will appear "compressed" on
the time line. (Unlike the snapshot variant, this adds ticks, not CPU cycles.) */
#define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xCount; }
1228 
/* Called for each task that becomes ready */
/* Only emitted when ready events are enabled in the recorder config; otherwise
 * the hook compiles away to nothing. */
#if (TRC_CFG_INCLUDE_READY_EVENTS == 1)
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	if (TRACE_GET_TASK_FILTER(pxTCB) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_TASK_READY, (uint32_t)pxTCB); \
	}
#else /* (TRC_CFG_INCLUDE_READY_EVENTS == 1) */
#define traceMOVED_TASK_TO_READY_STATE( pxTCB )
#endif /* (TRC_CFG_INCLUDE_READY_EVENTS == 1) */

extern volatile uint32_t uiTraceSystemState;

/* Called on each task-switch */
/* Marks the recorder state as "in task switch" around the store, and only logs
 * an ACTIVATE event when the incoming TCB differs from the last one seen
 * (prvIsNewTCB), i.e., actual switches rather than every scheduler pass. */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	uiTraceSystemState = TRC_STATE_IN_TASKSWITCH; \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		if (prvIsNewTCB(pxCurrentTCB)) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_ACTIVATE, (uint32_t)pxCurrentTCB, pxCurrentTCB->uxPriority); \
		} \
	} \
	uiTraceSystemState = TRC_STATE_IN_APPLICATION;
1255 
/* Called on vTaskCreate */
/* Registers the task's name and priority with the recorder's symbol/object
 * tables, assigns it the current filter group, then logs the create event if
 * both the creating task and the new task pass the current filter. */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	prvTraceSaveObjectSymbol(pxNewTCB, (const char*)pxNewTCB->pcNameOfTask); \
	prvTraceSaveObjectData(pxNewTCB, pxNewTCB->uxPriority); \
	prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, pxNewTCB->pcNameOfTask, pxNewTCB); \
	TRACE_SET_TASK_FILTER(pxNewTCB, CurrentFilterGroup); \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_TASK_FILTER(pxNewTCB) & CurrentFilterMask) \
			prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, pxNewTCB->uxPriority);

/* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
		prvTraceStoreEvent0(PSF_EVENT_TASK_CREATE_FAILED);

/* Called on vTaskDelete */
/* Logs the delete event (subject to filtering), then unconditionally removes
 * the task's symbol and object data from the recorder tables. */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_TASK_FILTER(pxTaskToDelete) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_DELETE, (uint32_t)pxTaskToDelete, pxTaskToDelete->uxPriority); \
		} \
	} \
	prvTraceDeleteSymbol(pxTaskToDelete); \
	prvTraceDeleteObjectData(pxTaskToDelete);
1285 
/* All hooks below are omitted when only scheduling events are recorded. */
#if (TRC_CFG_SCHEDULING_ONLY == 0)

#if (configUSE_TICKLESS_IDLE != 0)

/* xExpectedIdleTime is a local variable in scope at the call site. */
#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_LOWPOWER_BEGIN, xExpectedIdleTime); \
	}

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		prvTraceStoreEvent0(PSF_EVENT_LOWPOWER_END); \
	}

#endif

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_TASK_FILTER(pxTaskToSuspend) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent1(PSF_EVENT_TASK_SUSPEND, (uint32_t)pxTaskToSuspend); \
		} \
	}

/* Called on vTaskDelay - note the use of variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY, xTicksToDelay); \
	}

/* Called on vTaskDelayUntil - note the use of variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#define traceTASK_DELAY_UNTIL(xTimeToWake) \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, xTimeToWake); \
	}
1330 
/* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */
/* uxQueueType is a local at the call site; the event logged depends on whether
 * the object is a plain queue, a semaphore or a mutex. The new object inherits
 * the current filter group before filtering is applied. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue )\
	TRACE_SET_OBJECT_FILTER(QUEUE, pxNewQueue, CurrentFilterGroup); \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxNewQueue) & CurrentFilterMask) \
		{ \
			switch (uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE, (uint32_t)pxNewQueue, pxNewQueue->uxMaxNumberOfItems); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_CREATE, (uint32_t)pxNewQueue, pxNewQueue->uxMaxNumberOfItems); \
				break; \
			case queueQUEUE_IS_MUTEX: \
				prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \
				break; \
			} \
		} \
	}

/* Called in xQueueCreate, if the queue creation fails */
/* On failure there is no valid queue object, so the caller-supplied buffer
 * pointer is logged as the object identifier instead. */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( pcBuffer, queueType, queueLength ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		switch (queueType) \
		{ \
		case queueQUEUE_IS_QUEUE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE_FAILED, (uint32_t)pcBuffer, queueLength); \
			break; \
		case queueQUEUE_IS_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_CREATE_FAILED, (uint32_t)pcBuffer, queueLength); \
			break; \
		case queueQUEUE_IS_MUTEX: \
			prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE_FAILED, (uint32_t)pcBuffer); \
			break; \
		} \
	}
1372 
/* Called when a message is sent to a queue */
/* xPosition and xTicksToWait are locals at the call site; for plain queues the
 * event distinguishes send-to-back from send-to-front. The logged item count
 * is uxItemsWaiting + 1 (the count after this successful send). */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			switch (pxQueue->uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent3(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND : PSF_EVENT_QUEUE_SEND_TO_FRONT, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting + 1); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_SEND, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
				break; \
			case queueQUEUE_IS_MUTEX: \
				prvTraceStoreEvent2(PSF_EVENT_MUTEX_SEND, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
				break; \
			} \
		} \
	}
1394 
/* Called when a message failed to be sent to a queue (timeout) */
/* For a valid queue handle the event is chosen by object type (and, for plain
 * queues, by xPosition). For an invalid handle the object filter cannot be
 * consulted, so the event is logged unconditionally with item count 0. */
#undef traceQUEUE_SEND_FAILED
#define traceQUEUE_SEND_FAILED( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_QUEUE_VALID(pxQueue)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
			{ \
				switch (pxQueue->uxQueueType) \
				{ \
				case queueQUEUE_IS_QUEUE: \
					prvTraceStoreEvent3(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FAILED : PSF_EVENT_QUEUE_SEND_TO_FRONT_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting + 1); \
					break; \
				case queueQUEUE_IS_SEMAPHORE: \
					prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_SEND_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
					break; \
				case queueQUEUE_IS_MUTEX: \
					prvTraceStoreEvent2(PSF_EVENT_MUTEX_SEND_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
					break; \
				} \
			} \
		} \
		else \
		{ \
			/* Select the event matching the attempted position, consistent with traceQUEUE_SEND_FROM_ISR_FAILED. */ \
			prvTraceStoreEvent3(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FAILED : PSF_EVENT_QUEUE_SEND_TO_FRONT_FAILED, (uint32_t)pxQueue, xTicksToWait, 0); \
		} \
	}
1423 
/* Called when the task is blocked due to a send operation on a full queue */
/* Semaphores and mutexes don't block on send */
#undef traceBLOCKING_ON_QUEUE_SEND
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent3(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_BLOCK : PSF_EVENT_QUEUE_SEND_TO_FRONT_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting + 1); \
		} \
	}

/* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */
/* Mutexes can't be given from ISR */
/* No task filter check here: there is no current task in ISR context. */
#undef traceQUEUE_SEND_FROM_ISR
#define traceQUEUE_SEND_FROM_ISR( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
	{ \
		switch (pxQueue->uxQueueType) \
		{ \
		case queueQUEUE_IS_QUEUE: \
			prvTraceStoreEvent2(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR : PSF_EVENT_QUEUE_SEND_TO_FRONT_FROMISR, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
			break; \
		case queueQUEUE_IS_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_SEND_FROMISR, (uint32_t)pxQueue, pxQueue->uxItemsWaiting + 1); \
			break; \
		} \
	}

/* Called when a message send from interrupt context fails (since the queue was full) */
/* Mutexes can't be given from ISR */
/* Invalid handles are logged unconditionally (the object filter cannot be read
 * from an invalid object) with item count 0. */
#undef traceQUEUE_SEND_FROM_ISR_FAILED
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			switch (pxQueue->uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent2(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR_FAILED : PSF_EVENT_QUEUE_SEND_TO_FRONT_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_SEND_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting); \
				break; \
			} \
		} \
	} \
	else \
	{ \
		prvTraceStoreEvent2(xPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR_FAILED : PSF_EVENT_QUEUE_SEND_TO_FRONT_FROMISR_FAILED, (uint32_t)pxQueue, 0); \
	}
1476 
/* Called when a message is received from a queue */
/* The logged item count is uxItemsWaiting - 1 (count after this successful
 * receive). xTicksToWait is a local at the call site. */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			switch (pxQueue->uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting - 1); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting - 1); \
				break; \
			case queueQUEUE_IS_MUTEX: \
				prvTraceStoreEvent3(PSF_EVENT_MUTEX_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting - 1); \
				break; \
			} \
		} \
	}

/* Called when a receive operation on a queue fails (timeout) */
/* Invalid handles fall through to an unconditional generic event with count 0. */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_QUEUE_VALID(pxQueue)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
			{ \
				switch (pxQueue->uxQueueType) \
				{ \
				case queueQUEUE_IS_QUEUE: \
					prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
					break; \
				case queueQUEUE_IS_SEMAPHORE: \
					prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
					break; \
				case queueQUEUE_IS_MUTEX: \
					prvTraceStoreEvent3(PSF_EVENT_MUTEX_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
					break; \
				} \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, 0); \
		} \
	}
1527 
/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			switch (pxQueue->uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_RECEIVE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
				break; \
			case queueQUEUE_IS_MUTEX: \
				prvTraceStoreEvent3(PSF_EVENT_MUTEX_RECEIVE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
				break; \
			} \
		} \
	}

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
/* Mutexes can't be taken from ISR */
/* No task filter check here: there is no current task in ISR context. */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
	{ \
		switch (pxQueue->uxQueueType) \
		{ \
		case queueQUEUE_IS_QUEUE: \
			prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxItemsWaiting - 1); \
			break; \
		case queueQUEUE_IS_SEMAPHORE: \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_RECEIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxItemsWaiting - 1); \
			break; \
		} \
	}

/* Called when a message receive from interrupt context fails (since the queue was empty) */
/* Mutexes can't be taken from ISR */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
	if (trcIS_QUEUE_VALID(pxQueue)) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			switch (pxQueue->uxQueueType) \
			{ \
			case queueQUEUE_IS_QUEUE: \
				prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting); \
				break; \
			case queueQUEUE_IS_SEMAPHORE: \
				prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxItemsWaiting); \
				break; \
			} \
		} \
	} \
	else \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, 0); \
	}
1590 
/* Called on xQueuePeek */
/* Can't Peek Semaphores or Mutexes */
/* Peek does not remove the item, so the unchanged uxItemsWaiting is logged. */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
		} \
	}

/* Called on xQueuePeek */
/* Can't Peek Semaphores or Mutexes */
#undef traceQUEUE_PEEK_FAILED
#define traceQUEUE_PEEK_FAILED( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_QUEUE_VALID(pxQueue)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, 0); \
		} \
	}

/* Called on xQueuePeek */
/* Can't Peek Semaphores or Mutexes */
#undef traceBLOCKING_ON_QUEUE_PEEK
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxItemsWaiting); \
		} \
	}
1633 
/* Called in xTaskPrioritySet */
/* The recorder's stored priority is always updated, even when the event itself
 * is filtered out. */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
	prvTraceSaveObjectData(pxTask, uxNewPriority); \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_TASK_FILTER(pxTask) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_PRIORITY, (uint32_t)pxTask, uxNewPriority); \
		} \
	}

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
	if (TRACE_GET_TASK_FILTER(pxTask) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_INHERIT, (uint32_t)pxTask, uxNewPriority); \
	}

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
	if (TRACE_GET_TASK_FILTER(pxTask) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_DISINHERIT, (uint32_t)pxTask, uxNewPriority); \
	}

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_TASK_FILTER(pxTaskToResume) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME, (uint32_t)pxTaskToResume); \
		} \
	}

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
	if (TRACE_GET_TASK_FILTER(pxTaskToResume) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME_FROMISR, (uint32_t)pxTaskToResume); \
	}
1680 
/* Called in timer.c - xTimerCreate */
/* xReturn is a local at the call site: on pdPASS the timer is registered with
 * the recorder (name, filter group) and a create event is logged; otherwise a
 * create-failed event is logged with no object reference. */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
	if (pdPASS == xReturn) \
	{ \
		TRACE_SET_OBJECT_FILTER(TIMER, tmr, CurrentFilterGroup); \
		prvTraceSaveObjectSymbol(tmr, (const char*)tmr->pcTimerName); \
		prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, (const char*)tmr->pcTimerName, tmr); \
		if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent2(PSF_EVENT_TIMER_CREATE, (uint32_t)tmr, tmr->xTimerPeriodInTicks); \
			} \
		} \
	} \
	else \
	{ \
		if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TIMER_CREATE_FAILED, 0, 0); \
		} \
	}
1704 
/* Logs a timer command (start/stop/change-period/delete) sent from task
 * context; success vs. failure is chosen by xReturn. The else branch repeats
 * the same events for an invalid timer handle, where the object filter cannot
 * be consulted. */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_TIMER_VALID(tmr)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
			{ \
				switch(xCommandID) \
				{ \
					case timerCOMMAND_START: \
						prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START : PSF_EVENT_TIMER_START_FAILED, (uint32_t)tmr); \
						break; \
					case timerCOMMAND_STOP: \
						prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP : PSF_EVENT_TIMER_STOP_FAILED, (uint32_t)tmr); \
						break; \
					case timerCOMMAND_CHANGE_PERIOD: \
						prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD : PSF_EVENT_TIMER_CHANGEPERIOD_FAILED, (uint32_t)tmr, xOptionalValue); \
						break; \
					case timerCOMMAND_DELETE: \
						prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_DELETE : PSF_EVENT_TIMER_DELETE_FAILED, (uint32_t)tmr); \
						break; \
				} \
			} \
		} \
		else \
		{ \
			switch(xCommandID) \
			{ \
				case timerCOMMAND_START: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START : PSF_EVENT_TIMER_START_FAILED, (uint32_t)tmr); \
					break; \
				case timerCOMMAND_STOP: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP : PSF_EVENT_TIMER_STOP_FAILED, (uint32_t)tmr); \
					break; \
				case timerCOMMAND_CHANGE_PERIOD: \
					prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD : PSF_EVENT_TIMER_CHANGEPERIOD_FAILED, (uint32_t)tmr, xOptionalValue); \
					break; \
				case timerCOMMAND_DELETE: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_DELETE : PSF_EVENT_TIMER_DELETE_FAILED, (uint32_t)tmr); \
					break; \
			} \
		} \
	}
1749 
/* ISR-context variant of traceTIMER_COMMAND_SEND: no task filter check and no
 * delete command (timers are not deleted from ISRs here). The else branch
 * repeats the same events for an invalid timer handle. */
#undef traceTIMER_COMMAND_SEND_FROM_ISR
#define traceTIMER_COMMAND_SEND_FROM_ISR(tmr, xCommandID, xOptionalValue, xReturn) \
	if (trcIS_TIMER_VALID(tmr)) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
		{ \
			switch(xCommandID) \
			{ \
				case timerCOMMAND_START: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START_FROMISR : PSF_EVENT_TIMER_START_FROMISR_FAILED, (uint32_t)tmr); \
					break; \
				case timerCOMMAND_STOP: \
					prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP_FROMISR : PSF_EVENT_TIMER_STOP_FROMISR_FAILED, (uint32_t)tmr); \
					break; \
				case timerCOMMAND_CHANGE_PERIOD: \
					prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR : PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
					break; \
			} \
		} \
	} \
	else \
	{ \
		switch (xCommandID) \
		{ \
		case timerCOMMAND_START: \
			prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START_FROMISR : PSF_EVENT_TIMER_START_FROMISR_FAILED, (uint32_t)tmr); \
			break; \
		case timerCOMMAND_STOP: \
			prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP_FROMISR : PSF_EVENT_TIMER_STOP_FROMISR_FAILED, (uint32_t)tmr); \
			break; \
		case timerCOMMAND_CHANGE_PERIOD: \
			prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR : PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
			break; \
		} \
	}
1785 
/* Logs the processing of a queued timer command in the timer service task.
 * On delete, the timer's symbol is also removed from the recorder tables. */
#undef traceTIMER_COMMAND_PROCESS
#define traceTIMER_COMMAND_PROCESS( tmr, xCommandID, xOptionalValue ) \
	if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
	{ \
		switch(xCommandID) \
		{ \
			case timerCOMMAND_START: \
				prvTraceStoreEvent1(PSF_EVENT_TIMER_PROCESS_START, (uint32_t)tmr); \
				break; \
			case timerCOMMAND_STOP: \
				prvTraceStoreEvent1(PSF_EVENT_TIMER_PROCESS_STOP, (uint32_t)tmr); \
				break; \
			case timerCOMMAND_CHANGE_PERIOD: \
				prvTraceStoreEvent2(PSF_EVENT_TIMER_PROCESS_CHANGEPERIOD, (uint32_t)tmr, xOptionalValue); \
				break; \
			case timerCOMMAND_DELETE: \
				prvTraceStoreEvent1(PSF_EVENT_TIMER_PROCESS_DELETE, (uint32_t)tmr); \
				prvTraceDeleteSymbol(tmr); \
				break; \
		} \
	}

/* Logs the invocation of a timer's callback function. */
#undef traceTIMER_CALLBACK
#define traceTIMER_CALLBACK( tmr, callbackFunction ) \
	if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TIMER_CALLBACK, (uint32_t)tmr, (uint32_t)callbackFunction); \
	}
1814 
/* Called on event group creation: assigns the filter group before filtering,
 * then logs the create event. */
#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
	TRACE_SET_OBJECT_FILTER(EVENTGROUP, eg, CurrentFilterGroup); \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_CREATE, (uint32_t)eg); \
		} \
	}

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_CREATE_FAILED, 0); \
	}

/* Logs the delete event (subject to filtering), then unconditionally removes
 * the event group's symbol from the recorder tables. */
#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_DELETE, (uint32_t)eg); \
		} \
	} \
	prvTraceDeleteSymbol(eg);

#undef traceEVENT_GROUP_DELETE_FAILED
#define traceEVENT_GROUP_DELETE_FAILED(eg) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_DELETE_FAILED, (uint32_t)eg); \
	}

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS, (uint32_t)eg, bitsToSet); \
		} \
	}
1860 
/* Both branches store the same event; the valid/invalid split exists so that
 * the object filter is only consulted on a valid handle — presumably because
 * TRACE_GET_OBJECT_FILTER must not be applied to an invalid object (verify
 * against the filter macro's definition). */
#undef traceEVENT_GROUP_SET_BITS_FAILED
#define traceEVENT_GROUP_SET_BITS_FAILED(eg, bitsToSet) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_EVENTGROUP_VALID(eg)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FAILED, (uint32_t)eg, bitsToSet); \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FAILED, (uint32_t)eg, bitsToSet); \
		} \
	}

/* No task filter check here: there is no current task in ISR context. */
#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
	if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR, (uint32_t)eg, bitsToSet); \
	}

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR_FAILED
#define traceEVENT_GROUP_SET_BITS_FROM_ISR_FAILED(eg, bitsToSet) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR_FAILED, (uint32_t)eg, bitsToSet); \
		} \
	} \
	else \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR_FAILED, (uint32_t)eg, bitsToSet); \
	}
1898 
#undef traceEVENT_GROUP_WAIT_BITS
#define traceEVENT_GROUP_WAIT_BITS(eg, bitsToWaitFor, timeout) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_EVENTGROUP_WAITBITS, (uint32_t)eg, bitsToWaitFor, timeout); \
		} \
	}

/* As with the set-bits variant, the invalid-handle branch logs the same event
 * without consulting the object filter. */
#undef traceEVENT_GROUP_WAIT_BITS_FAILED
#define traceEVENT_GROUP_WAIT_BITS_FAILED(eg, bitsToWaitFor, timeout) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_EVENTGROUP_VALID(eg)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent3(PSF_EVENT_EVENTGROUP_WAITBITS_FAILED, (uint32_t)eg, bitsToWaitFor, timeout); \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_EVENTGROUP_WAITBITS_FAILED, (uint32_t)eg, bitsToWaitFor, timeout); \
		} \
	}

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor, timeout) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent3(PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK, (uint32_t)eg, bitsToWaitFor, timeout); \
		} \
	}

#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS, (uint32_t)eg, bitsToClear); \
		} \
	}
1945 
/* The invalid-handle branch logs the same event without consulting the object
 * filter (which cannot be read from an invalid object). */
#undef traceEVENT_GROUP_CLEAR_BITS_FAILED
#define traceEVENT_GROUP_CLEAR_BITS_FAILED(eg, bitsToClear) \
	if (TRACE_GET_TASK_FILTER(TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (trcIS_EVENTGROUP_VALID(eg)) \
		{ \
			if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FAILED, (uint32_t)eg, bitsToClear); \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FAILED, (uint32_t)eg, bitsToClear); \
		} \
	}

/* No task filter check here: there is no current task in ISR context. */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
	if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR, (uint32_t)eg, bitsToClear); \
	}
1969 
/* ISR-context clear-bits failure. Both branches log the same event; the split
 * only avoids reading the object filter of an invalid handle.
 * Fix: removed the stray trailing backslash after the final brace, which
 * spliced the following (blank) line into the macro — a latent hazard if a
 * line were ever added directly below the definition. */
#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR_FAILED
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR_FAILED(eg, bitsToClear) \
	if (trcIS_EVENTGROUP_VALID(eg)) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR_FAILED, (uint32_t)eg, bitsToClear); \
		} \
	} \
	else \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR_FAILED, (uint32_t)eg, bitsToClear); \
	}

1984 #undef traceTASK_NOTIFY_WAIT
1985 #define traceTASK_NOTIFY_WAIT() \
1986 	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
1987 	{ \
1988 		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB, xTicksToWait); \
1989 	}
1990 
1991 #undef traceTASK_NOTIFY_WAIT_FAILED
1992 #define traceTASK_NOTIFY_WAIT_FAILED() \
1993 	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
1994 	{ \
1995 		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB, xTicksToWait); \
1996 	}
1997 
1998 #undef traceTASK_NOTIFY_WAIT_BLOCK
1999 #define traceTASK_NOTIFY_WAIT_BLOCK() \
2000 	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
2001 	{ \
2002 		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK, (uint32_t)pxCurrentTCB, xTicksToWait); \
2003 	}
2004 
#undef traceTASK_NOTIFY_SEND
/* Kernel trace hook for a successful task notification send.
 * Relies on pxCurrentTCB, xTaskToNotify and uxValue being in scope at the
 * call site. The event is stored only when BOTH the sending task
 * (pxCurrentTCB) and the target task (xTaskToNotify) pass the task filter;
 * the stored event records the target handle and the notification value. */
#define traceTASK_NOTIFY_SEND() \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_TASK_FILTER(xTaskToNotify) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND, (uint32_t)xTaskToNotify, uxValue); \
		} \
	}
2014 
#undef traceTASK_NOTIFY_SEND_FAILED
/* Kernel trace hook for a failed task notification send.
 * Relies on pxCurrentTCB, xTaskToNotify and uxValue being in scope at the
 * call site. The sender (pxCurrentTCB) must pass the task filter; then, if
 * the target handle is valid it must also pass the filter, while an invalid
 * target handle is always recorded (no filter can be looked up for it). */
#define traceTASK_NOTIFY_SEND_FAILED() \
	if (TRACE_GET_TASK_FILTER(pxCurrentTCB) & CurrentFilterMask) \
	{ \
		if (trcIS_TASK_VALID(xTaskToNotify)) \
		{ \
			if (TRACE_GET_TASK_FILTER(xTaskToNotify) & CurrentFilterMask) \
			{ \
				prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND_FAILED, (uint32_t)xTaskToNotify, uxValue); \
			} \
		} \
		else \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND_FAILED, (uint32_t)xTaskToNotify, uxValue); \
		} \
	}
2031 
#undef traceTASK_NOTIFY_SEND_FROM_ISR
/* Kernel trace hook for a successful notification send from ISR context.
 * Relies on xTaskToNotify and uxValue being in scope at the call site; only
 * the target task's filter is checked (there is no "current task" filter in
 * ISR context). */
#define traceTASK_NOTIFY_SEND_FROM_ISR() \
	if (TRACE_GET_TASK_FILTER(xTaskToNotify) & CurrentFilterMask) \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND_FROM_ISR, (uint32_t)xTaskToNotify, uxValue); \
	}
2038 
#undef traceTASK_NOTIFY_SEND_FROM_ISR_FAILED
/* Kernel trace hook for a failed notification send from ISR context.
 * Relies on xTaskToNotify and uxValue being in scope at the call site. A
 * valid target handle is stored only when it passes the task filter; an
 * invalid handle is always recorded, since no filter can be looked up for it.
 * Fix: removed the stray trailing backslash after the closing brace, which
 * continued the macro onto the following (blank) line — any statement later
 * placed directly after the macro would have been silently absorbed into it.
 * All sibling trace macros in this file end without a trailing backslash. */
#define traceTASK_NOTIFY_SEND_FROM_ISR_FAILED() \
	if (trcIS_TASK_VALID(xTaskToNotify)) \
	{ \
		if (TRACE_GET_TASK_FILTER(xTaskToNotify) & CurrentFilterMask) \
		{ \
			prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND_FROM_ISR_FAILED, (uint32_t)xTaskToNotify, uxValue); \
		} \
	} \
	else \
	{ \
		prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_SEND_FROM_ISR_FAILED, (uint32_t)xTaskToNotify, uxValue); \
	}
2052 
2053 #endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */
2054 
2055 #endif /*#if TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING  */
2056 
#if (TRC_CFG_SCHEDULING_ONLY == 1)
/* Scheduling-only mode: every non-scheduling trace hook below is defined as
 * an empty macro, so the corresponding kernel call sites compile to nothing
 * and only scheduling-related events are recorded. The parameter lists must
 * match the kernel's invocations exactly, even though the bodies are empty. */
#define traceLOW_POWER_IDLE_BEGIN()
#define traceLOW_POWER_IDLE_END()
#define traceTASK_SUSPEND( pxTaskToSuspend )
#define traceTASK_DELAY()
#define traceTASK_DELAY_UNTIL(xTimeToWake)
#define traceQUEUE_CREATE( pxNewQueue )
#define traceQUEUE_CREATE_FAILED( pcBuffer, queueType, queueLength )
#define traceQUEUE_SEND( pxQueue )
#define traceQUEUE_SEND_FAILED( pxQueue )
#define traceBLOCKING_ON_QUEUE_SEND( pxQueue )
#define traceQUEUE_SEND_FROM_ISR( pxQueue )
#define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue )
#define traceQUEUE_RECEIVE( pxQueue )
#define traceQUEUE_RECEIVE_FAILED( pxQueue )
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue )
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue )
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue )
#define traceQUEUE_PEEK( pxQueue )
#define traceQUEUE_PEEK_FAILED( pxQueue )
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue )
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority )
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority )
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority )
#define traceTASK_RESUME( pxTaskToResume )
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume )
#define traceTIMER_CREATE(tmr)
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn)
#define traceTIMER_COMMAND_SEND_FROM_ISR(tmr, xCommandID, xOptionalValue, xReturn)
#define traceTIMER_COMMAND_PROCESS( tmr, xCommandID, xOptionalValue )
#define traceTIMER_CALLBACK( tmr, callbackFunction )
#define traceEVENT_GROUP_CREATE(eg)
#define traceEVENT_GROUP_CREATE_FAILED()
#define traceEVENT_GROUP_DELETE(eg)
#define traceEVENT_GROUP_DELETE_FAILED(eg)
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet)
#define traceEVENT_GROUP_SET_BITS_FAILED(eg, bitsToSet)
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet)
#define traceEVENT_GROUP_SET_BITS_FROM_ISR_FAILED(eg, bitsToSet)
#define traceEVENT_GROUP_WAIT_BITS(eg, bitsToWaitFor, timeout)
#define traceEVENT_GROUP_WAIT_BITS_FAILED(eg, bitsToWaitFor, timeout)
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor, timeout)
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear)
#define traceEVENT_GROUP_CLEAR_BITS_FAILED(eg, bitsToClear)
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear)
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR_FAILED(eg, bitsToClear)
#define traceTASK_NOTIFY_WAIT()
#define traceTASK_NOTIFY_WAIT_FAILED()
#define traceTASK_NOTIFY_WAIT_BLOCK()
#define traceTASK_NOTIFY_SEND()
#define traceTASK_NOTIFY_SEND_FAILED()
#define traceTASK_NOTIFY_SEND_FROM_ISR()
#define traceTASK_NOTIFY_SEND_FROM_ISR_FAILED()
#endif /* (TRC_CFG_SCHEDULING_ONLY == 1) */
2111 
#else /*(TRC_USE_TRACEALYZER_RECORDER == 1)*/

	/* Recorder disabled (configUSE_TRACE_FACILITY == 0): the public
	 * object-naming API compiles away to nothing, so application code can
	 * call these unconditionally without #ifdef guards. */
	#define vTraceSetQueueName(object, name)
	#define vTraceSetSemaphoreName(object, name)
	#define vTraceSetMutexName(object, name)
	#define vTraceSetEventGroupName(object, name)

#endif /*(TRC_USE_TRACEALYZER_RECORDER == 1)*/
2121 
2122 #ifdef __cplusplus
2123 }
2124 #endif
2125 
2126 #endif /* TRC_KERNEL_PORT_H */
2127