/*
 * Trace Recorder for Tracealyzer v4.5.1
 * Copyright 2021 Percepio AB
 * www.percepio.com
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * FreeRTOS specific definitions needed by the trace recorder
 */

#ifndef TRC_KERNEL_PORT_H
#define TRC_KERNEL_PORT_H

#include "freertos/FreeRTOS.h"
#include "freertos/portmacro.h"
#include "trcPortDefines.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef CONFIG_PERCEPIO_TRACERECORDER_ENABLED
#define TRC_USE_TRACEALYZER_RECORDER 1
#else
#define TRC_USE_TRACEALYZER_RECORDER 0
#endif

/*** ESP-IDF version codes ***************************************************/
#define TRC_ESP_IDF_VERSION_NOT_SET 0
#define TRC_ESP_IDF_VERSION_4_0_0 400
#define TRC_ESP_IDF_VERSION_4_1_0 410
#define TRC_ESP_IDF_VERSION_4_2_0 420
#define TRC_ESP_IDF_VERSION_4_3_0 430

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define prvGetStreamBufferType(x) ((( StreamBuffer_t * )x )->ucFlags & sbFLAGS_IS_MESSAGE_BUFFER)
#else
#define prvGetStreamBufferType(x) 0
#endif

/* Added mainly for our internal testing. This makes it easier to create test applications
that run on multiple FreeRTOS versions. */
#if (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_0_0)
/* FreeRTOS v7.x */
#define STRING_CAST(x) ( (signed char*) x )
#define TickType portTickType
#define TaskType xTaskHandle
#else
/* FreeRTOS v8.0 and later */
#define STRING_CAST(x) x
#define TickType TickType_t
#define TaskType TaskHandle_t
#endif

#if !CONFIG_FREERTOS_UNICORE
#define TRC_GET_CURRENT_CORE() (xPortGetCoreID())
#endif


#if (defined(TRC_USE_TRACEALYZER_RECORDER)) && (TRC_USE_TRACEALYZER_RECORDER == 1)

#define TRC_PLATFORM_CFG ""
#define TRC_PLATFORM_CFG_MAJOR 1
#define TRC_PLATFORM_CFG_MINOR 0
#define TRC_PLATFORM_CFG_PATCH 0

/**
 * @brief Global Trace Recorder ESP-IDF entry function
 *
 * NOTE: Although ESP-IDF global constructor functions are called
 * too late for vTraceEnable, they are a suitable entry point for
 * initializing the control task.
 *
 * NOTE: ESP-IDF requires us to start the control task separately
 * from the traditional vTraceEnable, which is called at an early
 * stage of the boot process. If we started the control task at that
 * early stage, it would become the primary task during the boot
 * procedure and cause all kinds of problems and kernel panics as the
 * remainder of the boot process is performed.
 */
__attribute__((constructor)) void vTraceInit();
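
/*
 * Illustrative usage (a minimal sketch, not part of this header): vTraceInit
 * runs automatically as a global constructor and only prepares the control
 * task; the application still starts the recorder itself. Where exactly to
 * call vTraceEnable() depends on the integration, so the placement below is
 * hypothetical. It assumes the standard vTraceEnable()/TRC_START API from
 * trcRecorder.h.
 *
 *   #include "trcRecorder.h"
 *
 *   void app_main(void)
 *   {
 *       vTraceEnable(TRC_START);   // start tracing as early as practical
 *       // ... create tasks, queues, etc.
 *   }
 */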

#if defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0)
/* Required for this feature */
#undef INCLUDE_uxTaskGetStackHighWaterMark
#define INCLUDE_uxTaskGetStackHighWaterMark 1
#endif /* defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0) */

/*******************************************************************************
 * INCLUDE_xTaskGetCurrentTaskHandle must be set to 1 for tracing to work properly
 ******************************************************************************/
#undef INCLUDE_xTaskGetCurrentTaskHandle
#define INCLUDE_xTaskGetCurrentTaskHandle 1

#if (TRC_CFG_SCHEDULING_ONLY == 0)
/*******************************************************************************
 * vTraceSetQueueName(void* object, const char* name)
 *
 * Parameter object: pointer to the Queue that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Queue objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetQueueName(void* object, const char* name);

/*******************************************************************************
 * vTraceSetSemaphoreName(void* object, const char* name)
 *
 * Parameter object: pointer to the Semaphore that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Semaphore objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetSemaphoreName(void* object, const char* name);

/*******************************************************************************
 * vTraceSetMutexName(void* object, const char* name)
 *
 * Parameter object: pointer to the Mutex that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for Mutex objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetMutexName(void* object, const char* name);

#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1)
/*******************************************************************************
 * vTraceSetEventGroupName(void* object, const char* name)
 *
 * Parameter object: pointer to the EventGroup that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for EventGroup objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetEventGroupName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */
#define vTraceSetEventGroupName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)
/*******************************************************************************
 * vTraceSetStreamBufferName(void* object, const char* name)
 *
 * Parameter object: pointer to the StreamBuffer that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for StreamBuffer objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetStreamBufferName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
#define vTraceSetStreamBufferName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)
/*******************************************************************************
 * vTraceSetMessageBufferName(void* object, const char* name)
 *
 * Parameter object: pointer to the MessageBuffer that shall be named
 * Parameter name: the name to set (const string literal)
 *
 * Sets a name for MessageBuffer objects for display in Tracealyzer.
 ******************************************************************************/
void vTraceSetMessageBufferName(void* object, const char* name);
#else /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
#define vTraceSetMessageBufferName(object, name) /* Do nothing */
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */
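
/*
 * Illustrative usage of the naming functions above (a sketch only; the queue
 * and semaphore handles below are hypothetical application objects, created
 * with the standard FreeRTOS APIs):
 *
 *   QueueHandle_t xSensorQueue = xQueueCreate(10, sizeof(uint32_t));
 *   SemaphoreHandle_t xWifiSem = xSemaphoreCreateBinary();
 *
 *   vTraceSetQueueName(xSensorQueue, "SensorQueue");
 *   vTraceSetSemaphoreName(xWifiSem, "WifiReady");
 *
 * As noted in the declarations above, the name should be a constant string
 * literal.
 */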

#if defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1)
uint32_t prvTraceGetStackHighWaterMark(void* task);
#endif /* defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) */

#else /* (TRC_CFG_SCHEDULING_ONLY == 0) */

#define vTraceSetQueueName(object, name) /* Do nothing */
#define vTraceSetSemaphoreName(object, name) /* Do nothing */
#define vTraceSetMutexName(object, name) /* Do nothing */
#define vTraceSetEventGroupName(object, name) /* Do nothing */
#define vTraceSetStreamBufferName(object, name) /* Do nothing */
#define vTraceSetMessageBufferName(object, name) /* Do nothing */
#define prvAddTaskToStackMonitor(task) /* Do nothing */
#define prvRemoveTaskFromStackMonitor(task) /* Do nothing */

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */

/*******************************************************************************
 * Note: Setting names for event groups is difficult to support and has been
 * excluded intentionally, since we don't know whether event_groups.c is
 * included in the build; referencing it from the recorder may cause errors.
 ******************************************************************************/

/* Gives the currently executing task (wrapper for RTOS-specific function) */
void* prvTraceGetCurrentTaskHandle(void);

#if (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING))
/* Tells if the scheduler currently is suspended (task-switches can't occur) */
unsigned char prvTraceIsSchedulerSuspended(void);

/*******************************************************************************
 * INCLUDE_xTaskGetSchedulerState must be set to 1 for tracing to work properly
 ******************************************************************************/
#undef INCLUDE_xTaskGetSchedulerState
#define INCLUDE_xTaskGetSchedulerState 1

#endif /* (((TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT) && (TRC_CFG_INCLUDE_ISR_TRACING == 1)) || (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING)) */

#define TRACE_KERNEL_VERSION 0x1AA1
#define TRACE_TICK_RATE_HZ configTICK_RATE_HZ /* Defined in "FreeRTOS.h" */
#define TRACE_CPU_CLOCK_HZ configCPU_CLOCK_HZ /* Defined in "FreeRTOSConfig.h" */
#define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle()

#define TRACE_GET_OS_TICKS() (uiTraceTickCount) /* Streaming only */

/* If using dynamic allocation of snapshot trace buffer... */
#define TRACE_MALLOC(size) pvPortMalloc(size)

#if defined(configUSE_TIMERS)
#if (configUSE_TIMERS == 1)
#undef INCLUDE_xTimerGetTimerDaemonTaskHandle
#define INCLUDE_xTimerGetTimerDaemonTaskHandle 1
#endif /* configUSE_TIMERS == 1 */
#endif /* configUSE_TIMERS */

/* For ARM Cortex-M devices - assumes the ARM CMSIS API is available */
#if (defined (__CORTEX_M))
#define TRACE_ALLOC_CRITICAL_SECTION() uint32_t __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __get_PRIMASK(); __set_PRIMASK(1);} /* PRIMASK disables ALL interrupts - allows for tracing in any ISR */
#define TRACE_EXIT_CRITICAL_SECTION() {__set_PRIMASK(__irq_status);}
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_ARM_CORTEX_A9) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_XILINX_ZyncUltraScaleR5)

/**************************************************************************
 * Disables only "FreeRTOS-enabled" interrupts, i.e., those with priorities
 * up to configMAX_API_CALL_INTERRUPT_PRIORITY. Don't add tracing in ISRs
 * with greater priority.
 *************************************************************************/

extern int cortex_a9_r5_enter_critical(void);
extern void cortex_a9_r5_exit_critical(int irq_already_masked_at_enter);

#define TRACE_ALLOC_CRITICAL_SECTION() uint32_t __irq_mask_status;

#define TRACE_ENTER_CRITICAL_SECTION() { __irq_mask_status = cortex_a9_r5_enter_critical(); }

#define TRACE_EXIT_CRITICAL_SECTION() { cortex_a9_r5_exit_critical(__irq_mask_status); }

#endif

#if ( (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Renesas_RX600) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_MICROCHIP_PIC24_PIC32))
#define TRACE_ALLOC_CRITICAL_SECTION() int __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Altera_NiosII)
#include "system.h"
#include "sys/alt_irq.h"
#define TRACE_ALLOC_CRITICAL_SECTION() alt_irq_context __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = alt_irq_disable_all();}
#define TRACE_EXIT_CRITICAL_SECTION() {alt_irq_enable_all(__irq_status);}
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_Win32)
/* In the Win32 port, there are no real interrupts, so we can use the normal critical sections */
#define TRACE_ALLOC_CRITICAL_SECTION()
#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL()
#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL()
#endif

#if (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_POWERPC_Z4)
#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0)
/* FreeRTOS v8.0 or later */
#define TRACE_ALLOC_CRITICAL_SECTION() UBaseType_t __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#else
/* FreeRTOS v7.x */
#define TRACE_ALLOC_CRITICAL_SECTION() unsigned portBASE_TYPE __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = portSET_INTERRUPT_MASK_FROM_ISR();}
#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif
#endif


#if ((TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_XTensa_LX6) || (TRC_CFG_HARDWARE_PORT == TRC_HARDWARE_PORT_XTensa_LX7))
#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0)
/* FreeRTOS v8.0 or later */
#if CONFIG_FREERTOS_UNICORE
/**
 * @note In unicore mode we can use the Xtensa RSIL instruction to manage critical sections.
 */
#define TRACE_ALLOC_CRITICAL_SECTION() UBaseType_t __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __extension__({ unsigned __tmp; \
	__asm__ __volatile__("rsil %0, 15\n" \
	: "=a" (__tmp) : : "memory" ); \
	__tmp;});}
#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#else
/**
 * @note In multicore mode we have to use a portMUX mutex to keep tasks on the other core
 * from interfering with the critical sections.
 */
extern portMUX_TYPE tz_mutex;
#define TRACE_ALLOC_CRITICAL_SECTION()
#define TRACE_ENTER_CRITICAL_SECTION() portENTER_CRITICAL_SAFE(&tz_mutex);
#define TRACE_EXIT_CRITICAL_SECTION() portEXIT_CRITICAL_SAFE(&tz_mutex);
#endif
#else
/* FreeRTOS v7.x */
#define TRACE_ALLOC_CRITICAL_SECTION() unsigned portBASE_TYPE __irq_status;
#define TRACE_ENTER_CRITICAL_SECTION() {__irq_status = __extension__({ unsigned __tmp; \
	__asm__ __volatile__("rsil %0, 15\n" \
	: "=a" (__tmp) : : "memory" ); \
	__tmp;});}
#define TRACE_EXIT_CRITICAL_SECTION() {portCLEAR_INTERRUPT_MASK_FROM_ISR(__irq_status);}
#endif
#endif

#ifndef TRACE_ENTER_CRITICAL_SECTION
#error "This hardware port has no definition for critical sections! See http://percepio.com/2014/10/27/how-to-define-critical-sections-for-the-recorder/"
#endif
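
/*
 * How the three macros above are meant to be used together (a sketch; the
 * function below is hypothetical, but the pattern mirrors the trace hooks
 * defined later in this file, e.g. traceTASK_DELETE):
 *
 *   void prvExampleRecorderInternal(void)
 *   {
 *       TRACE_ALLOC_CRITICAL_SECTION();   // declares the saved-IRQ-state variable (may expand to nothing)
 *       TRACE_ENTER_CRITICAL_SECTION();   // mask interrupts / take the recorder lock
 *       // ... touch recorder data structures ...
 *       TRACE_EXIT_CRITICAL_SECTION();    // restore the previous interrupt state
 *   }
 */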


#if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_2_0)
#define isQueueReceiveHookActuallyPeek xJustPeeking
#elif (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_2_0)
#define isQueueReceiveHookActuallyPeek (__LINE__ < 0) /* instead of pdFALSE, to avoid a "constant condition" warning */
#endif

extern uint16_t CurrentFilterMask;

extern uint16_t CurrentFilterGroup;

uint8_t prvTraceGetQueueType(void* handle);
uint16_t prvTraceGetTaskNumberLow16(void* handle);
uint16_t prvTraceGetTaskNumberHigh16(void* handle);
void prvTraceSetTaskNumberLow16(void* handle, uint16_t value);
void prvTraceSetTaskNumberHigh16(void* handle, uint16_t value);

uint16_t prvTraceGetQueueNumberLow16(void* handle);
uint16_t prvTraceGetQueueNumberHigh16(void* handle);
void prvTraceSetQueueNumberLow16(void* handle, uint16_t value);
void prvTraceSetQueueNumberHigh16(void* handle, uint16_t value);

#if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
uint16_t prvTraceGetTimerNumberLow16(void* handle);
uint16_t prvTraceGetTimerNumberHigh16(void* handle);
void prvTraceSetTimerNumberLow16(void* handle, uint16_t value);
void prvTraceSetTimerNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_TIMER_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
uint16_t prvTraceGetEventGroupNumberLow16(void* handle);
uint16_t prvTraceGetEventGroupNumberHigh16(void* handle);
void prvTraceSetEventGroupNumberLow16(void* handle, uint16_t value);
void prvTraceSetEventGroupNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
uint16_t prvTraceGetStreamBufferNumberLow16(void* handle);
uint16_t prvTraceGetStreamBufferNumberHigh16(void* handle);
void prvTraceSetStreamBufferNumberLow16(void* handle, uint16_t value);
void prvTraceSetStreamBufferNumberHigh16(void* handle, uint16_t value);
#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1 && TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#define TRACE_GET_TASK_FILTER(pxTask) prvTraceGetTaskNumberHigh16((void*)pxTask)
#define TRACE_SET_TASK_FILTER(pxTask, group) prvTraceSetTaskNumberHigh16((void*)pxTask, group)

#define TRACE_GET_QUEUE_FILTER(pxObject) prvTraceGetQueueNumberHigh16((void*)pxObject)
#define TRACE_SET_QUEUE_FILTER(pxObject, group) prvTraceSetQueueNumberHigh16((void*)pxObject, group)

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define TRACE_GET_EVENTGROUP_FILTER(pxObject) prvTraceGetEventGroupNumberHigh16((void*)pxObject)
#define TRACE_SET_EVENTGROUP_FILTER(pxObject, group) prvTraceSetEventGroupNumberHigh16((void*)pxObject, group)
#else /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */
/* FreeRTOS versions before v10.0 do not support filtering for event groups */
#define TRACE_GET_EVENTGROUP_FILTER(pxObject) 1
#define TRACE_SET_EVENTGROUP_FILTER(pxObject, group)
#endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define TRACE_GET_TIMER_FILTER(pxObject) prvTraceGetTimerNumberHigh16((void*)pxObject)
#define TRACE_SET_TIMER_FILTER(pxObject, group) prvTraceSetTimerNumberHigh16((void*)pxObject, group)
#else /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */
/* FreeRTOS versions before v10.0 do not support filtering for timers */
#define TRACE_GET_TIMER_FILTER(pxObject) 1
#define TRACE_SET_TIMER_FILTER(pxObject, group)
#endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#define TRACE_GET_STREAMBUFFER_FILTER(pxObject) prvTraceGetStreamBufferNumberHigh16((void*)pxObject)
#define TRACE_SET_STREAMBUFFER_FILTER(pxObject, group) prvTraceSetStreamBufferNumberHigh16((void*)pxObject, group)

/* We can only support filtering if FreeRTOS is at least v8.0 */
#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0)
#define TRACE_GET_OBJECT_FILTER(CLASS, pxObject) TRACE_GET_##CLASS##_FILTER(pxObject)
#define TRACE_SET_OBJECT_FILTER(CLASS, pxObject, group) TRACE_SET_##CLASS##_FILTER(pxObject, group)
#else /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0) */
#define TRACE_GET_OBJECT_FILTER(CLASS, pxObject) 0xFFFF
#define TRACE_SET_OBJECT_FILTER(CLASS, pxObject, group)
#endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0) */
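
/*
 * The Low16/High16 accessors above pack two things into each kernel object's
 * 32-bit "object number": the low 16 bits hold the recorder's object handle
 * (TRACE_GET_<CLASS>_NUMBER, defined further down) and the high 16 bits hold
 * its filter group (TRACE_GET_<CLASS>_FILTER). A sketch of the kind of check
 * a trace hook can perform with CurrentFilterMask (illustrative only, not a
 * macro from this file):
 *
 *   // Only emit the event if the object's filter group is currently enabled.
 *   if ((TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) != 0)
 *   {
 *       // ... store the event ...
 *   }
 */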

/* Helpers needed to correctly expand names */
#define TZ__CAT2(a,b) a ## b
#define TZ__CAT(a,b) TZ__CAT2(a, b)

/**************************************************************************/
/* Makes sure xQueueGiveFromISR also has an xCopyPosition parameter */
/**************************************************************************/

/* Expands the name differently depending on where this header is included: uxQueueType must be
a macro that only exists in queue.c, and it must expand to nothing or to something that's valid
in identifiers */
#define xQueueGiveFromISR(a,b) TZ__CAT(xQueueGiveFromISR__, uxQueueType) (a,b)

/* If in queue.c, the "uxQueueType" macro expands to "pcHead".
queueSEND_TO_BACK is the value we need to send in */
#define xQueueGiveFromISR__pcHead(__a, __b) MyWrapper_xQueueGiveFromISR(__a, __b, const BaseType_t xCopyPosition); \
BaseType_t xQueueGiveFromISR(__a, __b) { return MyWrapper_xQueueGiveFromISR(xQueue, pxHigherPriorityTaskWoken, queueSEND_TO_BACK); } \
BaseType_t MyWrapper_xQueueGiveFromISR(__a, __b, const BaseType_t xCopyPosition)

/* If not in queue.c, "uxQueueType" isn't expanded */
#define xQueueGiveFromISR__uxQueueType(__a, __b) xQueueGiveFromISR(__a,__b)

/**************************************************************************/
/* End of xQueueGiveFromISR fix */
/**************************************************************************/
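
/*
 * Worked expansion of the trick above. Inside queue.c, where uxQueueType is a
 * macro for pcHead, the function header
 *
 *   BaseType_t xQueueGiveFromISR( QueueHandle_t xQueue,
 *                                 BaseType_t * const pxHigherPriorityTaskWoken )
 *
 * expands through TZ__CAT into xQueueGiveFromISR__pcHead(...), i.e. into a
 * declaration of MyWrapper_xQueueGiveFromISR (with the extra xCopyPosition
 * parameter), a thin xQueueGiveFromISR that forwards queueSEND_TO_BACK to the
 * wrapper, and the header of the wrapper itself, whose body becomes the
 * original function body. Outside queue.c, uxQueueType is not a macro, so the
 * name expands to xQueueGiveFromISR__uxQueueType, which maps straight back to
 * the ordinary xQueueGiveFromISR prototype.
 */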

/******************************************************************************/
/*** Definitions for Snapshot mode ********************************************/
/******************************************************************************/
#if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT)

/*** The object classes *******************************************************/

#define TRACE_NCLASSES 9
#define TRACE_CLASS_QUEUE ((traceObjectClass)0)
#define TRACE_CLASS_SEMAPHORE ((traceObjectClass)1)
#define TRACE_CLASS_MUTEX ((traceObjectClass)2)
#define TRACE_CLASS_TASK ((traceObjectClass)3)
#define TRACE_CLASS_ISR ((traceObjectClass)4)
#define TRACE_CLASS_TIMER ((traceObjectClass)5)
#define TRACE_CLASS_EVENTGROUP ((traceObjectClass)6)
#define TRACE_CLASS_STREAMBUFFER ((traceObjectClass)7)
#define TRACE_CLASS_MESSAGEBUFFER ((traceObjectClass)8)

/*** Definitions for Object Table ********************************************/
#define TRACE_KERNEL_OBJECT_COUNT ((TRC_CFG_NQUEUE) + (TRC_CFG_NSEMAPHORE) + (TRC_CFG_NMUTEX) + (TRC_CFG_NTASK) + (TRC_CFG_NISR) + (TRC_CFG_NTIMER) + (TRC_CFG_NEVENTGROUP) + (TRC_CFG_NSTREAMBUFFER) + (TRC_CFG_NMESSAGEBUFFER))

/* Queue properties (except name): current number of messages in queue */
#define PropertyTableSizeQueue ((TRC_CFG_NAME_LEN_QUEUE) + 1)

/* Semaphore properties (except name): state (signaled = 1, cleared = 0) */
#define PropertyTableSizeSemaphore ((TRC_CFG_NAME_LEN_SEMAPHORE) + 1)

/* Mutex properties (except name): owner (task handle, 0 = free) */
#define PropertyTableSizeMutex ((TRC_CFG_NAME_LEN_MUTEX) + 1)

/* Task properties (except name): Byte 0: Current priority
                                  Byte 1: state (if already active)
                                  Byte 2: legacy, not used
                                  Byte 3: legacy, not used */
#define PropertyTableSizeTask ((TRC_CFG_NAME_LEN_TASK) + 4)

/* ISR properties:                Byte 0: priority
                                  Byte 1: state (if already active) */
#define PropertyTableSizeISR ((TRC_CFG_NAME_LEN_ISR) + 2)

/* Timer properties:              Byte 0: state (unused for now) */
#define PropertyTableSizeTimer ((TRC_CFG_NAME_LEN_TIMER) + 1)

/* EventGroup properties:         Byte 0-3: state (unused for now) */
#define PropertyTableSizeEventGroup ((TRC_CFG_NAME_LEN_EVENTGROUP) + 4)

/* StreamBuffer properties:       Byte 0-3: state (unused for now) */
#define PropertyTableSizeStreamBuffer ((TRC_CFG_NAME_LEN_STREAMBUFFER) + 4)

/* MessageBuffer properties:      Byte 0-3: state (unused for now) */
#define PropertyTableSizeMessageBuffer ((TRC_CFG_NAME_LEN_MESSAGEBUFFER) + 4)


/* The layout of the byte array representing the Object Property Table */
#define StartIndexQueue (0)
#define StartIndexSemaphore (StartIndexQueue + (TRC_CFG_NQUEUE) * PropertyTableSizeQueue)
#define StartIndexMutex (StartIndexSemaphore + (TRC_CFG_NSEMAPHORE) * PropertyTableSizeSemaphore)
#define StartIndexTask (StartIndexMutex + (TRC_CFG_NMUTEX) * PropertyTableSizeMutex)
#define StartIndexISR (StartIndexTask + (TRC_CFG_NTASK) * PropertyTableSizeTask)
#define StartIndexTimer (StartIndexISR + (TRC_CFG_NISR) * PropertyTableSizeISR)
#define StartIndexEventGroup (StartIndexTimer + (TRC_CFG_NTIMER) * PropertyTableSizeTimer)
#define StartIndexStreamBuffer (StartIndexEventGroup + (TRC_CFG_NEVENTGROUP) * PropertyTableSizeEventGroup)
#define StartIndexMessageBuffer (StartIndexStreamBuffer + (TRC_CFG_NSTREAMBUFFER) * PropertyTableSizeStreamBuffer)

/* Number of bytes used by the object table */
#define TRACE_OBJECT_TABLE_SIZE (StartIndexMessageBuffer + (TRC_CFG_NMESSAGEBUFFER) * PropertyTableSizeMessageBuffer)
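
/*
 * Worked example of the layout above (the TRC_CFG_* values are hypothetical,
 * chosen only for illustration): with TRC_CFG_NQUEUE = 10, TRC_CFG_NSEMAPHORE = 5
 * and TRC_CFG_NAME_LEN_QUEUE = TRC_CFG_NAME_LEN_SEMAPHORE = 15,
 *
 *   PropertyTableSizeQueue = 15 + 1 = 16 bytes per queue
 *   StartIndexSemaphore    = 0 + 10 * 16 = 160
 *   StartIndexMutex        = 160 + 5 * 16 = 240
 *
 * and so on; TRACE_OBJECT_TABLE_SIZE is simply the end of the last region,
 * i.e. the total number of bytes reserved for all configured objects.
 */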

/* Flag to tell the context of tracePEND_FUNC_CALL_FROM_ISR */
extern int uiInEventGroupSetBitsFromISR;

/* Initialization of the object property table */
void vTraceInitObjectPropertyTable(void);

/* Initialization of the handle mechanism, see e.g. prvTraceGetObjectHandle */
void vTraceInitObjectHandleStack(void);

/* Returns the "Not enough handles" error message for the specified object class */
const char* pszTraceGetErrorNotEnoughHandles(traceObjectClass objectclass);

void* prvTraceGetCurrentTaskHandle(void);

/******************************************************************************
 * TraceQueueClassTable
 * Translates a FreeRTOS QueueType into trace object classes (TRACE_CLASS_).
 * Has one entry for each QueueType, gives TRACE_CLASS ID.
 ******************************************************************************/
extern traceObjectClass TraceQueueClassTable[5];


/*** Event codes for snapshot mode - must match Tracealyzer config files ******/

#define NULL_EVENT (0x00UL)

/*******************************************************************************
 * EVENTGROUP_DIV
 *
 * Miscellaneous events.
 ******************************************************************************/
#define EVENTGROUP_DIV (NULL_EVENT + 1UL) /*0x01*/
#define DIV_XPS (EVENTGROUP_DIV + 0UL) /*0x01*/
#define DIV_TASK_READY (EVENTGROUP_DIV + 1UL) /*0x02*/
#define DIV_NEW_TIME (EVENTGROUP_DIV + 2UL) /*0x03*/

/*******************************************************************************
 * EVENTGROUP_TS
 *
 * Events for storing task-switches and interrupts. The RESUME events are
 * generated if the task/interrupt is already marked active.
 ******************************************************************************/
#define EVENTGROUP_TS (EVENTGROUP_DIV + 3UL) /*0x04*/
#define TS_ISR_BEGIN (EVENTGROUP_TS + 0UL) /*0x04*/
#define TS_ISR_RESUME (EVENTGROUP_TS + 1UL) /*0x05*/
#define TS_TASK_BEGIN (EVENTGROUP_TS + 2UL) /*0x06*/
#define TS_TASK_RESUME (EVENTGROUP_TS + 3UL) /*0x07*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_NAME
 *
 * About Close Events
 * When an object is evicted from the object property table (object close), two
 * internal events are stored (EVENTGROUP_OBJCLOSE_NAME and
 * EVENTGROUP_OBJCLOSE_PROP), containing the handle-name mapping and object
 * properties valid up to this point.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_TS + 4UL) /*0x08*/

/*******************************************************************************
 * EVENTGROUP_OBJCLOSE_PROP
 *
 * The internal event carrying properties of deleted objects.
 * The handle and object class of the closed object are not stored in this event,
 * but are assumed to be the same as in the preceding CLOSE event. Thus, these
 * two events must be generated from within a critical section.
 * When queues are closed, arg1 is the "state" property (i.e., number of
 * buffered messages/signals).
 * When actors are closed, arg1 is priority, arg2 is handle of the "instance
 * finish" event, and arg3 is event code of the "instance finish" event.
 * In this case, the lower three bits are the object class of the instance finish
 * handle. The lower three bits are not used (always zero) when queues are
 * closed, since the queue type is given in the previous OBJCLOSE_NAME event.
 ******************************************************************************/
#define EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + 8UL) /*0x10*/

/*******************************************************************************
 * EVENTGROUP_CREATE
 *
 * The events in this group are used to log kernel object creations.
 * The lower three bits in the event code give the object class, i.e., the type
 * of create operation (task, queue, semaphore, etc).
 ******************************************************************************/
#define EVENTGROUP_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + 8UL) /*0x18*/

/*******************************************************************************
 * EVENTGROUP_SEND
 *
 * The events in this group are used to log Send/Give events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_SEND_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 8UL) /*0x20*/
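
/*
 * Worked example of the "+ object class" encoding used by the event groups
 * above and below: each group spans 8 codes, and the lower three bits select
 * the object class. A successful send on a plain queue (TRACE_CLASS_QUEUE = 0)
 * is logged as EVENTGROUP_SEND_TRCSUCCESS + 0 = 0x20, while a give on a mutex
 * (TRACE_CLASS_MUTEX = 2) becomes 0x20 + 2 = 0x22. The same offset scheme
 * applies to the CREATE, RECEIVE and *_FROM_ISR groups.
 */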

/*******************************************************************************
 * EVENTGROUP_RECEIVE
 *
 * The events in this group are used to log Receive/Take events on queues,
 * semaphores and mutexes. The lower three bits in the event code give the
 * object class, i.e., what type of object is operated on (queue, semaphore
 * or mutex).
 ******************************************************************************/
#define EVENTGROUP_RECEIVE_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 8UL) /*0x28*/

/* Send/Give operations, from ISR */
#define EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS \
	(EVENTGROUP_RECEIVE_TRCSUCCESS + 8UL) /*0x30*/

/* Receive/Take operations, from ISR */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS \
	(EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 8UL) /*0x38*/

/* "Failed" event type versions of above (timeout, failed allocation, etc) */
#define EVENTGROUP_KSE_TRCFAILED \
	(EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 8UL) /*0x40*/

/* Failed create calls - memory allocation failed */
#define EVENTGROUP_CREATE_OBJ_TRCFAILED (EVENTGROUP_KSE_TRCFAILED) /*0x40*/

/* Failed send/give - timeout! */
#define EVENTGROUP_SEND_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 8UL) /*0x48*/

/* Failed receive/take - timeout! */
#define EVENTGROUP_RECEIVE_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 8UL) /*0x50*/

/* Failed non-blocking send/give - queue full */
#define EVENTGROUP_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 8UL) /*0x58*/

/* Failed non-blocking receive/take - queue empty */
#define EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED \
	(EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 8UL) /*0x60*/

/* Events when blocking on receive/take */
#define EVENTGROUP_RECEIVE_TRCBLOCK \
	(EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 8UL) /*0x68*/

/* Events when blocking on send/give */
#define EVENTGROUP_SEND_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 8UL) /*0x70*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCSUCCESS (EVENTGROUP_SEND_TRCBLOCK + 8UL) /*0x78*/

/* Events on object delete (vTaskDelete or vQueueDelete) */
#define EVENTGROUP_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_PEEK_TRCSUCCESS + 8UL) /*0x80*/

/* Other events - object class is implied: TASK */
#define EVENTGROUP_OTHERS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 8UL) /*0x88*/
#define TASK_DELAY_UNTIL (EVENTGROUP_OTHERS + 0UL) /*0x88*/
#define TASK_DELAY (EVENTGROUP_OTHERS + 1UL) /*0x89*/
#define TASK_SUSPEND (EVENTGROUP_OTHERS + 2UL) /*0x8A*/
#define TASK_RESUME (EVENTGROUP_OTHERS + 3UL) /*0x8B*/
#define TASK_RESUME_FROM_ISR (EVENTGROUP_OTHERS + 4UL) /*0x8C*/
#define TASK_PRIORITY_SET (EVENTGROUP_OTHERS + 5UL) /*0x8D*/
#define TASK_PRIORITY_INHERIT (EVENTGROUP_OTHERS + 6UL) /*0x8E*/
#define TASK_PRIORITY_DISINHERIT (EVENTGROUP_OTHERS + 7UL) /*0x8F*/

#define EVENTGROUP_MISC_PLACEHOLDER (EVENTGROUP_OTHERS + 8UL) /*0x90*/
#define PEND_FUNC_CALL (EVENTGROUP_MISC_PLACEHOLDER+0UL) /*0x90*/
#define PEND_FUNC_CALL_FROM_ISR (EVENTGROUP_MISC_PLACEHOLDER+1UL) /*0x91*/
#define PEND_FUNC_CALL_TRCFAILED (EVENTGROUP_MISC_PLACEHOLDER+2UL) /*0x92*/
#define PEND_FUNC_CALL_FROM_ISR_TRCFAILED (EVENTGROUP_MISC_PLACEHOLDER+3UL) /*0x93*/
#define MEM_MALLOC_SIZE (EVENTGROUP_MISC_PLACEHOLDER+4UL) /*0x94*/
#define MEM_MALLOC_ADDR (EVENTGROUP_MISC_PLACEHOLDER+5UL) /*0x95*/
#define MEM_FREE_SIZE (EVENTGROUP_MISC_PLACEHOLDER+6UL) /*0x96*/
#define MEM_FREE_ADDR (EVENTGROUP_MISC_PLACEHOLDER+7UL) /*0x97*/

/* User events */
#define EVENTGROUP_USEREVENT (EVENTGROUP_MISC_PLACEHOLDER + 8UL) /*0x98*/
#define USER_EVENT (EVENTGROUP_USEREVENT + 0UL)

/* Allow for 0-15 arguments (the number of args is added to event code) */
#define USER_EVENT_LAST (EVENTGROUP_USEREVENT + 15UL) /*0xA7*/

/*******************************************************************************
 * XTS Event - eXtended TimeStamp events
 * The timestamps used in the recorder are "differential timestamps" (DTS), i.e.
 * the time since the last stored event. The DTS fields are either 1 or 2 bytes
 * in the other events, depending on the bytes available in the event struct.
 * If the time since the last event (the DTS) is larger than allowed for by
 * the DTS field of the current event, an XTS event is inserted immediately
 * before the original event. The XTS event contains up to 3 additional bytes
 * of the DTS value - the higher bytes of the true DTS value. The lower 1-2
 * bytes are stored in the normal DTS field.
 * There are two types of XTS events, XTS8 and XTS16. An XTS8 event is stored
 * when there is only room for 1 byte (8 bit) DTS data in the original event,
 * which means a limit of 0xFF (255UL). The XTS16 is used when the original event
 * has a 16 bit DTS field and thereby can handle values up to 0xFFFF (65535UL).
 *
 * Using a very high frequency time base can result in many XTS events.
 * Preferably, the time between two OS ticks should fit in 16 bits, i.e.,
 * at most 65535. If your time base has a higher frequency, you can define
 * the TRACE
 ******************************************************************************/
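
/*
 * Numeric illustration of the XTS mechanism described above (the values are
 * hypothetical): suppose 0x12345 timer ticks have elapsed since the previous
 * event, but the next event only has an 8-bit DTS field (limit 0xFF). The
 * recorder then first stores an XTS8 event carrying the high bytes (0x123)
 * and puts the remaining low byte (0x45) in the event's own DTS field, so the
 * full differential timestamp can be reassembled offline.
 */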

#define EVENTGROUP_SYS (EVENTGROUP_USEREVENT + 16UL) /*0xA8*/
#define XTS8 (EVENTGROUP_SYS + 0UL) /*0xA8*/
#define XTS16 (EVENTGROUP_SYS + 1UL) /*0xA9*/
#define EVENT_BEING_WRITTEN (EVENTGROUP_SYS + 2UL) /*0xAA*/
#define RESERVED_DUMMY_CODE (EVENTGROUP_SYS + 3UL) /*0xAB*/
#define LOW_POWER_BEGIN (EVENTGROUP_SYS + 4UL) /*0xAC*/
#define LOW_POWER_END (EVENTGROUP_SYS + 5UL) /*0xAD*/
#define XID (EVENTGROUP_SYS + 6UL) /*0xAE*/
#define XTS16L (EVENTGROUP_SYS + 7UL) /*0xAF*/

#define EVENTGROUP_TIMER (EVENTGROUP_SYS + 8UL) /*0xB0*/
#define TIMER_CREATE (EVENTGROUP_TIMER + 0UL) /*0xB0*/
#define XTS_TIMER_START (EVENTGROUP_TIMER + 1UL) /*0xB1*/
#define TIMER_RST (EVENTGROUP_TIMER + 2UL) /*0xB2*/
#define TIMER_STOP (EVENTGROUP_TIMER + 3UL) /*0xB3*/
#define TIMER_CHANGE_PERIOD (EVENTGROUP_TIMER + 4UL) /*0xB4*/
#define TIMER_DELETE_OBJ (EVENTGROUP_TIMER + 5UL) /*0xB5*/
#define TIMER_START_FROM_ISR (EVENTGROUP_TIMER + 6UL) /*0xB6*/
#define TIMER_RESET_FROM_ISR (EVENTGROUP_TIMER + 7UL) /*0xB7*/
#define TIMER_STOP_FROM_ISR (EVENTGROUP_TIMER + 8UL) /*0xB8*/

#define TIMER_CREATE_TRCFAILED (EVENTGROUP_TIMER + 9UL) /*0xB9*/
#define TIMER_START_TRCFAILED (EVENTGROUP_TIMER + 10UL) /*0xBA*/
#define TIMER_RESET_TRCFAILED (EVENTGROUP_TIMER + 11UL) /*0xBB*/
#define TIMER_STOP_TRCFAILED (EVENTGROUP_TIMER + 12UL) /*0xBC*/
#define TIMER_CHANGE_PERIOD_TRCFAILED (EVENTGROUP_TIMER + 13UL) /*0xBD*/
#define TIMER_DELETE_TRCFAILED (EVENTGROUP_TIMER + 14UL) /*0xBE*/
#define TIMER_START_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 15UL) /*0xBF*/
#define TIMER_RESET_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 16UL) /*0xC0*/
#define TIMER_STOP_FROM_ISR_TRCFAILED (EVENTGROUP_TIMER + 17UL) /*0xC1*/

#define EVENTGROUP_EG (EVENTGROUP_TIMER + 18UL) /*0xC2*/
#define EVENT_GROUP_CREATE (EVENTGROUP_EG + 0UL) /*0xC2*/
#define EVENT_GROUP_CREATE_TRCFAILED (EVENTGROUP_EG + 1UL) /*0xC3*/
#define EVENT_GROUP_SYNC_TRCBLOCK (EVENTGROUP_EG + 2UL) /*0xC4*/
#define EVENT_GROUP_SYNC_END (EVENTGROUP_EG + 3UL) /*0xC5*/
#define EVENT_GROUP_WAIT_BITS_TRCBLOCK (EVENTGROUP_EG + 4UL) /*0xC6*/
#define EVENT_GROUP_WAIT_BITS_END (EVENTGROUP_EG + 5UL) /*0xC7*/
#define EVENT_GROUP_CLEAR_BITS (EVENTGROUP_EG + 6UL) /*0xC8*/
#define EVENT_GROUP_CLEAR_BITS_FROM_ISR (EVENTGROUP_EG + 7UL) /*0xC9*/
#define EVENT_GROUP_SET_BITS (EVENTGROUP_EG + 8UL) /*0xCA*/
#define EVENT_GROUP_DELETE_OBJ (EVENTGROUP_EG + 9UL) /*0xCB*/
#define EVENT_GROUP_SYNC_END_TRCFAILED (EVENTGROUP_EG + 10UL) /*0xCC*/
#define EVENT_GROUP_WAIT_BITS_END_TRCFAILED (EVENTGROUP_EG + 11UL) /*0xCD*/
#define EVENT_GROUP_SET_BITS_FROM_ISR (EVENTGROUP_EG + 12UL) /*0xCE*/
#define EVENT_GROUP_SET_BITS_FROM_ISR_TRCFAILED (EVENTGROUP_EG + 13UL) /*0xCF*/

#define TASK_INSTANCE_FINISHED_NEXT_KSE (EVENTGROUP_EG + 14UL) /*0xD0*/

#define TASK_INSTANCE_FINISHED_DIRECT (EVENTGROUP_EG + 15UL) /*0xD1*/

#define TRACE_TASK_NOTIFY_GROUP (EVENTGROUP_EG + 16UL) /*0xD2*/
#define TRACE_TASK_NOTIFY (TRACE_TASK_NOTIFY_GROUP + 0UL) /*0xD2*/
#define TRACE_TASK_NOTIFY_TAKE (TRACE_TASK_NOTIFY_GROUP + 1UL) /*0xD3*/
#define TRACE_TASK_NOTIFY_TAKE_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 2UL) /*0xD4*/
#define TRACE_TASK_NOTIFY_TAKE_TRCFAILED (TRACE_TASK_NOTIFY_GROUP + 3UL) /*0xD5*/
#define TRACE_TASK_NOTIFY_WAIT (TRACE_TASK_NOTIFY_GROUP + 4UL) /*0xD6*/
#define TRACE_TASK_NOTIFY_WAIT_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 5UL) /*0xD7*/
#define TRACE_TASK_NOTIFY_WAIT_TRCFAILED (TRACE_TASK_NOTIFY_GROUP + 6UL) /*0xD8*/
#define TRACE_TASK_NOTIFY_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 7UL) /*0xD9*/
#define TRACE_TASK_NOTIFY_GIVE_FROM_ISR (TRACE_TASK_NOTIFY_GROUP + 8UL) /*0xDA*/

#define TIMER_EXPIRED (TRACE_TASK_NOTIFY_GROUP + 9UL) /*0xDB*/

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCBLOCK (TRACE_TASK_NOTIFY_GROUP + 10UL) /*0xDC*/
/* peek block on queue: 0xDC */
/* peek block on semaphore: 0xDD */
/* peek block on mutex: 0xDE */

/* Events on queue peek (receive) */
#define EVENTGROUP_PEEK_TRCFAILED (EVENTGROUP_PEEK_TRCBLOCK + 3UL) /*0xDF*/
/* peek failed on queue: 0xDF */
/* peek failed on semaphore: 0xE0 */
/* peek failed on mutex: 0xE1 */

#define EVENTGROUP_STREAMBUFFER_DIV (EVENTGROUP_PEEK_TRCFAILED + 3UL) /*0xE2*/
#define TRACE_STREAMBUFFER_RESET (EVENTGROUP_STREAMBUFFER_DIV + 0) /*0xE2*/
#define TRACE_MESSAGEBUFFER_RESET (EVENTGROUP_STREAMBUFFER_DIV + 1UL) /*0xE3*/
#define TRACE_STREAMBUFFER_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 2UL) /*0xE4*/
#define TRACE_MESSAGEBUFFER_OBJCLOSE_NAME_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 3UL) /*0xE5*/
#define TRACE_STREAMBUFFER_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 4UL) /*0xE6*/
#define TRACE_MESSAGEBUFFER_OBJCLOSE_PROP_TRCSUCCESS (EVENTGROUP_STREAMBUFFER_DIV + 5UL) /*0xE7*/

#define EVENTGROUP_MALLOC_FAILED (EVENTGROUP_STREAMBUFFER_DIV + 6UL) /*0xE8*/
#define MEM_MALLOC_SIZE_TRCFAILED (EVENTGROUP_MALLOC_FAILED + 0UL) /*0xE8*/
#define MEM_MALLOC_ADDR_TRCFAILED (EVENTGROUP_MALLOC_FAILED + 1UL) /*0xE9*/

/* The following are using previously "lost" event codes */
#define TRACE_STREAMBUFFER_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 4UL) /*0x1C*/
#define TRACE_STREAMBUFFER_CREATE_OBJ_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 4UL) /*0x44*/
#define TRACE_STREAMBUFFER_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 4UL) /*0x84*/
#define TRACE_STREAMBUFFER_SEND_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 3UL) /*0x23*/
#define TRACE_STREAMBUFFER_SEND_TRCBLOCK (EVENTGROUP_SEND_TRCBLOCK + 3UL) /*0x73*/
#define TRACE_STREAMBUFFER_SEND_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 3UL) /*0x4B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCSUCCESS (EVENTGROUP_RECEIVE_TRCSUCCESS + 3UL) /*0x2B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 3UL) /*0x6B*/
#define TRACE_STREAMBUFFER_RECEIVE_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 3UL) /*0x53*/
#define TRACE_STREAMBUFFER_SEND_FROM_ISR_TRCSUCCESS (EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 3UL) /*0x33*/
#define TRACE_STREAMBUFFER_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 3UL) /*0x5B*/
#define TRACE_STREAMBUFFER_RECEIVE_FROM_ISR_TRCSUCCESS (EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 3UL) /*0x3B*/
#define TRACE_STREAMBUFFER_RECEIVE_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 3UL) /*0x63*/

/* The following also use previously "lost" event codes. These macros aren't even directly
referenced; instead we do (equivalent STREAMBUFFER code) + 1. */
#define TRACE_MESSAGEBUFFER_CREATE_OBJ_TRCSUCCESS (EVENTGROUP_CREATE_OBJ_TRCSUCCESS + 5UL) /*0x1D*/
#define TRACE_MESSAGEBUFFER_CREATE_OBJ_TRCFAILED (EVENTGROUP_CREATE_OBJ_TRCFAILED + 5UL) /*0x45*/
#define TRACE_MESSAGEBUFFER_DELETE_OBJ_TRCSUCCESS (EVENTGROUP_DELETE_OBJ_TRCSUCCESS + 5UL) /*0x85*/
#define TRACE_MESSAGEBUFFER_SEND_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 4UL) /*0x24*/
#define TRACE_MESSAGEBUFFER_SEND_TRCBLOCK (EVENTGROUP_SEND_TRCBLOCK + 4UL) /*0x74*/
#define TRACE_MESSAGEBUFFER_SEND_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 4UL) /*0x4C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCSUCCESS (EVENTGROUP_RECEIVE_TRCSUCCESS + 4UL) /*0x2C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCBLOCK (EVENTGROUP_RECEIVE_TRCBLOCK + 4UL) /*0x6C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_TRCFAILED (EVENTGROUP_RECEIVE_TRCFAILED + 4UL) /*0x54*/
#define TRACE_MESSAGEBUFFER_SEND_FROM_ISR_TRCSUCCESS (EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 4UL) /*0x34*/
#define TRACE_MESSAGEBUFFER_SEND_FROM_ISR_TRCFAILED (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 4UL) /*0x5C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_FROM_ISR_TRCSUCCESS (EVENTGROUP_RECEIVE_FROM_ISR_TRCSUCCESS + 4UL) /*0x3C*/
#define TRACE_MESSAGEBUFFER_RECEIVE_FROM_ISR_TRCFAILED (EVENTGROUP_RECEIVE_FROM_ISR_TRCFAILED + 4UL) /*0x64*/

#define TRACE_QUEUE_SEND_TO_FRONT_TRCSUCCESS (EVENTGROUP_SEND_TRCSUCCESS + 5UL) /*0x25*/
#define TRACE_QUEUE_SEND_TO_FRONT_TRCBLOCK (EVENTGROUP_SEND_TRCBLOCK + 5UL) /*0x75*/
#define TRACE_QUEUE_SEND_TO_FRONT_TRCFAILED (EVENTGROUP_SEND_TRCFAILED + 5UL) /*0x4D*/
#define TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCSUCCESS (EVENTGROUP_SEND_FROM_ISR_TRCSUCCESS + 5UL) /*0x35*/
#define TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCFAILED (EVENTGROUP_SEND_FROM_ISR_TRCFAILED + 5UL) /*0x5D*/

#define TRACE_UNUSED_STACK (EVENTGROUP_MALLOC_FAILED + 2UL) /*0xEA*/

/* LAST EVENT (0xEA) */

/****************************
 * MACROS TO GET TRACE CLASS *
 ****************************/
#define TRACE_GET_TRACE_CLASS_FROM_TASK_CLASS(kernelClass) (TRACE_CLASS_TASK)
#define TRACE_GET_TRACE_CLASS_FROM_TASK_OBJECT(pxObject) (TRACE_CLASS_TASK)

#define TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(kernelClass) TraceQueueClassTable[kernelClass]
#define TRACE_GET_TRACE_CLASS_FROM_QUEUE_OBJECT(pxObject) TRACE_GET_TRACE_CLASS_FROM_QUEUE_CLASS(prvTraceGetQueueType(pxObject))

#define TRACE_GET_TRACE_CLASS_FROM_TIMER_CLASS(kernelClass) (TRACE_CLASS_TIMER)
#define TRACE_GET_TRACE_CLASS_FROM_TIMER_OBJECT(pxObject) (TRACE_CLASS_TIMER)

#define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_CLASS(kernelClass) (TRACE_CLASS_EVENTGROUP)
#define TRACE_GET_TRACE_CLASS_FROM_EVENTGROUP_OBJECT(pxObject) (TRACE_CLASS_EVENTGROUP)

/* TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_CLASS can only be accessed with a parameter indicating if it is a MessageBuffer */
#define TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_CLASS(xIsMessageBuffer) (xIsMessageBuffer == 1 ? TRACE_CLASS_MESSAGEBUFFER : TRACE_CLASS_STREAMBUFFER)
#define TRACE_GET_TRACE_CLASS_FROM_STREAMBUFFER_OBJECT(pxObject) (prvGetStreamBufferType(pxObject) == 1 ? TRACE_CLASS_MESSAGEBUFFER : TRACE_CLASS_STREAMBUFFER)

/* Generic versions */
#define TRACE_GET_CLASS_TRACE_CLASS(CLASS, kernelClass) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_CLASS(kernelClass)
#define TRACE_GET_OBJECT_TRACE_CLASS(CLASS, pxObject) TRACE_GET_TRACE_CLASS_FROM_##CLASS##_OBJECT(pxObject)

/******************************
 * MACROS TO GET OBJECT NUMBER *
 ******************************/
#define TRACE_GET_TASK_NUMBER(pxTCB) (traceHandle)(prvTraceGetTaskNumberLow16((void*)pxTCB))
#define TRACE_SET_TASK_NUMBER(pxTCB) prvTraceSetTaskNumberLow16(pxTCB, prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TASK, pxTCB)));

#define TRACE_GET_QUEUE_NUMBER(queue) ( ( traceHandle ) prvTraceGetQueueNumberLow16(queue) )
#define TRACE_SET_QUEUE_NUMBER(queue) prvTraceSetQueueNumberLow16(queue, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, queue)));

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( traceHandle ) prvTraceGetTimerNumberLow16(tmr) )
#define TRACE_SET_TIMER_NUMBER(tmr) prvTraceSetTimerNumberLow16(tmr, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr)));
#else /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */
#define TRACE_GET_TIMER_NUMBER(tmr) ( ( traceHandle ) ((Timer_t*)tmr)->uxTimerNumber )
#define TRACE_SET_TIMER_NUMBER(tmr) ((Timer_t*)tmr)->uxTimerNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr));
#endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( traceHandle ) prvTraceGetEventGroupNumberLow16(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) prvTraceSetEventGroupNumberLow16(eg, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg)));
#else /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */
#define TRACE_GET_EVENTGROUP_NUMBER(eg) ( ( traceHandle ) uxEventGroupGetNumber(eg) )
#define TRACE_SET_EVENTGROUP_NUMBER(eg) ((EventGroup_t*)eg)->uxEventGroupNumber = prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg));
#endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */


#define TRACE_GET_STREAMBUFFER_NUMBER(sb) ( ( traceHandle ) prvTraceGetStreamBufferNumberLow16(sb) )
#define TRACE_SET_STREAMBUFFER_NUMBER(sb) prvTraceSetStreamBufferNumberLow16(sb, (uint16_t)prvTraceGetObjectHandle(TRACE_GET_OBJECT_TRACE_CLASS(STREAMBUFFER, sb)));

/* Generic versions */
#define TRACE_GET_OBJECT_NUMBER(CLASS, pxObject) TRACE_GET_##CLASS##_NUMBER(pxObject)
#define TRACE_SET_OBJECT_NUMBER(CLASS, pxObject) TRACE_SET_##CLASS##_NUMBER(pxObject)

/******************************
 * MACROS TO GET EVENT CODES *
 ******************************/
#define TRACE_GET_TASK_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(TASK, kernelClass))
#define TRACE_GET_QUEUE_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_CLASS_TRACE_CLASS(QUEUE, kernelClass))
#define TRACE_GET_TIMER_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
#define TRACE_GET_EVENTGROUP_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass) -- THIS IS NOT USED --
#define TRACE_GET_STREAMBUFFER_CLASS_EVENT_CODE(SERVICE, RESULT, isMessageBuffer) (uint8_t)(TRACE_STREAMBUFFER_##SERVICE##_##RESULT + (uint8_t)isMessageBuffer)

#define TRACE_GET_TASK_OBJECT_EVENT_CODE(SERVICE, RESULT, pxTCB) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_CLASS_TASK)
#define TRACE_GET_QUEUE_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject) (uint8_t)(EVENTGROUP_##SERVICE##_##RESULT + TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxObject))
#define TRACE_GET_TIMER_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
#define TRACE_GET_EVENTGROUP_OBJECT_EVENT_CODE(SERVICE, RESULT, UNUSED) -- THIS IS NOT USED --
#define TRACE_GET_STREAMBUFFER_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject) (uint8_t)(TRACE_STREAMBUFFER_##SERVICE##_##RESULT + prvGetStreamBufferType(pxObject))

/* Generic versions */
#define TRACE_GET_CLASS_EVENT_CODE(SERVICE, RESULT, CLASS, kernelClass) TRACE_GET_##CLASS##_CLASS_EVENT_CODE(SERVICE, RESULT, kernelClass)
#define TRACE_GET_OBJECT_EVENT_CODE(SERVICE, RESULT, CLASS, pxObject) TRACE_GET_##CLASS##_OBJECT_EVENT_CODE(SERVICE, RESULT, pxObject)
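
/*
 * Expansion example for the event-code macros above: in a trace hook such as
 * traceQUEUE_SEND below,
 *
 *   TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxQueue)
 *
 * expands to TRACE_GET_QUEUE_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, pxQueue),
 * i.e. to (uint8_t)(EVENTGROUP_SEND_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue)).
 * For a mutex, the queue-type lookup yields TRACE_CLASS_MUTEX (2), so the
 * stored event code is 0x20 + 2 = 0x22.
 */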

/******************************
 * SPECIAL MACROS FOR TASKS *
 ******************************/
#define TRACE_GET_TASK_PRIORITY(pxTCB) ((uint8_t)pxTCB->uxPriority)
#define TRACE_GET_TASK_NAME(pxTCB) ((char*)pxTCB->pcTaskName)

/*** The trace macros for snapshot mode **************************************/

/* A macro that will update the tick count when returning from tickless idle */
#undef traceINCREASE_TICK_COUNT
#define traceINCREASE_TICK_COUNT( xCount )

/* Called for each task that becomes ready */
#undef traceMOVED_TASK_TO_READY_STATE
#define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \
	trcKERNEL_HOOKS_MOVED_TASK_TO_READY_STATE(pxTCB);

/* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. */
#undef traceTASK_INCREMENT_TICK

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || xPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); } \
	TRACE_EXIT_CRITICAL_SECTION(); }

#elif (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0)

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); } \
	TRACE_EXIT_CRITICAL_SECTION(); }

#else

#define traceTASK_INCREMENT_TICK( xTickCount ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { trcKERNEL_HOOKS_INCREMENT_TICK(); } \
	if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdFALSE) { trcKERNEL_HOOKS_NEW_TIME(DIV_NEW_TIME, xTickCount + 1); } \
	TRACE_EXIT_CRITICAL_SECTION(); }

#endif

extern volatile uint32_t uiTraceSystemState;

/* Called on each task-switch */
#undef traceTASK_SWITCHED_IN
#define traceTASK_SWITCHED_IN() \
	uiTraceSystemState = TRC_STATE_IN_TASKSWITCH; \
	trcKERNEL_HOOKS_TASK_SWITCH(TRACE_GET_CURRENT_TASK()); \
	uiTraceSystemState = TRC_STATE_IN_APPLICATION;

/* Called on xTaskCreate */
#undef traceTASK_CREATE
#define traceTASK_CREATE(pxNewTCB) \
	if (pxNewTCB != NULL) \
	{ \
		trcKERNEL_HOOKS_TASK_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, TASK, pxNewTCB), TASK, pxNewTCB); \
		prvAddTaskToStackMonitor(pxNewTCB); \
	}

/* Called in xTaskCreate, if it fails (typically if the stack cannot be allocated) */
#undef traceTASK_CREATE_FAILED
#define traceTASK_CREATE_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, TASK, NOT_USED), 0);

/* Called on vTaskDelete */
#undef traceTASK_DELETE
#define traceTASK_DELETE( pxTaskToDelete ) \
	{ TRACE_ALLOC_CRITICAL_SECTION(); \
	TRACE_ENTER_CRITICAL_SECTION(); \
	trcKERNEL_HOOKS_TASK_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, TASK, pxTaskToDelete), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, TASK, pxTaskToDelete), pxTaskToDelete); \
	prvRemoveTaskFromStackMonitor(pxTaskToDelete); \
	TRACE_EXIT_CRITICAL_SECTION(); }

#if (TRC_CFG_SCHEDULING_ONLY == 0)

#if defined(configUSE_TICKLESS_IDLE)
#if (configUSE_TICKLESS_IDLE != 0)

#undef traceLOW_POWER_IDLE_BEGIN
#define traceLOW_POWER_IDLE_BEGIN() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		prvTraceStoreLowPower(0); \
		trace_disable_timestamp = 1; \
	}

#undef traceLOW_POWER_IDLE_END
#define traceLOW_POWER_IDLE_END() \
	{ \
		extern uint32_t trace_disable_timestamp; \
		trace_disable_timestamp = 0; \
		prvTraceStoreLowPower(1); \
	}

#endif /* (configUSE_TICKLESS_IDLE != 0) */
#endif /* defined(configUSE_TICKLESS_IDLE) */

/* Called on vTaskSuspend */
#undef traceTASK_SUSPEND
#define traceTASK_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend);

/* Called from special case with timer only */
#undef traceTASK_DELAY_SUSPEND
#define traceTASK_DELAY_SUSPEND( pxTaskToSuspend ) \
	trcKERNEL_HOOKS_TASK_SUSPEND(TASK_SUSPEND, pxTaskToSuspend); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */
#undef traceTASK_DELAY
#define traceTASK_DELAY() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY, pxCurrentTCB[xPortGetCoreID()], xTicksToDelay); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();

/* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */
#undef traceTASK_DELAY_UNTIL
#if TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0
#define traceTASK_DELAY_UNTIL() \
	trcKERNEL_HOOKS_TASK_DELAY(TASK_DELAY_UNTIL, pxCurrentTCB[xPortGetCoreID()], xTimeToWake); \
	trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED();
#endif /* (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) */

/* Called in xQueueCreate, and thereby for all other objects based on queues, such as semaphores. */
#undef traceQUEUE_CREATE
#define traceQUEUE_CREATE( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, QUEUE, pxNewQueue), QUEUE, pxNewQueue);

/* Called in xQueueCreate, if the queue creation fails */
#undef traceQUEUE_CREATE_FAILED
#define traceQUEUE_CREATE_FAILED( queueType ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, queueType), 0);

/* Called on vQueueDelete */
#undef traceQUEUE_DELETE
#define traceQUEUE_DELETE( pxQueue ) \
	trcKERNEL_HOOKS_OBJECT_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, QUEUE, pxQueue), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, QUEUE, pxQueue), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue);

/* This macro is not necessary as of FreeRTOS v9.0.0 */
#if (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0)
/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, QUEUE, pxNewQueue), QUEUE, pxNewQueue);

/* Called in xQueueCreateMutex when the operation fails (when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, QUEUE, queueQUEUE_TYPE_MUTEX), 0);
#endif /* (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) */

/* Called when the mutex cannot be given, since the calling task is not the holder */
#undef traceGIVE_MUTEX_RECURSIVE_FAILED
#define traceGIVE_MUTEX_RECURSIVE_FAILED( pxMutex ) \
	trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, QUEUE, pxMutex), QUEUE, pxMutex);
sent to a queue */ /* CS IS NEW ! */ 1053 #undef traceQUEUE_SEND 1054 #define traceQUEUE_SEND( pxQueue ) \ 1055 trcKERNEL_HOOKS_KERNEL_SERVICE(xCopyPosition == queueSEND_TO_BACK ? (TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxQueue)) : TRACE_QUEUE_SEND_TO_FRONT_TRCSUCCESS, QUEUE, pxQueue); \ 1056 trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) == TRACE_CLASS_MUTEX ? (uint8_t)0 : (uint8_t)(pxQueue->uxMessagesWaiting + 1)); 1057 1058 /* Called when a message is sent to a queue set */ 1059 #undef traceQUEUE_SET_SEND 1060 #define traceQUEUE_SET_SEND( pxQueue ) \ 1061 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \ 1062 trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1)); 1063 1064 /* Called when a message failed to be sent to a queue (timeout) */ 1065 #undef traceQUEUE_SEND_FAILED 1066 #define traceQUEUE_SEND_FAILED( pxQueue ) \ 1067 trcKERNEL_HOOKS_KERNEL_SERVICE(xCopyPosition == queueSEND_TO_BACK ? (TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, QUEUE, pxQueue)) : TRACE_QUEUE_SEND_TO_FRONT_TRCFAILED, QUEUE, pxQueue); 1068 1069 /* Called when the task is blocked due to a send operation on a full queue */ 1070 #undef traceBLOCKING_ON_QUEUE_SEND 1071 #define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \ 1072 trcKERNEL_HOOKS_KERNEL_SERVICE(xCopyPosition == queueSEND_TO_BACK ? (TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCBLOCK, QUEUE, pxQueue)) : TRACE_QUEUE_SEND_TO_FRONT_TRCBLOCK, QUEUE, pxQueue); 1073 1074 /* Called when a message is received from a queue */ 1075 #undef traceQUEUE_RECEIVE 1076 #define traceQUEUE_RECEIVE( pxQueue ) \ 1077 if (isQueueReceiveHookActuallyPeek) \ 1078 { \ 1079 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \ 1080 } \ 1081 else \ 1082 { \ 1083 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \ 1084 } \ 1085 trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) == TRACE_CLASS_MUTEX ? 
(uint8_t)TRACE_GET_TASK_NUMBER(TRACE_GET_CURRENT_TASK()) : (uint8_t)(pxQueue->uxMessagesWaiting - 1)); 1086 #undef traceQUEUE_SEMAPHORE_RECEIVE 1087 #define traceQUEUE_SEMAPHORE_RECEIVE( pxQueue ) traceQUEUE_RECEIVE( pxQueue ) 1088 1089 /* Called when a receive operation on a queue fails (timeout) */ 1090 #undef traceQUEUE_RECEIVE_FAILED 1091 #define traceQUEUE_RECEIVE_FAILED( pxQueue ) \ 1092 if (isQueueReceiveHookActuallyPeek) \ 1093 { \ 1094 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \ 1095 } \ 1096 else \ 1097 { \ 1098 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); \ 1099 } 1100 1101 /* Called when the task is blocked due to a receive operation on an empty queue */ 1102 #undef traceBLOCKING_ON_QUEUE_RECEIVE 1103 #define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \ 1104 if (isQueueReceiveHookActuallyPeek) \ 1105 { \ 1106 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \ 1107 } \ 1108 else \ 1109 { \ 1110 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \ 1111 } \ 1112 if (TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) != TRACE_CLASS_MUTEX) \ 1113 { \ 1114 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \ 1115 } 1116 1117 /* Called on xQueuePeek */ 1118 #undef traceQUEUE_PEEK 1119 #define traceQUEUE_PEEK( pxQueue ) \ 1120 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); 1121 1122 /* Called on xQueuePeek fail/timeout (added in FreeRTOS v9.0.2) */ 1123 #undef traceQUEUE_PEEK_FAILED 1124 #define traceQUEUE_PEEK_FAILED( pxQueue ) \ 1125 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); 1126 1127 /* Called on xQueuePeek blocking (added in FreeRTOS v9.0.2) */ 1128 #undef traceBLOCKING_ON_QUEUE_PEEK 1129 #define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \ 1130 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(PEEK, TRCBLOCK, QUEUE, pxQueue), QUEUE, pxQueue); \ 1131 if (TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, pxQueue) != TRACE_CLASS_MUTEX) \ 1132 { \ 1133 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); \ 1134 } 1135 1136 /* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */ 1137 #undef traceQUEUE_SEND_FROM_ISR 1138 #define traceQUEUE_SEND_FROM_ISR( pxQueue ) \ 1139 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(xCopyPosition == queueSEND_TO_BACK ? (TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue)) : TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCSUCCESS, QUEUE, pxQueue); \ 1140 trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting + 1)); 1141 #undef traceQUEUE_GIVE_FROM_ISR 1142 #define traceQUEUE_GIVE_FROM_ISR( pxQueue ) traceQUEUE_SEND_FROM_ISR(pxQueue) 1143 1144 /* Called when a message send from interrupt context fails (since the queue was full) */ 1145 #undef traceQUEUE_SEND_FROM_ISR_FAILED 1146 #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \ 1147 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(xCopyPosition == queueSEND_TO_BACK ? 
(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, QUEUE, pxQueue)) : TRACE_QUEUE_SEND_TO_FRONT_FROM_ISR_TRCFAILED, QUEUE, pxQueue); 1148 #undef traceQUEUE_GIVE_FROM_ISR_FAILED 1149 #define traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue ) traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue) 1150 1151 /* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */ 1152 #undef traceQUEUE_RECEIVE_FROM_ISR 1153 #define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \ 1154 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCSUCCESS, QUEUE, pxQueue), QUEUE, pxQueue); \ 1155 trcKERNEL_HOOKS_SET_OBJECT_STATE(QUEUE, pxQueue, (uint8_t)(pxQueue->uxMessagesWaiting - 1)); 1156 1157 /* Called when a message receive from interrupt context fails (since the queue was empty) */ 1158 #undef traceQUEUE_RECEIVE_FROM_ISR_FAILED 1159 #define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \ 1160 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, QUEUE, pxQueue), QUEUE, pxQueue); 1161 1162 #undef traceQUEUE_REGISTRY_ADD 1163 #define traceQUEUE_REGISTRY_ADD(object, name) prvTraceSetObjectName(TRACE_GET_OBJECT_TRACE_CLASS(QUEUE, object), TRACE_GET_OBJECT_NUMBER(QUEUE, object), name); 1164 1165 /* Called in vTaskPrioritySet */ 1166 #undef traceTASK_PRIORITY_SET 1167 #define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \ 1168 trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_SET, pxTask, uxNewPriority); 1169 1170 /* Called in vTaskPriorityInherit, which is called by Mutex operations */ 1171 #undef traceTASK_PRIORITY_INHERIT 1172 #define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \ 1173 trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_INHERIT, pxTask, uxNewPriority); 1174 1175 /* Called in vTaskPriorityDisinherit, which is called by Mutex operations */ 1176 #undef traceTASK_PRIORITY_DISINHERIT 1177 #define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \ 1178 trcKERNEL_HOOKS_TASK_PRIORITY_CHANGE(TASK_PRIORITY_DISINHERIT, pxTask, uxNewPriority); 1179 1180 /* Called in vTaskResume */ 1181 #undef traceTASK_RESUME 1182 #define traceTASK_RESUME( pxTaskToResume ) \ 1183 trcKERNEL_HOOKS_TASK_RESUME(TASK_RESUME, pxTaskToResume); 1184 1185 /* Called in vTaskResumeFromISR */ 1186 #undef traceTASK_RESUME_FROM_ISR 1187 #define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \ 1188 trcKERNEL_HOOKS_TASK_RESUME_FROM_ISR(TASK_RESUME_FROM_ISR, pxTaskToResume); 1189 1190 1191 #if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0) 1192 1193 #if (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) 1194 1195 extern void vTraceStoreMemMangEvent(uint32_t ecode, uint32_t address, int32_t size); 1196 1197 /* MALLOC and FREE are always stored, no matter if they happen inside filtered task */ 1198 #undef traceMALLOC 1199 #define traceMALLOC( pvAddress, uiSize ) \ 1200 if (pvAddress != 0) \ 1201 { \ 1202 vTraceStoreMemMangEvent(MEM_MALLOC_SIZE, ( uint32_t ) pvAddress, (int32_t)uiSize); \ 1203 } \ 1204 else \ 1205 { \ 1206 vTraceStoreMemMangEvent(MEM_MALLOC_SIZE_TRCFAILED, ( uint32_t ) pvAddress, (int32_t)uiSize); \ 1207 } 1208 1209 #undef traceFREE 1210 #define traceFREE( pvAddress, uiSize ) \ 1211 vTraceStoreMemMangEvent(MEM_FREE_SIZE, ( uint32_t ) pvAddress, -((int32_t)uiSize)); 1212 1213 #endif /* (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) */ 1214 1215 #if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1) 1216 1217 /* Called in timer.c - xTimerCreate */ 1218 #undef traceTIMER_CREATE 1219 #define traceTIMER_CREATE(tmr) \ 1220 
trcKERNEL_HOOKS_OBJECT_CREATE(TIMER_CREATE, TIMER, tmr); 1221 1222 #undef traceTIMER_CREATE_FAILED 1223 #define traceTIMER_CREATE_FAILED() \ 1224 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TIMER_CREATE_TRCFAILED, 0); 1225 1226 /* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */ 1227 #undef traceTIMER_COMMAND_SEND 1228 #define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \ 1229 if (xCommandID > tmrCOMMAND_START_DONT_TRACE) \ 1230 { \ 1231 if (xCommandID == tmrCOMMAND_CHANGE_PERIOD) \ 1232 { \ 1233 if (xReturn == pdPASS) { \ 1234 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TIMER_CHANGE_PERIOD, TIMER, tmr, xOptionalValue); \ 1235 } \ 1236 else \ 1237 { \ 1238 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TIMER_CHANGE_PERIOD_TRCFAILED, TIMER, tmr, xOptionalValue); \ 1239 } \ 1240 } \ 1241 else if ((xCommandID == tmrCOMMAND_DELETE) && (xReturn == pdPASS)) \ 1242 { \ 1243 trcKERNEL_HOOKS_OBJECT_DELETE(TIMER_DELETE_OBJ, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), EVENTGROUP_OBJCLOSE_PROP_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(TIMER, tmr), TIMER, tmr); \ 1244 } \ 1245 else \ 1246 { \ 1247 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENTGROUP_TIMER + (uint32_t)xCommandID + ((xReturn == pdPASS) ? 0 : (TIMER_CREATE_TRCFAILED - TIMER_CREATE)), TIMER, tmr, xOptionalValue); \ 1248 }\ 1249 } 1250 1251 #undef traceTIMER_EXPIRED 1252 #define traceTIMER_EXPIRED(tmr) \ 1253 trcKERNEL_HOOKS_KERNEL_SERVICE(TIMER_EXPIRED, TIMER, tmr); 1254 1255 #endif /* (TRC_CFG_INCLUDE_TIMER_EVENTS == 1) */ 1256 1257 #if (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1) 1258 1259 #undef tracePEND_FUNC_CALL 1260 #define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \ 1261 if (ret == pdPASS){ \ 1262 trcKERNEL_HOOKS_KERNEL_SERVICE(PEND_FUNC_CALL, TASK, xTimerGetTimerDaemonTaskHandle() ); \ 1263 } \ 1264 else \ 1265 { \ 1266 trcKERNEL_HOOKS_KERNEL_SERVICE(PEND_FUNC_CALL_TRCFAILED, TASK, xTimerGetTimerDaemonTaskHandle() ); \ 1267 } 1268 1269 #undef tracePEND_FUNC_CALL_FROM_ISR 1270 #define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \ 1271 if (! 
uiInEventGroupSetBitsFromISR) \ 1272 prvTraceStoreKernelCall(PEND_FUNC_CALL_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTimerGetTimerDaemonTaskHandle()) ); \ 1273 uiInEventGroupSetBitsFromISR = 0; 1274 1275 #endif /* (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1) */ 1276 1277 #endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0) */ 1278 1279 #if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) 1280 1281 #undef traceEVENT_GROUP_CREATE 1282 #define traceEVENT_GROUP_CREATE(eg) \ 1283 trcKERNEL_HOOKS_OBJECT_CREATE(EVENT_GROUP_CREATE, EVENTGROUP, eg); 1284 1285 #undef traceEVENT_GROUP_CREATE_FAILED 1286 #define traceEVENT_GROUP_CREATE_FAILED() \ 1287 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(EVENT_GROUP_CREATE_TRCFAILED, 0); 1288 1289 #undef traceEVENT_GROUP_DELETE 1290 #define traceEVENT_GROUP_DELETE(eg) \ 1291 { TRACE_ALLOC_CRITICAL_SECTION(); \ 1292 TRACE_ENTER_CRITICAL_SECTION(); \ 1293 trcKERNEL_HOOKS_OBJECT_DELETE(EVENT_GROUP_DELETE_OBJ, EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP_OBJCLOSE_NAME_TRCSUCCESS + TRACE_GET_OBJECT_TRACE_CLASS(EVENTGROUP, eg), EVENTGROUP, eg); \ 1294 TRACE_EXIT_CRITICAL_SECTION(); } 1295 1296 #undef traceEVENT_GROUP_SYNC_BLOCK 1297 #define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \ 1298 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_TRCBLOCK, EVENTGROUP, eg, bitsToWaitFor); 1299 1300 #undef traceEVENT_GROUP_SYNC_END 1301 #define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \ 1302 if (wasTimeout) \ 1303 { \ 1304 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_END_TRCFAILED, EVENTGROUP, eg, bitsToWaitFor); \ 1305 } \ 1306 else \ 1307 { \ 1308 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SYNC_END, EVENTGROUP, eg, bitsToWaitFor); \ 1309 } 1310 1311 #undef traceEVENT_GROUP_WAIT_BITS_BLOCK 1312 #define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \ 1313 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_TRCBLOCK, EVENTGROUP, eg, bitsToWaitFor); \ 1314 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); 1315 1316 #undef traceEVENT_GROUP_WAIT_BITS_END 1317 #define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \ 1318 if (wasTimeout) \ 1319 { \ 1320 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_END_TRCFAILED, EVENTGROUP, eg, bitsToWaitFor); \ 1321 } \ 1322 else \ 1323 { \ 1324 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_WAIT_BITS_END, EVENTGROUP, eg, bitsToWaitFor); \ 1325 } 1326 1327 #undef traceEVENT_GROUP_CLEAR_BITS 1328 #define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \ 1329 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_CLEAR_BITS, EVENTGROUP, eg, bitsToClear); 1330 1331 #undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR 1332 #define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \ 1333 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM_FROM_ISR(EVENT_GROUP_CLEAR_BITS_FROM_ISR, EVENTGROUP, eg, bitsToClear); 1334 1335 #undef traceEVENT_GROUP_SET_BITS 1336 #define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \ 1337 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(EVENT_GROUP_SET_BITS, EVENTGROUP, eg, bitsToSet); 1338 1339 #undef traceEVENT_GROUP_SET_BITS_FROM_ISR 1340 #define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \ 1341 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM_FROM_ISR(EVENT_GROUP_SET_BITS_FROM_ISR, EVENTGROUP, eg, bitsToSet); \ 1342 uiInEventGroupSetBitsFromISR = 1; 1343 1344 #endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */ 1345 1346 #undef 
traceTASK_NOTIFY_TAKE 1347 #if (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) 1348 #define traceTASK_NOTIFY_TAKE() \ 1349 if (pxCurrentTCB[xPortGetCoreID()]->eNotifyState == eNotified){ \ 1350 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1351 } \ 1352 else{ \ 1353 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCFAILED, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1354 } 1355 #elif (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1356 #define traceTASK_NOTIFY_TAKE() \ 1357 if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState == taskNOTIFICATION_RECEIVED){ \ 1358 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1359 }else{ \ 1360 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCFAILED, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait);} 1361 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1362 #define traceTASK_NOTIFY_TAKE(index) \ 1363 if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[index] == taskNOTIFICATION_RECEIVED){ \ 1364 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1365 }else{ \ 1366 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCFAILED, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait);} 1367 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1368 1369 #undef traceTASK_NOTIFY_TAKE_BLOCK 1370 #if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1371 #define traceTASK_NOTIFY_TAKE_BLOCK() \ 1372 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCBLOCK, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1373 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); 1374 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1375 #define traceTASK_NOTIFY_TAKE_BLOCK(index) \ 1376 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_PARAM(TRACE_TASK_NOTIFY_TAKE_TRCBLOCK, TASK, pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \ 1377 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); 1378 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1379 1380 #undef traceTASK_NOTIFY_WAIT 1381 #if (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) 1382 #define traceTASK_NOTIFY_WAIT() \ 1383 if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB[xPortGetCoreID()]) & CurrentFilterMask) \ 1384 { \ 1385 if (pxCurrentTCB[xPortGetCoreID()]->eNotifyState == eNotified) \ 1386 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1387 else \ 1388 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1389 } 1390 #elif (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1391 #define traceTASK_NOTIFY_WAIT() \ 1392 if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB[xPortGetCoreID()]) & CurrentFilterMask) \ 1393 { \ 1394 if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState == taskNOTIFICATION_RECEIVED) \ 1395 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1396 else \ 1397 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1398 } 1399 #else /* TRC_CFG_ESP_IDF_VERSION < 
TRC_ESP_IDF_VERSION_4_3_0 */ 1400 #define traceTASK_NOTIFY_WAIT(index) \ 1401 if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB[xPortGetCoreID()]) & CurrentFilterMask) \ 1402 { \ 1403 if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[index] == taskNOTIFICATION_RECEIVED) \ 1404 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1405 else \ 1406 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCFAILED, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1407 } 1408 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1409 1410 #undef traceTASK_NOTIFY_WAIT_BLOCK 1411 #if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1412 #define traceTASK_NOTIFY_WAIT_BLOCK() \ 1413 if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB[xPortGetCoreID()]) & CurrentFilterMask) \ 1414 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCBLOCK, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1415 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); 1416 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1417 #define traceTASK_NOTIFY_WAIT_BLOCK(index) \ 1418 if (TRACE_GET_OBJECT_FILTER(TASK, pxCurrentTCB[xPortGetCoreID()]) & CurrentFilterMask) \ 1419 prvTraceStoreKernelCallWithParam(TRACE_TASK_NOTIFY_WAIT_TRCBLOCK, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(pxCurrentTCB[xPortGetCoreID()]), xTicksToWait); \ 1420 trcKERNEL_HOOKS_SET_TASK_INSTANCE_FINISHED(); 1421 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1422 1423 #undef traceTASK_NOTIFY 1424 #if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1425 #define traceTASK_NOTIFY() \ 1426 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1427 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1428 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 1429 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1430 #define traceTASK_NOTIFY(index) \ 1431 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1432 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1433 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 1434 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1435 1436 #undef traceTASK_NOTIFY_FROM_ISR 1437 #if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1438 #define traceTASK_NOTIFY_FROM_ISR() \ 1439 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1440 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 1441 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1442 #define traceTASK_NOTIFY_FROM_ISR(index) \ 1443 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1444 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 1445 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1446 1447 #undef traceTASK_NOTIFY_GIVE_FROM_ISR 1448 #if (TRC_CFG_ESP_IDF_VERSION <= TRC_ESP_IDF_VERSION_4_3_0) 1449 #define traceTASK_NOTIFY_GIVE_FROM_ISR() \ 1450 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1451 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 
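
/*
 * Illustrative sketch (not part of this header; application-side code with
 * hypothetical names such as vExampleISR and xWorkerTask): the task-notify
 * hooks above only store an event when the notified task passes the current
 * filter mask. For example, an ISR that wakes a worker task results in a
 * TRACE_TASK_NOTIFY_GIVE_FROM_ISR event for that task:
 *
 *   void IRAM_ATTR vExampleISR(void *pvArg)
 *   {
 *       BaseType_t xHigherPriorityTaskWoken = pdFALSE;
 *       (void)pvArg;
 *       vTaskNotifyGiveFromISR(xWorkerTask, &xHigherPriorityTaskWoken);   -> traceTASK_NOTIFY_GIVE_FROM_ISR()
 *       portYIELD_FROM_ISR(xHigherPriorityTaskWoken);
 *   }
 *
 * On ESP-IDF versions newer than 4.3.0 the kernel passes the notification-array
 * index to these hooks (task notification arrays in newer FreeRTOS kernels),
 * hence the (index) variants selected below.
 */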
1452 #else /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1453 #define traceTASK_NOTIFY_GIVE_FROM_ISR(index) \ 1454 if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \ 1455 prvTraceStoreKernelCall(TRACE_TASK_NOTIFY_GIVE_FROM_ISR, TRACE_CLASS_TASK, TRACE_GET_TASK_NUMBER(xTaskToNotify)); 1456 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1457 1458 #if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) 1459 1460 #undef traceSTREAM_BUFFER_CREATE 1461 #define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) \ 1462 trcKERNEL_HOOKS_OBJECT_CREATE(TRACE_GET_OBJECT_EVENT_CODE(CREATE_OBJ, TRCSUCCESS, STREAMBUFFER, pxStreamBuffer), STREAMBUFFER, pxStreamBuffer); 1463 1464 #undef traceSTREAM_BUFFER_CREATE_FAILED 1465 #define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) \ 1466 trcKERNEL_HOOKS_KERNEL_SERVICE_WITH_NUMERIC_PARAM_ONLY(TRACE_GET_CLASS_EVENT_CODE(CREATE_OBJ, TRCFAILED, STREAMBUFFER, xIsMessageBuffer), 0); 1467 1468 #undef traceSTREAM_BUFFER_CREATE_STATIC_FAILED 1469 #define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) \ 1470 traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) 1471 1472 #undef traceSTREAM_BUFFER_DELETE 1473 #define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) \ 1474 trcKERNEL_HOOKS_OBJECT_DELETE(TRACE_GET_OBJECT_EVENT_CODE(DELETE_OBJ, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_NAME, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), TRACE_GET_OBJECT_EVENT_CODE(OBJCLOSE_PROP, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); 1475 1476 #undef traceSTREAM_BUFFER_RESET 1477 #define traceSTREAM_BUFFER_RESET( xStreamBuffer ) \ 1478 trcKERNEL_HOOKS_KERNEL_SERVICE(prvGetStreamBufferType(xStreamBuffer) > 0 ? TRACE_MESSAGEBUFFER_RESET : TRACE_STREAMBUFFER_RESET, STREAMBUFFER, xStreamBuffer); \ 1479 trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, 0); 1480 1481 #undef traceSTREAM_BUFFER_SEND 1482 #define traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ) \ 1483 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1484 trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); 1485 1486 #undef traceBLOCKING_ON_STREAM_BUFFER_SEND 1487 #define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) \ 1488 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCBLOCK, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); 1489 1490 #undef traceSTREAM_BUFFER_SEND_FAILED 1491 #define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) \ 1492 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(SEND, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); 1493 1494 #undef traceSTREAM_BUFFER_RECEIVE 1495 #define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) \ 1496 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1497 trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); 1498 1499 1500 #undef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE 1501 #define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) \ 1502 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCBLOCK, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); 1503 1504 #undef traceSTREAM_BUFFER_RECEIVE_FAILED 1505 #define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) 
\ 1506 trcKERNEL_HOOKS_KERNEL_SERVICE(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); 1507 1508 #undef traceSTREAM_BUFFER_SEND_FROM_ISR 1509 #define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ) \ 1510 if( xReturn > ( size_t ) 0 ) \ 1511 { \ 1512 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1513 trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \ 1514 } \ 1515 else \ 1516 { \ 1517 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(SEND_FROM_ISR, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1518 } 1519 1520 #undef traceSTREAM_BUFFER_RECEIVE_FROM_ISR 1521 #define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) \ 1522 if( xReceivedLength > ( size_t ) 0 ) \ 1523 { \ 1524 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCSUCCESS, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1525 trcKERNEL_HOOKS_SET_OBJECT_STATE(STREAMBUFFER, xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \ 1526 } \ 1527 else \ 1528 { \ 1529 trcKERNEL_HOOKS_KERNEL_SERVICE_FROM_ISR(TRACE_GET_OBJECT_EVENT_CODE(RECEIVE_FROM_ISR, TRCFAILED, STREAMBUFFER, xStreamBuffer), STREAMBUFFER, xStreamBuffer); \ 1530 } 1531 1532 #endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */ 1533 1534 #endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */ 1535 1536 #endif /*#if TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_SNAPSHOT */ 1537 1538 /******************************************************************************/ 1539 /*** Definitions for Streaming mode *******************************************/ 1540 /******************************************************************************/ 1541 #if (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING) 1542 1543 /******************************************************************************* 1544 * vTraceStoreKernelObjectName 1545 * 1546 * Set the name for a kernel object (defined by its address). 1547 ******************************************************************************/ 1548 void vTraceStoreKernelObjectName(void* object, const char* name); 1549 1550 /******************************************************************************* 1551 * prvIsNewTCB 1552 * 1553 * Tells if this task is already executing, or if there has been a task-switch. 1554 * Assumed to be called within a trace hook in kernel context. 1555 *******************************************************************************/ 1556 uint32_t prvIsNewTCB(void* pNewTCB); 1557 1558 #define TRACE_GET_CURRENT_TASK() prvTraceGetCurrentTaskHandle() 1559 1560 /*************************************************************************/ 1561 /* KERNEL SPECIFIC OBJECT CONFIGURATION */ 1562 /*************************************************************************/ 1563 1564 /******************************************************************************* 1565 * The event codes - should match the offline config file. 
1566 ******************************************************************************/ 1567 1568 /*** Event codes for streaming - should match the Tracealyzer config file *****/ 1569 #define PSF_EVENT_NULL_EVENT 0x00 1570 1571 #define PSF_EVENT_TRACE_START 0x01 1572 #define PSF_EVENT_TS_CONFIG 0x02 1573 #define PSF_EVENT_OBJ_NAME 0x03 1574 #define PSF_EVENT_TASK_PRIORITY 0x04 1575 #define PSF_EVENT_TASK_PRIO_INHERIT 0x05 1576 #define PSF_EVENT_TASK_PRIO_DISINHERIT 0x06 1577 #define PSF_EVENT_DEFINE_ISR 0x07 1578 1579 #define PSF_EVENT_TASK_CREATE 0x10 1580 #define PSF_EVENT_QUEUE_CREATE 0x11 1581 #define PSF_EVENT_SEMAPHORE_BINARY_CREATE 0x12 1582 #define PSF_EVENT_MUTEX_CREATE 0x13 1583 #define PSF_EVENT_TIMER_CREATE 0x14 1584 #define PSF_EVENT_EVENTGROUP_CREATE 0x15 1585 #define PSF_EVENT_SEMAPHORE_COUNTING_CREATE 0x16 1586 #define PSF_EVENT_MUTEX_RECURSIVE_CREATE 0x17 1587 #define PSF_EVENT_STREAMBUFFER_CREATE 0x18 1588 #define PSF_EVENT_MESSAGEBUFFER_CREATE 0x19 1589 1590 #define PSF_EVENT_TASK_DELETE 0x20 1591 #define PSF_EVENT_QUEUE_DELETE 0x21 1592 #define PSF_EVENT_SEMAPHORE_DELETE 0x22 1593 #define PSF_EVENT_MUTEX_DELETE 0x23 1594 #define PSF_EVENT_TIMER_DELETE 0x24 1595 #define PSF_EVENT_EVENTGROUP_DELETE 0x25 1596 #define PSF_EVENT_STREAMBUFFER_DELETE 0x28 1597 #define PSF_EVENT_MESSAGEBUFFER_DELETE 0x29 1598 1599 #define PSF_EVENT_TASK_READY 0x30 1600 #define PSF_EVENT_NEW_TIME 0x31 1601 #define PSF_EVENT_NEW_TIME_SCHEDULER_SUSPENDED 0x32 1602 #define PSF_EVENT_ISR_BEGIN 0x33 1603 #define PSF_EVENT_ISR_RESUME 0x34 1604 #define PSF_EVENT_TS_BEGIN 0x35 1605 #define PSF_EVENT_TS_RESUME 0x36 1606 #define PSF_EVENT_TASK_ACTIVATE 0x37 1607 1608 #define PSF_EVENT_MALLOC 0x38 1609 #define PSF_EVENT_FREE 0x39 1610 1611 #define PSF_EVENT_LOWPOWER_BEGIN 0x3A 1612 #define PSF_EVENT_LOWPOWER_END 0x3B 1613 1614 #define PSF_EVENT_IFE_NEXT 0x3C 1615 #define PSF_EVENT_IFE_DIRECT 0x3D 1616 1617 #define PSF_EVENT_TASK_CREATE_FAILED 0x40 1618 #define PSF_EVENT_QUEUE_CREATE_FAILED 0x41 1619 #define PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED 0x42 1620 #define PSF_EVENT_MUTEX_CREATE_FAILED 0x43 1621 #define PSF_EVENT_TIMER_CREATE_FAILED 0x44 1622 #define PSF_EVENT_EVENTGROUP_CREATE_FAILED 0x45 1623 #define PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED 0x46 1624 #define PSF_EVENT_MUTEX_RECURSIVE_CREATE_FAILED 0x47 1625 #define PSF_EVENT_STREAMBUFFER_CREATE_FAILED 0x49 1626 #define PSF_EVENT_MESSAGEBUFFER_CREATE_FAILED 0x4A 1627 1628 #define PSF_EVENT_TIMER_DELETE_FAILED 0x48 1629 1630 #define PSF_EVENT_QUEUE_SEND 0x50 1631 #define PSF_EVENT_SEMAPHORE_GIVE 0x51 1632 #define PSF_EVENT_MUTEX_GIVE 0x52 1633 1634 #define PSF_EVENT_QUEUE_SEND_FAILED 0x53 1635 #define PSF_EVENT_SEMAPHORE_GIVE_FAILED 0x54 1636 #define PSF_EVENT_MUTEX_GIVE_FAILED 0x55 1637 1638 #define PSF_EVENT_QUEUE_SEND_BLOCK 0x56 1639 #define PSF_EVENT_SEMAPHORE_GIVE_BLOCK 0x57 1640 #define PSF_EVENT_MUTEX_GIVE_BLOCK 0x58 1641 1642 #define PSF_EVENT_QUEUE_SEND_FROMISR 0x59 1643 #define PSF_EVENT_SEMAPHORE_GIVE_FROMISR 0x5A 1644 1645 #define PSF_EVENT_QUEUE_SEND_FROMISR_FAILED 0x5C 1646 #define PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED 0x5D 1647 1648 #define PSF_EVENT_QUEUE_RECEIVE 0x60 1649 #define PSF_EVENT_SEMAPHORE_TAKE 0x61 1650 #define PSF_EVENT_MUTEX_TAKE 0x62 1651 1652 #define PSF_EVENT_QUEUE_RECEIVE_FAILED 0x63 1653 #define PSF_EVENT_SEMAPHORE_TAKE_FAILED 0x64 1654 #define PSF_EVENT_MUTEX_TAKE_FAILED 0x65 1655 1656 #define PSF_EVENT_QUEUE_RECEIVE_BLOCK 0x66 1657 #define PSF_EVENT_SEMAPHORE_TAKE_BLOCK 0x67 1658 #define 
PSF_EVENT_MUTEX_TAKE_BLOCK 0x68 1659 1660 #define PSF_EVENT_QUEUE_RECEIVE_FROMISR 0x69 1661 #define PSF_EVENT_SEMAPHORE_TAKE_FROMISR 0x6A 1662 1663 #define PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED 0x6C 1664 #define PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED 0x6D 1665 1666 #define PSF_EVENT_QUEUE_PEEK 0x70 1667 #define PSF_EVENT_SEMAPHORE_PEEK 0x71 1668 #define PSF_EVENT_MUTEX_PEEK 0x72 1669 1670 #define PSF_EVENT_QUEUE_PEEK_FAILED 0x73 1671 #define PSF_EVENT_SEMAPHORE_PEEK_FAILED 0x74 1672 #define PSF_EVENT_MUTEX_PEEK_FAILED 0x75 1673 1674 #define PSF_EVENT_QUEUE_PEEK_BLOCK 0x76 1675 #define PSF_EVENT_SEMAPHORE_PEEK_BLOCK 0x77 1676 #define PSF_EVENT_MUTEX_PEEK_BLOCK 0x78 1677 1678 #define PSF_EVENT_TASK_DELAY_UNTIL 0x79 1679 #define PSF_EVENT_TASK_DELAY 0x7A 1680 #define PSF_EVENT_TASK_SUSPEND 0x7B 1681 #define PSF_EVENT_TASK_RESUME 0x7C 1682 #define PSF_EVENT_TASK_RESUME_FROMISR 0x7D 1683 1684 #define PSF_EVENT_TIMER_PENDFUNCCALL 0x80 1685 #define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR 0x81 1686 #define PSF_EVENT_TIMER_PENDFUNCCALL_FAILED 0x82 1687 #define PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED 0x83 1688 1689 #define PSF_EVENT_USER_EVENT 0x90 1690 1691 #define PSF_EVENT_TIMER_START 0xA0 1692 #define PSF_EVENT_TIMER_RESET 0xA1 1693 #define PSF_EVENT_TIMER_STOP 0xA2 1694 #define PSF_EVENT_TIMER_CHANGEPERIOD 0xA3 1695 #define PSF_EVENT_TIMER_START_FROMISR 0xA4 1696 #define PSF_EVENT_TIMER_RESET_FROMISR 0xA5 1697 #define PSF_EVENT_TIMER_STOP_FROMISR 0xA6 1698 #define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR 0xA7 1699 #define PSF_EVENT_TIMER_START_FAILED 0xA8 1700 #define PSF_EVENT_TIMER_RESET_FAILED 0xA9 1701 #define PSF_EVENT_TIMER_STOP_FAILED 0xAA 1702 #define PSF_EVENT_TIMER_CHANGEPERIOD_FAILED 0xAB 1703 #define PSF_EVENT_TIMER_START_FROMISR_FAILED 0xAC 1704 #define PSF_EVENT_TIMER_RESET_FROMISR_FAILED 0xAD 1705 #define PSF_EVENT_TIMER_STOP_FROMISR_FAILED 0xAE 1706 #define PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED 0xAF 1707 1708 #define PSF_EVENT_EVENTGROUP_SYNC 0xB0 1709 #define PSF_EVENT_EVENTGROUP_WAITBITS 0xB1 1710 #define PSF_EVENT_EVENTGROUP_CLEARBITS 0xB2 1711 #define PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR 0xB3 1712 #define PSF_EVENT_EVENTGROUP_SETBITS 0xB4 1713 #define PSF_EVENT_EVENTGROUP_SETBITS_FROMISR 0xB5 1714 #define PSF_EVENT_EVENTGROUP_SYNC_BLOCK 0xB6 1715 #define PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK 0xB7 1716 #define PSF_EVENT_EVENTGROUP_SYNC_FAILED 0xB8 1717 #define PSF_EVENT_EVENTGROUP_WAITBITS_FAILED 0xB9 1718 1719 #define PSF_EVENT_QUEUE_SEND_FRONT 0xC0 1720 #define PSF_EVENT_QUEUE_SEND_FRONT_FAILED 0xC1 1721 #define PSF_EVENT_QUEUE_SEND_FRONT_BLOCK 0xC2 1722 #define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR 0xC3 1723 #define PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED 0xC4 1724 #define PSF_EVENT_MUTEX_GIVE_RECURSIVE 0xC5 1725 #define PSF_EVENT_MUTEX_GIVE_RECURSIVE_FAILED 0xC6 1726 #define PSF_EVENT_MUTEX_TAKE_RECURSIVE 0xC7 1727 #define PSF_EVENT_MUTEX_TAKE_RECURSIVE_FAILED 0xC8 1728 1729 #define PSF_EVENT_TASK_NOTIFY 0xC9 1730 #define PSF_EVENT_TASK_NOTIFY_TAKE 0xCA 1731 #define PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK 0xCB 1732 #define PSF_EVENT_TASK_NOTIFY_TAKE_FAILED 0xCC 1733 #define PSF_EVENT_TASK_NOTIFY_WAIT 0xCD 1734 #define PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK 0xCE 1735 #define PSF_EVENT_TASK_NOTIFY_WAIT_FAILED 0xCF 1736 #define PSF_EVENT_TASK_NOTIFY_FROM_ISR 0xD0 1737 #define PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR 0xD1 1738 1739 #define PSF_EVENT_TIMER_EXPIRED 0xD2 1740 1741 #define PSF_EVENT_STREAMBUFFER_SEND 0xD3 1742 #define PSF_EVENT_STREAMBUFFER_SEND_BLOCK 0xD4 1743 #define 
PSF_EVENT_STREAMBUFFER_SEND_FAILED 0xD5 1744 #define PSF_EVENT_STREAMBUFFER_RECEIVE 0xD6 1745 #define PSF_EVENT_STREAMBUFFER_RECEIVE_BLOCK 0xD7 1746 #define PSF_EVENT_STREAMBUFFER_RECEIVE_FAILED 0xD8 1747 #define PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR 0xD9 1748 #define PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR_FAILED 0xDA 1749 #define PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR 0xDB 1750 #define PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR_FAILED 0xDC 1751 #define PSF_EVENT_STREAMBUFFER_RESET 0xDD 1752 1753 #define PSF_EVENT_MESSAGEBUFFER_SEND 0xDE 1754 #define PSF_EVENT_MESSAGEBUFFER_SEND_BLOCK 0xDF 1755 #define PSF_EVENT_MESSAGEBUFFER_SEND_FAILED 0xE0 1756 #define PSF_EVENT_MESSAGEBUFFER_RECEIVE 0xE1 1757 #define PSF_EVENT_MESSAGEBUFFER_RECEIVE_BLOCK 0xE2 1758 #define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FAILED 0xE3 1759 #define PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR 0xE4 1760 #define PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR_FAILED 0xE5 1761 #define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR 0xE6 1762 #define PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR_FAILED 0xE7 1763 #define PSF_EVENT_MESSAGEBUFFER_RESET 0xE8 1764 1765 #define PSF_EVENT_MALLOC_FAILED 0xE9 1766 1767 #define PSF_EVENT_UNUSED_STACK 0xEA 1768 1769 /*** The trace macros for streaming ******************************************/ 1770 1771 /* A macro that will update the tick count when returning from tickless idle */ 1772 #undef traceINCREASE_TICK_COUNT 1773 /* Note: This can handle time adjustments of max 2^32 ticks, i.e., 35 seconds at 120 MHz. Thus, tick-less idle periods longer than 2^32 ticks will appear "compressed" on the time line.*/ 1774 #define traceINCREASE_TICK_COUNT( xCount ) { extern uint32_t uiTraceTickCount; uiTraceTickCount += xCount; } 1775 1776 #if (TRC_CFG_INCLUDE_OSTICK_EVENTS == 1) 1777 #define OS_TICK_EVENT(uxSchedulerSuspended, xTickCount) if (uxSchedulerSuspended == (unsigned portBASE_TYPE) pdFALSE) { prvTraceStoreEvent1(PSF_EVENT_NEW_TIME, (uint32_t)(xTickCount + 1)); } 1778 #else 1779 #define OS_TICK_EVENT(uxSchedulerSuspended, xTickCount) 1780 #endif 1781 1782 /* Called on each OS tick. Will call uiPortGetTimestamp to make sure it is called at least once every OS tick. 
*/ 1783 #undef traceTASK_INCREMENT_TICK 1784 #if TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0 1785 1786 #define traceTASK_INCREMENT_TICK( xTickCount ) \ 1787 if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || xPendedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \ 1788 OS_TICK_EVENT(uxSchedulerSuspended[xPortGetCoreID()], xTickCount) 1789 1790 #elif TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0 1791 1792 #define traceTASK_INCREMENT_TICK( xTickCount ) \ 1793 if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || uxPendedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \ 1794 OS_TICK_EVENT(uxSchedulerSuspended[xPortGetCoreID()], xTickCount) 1795 1796 #else 1797 1798 #define traceTASK_INCREMENT_TICK( xTickCount ) \ 1799 if (uxSchedulerSuspended[xPortGetCoreID()] == ( unsigned portBASE_TYPE ) pdTRUE || uxMissedTicks == 0) { extern uint32_t uiTraceTickCount; uiTraceTickCount++; } \ 1800 OS_TICK_EVENT(uxSchedulerSuspended[xPortGetCoreID()], xTickCount) 1801 1802 #endif 1803 1804 extern volatile uint32_t uiTraceSystemState; 1805 1806 /* Called on each task-switch */ 1807 #undef traceTASK_SWITCHED_IN 1808 #define traceTASK_SWITCHED_IN() \ 1809 uiTraceSystemState = TRC_STATE_IN_TASKSWITCH; \ 1810 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1811 { \ 1812 if (prvIsNewTCB(pxCurrentTCB[xPortGetCoreID()])) \ 1813 { \ 1814 prvTraceStoreEvent2(PSF_EVENT_TASK_ACTIVATE, (uint32_t)pxCurrentTCB[xPortGetCoreID()], pxCurrentTCB[xPortGetCoreID()]->uxPriority); \ 1815 } \ 1816 } \ 1817 uiTraceSystemState = TRC_STATE_IN_APPLICATION; 1818 1819 /* Called for each task that becomes ready */ 1820 #if (TRC_CFG_INCLUDE_READY_EVENTS == 1) 1821 #undef traceMOVED_TASK_TO_READY_STATE 1822 #define traceMOVED_TASK_TO_READY_STATE( pxTCB ) \ 1823 if (TRACE_GET_OBJECT_FILTER(TASK, pxTCB) & CurrentFilterMask) \ 1824 prvTraceStoreEvent1(PSF_EVENT_TASK_READY, (uint32_t)pxTCB); 1825 #endif 1826 1827 #undef traceTASK_CREATE 1828 #if TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_2_0 1829 #define traceTASK_CREATE(pxNewTCB) \ 1830 if (pxNewTCB != NULL) \ 1831 { \ 1832 TRACE_ALLOC_CRITICAL_SECTION(); \ 1833 TRACE_ENTER_CRITICAL_SECTION(); \ 1834 prvAddTaskToStackMonitor(pxNewTCB); \ 1835 TRACE_EXIT_CRITICAL_SECTION(); \ 1836 prvTraceSaveObjectSymbol(pxNewTCB, pxNewTCB->pcTaskName); \ 1837 prvTraceSaveObjectData(pxNewTCB, pxNewTCB->uxPriority); \ 1838 prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, pxNewTCB->pcTaskName, pxNewTCB); \ 1839 TRACE_SET_OBJECT_FILTER(TASK, pxNewTCB, CurrentFilterGroup); \ 1840 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1841 if (TRACE_GET_OBJECT_FILTER(TASK, pxNewTCB) & CurrentFilterMask) \ 1842 prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, pxNewTCB->uxPriority); \ 1843 } 1844 #else /* TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_2_0 */ 1845 #define traceTASK_CREATE(pxNewTCB) \ 1846 if (pxNewTCB != NULL) \ 1847 { \ 1848 TRACE_ALLOC_CRITICAL_SECTION(); \ 1849 TRACE_ENTER_CRITICAL_SECTION(); \ 1850 prvAddTaskToStackMonitor(pxNewTCB); \ 1851 TRACE_EXIT_CRITICAL_SECTION(); \ 1852 prvTraceSaveObjectSymbol(pxNewTCB, (const char*)pcName); \ 1853 prvTraceSaveObjectData(pxNewTCB, uxPriority); \ 1854 prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, (const char*)pcName, pxNewTCB); \ 1855 TRACE_SET_OBJECT_FILTER(TASK, pxNewTCB, CurrentFilterGroup); \ 1856 if (TRACE_GET_OBJECT_FILTER(TASK, 
TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1857 if (TRACE_GET_OBJECT_FILTER(TASK, pxNewTCB) & CurrentFilterMask) \ 1858 prvTraceStoreEvent2(PSF_EVENT_TASK_CREATE, (uint32_t)pxNewTCB, uxPriority); \ 1859 } 1860 #endif /* TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_2_0 */ 1861 1862 /* Called in vTaskCreate, if it fails (typically if the stack can not be allocated) */ 1863 #undef traceTASK_CREATE_FAILED 1864 #define traceTASK_CREATE_FAILED() \ 1865 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1866 prvTraceStoreEvent0(PSF_EVENT_TASK_CREATE_FAILED); \ 1867 1868 /* Called on vTaskDelete */ 1869 #undef traceTASK_DELETE // We don't allow for filtering out "delete" events. They are important and not very frequent. Moreover, we can't exclude create events, so this should be symmetrical. 1870 #define traceTASK_DELETE( pxTaskToDelete ) \ 1871 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1872 if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToDelete) & CurrentFilterMask) \ 1873 prvTraceStoreEvent2(PSF_EVENT_TASK_DELETE, (uint32_t)pxTaskToDelete, (pxTaskToDelete != NULL) ? (pxTaskToDelete->uxPriority) : 0); \ 1874 prvTraceDeleteSymbol(pxTaskToDelete); \ 1875 prvTraceDeleteObjectData(pxTaskToDelete); \ 1876 { TRACE_ALLOC_CRITICAL_SECTION(); \ 1877 TRACE_ENTER_CRITICAL_SECTION(); \ 1878 prvRemoveTaskFromStackMonitor(pxTaskToDelete); \ 1879 TRACE_EXIT_CRITICAL_SECTION(); } 1880 1881 #if (TRC_CFG_SCHEDULING_ONLY == 0) 1882 1883 #if (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0) 1884 1885 #undef traceLOW_POWER_IDLE_BEGIN 1886 #define traceLOW_POWER_IDLE_BEGIN() \ 1887 { \ 1888 prvTraceStoreEvent1(PSF_EVENT_LOWPOWER_BEGIN, xExpectedIdleTime); \ 1889 } 1890 1891 #undef traceLOW_POWER_IDLE_END 1892 #define traceLOW_POWER_IDLE_END() \ 1893 { \ 1894 prvTraceStoreEvent0(PSF_EVENT_LOWPOWER_END); \ 1895 } 1896 1897 #endif /* (defined(configUSE_TICKLESS_IDLE) && configUSE_TICKLESS_IDLE != 0) */ 1898 1899 /* Called on vTaskSuspend */ 1900 #undef traceTASK_SUSPEND 1901 #define traceTASK_SUSPEND( pxTaskToSuspend ) \ 1902 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1903 if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToSuspend) & CurrentFilterMask) \ 1904 prvTraceStoreEvent1(PSF_EVENT_TASK_SUSPEND, (uint32_t)pxTaskToSuspend); 1905 1906 /* Called on vTaskDelay - note the use of FreeRTOS variable xTicksToDelay */ 1907 #undef traceTASK_DELAY 1908 #define traceTASK_DELAY() \ 1909 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1910 prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY, xTicksToDelay); 1911 1912 /* Called on vTaskDelayUntil - note the use of FreeRTOS variable xTimeToWake */ 1913 #undef traceTASK_DELAY_UNTIL 1914 #if TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 1915 #define traceTASK_DELAY_UNTIL() \ 1916 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1917 prvTraceStoreEvent1(PSF_EVENT_TASK_DELAY_UNTIL, (uint32_t)xTimeToWake); 1918 #endif /* TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0 */ 1919 1920 #if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) 1921 #define traceQUEUE_CREATE_HELPER() \ 1922 case queueQUEUE_TYPE_MUTEX: \ 1923 prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \ 1924 break; \ 1925 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 1926 prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE, (uint32_t)pxNewQueue); \ 1927 break; 1928 #else 1929 #define traceQUEUE_CREATE_HELPER() 1930 
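
/*
 * Illustrative example (an assumption about application-side usage, not part of
 * this header): traceQUEUE_CREATE below dispatches on ucQueueType, and on
 * ESP-IDF 4.3.0 and later the traceQUEUE_CREATE_HELPER() cases add the mutex
 * types, apparently because mutexes are then created through the generic
 * queue-creation path. Roughly:
 *
 *   QueueHandle_t     xQ = xQueueCreate(8, sizeof(uint32_t));   event: PSF_EVENT_QUEUE_CREATE
 *   SemaphoreHandle_t xB = xSemaphoreCreateBinary();            event: PSF_EVENT_SEMAPHORE_BINARY_CREATE
 *   SemaphoreHandle_t xM = xSemaphoreCreateMutex();             event: PSF_EVENT_MUTEX_CREATE
 *                                                               (via the helper on ESP-IDF >= 4.3.0,
 *                                                                via traceCREATE_MUTEX on older versions)
 */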
#endif /* (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) */ 1931 1932 /* Called in xQueueCreate, and thereby for all other object based on queues, such as semaphores. */ 1933 #undef traceQUEUE_CREATE 1934 #define traceQUEUE_CREATE( pxNewQueue )\ 1935 TRACE_SET_OBJECT_FILTER(QUEUE, pxNewQueue, CurrentFilterGroup); \ 1936 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1937 { \ 1938 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxNewQueue) & CurrentFilterMask) \ 1939 { \ 1940 switch (pxNewQueue->ucQueueType) \ 1941 { \ 1942 case queueQUEUE_TYPE_BASE: \ 1943 prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE, (uint32_t)pxNewQueue, uxQueueLength); \ 1944 break; \ 1945 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 1946 prvTraceStoreEvent1(PSF_EVENT_SEMAPHORE_BINARY_CREATE, (uint32_t)pxNewQueue); \ 1947 break; \ 1948 traceQUEUE_CREATE_HELPER() \ 1949 } \ 1950 } \ 1951 } 1952 1953 #if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) 1954 #define traceQUEUE_CREATE_FAILED_HELPER() \ 1955 case queueQUEUE_TYPE_MUTEX: \ 1956 prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE_FAILED, 0); \ 1957 break; \ 1958 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 1959 prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE_FAILED, 0); \ 1960 break; 1961 #else 1962 #define traceQUEUE_CREATE_FAILED_HELPER() 1963 #endif /* (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0) */ 1964 1965 /* Called in xQueueCreate, if the queue creation fails */ 1966 #undef traceQUEUE_CREATE_FAILED 1967 #define traceQUEUE_CREATE_FAILED( queueType ) \ 1968 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1969 { \ 1970 switch (queueType) \ 1971 { \ 1972 case queueQUEUE_TYPE_BASE: \ 1973 prvTraceStoreEvent2(PSF_EVENT_QUEUE_CREATE_FAILED, 0, uxQueueLength); \ 1974 break; \ 1975 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 1976 prvTraceStoreEvent1(PSF_EVENT_SEMAPHORE_BINARY_CREATE_FAILED, 0); \ 1977 break; \ 1978 traceQUEUE_CREATE_FAILED_HELPER() \ 1979 } \ 1980 } 1981 1982 #undef traceQUEUE_DELETE // We don't allow for filtering out "delete" events. They are important and not very frequent. Moreover, we can't exclude create events, so this should be symmetrical. 1983 #define traceQUEUE_DELETE( pxQueue ) \ 1984 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 1985 { \ 1986 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 1987 { \ 1988 switch (pxQueue->ucQueueType) \ 1989 { \ 1990 case queueQUEUE_TYPE_BASE: \ 1991 prvTraceStoreEvent2(PSF_EVENT_QUEUE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \ 1992 break; \ 1993 case queueQUEUE_TYPE_MUTEX: \ 1994 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 1995 prvTraceStoreEvent2(PSF_EVENT_MUTEX_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? (pxQueue->uxMessagesWaiting) : 0); \ 1996 break; \ 1997 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 1998 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 1999 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_DELETE, (uint32_t)pxQueue, (pxQueue != NULL) ? 
(pxQueue->uxMessagesWaiting) : 0); \
				break; \
			} \
		} \
	} \
	prvTraceDeleteSymbol(pxQueue);

/* Called in xQueueCreateCountingSemaphore when the semaphore is created successfully */
#undef traceCREATE_COUNTING_SEMAPHORE
#define traceCREATE_COUNTING_SEMAPHORE() \
	TRACE_SET_OBJECT_FILTER(QUEUE, xHandle, CurrentFilterGroup); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) { \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, xHandle) & CurrentFilterMask) { \
			prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE, (uint32_t)xHandle, uxMaxCount); } }

/* Called in xQueueCreateCountingSemaphore, if the semaphore creation fails */
#undef traceCREATE_COUNTING_SEMAPHORE_FAILED
#define traceCREATE_COUNTING_SEMAPHORE_FAILED() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_COUNTING_CREATE_FAILED, 0, uxMaxCount);


/* This macro is not necessary as of FreeRTOS v9.0.0 */
#if (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0)
/* Called in xQueueCreateMutex, and thereby also from xSemaphoreCreateMutex and xSemaphoreCreateRecursiveMutex */
#undef traceCREATE_MUTEX
#define traceCREATE_MUTEX( pxNewQueue ) \
	TRACE_SET_OBJECT_FILTER(QUEUE, pxNewQueue, CurrentFilterGroup); \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
	{ \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxNewQueue) & CurrentFilterMask) \
		{ \
			switch (pxNewQueue->ucQueueType) \
			{ \
			case queueQUEUE_TYPE_MUTEX: \
				prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE, (uint32_t)pxNewQueue); \
				break; \
			case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
				prvTraceStoreEvent1(PSF_EVENT_MUTEX_RECURSIVE_CREATE, (uint32_t)pxNewQueue); \
				break; \
			} \
		}\
	}

/* Called in xQueueCreateMutex when the operation fails (e.g., when memory allocation fails) */
#undef traceCREATE_MUTEX_FAILED
#define traceCREATE_MUTEX_FAILED() \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		prvTraceStoreEvent1(PSF_EVENT_MUTEX_CREATE_FAILED, 0);
#endif /* (TRC_CFG_ESP_IDF_VERSION < TRC_ESP_IDF_VERSION_4_3_0) */

/* Called when a message is sent to a queue */ /* CS IS NEW ! */
#undef traceQUEUE_SEND
#define traceQUEUE_SEND( pxQueue ) \
	if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
		if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
			switch (pxQueue->ucQueueType) \
			{ \
			case queueQUEUE_TYPE_BASE: \
				prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ?
PSF_EVENT_QUEUE_SEND : PSF_EVENT_QUEUE_SEND_FRONT, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \ 2058 break; \ 2059 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2060 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2061 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \ 2062 break; \ 2063 case queueQUEUE_TYPE_MUTEX: \ 2064 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 2065 prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE, (uint32_t)pxQueue); \ 2066 break; \ 2067 } 2068 2069 #undef traceQUEUE_SET_SEND 2070 #define traceQUEUE_SET_SEND( pxQueue ) \ 2071 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 2072 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2073 prvTraceStoreEvent2(PSF_EVENT_QUEUE_SEND, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); 2074 2075 /* Called when a message failed to be sent to a queue (timeout) */ 2076 #undef traceQUEUE_SEND_FAILED 2077 #define traceQUEUE_SEND_FAILED( pxQueue ) \ 2078 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 2079 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2080 switch (pxQueue->ucQueueType) \ 2081 { \ 2082 case queueQUEUE_TYPE_BASE: \ 2083 prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2084 break; \ 2085 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2086 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2087 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2088 break; \ 2089 case queueQUEUE_TYPE_MUTEX: \ 2090 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 2091 prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_FAILED, (uint32_t)pxQueue); \ 2092 break; \ 2093 } 2094 2095 /* Called when the task is blocked due to a send operation on a full queue */ 2096 #undef traceBLOCKING_ON_QUEUE_SEND 2097 #define traceBLOCKING_ON_QUEUE_SEND( pxQueue ) \ 2098 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 2099 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2100 switch (pxQueue->ucQueueType) \ 2101 { \ 2102 case queueQUEUE_TYPE_BASE: \ 2103 prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_BLOCK : PSF_EVENT_QUEUE_SEND_FRONT_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2104 break; \ 2105 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2106 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2107 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_BLOCK, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2108 break; \ 2109 case queueQUEUE_TYPE_MUTEX: \ 2110 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 2111 prvTraceStoreEvent1(PSF_EVENT_MUTEX_GIVE_BLOCK, (uint32_t)pxQueue); \ 2112 break; \ 2113 } 2114 2115 /* Called when a message is sent from interrupt context, e.g., using xQueueSendFromISR */ 2116 #undef traceQUEUE_SEND_FROM_ISR 2117 #define traceQUEUE_SEND_FROM_ISR( pxQueue ) \ 2118 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2119 switch (pxQueue->ucQueueType) \ 2120 { \ 2121 case queueQUEUE_TYPE_BASE: \ 2122 prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? 
PSF_EVENT_QUEUE_SEND_FROMISR : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \ 2123 break; \ 2124 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2125 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2126 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting + 1); \ 2127 break; \ 2128 } 2129 #undef traceQUEUE_GIVE_FROM_ISR 2130 #define traceQUEUE_GIVE_FROM_ISR( pxQueue ) traceQUEUE_SEND_FROM_ISR(pxQueue) 2131 2132 /* Called when a message send from interrupt context fails (since the queue was full) */ 2133 #undef traceQUEUE_SEND_FROM_ISR_FAILED 2134 #define traceQUEUE_SEND_FROM_ISR_FAILED( pxQueue ) \ 2135 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2136 switch (pxQueue->ucQueueType) \ 2137 { \ 2138 case queueQUEUE_TYPE_BASE: \ 2139 prvTraceStoreEvent2(xCopyPosition == queueSEND_TO_BACK ? PSF_EVENT_QUEUE_SEND_FROMISR_FAILED : PSF_EVENT_QUEUE_SEND_FRONT_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2140 break; \ 2141 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2142 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2143 prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_GIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \ 2144 break; \ 2145 } 2146 #undef traceQUEUE_GIVE_FROM_ISR_FAILED 2147 #define traceQUEUE_GIVE_FROM_ISR_FAILED( pxQueue ) traceQUEUE_SEND_FROM_ISR_FAILED(pxQueue) 2148 2149 /* Called when a message is received from a queue */ 2150 #undef traceQUEUE_RECEIVE 2151 #define traceQUEUE_RECEIVE( pxQueue ) \ 2152 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 2153 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2154 switch (pxQueue->ucQueueType) \ 2155 { \ 2156 case queueQUEUE_TYPE_BASE: \ 2157 if (isQueueReceiveHookActuallyPeek) \ 2158 prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \ 2159 else\ 2160 prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \ 2161 break; \ 2162 case queueQUEUE_TYPE_BINARY_SEMAPHORE: \ 2163 case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \ 2164 if (isQueueReceiveHookActuallyPeek) \ 2165 prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \ 2166 else \ 2167 prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_TAKE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \ 2168 break; \ 2169 case queueQUEUE_TYPE_MUTEX: \ 2170 case queueQUEUE_TYPE_RECURSIVE_MUTEX: \ 2171 if (isQueueReceiveHookActuallyPeek) \ 2172 prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue, xTicksToWait); \ 2173 else \ 2174 prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE, (uint32_t)pxQueue, xTicksToWait); \ 2175 break; \ 2176 } 2177 #undef traceQUEUE_SEMAPHORE_RECEIVE 2178 #define traceQUEUE_SEMAPHORE_RECEIVE( pxQueue ) traceQUEUE_RECEIVE( pxQueue ) 2179 2180 /* Called when a receive operation on a queue fails (timeout) */ 2181 #undef traceQUEUE_RECEIVE_FAILED 2182 #define traceQUEUE_RECEIVE_FAILED( pxQueue ) \ 2183 if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \ 2184 if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \ 2185 switch (pxQueue->ucQueueType) \ 2186 { \ 2187 case queueQUEUE_TYPE_BASE: \ 2188 prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? 
/* Called when a message is received from a queue */
#undef traceQUEUE_RECEIVE
#define traceQUEUE_RECEIVE( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    else \
                        prvTraceStoreEvent3(PSF_EVENT_QUEUE_RECEIVE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    else \
                        prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_TAKE, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting - 1); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    if (isQueueReceiveHookActuallyPeek) \
                        prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue, xTicksToWait); \
                    else \
                        prvTraceStoreEvent2(PSF_EVENT_MUTEX_TAKE, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }
#undef traceQUEUE_SEMAPHORE_RECEIVE
#define traceQUEUE_SEMAPHORE_RECEIVE( pxQueue ) traceQUEUE_RECEIVE( pxQueue )

/* Called when a receive operation on a queue fails (timeout) */
#undef traceQUEUE_RECEIVE_FAILED
#define traceQUEUE_RECEIVE_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_QUEUE_PEEK_FAILED : PSF_EVENT_QUEUE_RECEIVE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_SEMAPHORE_PEEK_FAILED : PSF_EVENT_SEMAPHORE_TAKE_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(isQueueReceiveHookActuallyPeek ? PSF_EVENT_MUTEX_PEEK_FAILED : PSF_EVENT_MUTEX_TAKE_FAILED, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

/* Called when the task is blocked due to a receive operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_RECEIVE
#define traceBLOCKING_ON_QUEUE_RECEIVE( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_QUEUE_PEEK_BLOCK : PSF_EVENT_QUEUE_RECEIVE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(isQueueReceiveHookActuallyPeek ? PSF_EVENT_SEMAPHORE_PEEK_BLOCK : PSF_EVENT_SEMAPHORE_TAKE_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(isQueueReceiveHookActuallyPeek ? PSF_EVENT_MUTEX_PEEK_BLOCK : PSF_EVENT_MUTEX_TAKE_BLOCK, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
/* Called when a peek operation on a queue fails (timeout) */
#undef traceQUEUE_PEEK_FAILED
#define traceQUEUE_PEEK_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK_FAILED, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

/* Called when the task is blocked due to a peek operation on an empty queue */
#undef traceBLOCKING_ON_QUEUE_PEEK
#define traceBLOCKING_ON_QUEUE_PEEK( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK_BLOCK, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

#endif /* (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0) */

/* Called when a message is received in interrupt context, e.g., using xQueueReceiveFromISR */
#undef traceQUEUE_RECEIVE_FROM_ISR
#define traceQUEUE_RECEIVE_FROM_ISR( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
        switch (pxQueue->ucQueueType) \
        { \
            case queueQUEUE_TYPE_BASE: \
                prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
                break; \
            case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
            case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting - 1); \
                break; \
        }

/* Called when a message receive from interrupt context fails (since the queue was empty) */
#undef traceQUEUE_RECEIVE_FROM_ISR_FAILED
#define traceQUEUE_RECEIVE_FROM_ISR_FAILED( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
        switch (pxQueue->ucQueueType) \
        { \
            case queueQUEUE_TYPE_BASE: \
                prvTraceStoreEvent2(PSF_EVENT_QUEUE_RECEIVE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                break; \
            case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
            case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                prvTraceStoreEvent2(PSF_EVENT_SEMAPHORE_TAKE_FROMISR_FAILED, (uint32_t)pxQueue, pxQueue->uxMessagesWaiting); \
                break; \
        }

/* Called on xQueuePeek */
#undef traceQUEUE_PEEK
#define traceQUEUE_PEEK( pxQueue ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(QUEUE, pxQueue) & CurrentFilterMask) \
            switch (pxQueue->ucQueueType) \
            { \
                case queueQUEUE_TYPE_BASE: \
                    prvTraceStoreEvent3(PSF_EVENT_QUEUE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_BINARY_SEMAPHORE: \
                case queueQUEUE_TYPE_COUNTING_SEMAPHORE: \
                    prvTraceStoreEvent3(PSF_EVENT_SEMAPHORE_PEEK, (uint32_t)pxQueue, xTicksToWait, pxQueue->uxMessagesWaiting); \
                    break; \
                case queueQUEUE_TYPE_MUTEX: \
                case queueQUEUE_TYPE_RECURSIVE_MUTEX: \
                    prvTraceStoreEvent2(PSF_EVENT_MUTEX_PEEK, (uint32_t)pxQueue, xTicksToWait); \
                    break; \
            }

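/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * How the receive hooks above map to ordinary application calls, assuming a
 * hypothetical queue handle xExampleQueue holding uint32_t items.
 *
 *   uint32_t ulItem;
 *   // Success is traced via traceQUEUE_RECEIVE; waiting on an empty queue is
 *   // traced via traceBLOCKING_ON_QUEUE_RECEIVE; a timeout is traced via
 *   // traceQUEUE_RECEIVE_FAILED.
 *   if (xQueueReceive(xExampleQueue, &ulItem, pdMS_TO_TICKS(100)) == pdTRUE)
 *   {
 *       // process ulItem
 *   }
 ******************************************************************************/
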
/* Called in vTaskPrioritySet */
#undef traceTASK_PRIORITY_SET
#define traceTASK_PRIORITY_SET( pxTask, uxNewPriority ) \
    prvTraceSaveObjectData(pxTask, uxNewPriority); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_PRIORITY, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityInherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_INHERIT
#define traceTASK_PRIORITY_INHERIT( pxTask, uxNewPriority ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_INHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskPriorityDisinherit, which is called by Mutex operations */
#undef traceTASK_PRIORITY_DISINHERIT
#define traceTASK_PRIORITY_DISINHERIT( pxTask, uxNewPriority ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTask) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_PRIO_DISINHERIT, (uint32_t)pxTask, uxNewPriority);

/* Called in vTaskResume */
#undef traceTASK_RESUME
#define traceTASK_RESUME( pxTaskToResume ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToResume) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME, (uint32_t)pxTaskToResume);

/* Called in vTaskResumeFromISR */
#undef traceTASK_RESUME_FROM_ISR
#define traceTASK_RESUME_FROM_ISR( pxTaskToResume ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, pxTaskToResume) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_RESUME_FROMISR, (uint32_t)pxTaskToResume);

#if (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1)

extern uint32_t trcHeapCounter;

#undef traceMALLOC
#define traceMALLOC( pvAddress, uiSize ) \
    if (pvAddress != 0) \
    { \
        trcHeapCounter += uiSize; \
    } \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
    { \
        if (pvAddress != 0) \
        { \
            prvTraceStoreEvent2(PSF_EVENT_MALLOC, (uint32_t)pvAddress, uiSize); \
        } \
        else \
        { \
            prvTraceStoreEvent2(PSF_EVENT_MALLOC_FAILED, (uint32_t)pvAddress, uiSize); \
        } \
    }

#undef traceFREE
#define traceFREE( pvAddress, uiSize ) \
    trcHeapCounter -= uiSize; \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_FREE, (uint32_t)pvAddress, (uint32_t)(0 - uiSize)); /* "0 -" instead of just "-" to get rid of a warning... */

#endif /* (TRC_CFG_INCLUDE_MEMMANG_EVENTS == 1) */

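/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * With TRC_CFG_INCLUDE_MEMMANG_EVENTS enabled, each successful allocation
 * through the FreeRTOS heap is expected to add to trcHeapCounter and store a
 * PSF_EVENT_MALLOC event, while each free subtracts and stores PSF_EVENT_FREE:
 *
 *   void* pvBlock = pvPortMalloc(128);  // traceMALLOC -> PSF_EVENT_MALLOC (or _FAILED if NULL)
 *   vPortFree(pvBlock);                 // traceFREE   -> PSF_EVENT_FREE
 ******************************************************************************/
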
#if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1)

/* Called in timers.c - xTimerCreate */
#undef traceTIMER_CREATE
#define traceTIMER_CREATE(tmr) \
    TRACE_SET_OBJECT_FILTER(TIMER, tmr, CurrentFilterGroup); \
    prvTraceSaveObjectSymbol(tmr, (const char*)tmr->pcTimerName); \
    prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, (const char*)tmr->pcTimerName, tmr); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TIMER_CREATE, (uint32_t)tmr, tmr->xTimerPeriodInTicks);

#undef traceTIMER_CREATE_FAILED
#define traceTIMER_CREATE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent0(PSF_EVENT_TIMER_CREATE_FAILED);

#if (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0)
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
    case tmrCOMMAND_RESET: \
        prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET : PSF_EVENT_TIMER_RESET_FAILED, (uint32_t)tmr, xOptionalValue); \
        break; \
    case tmrCOMMAND_START_FROM_ISR: \
        prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_START_FROMISR : PSF_EVENT_TIMER_START_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
        break; \
    case tmrCOMMAND_RESET_FROM_ISR: \
        prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_RESET_FROMISR : PSF_EVENT_TIMER_RESET_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
        break; \
    case tmrCOMMAND_STOP_FROM_ISR: \
        prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP_FROMISR : PSF_EVENT_TIMER_STOP_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
        break; \
    case tmrCOMMAND_CHANGE_PERIOD_FROM_ISR: \
        prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR : PSF_EVENT_TIMER_CHANGEPERIOD_FROMISR_FAILED, (uint32_t)tmr, xOptionalValue); \
        break;
#else /* TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0 */
#define traceTIMER_COMMAND_SEND_8_0_CASES(tmr)
#endif /* TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_0_0 */

/* Note that xCommandID can never be tmrCOMMAND_EXECUTE_CALLBACK (-1) since the trace macro is not called in that case */
#undef traceTIMER_COMMAND_SEND
#define traceTIMER_COMMAND_SEND(tmr, xCommandID, xOptionalValue, xReturn) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
            switch(xCommandID) \
            { \
                case tmrCOMMAND_START: \
                    prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_START : PSF_EVENT_TIMER_START_FAILED, (uint32_t)tmr); \
                    break; \
                case tmrCOMMAND_STOP: \
                    prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_STOP : PSF_EVENT_TIMER_STOP_FAILED, (uint32_t)tmr); \
                    break; \
                case tmrCOMMAND_CHANGE_PERIOD: \
                    prvTraceStoreEvent2((xReturn == pdPASS) ? PSF_EVENT_TIMER_CHANGEPERIOD : PSF_EVENT_TIMER_CHANGEPERIOD_FAILED, (uint32_t)tmr, xOptionalValue); \
                    break; \
                case tmrCOMMAND_DELETE: \
                    prvTraceStoreEvent1((xReturn == pdPASS) ? PSF_EVENT_TIMER_DELETE : PSF_EVENT_TIMER_DELETE_FAILED, (uint32_t)tmr); \
                    break; \
                traceTIMER_COMMAND_SEND_8_0_CASES(tmr) \
            }

#undef traceTIMER_EXPIRED
#define traceTIMER_EXPIRED(tmr) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TIMER, tmr) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_TIMER_EXPIRED, (uint32_t)tmr->pxCallbackFunction, (uint32_t)tmr->pvTimerID);

#endif /* #if (TRC_CFG_INCLUDE_TIMER_EVENTS == 1) */

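/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * With TRC_CFG_INCLUDE_TIMER_EVENTS enabled, software timer commands are
 * traced through traceTIMER_COMMAND_SEND and expirations through
 * traceTIMER_EXPIRED. The handle and callback names below are hypothetical.
 *
 *   TimerHandle_t xTmr = xTimerCreate("Tmr", pdMS_TO_TICKS(50), pdTRUE, NULL,
 *                                     vExampleTimerCallback);  // traceTIMER_CREATE
 *   xTimerStart(xTmr, 0);   // traceTIMER_COMMAND_SEND -> PSF_EVENT_TIMER_START
 *   xTimerStop(xTmr, 0);    // traceTIMER_COMMAND_SEND -> PSF_EVENT_TIMER_STOP
 ******************************************************************************/
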
#if (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1)

#undef tracePEND_FUNC_CALL
#define tracePEND_FUNC_CALL(func, arg1, arg2, ret) \
    prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL : PSF_EVENT_TIMER_PENDFUNCCALL_FAILED, (uint32_t)func);

#undef tracePEND_FUNC_CALL_FROM_ISR
#define tracePEND_FUNC_CALL_FROM_ISR(func, arg1, arg2, ret) \
    prvTraceStoreEvent1((ret == pdPASS) ? PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR : PSF_EVENT_TIMER_PENDFUNCCALL_FROMISR_FAILED, (uint32_t)func);

#endif /* (TRC_CFG_INCLUDE_PEND_FUNC_CALL_EVENTS == 1) */

#if (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1)

#undef traceEVENT_GROUP_CREATE
#define traceEVENT_GROUP_CREATE(eg) \
    TRACE_SET_OBJECT_FILTER(EVENTGROUP, eg, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_CREATE, (uint32_t)eg);

#undef traceEVENT_GROUP_DELETE
#define traceEVENT_GROUP_DELETE(eg) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_EVENTGROUP_DELETE, (uint32_t)eg); \
    prvTraceDeleteSymbol(eg);

#undef traceEVENT_GROUP_CREATE_FAILED
#define traceEVENT_GROUP_CREATE_FAILED() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent0(PSF_EVENT_EVENTGROUP_CREATE_FAILED);

#undef traceEVENT_GROUP_SYNC_BLOCK
#define traceEVENT_GROUP_SYNC_BLOCK(eg, bitsToSet, bitsToWaitFor) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SYNC_BLOCK, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_SYNC_END
#define traceEVENT_GROUP_SYNC_END(eg, bitsToSet, bitsToWaitFor, wasTimeout) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_SYNC : PSF_EVENT_EVENTGROUP_SYNC_FAILED, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_WAIT_BITS_BLOCK
#define traceEVENT_GROUP_WAIT_BITS_BLOCK(eg, bitsToWaitFor) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_WAITBITS_BLOCK, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_WAIT_BITS_END
#define traceEVENT_GROUP_WAIT_BITS_END(eg, bitsToWaitFor, wasTimeout) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2((wasTimeout != pdTRUE) ? PSF_EVENT_EVENTGROUP_WAITBITS : PSF_EVENT_EVENTGROUP_WAITBITS_FAILED, (uint32_t)eg, bitsToWaitFor);

#undef traceEVENT_GROUP_CLEAR_BITS
#define traceEVENT_GROUP_CLEAR_BITS(eg, bitsToClear) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS, (uint32_t)eg, bitsToClear);

#undef traceEVENT_GROUP_CLEAR_BITS_FROM_ISR
#define traceEVENT_GROUP_CLEAR_BITS_FROM_ISR(eg, bitsToClear) \
    if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_CLEARBITS_FROMISR, (uint32_t)eg, bitsToClear);

#undef traceEVENT_GROUP_SET_BITS
#define traceEVENT_GROUP_SET_BITS(eg, bitsToSet) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
            prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS, (uint32_t)eg, bitsToSet);

#undef traceEVENT_GROUP_SET_BITS_FROM_ISR
#define traceEVENT_GROUP_SET_BITS_FROM_ISR(eg, bitsToSet) \
    if (TRACE_GET_OBJECT_FILTER(EVENTGROUP, eg) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_EVENTGROUP_SETBITS_FROMISR, (uint32_t)eg, bitsToSet);

#endif /* (TRC_CFG_INCLUDE_EVENT_GROUP_EVENTS == 1) */

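/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * How the event group hooks above map to application calls. The handle and
 * bit masks below are hypothetical.
 *
 *   EventGroupHandle_t xFlags = xEventGroupCreate();  // traceEVENT_GROUP_CREATE
 *   xEventGroupSetBits(xFlags, 0x01);                 // traceEVENT_GROUP_SET_BITS
 *   // Waiting blocks -> traceEVENT_GROUP_WAIT_BITS_BLOCK;
 *   // the outcome -> traceEVENT_GROUP_WAIT_BITS_END (success or timeout).
 *   xEventGroupWaitBits(xFlags, 0x01, pdTRUE, pdFALSE, pdMS_TO_TICKS(10));
 ******************************************************************************/
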
#undef traceTASK_NOTIFY_TAKE
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_TAKE(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[index] == taskNOTIFICATION_RECEIVED) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#elif (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_TAKE() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState == taskNOTIFICATION_RECEIVED) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_TAKE() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->eNotifyState == eNotified) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY_TAKE_BLOCK
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_TAKE_BLOCK(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_TAKE_BLOCK() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_TAKE_BLOCK, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY_WAIT
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_WAIT(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState[index] == taskNOTIFICATION_RECEIVED) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#elif (TRC_CFG_ESP_IDF_VERSION >= TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_WAIT() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->ucNotifyState == taskNOTIFICATION_RECEIVED) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_WAIT() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask){ \
        if (pxCurrentTCB[xPortGetCoreID()]->eNotifyState == eNotified) \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait); \
        else \
            prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_FAILED, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);}
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY_WAIT_BLOCK
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_WAIT_BLOCK(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_WAIT_BLOCK() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(PSF_EVENT_TASK_NOTIFY_WAIT_BLOCK, (uint32_t)pxCurrentTCB[xPortGetCoreID()], xTicksToWait);
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY, (uint32_t)xTaskToNotify);
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY() \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
            prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY, (uint32_t)xTaskToNotify);
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY_FROM_ISR
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_FROM_ISR(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_FROM_ISR, (uint32_t)xTaskToNotify);
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_FROM_ISR() \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_FROM_ISR, (uint32_t)xTaskToNotify);
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

#undef traceTASK_NOTIFY_GIVE_FROM_ISR
#if (TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0)
#define traceTASK_NOTIFY_GIVE_FROM_ISR(index) \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR, (uint32_t)xTaskToNotify);
#else /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */
#define traceTASK_NOTIFY_GIVE_FROM_ISR() \
    if (TRACE_GET_OBJECT_FILTER(TASK, xTaskToNotify) & CurrentFilterMask) \
        prvTraceStoreEvent1(PSF_EVENT_TASK_NOTIFY_GIVE_FROM_ISR, (uint32_t)xTaskToNotify);
#endif /* TRC_CFG_ESP_IDF_VERSION > TRC_ESP_IDF_VERSION_4_3_0 */

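/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * Direct-to-task notifications are traced through the hooks above. A minimal
 * sketch, assuming a hypothetical worker task handle xWorker:
 *
 *   // From a task:
 *   xTaskNotifyGive(xWorker);                  // traceTASK_NOTIFY
 *
 *   // From an ISR:
 *   vTaskNotifyGiveFromISR(xWorker, &xWoken);  // traceTASK_NOTIFY_GIVE_FROM_ISR
 *
 *   // In the worker task:
 *   ulTaskNotifyTake(pdTRUE, portMAX_DELAY);   // traceTASK_NOTIFY_TAKE (or _TAKE_BLOCK while waiting)
 ******************************************************************************/
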
#undef traceQUEUE_REGISTRY_ADD
#define traceQUEUE_REGISTRY_ADD(object, name) \
    prvTraceSaveObjectSymbol(object, (const char*)name); \
    prvTraceStoreStringEvent(1, PSF_EVENT_OBJ_NAME, name, object);

#if (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1)

#undef traceSTREAM_BUFFER_CREATE
#define traceSTREAM_BUFFER_CREATE( pxStreamBuffer, xIsMessageBuffer ) \
    TRACE_SET_OBJECT_FILTER(STREAMBUFFER, pxStreamBuffer, CurrentFilterGroup); \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, pxStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent2(xIsMessageBuffer == 1 ? PSF_EVENT_MESSAGEBUFFER_CREATE : PSF_EVENT_STREAMBUFFER_CREATE, (uint32_t)pxStreamBuffer, xBufferSizeBytes);

#undef traceSTREAM_BUFFER_CREATE_FAILED
#define traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        prvTraceStoreEvent2(xIsMessageBuffer == 1 ? PSF_EVENT_MESSAGEBUFFER_CREATE_FAILED : PSF_EVENT_STREAMBUFFER_CREATE_FAILED, 0, xBufferSizeBytes);

#undef traceSTREAM_BUFFER_CREATE_STATIC_FAILED
#define traceSTREAM_BUFFER_CREATE_STATIC_FAILED( xReturn, xIsMessageBuffer ) \
    traceSTREAM_BUFFER_CREATE_FAILED( xIsMessageBuffer )

#undef traceSTREAM_BUFFER_DELETE
#define traceSTREAM_BUFFER_DELETE( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_DELETE : PSF_EVENT_STREAMBUFFER_DELETE, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
    prvTraceDeleteSymbol(xStreamBuffer);

#undef traceSTREAM_BUFFER_RESET
#define traceSTREAM_BUFFER_RESET( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RESET : PSF_EVENT_STREAMBUFFER_RESET, (uint32_t)xStreamBuffer, 0);

#undef traceSTREAM_BUFFER_SEND
#define traceSTREAM_BUFFER_SEND( xStreamBuffer, xReturn ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND : PSF_EVENT_STREAMBUFFER_SEND, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_SEND
#define traceBLOCKING_ON_STREAM_BUFFER_SEND( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_BLOCK : PSF_EVENT_STREAMBUFFER_SEND_BLOCK, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_SEND_FAILED
#define traceSTREAM_BUFFER_SEND_FAILED( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FAILED : PSF_EVENT_STREAMBUFFER_SEND_FAILED, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE
#define traceSTREAM_BUFFER_RECEIVE( xStreamBuffer, xReceivedLength ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE : PSF_EVENT_STREAMBUFFER_RECEIVE, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer));

#undef traceBLOCKING_ON_STREAM_BUFFER_RECEIVE
#define traceBLOCKING_ON_STREAM_BUFFER_RECEIVE( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_BLOCK : PSF_EVENT_STREAMBUFFER_RECEIVE_BLOCK, (uint32_t)xStreamBuffer);

#undef traceSTREAM_BUFFER_RECEIVE_FAILED
#define traceSTREAM_BUFFER_RECEIVE_FAILED( xStreamBuffer ) \
    if (TRACE_GET_OBJECT_FILTER(TASK, TRACE_GET_CURRENT_TASK()) & CurrentFilterMask) \
        if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FAILED : PSF_EVENT_STREAMBUFFER_RECEIVE_FAILED, (uint32_t)xStreamBuffer);

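/*******************************************************************************
 * Example (illustrative sketch, not part of the recorder):
 *
 * With TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS enabled, stream and message
 * buffer operations are traced through the hooks above. A minimal sketch with
 * a hypothetical buffer handle:
 *
 *   StreamBufferHandle_t xSb = xStreamBufferCreate(128, 1);          // traceSTREAM_BUFFER_CREATE
 *   const char cMsg[] = "hi";
 *   xStreamBufferSend(xSb, cMsg, sizeof(cMsg), 0);                   // traceSTREAM_BUFFER_SEND
 *   char cRx[8];
 *   xStreamBufferReceive(xSb, cRx, sizeof(cRx), pdMS_TO_TICKS(10));  // traceSTREAM_BUFFER_RECEIVE
 ******************************************************************************/
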
#undef traceSTREAM_BUFFER_SEND_FROM_ISR
#define traceSTREAM_BUFFER_SEND_FROM_ISR( xStreamBuffer, xReturn ) \
    if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
    { \
        if ( xReturn > ( size_t ) 0 ) \
        { \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR : PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
        } \
        else \
        { \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_SEND_FROM_ISR_FAILED : PSF_EVENT_STREAMBUFFER_SEND_FROM_ISR_FAILED, (uint32_t)xStreamBuffer); \
        } \
    }

#undef traceSTREAM_BUFFER_RECEIVE_FROM_ISR
#define traceSTREAM_BUFFER_RECEIVE_FROM_ISR( xStreamBuffer, xReceivedLength ) \
    if (TRACE_GET_OBJECT_FILTER(STREAMBUFFER, xStreamBuffer) & CurrentFilterMask) \
    { \
        if ( xReceivedLength > ( size_t ) 0 ) \
        { \
            prvTraceStoreEvent2(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR : PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR, (uint32_t)xStreamBuffer, prvBytesInBuffer(xStreamBuffer)); \
        } \
        else \
        { \
            prvTraceStoreEvent1(prvGetStreamBufferType(xStreamBuffer) > 0 ? PSF_EVENT_MESSAGEBUFFER_RECEIVE_FROM_ISR_FAILED : PSF_EVENT_STREAMBUFFER_RECEIVE_FROM_ISR_FAILED, (uint32_t)xStreamBuffer); \
        } \
    }

#endif /* (TRC_CFG_INCLUDE_STREAM_BUFFER_EVENTS == 1) */

#endif /* (TRC_CFG_SCHEDULING_ONLY == 0) */

#endif /* (TRC_CFG_RECORDER_MODE == TRC_RECORDER_MODE_STREAMING) */

#else /* (TRC_USE_TRACEALYZER_RECORDER == 1) */

/* When recorder is disabled */
#define vTraceSetQueueName(object, name)
#define vTraceSetSemaphoreName(object, name)
#define vTraceSetMutexName(object, name)
#define vTraceSetEventGroupName(object, name)
#define vTraceSetStreamBufferName(object, name)
#define vTraceSetMessageBufferName(object, name)

#endif /* (TRC_USE_TRACEALYZER_RECORDER == 1) */

#ifdef __cplusplus
}
#endif

#endif /* TRC_KERNEL_PORT_H */