1 /*
2  * Trace Recorder for Tracealyzer v4.6.6
3  * Copyright 2021 Percepio AB
4  * www.percepio.com
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  *
8  * The Zephyr specific parts of the trace recorder
9  */
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <string.h>
14 #include <trcRecorder.h>
15 
16 
17 /* Generic Zephyr system heap handle */
18 TraceHeapHandle_t xSystemHeapHandle;
19 
/* Generic Zephyr ISR handle used for all Zephyr ISRs that the user hasn't
 * manually added tracing for. */
22 static TraceISRHandle_t xHandleISR;
23 
/* Trace recorder control thread stack */
25 static K_THREAD_STACK_DEFINE(TzCtrl_thread_stack, (TRC_CFG_CTRL_TASK_STACK_SIZE));
26 
27 
28 /**
29  * @brief TzCtrl_thread_entry
30  *
31  * Task for sending the trace data from the internal buffer to the stream
32  * interface (assuming TRC_STREAM_PORT_USE_INTERNAL_BUFFER == 1) and for
33  * receiving commands from Tracealyzer. Also does some diagnostics.
34  *
35  * @param[in] _args
36  */
TzCtrl_thread_entry(void * _args)37 void TzCtrl_thread_entry(void *_args)
38 {
39 	while (1)
40 	{
41 		xTraceTzCtrl();
42 
43 		k_msleep((TRC_CFG_CTRL_TASK_DELAY));
44 	}
45 }
46 
47 
/**
 * @brief Kernel-port private data, placed in the buffer handed to
 * xTraceKernelPortInitialize(). The layout must match
 * TraceKernelPortDataBuffer_t exactly (checked with TRC_ASSERT_EQUAL_SIZE
 * in xTraceKernelPortInitialize), so fields must not be reordered.
 */
typedef struct TraceKernelPortData
{
	/* System heap handle; set to 0 during initialization by this port */
	TraceHeapHandle_t xSystemHeapHandle;
	/* Thread object used for the TzCtrl control task */
	TraceKernelPortTaskHandle_t xTzCtrlHandle;
} TraceKernelPortData_t;

/* Points into the caller-supplied buffer once xTraceKernelPortInitialize()
 * has run; NULL before that. */
static TraceKernelPortData_t* pxKernelPortData;
58 
xTraceKernelPortInitialize(TraceKernelPortDataBuffer_t * pxBuffer)59 traceResult xTraceKernelPortInitialize(TraceKernelPortDataBuffer_t* pxBuffer)
60 {
61 	TRC_ASSERT_EQUAL_SIZE(TraceKernelPortDataBuffer_t, TraceKernelPortData_t);
62 
63 	if (pxBuffer == 0)
64 	{
65 		return TRC_FAIL;
66 	}
67 
68 	pxKernelPortData = (TraceKernelPortData_t*)pxBuffer;
69 
70 	pxKernelPortData->xSystemHeapHandle = 0;
71 
72 	return TRC_SUCCESS;
73 }
74 
xTraceKernelPortEnable(void)75 traceResult xTraceKernelPortEnable(void)
76 {
77 	return TRC_SUCCESS;
78 }
79 
/**
 * @brief Get the system heap handle.
 *
 * This Zephyr port does not track a system heap; always returns 0.
 */
TraceHeapHandle_t xTraceKernelPortGetSystemHeapHandle(void)
{
	return 0;
}
84 
#if defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0)
/**
 * @brief Report the amount of unused stack for a thread (stack monitor).
 *
 * @param[in] thread Pointer to a struct k_thread.
 * @param[out] puxUnusedStack Receives the unused stack size in bytes.
 *
 * NOTE(review): k_thread_stack_space_get() returns 0 on success or a
 * negative errno on failure, and that value is passed straight through as
 * a traceResult. Presumably TRC_SUCCESS == 0 so success maps correctly,
 * but failure codes are not TRC_FAIL — confirm against trcRecorder.h.
 */
traceResult xTraceKernelPortGetUnusedStack(void* thread, TraceUnsignedBaseType_t* puxUnusedStack)
{
	return k_thread_stack_space_get(thread, puxUnusedStack);
}
#endif /* defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0) */
91 
/**
 * @brief Query whether the scheduler is suspended.
 *
 * This port always reports "not suspended" (0).
 */
unsigned char xTraceKernelPortIsSchedulerSuspended(void)
{
	return 0;
}
96 
/** @brief Set the display name of a generic kernel object. */
void vTraceSetKernelObjectName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
101 
/** @brief Set the display name of a work queue. */
void vTraceSetWorkQueueName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
106 
/** @brief Set the display name of a heap. */
void vTraceSetHeapName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
111 
/** @brief Set the display name of a semaphore. */
void vTraceSetSemaphoreName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
116 
/** @brief Set the display name of a mutex. */
void vTraceSetMutexName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
121 
/** @brief Set the display name of a condition variable. */
void vTraceSetCondvarName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
126 
/** @brief Set the display name of a queue. */
void vTraceSetQueueName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
131 
/** @brief Set the display name of a FIFO queue. */
void vTraceSetFIFOQueueName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
136 
/** @brief Set the display name of a LIFO queue. */
void vTraceSetLIFOQueueName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
141 
/** @brief Set the display name of a stack object. */
void vTraceSetStackName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
146 
/** @brief Set the display name of a message queue. */
void vTraceSetMessageQueueName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
151 
/** @brief Set the display name of a mailbox. */
void vTraceSetMailboxName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
156 
/** @brief Set the display name of a pipe. */
void vTraceSetPipeName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
161 
/** @brief Set the display name of a memory heap. */
void vTraceSetMemoryHeapName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
166 
/** @brief Set the display name of a memory slab. */
void vTraceSetMemorySlabName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
171 
/** @brief Set the display name of a timer. */
void vTraceSetTimerName(void* object, const char* name)
{
	/* Register the name keyed on the object's address */
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
176 
177 /**
178  * @brief Initialize aspects of the recorder that must preceed the
179  * kernel initialization (scheduling, threads, etc.).
180  *
181  * @param[in] arg
182  */
tracelyzer_pre_kernel_init(void)183 static int tracelyzer_pre_kernel_init(void)
184 {
185 
186 #ifdef CONFIG_PERCEPIO_TRC_START_MODE_START
187 	xTraceEnable(TRC_START);
188 #elif CONFIG_PERCEPIO_TRC_START_MODE_START_AWAIT_HOST
189 	xTraceEnable(TRC_START_AWAIT_HOST);
190 #else
191 	xTraceEnable(TRC_START_FROM_HOST);
192 #endif
193 
194 	/* Create ISR handle */
195 	xTraceISRRegister("Zephyr ISR", -32, &xHandleISR);
196 
197 	return 0;
198 }
199 
200 /**
201  * @brief Initialize aspects of the recorder that depends on the kernel
202  * being initialized.
203  *
204  * @param[in] arg
205  */
tracealyzer_post_kernel_init(void)206 static int tracealyzer_post_kernel_init(void)
207 {
208 
209 	/* Create controller task */
210 	k_thread_create(&pxKernelPortData->xTzCtrlHandle, TzCtrl_thread_stack,
211 	 	K_THREAD_STACK_SIZEOF(TzCtrl_thread_stack),
212 	 	(k_thread_entry_t)TzCtrl_thread_entry,
213 	 	NULL, NULL, NULL,
214 	 	(TRC_CFG_CTRL_TASK_PRIORITY),
215 	 	0,
216 	 	K_NO_WAIT);
217 
218 	/* Set controller task name */
219 	k_thread_name_set(&pxKernelPortData->xTzCtrlHandle, "TzCtrl");
220 
221 	return 0;
222 }
223 
/* Specify recorder module initialization stages: tracing is enabled before
 * the scheduler starts (PRE_KERNEL_2) and the TzCtrl control thread is
 * created once the kernel is up (POST_KERNEL). */
SYS_INIT(tracelyzer_pre_kernel_init, PRE_KERNEL_2, 0);
SYS_INIT(tracealyzer_post_kernel_init, POST_KERNEL, 0);
227 
228 
229 
230 
231 /* Thread trace function definitions */
/** @brief Trace entry into k_thread_foreach(). */
void sys_trace_k_thread_foreach_enter(k_thread_user_cb_t user_cb, void *user_data) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_FOREACH_ENTER, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, user_cb);
	xTraceEventAddPointer(xHandle, user_data);
	xTraceEventEnd(xHandle);
}
242 
/** @brief Trace return from k_thread_foreach(). */
void sys_trace_k_thread_foreach_exit(k_thread_user_cb_t user_cb, void *user_data) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_FOREACH_EXIT, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, user_cb);
	xTraceEventAddPointer(xHandle, user_data);
	xTraceEventEnd(xHandle);
}
253 
/** @brief Trace entry into k_thread_foreach_unlocked(). */
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *user_data) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_FOREACH_UNLOCKED_ENTER, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, user_cb);
	xTraceEventAddPointer(xHandle, user_data);
	xTraceEventEnd(xHandle);
}
264 
/** @brief Trace return from k_thread_foreach_unlocked(). */
void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *user_data) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_FOREACH_UNLOCKED_EXIT, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, user_cb);
	xTraceEventAddPointer(xHandle, user_data);
	xTraceEventEnd(xHandle);
}
275 
/**
 * @brief Trace creation of a thread.
 *
 * Registers the thread in the recorder's entry table (keyed by address),
 * adds it to the stack monitor, emits a THREAD_INIT event, and — when
 * CONFIG_THREAD_NAME is enabled and a name is set — records the name.
 *
 * @param[in] thread Newly created thread.
 * @param[in] stack_size Thread stack size in bytes.
 * @param[in] prio Initial thread priority.
 */
void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size, int prio) {
	TraceEventHandle_t xTraceHandle;

	/* Since we have a slightly different task/thread register event
	 * we manually update the entry table here */
	TraceEntryHandle_t xEntryHandle;

	TRACE_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();

	/* Entry creation and state update must happen atomically with respect
	 * to other recorder users, hence the critical section. */
	if (xTraceEntryCreateWithAddress((void*)thread, &xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	xTraceEntrySetState(xEntryHandle, 0, prio);
	TRACE_EXIT_CRITICAL_SECTION();

	/* Register task with stack monitor */
	xTraceStackMonitorAdd((void*)thread);

	if (xTraceEventBegin(PSF_EVENT_THREAD_INIT, sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)thread);
		xTraceEventAdd32(xTraceHandle, (uint32_t)stack_size);
		xTraceEventAdd32(xTraceHandle, (uint32_t)prio);
		xTraceEventEnd(xTraceHandle);
	}

#ifdef CONFIG_THREAD_NAME
	/* Name is only set if non-empty; xEntryHandle is still valid here
	 * since it is only invalidated on entry deletion (see sched_abort). */
	if (strlen(thread->name) > 0) {
		xTraceObjectSetName(xEntryHandle, thread->name);
	}
#endif
}
312 
/** @brief Trace a thread dropping to user mode, with entry point and args. */
void sys_trace_k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_USER_MODE_ENTER, sizeof(void*) * 5,
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	/* Current thread first, then entry point and its three arguments */
	xTraceEventAddPointer(xHandle, (void*)k_current_get());
	xTraceEventAddPointer(xHandle, (void*)entry);
	xTraceEventAddPointer(xHandle, (void*)p1);
	xTraceEventAddPointer(xHandle, (void*)p2);
	xTraceEventAddPointer(xHandle, (void*)p3);
	xTraceEventEnd(xHandle);
}
326 
sys_trace_k_thread_heap_assign(struct k_thread * thread,struct k_heap * heap)327 void sys_trace_k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap) {
328 	TraceEventHandle_t xTraceHandle;
329 
330 	if (xTraceEventBegin(PSF_EVENT_THREAD_HEAP_ASSIGN, sizeof(void*) + sizeof(void*),
331 		&xTraceHandle) == TRC_SUCCESS) {
332 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
333 		xTraceEventAddPointer(xTraceHandle, (void*)heap);
334 		xTraceEventEnd(xTraceHandle);
335 	}
336 }
337 
/** @brief Trace a blocking k_thread_join() call. */
void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_JOIN_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)thread);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
348 
/** @brief Trace k_thread_join() result: SUCCESS when ret == 0, else TIMEOUT. */
void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_THREAD_JOIN_SUCCESS : PSF_EVENT_THREAD_JOIN_TIMEOUT;

	if (xTraceEventBegin(uiEvent, sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)thread);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
368 
/** @brief Trace entry into k_sleep(). */
void sys_trace_k_thread_sleep_enter(k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_SLEEP_ENTER, sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
377 
/** @brief Trace return from k_sleep(), including its return value. */
void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_SLEEP_EXIT, sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
388 
/** @brief Trace entry into k_msleep(). */
void sys_trace_k_thread_msleep_enter(int32_t ms) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_MSLEEP_ENTER, sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, ms);
	xTraceEventEnd(xHandle);
}
397 
/** @brief Trace return from k_msleep(). The event payload carries only ms. */
void sys_trace_k_thread_msleep_exit(int32_t ms, int ret) {
	TraceEventHandle_t xHandle;

	(void)ret; /* not part of the MSLEEP_EXIT event payload */

	if (xTraceEventBegin(PSF_EVENT_THREAD_MSLEEP_EXIT, sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, ms);
	xTraceEventEnd(xHandle);
}
406 
/** @brief Trace entry into k_usleep(). */
void sys_trace_k_thread_usleep_enter(int32_t us) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_USLEEP_ENTER, sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, us);
	xTraceEventEnd(xHandle);
}
415 
/** @brief Trace return from k_usleep(), including its return value. */
void sys_trace_k_thread_usleep_exit(int32_t us, int ret) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_USLEEP_EXIT, sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, us);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
426 
/** @brief Trace entry into k_busy_wait(). */
void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_BUSY_WAIT_ENTER, sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, usec_to_wait);
	xTraceEventEnd(xHandle);
}
436 
/** @brief Trace return from k_busy_wait(). */
void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_THREAD_BUSY_WAIT_EXIT, sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAdd32(xHandle, usec_to_wait);
	xTraceEventEnd(xHandle);
}
446 
sys_trace_k_thread_yield()447 void sys_trace_k_thread_yield() {
448 	TraceEventHandle_t xTraceHandle;
449 
450 	if (xTraceEventBegin(PSF_EVENT_THREAD_YIELD, 0, &xTraceHandle) == TRC_SUCCESS) {
451 		xTraceEventEnd(xTraceHandle);
452 	}
453 }
454 
sys_trace_k_thread_wakeup(struct k_thread * thread)455 void sys_trace_k_thread_wakeup(struct k_thread *thread) {
456 	TraceEventHandle_t xTraceHandle;
457 
458 	if (xTraceEventBegin(PSF_EVENT_THREAD_WAKEUP, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
459 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
460 		xTraceEventEnd(xTraceHandle);
461 	}
462 }
463 
/**
 * @brief Thread abort hook.
 *
 * Intentionally left empty; the full abort handling (entry-table cleanup,
 * stack-monitor removal, event emission) lives in
 * sys_trace_k_thread_sched_abort().
 */
void sys_trace_k_thread_abort(struct k_thread *thread) {
	/* Intentionally left empty, see k_thread_sched_abort for implementation */
}
467 
sys_trace_k_thread_start(struct k_thread * thread)468 void sys_trace_k_thread_start(struct k_thread *thread) {
469 	TraceEventHandle_t xTraceHandle;
470 
471 	if (xTraceEventBegin(PSF_EVENT_THREAD_START, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
472 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
473 		xTraceEventEnd(xTraceHandle);
474 	}
475 }
476 
sys_trace_k_thread_priority_set(struct k_thread * thread)477 void sys_trace_k_thread_priority_set(struct k_thread *thread) {
478 	TraceEventHandle_t xTraceHandle;
479 
480 	if (xTraceObjectSetStateWithoutHandle((void*)thread, k_thread_priority_get(thread)) == TRC_FAIL)
481 	{
482 		return;
483 	}
484 
485 	if (xTraceEventBegin(PSF_EVENT_THREAD_SET_PRIORITY, sizeof(void*) + sizeof(uint32_t),
486 		&xTraceHandle) == TRC_SUCCESS) {
487 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
488 		xTraceEventAdd32(xTraceHandle, k_thread_priority_get(thread));
489 		xTraceEventEnd(xTraceHandle);
490 	}
491 }
492 
sys_trace_k_thread_suspend(struct k_thread * thread)493 void sys_trace_k_thread_suspend(struct k_thread *thread) {
494 	TraceEventHandle_t xTraceHandle;
495 
496 	if (xTraceEventBegin(PSF_EVENT_THREAD_SUSPEND, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
497 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
498 		xTraceEventEnd(xTraceHandle);
499 	}
500 }
501 
sys_trace_k_thread_resume(struct k_thread * thread)502 void sys_trace_k_thread_resume(struct k_thread *thread) {
503 	TraceEventHandle_t xTraceHandle;
504 
505 	if (xTraceEventBegin(PSF_EVENT_THREAD_RESUME, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
506 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
507 		xTraceEventEnd(xTraceHandle);
508 	}
509 }
510 
sys_trace_k_thread_name_set(struct k_thread * thread,int ret)511 void sys_trace_k_thread_name_set(struct k_thread *thread, int ret) {
512 	if (ret == 0) {
513 		xTraceObjectSetNameWithoutHandle((void*)thread, thread->name);
514 	}
515 }
516 
/**
 * @brief Context-switch-out hook.
 *
 * Intentionally empty; the switch is recorded on the switch-in side in
 * sys_trace_k_thread_switched_in() via xTraceTaskSwitch().
 */
void sys_trace_k_thread_switched_out(void) {
}
519 
/** @brief Record a context switch to the now-current thread. */
void sys_trace_k_thread_switched_in(void) {
	struct k_thread *pxCurrent = k_current_get();

	xTraceTaskSwitch(pxCurrent, k_thread_priority_get(pxCurrent));
}
523 
/**
 * @brief Thread info hook.
 *
 * Intentionally empty; no corresponding event is emitted by this port.
 */
void sys_trace_k_thread_info(struct k_thread *thread) {
}
526 
527 
/* Thread scheduler trace function definitions */
sys_trace_k_thread_sched_lock()529 void sys_trace_k_thread_sched_lock() {
530 	TraceEventHandle_t xTraceHandle;
531 
532 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_LOCK, 0, &xTraceHandle) == TRC_SUCCESS) {
533 		xTraceEventEnd(xTraceHandle);
534 	}
535 }
536 
sys_trace_k_thread_sched_unlock()537 void sys_trace_k_thread_sched_unlock() {
538 	TraceEventHandle_t xTraceHandle;
539 
540 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_UNLOCK, 0, &xTraceHandle) == TRC_SUCCESS) {
541 		xTraceEventEnd(xTraceHandle);
542 	}
543 }
544 
sys_trace_k_thread_sched_wakeup(struct k_thread * thread)545 void sys_trace_k_thread_sched_wakeup(struct k_thread *thread) {
546 	TraceEventHandle_t xTraceHandle;
547 
548 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_WAKEUP, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
549 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
550 		xTraceEventEnd(xTraceHandle);
551 	}
552 }
553 
/**
 * @brief Trace abortion of a thread.
 *
 * Removes the thread's entry from the recorder's entry table (atomically,
 * under a critical section), removes it from the stack monitor, and emits
 * a THREAD_SCHED_ABORT event. Counterpart to the registration done in
 * sys_trace_k_thread_create().
 *
 * @param[in] thread Thread being aborted.
 */
void sys_trace_k_thread_sched_abort(struct k_thread *thread) {
	TraceEventHandle_t xTraceHandle;
	TraceEntryHandle_t xEntryHandle;

	TRACE_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();

	/* Fetch entry handle */
	if (xTraceEntryFind((void*)thread, &xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	/* Delete entry */
	if (xTraceEntryDelete(xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	TRACE_EXIT_CRITICAL_SECTION();

	/* Remove thread from stack monitor */
	xTraceStackMonitorRemove((void*)thread);

	/* Emit the abort event last, outside the critical section */
	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_ABORT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)thread);
		xTraceEventEnd(xTraceHandle);
	}
}
585 
sys_trace_k_thread_sched_set_priority(struct k_thread * thread,int prio)586 void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio) {
587 	TraceEventHandle_t xTraceHandle;
588 
589 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_PRIORITY_SET, sizeof(void*) + sizeof(uint32_t),
590 		&xTraceHandle) == TRC_SUCCESS) {
591 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
592 		xTraceEventAdd32(xTraceHandle, prio);
593 		xTraceEventEnd(xTraceHandle);
594 	}
595 }
596 
/** @brief Mark a thread as ready in the recorder. */
void sys_trace_k_thread_sched_ready(struct k_thread *thread) {
	(void)xTraceTaskReady((void*)thread);
}
600 
/**
 * @brief Scheduler pend hook.
 *
 * Intentionally empty; thread pending is not traced by this port.
 */
void sys_trace_k_thread_sched_pend(struct k_thread *thread) {

}
604 
sys_trace_k_thread_sched_resume(struct k_thread * thread)605 void sys_trace_k_thread_sched_resume(struct k_thread *thread) {
606 	TraceEventHandle_t xTraceHandle;
607 
608 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_RESUME, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
609 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
610 		xTraceEventEnd(xTraceHandle);
611 	}
612 }
613 
sys_trace_k_thread_sched_suspend(struct k_thread * thread)614 void sys_trace_k_thread_sched_suspend(struct k_thread *thread) {
615 	TraceEventHandle_t xTraceHandle;
616 
617 	if (xTraceEventBegin(PSF_EVENT_THREAD_SCHED_SUSPEND, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
618 		xTraceEventAddPointer(xTraceHandle, (void*)thread);
619 		xTraceEventEnd(xTraceHandle);
620 	}
621 }
622 
623 
624 /* Work trace function definitions */
/** @brief Trace initialization of a work item with its handler. */
void sys_trace_k_work_init(struct k_work *work, k_work_handler_t handler) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_INIT, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)handler);
	xTraceEventEnd(xHandle);
}
635 
sys_trace_k_work_submit_to_queue_enter(struct k_work_q * queue,struct k_work * work)636 void sys_trace_k_work_submit_to_queue_enter(struct k_work_q *queue, struct k_work *work) {
637 	TraceEventHandle_t xTraceHandle;
638 
639 	if (xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_TO_QUEUE_BLOCKING, sizeof(void*) + sizeof(void*),
640 		&xTraceHandle) == TRC_SUCCESS) {
641 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
642 		xTraceEventAddPointer(xTraceHandle, (void*)work);
643 		xTraceEventEnd(xTraceHandle);
644 	}
645 }
646 
sys_trace_k_work_submit_to_queue_exit(struct k_work_q * queue,struct k_work * work,int ret)647 void sys_trace_k_work_submit_to_queue_exit(struct k_work_q *queue, struct k_work *work, int ret) {
648 	traceResult xTraceResult;
649 	TraceEventHandle_t xTraceHandle;
650 
651 	if (ret >= 0) {
652 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_TO_QUEUE_SUCCESS,
653 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
654 	} else {
655 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_TO_QUEUE_FAILURE,
656 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
657 	}
658 
659 	if (xTraceResult == TRC_SUCCESS) {
660 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
661 		xTraceEventAddPointer(xTraceHandle, (void*)work);
662 		xTraceEventAdd32(xTraceHandle, ret);
663 		xTraceEventEnd(xTraceHandle);
664 	}
665 }
666 
sys_trace_k_work_submit_enter(struct k_work * work)667 void sys_trace_k_work_submit_enter(struct k_work *work) {
668 	TraceEventHandle_t xTraceHandle;
669 
670 	if (xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
671 		xTraceEventAddPointer(xTraceHandle, (void*)work);
672 		xTraceEventEnd(xTraceHandle);
673 	}
674 }
675 
sys_trace_k_work_submit_exit(struct k_work * work,int ret)676 void sys_trace_k_work_submit_exit(struct k_work *work, int ret) {
677 	traceResult xTraceResult;
678 	TraceEventHandle_t xTraceHandle;
679 
680 	if (ret >= 0) {
681 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_SUCCESS,
682 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
683 	} else {
684 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_SUBMIT_FAILURE,
685 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
686 	}
687 
688 	if (xTraceResult == TRC_SUCCESS) {
689 		xTraceEventAddPointer(xTraceHandle, (void*)work);
690 		xTraceEventAdd32(xTraceHandle, ret);
691 		xTraceEventEnd(xTraceHandle);
692 	}
693 }
694 
sys_trace_k_work_flush_enter(struct k_work * work,struct k_work_sync * sync)695 void sys_trace_k_work_flush_enter(struct k_work *work, struct k_work_sync *sync) {
696 	TraceEventHandle_t xTraceHandle;
697 
698 	if (xTraceEventBegin(PSF_EVENT_WORK_FLUSH_BLOCKING, sizeof(void*) + sizeof(void*),
699 		&xTraceHandle) == TRC_SUCCESS) {
700 		xTraceEventAddPointer(xTraceHandle, (void*)work);
701 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
702 		xTraceEventEnd(xTraceHandle);
703 	}
704 }
705 
/**
 * @brief Work flush blocking hook.
 *
 * Intentionally empty; only the enter/exit events are emitted by this port.
 */
void sys_trace_k_work_flush_blocking(struct k_work *work, struct k_work_sync *sync, k_timeout_t timeout) {

}
709 
/** @brief Trace return from k_work_flush(), including the flushed flag. */
void sys_trace_k_work_flush_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_FLUSH_SUCCESS, sizeof(void*) + sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)sync);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
721 
sys_trace_k_work_cancel_enter(struct k_work * work)722 void sys_trace_k_work_cancel_enter(struct k_work *work) {
723 	TraceEventHandle_t xTraceHandle;
724 
725 	if (xTraceEventBegin(PSF_EVENT_WORK_CANCEL_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
726 		xTraceEventAddPointer(xTraceHandle, (void*)work);
727 		xTraceEventEnd(xTraceHandle);
728 	}
729 }
730 
sys_trace_k_work_cancel_exit(struct k_work * work,int ret)731 void sys_trace_k_work_cancel_exit(struct k_work *work, int ret) {
732 	TraceEventHandle_t xTraceHandle;
733 
734 	if (xTraceEventBegin(PSF_EVENT_WORK_CANCEL_SUCCESS, sizeof(void*) + sizeof(uint32_t),
735 		&xTraceHandle) == TRC_SUCCESS) {
736 		xTraceEventAddPointer(xTraceHandle, (void*)work);
737 		xTraceEventAdd32(xTraceHandle, ret);
738 		xTraceEventEnd(xTraceHandle);
739 	}
740 }
741 
sys_trace_k_work_cancel_sync_enter(struct k_work * work,struct k_work_sync * sync)742 void sys_trace_k_work_cancel_sync_enter(struct k_work *work, struct k_work_sync *sync) {
743 	TraceEventHandle_t xTraceHandle;
744 
745 	if (xTraceEventBegin(PSF_EVENT_WORK_CANCEL_SYNC_BLOCKING, sizeof(void*) + sizeof(void*),
746 		&xTraceHandle) == TRC_SUCCESS) {
747 		xTraceEventAddPointer(xTraceHandle, (void*)work);
748 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
749 		xTraceEventEnd(xTraceHandle);
750 	}
751 }
752 
/**
 * @brief Work cancel-sync blocking hook.
 *
 * Intentionally empty; only the enter/exit events are emitted by this port.
 */
void sys_trace_k_work_cancel_sync_blocking(struct k_work *work, struct k_work_sync *sync) {

}
756 
/** @brief Trace return from k_work_cancel_sync(), including the result flag. */
void sys_trace_k_work_cancel_sync_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_CANCEL_SYNC_SUCCESS,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)sync);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
768 
769 
770 /* Work queue trace function definitions */
/** @brief Trace entry into k_work_queue_start() with its parameters. */
void sys_trace_k_work_queue_start_enter(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_QUEUE_START_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)queue);
	xTraceEventAddPointer(xHandle, (void*)stack);
	xTraceEventAdd32(xHandle, stack_size);
	xTraceEventAdd32(xHandle, prio);
	xTraceEventAddPointer(xHandle, (void*)cfg);
	xTraceEventEnd(xHandle);
}
785 
/** @brief Trace return from k_work_queue_start() with its parameters. */
void sys_trace_k_work_queue_start_exit(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_QUEUE_START_SUCCESS,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)queue);
	xTraceEventAddPointer(xHandle, (void*)stack);
	xTraceEventAdd32(xHandle, stack_size);
	xTraceEventAdd32(xHandle, prio);
	xTraceEventAddPointer(xHandle, (void*)cfg);
	xTraceEventEnd(xHandle);
}
800 
/** @brief Trace entry into k_work_queue_drain(). */
void sys_trace_k_work_queue_drain_enter(struct k_work_q *queue, bool plug) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_WORK_QUEUE_DRAIN_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)queue);
	xTraceEventAdd32(xHandle, plug);
	xTraceEventEnd(xHandle);
}
811 
/** @brief Trace k_work_queue_drain() result: SUCCESS when ret >= 0. */
void sys_trace_k_work_queue_drain_exit(struct k_work_q *queue, bool plug, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret >= 0) ? PSF_EVENT_WORK_QUEUE_DRAIN_SUCCESS
		: PSF_EVENT_WORK_QUEUE_DRAIN_FAILURE;

	if (xTraceEventBegin(uiEvent, sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)queue);
	xTraceEventAdd32(xHandle, plug);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
831 
sys_trace_k_work_queue_unplug_enter(struct k_work_q * queue)832 void sys_trace_k_work_queue_unplug_enter(struct k_work_q *queue) {
833 	TraceEventHandle_t xTraceHandle;
834 
835 	if (xTraceEventBegin(PSF_EVENT_WORK_QUEUE_UNPLUG_BLOCKING, sizeof(void*),
836 		&xTraceHandle) == TRC_SUCCESS) {
837 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
838 		xTraceEventEnd(xTraceHandle);
839 	}
840 }
841 
sys_trace_k_work_queue_unplug_exit(struct k_work_q * queue,int ret)842 void sys_trace_k_work_queue_unplug_exit(struct k_work_q *queue, int ret) {
843 	traceResult xTraceResult;
844 	TraceEventHandle_t xTraceHandle;
845 
846 	if (ret == 0) {
847 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_QUEUE_UNPLUG_SUCCESS,
848 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
849 	} else {
850 		xTraceResult = xTraceEventBegin(PSF_EVENT_WORK_QUEUE_UNPLUG_FAILURE,
851 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
852 	}
853 
854 	if (xTraceResult == TRC_SUCCESS) {
855 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
856 		xTraceEventAdd32(xTraceHandle, ret);
857 		xTraceEventEnd(xTraceHandle);
858 	}
859 }
860 
861 
862 /* Work delayable trace function definitions */
/* Trace hook for k_work_init_delayable(): records the delayable work item
 * and its handler. */
void sys_trace_k_work_delayable_init(struct k_work_delayable *dwork, k_work_handler_t handler) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_DWORK_INIT, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)dwork);
	xTraceEventAddPointer(xHandle, (void*)handler);
	xTraceEventEnd(xHandle);
}
873 
/* Trace hook for entry to k_work_schedule_for_queue(). Intentionally empty:
 * the event is recorded by sys_trace_k_work_schedule_for_queue_exit(). */
void sys_trace_k_work_schedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}
876 
/* Trace hook for exit from k_work_schedule_for_queue().
 *
 * ret values 0 and 1 are traced as success, all others as failure
 * (NOTE(review): presumably the non-error returns of
 * k_work_schedule_for_queue() — confirm against the Zephyr API).
 *
 * The two branches previously duplicated the whole payload sequence; only
 * the event code differs, so select it first and emit the payload once,
 * consistent with the other *_exit hooks in this file. */
void sys_trace_k_work_schedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiEvent = (ret == 0 || ret == 1) ?
		PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)queue);
		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
		xTraceEventAdd32(xTraceHandle, delay.ticks);
		xTraceEventAdd32(xTraceHandle, ret);
		xTraceEventEnd(xTraceHandle);
	}
}
900 
/* Trace hook for entry to k_work_schedule(): records the delayable work
 * item and the delay (in ticks) as a blocking event. */
void sys_trace_k_work_schedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_DWORK_SCHEDULE_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)dwork);
	xTraceEventAdd32(xHandle, delay.ticks);
	xTraceEventEnd(xHandle);
}
911 
/* Trace hook for exit from k_work_schedule().
 *
 * ret values 0 and 1 are traced as success, all others as failure
 * (NOTE(review): presumably the non-error returns of k_work_schedule() —
 * confirm against the Zephyr API).
 *
 * The two branches previously duplicated the whole payload sequence; only
 * the event code differs, so select it first and emit the payload once,
 * consistent with the other *_exit hooks in this file. */
void sys_trace_k_work_schedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiEvent = (ret == 0 || ret == 1) ?
		PSF_EVENT_DWORK_SCHEDULE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
		xTraceEventAdd32(xTraceHandle, delay.ticks);
		xTraceEventAdd32(xTraceHandle, ret);
		xTraceEventEnd(xTraceHandle);
	}
}
933 
/* Trace hook for entry to k_work_reschedule_for_queue(). Intentionally empty:
 * the event is recorded by sys_trace_k_work_reschedule_for_queue_exit(). */
void sys_trace_k_work_reschedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}
936 
/* Trace hook for exit from k_work_reschedule_for_queue().
 *
 * ret values 0, 1 and 2 are traced as success, all others as failure
 * (NOTE(review): presumably the non-error returns of
 * k_work_reschedule_for_queue() — confirm against the Zephyr API).
 *
 * The two branches previously duplicated the whole payload sequence; only
 * the event code differs, so select it first and emit the payload once,
 * consistent with the other *_exit hooks in this file. */
void sys_trace_k_work_reschedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiEvent = (ret == 0 || ret == 1 || ret == 2) ?
		PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)queue);
		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
		xTraceEventAdd32(xTraceHandle, delay.ticks);
		xTraceEventAdd32(xTraceHandle, ret);
		xTraceEventEnd(xTraceHandle);
	}
}
960 
/* Trace hook for entry to k_work_reschedule(): records the delayable work
 * item and the delay (in ticks) as a blocking event. */
void sys_trace_k_work_reschedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_DWORK_RESCHEDULE_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)dwork);
	xTraceEventAdd32(xHandle, delay.ticks);
	xTraceEventEnd(xHandle);
}
971 
/* Trace hook for exit from k_work_reschedule().
 *
 * ret values 0, 1 and 2 are traced as success, all others as failure
 * (NOTE(review): presumably the non-error returns of k_work_reschedule() —
 * confirm against the Zephyr API).
 *
 * The two branches previously duplicated the whole payload sequence; only
 * the event code differs, so select it first and emit the payload once,
 * consistent with the other *_exit hooks in this file. */
void sys_trace_k_work_reschedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiEvent = (ret == 0 || ret == 1 || ret == 2) ?
		PSF_EVENT_DWORK_RESCHEDULE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
		xTraceEventAdd32(xTraceHandle, delay.ticks);
		xTraceEventAdd32(xTraceHandle, ret);
		xTraceEventEnd(xTraceHandle);
	}
}
993 
sys_trace_k_work_flush_delayable_enter(struct k_work_delayable * dwork,struct k_work_sync * sync)994 void sys_trace_k_work_flush_delayable_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
995 	TraceEventHandle_t xTraceHandle;
996 
997 	if (xTraceEventBegin(PSF_EVENT_DWORK_FLUSH_BLOCKING, sizeof(void*) + sizeof(void*),
998 		&xTraceHandle) == TRC_SUCCESS) {
999 		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
1000 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
1001 		xTraceEventEnd(xTraceHandle);
1002 	}
1003 }
1004 
sys_trace_k_work_flush_delayable_exit(struct k_work_delayable * dwork,struct k_work_sync * sync,bool ret)1005 void sys_trace_k_work_flush_delayable_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
1006 	TraceEventHandle_t xTraceHandle;
1007 
1008 	if (xTraceEventBegin(PSF_EVENT_DWORK_FLUSH_SUCCESS, sizeof(void*) + sizeof(void*) + sizeof(uint32_t),
1009 		&xTraceHandle) == TRC_SUCCESS) {
1010 		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
1011 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
1012 		xTraceEventAdd32(xTraceHandle, ret);
1013 		xTraceEventEnd(xTraceHandle);
1014 	}
1015 }
1016 
/* Trace hook for entry to k_work_cancel_delayable(). Intentionally empty:
 * the event is recorded by sys_trace_k_work_cancel_delayable_exit(). */
void sys_trace_k_work_cancel_delayable_enter(struct k_work_delayable *dwork) {
}
1019 
sys_trace_k_work_cancel_delayable_exit(struct k_work_delayable * dwork,int ret)1020 void sys_trace_k_work_cancel_delayable_exit(struct k_work_delayable *dwork, int ret) {
1021 	TraceEventHandle_t xTraceHandle;
1022 
1023 	if (xTraceEventBegin(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SUCCESS, sizeof(void*) + sizeof(uint32_t),
1024 		&xTraceHandle) == TRC_SUCCESS) {
1025 		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
1026 		xTraceEventAdd32(xTraceHandle, ret);
1027 		xTraceEventEnd(xTraceHandle);
1028 	}
1029 }
1030 
sys_trace_cancel_delayable_sync_enter(struct k_work_delayable * dwork,struct k_work_sync * sync)1031 void sys_trace_cancel_delayable_sync_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
1032 	TraceEventHandle_t xTraceHandle;
1033 
1034 	if (xTraceEventBegin(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_BLOCKING, sizeof(void*) + sizeof(void*),
1035 		&xTraceHandle) == TRC_SUCCESS) {
1036 		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
1037 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
1038 		xTraceEventEnd(xTraceHandle);
1039 	}
1040 }
1041 
sys_trace_cancel_delayable_sync_exit(struct k_work_delayable * dwork,struct k_work_sync * sync,bool ret)1042 void sys_trace_cancel_delayable_sync_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
1043 	TraceEventHandle_t xTraceHandle;
1044 
1045 	if (xTraceEventBegin(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_SUCCESS,
1046 		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
1047 		xTraceEventAddPointer(xTraceHandle, (void*)dwork);
1048 		xTraceEventAddPointer(xTraceHandle, (void*)sync);
1049 		xTraceEventAdd32(xTraceHandle, ret);
1050 		xTraceEventEnd(xTraceHandle);
1051 	}
1052 }
1053 
1054 
1055 /* Work poll trace function definitions */
/* Trace hook for entry to k_work_poll_init(): records the poll work item
 * and its handler. */
void sys_trace_k_work_poll_init_enter(struct k_work_poll *work, k_work_handler_t handler) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PWORK_INIT_ENTER, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)handler);
	xTraceEventEnd(xHandle);
}
1066 
/* Trace hook for exit from k_work_poll_init(): records the poll work item
 * and its handler. */
void sys_trace_k_work_poll_init_exit(struct k_work_poll *work, k_work_handler_t handler) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PWORK_INIT_EXIT, sizeof(void*) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)handler);
	xTraceEventEnd(xHandle);
}
1077 
/* Trace hook for entry to k_work_poll_submit_to_queue(): records the
 * target queue, work item, poll events, event count and timeout ticks. */
void sys_trace_k_work_poll_submit_to_queue_enter(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work_q);
	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
1092 
/* Trace hook for the blocking phase of k_work_poll_submit_to_queue().
 * Intentionally empty: the _enter hook already records the blocking event. */
void sys_trace_k_work_poll_submit_to_queue_blocking(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {

}
1096 
/* Trace hook for exit from k_work_poll_submit_to_queue(): ret == 0 is
 * traced as success, anything else as failure. */
void sys_trace_k_work_poll_submit_to_queue_exit(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_SUCCESS : PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work_q);
	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
1121 
/* Trace hook for entry to k_work_poll_submit(): records the work item,
 * poll events, event count and timeout ticks as a blocking event. */
void sys_trace_k_work_poll_submit_enter(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PWORK_SUBMIT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
1135 
/* Trace hook for exit from k_work_poll_submit(): ret == 0 is traced as
 * success, anything else as failure. */
void sys_trace_k_work_poll_submit_exit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_PWORK_SUBMIT_SUCCESS : PSF_EVENT_PWORK_SUBMIT_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)work);
	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
1159 
sys_trace_k_work_poll_cancel_enter(struct k_work_poll * work)1160 void sys_trace_k_work_poll_cancel_enter(struct k_work_poll *work) {
1161 	TraceEventHandle_t xTraceHandle;
1162 
1163 	if (xTraceEventBegin(PSF_EVENT_PWORK_CANCEL_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1164 		xTraceEventAddPointer(xTraceHandle, (void*)work);
1165 		xTraceEventEnd(xTraceHandle);
1166 	}
1167 }
1168 
sys_trace_k_work_poll_cancel_exit(struct k_work_poll * work,int ret)1169 void sys_trace_k_work_poll_cancel_exit(struct k_work_poll *work, int ret) {
1170 	traceResult xTraceResult;
1171 	TraceEventHandle_t xTraceHandle;
1172 
1173 	if (ret == 0) {
1174 		xTraceResult = xTraceEventBegin(PSF_EVENT_PWORK_CANCEL_SUCCESS,
1175 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1176 	} else {
1177 		xTraceResult = xTraceEventBegin(PSF_EVENT_PWORK_CANCEL_FAILURE,
1178 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1179 	}
1180 
1181 	if (xTraceResult == TRC_SUCCESS) {
1182 		xTraceEventAddPointer(xTraceHandle, (void*)work);
1183 		xTraceEventAdd32(xTraceHandle, ret);
1184 		xTraceEventEnd(xTraceHandle);
1185 	}
1186 }
1187 
1188 
1189 /* Poll API trace function definitions */
sys_trace_k_poll_api_event_init(struct k_poll_event * event,uint32_t type,int mode,void * obj)1190 void sys_trace_k_poll_api_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj) {
1191 	TraceEventHandle_t xTraceHandle;
1192 
1193 	if (xTraceEventBegin(PSF_EVENT_POLL_EVENT_INIT,
1194 		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*),
1195 		&xTraceHandle) == TRC_SUCCESS) {
1196 		xTraceEventAddPointer(xTraceHandle, (void*)event);
1197 		xTraceEventAdd32(xTraceHandle, type);
1198 		xTraceEventAdd32(xTraceHandle, mode);
1199 		xTraceEventAddPointer(xTraceHandle, (void*)obj);
1200 		xTraceEventEnd(xTraceHandle);
1201 	}
1202 }
1203 
/* Trace hook for entry to k_poll(): records the event array, event count
 * and timeout ticks as a blocking event. */
void sys_trace_k_poll_api_event_poll_enter(struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_POLL_POLL_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
1215 
/* Trace hook for exit from k_poll(): ret == 0 is traced as success,
 * anything else as failure. */
void sys_trace_k_poll_api_event_poll_exit(struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_POLL_POLL_SUCCESS : PSF_EVENT_POLL_POLL_FAILURE;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)events);
	xTraceEventAdd32(xHandle, num_events);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
1236 
sys_trace_k_poll_api_signal_init(struct k_poll_signal * signal)1237 void sys_trace_k_poll_api_signal_init(struct k_poll_signal *signal) {
1238 	TraceEventHandle_t xTraceHandle;
1239 
1240 	if (xTraceEventBegin(PSF_EVENT_POLL_SIGNAL_INIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1241 		xTraceEventAddPointer(xTraceHandle, (void*)signal);
1242 		xTraceEventEnd(xTraceHandle);
1243 	}
1244 }
1245 
sys_trace_k_poll_api_signal_reset(struct k_poll_signal * signal)1246 void sys_trace_k_poll_api_signal_reset(struct k_poll_signal *signal) {
1247 	TraceEventHandle_t xTraceHandle;
1248 
1249 	if (xTraceEventBegin(PSF_EVENT_POLL_SIGNAL_RESET, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1250 		xTraceEventAddPointer(xTraceHandle, (void*)signal);
1251 		xTraceEventEnd(xTraceHandle);
1252 	}
1253 }
1254 
sys_trace_k_poll_api_signal_check(struct k_poll_signal * signal,unsigned int * signaled,int * result)1255 void sys_trace_k_poll_api_signal_check(struct k_poll_signal *signal, unsigned int *signaled, int *result) {
1256 	TraceEventHandle_t xTraceHandle;
1257 
1258 	if (xTraceEventBegin(PSF_EVENT_POLL_SIGNAL_CHECK,
1259 		sizeof(void*) + sizeof(void*) + sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1260 		xTraceEventAddPointer(xTraceHandle, (void*)signal);
1261 		xTraceEventAddPointer(xTraceHandle, (void*)signaled);
1262 		xTraceEventAddPointer(xTraceHandle, (void*)result);
1263 		xTraceEventEnd(xTraceHandle);
1264 	}
1265 }
1266 
sys_trace_k_poll_api_signal_raise(struct k_poll_signal * signal,int result,int ret)1267 void sys_trace_k_poll_api_signal_raise(struct k_poll_signal *signal, int result, int ret) {
1268 	traceResult xTraceResult;
1269 	TraceEventHandle_t xTraceHandle;
1270 
1271 	if (ret == 0) {
1272 		xTraceResult = xTraceEventBegin(PSF_EVENT_POLL_SIGNAL_RAISE_SUCCESS,
1273 			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle);
1274 	} else {
1275 		xTraceResult = xTraceEventBegin(PSF_EVENT_POLL_SIGNAL_RAISE_FAILURE,
1276 			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xTraceHandle);
1277 	}
1278 
1279 	if (xTraceResult == TRC_SUCCESS) {
1280 		xTraceEventAddPointer(xTraceHandle, (void*)signal);
1281 		xTraceEventAdd32(xTraceHandle, result);
1282 		xTraceEventAdd32(xTraceHandle, ret);
1283 		xTraceEventEnd(xTraceHandle);
1284 	}
1285 }
1286 
1287 
1288 /* Semaphore trace function definitions */
sys_trace_k_sem_init(struct k_sem * sem,uint32_t initial_count,uint32_t limit,int ret)1289 void sys_trace_k_sem_init(struct k_sem *sem, uint32_t initial_count, uint32_t limit, int ret) {
1290 	traceResult xTraceResult;
1291 	TraceEventHandle_t xTraceHandle;
1292 
1293 	if (ret == 0) {
1294 		xTraceResult = xTraceEventBegin(PSF_EVENT_SEMAPHORE_CREATE_SUCCESS,
1295 			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
1296 			&xTraceHandle);
1297 	} else {
1298 		xTraceResult = xTraceEventBegin(PSF_EVENT_SEMAPHORE_CREATE_TIMEOUT,
1299 			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
1300 			&xTraceHandle);
1301 	}
1302 
1303 	if (xTraceResult == TRC_SUCCESS) {
1304 		xTraceEventAddPointer(xTraceHandle, (void*)sem);
1305 		xTraceEventAdd32(xTraceHandle, initial_count);
1306 		xTraceEventAdd32(xTraceHandle, limit);
1307 		xTraceEventAdd32(xTraceHandle, sem->count);
1308 		xTraceEventAdd32(xTraceHandle, ret);
1309 		xTraceEventEnd(xTraceHandle);
1310 	}
1311 }
1312 
sys_trace_k_sem_give_enter(struct k_sem * sem)1313 void sys_trace_k_sem_give_enter(struct k_sem *sem) {
1314 	TraceEventHandle_t xTraceHandle;
1315 
1316 	if (xTraceEventBegin(PSF_EVENT_SEMAPHORE_GIVE_SUCCESS, sizeof(void*) + sizeof(uint32_t),
1317 		&xTraceHandle) == TRC_SUCCESS) {
1318 		xTraceEventAddPointer(xTraceHandle, (void*)sem);
1319 		xTraceEventAdd32(xTraceHandle, sem->count);
1320 		xTraceEventEnd(xTraceHandle);
1321 	}
1322 }
1323 
/* Trace hook for entry to k_sem_take(). Intentionally empty: the event is
 * recorded by the _blocking and _exit hooks instead. */
void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout) {

}
1327 
/* Trace hook for the blocking phase of k_sem_take(): records the
 * semaphore, the timeout ticks and the current count. */
void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_SEMAPHORE_TAKE_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)sem);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, sem->count);
	xTraceEventEnd(xHandle);
}
1339 
/* Trace hook for exit from k_sem_take(): ret == 0 is traced as success,
 * anything else as failure; the payload includes the post-call count. */
void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_SEMAPHORE_TAKE_SUCCESS : PSF_EVENT_SEMAPHORE_TAKE_FAILED;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)sem);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, sem->count);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
1360 
sys_trace_k_sem_reset(struct k_sem * sem)1361 void sys_trace_k_sem_reset(struct k_sem *sem) {
1362 	TraceEventHandle_t xTraceHandle;
1363 
1364 	if (xTraceEventBegin(PSF_EVENT_SEMAPHORE_RESET, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1365 		xTraceEventAddPointer(xTraceHandle, (void*)sem);
1366 		xTraceEventEnd(xTraceHandle);
1367 	}
1368 }
1369 
1370 
1371 /* Mutex trace function definitions */
sys_trace_k_mutex_init(struct k_mutex * mutex,int ret)1372 void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret) {
1373 	TraceEventHandle_t xTraceHandle;
1374 
1375 	if (xTraceEventBegin(PSF_EVENT_MUTEX_CREATE, sizeof(void*) + sizeof(uint32_t),
1376 		&xTraceHandle) == TRC_SUCCESS) {
1377 		xTraceEventAddPointer(xTraceHandle, (void*)mutex);
1378 		xTraceEventAdd32(xTraceHandle, ret);
1379 		xTraceEventEnd(xTraceHandle);
1380 	}
1381 }
1382 
/* Trace hook for entry to k_mutex_lock(). Intentionally empty: the event
 * is recorded by the _blocking and _exit hooks instead. */
void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout) {
}
1385 
/* Trace hook for the blocking phase of k_mutex_lock(): records the mutex
 * and the timeout ticks. */
void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MUTEX_TAKE_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)mutex);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
1396 
/* Trace hook for exit from k_mutex_lock(): ret == 0 is traced as success,
 * anything else as failure. */
void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEvent = (ret == 0) ? PSF_EVENT_MUTEX_TAKE_SUCCESS : PSF_EVENT_MUTEX_TAKE_FAILED;

	if (xTraceEventBegin(uiEvent,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)mutex);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventAdd32(xHandle, ret);
	xTraceEventEnd(xHandle);
}
1416 
/* Trace hook for entry to k_mutex_unlock(). Intentionally empty: the event
 * is recorded by sys_trace_k_mutex_unlock_exit(). */
void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex) {
}
1419 
sys_trace_k_mutex_unlock_exit(struct k_mutex * mutex,int ret)1420 void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret) {
1421 	traceResult xTraceResult;
1422 	TraceEventHandle_t xTraceHandle;
1423 
1424 	if (ret == 0) {
1425 		xTraceResult = xTraceEventBegin(PSF_EVENT_MUTEX_GIVE_SUCCESS,
1426 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1427 	} else {
1428 		xTraceResult = xTraceEventBegin(PSF_EVENT_MUTEX_GIVE_FAILED,
1429 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1430 	}
1431 
1432 	if (xTraceResult == TRC_SUCCESS) {
1433 		xTraceEventAddPointer(xTraceHandle, (void*)mutex);
1434 		xTraceEventAdd32(xTraceHandle, ret);
1435 		xTraceEventEnd(xTraceHandle);
1436 	}
1437 }
1438 
1439 
1440 /* Conditional variable trace function definitions */
sys_trace_k_condvar_init(struct k_condvar * condvar,int ret)1441 void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret) {
1442 	TraceEventHandle_t xTraceHandle;
1443 
1444 	if (xTraceEventBegin(PSF_EVENT_CONDVAR_INIT,
1445 		sizeof(void*) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
1446 		xTraceEventAddPointer(xTraceHandle, (void*)condvar);
1447 		xTraceEventAdd32(xTraceHandle, ret);
1448 		xTraceEventEnd(xTraceHandle);
1449 	}
1450 }
1451 
/* Trace hook for entry to k_condvar_signal(). Intentionally empty: the
 * event is recorded by the _blocking and _exit hooks instead. */
void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar) {

}
1455 
sys_trace_k_condvar_signal_blocking(struct k_condvar * condvar)1456 void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar) {
1457 	TraceEventHandle_t xTraceHandle;
1458 
1459 	if (xTraceEventBegin(PSF_EVENT_CONDVAR_SIGNAL_BLOCKING, sizeof(void*),
1460 		&xTraceHandle) == TRC_SUCCESS) {
1461 		xTraceEventAddPointer(xTraceHandle, (void*)condvar);
1462 		xTraceEventEnd(xTraceHandle);
1463 	}
1464 }
1465 
sys_trace_k_condvar_signal_exit(struct k_condvar * condvar,int ret)1466 void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret) {
1467 	TraceEventHandle_t xTraceHandle;
1468 
1469 	if (xTraceEventBegin(PSF_EVENT_CONDVAR_SIGNAL_SUCCESS, sizeof(void*) + sizeof(uint32_t),
1470 		&xTraceHandle) == TRC_SUCCESS) {
1471 		xTraceEventAddPointer(xTraceHandle, (void*)condvar);
1472 		xTraceEventAdd32(xTraceHandle, ret);
1473 		xTraceEventEnd(xTraceHandle);
1474 	}
1475 }
1476 
sys_trace_k_condvar_broadcast_enter(struct k_condvar * condvar)1477 void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar) {
1478 	TraceEventHandle_t xTraceHandle;
1479 
1480 	if (xTraceEventBegin(PSF_EVENT_CONDVAR_BROADCAST_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1481 		xTraceEventAddPointer(xTraceHandle, (void*)condvar);
1482 		xTraceEventEnd(xTraceHandle);
1483 	}
1484 }
1485 
sys_trace_k_condvar_broadcast_exit(struct k_condvar * condvar,int ret)1486 void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret) {
1487 	TraceEventHandle_t xTraceHandle;
1488 
1489 	if (xTraceEventBegin(PSF_EVENT_CONDVAR_BROADCAST_EXIT, sizeof(void*) + sizeof(uint32_t),
1490 		&xTraceHandle) == TRC_SUCCESS) {
1491 		xTraceEventAddPointer(xTraceHandle, (void*)condvar);
1492 		xTraceEventAdd32(xTraceHandle, ret);
1493 		xTraceEventEnd(xTraceHandle);
1494 	}
1495 }
1496 
/* Trace hook for entry to k_condvar_wait(): records the condition
 * variable, the associated mutex and the timeout ticks. */
void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_CONDVAR_WAIT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)condvar);
	xTraceEventAddPointer(xHandle, (void*)mutex);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
1508 
/* Traces the return from k_condvar_wait(); event id reflects success (ret == 0) or failure. */
void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(
		ret == 0 ? PSF_EVENT_CONDVAR_WAIT_SUCCESS : PSF_EVENT_CONDVAR_WAIT_FAILURE,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)condvar);
	xTraceEventAddPointer(hEvent, (void*)mutex);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventAdd32(hEvent, ret);
	xTraceEventEnd(hEvent);
}
1531 
1532 
1533 /* Queue trace function definitions */
sys_trace_k_queue_init(struct k_queue * queue)1534 void sys_trace_k_queue_init(struct k_queue *queue) {
1535 	TraceEventHandle_t xTraceHandle;
1536 
1537 	if (xTraceEventBegin(PSF_EVENT_QUEUE_INIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1538 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1539 		xTraceEventEnd(xTraceHandle);
1540 	}
1541 }
1542 
sys_trace_k_queue_cancel_wait(struct k_queue * queue)1543 void sys_trace_k_queue_cancel_wait(struct k_queue *queue) {
1544 	TraceEventHandle_t xTraceHandle;
1545 
1546 	if (xTraceEventBegin(PSF_EVENT_QUEUE_CANCEL_WAIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1547 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1548 		xTraceEventEnd(xTraceHandle);
1549 	}
1550 }
1551 
/**
 * @brief Trace hook for queue insert entry.
 *
 * Intentionally records no event: only the allocating variant is of
 * interest and it is traced by the _blocking/_exit hooks instead.
 * The unused parameters are consumed to avoid -Wunused-parameter
 * warnings in builds with warnings enabled.
 */
void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data) {
	(void)queue;
	(void)data;

	// Ignore non alloc tracing of this event
	if (!alloc) {
		return;
	}
}
1558 
sys_trace_k_queue_queue_insert_blocking(struct k_queue * queue,bool alloc,void * data)1559 void sys_trace_k_queue_queue_insert_blocking(struct k_queue *queue, bool alloc, void *data) {
1560 	// Ignore non alloc tracing of this event
1561 	if (!alloc) {
1562 		return;
1563 	}
1564 
1565 	TraceEventHandle_t xTraceHandle;
1566 
1567 	if (xTraceEventBegin(PSF_EVENT_QUEUE_QUEUE_INSERT_BLOCKING, sizeof(void*) + sizeof(void*),
1568 		&xTraceHandle) == TRC_SUCCESS) {
1569 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1570 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1571 		xTraceEventEnd(xTraceHandle);
1572 	}
1573 }
1574 
sys_trace_k_queue_queue_insert_exit(struct k_queue * queue,bool alloc,void * data,int ret)1575 void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret) {
1576 	// Ignore non alloc tracing of this event
1577 	if (!alloc) {
1578 		return;
1579 	}
1580 
1581 	traceResult xTraceResult;
1582 	TraceEventHandle_t xTraceHandle;
1583 
1584 	if (ret == 0) {
1585 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_QUEUE_INSERT_SUCCESS,
1586 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1587 	} else {
1588 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_QUEUE_INSERT_FAILURE,
1589 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1590 	}
1591 
1592 	if (xTraceResult == TRC_SUCCESS) {
1593 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1594 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1595 		xTraceEventAdd32(xTraceHandle, ret);
1596 		xTraceEventEnd(xTraceHandle);
1597 	}
1598 }
1599 
sys_trace_k_queue_append_enter(struct k_queue * queue,void * data)1600 void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data) {
1601 	TraceEventHandle_t xTraceHandle;
1602 
1603 	if (xTraceEventBegin(PSF_EVENT_QUEUE_APPEND, sizeof(void*) + sizeof(void*),
1604 		&xTraceHandle) == TRC_SUCCESS) {
1605 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1606 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1607 		xTraceEventEnd(xTraceHandle);
1608 	}
1609 }
1610 
/**
 * @brief Trace hook for k_queue_append() exit.
 *
 * Intentionally empty: the append itself is traced on entry. Parameters
 * are consumed to avoid -Wunused-parameter warnings.
 */
void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data) {
	(void)queue;
	(void)data;
}
1613 
sys_trace_k_queue_alloc_append_enter(struct k_queue * queue,void * data)1614 void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data) {
1615 	TraceEventHandle_t xTraceHandle;
1616 
1617 	if (xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_APPEND_BLOCKING, sizeof(void*) + sizeof(void*),
1618 		&xTraceHandle) == TRC_SUCCESS) {
1619 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1620 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1621 		xTraceEventEnd(xTraceHandle);
1622 	}
1623 }
1624 
sys_trace_k_queue_alloc_append_exit(struct k_queue * queue,void * data,int ret)1625 void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret) {
1626 	traceResult xTraceResult;
1627 	TraceEventHandle_t xTraceHandle;
1628 
1629 	if (ret == 0) {
1630 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_APPEND_SUCCESS,
1631 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1632 	} else {
1633 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_APPEND_FAILURE,
1634 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1635 	}
1636 
1637 	if (xTraceResult == TRC_SUCCESS) {
1638 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1639 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1640 		xTraceEventAdd32(xTraceHandle, ret);
1641 		xTraceEventEnd(xTraceHandle);
1642 	}
1643 }
1644 
sys_trace_k_queue_prepend_enter(struct k_queue * queue,void * data)1645 void sys_trace_k_queue_prepend_enter(struct k_queue *queue, void *data) {
1646 	TraceEventHandle_t xTraceHandle;
1647 
1648 	if (xTraceEventBegin(PSF_EVENT_QUEUE_PREPEND, sizeof(void*) + sizeof(void*),
1649 		&xTraceHandle) == TRC_SUCCESS) {
1650 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1651 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1652 		xTraceEventEnd(xTraceHandle);
1653 	}
1654 }
1655 
/**
 * @brief Trace hook for k_queue_prepend() exit.
 *
 * Intentionally empty: the prepend is traced on entry. Parameters are
 * consumed to avoid -Wunused-parameter warnings.
 */
void sys_trace_k_queue_prepend_exit(struct k_queue *queue, void *data) {
	(void)queue;
	(void)data;
}
1658 
sys_trace_k_queue_alloc_prepend_enter(struct k_queue * queue,void * data)1659 void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data) {
1660 	TraceEventHandle_t xTraceHandle;
1661 
1662 	if (xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_PREPEND_BLOCKING, sizeof(void*) + sizeof(void*),
1663 		&xTraceHandle) == TRC_SUCCESS) {
1664 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1665 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1666 		xTraceEventEnd(xTraceHandle);
1667 	}
1668 }
1669 
sys_trace_k_queue_alloc_prepend_exit(struct k_queue * queue,void * data,int ret)1670 void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret) {
1671 	traceResult xTraceResult;
1672 	TraceEventHandle_t xTraceHandle;
1673 
1674 	if (ret == 0) {
1675 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_PREPEND_SUCCESS,
1676 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1677 	} else {
1678 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_ALLOC_PREPEND_FAILURE,
1679 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1680 	}
1681 
1682 	if (xTraceResult == TRC_SUCCESS) {
1683 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1684 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1685 		xTraceEventAdd32(xTraceHandle, ret);
1686 		xTraceEventEnd(xTraceHandle);
1687 	}
1688 }
1689 
sys_trace_k_queue_insert_enter(struct k_queue * queue,void * prev,void * data)1690 void sys_trace_k_queue_insert_enter(struct k_queue *queue, void *prev, void *data) {
1691 	TraceEventHandle_t xTraceHandle;
1692 
1693 	if (xTraceEventBegin(PSF_EVENT_QUEUE_INSERT, sizeof(void*) + sizeof(void*) + sizeof(void*),
1694 		&xTraceHandle) == TRC_SUCCESS) {
1695 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1696 		xTraceEventAddPointer(xTraceHandle, (void*)prev);
1697 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1698 		xTraceEventEnd(xTraceHandle);
1699 	}
1700 }
1701 
/**
 * @brief Trace hook for k_queue_insert() exit.
 *
 * Intentionally empty: the insert is traced on entry. Parameters are
 * consumed to avoid -Wunused-parameter warnings.
 */
void sys_trace_k_queue_insert_exit(struct k_queue *queue, void *prev, void *data) {
	(void)queue;
	(void)prev;
	(void)data;
}
1704 
sys_trace_k_queue_append_list_enter(struct k_queue * queue,void * head,void * tail)1705 void sys_trace_k_queue_append_list_enter(struct k_queue *queue, void *head, void *tail) {
1706 	TraceEventHandle_t xTraceHandle;
1707 
1708 	if (xTraceEventBegin(PSF_EVENT_QUEUE_APPEND_LIST_BLOCKING,
1709 		sizeof(void*) + sizeof(void*) + sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1710 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1711 		xTraceEventAddPointer(xTraceHandle, (void*)head);
1712 		xTraceEventAddPointer(xTraceHandle, (void*)tail);
1713 		xTraceEventEnd(xTraceHandle);
1714 	}
1715 }
1716 
sys_trace_k_queue_append_list_exit(struct k_queue * queue,int ret)1717 void sys_trace_k_queue_append_list_exit(struct k_queue *queue, int ret) {
1718 	traceResult xTraceResult;
1719 	TraceEventHandle_t xTraceHandle;
1720 
1721 	if (ret == 0) {
1722 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_APPEND_LIST_SUCCESS,
1723 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1724 	} else {
1725 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_APPEND_LIST_FAILURE,
1726 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1727 	}
1728 
1729 	if (xTraceResult == TRC_SUCCESS) {
1730 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1731 		xTraceEventAdd32(xTraceHandle, ret);
1732 		xTraceEventEnd(xTraceHandle);
1733 	}
1734 }
1735 
/* Traces entry into k_queue_merge_slist(). */
void sys_trace_k_queue_merge_slist_enter(struct k_queue *queue, sys_slist_t *list) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_QUEUE_MERGE_SLIST_BLOCKING, sizeof(void*) + sizeof(void*),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)queue);
	xTraceEventAddPointer(hEvent, (void*)list);
	xTraceEventEnd(hEvent);
}
1746 
/* Traces the result of k_queue_merge_slist(); event id reflects ret. */
void sys_trace_k_queue_merge_slist_exit(struct k_queue *queue, sys_slist_t *list, int ret) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(
		ret == 0 ? PSF_EVENT_QUEUE_MERGE_SLIST_SUCCESS : PSF_EVENT_QUEUE_MERGE_SLIST_FAILURE,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)queue);
	xTraceEventAddPointer(hEvent, (void*)list);
	xTraceEventAdd32(hEvent, ret);
	xTraceEventEnd(hEvent);
}
1766 
/* Traces a thread blocking in k_queue_get(). */
void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_QUEUE_GET_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)queue);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventEnd(hEvent);
}
1777 
/* Traces the result of k_queue_get(); a NULL return means the wait timed out. */
void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(
		ret != NULL ? PSF_EVENT_QUEUE_GET_SUCCESS : PSF_EVENT_QUEUE_GET_TIMEOUT,
		sizeof(void*) + sizeof(uint32_t) + sizeof(void*), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)queue);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
1797 
sys_trace_k_queue_remove_enter(struct k_queue * queue,void * data)1798 void sys_trace_k_queue_remove_enter(struct k_queue *queue, void *data) {
1799 	TraceEventHandle_t xTraceHandle;
1800 
1801 	if (xTraceEventBegin(PSF_EVENT_QUEUE_REMOVE_BLOCKING, sizeof(void*) + sizeof(void*),
1802 		&xTraceHandle) == TRC_SUCCESS) {
1803 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1804 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1805 		xTraceEventEnd(xTraceHandle);
1806 	}
1807 }
1808 
sys_trace_k_queue_remove_exit(struct k_queue * queue,void * data,bool ret)1809 void sys_trace_k_queue_remove_exit(struct k_queue *queue, void *data, bool ret) {
1810 	traceResult xTraceResult;
1811 	TraceEventHandle_t xTraceHandle;
1812 
1813 	if (ret) {
1814 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_REMOVE_SUCCESS,
1815 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1816 	} else {
1817 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_REMOVE_FAILURE,
1818 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1819 	}
1820 
1821 	if (xTraceResult == TRC_SUCCESS) {
1822 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1823 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1824 		xTraceEventAdd32(xTraceHandle, ret);
1825 		xTraceEventEnd(xTraceHandle);
1826 	}
1827 }
1828 
sys_trace_k_queue_unique_append_enter(struct k_queue * queue,void * data)1829 void sys_trace_k_queue_unique_append_enter(struct k_queue *queue, void *data) {
1830 	TraceEventHandle_t xTraceHandle;
1831 
1832 	if (xTraceEventBegin(PSF_EVENT_QUEUE_UNIQUE_APPEND_BLOCKING, sizeof(void*) + sizeof(void*),
1833 		&xTraceHandle) == TRC_SUCCESS) {
1834 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1835 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1836 		xTraceEventEnd(xTraceHandle);
1837 	}
1838 }
1839 
sys_trace_k_queue_unique_append_exit(struct k_queue * queue,void * data,bool ret)1840 void sys_trace_k_queue_unique_append_exit(struct k_queue *queue, void *data, bool ret) {
1841 	traceResult xTraceResult;
1842 	TraceEventHandle_t xTraceHandle;
1843 
1844 	if (ret) {
1845 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_UNIQUE_APPEND_SUCCESS,
1846 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1847 	} else {
1848 		xTraceResult = xTraceEventBegin(PSF_EVENT_QUEUE_UNIQUE_APPEND_FAILURE,
1849 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1850 	}
1851 
1852 	if (xTraceResult == TRC_SUCCESS) {
1853 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1854 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1855 		xTraceEventAdd32(xTraceHandle, ret);
1856 		xTraceEventEnd(xTraceHandle);
1857 	}
1858 }
1859 
sys_trace_k_queue_peek_head(struct k_queue * queue,void * ret)1860 void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret) {
1861 	TraceEventHandle_t xTraceHandle;
1862 
1863 	if (xTraceEventBegin(PSF_EVENT_QUEUE_PEEK_HEAD, sizeof(void*) + sizeof(void*),
1864 		&xTraceHandle) == TRC_SUCCESS) {
1865 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1866 		xTraceEventAddPointer(xTraceHandle, (void*)ret);
1867 		xTraceEventEnd(xTraceHandle);
1868 	}
1869 }
1870 
sys_trace_k_queue_peek_tail(struct k_queue * queue,void * ret)1871 void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret) {
1872 	TraceEventHandle_t xTraceHandle;
1873 
1874 	if (xTraceEventBegin(PSF_EVENT_QUEUE_PEEK_TAIL, sizeof(void*) + sizeof(void*),
1875 		&xTraceHandle) == TRC_SUCCESS) {
1876 		xTraceEventAddPointer(xTraceHandle, (void*)queue);
1877 		xTraceEventAddPointer(xTraceHandle, (void*)ret);
1878 		xTraceEventEnd(xTraceHandle);
1879 	}
1880 }
1881 
1882 
1883 /* FIFO trace function definitions */
sys_trace_k_fifo_init_enter(struct k_fifo * fifo)1884 void sys_trace_k_fifo_init_enter(struct k_fifo *fifo) {
1885 	TraceEventHandle_t xTraceHandle;
1886 
1887 	if (xTraceEventBegin(PSF_EVENT_FIFO_INIT_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1888 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1889 		xTraceEventEnd(xTraceHandle);
1890 	}
1891 }
1892 
sys_trace_k_fifo_init_exit(struct k_fifo * fifo)1893 void sys_trace_k_fifo_init_exit(struct k_fifo *fifo) {
1894 	TraceEventHandle_t xTraceHandle;
1895 
1896 	if (xTraceEventBegin(PSF_EVENT_FIFO_INIT_EXIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1897 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1898 		xTraceEventEnd(xTraceHandle);
1899 	}
1900 }
1901 
sys_trace_k_fifo_cancel_wait_enter(struct k_fifo * fifo)1902 void sys_trace_k_fifo_cancel_wait_enter(struct k_fifo *fifo) {
1903 	TraceEventHandle_t xTraceHandle;
1904 
1905 	if (xTraceEventBegin(PSF_EVENT_FIFO_CANCEL_WAIT_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1906 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1907 		xTraceEventEnd(xTraceHandle);
1908 	}
1909 }
1910 
sys_trace_k_fifo_cancel_wait_exit(struct k_fifo * fifo)1911 void sys_trace_k_fifo_cancel_wait_exit(struct k_fifo *fifo) {
1912 	TraceEventHandle_t xTraceHandle;
1913 
1914 	if (xTraceEventBegin(PSF_EVENT_FIFO_CANCEL_WAIT_EXIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1915 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1916 		xTraceEventEnd(xTraceHandle);
1917 	}
1918 }
1919 
sys_trace_k_fifo_put_enter(struct k_fifo * fifo,void * data)1920 void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data) {
1921 	TraceEventHandle_t xTraceHandle;
1922 
1923 	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_ENTER, sizeof(void*) + sizeof(void*),
1924 		&xTraceHandle) == TRC_SUCCESS) {
1925 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1926 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1927 		xTraceEventEnd(xTraceHandle);
1928 	}
1929 }
1930 
sys_trace_k_fifo_put_exit(struct k_fifo * fifo,void * data)1931 void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data) {
1932 	TraceEventHandle_t xTraceHandle;
1933 
1934 	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_EXIT, sizeof(void*) + sizeof(void*),
1935 		&xTraceHandle) == TRC_SUCCESS) {
1936 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1937 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1938 		xTraceEventEnd(xTraceHandle);
1939 	}
1940 }
1941 
sys_trace_k_fifo_alloc_put_enter(struct k_fifo * fifo,void * data)1942 void sys_trace_k_fifo_alloc_put_enter(struct k_fifo *fifo, void *data) {
1943 	TraceEventHandle_t xTraceHandle;
1944 
1945 	if (xTraceEventBegin(PSF_EVENT_FIFO_ALLOC_PUT_BLOCKING, sizeof(void*) + sizeof(void*),
1946 		&xTraceHandle) == TRC_SUCCESS) {
1947 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1948 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1949 		xTraceEventEnd(xTraceHandle);
1950 	}
1951 }
1952 
sys_trace_k_fifo_alloc_put_exit(struct k_fifo * fifo,void * data,int ret)1953 void sys_trace_k_fifo_alloc_put_exit(struct k_fifo *fifo, void *data, int ret) {
1954 	traceResult xTraceResult;
1955 	TraceEventHandle_t xTraceHandle;
1956 
1957 	if (ret == 0) {
1958 		xTraceResult = xTraceEventBegin(PSF_EVENT_FIFO_ALLOC_PUT_SUCCESS,
1959 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1960 	} else {
1961 		xTraceResult = xTraceEventBegin(PSF_EVENT_FIFO_ALLOC_PUT_FAILURE,
1962 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
1963 	}
1964 
1965 	if (xTraceResult == TRC_SUCCESS) {
1966 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1967 		xTraceEventAddPointer(xTraceHandle, (void*)data);
1968 		xTraceEventAdd32(xTraceHandle, ret);
1969 		xTraceEventEnd(xTraceHandle);
1970 	}
1971 }
1972 
sys_trace_k_fifo_put_list_enter(struct k_fifo * fifo,void * head,void * tail)1973 void sys_trace_k_fifo_put_list_enter(struct k_fifo *fifo, void *head, void *tail) {
1974 	TraceEventHandle_t xTraceHandle;
1975 
1976 	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_LIST_ENTER,
1977 		sizeof(void*) + sizeof(void*) + sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1978 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1979 		xTraceEventAddPointer(xTraceHandle, (void*)head);
1980 		xTraceEventAddPointer(xTraceHandle, (void*)tail);
1981 		xTraceEventEnd(xTraceHandle);
1982 	}
1983 }
1984 
sys_trace_k_fifo_put_list_exit(struct k_fifo * fifo,void * head,void * tail)1985 void sys_trace_k_fifo_put_list_exit(struct k_fifo *fifo, void *head, void *tail) {
1986 	TraceEventHandle_t xTraceHandle;
1987 
1988 	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_LIST_EXIT,
1989 		sizeof(void*) + sizeof(void*) + sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
1990 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
1991 		xTraceEventAddPointer(xTraceHandle, (void*)head);
1992 		xTraceEventAddPointer(xTraceHandle, (void*)tail);
1993 		xTraceEventEnd(xTraceHandle);
1994 	}
1995 }
1996 
/* Traces entry into k_fifo_put_slist(). */
void sys_trace_k_fifo_put_slist_enter(struct k_fifo *fifo, sys_slist_t *list) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_SLIST_ENTER, sizeof(void*) + sizeof(void*),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)fifo);
	xTraceEventAddPointer(hEvent, (void*)list);
	xTraceEventEnd(hEvent);
}
2007 
/* Traces the return from k_fifo_put_slist(). */
void sys_trace_k_fifo_put_slist_exit(struct k_fifo *fifo, sys_slist_t *list) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_FIFO_PUT_SLIST_EXIT, sizeof(void*) + sizeof(void*),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)fifo);
	xTraceEventAddPointer(hEvent, (void*)list);
	xTraceEventEnd(hEvent);
}
2018 
/* Traces a thread blocking in k_fifo_get(). */
void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_FIFO_GET_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)fifo);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventEnd(hEvent);
}
2029 
/* Traces the result of k_fifo_get(); a NULL return selects the failure event. */
void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(
		ret != NULL ? PSF_EVENT_FIFO_GET_SUCCESS : PSF_EVENT_FIFO_GET_FAILURE,
		sizeof(void*) + sizeof(uint32_t) + sizeof(void*), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)fifo);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
2049 
sys_trace_k_fifo_peek_head_enter(struct k_fifo * fifo)2050 void sys_trace_k_fifo_peek_head_enter(struct k_fifo *fifo) {
2051 	TraceEventHandle_t xTraceHandle;
2052 
2053 	if (xTraceEventBegin(PSF_EVENT_FIFO_PEEK_HEAD_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2054 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
2055 		xTraceEventEnd(xTraceHandle);
2056 	}
2057 }
2058 
sys_trace_k_fifo_peek_head_exit(struct k_fifo * fifo,void * ret)2059 void sys_trace_k_fifo_peek_head_exit(struct k_fifo *fifo, void *ret) {
2060 	TraceEventHandle_t xTraceHandle;
2061 
2062 	if (xTraceEventBegin(PSF_EVENT_FIFO_PEEK_HEAD_EXIT, sizeof(void*) + sizeof(void*),
2063 		&xTraceHandle) == TRC_SUCCESS) {
2064 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
2065 		xTraceEventAddPointer(xTraceHandle, (void*)ret);
2066 		xTraceEventEnd(xTraceHandle);
2067 	}
2068 }
2069 
sys_trace_k_fifo_peek_tail_enter(struct k_fifo * fifo)2070 void sys_trace_k_fifo_peek_tail_enter(struct k_fifo *fifo) {
2071 	TraceEventHandle_t xTraceHandle;
2072 
2073 	if (xTraceEventBegin(PSF_EVENT_FIFO_PEEK_TAIL_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2074 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
2075 		xTraceEventEnd(xTraceHandle);
2076 	}
2077 }
2078 
sys_trace_k_fifo_peek_tail_exit(struct k_fifo * fifo,void * ret)2079 void sys_trace_k_fifo_peek_tail_exit(struct k_fifo *fifo, void *ret) {
2080 	TraceEventHandle_t xTraceHandle;
2081 
2082 	if (xTraceEventBegin(PSF_EVENT_FIFO_PEEK_TAIL_EXIT, sizeof(void*) + sizeof(void*),
2083 		&xTraceHandle) == TRC_SUCCESS) {
2084 		xTraceEventAddPointer(xTraceHandle, (void*)fifo);
2085 		xTraceEventAddPointer(xTraceHandle, (void*)ret);
2086 		xTraceEventEnd(xTraceHandle);
2087 	}
2088 }
2089 
2090 
2091 /* LIFO trace function definitions */
sys_trace_k_lifo_init_enter(struct k_lifo * lifo)2092 void sys_trace_k_lifo_init_enter(struct k_lifo *lifo) {
2093 	TraceEventHandle_t xTraceHandle;
2094 
2095 	if (xTraceEventBegin(PSF_EVENT_LIFO_INIT_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2096 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2097 		xTraceEventEnd(xTraceHandle);
2098 	}
2099 }
2100 
sys_trace_k_lifo_init_exit(struct k_lifo * lifo)2101 void sys_trace_k_lifo_init_exit(struct k_lifo *lifo) {
2102 	TraceEventHandle_t xTraceHandle;
2103 
2104 	if (xTraceEventBegin(PSF_EVENT_LIFO_INIT_EXIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2105 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2106 		xTraceEventEnd(xTraceHandle);
2107 	}
2108 }
2109 
sys_trace_k_lifo_put_enter(struct k_lifo * lifo,void * data)2110 void sys_trace_k_lifo_put_enter(struct k_lifo *lifo, void *data) {
2111 	TraceEventHandle_t xTraceHandle;
2112 
2113 	if (xTraceEventBegin(PSF_EVENT_LIFO_PUT_ENTER, sizeof(void*) + sizeof(void*),
2114 		&xTraceHandle) == TRC_SUCCESS) {
2115 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2116 		xTraceEventAddPointer(xTraceHandle, (void*)data);
2117 		xTraceEventEnd(xTraceHandle);
2118 	}
2119 }
2120 
sys_trace_k_lifo_put_exit(struct k_lifo * lifo,void * data)2121 void sys_trace_k_lifo_put_exit(struct k_lifo *lifo, void *data) {
2122 	TraceEventHandle_t xTraceHandle;
2123 
2124 	if (xTraceEventBegin(PSF_EVENT_LIFO_PUT_EXIT, sizeof(void*) + sizeof(void*),
2125 		&xTraceHandle) == TRC_SUCCESS) {
2126 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2127 		xTraceEventAddPointer(xTraceHandle, (void*)data);
2128 		xTraceEventEnd(xTraceHandle);
2129 	}
2130 }
2131 
sys_trace_k_lifo_alloc_put_enter(struct k_lifo * lifo,void * data)2132 void sys_trace_k_lifo_alloc_put_enter(struct k_lifo *lifo, void *data) {
2133 	TraceEventHandle_t xTraceHandle;
2134 
2135 	if (xTraceEventBegin(PSF_EVENT_LIFO_ALLOC_PUT_BLOCKING, sizeof(void*) + sizeof(void*),
2136 		&xTraceHandle) == TRC_SUCCESS) {
2137 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2138 		xTraceEventAddPointer(xTraceHandle, (void*)data);
2139 		xTraceEventEnd(xTraceHandle);
2140 	}
2141 }
2142 
sys_trace_k_lifo_alloc_put_exit(struct k_lifo * lifo,void * data,int ret)2143 void sys_trace_k_lifo_alloc_put_exit(struct k_lifo *lifo, void *data, int ret) {
2144 	traceResult xTraceResult;
2145 	TraceEventHandle_t xTraceHandle;
2146 
2147 	if (ret == 0) {
2148 		xTraceResult = xTraceEventBegin(PSF_EVENT_LIFO_ALLOC_PUT_SUCCESS,
2149 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2150 	} else {
2151 		xTraceResult = xTraceEventBegin(PSF_EVENT_LIFO_ALLOC_PUT_FAILURE,
2152 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2153 	}
2154 
2155 	if (xTraceResult == TRC_SUCCESS) {
2156 		xTraceEventAddPointer(xTraceHandle, (void*)lifo);
2157 		xTraceEventAddPointer(xTraceHandle, (void*)data);
2158 		xTraceEventAdd32(xTraceHandle, ret);
2159 		xTraceEventEnd(xTraceHandle);
2160 	}
2161 }
2162 
/* Traces a thread blocking in k_lifo_get(). */
void sys_trace_k_lifo_get_enter(struct k_lifo *lifo, k_timeout_t timeout) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_LIFO_GET_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)lifo);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventEnd(hEvent);
}
2173 
/* Traces the result of k_lifo_get(); a NULL return selects the failure event. */
void sys_trace_k_lifo_get_exit(struct k_lifo *lifo, k_timeout_t timeout, void *ret) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(
		ret != NULL ? PSF_EVENT_LIFO_GET_SUCCESS : PSF_EVENT_LIFO_GET_FAILURE,
		sizeof(void*) + sizeof(uint32_t) + sizeof(void*), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)lifo);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
2193 
2194 
2195 /* Stack trace function definitions */
/* Traces initialization of a k_stack with its backing buffer and capacity. */
void sys_trace_k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_STACK_INIT, sizeof(void*) + sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)stack);
	xTraceEventAddPointer(hEvent, (void*)buffer);
	xTraceEventAdd32(hEvent, num_entries);
	xTraceEventEnd(hEvent);
}
2207 
sys_trace_k_stack_alloc_init_enter(struct k_stack * stack,uint32_t num_entries)2208 void sys_trace_k_stack_alloc_init_enter(struct k_stack *stack, uint32_t num_entries) {
2209 	TraceEventHandle_t xTraceHandle;
2210 
2211 	if (xTraceEventBegin(PSF_EVENT_STACK_ALLOC_INIT_BLOCKING, sizeof(void*) + sizeof(uint32_t),
2212 		&xTraceHandle) == TRC_SUCCESS) {
2213 		xTraceEventAddPointer(xTraceHandle, (void*)stack);
2214 		xTraceEventAdd32(xTraceHandle, num_entries);
2215 		xTraceEventEnd(xTraceHandle);
2216 	}
2217 }
2218 
/* Trace hook: result of k_stack_alloc_init(); ret == 0 means success. */
void sys_trace_k_stack_alloc_init_exit(struct k_stack *stack, uint32_t num_entries, int ret) {
	TraceEventHandle_t xHandle;
	/* Both outcomes carry the same payload; only the event code differs. */
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_STACK_ALLOC_INIT_SUCCESS : PSF_EVENT_STACK_ALLOC_INIT_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)stack);
		xTraceEventAdd32(xHandle, num_entries);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2238 
sys_trace_k_stack_cleanup_enter(struct k_stack * stack)2239 void sys_trace_k_stack_cleanup_enter(struct k_stack *stack) {
2240 	TraceEventHandle_t xTraceHandle;
2241 
2242 	if (xTraceEventBegin(PSF_EVENT_STACK_CLEANUP_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2243 		xTraceEventAddPointer(xTraceHandle, (void*)stack);
2244 		xTraceEventEnd(xTraceHandle);
2245 	}
2246 }
2247 
sys_trace_k_stack_cleanup_exit(struct k_stack * stack,int ret)2248 void sys_trace_k_stack_cleanup_exit(struct k_stack *stack, int ret) {
2249 	traceResult xTraceResult;
2250 	TraceEventHandle_t xTraceHandle;
2251 
2252 	if (ret == 0) {
2253 		xTraceResult = xTraceEventBegin(PSF_EVENT_STACK_CLEANUP_SUCCESS,
2254 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2255 	} else {
2256 		xTraceResult = xTraceEventBegin(PSF_EVENT_STACK_CLEANUP_FAILURE,
2257 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2258 	}
2259 
2260 	if (xTraceResult == TRC_SUCCESS) {
2261 		xTraceEventAddPointer(xTraceHandle, (void*)stack);
2262 		xTraceEventAdd32(xTraceHandle, ret);
2263 		xTraceEventEnd(xTraceHandle);
2264 	}
2265 }
2266 
/* Trace hook: entry to k_stack_push(); the pushed value is logged as a pointer-sized field. */
void sys_trace_k_stack_push_enter(struct k_stack *stack, stack_data_t data) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_STACK_PUSH_BLOCKING,
		sizeof(void*) + sizeof(void*), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)stack);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventEnd(xHandle);
}
2277 
/* Trace hook: result of k_stack_push(); ret == 0 means success. */
void sys_trace_k_stack_push_exit(struct k_stack *stack, stack_data_t data, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_STACK_PUSH_SUCCESS : PSF_EVENT_STACK_PUSH_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)stack);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2297 
/* Trace hook: k_stack_pop() is about to block waiting for data. */
void sys_trace_k_stack_pop_blocking(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_STACK_POP_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)stack);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2309 
/* Trace hook: result of k_stack_pop(); ret == 0 means success. */
void sys_trace_k_stack_pop_exit(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_STACK_POP_SUCCESS : PSF_EVENT_STACK_POP_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)stack);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2330 
2331 
2332 /* Message queue trace function definitions */
sys_trace_k_msgq_init(struct k_msgq * msgq)2333 void sys_trace_k_msgq_init(struct k_msgq *msgq) {
2334 	TraceEventHandle_t xTraceHandle;
2335 
2336 	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_INIT,
2337 		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
2338 		&xTraceHandle) == TRC_SUCCESS) {
2339 		xTraceEventAddPointer(xTraceHandle, (void*)msgq);
2340 		xTraceEventAddPointer(xTraceHandle, (void*)msgq->buffer_start);
2341 		xTraceEventAdd32(xTraceHandle, (uint32_t)msgq->msg_size);
2342 		xTraceEventAdd32(xTraceHandle, (uint32_t)msgq->max_msgs);
2343 		xTraceEventEnd(xTraceHandle);
2344 	}
2345 }
2346 
/* Trace hook: entry to k_msgq_alloc_init(), before the ring buffer is allocated. */
void sys_trace_k_msgq_alloc_init_enter(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)msgq);
	xTraceEventAdd32(xHandle, (uint32_t)msg_size);
	xTraceEventAdd32(xHandle, (uint32_t)max_msgs);
	xTraceEventEnd(xHandle);
}
2358 
/* Trace hook: result of k_msgq_alloc_init(); ret == 0 means success. */
void sys_trace_k_msgq_alloc_init_exit(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_SUCCESS : PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)msgq);
		xTraceEventAdd32(xHandle, (uint32_t)msg_size);
		xTraceEventAdd32(xHandle, (uint32_t)max_msgs);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2379 
sys_trace_k_msgq_cleanup_enter(struct k_msgq * msgq)2380 void sys_trace_k_msgq_cleanup_enter(struct k_msgq *msgq) {
2381 	TraceEventHandle_t xTraceHandle;
2382 
2383 	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_CLEANUP_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2384 		xTraceEventAddPointer(xTraceHandle, (void*)msgq);
2385 		xTraceEventEnd(xTraceHandle);
2386 	}
2387 }
2388 
sys_trace_k_msgq_cleanup_exit(struct k_msgq * msgq,int ret)2389 void sys_trace_k_msgq_cleanup_exit(struct k_msgq *msgq, int ret) {
2390 	traceResult xTraceResult;
2391 	TraceEventHandle_t xTraceHandle;
2392 
2393 	if (ret == 0) {
2394 		xTraceResult = xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_CLEANUP_SUCCESS,
2395 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2396 	} else {
2397 		xTraceResult = xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_CLEANUP_TIMEOUT,
2398 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2399 	}
2400 
2401 	if (xTraceResult == TRC_SUCCESS) {
2402 		xTraceEventAddPointer(xTraceHandle, (void*)msgq);
2403 		xTraceEventAdd32(xTraceHandle, ret);
2404 		xTraceEventEnd(xTraceHandle);
2405 	}
2406 }
2407 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_msgq_put_blocking() and sys_trace_k_msgq_put_exit() below. */
void sys_trace_k_msgq_put_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
}
2410 
/* Trace hook: k_msgq_put() is about to block waiting for free space. */
void sys_trace_k_msgq_put_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_PUT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)msgq);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2422 
/* Trace hook: result of k_msgq_put(); ret == 0 means success. */
void sys_trace_k_msgq_put_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MESSAGEQUEUE_PUT_SUCCESS : PSF_EVENT_MESSAGEQUEUE_PUT_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)msgq);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2443 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_msgq_get_blocking() and sys_trace_k_msgq_get_exit() below. */
void sys_trace_k_msgq_get_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
}
2446 
/* Trace hook: k_msgq_get() is about to block waiting for a message. */
void sys_trace_k_msgq_get_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_GET_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)msgq);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2458 
/* Trace hook: result of k_msgq_get(); ret == 0 means success. */
void sys_trace_k_msgq_get_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MESSAGEQUEUE_GET_SUCCESS : PSF_EVENT_MESSAGEQUEUE_GET_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)msgq);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2479 
sys_trace_k_msgq_peek(struct k_msgq * msgq,void * data,int ret)2480 void sys_trace_k_msgq_peek(struct k_msgq *msgq, void *data, int ret) {
2481 	traceResult xTraceResult;
2482 	TraceEventHandle_t xTraceHandle;
2483 
2484 	if (ret == 0) {
2485 		xTraceResult = xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_PEEK_SUCCESS,
2486 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2487 	} else {
2488 		xTraceResult = xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_PEEK_FAILED,
2489 			sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2490 	}
2491 
2492 	if (xTraceResult == TRC_SUCCESS) {
2493 		xTraceEventAddPointer(xTraceHandle, (void*)msgq);
2494 		xTraceEventAddPointer(xTraceHandle, (void*)data);
2495 		xTraceEventAdd32(xTraceHandle, (uint32_t)ret);
2496 		xTraceEventEnd(xTraceHandle);
2497 	}
2498 }
2499 
sys_trace_k_msgq_purge(struct k_msgq * msgq)2500 void sys_trace_k_msgq_purge(struct k_msgq *msgq) {
2501 	TraceEventHandle_t xTraceHandle;
2502 
2503 	if (xTraceEventBegin(PSF_EVENT_MESSAGEQUEUE_PURGE, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2504 		xTraceEventAddPointer(xTraceHandle, (void*)msgq);
2505 		xTraceEventEnd(xTraceHandle);
2506 	}
2507 }
2508 
2509 
2510 /* Mailbox trace function definitions */
sys_trace_k_mbox_init(struct k_mbox * mbox)2511 void sys_trace_k_mbox_init(struct k_mbox *mbox) {
2512 	TraceEventHandle_t xTraceHandle;
2513 
2514 	if (xTraceEventBegin(PSF_EVENT_MAILBOX_INIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2515 		xTraceEventAddPointer(xTraceHandle, (void*)mbox);
2516 		xTraceEventEnd(xTraceHandle);
2517 	}
2518 }
2519 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_mbox_message_put_blocking() and sys_trace_k_mbox_message_put_exit() below. */
void sys_trace_k_mbox_message_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
}
2522 
/* Trace hook: mailbox message put is about to block waiting for a receiver. */
void sys_trace_k_mbox_message_put_blocking(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MAILBOX_MESSAGE_PUT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)mbox);
	xTraceEventAddPointer(xHandle, (void*)tx_msg);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2534 
/* Trace hook: result of the mailbox message put; ret == 0 means success. */
void sys_trace_k_mbox_message_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MAILBOX_MESSAGE_PUT_SUCCESS : PSF_EVENT_MAILBOX_MESSAGE_PUT_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)mbox);
		xTraceEventAddPointer(xHandle, (void*)tx_msg);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2555 
/* Trace hook: entry to k_mbox_put(); logged with the blocking event code. */
void sys_trace_k_mbox_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MAILBOX_PUT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)mbox);
	xTraceEventAddPointer(xHandle, (void*)tx_msg);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2567 
/* Trace hook: result of k_mbox_put(); ret == 0 means success. */
void sys_trace_k_mbox_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MAILBOX_PUT_SUCCESS : PSF_EVENT_MAILBOX_PUT_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)mbox);
		xTraceEventAddPointer(xHandle, (void*)tx_msg);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2588 
sys_trace_k_mbox_async_put_enter(struct k_mbox * mbox,struct k_sem * sem)2589 void sys_trace_k_mbox_async_put_enter(struct k_mbox *mbox, struct k_sem *sem) {
2590 	TraceEventHandle_t xTraceHandle;
2591 
2592 	if (xTraceEventBegin(PSF_EVENT_MAILBOX_ASYNC_PUT_ENTER, sizeof(void*) + sizeof(void*),
2593 		&xTraceHandle) == TRC_SUCCESS) {
2594 		xTraceEventAddPointer(xTraceHandle, (void*)mbox);
2595 		xTraceEventAddPointer(xTraceHandle, (void*)sem);
2596 		xTraceEventEnd(xTraceHandle);
2597 	}
2598 }
2599 
sys_trace_k_mbox_async_put_exit(struct k_mbox * mbox,struct k_sem * sem)2600 void sys_trace_k_mbox_async_put_exit(struct k_mbox *mbox, struct k_sem *sem) {
2601 	TraceEventHandle_t xTraceHandle;
2602 
2603 	if (xTraceEventBegin(PSF_EVENT_MAILBOX_ASYNC_PUT_EXIT, sizeof(void*) + sizeof(void*),
2604 		&xTraceHandle) == TRC_SUCCESS) {
2605 		xTraceEventAddPointer(xTraceHandle, (void*)mbox);
2606 		xTraceEventAddPointer(xTraceHandle, (void*)sem);
2607 		xTraceEventEnd(xTraceHandle);
2608 	}
2609 }
2610 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_mbox_get_blocking() and sys_trace_k_mbox_get_exit() below. */
void sys_trace_k_mbox_get_enter(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
}
2613 
/* Trace hook: k_mbox_get() is about to block waiting for a message. */
void sys_trace_k_mbox_get_blocking(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_MAILBOX_GET_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(void*) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)mbox);
	xTraceEventAddPointer(xHandle, (void*)rx_msg);
	xTraceEventAddPointer(xHandle, (void*)buffer);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2627 
/* Trace hook: result of k_mbox_get(); ret == 0 means success. */
void sys_trace_k_mbox_get_exit(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_MAILBOX_GET_SUCCESS : PSF_EVENT_MAILBOX_GET_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)mbox);
		xTraceEventAddPointer(xHandle, (void*)rx_msg);
		xTraceEventAddPointer(xHandle, (void*)buffer);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2651 
2652 /* @note: Hook not implemented in Zephyr kernel */
sys_trace_k_mbox_data_get(struct k_mbox_msg * rx_msg,void * buffer)2653 void sys_trace_k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer) {
2654 	TraceEventHandle_t xTraceHandle;
2655 
2656 	if (xTraceEventBegin(PSF_EVENT_MAILBOX_DATA_GET,
2657 		sizeof(void*) + sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2658 		xTraceEventAddPointer(xTraceHandle, (void*)rx_msg);
2659 		xTraceEventAddPointer(xTraceHandle, (void*)buffer);
2660 		xTraceEventEnd(xTraceHandle);
2661 	}
2662 }
2663 
2664 
2665 /* Pipe trace function definitions */
/* Trace hook: records initialization of a k_pipe (object, ring buffer, buffer size). */
void sys_trace_k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_INIT,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAddPointer(xHandle, (void*)buffer);
	xTraceEventAdd32(xHandle, (uint32_t)size);
	xTraceEventEnd(xHandle);
}
2677 
sys_trace_k_pipe_cleanup_enter(struct k_pipe * pipe)2678 void sys_trace_k_pipe_cleanup_enter(struct k_pipe *pipe) {
2679 	TraceEventHandle_t xTraceHandle;
2680 
2681 	if (xTraceEventBegin(PSF_EVENT_PIPE_CLEANUP_BLOCKING, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
2682 		xTraceEventAddPointer(xTraceHandle, (void*)pipe);
2683 		xTraceEventEnd(xTraceHandle);
2684 	}
2685 }
2686 
sys_trace_k_pipe_cleanup_exit(struct k_pipe * pipe,int ret)2687 void sys_trace_k_pipe_cleanup_exit(struct k_pipe *pipe, int ret) {
2688 	traceResult xTraceResult;
2689 	TraceEventHandle_t xTraceHandle;
2690 
2691 	if (ret == 0) {
2692 		xTraceResult = xTraceEventBegin(PSF_EVENT_PIPE_CLEANUP_SUCCESS,
2693 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2694 	} else {
2695 		xTraceResult = xTraceEventBegin(PSF_EVENT_PIPE_CLEANUP_FAILURE,
2696 			sizeof(void*) + sizeof(uint32_t), &xTraceHandle);
2697 	}
2698 
2699 	if (xTraceResult == TRC_SUCCESS) {
2700 		xTraceEventAddPointer(xTraceHandle, (void*)pipe);
2701 		xTraceEventAdd32(xTraceHandle, ret);
2702 		xTraceEventEnd(xTraceHandle);
2703 	}
2704 }
2705 
/* Trace hook: entry to k_pipe_alloc_init(), before the ring buffer is allocated. */
void sys_trace_k_pipe_alloc_init_enter(struct k_pipe *pipe, size_t size) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_ALLOC_INIT_BLOCKING,
		sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAdd32(xHandle, (uint32_t)size);
	xTraceEventEnd(xHandle);
}
2716 
/* Trace hook: result of k_pipe_alloc_init(); ret == 0 means success. */
void sys_trace_k_pipe_alloc_init_exit(struct k_pipe *pipe, size_t size, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_PIPE_ALLOC_INIT_SUCCESS : PSF_EVENT_PIPE_ALLOC_INIT_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)pipe);
		xTraceEventAdd32(xHandle, (uint32_t)size);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2736 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_pipe_put_blocking() and sys_trace_k_pipe_put_exit() below. */
void sys_trace_k_pipe_put_enter(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {

}
2740 
/* Trace hook: k_pipe_put() is about to block waiting for buffer space. */
void sys_trace_k_pipe_put_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_PUT_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventAdd32(xHandle, (uint32_t)bytes_to_write);
	xTraceEventAddPointer(xHandle, (void*)bytes_written);
	xTraceEventAdd32(xHandle, (uint32_t)min_xfer);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2756 
/* Trace hook: result of k_pipe_put(); ret == 0 means success. */
void sys_trace_k_pipe_put_exit(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_PIPE_PUT_SUCCESS : PSF_EVENT_PIPE_PUT_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)pipe);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, (uint32_t)bytes_to_write);
		xTraceEventAddPointer(xHandle, (void*)bytes_written);
		xTraceEventAdd32(xHandle, (uint32_t)min_xfer);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2782 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_pipe_get_blocking() and sys_trace_k_pipe_get_exit() below. */
void sys_trace_k_pipe_get_enter(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {

}
2786 
/* Trace hook: k_pipe_get() is about to block waiting for data. */
void sys_trace_k_pipe_get_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_GET_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAddPointer(xHandle, (void*)data);
	xTraceEventAdd32(xHandle, (uint32_t)bytes_to_read);
	xTraceEventAddPointer(xHandle, (void*)bytes_read);
	xTraceEventAdd32(xHandle, (uint32_t)min_xfer);
	xTraceEventAdd32(xHandle, timeout.ticks);
	xTraceEventEnd(xHandle);
}
2802 
/* Trace hook: result of k_pipe_get(); ret == 0 means success. */
void sys_trace_k_pipe_get_exit(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xHandle;
	uint32_t uiEventCode = (ret == 0) ?
		PSF_EVENT_PIPE_GET_SUCCESS : PSF_EVENT_PIPE_GET_TIMEOUT;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&xHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xHandle, (void*)pipe);
		xTraceEventAddPointer(xHandle, (void*)data);
		xTraceEventAdd32(xHandle, (uint32_t)bytes_to_read);
		xTraceEventAddPointer(xHandle, (void*)bytes_read);
		xTraceEventAdd32(xHandle, (uint32_t)min_xfer);
		xTraceEventAdd32(xHandle, timeout.ticks);
		xTraceEventAdd32(xHandle, ret);
		xTraceEventEnd(xHandle);
	}
}
2828 
/* Trace hook: entry to k_pipe_block_put() (memory-block based put with completion semaphore). */
void sys_trace_k_pipe_block_put_enter(struct k_pipe *pipe, struct k_mem_block *block, size_t size, struct k_sem *sem) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_BLOCK_PUT_ENTER,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAddPointer(xHandle, (void*)block);
	xTraceEventAdd32(xHandle, (uint32_t)size);
	xTraceEventAddPointer(xHandle, (void*)sem);
	xTraceEventEnd(xHandle);
}
2842 
/* Trace hook: exit from k_pipe_block_put(). */
void sys_trace_k_pipe_block_put_exit(struct k_pipe *pipe, struct k_mem_block *block, size_t size, struct k_sem *sem) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_PIPE_BLOCK_PUT_EXIT,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(void*),
		&xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)pipe);
	xTraceEventAddPointer(xHandle, (void*)block);
	xTraceEventAdd32(xHandle, (uint32_t)size);
	xTraceEventAddPointer(xHandle, (void*)sem);
	xTraceEventEnd(xHandle);
}
2856 
2857 
2858 /* Memory heap trace function definitions */
/* Trace hook: records initialization of a k_heap (object, memory region, size). */
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes) {
	TraceEventHandle_t xHandle;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_INIT,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xHandle) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(xHandle, (void*)h);
	xTraceEventAddPointer(xHandle, (void*)mem);
	xTraceEventAdd32(xHandle, (uint32_t)bytes);
	xTraceEventEnd(xHandle);
}
2870 
/* Trace hook: entry to k_heap_alloc(); logged with the blocking event code. */
void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
	TraceEventHandle_t xTraceHandle;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_ALLOC_BLOCKING, sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t),
		&xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)h);
		/* Explicit cast matches the other size_t payloads in this file and
		 * silences the implicit size_t -> uint32_t narrowing on 64-bit targets. */
		xTraceEventAdd32(xTraceHandle, (uint32_t)bytes);
		xTraceEventAdd32(xTraceHandle, timeout.ticks);
		xTraceEventEnd(xTraceHandle);
	}
}
2882 
/* Trace hook: result of k_heap_alloc(); a NULL return means the allocation failed. */
void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret) {
	traceResult xTraceResult;
	TraceEventHandle_t xTraceHandle;

	if (ret != NULL) {
		xTraceResult = xTraceEventBegin(PSF_EVENT_KHEAP_ALLOC_SUCCESS,
			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*), &xTraceHandle);
	} else {
		xTraceResult = xTraceEventBegin(PSF_EVENT_KHEAP_ALLOC_FAILURE,
			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*), &xTraceHandle);
	}

	if (xTraceResult == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)h);
		/* Explicit cast matches the other size_t payloads in this file and
		 * silences the implicit size_t -> uint32_t narrowing on 64-bit targets. */
		xTraceEventAdd32(xTraceHandle, (uint32_t)bytes);
		xTraceEventAdd32(xTraceHandle, timeout.ticks);
		xTraceEventAddPointer(xTraceHandle, (void*)ret);
		xTraceEventEnd(xTraceHandle);
	}
}
2903 
/* Intentionally empty: no event is emitted on entry. The operation is traced by
 * sys_trace_k_heap_aligned_alloc_blocking() and sys_trace_k_heap_aligned_alloc_exit() below. */
void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {

}
2907 
/**
 * @brief Trace hook emitted when k_heap_aligned_alloc() blocks.
 *
 * The final 32-bit payload word is always 0 (reserved/padding field in
 * the event layout).
 *
 * @param[in] h Heap being allocated from
 * @param[in] bytes Requested size in bytes
 * @param[in] timeout Allocation timeout
 */
void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_ALIGNED_ALLOC_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, bytes);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventAdd32(hEvent, 0);
	xTraceEventEnd(hEvent);
}
2921 
/**
 * @brief Trace hook emitted when k_heap_aligned_alloc() returns.
 *
 * Emits a SUCCESS event when a block was returned and a FAILURE event
 * otherwise; both carry the same payload layout.
 *
 * There seems to be an issue with k_heap and forever timeouts where if there
 * is not enough memory for direct allocation the system stops trying instantly
 * and returns NULL. The original code special-cased K_FOREVER here, but both
 * NULL branches emitted the exact same FAILURE event, so they are folded into
 * a single branch (behavior unchanged).
 *
 * @param[in] h Heap allocated from
 * @param[in] bytes Requested size in bytes
 * @param[in] timeout Allocation timeout
 * @param[in] ret Allocated block, or NULL on failure
 */
void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret) {
	traceResult xTraceResult;
	TraceEventHandle_t xTraceHandle;

	if (ret == NULL) {
		xTraceResult = xTraceEventBegin(PSF_EVENT_KHEAP_ALIGNED_ALLOC_FAILURE,
			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*), &xTraceHandle);
	} else {
		xTraceResult = xTraceEventBegin(PSF_EVENT_KHEAP_ALIGNED_ALLOC_SUCCESS,
			sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*), &xTraceHandle);
	}

	if (xTraceResult == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)h);
		xTraceEventAdd32(xTraceHandle, (uint32_t)bytes);
		xTraceEventAdd32(xTraceHandle, timeout.ticks);
		xTraceEventAddPointer(xTraceHandle, (void*)ret);
		xTraceEventEnd(xTraceHandle);
	}
}
2949 
sys_trace_k_heap_free(struct k_heap * h,void * mem)2950 void sys_trace_k_heap_free(struct k_heap *h, void *mem) {
2951 	TraceEventHandle_t xTraceHandle;
2952 
2953 	if (xTraceEventBegin(PSF_EVENT_KHEAP_FREE, sizeof(void*) + sizeof(void*),
2954 		&xTraceHandle) == TRC_SUCCESS) {
2955 		xTraceEventAddPointer(xTraceHandle, (void*)h);
2956 		xTraceEventAddPointer(xTraceHandle, (void*)mem);
2957 		xTraceEventEnd(xTraceHandle);
2958 	}
2959 }
2960 
/**
 * @brief Trace hook emitted on entry to k_aligned_alloc() (system heap).
 *
 * @param[in] h System heap
 * @param[in] align Requested alignment
 * @param[in] size Requested size in bytes
 */
void sys_trace_k_heap_sys_k_aligned_alloc_enter(struct k_heap *h, size_t align, size_t size) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, align);
	xTraceEventAdd32(hEvent, size);
	xTraceEventEnd(hEvent);
}
2972 
/**
 * @brief Trace hook emitted when k_aligned_alloc() (system heap) returns.
 *
 * @param[in] h System heap
 * @param[in] align Requested alignment
 * @param[in] size Requested size in bytes
 * @param[in] ret Allocated block, or NULL on failure
 */
void sys_trace_k_heap_sys_k_aligned_alloc_exit(struct k_heap *h, size_t align, size_t size, void *ret) {
	TraceEventHandle_t hEvent;
	uint32_t uiEventCode = (ret != NULL) ? PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_SUCCESS
					     : PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, align);
	xTraceEventAdd32(hEvent, size);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
2993 
/**
 * @brief Trace hook emitted on entry to k_malloc().
 *
 * @param[in] h System heap
 * @param[in] size Requested size in bytes
 */
void sys_trace_k_heap_sys_k_malloc_enter(struct k_heap *h, size_t size) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_SYS_MALLOC_BLOCKING, sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, size);
	xTraceEventEnd(hEvent);
}
3004 
/**
 * @brief Trace hook emitted when k_malloc() returns.
 *
 * @param[in] h System heap
 * @param[in] size Requested size in bytes
 * @param[in] ret Allocated block, or NULL on failure
 */
void sys_trace_k_heap_sys_k_malloc_exit(struct k_heap *h, size_t size, void *ret) {
	TraceEventHandle_t hEvent;
	uint32_t uiEventCode = (ret != NULL) ? PSF_EVENT_KHEAP_SYS_MALLOC_SUCCESS
					     : PSF_EVENT_KHEAP_SYS_MALLOC_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(void*), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, size);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
3024 
sys_trace_k_heap_sys_k_free_enter(struct k_heap * h)3025 void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h) {
3026 	TraceEventHandle_t xTraceHandle;
3027 
3028 	if (xTraceEventBegin(PSF_EVENT_KHEAP_SYS_FREE_ENTER, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
3029 		xTraceEventAddPointer(xTraceHandle, (void*)h);
3030 		xTraceEventEnd(xTraceHandle);
3031 	}
3032 }
3033 
sys_trace_k_heap_sys_k_free_exit(struct k_heap * h)3034 void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h) {
3035 	TraceEventHandle_t xTraceHandle;
3036 
3037 	if (xTraceEventBegin(PSF_EVENT_KHEAP_SYS_FREE_EXIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
3038 		xTraceEventAddPointer(xTraceHandle, (void*)h);
3039 		xTraceEventEnd(xTraceHandle);
3040 	}
3041 }
3042 
/**
 * @brief Trace hook stub; intentionally empty — no Tracealyzer event is
 * mapped to this Zephyr hook (the calloc-specific hooks below carry the
 * traced information).
 */
void sys_trace_k_heap_sys_k_enter(struct k_heap *h, size_t nmemb, size_t size) {

}
3046 
/**
 * @brief Trace hook stub; intentionally empty — no Tracealyzer event is
 * mapped to this Zephyr hook (the calloc-specific hooks below carry the
 * traced information).
 */
void sys_trace_k_heap_sys_k_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {

}
3050 
/**
 * @brief Trace hook emitted on entry to k_calloc().
 *
 * @param[in] h System heap
 * @param[in] nmemb Number of elements requested
 * @param[in] size Size of each element in bytes
 */
void sys_trace_k_heap_sys_k_calloc_enter(struct k_heap *h, size_t nmemb, size_t size) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_KHEAP_SYS_CALLOC_BLOCKING,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, nmemb);
	xTraceEventAdd32(hEvent, size);
	xTraceEventEnd(hEvent);
}
3062 
/**
 * @brief Trace hook emitted when k_calloc() returns.
 *
 * @param[in] h System heap
 * @param[in] nmemb Number of elements requested
 * @param[in] size Size of each element in bytes
 * @param[in] ret Allocated block, or NULL on failure
 */
void sys_trace_k_heap_sys_k_calloc_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {
	TraceEventHandle_t hEvent;
	uint32_t uiEventCode = (ret != NULL) ? PSF_EVENT_KHEAP_SYS_CALLOC_SUCCESS
					     : PSF_EVENT_KHEAP_SYS_CALLOC_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(void*),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)h);
	xTraceEventAdd32(hEvent, nmemb);
	xTraceEventAdd32(hEvent, size);
	xTraceEventAddPointer(hEvent, (void*)ret);
	xTraceEventEnd(hEvent);
}
3083 
3084 
3085 /* Memory slab trace function definitions */
/**
 * @brief Trace hook emitted by k_mem_slab_init().
 *
 * Emits SUCCESS when init returned 0, otherwise FAILURE; the payload is
 * read back from the slab object rather than the individual parameters.
 *
 * @param[in] slab Slab object that was initialized
 * @param[in] buffer Backing buffer (unused here; read via slab->buffer)
 * @param[in] block_size Block size (unused here; read via slab->block_size)
 * @param[in] num_blocks Block count (unused here; read via slab->num_blocks)
 * @param[in] ret k_mem_slab_init() return code
 */
void sys_trace_k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks, int ret) {
	TraceEventHandle_t hEvent;
	uint32_t uiEventCode = (ret == 0) ? PSF_EVENT_MEMORY_SLAB_INIT_SUCCESS
					  : PSF_EVENT_MEMORY_SLAB_INIT_FAILURE;

	if (xTraceEventBegin(uiEventCode,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)slab);
	xTraceEventAddPointer(hEvent, (void*)slab->buffer);
	xTraceEventAdd32(hEvent, (uint32_t)slab->block_size);
	xTraceEventAdd32(hEvent, (uint32_t)slab->num_blocks);
	xTraceEventAdd32(hEvent, ret);
	/* NOTE(review): num_blocks is emitted twice; presumably the host-side
	 * event format expects a "free blocks" count here (all blocks are free
	 * right after init) — confirm against the Tracealyzer event layout. */
	xTraceEventAdd32(hEvent, (uint32_t)slab->num_blocks);
	xTraceEventEnd(hEvent);
}
3110 
/**
 * @brief Trace hook stub; intentionally empty — the blocking case is traced
 * by sys_trace_k_mem_slab_alloc_blocking() and the result by
 * sys_trace_k_mem_slab_alloc_exit().
 */
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {

}
3114 
/**
 * @brief Trace hook emitted when k_mem_slab_alloc() blocks.
 *
 * @param[in] slab Slab being allocated from
 * @param[in] mem Output pointer supplied by the caller
 * @param[in] timeout Allocation timeout
 */
void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_MEMORY_SLAB_ALLOC_BLOCKING,
		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)slab);
	xTraceEventAddPointer(hEvent, (void*)mem);
	xTraceEventAdd32(hEvent, timeout.ticks);
	xTraceEventEnd(hEvent);
}
3126 
/**
 * @brief Trace hook emitted when k_mem_slab_alloc() returns.
 *
 * Event selection:
 *  - ret == 0                 -> SUCCESS (payload additionally carries
 *                                slab->num_blocks as a final word)
 *  - ret == -ENOMEM / -EAGAIN -> TIMEOUT
 *  - anything else            -> ERROR
 *
 * The three original branches duplicated the common payload; this version
 * selects the event code and payload size up front and emits the shared
 * fields once (behavior unchanged).
 *
 * @param[in] slab Slab allocated from
 * @param[in] mem Output pointer supplied by the caller
 * @param[in] timeout Allocation timeout
 * @param[in] ret k_mem_slab_alloc() return code
 */
void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout, int ret) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiEventCode;
	uint32_t uiPayloadSize = sizeof(void*) + sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t);

	if (ret == 0) {
		uiEventCode = PSF_EVENT_MEMORY_SLAB_ALLOC_SUCCESS;
		uiPayloadSize += sizeof(uint32_t); /* extra num_blocks field on success */
	} else if (ret == -ENOMEM || ret == -EAGAIN) {
		uiEventCode = PSF_EVENT_MEMORY_SLAB_ALLOC_TIMEOUT;
	} else {
		uiEventCode = PSF_EVENT_MEMORY_SLAB_ALLOC_ERROR;
	}

	if (xTraceEventBegin(uiEventCode, uiPayloadSize, &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAddPointer(xTraceHandle, (void*)slab);
		xTraceEventAddPointer(xTraceHandle, (void*)mem);
		xTraceEventAdd32(xTraceHandle, timeout.ticks);
		xTraceEventAdd32(xTraceHandle, ret);
		if (ret == 0) {
			xTraceEventAdd32(xTraceHandle, slab->num_blocks);
		}
		xTraceEventEnd(xTraceHandle);
	}
}
3163 
sys_trace_k_mem_slab_free_exit(struct k_mem_slab * slab,void ** mem)3164 void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void **mem) {
3165 	TraceEventHandle_t xTraceHandle;
3166 
3167 	if (xTraceEventBegin(PSF_EVENT_MEMORY_SLAB_FREE,
3168 		sizeof(void*) + sizeof(void*) + sizeof(uint32_t), &xTraceHandle) == TRC_SUCCESS) {
3169 		xTraceEventAddPointer(xTraceHandle, (void*)slab);
3170 		xTraceEventAddPointer(xTraceHandle, (void*)mem);
3171 		xTraceEventAdd32(xTraceHandle, slab->num_blocks);
3172 		xTraceEventEnd(xTraceHandle);
3173 	}
3174 }
3175 
3176 
3177 /* Timer trace function definitions */
/**
 * @brief Trace hook emitted by k_timer_init().
 *
 * @param[in] timer Timer being initialized
 * @param[in] expiry_fn Expiry callback
 * @param[in] stop_fn Stop callback
 */
void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_expiry_t stop_fn) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_TIMER_INIT,
		sizeof(void*) + sizeof(void*) + sizeof(void*), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)timer);
	xTraceEventAddPointer(hEvent, (void*)expiry_fn);
	xTraceEventAddPointer(hEvent, (void*)stop_fn);
	xTraceEventEnd(hEvent);
}
3189 
/**
 * @brief Trace hook emitted by k_timer_start().
 *
 * @param[in] timer Timer being started
 * @param[in] duration Initial expiry delay
 * @param[in] period Repeat period
 */
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_TIMER_START,
		sizeof(void*) + sizeof(uint32_t) + sizeof(uint32_t), &hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)timer);
	xTraceEventAdd32(hEvent, duration.ticks);
	xTraceEventAdd32(hEvent, period.ticks);
	xTraceEventEnd(hEvent);
}
3201 
sys_trace_k_timer_stop(struct k_timer * timer)3202 void sys_trace_k_timer_stop(struct k_timer *timer) {
3203 	TraceEventHandle_t xTraceHandle;
3204 
3205 	if (xTraceEventBegin(PSF_EVENT_TIMER_STOP, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
3206 		xTraceEventAddPointer(xTraceHandle, (void*)timer);
3207 		xTraceEventEnd(xTraceHandle);
3208 	}
3209 }
3210 
sys_trace_k_timer_status_sync_blocking(struct k_timer * timer)3211 void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer) {
3212 	TraceEventHandle_t xTraceHandle;
3213 
3214 	if (xTraceEventBegin(PSF_EVENT_TIMER_STATUS_SYNC_AWAIT, sizeof(void*), &xTraceHandle) == TRC_SUCCESS) {
3215 		xTraceEventAddPointer(xTraceHandle, (void*)timer);
3216 		xTraceEventEnd(xTraceHandle);
3217 	}
3218 }
3219 
/**
 * @brief Trace hook emitted when k_timer_status_sync() returns.
 *
 * @param[in] timer Timer that was awaited
 * @param[in] result Status value returned by k_timer_status_sync()
 */
void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result) {
	TraceEventHandle_t hEvent;

	if (xTraceEventBegin(PSF_EVENT_TIMER_STATUS_SYNC_EXIT, sizeof(void*) + sizeof(uint32_t),
		&hEvent) != TRC_SUCCESS) {
		return;
	}

	xTraceEventAddPointer(hEvent, (void*)timer);
	xTraceEventAdd32(hEvent, result);
	xTraceEventEnd(hEvent);
}
3230 
3231 
3232 /* Syscall trace function definitions */
/**
 * @brief Trace hook emitted on syscall entry.
 *
 * Payload: 32-bit syscall id followed by the syscall name (not
 * NUL-terminated), zero-padded to the event's 4-byte alignment.
 *
 * The original computed strlen(name) twice per invocation; the length is
 * now computed once and reused (hot path — runs on every traced syscall).
 *
 * @param[in] id Syscall identifier
 * @param[in] name Syscall name (assumed non-NULL — TODO confirm with
 *                 the Zephyr tracing hook contract)
 */
void sys_trace_syscall_enter(uint32_t id, const char *name) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiRemainingBytes = 0;
	uint32_t uiNull = 0;
	size_t uxNameLength = strlen(name);

	if (xTraceEventBegin(PSF_EVENT_SYSTEM_SYSCALL_ENTER, sizeof(uint32_t) + uxNameLength, &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAdd32(xTraceHandle, id);

		/* Add name */
		xTraceEventAddData(xTraceHandle, (void*)name, uxNameLength);

		/* Events are 4-bytes aligned, pad remainder of data */
		xTraceEventPayloadRemaining(xTraceHandle, &uiRemainingBytes);
		xTraceEventAddData(xTraceHandle, (void*)&uiNull, uiRemainingBytes);

		xTraceEventEnd(xTraceHandle);
	}
}
3251 
/**
 * @brief Trace hook emitted on syscall exit.
 *
 * Payload: 32-bit syscall id followed by the syscall name (not
 * NUL-terminated), zero-padded to the event's 4-byte alignment.
 *
 * The original computed strlen(name) twice per invocation; the length is
 * now computed once and reused (hot path — runs on every traced syscall).
 *
 * @param[in] id Syscall identifier
 * @param[in] name Syscall name (assumed non-NULL — TODO confirm with
 *                 the Zephyr tracing hook contract)
 */
void sys_trace_syscall_exit(uint32_t id, const char *name) {
	TraceEventHandle_t xTraceHandle;
	uint32_t uiRemainingBytes = 0;
	uint32_t uiNull = 0;
	size_t uxNameLength = strlen(name);

	if (xTraceEventBegin(PSF_EVENT_SYSTEM_SYSCALL_EXIT, sizeof(uint32_t) + uxNameLength, &xTraceHandle) == TRC_SUCCESS) {
		xTraceEventAdd32(xTraceHandle, id);

		/* Add name */
		xTraceEventAddData(xTraceHandle, (void*)name, uxNameLength);

		/* Events are 4-bytes aligned, pad remainder of data */
		xTraceEventPayloadRemaining(xTraceHandle, &uiRemainingBytes);
		xTraceEventAddData(xTraceHandle, (void*)&uiNull, uiRemainingBytes);

		xTraceEventEnd(xTraceHandle);
	}
}
3270 
3271 
3272 /* Legacy trace functions that are pending refactoring/removal by
3273  * the Zephyr team.
3274  */
/**
 * @brief Generic ISR-entry hook. Begins tracing on the shared ISR handle
 * (xHandleISR) used for all Zephyr ISRs without dedicated instrumentation.
 */
void sys_trace_isr_enter(void) {
	xTraceISRBegin(xHandleISR);
}
3278 
/**
 * @brief Generic ISR-exit hook.
 *
 * NOTE(review): the literal 0 presumably means "no task switch pending" —
 * confirm against the xTraceISREnd() parameter documentation.
 */
void sys_trace_isr_exit(void) {
	xTraceISREnd(0);
}
3282 
/**
 * @brief Legacy hook; intentionally empty — no Tracealyzer event is mapped
 * to an ISR exit that returns to the scheduler.
 */
void sys_trace_isr_exit_to_scheduler(void) {
}
3285 
/**
 * @brief Legacy idle hook; intentionally empty — idle is not traced via
 * this entry point.
 */
void sys_trace_idle(void) {
}
3288 
/**
 * @brief Legacy generic hook; intentionally empty — the id parameter is
 * ignored (kept only to satisfy the Zephyr tracing hook signature).
 */
void sys_trace_void(unsigned int id) {
}
3291