1 /*
2  * Trace Recorder for Tracealyzer v4.8.1.hotfix1
3  * Copyright 2023 Percepio AB
4  * www.percepio.com
5  *
6  * SPDX-License-Identifier: Apache-2.0
7  *
8  * The Zephyr specific parts of the trace recorder
9  */
10 
11 #include <zephyr/init.h>
12 #include <zephyr/kernel.h>
13 #include <string.h>
14 #include <trcRecorder.h>
15 
16 
#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)

/* syscall extension name */
#define SYSCALLS_EXTENSION_NAME "syscalls"

/* These definitions depend on the KERNEL_VERSION parameters generated during the Zephyr build process */
#define SYSCALLS_EXTENSION_VERSION_MAJOR KERNEL_VERSION_MAJOR
#define SYSCALLS_EXTENSION_VERSION_MINOR KERNEL_VERSION_MINOR
#define SYSCALLS_EXTENSION_VERSION_PATCH KERNEL_PATCHLEVEL

/* Event codes for Enter and Exit: one enter event and one exit event per
 * syscall id in [0, K_SYSCALL_LIMIT]. */
#define SYSCALLS_EXTENSION_EVENT_COUNT ((K_SYSCALL_LIMIT + 1) * 2)

/* Registers the "syscalls" extension and stores its handle in the kernel port data. */
#define xSyscallsExtensionEnable() (xTraceExtensionCreate(SYSCALLS_EXTENSION_NAME, SYSCALLS_EXTENSION_VERSION_MAJOR, SYSCALLS_EXTENSION_VERSION_MINOR, SYSCALLS_EXTENSION_VERSION_PATCH, SYSCALLS_EXTENSION_EVENT_COUNT, &pxKernelPortData->xSyscallsExtensionHandle))
/* Enter events use event ids [0, K_SYSCALL_LIMIT] within the extension's id space. */
#define xSyscallsExtensionEnter(id) prvTraceStoreEvent_None(xTraceExtensionGetEventId(pxKernelPortData->xSyscallsExtensionHandle, id))
/* Exit events are offset by (K_SYSCALL_LIMIT + 1) to occupy the second half of the id space. */
#define xSyscallsExtensionExit(id) prvTraceStoreEvent_None(xTraceExtensionGetEventId(pxKernelPortData->xSyscallsExtensionHandle, id + (K_SYSCALL_LIMIT + 1)))

#endif
35 
/* Generic Zephyr ISR handle used for all Zephyr ISRs that the user haven't
 * manually added tracing for. Registered in tracelyzer_pre_kernel_init(). */
static TraceISRHandle_t xHandleISR;

/* Trace recorder control thread stack, sized by the recorder configuration. */
static K_THREAD_STACK_DEFINE(TzCtrl_thread_stack, (TRC_CFG_CTRL_TASK_STACK_SIZE));

/* Forward declarations */
/* Emits a name event for an object; used on delete when
 * TRC_SEND_NAME_ONLY_ON_DELETE is enabled (see sys_trace_k_thread_sched_abort). */
traceResult prvTraceObjectSendNameEvent(void* pvObject, const char* szName, uint32_t uiLength);
45 
46 
47 /**
48  * @brief TzCtrl_thread_entry
49  *
50  * Task for sending the trace data from the internal buffer to the stream
51  * interface (assuming TRC_STREAM_PORT_USE_INTERNAL_BUFFER == 1) and for
52  * receiving commands from Tracealyzer. Also does some diagnostics.
53  *
54  * @param[in] _args
55  */
TzCtrl_thread_entry(void * _args)56 void TzCtrl_thread_entry(void *_args)
57 {
58 	while (1)
59 	{
60 		(void)xTraceTzCtrl();
61 
62 		k_msleep((TRC_CFG_CTRL_TASK_DELAY));
63 	}
64 }
65 
66 
/**
 * @brief Kernel-port private state, placed in the buffer supplied to
 * xTraceKernelPortInitialize().
 */
typedef struct TraceKernelPortData
{
	TraceHeapHandle_t xSystemHeapHandle;            /* Unused here; kept zero (see xTraceKernelPortGetSystemHeapHandle). */
	TraceKernelPortTaskHandle_t xTzCtrlHandle;      /* Thread object for the TzCtrl control thread. */
	TraceExtensionHandle_t xSyscallsExtensionHandle; /* Handle for the optional "syscalls" extension. */
} TraceKernelPortData_t;

/* Points into the caller-provided data buffer after initialization. */
static TraceKernelPortData_t* pxKernelPortData TRC_CFG_RECORDER_DATA_ATTRIBUTE;
78 
xTraceKernelPortInitialize(TraceKernelPortDataBuffer_t * pxBuffer)79 traceResult xTraceKernelPortInitialize(TraceKernelPortDataBuffer_t* pxBuffer)
80 {
81 	TRC_ASSERT_EQUAL_SIZE(TraceKernelPortDataBuffer_t, TraceKernelPortData_t);
82 
83 	if (pxBuffer == 0)
84 	{
85 		return TRC_FAIL;
86 	}
87 
88 	pxKernelPortData = (TraceKernelPortData_t*)pxBuffer;
89 
90 	pxKernelPortData->xSystemHeapHandle = 0;
91 	pxKernelPortData->xSyscallsExtensionHandle = 0;
92 
93 	return TRC_SUCCESS;
94 }
95 
/* No Zephyr-specific work is needed when the recorder is enabled. */
traceResult xTraceKernelPortEnable(void)
{
	return TRC_SUCCESS;
}

/* No system heap is registered by this port; a null handle is returned. */
TraceHeapHandle_t xTraceKernelPortGetSystemHeapHandle(void)
{
	return 0;
}

#if defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0)
/* Queries unused stack space for a thread via the Zephyr kernel.
 * NOTE(review): the int return of k_thread_stack_space_get() is passed
 * through as a traceResult — assumed compatible (0 == success); confirm. */
traceResult xTraceKernelPortGetUnusedStack(void* thread, TraceUnsignedBaseType_t* puxUnusedStack)
{
	return k_thread_stack_space_get(thread, (size_t*)puxUnusedStack);
}
#endif /* defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0) */

/* This port always reports the scheduler as not suspended. */
unsigned char xTraceKernelPortIsSchedulerSuspended(void)
{
	return 0;
}
117 
/* Object-naming helpers: each of the following vTraceSet<Object>Name()
 * functions associates a display name with a kernel object address via
 * xTraceObjectSetNameWithoutHandle(). They are identical in behavior; the
 * distinct names exist for API clarity at the call sites. Results are
 * deliberately ignored. */

void vTraceSetKernelObjectName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetWorkQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetHeapName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetSemaphoreName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMutexName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetCondvarName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetFIFOQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetLIFOQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetStackName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMessageQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMailboxName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetPipeName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMemoryHeapName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMemorySlabName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetTimerName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
197 
/**
 * @brief Initialize aspects of the recorder that must precede the
 * kernel initialization (scheduling, threads, etc.).
 *
 * Registered with SYS_INIT below at the PRE_KERNEL_2 stage.
 * NOTE(review): "tracelyzer" (sic) matches the SYS_INIT registration; renaming
 * would require changing both.
 *
 * @return 0 always (SYS_INIT success).
 */
static int tracelyzer_pre_kernel_init(void)
{
	/* Must run before the extension/enable calls below. */
	xTraceInitialize();

#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
	xSyscallsExtensionEnable();
#endif

	/* Start mode is chosen via Kconfig; default is to wait for a host start
	 * command without blocking. */
#ifdef CONFIG_PERCEPIO_TRC_START_MODE_START
	(void)xTraceEnable(TRC_START);
#elif CONFIG_PERCEPIO_TRC_START_MODE_START_AWAIT_HOST
	(void)xTraceEnable(TRC_START_AWAIT_HOST);
#else
	(void)xTraceEnable(TRC_START_FROM_HOST);
#endif

	/* Create the catch-all ISR handle used for untraced Zephyr ISRs.
	 * NOTE(review): -32 is passed as the reported ISR priority — confirm
	 * the intended meaning of this value. */
	(void)xTraceISRRegister("Zephyr ISR", -32, &xHandleISR);

	return 0;
}
225 
/**
 * @brief Initialize aspects of the recorder that depend on the kernel
 * being initialized.
 *
 * Creates and names the TzCtrl control thread. Registered with SYS_INIT
 * below at the POST_KERNEL stage.
 *
 * @return 0 always (SYS_INIT success).
 */
static int tracealyzer_post_kernel_init(void)
{
	/* Create controller task; the thread object lives in the kernel-port
	 * data buffer set up by xTraceKernelPortInitialize(). */
	k_thread_create(&pxKernelPortData->xTzCtrlHandle, TzCtrl_thread_stack,
		K_THREAD_STACK_SIZEOF(TzCtrl_thread_stack),
		(k_thread_entry_t)TzCtrl_thread_entry,
		NULL, NULL, NULL,
		(TRC_CFG_CTRL_TASK_PRIORITY),
		0,
		K_NO_WAIT);

	/* Set controller task name (best effort; result ignored). */
	k_thread_name_set(&pxKernelPortData->xTzCtrlHandle, "TzCtrl");

	return 0;
}

/* Specify recorder module initialization stages */
SYS_INIT(tracelyzer_pre_kernel_init, PRE_KERNEL_2, 0);
SYS_INIT(tracealyzer_post_kernel_init, POST_KERNEL, 0);
252 
253 
254 
255 
/* Thread trace function definitions */

/* Records entry to k_thread_foreach() with the callback and user data. */
void sys_trace_k_thread_foreach_enter(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_FOREACH_ENTER, (TraceUnsignedBaseType_t)user_cb, (TraceUnsignedBaseType_t)user_data);
}

/* Records exit from k_thread_foreach(); parameters are unused. */
void sys_trace_k_thread_foreach_exit(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_FOREACH_EXIT);
}

/* Records entry to k_thread_foreach_unlocked() with the callback and user data. */
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_FOREACH_UNLOCKED_ENTER, (TraceUnsignedBaseType_t)user_cb, (TraceUnsignedBaseType_t)user_data);
}

/* Records exit from k_thread_foreach_unlocked(); parameters are unused. */
void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_FOREACH_UNLOCKED_EXIT);
}
272 
/* Records creation of a thread.
 *
 * Since this port uses a slightly different task/thread register event than
 * the generic recorder, the entry table is updated manually here: an entry is
 * created for the thread address and its priority is stored as entry state.
 * The thread is also registered with the stack monitor, and its name (if any,
 * and if CONFIG_THREAD_NAME is enabled) is attached to the entry. */
void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size, int prio) {
	/* Since we have a slightly different task/thread register event
	 * we manually update the entry table here */
	TraceEntryHandle_t xEntryHandle;

	TRACE_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();

	/* If an entry cannot be created, bail out without tracing anything. */
	if (xTraceEntryCreateWithAddress((void*)thread, &xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	/* Store the thread priority in state slot 0 of the entry. */
	xTraceEntrySetState(xEntryHandle, 0, prio);
	TRACE_EXIT_CRITICAL_SECTION();

	/* Register task with stack monitor */
	xTraceStackMonitorAdd((void*)thread);

	(void)xTraceEventCreate3(PSF_EVENT_THREAD_INIT, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)stack_size, (TraceUnsignedBaseType_t)prio);

#ifdef CONFIG_THREAD_NAME
	/* Attach the Zephyr thread name to the trace entry, if one is set. */
	if (strlen(thread->name) > 0) {
		xTraceObjectSetName(xEntryHandle, thread->name);
	}
#endif
}
301 
/* Records the current thread dropping to user mode with its entry point and
 * the three entry arguments. */
void sys_trace_k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3) {
	(void)xTraceEventCreate5(
		PSF_EVENT_THREAD_USER_MODE_ENTER,
		(TraceUnsignedBaseType_t)k_current_get(),
		(TraceUnsignedBaseType_t)entry,
		(TraceUnsignedBaseType_t)p1,
		(TraceUnsignedBaseType_t)p2,
		(TraceUnsignedBaseType_t)p3
	);
}

/* Records assignment of a resource-pool heap to a thread. */
void sys_trace_k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_HEAP_ASSIGN, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)heap);
}

/* Records a blocking k_thread_join() call with its timeout in ticks. */
void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_JOIN_BLOCKING, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)timeout.ticks);
}

/* Records the outcome of k_thread_join(); ret == 0 maps to the success event,
 * any other value to the timeout event. The timeout parameter is unused. */
void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_THREAD_JOIN_SUCCESS : PSF_EVENT_THREAD_JOIN_TIMEOUT, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_sleep() with the timeout in ticks. */
void sys_trace_k_thread_sleep_enter(k_timeout_t timeout) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SLEEP_ENTER, (TraceUnsignedBaseType_t)timeout.ticks);
}

/* Records exit from k_sleep(); only the return value is traced. */
void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SLEEP_EXIT, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_msleep() with the duration in milliseconds. */
void sys_trace_k_thread_msleep_enter(int32_t ms) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_MSLEEP_ENTER, (TraceUnsignedBaseType_t)ms);
}

/* Records exit from k_msleep(); parameters are unused. */
void sys_trace_k_thread_msleep_exit(int32_t ms, int ret) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_MSLEEP_EXIT);
}

/* Records entry to k_usleep() with the duration in microseconds. */
void sys_trace_k_thread_usleep_enter(int32_t us) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_USLEEP_ENTER, (TraceUnsignedBaseType_t)us);
}

/* Records exit from k_usleep(); only the return value is traced. */
void sys_trace_k_thread_usleep_exit(int32_t us, int ret) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_USLEEP_EXIT, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_busy_wait() with the wait time in microseconds. */
void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_BUSY_WAIT_ENTER, (TraceUnsignedBaseType_t)usec_to_wait);
}

/* Records exit from k_busy_wait(); the parameter is unused. */
void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_BUSY_WAIT_EXIT);
}

/* Records a k_yield() call. */
void sys_trace_k_thread_yield() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_YIELD);
}

/* Records a k_wakeup() call for the given thread. */
void sys_trace_k_thread_wakeup(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_WAKEUP, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread) {
	/* Intentionally left empty, see k_thread_sched_abort for implementation */
}

/* Records a k_thread_start() call for the given thread. */
void sys_trace_k_thread_start(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_START, (TraceUnsignedBaseType_t)thread);
}
372 
sys_trace_k_thread_priority_set(struct k_thread * thread)373 void sys_trace_k_thread_priority_set(struct k_thread *thread) {
374 	if (xTraceObjectSetStateWithoutHandle((void*)thread, k_thread_priority_get(thread)) == TRC_FAIL)
375 	{
376 		return;
377 	}
378 
379 	(void)xTraceEventCreate2(PSF_EVENT_THREAD_SET_PRIORITY, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)k_thread_priority_get(thread));
380 }
381 
/* Records a k_thread_suspend() call for the given thread. */
void sys_trace_k_thread_suspend(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SUSPEND, (TraceUnsignedBaseType_t)thread);
}

/* Records a k_thread_resume() call for the given thread. */
void sys_trace_k_thread_resume(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_RESUME, (TraceUnsignedBaseType_t)thread);
}

/* Mirrors a successful k_thread_name_set() into the thread's trace entry. */
void sys_trace_k_thread_name_set(struct k_thread *thread, int ret) {
	if (ret == 0) {
		xTraceObjectSetNameWithoutHandle((void*)thread, thread->name);
	}
}

/* No event is recorded on switch-out; switch-in carries the full context. */
void sys_trace_k_thread_switched_out(void) {
}

/* Records a context switch to the current thread with its priority. */
void sys_trace_k_thread_switched_in(void) {
	xTraceTaskSwitch(k_current_get(), k_thread_priority_get(k_current_get()));
}

/* Intentionally empty; thread info is not traced by this port. */
void sys_trace_k_thread_info(struct k_thread *thread) {
}
406 
/* Thread scheduler trace function definitions */

/* Records a k_sched_lock() call. */
void sys_trace_k_thread_sched_lock() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_SCHED_LOCK);
}

/* Records a k_sched_unlock() call. */
void sys_trace_k_thread_sched_unlock() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_SCHED_UNLOCK);
}

/* Records the scheduler waking the given thread. */
void sys_trace_k_thread_sched_wakeup(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_WAKEUP, (TraceUnsignedBaseType_t)thread);
}
sys_trace_k_thread_sched_abort(struct k_thread * thread)420 void sys_trace_k_thread_sched_abort(struct k_thread *thread) {
421 	TraceEntryHandle_t xEntryHandle;
422 
423 #if (TRC_SEND_NAME_ONLY_ON_DELETE == 1)
424 	uint32_t uiNameLength;
425 #endif
426 
427 	TRACE_ALLOC_CRITICAL_SECTION();
428 	TRACE_ENTER_CRITICAL_SECTION();
429 
430 	/* Fetch entry handle */
431 	if (xTraceEntryFind((void*)thread, &xEntryHandle) == TRC_FAIL)
432 	{
433 		TRACE_EXIT_CRITICAL_SECTION();
434 		return;
435 	}
436 
437 #if (TRC_SEND_NAME_ONLY_ON_DELETE == 1)
438 	if (strlen(thread->name) > 0) {
439 		/* Send name event because this is a delete */
440 		for (uiNameLength = 0; (thread->name[uiNameLength] != 0) && (uiNameLength < 128); uiNameLength++) {}
441 
442 		prvTraceObjectSendNameEvent(thread, thread->name, uiNameLength);
443 	}
444 #endif /* (TRC_SEND_NAME_ONLY_ON_DELETE == 1) */
445 
446 	/* Delete entry */
447 	if (xTraceEntryDelete(xEntryHandle) == TRC_FAIL)
448 	{
449 		TRACE_EXIT_CRITICAL_SECTION();
450 		return;
451 	}
452 
453 	TRACE_EXIT_CRITICAL_SECTION();
454 
455 	/* Remove thread from stack monitor */
456 	xTraceStackMonitorRemove((void*)thread);
457 
458 	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_ABORT, (TraceUnsignedBaseType_t)thread);
459 }
460 
/* Records the scheduler changing a thread's priority. */
void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_SCHED_PRIORITY_SET, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)prio);
}

/* Marks the given thread as ready in the recorder. */
void sys_trace_k_thread_sched_ready(struct k_thread *thread) {
	xTraceTaskReady((void*)thread);
}

/* Intentionally empty; pend transitions are not traced by this port. */
void sys_trace_k_thread_sched_pend(struct k_thread *thread) {

}

/* Records the scheduler resuming the given thread. */
void sys_trace_k_thread_sched_resume(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_RESUME, (TraceUnsignedBaseType_t)thread);
}

/* Records the scheduler suspending the given thread. */
void sys_trace_k_thread_sched_suspend(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_SUSPEND, (TraceUnsignedBaseType_t)thread);
}
480 
481 
/* Work trace function definitions */

/* Records initialization of a work item with its handler. */
void sys_trace_k_work_init(struct k_work *work, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_INIT, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)handler);
}

/* Records entry to k_work_submit_to_queue(). */
void sys_trace_k_work_submit_to_queue_enter(struct k_work_q *queue, struct k_work *work) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_SUBMIT_TO_QUEUE_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)work);
}

/* Records the outcome of k_work_submit_to_queue(); non-negative ret maps to
 * the success event. */
void sys_trace_k_work_submit_to_queue_exit(struct k_work_q *queue, struct k_work *work, int ret) {
	(void)xTraceEventCreate3(
		ret >= 0 ? PSF_EVENT_WORK_SUBMIT_TO_QUEUE_SUCCESS : PSF_EVENT_WORK_SUBMIT_TO_QUEUE_FAILURE,
		(TraceUnsignedBaseType_t)queue,
		(TraceUnsignedBaseType_t)work,
		(TraceUnsignedBaseType_t)ret
	);
}

/* Records entry to k_work_submit(). */
void sys_trace_k_work_submit_enter(struct k_work *work) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_SUBMIT_BLOCKING, (TraceUnsignedBaseType_t)work);
}

/* Records the outcome of k_work_submit(); non-negative ret maps to success. */
void sys_trace_k_work_submit_exit(struct k_work *work, int ret) {
	(void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_WORK_SUBMIT_SUCCESS : PSF_EVENT_WORK_SUBMIT_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_flush(). */
void sys_trace_k_work_flush_enter(struct k_work *work, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_FLUSH_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync);
}

/* Intentionally empty; the blocking phase of a flush is not traced. */
void sys_trace_k_work_flush_blocking(struct k_work *work, struct k_work_sync *sync, k_timeout_t timeout) {

}

/* Records exit from k_work_flush(); always uses the success event, with the
 * bool result carried as the second argument. */
void sys_trace_k_work_flush_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_FLUSH_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_cancel(). */
void sys_trace_k_work_cancel_enter(struct k_work *work) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_CANCEL_BLOCKING, (TraceUnsignedBaseType_t)work);
}

/* Records exit from k_work_cancel(); always uses the success event, with ret
 * carried as the second argument. */
void sys_trace_k_work_cancel_exit(struct k_work *work, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_CANCEL_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_cancel_sync(). */
void sys_trace_k_work_cancel_sync_enter(struct k_work *work, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_CANCEL_SYNC_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync);
}

/* Intentionally empty; the blocking phase of a sync cancel is not traced. */
void sys_trace_k_work_cancel_sync_blocking(struct k_work *work, struct k_work_sync *sync) {

}

/* Records exit from k_work_cancel_sync(); always uses the success event. */
void sys_trace_k_work_cancel_sync_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate3(PSF_EVENT_WORK_CANCEL_SYNC_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync, (TraceUnsignedBaseType_t)ret);
}
539 
540 
/* Work queue trace function definitions */

/* Records entry to k_work_queue_start() with the queue's configuration. */
void sys_trace_k_work_queue_start_enter(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	(void)xTraceEventCreate5(
		PSF_EVENT_WORK_QUEUE_START_BLOCKING,
		(TraceUnsignedBaseType_t)queue,
		(TraceUnsignedBaseType_t)stack,
		(TraceUnsignedBaseType_t)stack_size,
		(TraceUnsignedBaseType_t)prio,
		(TraceUnsignedBaseType_t)cfg
	);
}

/* Records exit from k_work_queue_start(); only the queue is traced. */
void sys_trace_k_work_queue_start_exit(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_QUEUE_START_SUCCESS, (TraceUnsignedBaseType_t)queue);
}

/* Records entry to k_work_queue_drain(). */
void sys_trace_k_work_queue_drain_enter(struct k_work_q *queue, bool plug) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_QUEUE_DRAIN_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)plug);
}

/* Records the outcome of k_work_queue_drain(); non-negative ret maps to success. */
void sys_trace_k_work_queue_drain_exit(struct k_work_q *queue, bool plug, int ret) {
	(void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_WORK_QUEUE_DRAIN_SUCCESS : PSF_EVENT_WORK_QUEUE_DRAIN_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_queue_unplug(). */
void sys_trace_k_work_queue_unplug_enter(struct k_work_q *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_QUEUE_UNPLUG_BLOCKING, (TraceUnsignedBaseType_t)queue);
}

/* Records the outcome of k_work_queue_unplug(); ret == 0 maps to success. */
void sys_trace_k_work_queue_unplug_exit(struct k_work_q *queue, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_WORK_QUEUE_UNPLUG_SUCCESS : PSF_EVENT_WORK_QUEUE_UNPLUG_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}
572 
573 
/* Work delayable trace function definitions */

/* Records initialization of a delayable work item with its handler. */
void sys_trace_k_work_delayable_init(struct k_work_delayable *dwork, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_INIT, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)handler);
}

/* Intentionally empty; only the exit of schedule-for-queue is traced. */
void sys_trace_k_work_schedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}

/* Records the outcome of k_work_schedule_for_queue(); ret values 0 and 1 map
 * to the success event. */
void sys_trace_k_work_schedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate4(((ret == 0) || (ret == 1)) ? PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_schedule() with the delay in ticks. */
void sys_trace_k_work_schedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_SCHEDULE_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks);
}

/* Records the outcome of k_work_schedule(); ret values 0 and 1 map to success. */
void sys_trace_k_work_schedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate2(((ret == 0) || (ret == 1)) ? PSF_EVENT_DWORK_SCHEDULE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FAILURE, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

/* Intentionally empty; only the exit of reschedule-for-queue is traced. */
void sys_trace_k_work_reschedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}

/* Records the outcome of k_work_reschedule_for_queue(); ret values 0, 1 and 2
 * map to the success event. */
void sys_trace_k_work_reschedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate4(((ret == 0) || (ret == 1) || (ret == 2)) ? PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_reschedule() with the delay in ticks. */
void sys_trace_k_work_reschedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_RESCHEDULE_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks);
}

/* Records the outcome of k_work_reschedule(); ret values 0, 1 and 2 map to success. */
void sys_trace_k_work_reschedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate2(((ret == 0) || (ret == 1) || (ret == 2)) ? PSF_EVENT_DWORK_RESCHEDULE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FAILURE, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_flush_delayable(). */
void sys_trace_k_work_flush_delayable_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_FLUSH_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)sync);
}

/* Records exit from k_work_flush_delayable(); always uses the success event. */
void sys_trace_k_work_flush_delayable_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_FLUSH_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

/* Intentionally empty; only the exit of cancel-delayable is traced. */
void sys_trace_k_work_cancel_delayable_enter(struct k_work_delayable *dwork) {
}

/* Records exit from k_work_cancel_delayable(); always uses the success event. */
void sys_trace_k_work_cancel_delayable_exit(struct k_work_delayable *dwork, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_cancel_delayable_sync(). */
void sys_trace_cancel_delayable_sync_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)sync);
}

/* Records exit from k_work_cancel_delayable_sync(); always uses the success event. */
void sys_trace_cancel_delayable_sync_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}
631 
632 
/* Work poll trace function definitions */

/* Records entry to k_work_poll_init() with the handler. */
void sys_trace_k_work_poll_init_enter(struct k_work_poll *work, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_PWORK_INIT_ENTER, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)handler);
}

/* Records exit from k_work_poll_init(); the handler parameter is unused. */
void sys_trace_k_work_poll_init_exit(struct k_work_poll *work, k_work_handler_t handler) {
	(void)xTraceEventCreate1(PSF_EVENT_PWORK_INIT_EXIT, (TraceUnsignedBaseType_t)work);
}

/* Records entry to k_work_poll_submit_to_queue() with the poll events and
 * the timeout in ticks. */
void sys_trace_k_work_poll_submit_to_queue_enter(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate5(
		PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_BLOCKING,
		(TraceUnsignedBaseType_t)work_q,
		(TraceUnsignedBaseType_t)work,
		(TraceUnsignedBaseType_t)events,
		(TraceUnsignedBaseType_t)num_events,
		(TraceUnsignedBaseType_t)timeout.ticks
	);
}

/* Intentionally empty; the blocking phase of the submission is not traced. */
void sys_trace_k_work_poll_submit_to_queue_blocking(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {

}

/* Records the outcome of k_work_poll_submit_to_queue(); ret == 0 maps to success. */
void sys_trace_k_work_poll_submit_to_queue_exit(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_SUCCESS : PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_FAILURE, (TraceUnsignedBaseType_t)work_q, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

/* Records entry to k_work_poll_submit() with the poll events and the timeout
 * in ticks. */
void sys_trace_k_work_poll_submit_enter(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate4(PSF_EVENT_PWORK_SUBMIT_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)num_events, (TraceUnsignedBaseType_t)timeout.ticks);
}
664 
sys_trace_k_work_poll_submit_exit(struct k_work_poll * work,struct k_poll_event * events,int num_events,k_timeout_t timeout,int ret)665 void sys_trace_k_work_poll_submit_exit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
666 	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PWORK_SUBMIT_SUCCESS : PSF_EVENT_PWORK_SUBMIT_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
667 }
668 
sys_trace_k_work_poll_cancel_enter(struct k_work_poll * work)669 void sys_trace_k_work_poll_cancel_enter(struct k_work_poll *work) {
670 	(void)xTraceEventCreate1(PSF_EVENT_PWORK_CANCEL_BLOCKING, (TraceUnsignedBaseType_t)work);
671 }
672 
sys_trace_k_work_poll_cancel_exit(struct k_work_poll * work,int ret)673 void sys_trace_k_work_poll_cancel_exit(struct k_work_poll *work, int ret) {
674 	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PWORK_CANCEL_SUCCESS : PSF_EVENT_PWORK_CANCEL_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
675 }
676 
677 
/* Poll API trace function definitions */

void sys_trace_k_poll_api_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj) {
	(void)xTraceEventCreate4(PSF_EVENT_POLL_EVENT_INIT, (TraceUnsignedBaseType_t)event, (TraceUnsignedBaseType_t)type, (TraceUnsignedBaseType_t)mode, (TraceUnsignedBaseType_t)obj);
}

/* Entry of k_poll(): logged as a "blocking" event carrying the timeout
 * in ticks. */
void sys_trace_k_poll_api_event_poll_enter(struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_POLL_POLL_BLOCKING, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)num_events, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_poll_api_event_poll_exit(struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_POLL_POLL_SUCCESS : PSF_EVENT_POLL_POLL_FAILURE, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_poll_api_signal_init(struct k_poll_signal *signal) {
	(void)xTraceEventCreate1(PSF_EVENT_POLL_SIGNAL_INIT, (TraceUnsignedBaseType_t)signal);
}

void sys_trace_k_poll_api_signal_reset(struct k_poll_signal *signal) {
	(void)xTraceEventCreate1(PSF_EVENT_POLL_SIGNAL_RESET, (TraceUnsignedBaseType_t)signal);
}

/* Records the output-pointer arguments of k_poll_signal_check() themselves,
 * not the values they point at. */
void sys_trace_k_poll_api_signal_check(struct k_poll_signal *signal, unsigned int *signaled, int *result) {
	(void)xTraceEventCreate3(PSF_EVENT_POLL_SIGNAL_CHECK, (TraceUnsignedBaseType_t)signal, (TraceUnsignedBaseType_t)signaled, (TraceUnsignedBaseType_t)result);
}

void sys_trace_k_poll_api_signal_raise(struct k_poll_signal *signal, int result, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_POLL_SIGNAL_RAISE_SUCCESS : PSF_EVENT_POLL_SIGNAL_RAISE_FAILURE, (TraceUnsignedBaseType_t)signal, (TraceUnsignedBaseType_t)result, (TraceUnsignedBaseType_t)ret);
}
706 
707 
/* Semaphore trace function definitions */

/* k_sem_init(): logs the requested initial count and limit plus the
 * semaphore's current count and the return code. The nonzero-ret event
 * code is named ..._TIMEOUT upstream although init failure is not a
 * timeout. */
void sys_trace_k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit, int ret) {
	(void)xTraceEventCreate5(ret == 0 ? PSF_EVENT_SEMAPHORE_CREATE_SUCCESS : PSF_EVENT_SEMAPHORE_CREATE_TIMEOUT, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)initial_count, (TraceUnsignedBaseType_t)limit, (TraceUnsignedBaseType_t)sem->count, (TraceUnsignedBaseType_t)ret);
}

/* Logged on entry to k_sem_give(); reads sem->count at call time
 * (i.e. before the give takes effect). */
void sys_trace_k_sem_give_enter(struct k_sem *sem) {
	(void)xTraceEventCreate2(PSF_EVENT_SEMAPHORE_GIVE_SUCCESS, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)sem->count);
}

/* Intentionally empty: the take is traced by the blocking/exit hooks. */
void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout) {

}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_SEMAPHORE_TAKE_BLOCKING, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)timeout.ticks, (TraceUnsignedBaseType_t)sem->count);
}

void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_SEMAPHORE_TAKE_SUCCESS : PSF_EVENT_SEMAPHORE_TAKE_FAILED, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)sem->count, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_sem_reset(struct k_sem *sem) {
	(void)xTraceEventCreate1(PSF_EVENT_SEMAPHORE_RESET, (TraceUnsignedBaseType_t)sem);
}
732 
733 
/* Mutex trace function definitions */

void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_MUTEX_CREATE, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

/* Intentionally empty: the lock is traced by the blocking/exit hooks. */
void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout) {
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_MUTEX_TAKE_BLOCKING, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MUTEX_TAKE_SUCCESS : PSF_EVENT_MUTEX_TAKE_FAILED, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

/* Intentionally empty: the unlock is traced by the exit hook only. */
void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex) {
}

void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MUTEX_GIVE_SUCCESS : PSF_EVENT_MUTEX_GIVE_FAILED, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}
756 
757 
/* Conditional variable trace function definitions */

void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_INIT, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

/* Intentionally empty: signal is traced by the blocking/exit hooks. */
void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar) {

}

void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar) {
	(void)xTraceEventCreate1(PSF_EVENT_CONDVAR_SIGNAL_BLOCKING, (TraceUnsignedBaseType_t)condvar);
}

/* NOTE(review): unlike the other *_exit hooks, the event code here is
 * always ..._SIGNAL_SUCCESS regardless of ret; ret is still recorded as
 * an event argument. Confirm against the host-side event map before
 * changing. */
void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_SIGNAL_SUCCESS, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar) {
	(void)xTraceEventCreate1(PSF_EVENT_CONDVAR_BROADCAST_ENTER, (TraceUnsignedBaseType_t)condvar);
}

void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_BROADCAST_EXIT, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_CONDVAR_WAIT_BLOCKING, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_CONDVAR_WAIT_SUCCESS : PSF_EVENT_CONDVAR_WAIT_FAILURE, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}
790 
791 
/* Queue trace function definitions */

void sys_trace_k_queue_init(struct k_queue *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_QUEUE_INIT, (TraceUnsignedBaseType_t)queue);
}

void sys_trace_k_queue_cancel_wait(struct k_queue *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_QUEUE_CANCEL_WAIT, (TraceUnsignedBaseType_t)queue);
}

/* Only the alloc variant of queue_insert is traced; the non-alloc path is
 * filtered out. This enter hook itself emits no event (the blocking hook
 * below does). */
void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data) {
	// Ignore non alloc tracing of this event
	if (!alloc) {
		return;
	}
}

void sys_trace_k_queue_queue_insert_blocking(struct k_queue *queue, bool alloc, void *data) {
	// Ignore non alloc tracing of this event
	if (!alloc) {
		return;
	}

	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_QUEUE_INSERT_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret) {
	// Ignore non alloc tracing of this event
	if (!alloc) {
		return;
	}

	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_QUEUE_INSERT_SUCCESS : PSF_EVENT_QUEUE_QUEUE_INSERT_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

/* Non-alloc append/prepend are one-shot events logged at entry; the exit
 * hooks are intentionally empty. */
void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_APPEND, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data) {
}

void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_ALLOC_APPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_ALLOC_APPEND_SUCCESS : PSF_EVENT_QUEUE_ALLOC_APPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_prepend_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PREPEND, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_prepend_exit(struct k_queue *queue, void *data) {
}

void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_ALLOC_PREPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_ALLOC_PREPEND_SUCCESS : PSF_EVENT_QUEUE_ALLOC_PREPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_insert_enter(struct k_queue *queue, void *prev, void *data) {
	(void)xTraceEventCreate3(PSF_EVENT_QUEUE_INSERT, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)prev, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_insert_exit(struct k_queue *queue, void *prev, void *data) {
}

void sys_trace_k_queue_append_list_enter(struct k_queue *queue, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_QUEUE_APPEND_LIST_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_queue_append_list_exit(struct k_queue *queue, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_APPEND_LIST_SUCCESS : PSF_EVENT_QUEUE_APPEND_LIST_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_merge_slist_enter(struct k_queue *queue, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_MERGE_SLIST_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)list);
}

void sys_trace_k_queue_merge_slist_exit(struct k_queue *queue, sys_slist_t *list, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_MERGE_SLIST_SUCCESS : PSF_EVENT_QUEUE_MERGE_SLIST_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_GET_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)timeout.ticks);
}

/* k_queue_get() returns the dequeued element or NULL on timeout; the
 * event code reflects which it was. */
void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret) {
	(void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_QUEUE_GET_SUCCESS : PSF_EVENT_QUEUE_GET_TIMEOUT, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_remove_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_REMOVE_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_remove_exit(struct k_queue *queue, void *data, bool ret) {
	(void)xTraceEventCreate2(ret ? PSF_EVENT_QUEUE_REMOVE_SUCCESS : PSF_EVENT_QUEUE_REMOVE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_unique_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_UNIQUE_APPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_unique_append_exit(struct k_queue *queue, void *data, bool ret) {
	(void)xTraceEventCreate2(ret ? PSF_EVENT_QUEUE_UNIQUE_APPEND_SUCCESS : PSF_EVENT_QUEUE_UNIQUE_APPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PEEK_HEAD, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PEEK_TAIL, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}
910 
911 
/* FIFO trace function definitions */

void sys_trace_k_fifo_init_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_INIT_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_init_exit(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_INIT_EXIT, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_cancel_wait_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_CANCEL_WAIT_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_cancel_wait_exit(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_CANCEL_WAIT_EXIT, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_alloc_put_enter(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_ALLOC_PUT_BLOCKING, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_alloc_put_exit(struct k_fifo *fifo, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_FIFO_ALLOC_PUT_SUCCESS : PSF_EVENT_FIFO_ALLOC_PUT_FAILURE, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_put_list_enter(struct k_fifo *fifo, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_FIFO_PUT_LIST_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_fifo_put_list_exit(struct k_fifo *fifo, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_FIFO_PUT_LIST_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_fifo_put_slist_enter(struct k_fifo *fifo, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_SLIST_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)list);
}

void sys_trace_k_fifo_put_slist_exit(struct k_fifo *fifo, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_SLIST_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)list);
}

/* Entry of k_fifo_get(): logged as a "blocking" event with the timeout
 * in ticks. */
void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_GET_BLOCKING, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)timeout.ticks);
}

/* k_fifo_get() returns NULL on timeout; the event code reflects that. */
void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret) {
	(void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_FIFO_GET_SUCCESS : PSF_EVENT_FIFO_GET_FAILURE, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_peek_head_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_PEEK_HEAD_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_peek_head_exit(struct k_fifo *fifo, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PEEK_HEAD_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_peek_tail_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_PEEK_TAIL_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_peek_tail_exit(struct k_fifo *fifo, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PEEK_TAIL_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}
984 
985 
/* LIFO trace function definitions */

void sys_trace_k_lifo_init_enter(struct k_lifo *lifo) {
	(void)xTraceEventCreate1(PSF_EVENT_LIFO_INIT_ENTER, (TraceUnsignedBaseType_t)lifo);
}

void sys_trace_k_lifo_init_exit(struct k_lifo *lifo) {
	(void)xTraceEventCreate1(PSF_EVENT_LIFO_INIT_EXIT, (TraceUnsignedBaseType_t)lifo);
}

void sys_trace_k_lifo_put_enter(struct k_lifo *lifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_LIFO_PUT_ENTER, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_put_exit(struct k_lifo *lifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_LIFO_PUT_EXIT, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_alloc_put_enter(struct k_lifo *lifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_LIFO_ALLOC_PUT_BLOCKING, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_alloc_put_exit(struct k_lifo *lifo, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_LIFO_ALLOC_PUT_SUCCESS : PSF_EVENT_LIFO_ALLOC_PUT_FAILURE, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)ret);
}

/* Entry of k_lifo_get(): logged as a "blocking" event with the timeout
 * in ticks. */
void sys_trace_k_lifo_get_enter(struct k_lifo *lifo, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_LIFO_GET_BLOCKING, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)timeout.ticks);
}

/* k_lifo_get() returns NULL on timeout; the event code reflects that. */
void sys_trace_k_lifo_get_exit(struct k_lifo *lifo, k_timeout_t timeout, void *ret) {
	(void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_LIFO_GET_SUCCESS : PSF_EVENT_LIFO_GET_FAILURE, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)ret);
}
1018 
1019 
/* Stack trace function definitions */

void sys_trace_k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries) {
	(void)xTraceEventCreate3(PSF_EVENT_STACK_INIT, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)buffer, (TraceUnsignedBaseType_t)num_entries);
}

void sys_trace_k_stack_alloc_init_enter(struct k_stack *stack, uint32_t num_entries) {
	(void)xTraceEventCreate2(PSF_EVENT_STACK_ALLOC_INIT_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)num_entries);
}

void sys_trace_k_stack_alloc_init_exit(struct k_stack *stack, uint32_t num_entries, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_ALLOC_INIT_SUCCESS : PSF_EVENT_STACK_ALLOC_INIT_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_stack_cleanup_enter(struct k_stack *stack) {
	(void)xTraceEventCreate1(PSF_EVENT_STACK_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)stack);
}

void sys_trace_k_stack_cleanup_exit(struct k_stack *stack, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_CLEANUP_SUCCESS : PSF_EVENT_STACK_CLEANUP_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

/* Note: the pushed value itself (stack_data_t) is recorded as an event
 * argument, not a pointer to it. */
void sys_trace_k_stack_push_enter(struct k_stack *stack, stack_data_t data) {
	(void)xTraceEventCreate2(PSF_EVENT_STACK_PUSH_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_stack_push_exit(struct k_stack *stack, stack_data_t data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_PUSH_SUCCESS : PSF_EVENT_STACK_PUSH_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

/* Here data is the output pointer of k_stack_pop(); its address is logged. */
void sys_trace_k_stack_pop_blocking(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_STACK_POP_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)data, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_stack_pop_exit(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_POP_SUCCESS : PSF_EVENT_STACK_POP_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}
1056 
1057 
1058 /* Message queue trace function definitions */
sys_trace_k_msgq_init(struct k_msgq * msgq)1059 void sys_trace_k_msgq_init(struct k_msgq *msgq) {
1060 	(void)xTraceEventCreate4(PSF_EVENT_MESSAGEQUEUE_INIT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)msgq->buffer_start, (TraceUnsignedBaseType_t)msgq->msg_size, (TraceUnsignedBaseType_t)msgq->max_msgs);
1061 }
1062 
sys_trace_k_msgq_alloc_init_enter(struct k_msgq * msgq,size_t msg_size,uint32_t max_msgs)1063 void sys_trace_k_msgq_alloc_init_enter(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs) {
1064 	(void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)msg_size, (TraceUnsignedBaseType_t)max_msgs);
1065 }
1066 
sys_trace_k_msgq_alloc_init_exit(struct k_msgq * msgq,size_t msg_size,uint32_t max_msgs,int ret)1067 void sys_trace_k_msgq_alloc_init_exit(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs, int ret) {
1068 	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_SUCCESS : PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
1069 }
1070 
sys_trace_k_msgq_cleanup_enter(struct k_msgq * msgq)1071 void sys_trace_k_msgq_cleanup_enter(struct k_msgq *msgq) {
1072 	(void)xTraceEventCreate1(PSF_EVENT_MESSAGEQUEUE_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)msgq);
1073 }
1074 
sys_trace_k_msgq_cleanup_exit(struct k_msgq * msgq,int ret)1075 void sys_trace_k_msgq_cleanup_exit(struct k_msgq *msgq, int ret) {
1076 	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_CLEANUP_SUCCESS : PSF_EVENT_MESSAGEQUEUE_CLEANUP_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
1077 }
1078 
sys_trace_k_msgq_put_enter(struct k_msgq * msgq,const void * data,k_timeout_t timeout)1079 void sys_trace_k_msgq_put_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
1080 }
1081 
sys_trace_k_msgq_put_blocking(struct k_msgq * msgq,const void * data,k_timeout_t timeout)1082 void sys_trace_k_msgq_put_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
1083 	(void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_PUT_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)data, (TraceUnsignedBaseType_t)timeout.ticks);
1084 }
1085 
void sys_trace_k_msgq_put_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
	/* k_msgq_put() returned; 0 means the message was queued, anything else is reported as timeout. */
	uint32_t uiEventCode = PSF_EVENT_MESSAGEQUEUE_PUT_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_MESSAGEQUEUE_PUT_TIMEOUT;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}
1089 
void sys_trace_k_msgq_get_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
	/* Intentionally empty: the get operation is traced via the blocking and exit hooks only. */
}
1092 
void sys_trace_k_msgq_get_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
	/* The caller is about to block inside k_msgq_get(); record queue, buffer and timeout ticks. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_GET_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)data, uxTicks);
}
1096 
void sys_trace_k_msgq_get_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
	/* k_msgq_get() returned; 0 means a message was received, anything else is reported as timeout. */
	uint32_t uiEventCode = PSF_EVENT_MESSAGEQUEUE_GET_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_MESSAGEQUEUE_GET_TIMEOUT;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}
1100 
sys_trace_k_msgq_peek(struct k_msgq * msgq,void * data,int ret)1101 void sys_trace_k_msgq_peek(struct k_msgq *msgq, void *data, int ret) {
1102 	(void)xTraceEventCreate3(
1103 		ret == 0 ? PSF_EVENT_MESSAGEQUEUE_PEEK_SUCCESS : PSF_EVENT_MESSAGEQUEUE_PEEK_FAILED,
1104 		(TraceUnsignedBaseType_t)msgq,
1105 		(TraceUnsignedBaseType_t)data,
1106 		(TraceUnsignedBaseType_t)ret
1107 	);
1108 }
1109 
sys_trace_k_msgq_purge(struct k_msgq * msgq)1110 void sys_trace_k_msgq_purge(struct k_msgq *msgq) {
1111 	(void)xTraceEventCreate1(PSF_EVENT_MESSAGEQUEUE_PURGE, (TraceBaseType_t)msgq);
1112 }
1113 
1114 
1115 /* Mailbox trace function definitions */
sys_trace_k_mbox_init(struct k_mbox * mbox)1116 void sys_trace_k_mbox_init(struct k_mbox *mbox) {
1117 	(void)xTraceEventCreate1(PSF_EVENT_MAILBOX_INIT, (TraceUnsignedBaseType_t)mbox);
1118 }
1119 
void sys_trace_k_mbox_message_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
	/* Intentionally empty: message_put is traced via the blocking and exit hooks only. */
}
1122 
void sys_trace_k_mbox_message_put_blocking(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
	/* The caller is about to block inside k_mbox_message_put(). */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_MAILBOX_MESSAGE_PUT_BLOCKING, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)tx_msg, uxTicks);
}
1126 
void sys_trace_k_mbox_message_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
	/* k_mbox_message_put() returned; select the event by the result code. */
	uint32_t uiEventCode = PSF_EVENT_MAILBOX_MESSAGE_PUT_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_MAILBOX_MESSAGE_PUT_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}
1130 
void sys_trace_k_mbox_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
	/* NOTE(review): unlike the other *_put_enter hooks this one emits the
	 * BLOCKING event directly at entry - presumably because k_mbox_put has
	 * no separate blocking hook; confirm against the Zephyr tracing API. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_MAILBOX_PUT_BLOCKING, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)tx_msg, uxTicks);
}
1134 
void sys_trace_k_mbox_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
	/* k_mbox_put() returned; select the event by the result code. */
	uint32_t uiEventCode = PSF_EVENT_MAILBOX_PUT_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_MAILBOX_PUT_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}
1138 
sys_trace_k_mbox_async_put_enter(struct k_mbox * mbox,struct k_sem * sem)1139 void sys_trace_k_mbox_async_put_enter(struct k_mbox *mbox, struct k_sem *sem) {
1140 	(void)xTraceEventCreate2(PSF_EVENT_MAILBOX_ASYNC_PUT_ENTER, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)sem);
1141 }
1142 
sys_trace_k_mbox_async_put_exit(struct k_mbox * mbox,struct k_sem * sem)1143 void sys_trace_k_mbox_async_put_exit(struct k_mbox *mbox, struct k_sem *sem) {
1144 	(void)xTraceEventCreate2(PSF_EVENT_MAILBOX_ASYNC_PUT_EXIT, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)sem);
1145 }
1146 
void sys_trace_k_mbox_get_enter(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
	/* Intentionally empty: the get operation is traced via the blocking and exit hooks only. */
}
1149 
void sys_trace_k_mbox_get_blocking(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
	/* The caller is about to block inside k_mbox_get(); record all call parameters. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate4(PSF_EVENT_MAILBOX_GET_BLOCKING, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)rx_msg, (TraceUnsignedBaseType_t)buffer, uxTicks);
}
1159 
void sys_trace_k_mbox_get_exit(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout, int ret) {
	/* k_mbox_get() returned; 0 means a message was received, anything else is reported as timeout. */
	uint32_t uiEventCode = PSF_EVENT_MAILBOX_GET_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_MAILBOX_GET_TIMEOUT;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}
1163 
1164 /* @note: Hook not implemented in Zephyr kernel */
sys_trace_k_mbox_data_get(struct k_mbox_msg * rx_msg,void * buffer)1165 void sys_trace_k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer) {
1166 	(void)xTraceEventCreate2(PSF_EVENT_MAILBOX_DATA_GET, (TraceUnsignedBaseType_t)rx_msg, (TraceUnsignedBaseType_t)buffer);
1167 }
1168 
1169 
1170 /* Pipe trace function definitions */
void sys_trace_k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size) {
	/* Record creation of a pipe with its ring buffer and capacity. */
	TraceUnsignedBaseType_t uxPipe = (TraceUnsignedBaseType_t)pipe;

	(void)xTraceEventCreate3(PSF_EVENT_PIPE_INIT, uxPipe, (TraceUnsignedBaseType_t)buffer, (TraceUnsignedBaseType_t)size);
}
1174 
sys_trace_k_pipe_cleanup_enter(struct k_pipe * pipe)1175 void sys_trace_k_pipe_cleanup_enter(struct k_pipe *pipe) {
1176 	(void)xTraceEventCreate1(PSF_EVENT_PIPE_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)pipe);
1177 }
1178 
sys_trace_k_pipe_cleanup_exit(struct k_pipe * pipe,int ret)1179 void sys_trace_k_pipe_cleanup_exit(struct k_pipe *pipe, int ret) {
1180 	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PIPE_CLEANUP_SUCCESS : PSF_EVENT_PIPE_CLEANUP_FAILURE, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
1181 }
1182 
void sys_trace_k_pipe_alloc_init_enter(struct k_pipe *pipe, size_t size) {
	/* Entering k_pipe_alloc_init(); record the pipe and requested buffer size. */
	TraceUnsignedBaseType_t uxPipe = (TraceUnsignedBaseType_t)pipe;

	(void)xTraceEventCreate2(PSF_EVENT_PIPE_ALLOC_INIT_BLOCKING, uxPipe, (TraceUnsignedBaseType_t)size);
}
1186 
void sys_trace_k_pipe_alloc_init_exit(struct k_pipe *pipe, size_t size, int ret) {
	/* k_pipe_alloc_init() returned; select the event by the result code. */
	uint32_t uiEventCode = PSF_EVENT_PIPE_ALLOC_INIT_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_PIPE_ALLOC_INIT_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}
1190 
void sys_trace_k_pipe_put_enter(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {
	/* Intentionally empty: the put operation is traced via the blocking and exit hooks only. */

}
1194 
void sys_trace_k_pipe_put_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {
	/* The caller is about to block inside k_pipe_put(); record all call parameters. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate6(PSF_EVENT_PIPE_PUT_BLOCKING,
		(TraceUnsignedBaseType_t)pipe,
		(TraceUnsignedBaseType_t)data,
		(TraceUnsignedBaseType_t)bytes_to_write,
		(TraceUnsignedBaseType_t)bytes_written,
		(TraceUnsignedBaseType_t)min_xfer,
		uxTicks);
}
1206 
void sys_trace_k_pipe_put_exit(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout, int ret) {
	/* k_pipe_put() returned; 0 means the write succeeded, anything else is reported as timeout. */
	uint32_t uiEventCode = PSF_EVENT_PIPE_PUT_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_PIPE_PUT_TIMEOUT;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}
1210 
void sys_trace_k_pipe_get_enter(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {
	/* Intentionally empty: the get operation is traced via the blocking and exit hooks only. */

}
1214 
void sys_trace_k_pipe_get_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {
	/* The caller is about to block inside k_pipe_get(); record all call parameters. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate6(PSF_EVENT_PIPE_GET_BLOCKING,
		(TraceUnsignedBaseType_t)pipe,
		(TraceUnsignedBaseType_t)data,
		(TraceUnsignedBaseType_t)bytes_to_read,
		(TraceUnsignedBaseType_t)bytes_read,
		(TraceUnsignedBaseType_t)min_xfer,
		uxTicks);
}
1226 
void sys_trace_k_pipe_get_exit(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout, int ret) {
	/* k_pipe_get() returned; 0 means the read succeeded, anything else is reported as timeout. */
	uint32_t uiEventCode = PSF_EVENT_PIPE_GET_SUCCESS;

	if (ret != 0) {
		uiEventCode = PSF_EVENT_PIPE_GET_TIMEOUT;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}
1230 
1231 /* Memory heap trace function definitions */
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes) {
	/* Record creation of a kernel heap with its backing memory and size. */
	TraceUnsignedBaseType_t uxHeap = (TraceUnsignedBaseType_t)h;

	(void)xTraceEventCreate3(PSF_EVENT_KHEAP_INIT, uxHeap, (TraceUnsignedBaseType_t)mem, (TraceUnsignedBaseType_t)bytes);
}
1235 
void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
	/* Entering k_heap_alloc(); record the heap, requested size and timeout ticks. */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_KHEAP_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)bytes, uxTicks);
}
1239 
void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret) {
	/* k_heap_alloc() returned; a NULL pointer indicates allocation failure. */
	uint32_t uiEventCode = PSF_EVENT_KHEAP_ALLOC_SUCCESS;

	if (ret == NULL) {
		uiEventCode = PSF_EVENT_KHEAP_ALLOC_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
1243 
void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
	/* Intentionally empty: aligned_alloc is traced via the blocking and exit hooks only. */

}
1247 
void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
	/* The caller is about to block inside k_heap_aligned_alloc().
	 * The trailing 0 keeps the 4-argument event layout (alignment is not
	 * available in this hook). */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate4(PSF_EVENT_KHEAP_ALIGNED_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)bytes, uxTicks, (TraceUnsignedBaseType_t)0);
}
1251 
void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret) {
	/* k_heap_aligned_alloc() returned; a NULL pointer indicates allocation failure. */
	uint32_t uiEventCode = PSF_EVENT_KHEAP_ALIGNED_ALLOC_SUCCESS;

	if (ret == NULL) {
		uiEventCode = PSF_EVENT_KHEAP_ALIGNED_ALLOC_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
1255 
sys_trace_k_heap_free(struct k_heap * h,void * mem)1256 void sys_trace_k_heap_free(struct k_heap *h, void *mem) {
1257 	(void)xTraceEventCreate2(PSF_EVENT_KHEAP_FREE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)mem);
1258 }
1259 
void sys_trace_k_heap_sys_k_aligned_alloc_enter(struct k_heap *h, size_t align, size_t size) {
	/* Entering the system-heap aligned allocation wrapper. */
	TraceUnsignedBaseType_t uxHeap = (TraceUnsignedBaseType_t)h;

	(void)xTraceEventCreate3(PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_BLOCKING, uxHeap, (TraceUnsignedBaseType_t)align, (TraceUnsignedBaseType_t)size);
}
1263 
void sys_trace_k_heap_sys_k_aligned_alloc_exit(struct k_heap *h, size_t align, size_t size, void *ret) {
	/* System-heap aligned allocation returned; NULL indicates failure. */
	uint32_t uiEventCode = PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_SUCCESS;

	if (ret == NULL) {
		uiEventCode = PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
1267 
void sys_trace_k_heap_sys_k_malloc_enter(struct k_heap *h, size_t size) {
	/* Entering k_malloc() on the system heap. */
	TraceUnsignedBaseType_t uxHeap = (TraceUnsignedBaseType_t)h;

	(void)xTraceEventCreate2(PSF_EVENT_KHEAP_SYS_MALLOC_BLOCKING, uxHeap, (TraceUnsignedBaseType_t)size);
}
1271 
void sys_trace_k_heap_sys_k_malloc_exit(struct k_heap *h, size_t size, void *ret) {
	/* k_malloc() returned; NULL indicates failure. */
	uint32_t uiEventCode = PSF_EVENT_KHEAP_SYS_MALLOC_SUCCESS;

	if (ret == NULL) {
		uiEventCode = PSF_EVENT_KHEAP_SYS_MALLOC_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
1275 
sys_trace_k_heap_sys_k_free_enter(struct k_heap * h)1276 void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h) {
1277 	(void)xTraceEventCreate1(PSF_EVENT_KHEAP_SYS_FREE_ENTER, (TraceUnsignedBaseType_t)h);
1278 }
1279 
sys_trace_k_heap_sys_k_free_exit(struct k_heap * h)1280 void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h) {
1281 	(void)xTraceEventCreate1(PSF_EVENT_KHEAP_SYS_FREE_EXIT, (TraceUnsignedBaseType_t)h);
1282 }
1283 
void sys_trace_k_heap_sys_k_enter(struct k_heap *h, size_t nmemb, size_t size) {
	/* Intentionally empty: no trace event is emitted for this hook. */

}
1287 
void sys_trace_k_heap_sys_k_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {
	/* Intentionally empty: no trace event is emitted for this hook. */

}
1291 
void sys_trace_k_heap_sys_k_calloc_enter(struct k_heap *h, size_t nmemb, size_t size) {
	/* Entering k_calloc() on the system heap; record element count and element size. */
	TraceUnsignedBaseType_t uxHeap = (TraceUnsignedBaseType_t)h;

	(void)xTraceEventCreate3(PSF_EVENT_KHEAP_SYS_CALLOC_BLOCKING, uxHeap, (TraceUnsignedBaseType_t)nmemb, (TraceUnsignedBaseType_t)size);
}
1295 
void sys_trace_k_heap_sys_k_calloc_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {
	/* k_calloc() returned; NULL indicates failure. */
	uint32_t uiEventCode = PSF_EVENT_KHEAP_SYS_CALLOC_SUCCESS;

	if (ret == NULL) {
		uiEventCode = PSF_EVENT_KHEAP_SYS_CALLOC_FAILURE;
	}

	(void)xTraceEventCreate2(uiEventCode, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
1299 
1300 
1301 /* Memory slab trace function definitions */
void sys_trace_k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks, int ret) {
	/* Record memory-slab construction. Buffer/block_size/num_blocks are read
	 * back from the slab object instead of the parameters.
	 * NOTE(review): on failure (ret != 0) the slab fields may not be fully
	 * initialized - confirm against k_mem_slab_init() in the kernel. */
	(void)xTraceEventCreate5(
		ret == 0 ? PSF_EVENT_MEMORY_SLAB_INIT_SUCCESS : PSF_EVENT_MEMORY_SLAB_INIT_FAILURE,
		(TraceUnsignedBaseType_t)slab,
		(TraceUnsignedBaseType_t)slab->buffer,
		(TraceUnsignedBaseType_t)slab->info.block_size,
		(TraceUnsignedBaseType_t)slab->info.num_blocks,
		(TraceUnsignedBaseType_t)ret
	);
}
1312 
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {
	/* Intentionally empty: the alloc operation is traced via the blocking and exit hooks only. */

}
1316 
void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {
	/* The caller is about to block inside k_mem_slab_alloc(). */
	TraceUnsignedBaseType_t uxTicks = (TraceUnsignedBaseType_t)timeout.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_MEMORY_SLAB_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)slab, (TraceUnsignedBaseType_t)mem, uxTicks);
}
1320 
void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout, int ret) {
	/* k_mem_slab_alloc() returned. On success the slab's total block count is
	 * appended as a fifth argument; on failure a 4-argument event is emitted,
	 * distinguishing timeout (-ENOMEM/-EAGAIN) from other errors. */
	if (ret == 0) {
		(void)xTraceEventCreate5(
			PSF_EVENT_MEMORY_SLAB_ALLOC_SUCCESS,
			(TraceUnsignedBaseType_t)slab,
			(TraceUnsignedBaseType_t)mem,
			(TraceUnsignedBaseType_t)timeout.ticks,
			(TraceUnsignedBaseType_t)ret,
			(TraceUnsignedBaseType_t)slab->info.num_blocks
		);

		return;
	}

	{
		uint32_t uiEventCode = PSF_EVENT_MEMORY_SLAB_ALLOC_ERROR;

		if ((ret == -ENOMEM) || (ret == -EAGAIN)) {
			uiEventCode = PSF_EVENT_MEMORY_SLAB_ALLOC_TIMEOUT;
		}

		(void)xTraceEventCreate4(
			uiEventCode,
			(TraceUnsignedBaseType_t)slab,
			(TraceUnsignedBaseType_t)mem,
			(TraceUnsignedBaseType_t)timeout.ticks,
			(TraceUnsignedBaseType_t)ret
		);
	}
}
1349 
sys_trace_k_mem_slab_free_exit(struct k_mem_slab * slab,void ** mem)1350 void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void **mem) {
1351 	(void)xTraceEventCreate3(PSF_EVENT_MEMORY_SLAB_FREE, (TraceUnsignedBaseType_t)slab, (TraceUnsignedBaseType_t)mem, (TraceUnsignedBaseType_t)slab->info.num_blocks);
1352 }
1353 
1354 
1355 /* Timer trace function definitions */
void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_expiry_t stop_fn) {
	/* Record creation of a kernel timer with its expiry and stop callbacks. */
	TraceUnsignedBaseType_t uxTimer = (TraceUnsignedBaseType_t)timer;

	(void)xTraceEventCreate3(PSF_EVENT_TIMER_INIT, uxTimer, (TraceUnsignedBaseType_t)expiry_fn, (TraceUnsignedBaseType_t)stop_fn);
}
1359 
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period) {
	/* Record k_timer_start() with the initial duration and repeat period (in ticks). */
	TraceUnsignedBaseType_t uxDuration = (TraceUnsignedBaseType_t)duration.ticks;
	TraceUnsignedBaseType_t uxPeriod = (TraceUnsignedBaseType_t)period.ticks;

	(void)xTraceEventCreate3(PSF_EVENT_TIMER_START, (TraceUnsignedBaseType_t)timer, uxDuration, uxPeriod);
}
1363 
sys_trace_k_timer_stop(struct k_timer * timer)1364 void sys_trace_k_timer_stop(struct k_timer *timer) {
1365 	(void)xTraceEventCreate1(PSF_EVENT_TIMER_STOP, (TraceUnsignedBaseType_t)timer);
1366 }
1367 
sys_trace_k_timer_status_sync_blocking(struct k_timer * timer)1368 void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer) {
1369 	(void)xTraceEventCreate1(PSF_EVENT_TIMER_STATUS_SYNC_AWAIT, (TraceUnsignedBaseType_t)timer);
1370 }
1371 
void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result) {
	/* k_timer_status_sync() returned; record the status result. */
	TraceUnsignedBaseType_t uxTimer = (TraceUnsignedBaseType_t)timer;

	(void)xTraceEventCreate2(PSF_EVENT_TIMER_STATUS_SYNC_EXIT, uxTimer, (TraceUnsignedBaseType_t)result);
}
1375 
1376 
1377 /* Syscall trace function definitions */
void sys_trace_syscall_enter(uint32_t id, const char *name) {
	/* Trace entry into a Zephyr syscall. When the syscall extension is
	 * enabled the id is mapped to an extension event; otherwise a generic
	 * event carrying the id and the syscall name string is emitted. */
#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
	if (xTraceIsRecorderEnabled())
	{
		xSyscallsExtensionEnter(id);
	}
#else
	TraceEventHandle_t xTraceHandle;
	/* Hoisted: strlen(name) was previously computed twice (event size and
	 * string payload). */
	size_t uxNameLength = strlen(name);

	if (xTraceEventBegin(PSF_EVENT_SYSTEM_SYSCALL_ENTER, sizeof(uint32_t) + uxNameLength, &xTraceHandle) == TRC_SUCCESS) {
		(void)xTraceEventAddUnsignedBaseType(xTraceHandle, (TraceUnsignedBaseType_t)id);

		/* Add name */
		(void)xTraceEventAddString(xTraceHandle, name, uxNameLength);

		(void)xTraceEventEnd(xTraceHandle);
	}
#endif
}
1395 
void sys_trace_syscall_exit(uint32_t id, const char *name) {
	/* Trace return from a Zephyr syscall. With the syscall extension enabled
	 * the id selects an extension exit event; otherwise an empty generic
	 * exit event is emitted (id and name are not recorded in that mode). */
#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
	if (xTraceIsRecorderEnabled())
	{
		xSyscallsExtensionExit(id);
	}
#else
	TraceEventHandle_t xTraceHandle;

	if (xTraceEventBegin(PSF_EVENT_SYSTEM_SYSCALL_EXIT, 0, &xTraceHandle) == TRC_SUCCESS) {
		(void)xTraceEventEnd(xTraceHandle);
	}
#endif
}
1408 
1409 
1410 /* Legacy trace functions that are pending refactoring/removal by
1411  * the Zephyr team.
1412  */
sys_trace_isr_enter(void)1413 void sys_trace_isr_enter(void) {
1414 	xTraceISRBegin(xHandleISR);
1415 }
1416 
void sys_trace_isr_exit(void) {
	/* Mark exit from the generic Zephyr ISR (0 = no pending ISR chaining). */
	(void)xTraceISREnd(0);
}
1420 
void sys_trace_isr_exit_to_scheduler(void) {
	/* Intentionally empty: ISR-to-scheduler transitions are not traced separately. */
}
1423 
void sys_trace_idle(void) {
	/* Intentionally empty: idle entry is not traced by this port. */
}
1426 
void sys_trace_void(unsigned int id) {
	/* Intentionally empty: legacy hook kept for Zephyr API compatibility. */
}
1429