/*
 * Trace Recorder for Tracealyzer v4.10.3
 * Copyright 2023 Percepio AB
 * www.percepio.com
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * The Zephyr-specific parts of the trace recorder.
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/version.h>
#include <string.h>
#include <trcRecorder.h>

#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)

/* Syscall extension name */
#define SYSCALLS_EXTENSION_NAME "syscalls"

/* These definitions depend on the KERNEL_VERSION parameters generated during the Zephyr build process */
#define SYSCALLS_EXTENSION_VERSION_MAJOR KERNEL_VERSION_MAJOR
#define SYSCALLS_EXTENSION_VERSION_MINOR KERNEL_VERSION_MINOR
#define SYSCALLS_EXTENSION_VERSION_PATCH KERNEL_PATCHLEVEL

/* Event codes for Enter and Exit */
#define SYSCALLS_EXTENSION_EVENT_COUNT ((K_SYSCALL_LIMIT + 1) * 2)

#define xSyscallsExtensionEnable() (xTraceExtensionCreate(SYSCALLS_EXTENSION_NAME, SYSCALLS_EXTENSION_VERSION_MAJOR, SYSCALLS_EXTENSION_VERSION_MINOR, SYSCALLS_EXTENSION_VERSION_PATCH, SYSCALLS_EXTENSION_EVENT_COUNT, &pxKernelPortData->xSyscallsExtensionHandle))
#define xSyscallsExtensionEnter(id) prvTraceStoreEvent_None(xTraceExtensionGetEventId(pxKernelPortData->xSyscallsExtensionHandle, id))
#define xSyscallsExtensionExit(id) prvTraceStoreEvent_None(xTraceExtensionGetEventId(pxKernelPortData->xSyscallsExtensionHandle, id + (K_SYSCALL_LIMIT + 1)))

#endif
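
/*
 * Note on the event-id layout above: syscall id N maps to extension event id
 * N for Enter and N + (K_SYSCALL_LIMIT + 1) for Exit, which is why the
 * extension reserves (K_SYSCALL_LIMIT + 1) * 2 event codes. A minimal
 * (hypothetical) instrumentation site would look like:
 *
 *   xSyscallsExtensionEnter(id);
 *   ... dispatch the syscall ...
 *   xSyscallsExtensionExit(id);
 */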

/* Trace recorder control thread stack */
static K_THREAD_STACK_DEFINE(TzCtrl_thread_stack, (TRC_CFG_CTRL_TASK_STACK_SIZE));

/**
 * @brief TzCtrl_thread_entry
 *
 * Task for sending the trace data from the internal buffer to the stream
 * interface (assuming TRC_STREAM_PORT_USE_INTERNAL_BUFFER == 1) and for
 * receiving commands from Tracealyzer. Also performs some diagnostics.
 *
 * @param[in] _args
 */
void TzCtrl_thread_entry(void *_args)
{
	while (1)
	{
		(void)xTraceTzCtrl();

		k_msleep((TRC_CFG_CTRL_TASK_DELAY));
	}
}

/**
 * @brief Kernel port data
 */
typedef struct TraceKernelPortData
{
	TraceHeapHandle_t xSystemHeapHandle;
	TraceKernelPortTaskHandle_t xTzCtrlHandle;
	TraceExtensionHandle_t xSyscallsExtensionHandle;
} TraceKernelPortData_t;

static TraceKernelPortData_t* pxKernelPortData TRC_CFG_RECORDER_DATA_ATTRIBUTE;

traceResult xTraceKernelPortInitialize(TraceKernelPortDataBuffer_t* pxBuffer)
{
	TRC_ASSERT_EQUAL_SIZE(TraceKernelPortDataBuffer_t, TraceKernelPortData_t);

	if (pxBuffer == 0)
	{
		return TRC_FAIL;
	}

	pxKernelPortData = (TraceKernelPortData_t*)pxBuffer;

	pxKernelPortData->xSystemHeapHandle = 0;
	pxKernelPortData->xSyscallsExtensionHandle = 0;

	return TRC_SUCCESS;
}

traceResult xTraceKernelPortEnable(void)
{
	return TRC_SUCCESS;
}

TraceHeapHandle_t xTraceKernelPortGetSystemHeapHandle(void)
{
	return 0;
}

#if defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0)
traceResult xTraceKernelPortGetUnusedStack(void* thread, TraceUnsignedBaseType_t* puxUnusedStack)
{
	return k_thread_stack_space_get(thread, (size_t*)puxUnusedStack);
}
#endif /* defined(TRC_CFG_ENABLE_STACK_MONITOR) && (TRC_CFG_ENABLE_STACK_MONITOR == 1) && (TRC_CFG_SCHEDULING_ONLY == 0) */
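
/*
 * Note (assumption): k_thread_stack_space_get() returns 0 on success and a
 * negative errno otherwise, so returning its result directly as a
 * traceResult relies on TRC_SUCCESS being defined as 0.
 */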

unsigned char xTraceKernelPortIsSchedulerSuspended(void)
{
	return 0;
}

void vTraceSetKernelObjectName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetWorkQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetHeapName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetSemaphoreName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMutexName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetCondvarName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetFIFOQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetLIFOQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetStackName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMessageQueueName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMailboxName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetPipeName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMemoryHeapName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetMemorySlabName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}

void vTraceSetTimerName(void* object, const char* name)
{
	(void)xTraceObjectSetNameWithoutHandle(object, name);
}
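
/*
 * Usage sketch (hypothetical application code): these helpers attach a
 * readable label to a kernel object so it appears by name in Tracealyzer,
 * e.g.:
 *
 *   struct k_sem my_sem;
 *   k_sem_init(&my_sem, 0, 1);
 *   vTraceSetSemaphoreName(&my_sem, "MySemaphore");
 */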

/**
 * @brief Initialize aspects of the recorder that must precede the
 * kernel initialization (scheduling, threads, etc.).
 */
static int tracelyzer_pre_kernel_init(void)
{
	xTraceInitialize();

#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
	xSyscallsExtensionEnable();
#endif

#ifdef CONFIG_PERCEPIO_TRC_START_MODE_START
	(void)xTraceEnable(TRC_START);
#elif CONFIG_PERCEPIO_TRC_START_MODE_START_AWAIT_HOST
	(void)xTraceEnable(TRC_START_AWAIT_HOST);
#else
	(void)xTraceEnable(TRC_START_FROM_HOST);
#endif

	return 0;
}
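
/*
 * The start mode is selected through Kconfig. For example, to start tracing
 * immediately at boot, an application's prj.conf would set (illustrative):
 *
 *   CONFIG_PERCEPIO_TRC_START_MODE_START=y
 */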

/**
 * @brief Initialize aspects of the recorder that depend on the kernel
 * being initialized.
 */
static int tracealyzer_post_kernel_init(void)
{
	/* Create controller task */
	k_thread_create(&pxKernelPortData->xTzCtrlHandle, TzCtrl_thread_stack,
		K_THREAD_STACK_SIZEOF(TzCtrl_thread_stack),
		(k_thread_entry_t)TzCtrl_thread_entry,
		NULL, NULL, NULL,
		(TRC_CFG_CTRL_TASK_PRIORITY),
		0,
		K_NO_WAIT);

	/* Set controller task name */
	k_thread_name_set(&pxKernelPortData->xTzCtrlHandle, "TzCtrl");

	return 0;
}

/* Specify recorder module initialization stages */
SYS_INIT(tracelyzer_pre_kernel_init, PRE_KERNEL_2, 0);
SYS_INIT(tracealyzer_post_kernel_init, POST_KERNEL, 0);
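
/*
 * Staging rationale: PRE_KERNEL_2 init functions run before the scheduler
 * and kernel services are available, which lets the recorder initialize
 * (and, depending on start mode, begin tracing) before any threads exist.
 * POST_KERNEL runs after the kernel is up, so the TzCtrl thread can be
 * created there.
 */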

/* Thread trace function definitions */
void sys_trace_k_thread_foreach_enter(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_FOREACH_ENTER, (TraceUnsignedBaseType_t)user_cb, (TraceUnsignedBaseType_t)user_data);
}

void sys_trace_k_thread_foreach_exit(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_FOREACH_EXIT);
}

void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_FOREACH_UNLOCKED_ENTER, (TraceUnsignedBaseType_t)user_cb, (TraceUnsignedBaseType_t)user_data);
}

void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *user_data) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_FOREACH_UNLOCKED_EXIT);
}

void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size, int prio) {
	/* Since we have a slightly different task/thread register event,
	 * we manually update the entry table here. */
	TraceEntryHandle_t xEntryHandle;

	TRACE_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();

	if (xTraceEntryCreateWithAddress((void*)thread, &xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	xTraceEntrySetState(xEntryHandle, 0, prio);
	TRACE_EXIT_CRITICAL_SECTION();

	/* Register task with stack monitor */
	xTraceStackMonitorAdd((void*)thread);

	(void)xTraceEventCreate3(PSF_EVENT_THREAD_INIT, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)stack_size, (TraceUnsignedBaseType_t)prio);

#ifdef CONFIG_THREAD_NAME
	if (strlen(thread->name) > 0) {
		xTraceObjectSetName(xEntryHandle, thread->name);
	}
#endif
}

void sys_trace_k_thread_user_mode_enter(k_thread_entry_t entry, void *p1, void *p2, void *p3) {
	(void)xTraceEventCreate5(
		PSF_EVENT_THREAD_USER_MODE_ENTER,
		(TraceUnsignedBaseType_t)k_current_get(),
		(TraceUnsignedBaseType_t)entry,
		(TraceUnsignedBaseType_t)p1,
		(TraceUnsignedBaseType_t)p2,
		(TraceUnsignedBaseType_t)p3
	);
}

void sys_trace_k_thread_heap_assign(struct k_thread *thread, struct k_heap *heap) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_HEAP_ASSIGN, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)heap);
}

void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_JOIN_BLOCKING, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_THREAD_JOIN_SUCCESS : PSF_EVENT_THREAD_JOIN_TIMEOUT, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_thread_sleep_enter(k_timeout_t timeout) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SLEEP_ENTER, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SLEEP_EXIT, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_thread_msleep_enter(int32_t ms) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_MSLEEP_ENTER, (TraceUnsignedBaseType_t)ms);
}

void sys_trace_k_thread_msleep_exit(int32_t ms, int ret) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_MSLEEP_EXIT);
}

void sys_trace_k_thread_usleep_enter(int32_t us) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_USLEEP_ENTER, (TraceUnsignedBaseType_t)us);
}

void sys_trace_k_thread_usleep_exit(int32_t us, int ret) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_USLEEP_EXIT, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_BUSY_WAIT_ENTER, (TraceUnsignedBaseType_t)usec_to_wait);
}

void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait) {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_BUSY_WAIT_EXIT);
}

void sys_trace_k_thread_yield() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_YIELD);
}

void sys_trace_k_thread_wakeup(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_WAKEUP, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread) {
	/* Intentionally left empty, see sys_trace_k_thread_sched_abort for the implementation */
}

void sys_trace_k_thread_start(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_START, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_priority_set(struct k_thread *thread) {
	if (xTraceObjectSetStateWithoutHandle((void*)thread, k_thread_priority_get(thread)) == TRC_FAIL)
	{
		return;
	}

	(void)xTraceEventCreate2(PSF_EVENT_THREAD_SET_PRIORITY, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)k_thread_priority_get(thread));
}

void sys_trace_k_thread_suspend(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SUSPEND, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_resume(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_RESUME, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_name_set(struct k_thread *thread, int ret) {
	if (ret == 0) {
		xTraceObjectSetNameWithoutHandle((void*)thread, thread->name);
	}
}
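
/*
 * Usage sketch (hypothetical application code, assuming CONFIG_THREAD_NAME
 * is enabled): renaming a thread through the regular Zephyr API is picked
 * up by the hook above, e.g.:
 *
 *   k_tid_t tid = k_thread_create(...);
 *   k_thread_name_set(tid, "worker");  /- traced name becomes "worker" -/
 */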

void sys_trace_k_thread_switched_out(void) {
}

void sys_trace_k_thread_switched_in(void) {
	int prio = 0;
	k_tid_t cur = 0;

	cur = k_current_get(); /* Get cached value if available */
	if (!cur) {
		cur = k_sched_current_thread_query();
	}

	if (!cur) {
		return; /* Nothing we can do */
	}

	prio = k_thread_priority_get(cur);

	(void)xTraceTaskSwitch(cur, prio);
}

void sys_trace_k_thread_info(struct k_thread *thread) {
}

/* Thread scheduler trace function definitions */
void sys_trace_k_thread_sched_lock() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_SCHED_LOCK);
}

void sys_trace_k_thread_sched_unlock() {
	(void)xTraceEventCreate0(PSF_EVENT_THREAD_SCHED_UNLOCK);
}

void sys_trace_k_thread_sched_wakeup(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_WAKEUP, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_sched_abort(struct k_thread *thread) {
	TraceEntryHandle_t xEntryHandle;

#if (TRC_SEND_NAME_ONLY_ON_DELETE == 1)
	uint32_t uiNameLength;
#endif

	TRACE_ALLOC_CRITICAL_SECTION();
	TRACE_ENTER_CRITICAL_SECTION();

	/* Fetch entry handle */
	if (xTraceEntryFind((void*)thread, &xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

#if (TRC_SEND_NAME_ONLY_ON_DELETE == 1)
	if (strlen(thread->name) > 0) {
		/* Send name event because this is a delete */
		for (uiNameLength = 0; (thread->name[uiNameLength] != 0) && (uiNameLength < 128); uiNameLength++) {}

		/* Send the name event, if possible */
		(void)xTraceEventCreateData1(PSF_EVENT_OBJ_NAME, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t*)thread->name, uiNameLength + 1); /* +1 for termination */
	}
#endif /* (TRC_SEND_NAME_ONLY_ON_DELETE == 1) */

	/* Delete entry */
	if (xTraceEntryDelete(xEntryHandle) == TRC_FAIL)
	{
		TRACE_EXIT_CRITICAL_SECTION();
		return;
	}

	TRACE_EXIT_CRITICAL_SECTION();

	/* Remove thread from stack monitor */
	xTraceStackMonitorRemove((void*)thread);

	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_ABORT, (TraceUnsignedBaseType_t)thread);
}
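
/*
 * Note: with TRC_SEND_NAME_ONLY_ON_DELETE enabled, object names are not
 * streamed while the object lives; emitting the name just before the entry
 * is deleted is what still lets Tracealyzer resolve the thread's label when
 * the trace is analyzed.
 */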

void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio) {
	(void)xTraceEventCreate2(PSF_EVENT_THREAD_SCHED_PRIORITY_SET, (TraceUnsignedBaseType_t)thread, (TraceUnsignedBaseType_t)prio);
}

void sys_trace_k_thread_sched_ready(struct k_thread *thread) {
	xTraceTaskReady((void*)thread);
}

void sys_trace_k_thread_sched_pend(struct k_thread *thread) {

}

void sys_trace_k_thread_sched_resume(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_RESUME, (TraceUnsignedBaseType_t)thread);
}

void sys_trace_k_thread_sched_suspend(struct k_thread *thread) {
	(void)xTraceEventCreate1(PSF_EVENT_THREAD_SCHED_SUSPEND, (TraceUnsignedBaseType_t)thread);
}

/* Work trace function definitions */
void sys_trace_k_work_init(struct k_work *work, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_INIT, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)handler);
}

void sys_trace_k_work_submit_to_queue_enter(struct k_work_q *queue, struct k_work *work) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_SUBMIT_TO_QUEUE_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)work);
}

void sys_trace_k_work_submit_to_queue_exit(struct k_work_q *queue, struct k_work *work, int ret) {
	(void)xTraceEventCreate3(
		ret >= 0 ? PSF_EVENT_WORK_SUBMIT_TO_QUEUE_SUCCESS : PSF_EVENT_WORK_SUBMIT_TO_QUEUE_FAILURE,
		(TraceUnsignedBaseType_t)queue,
		(TraceUnsignedBaseType_t)work,
		(TraceUnsignedBaseType_t)ret
	);
}

void sys_trace_k_work_submit_enter(struct k_work *work) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_SUBMIT_BLOCKING, (TraceUnsignedBaseType_t)work);
}

void sys_trace_k_work_submit_exit(struct k_work *work, int ret) {
	(void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_WORK_SUBMIT_SUCCESS : PSF_EVENT_WORK_SUBMIT_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_flush_enter(struct k_work *work, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_FLUSH_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync);
}

void sys_trace_k_work_flush_blocking(struct k_work *work, struct k_work_sync *sync, k_timeout_t timeout) {

}

void sys_trace_k_work_flush_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_FLUSH_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_cancel_enter(struct k_work *work) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_CANCEL_BLOCKING, (TraceUnsignedBaseType_t)work);
}

void sys_trace_k_work_cancel_exit(struct k_work *work, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_CANCEL_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_cancel_sync_enter(struct k_work *work, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_CANCEL_SYNC_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync);
}

void sys_trace_k_work_cancel_sync_blocking(struct k_work *work, struct k_work_sync *sync) {

}

void sys_trace_k_work_cancel_sync_exit(struct k_work *work, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate3(PSF_EVENT_WORK_CANCEL_SYNC_SUCCESS, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)sync, (TraceUnsignedBaseType_t)ret);
}

/* Work queue trace function definitions */
void sys_trace_k_work_queue_start_enter(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	(void)xTraceEventCreate5(
		PSF_EVENT_WORK_QUEUE_START_BLOCKING,
		(TraceUnsignedBaseType_t)queue,
		(TraceUnsignedBaseType_t)stack,
		(TraceUnsignedBaseType_t)stack_size,
		(TraceUnsignedBaseType_t)prio,
		(TraceUnsignedBaseType_t)cfg
	);
}

void sys_trace_k_work_queue_start_exit(struct k_work_q *queue, k_thread_stack_t *stack, size_t stack_size, int prio, const struct k_work_queue_config *cfg) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_QUEUE_START_SUCCESS, (TraceUnsignedBaseType_t)queue);
}

void sys_trace_k_work_queue_drain_enter(struct k_work_q *queue, bool plug) {
	(void)xTraceEventCreate2(PSF_EVENT_WORK_QUEUE_DRAIN_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)plug);
}

void sys_trace_k_work_queue_drain_exit(struct k_work_q *queue, bool plug, int ret) {
	(void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_WORK_QUEUE_DRAIN_SUCCESS : PSF_EVENT_WORK_QUEUE_DRAIN_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_queue_unplug_enter(struct k_work_q *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_WORK_QUEUE_UNPLUG_BLOCKING, (TraceUnsignedBaseType_t)queue);
}

void sys_trace_k_work_queue_unplug_exit(struct k_work_q *queue, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_WORK_QUEUE_UNPLUG_SUCCESS : PSF_EVENT_WORK_QUEUE_UNPLUG_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

/* Work delayable trace function definitions */
void sys_trace_k_work_delayable_init(struct k_work_delayable *dwork, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_INIT, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)handler);
}

void sys_trace_k_work_schedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}

void sys_trace_k_work_schedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate4(((ret == 0) || (ret == 1)) ? PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FOR_QUEUE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_schedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_SCHEDULE_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks);
}

void sys_trace_k_work_schedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate2(((ret == 0) || (ret == 1)) ? PSF_EVENT_DWORK_SCHEDULE_SUCCESS : PSF_EVENT_DWORK_SCHEDULE_FAILURE, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_reschedule_for_queue_enter(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay) {
}

void sys_trace_k_work_reschedule_for_queue_exit(struct k_work_q *queue, struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate4(((ret == 0) || (ret == 1) || (ret == 2)) ? PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FOR_QUEUE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_reschedule_enter(struct k_work_delayable *dwork, k_timeout_t delay) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_RESCHEDULE_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)delay.ticks);
}

void sys_trace_k_work_reschedule_exit(struct k_work_delayable *dwork, k_timeout_t delay, int ret) {
	(void)xTraceEventCreate2(((ret == 0) || (ret == 1) || (ret == 2)) ? PSF_EVENT_DWORK_RESCHEDULE_SUCCESS : PSF_EVENT_DWORK_RESCHEDULE_FAILURE, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_flush_delayable_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_FLUSH_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)sync);
}

void sys_trace_k_work_flush_delayable_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_FLUSH_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_cancel_delayable_enter(struct k_work_delayable *dwork) {
}

void sys_trace_k_work_cancel_delayable_exit(struct k_work_delayable *dwork, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_cancel_delayable_sync_enter(struct k_work_delayable *dwork, struct k_work_sync *sync) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_BLOCKING, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)sync);
}

void sys_trace_cancel_delayable_sync_exit(struct k_work_delayable *dwork, struct k_work_sync *sync, bool ret) {
	(void)xTraceEventCreate2(PSF_EVENT_DWORK_CANCEL_DELAYABLE_SYNC_SUCCESS, (TraceUnsignedBaseType_t)dwork, (TraceUnsignedBaseType_t)ret);
}
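
/*
 * Note (assumption, based on Zephyr's documented k_work_schedule() and
 * k_work_reschedule() return values): 0 and 1 indicate the item was already
 * scheduled or has now been scheduled, and reschedule may additionally
 * return 2 when the handler was running and the item was requeued; any
 * other value is traced as a failure above.
 */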

/* Work poll trace function definitions */
void sys_trace_k_work_poll_init_enter(struct k_work_poll *work, k_work_handler_t handler) {
	(void)xTraceEventCreate2(PSF_EVENT_PWORK_INIT_ENTER, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)handler);
}

void sys_trace_k_work_poll_init_exit(struct k_work_poll *work, k_work_handler_t handler) {
	(void)xTraceEventCreate1(PSF_EVENT_PWORK_INIT_EXIT, (TraceUnsignedBaseType_t)work);
}

void sys_trace_k_work_poll_submit_to_queue_enter(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate5(
		PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_BLOCKING,
		(TraceUnsignedBaseType_t)work_q,
		(TraceUnsignedBaseType_t)work,
		(TraceUnsignedBaseType_t)events,
		(TraceUnsignedBaseType_t)num_events,
		(TraceUnsignedBaseType_t)timeout.ticks
	);
}

void sys_trace_k_work_poll_submit_to_queue_blocking(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {

}

void sys_trace_k_work_poll_submit_to_queue_exit(struct k_work_q *work_q, struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_SUCCESS : PSF_EVENT_PWORK_SUBMIT_TO_QUEUE_FAILURE, (TraceUnsignedBaseType_t)work_q, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_poll_submit_enter(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate4(PSF_EVENT_PWORK_SUBMIT_BLOCKING, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)num_events, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_work_poll_submit_exit(struct k_work_poll *work, struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PWORK_SUBMIT_SUCCESS : PSF_EVENT_PWORK_SUBMIT_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_work_poll_cancel_enter(struct k_work_poll *work) {
	(void)xTraceEventCreate1(PSF_EVENT_PWORK_CANCEL_BLOCKING, (TraceUnsignedBaseType_t)work);
}

void sys_trace_k_work_poll_cancel_exit(struct k_work_poll *work, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PWORK_CANCEL_SUCCESS : PSF_EVENT_PWORK_CANCEL_FAILURE, (TraceUnsignedBaseType_t)work, (TraceUnsignedBaseType_t)ret);
}

/* Poll API trace function definitions */
void sys_trace_k_poll_api_event_init(struct k_poll_event *event, uint32_t type, int mode, void *obj) {
	(void)xTraceEventCreate4(PSF_EVENT_POLL_EVENT_INIT, (TraceUnsignedBaseType_t)event, (TraceUnsignedBaseType_t)type, (TraceUnsignedBaseType_t)mode, (TraceUnsignedBaseType_t)obj);
}

void sys_trace_k_poll_api_event_poll_enter(struct k_poll_event *events, int num_events, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_POLL_POLL_BLOCKING, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)num_events, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_poll_api_event_poll_exit(struct k_poll_event *events, int num_events, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_POLL_POLL_SUCCESS : PSF_EVENT_POLL_POLL_FAILURE, (TraceUnsignedBaseType_t)events, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_poll_api_signal_init(struct k_poll_signal *signal) {
	(void)xTraceEventCreate1(PSF_EVENT_POLL_SIGNAL_INIT, (TraceUnsignedBaseType_t)signal);
}

void sys_trace_k_poll_api_signal_reset(struct k_poll_signal *signal) {
	(void)xTraceEventCreate1(PSF_EVENT_POLL_SIGNAL_RESET, (TraceUnsignedBaseType_t)signal);
}

void sys_trace_k_poll_api_signal_check(struct k_poll_signal *signal, unsigned int *signaled, int *result) {
	(void)xTraceEventCreate3(PSF_EVENT_POLL_SIGNAL_CHECK, (TraceUnsignedBaseType_t)signal, (TraceUnsignedBaseType_t)signaled, (TraceUnsignedBaseType_t)result);
}

void sys_trace_k_poll_api_signal_raise(struct k_poll_signal *signal, int result, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_POLL_SIGNAL_RAISE_SUCCESS : PSF_EVENT_POLL_SIGNAL_RAISE_FAILURE, (TraceUnsignedBaseType_t)signal, (TraceUnsignedBaseType_t)result, (TraceUnsignedBaseType_t)ret);
}

/* Semaphore trace function definitions */
void sys_trace_k_sem_init(struct k_sem *sem, unsigned int initial_count, unsigned int limit, int ret) {
	(void)xTraceEventCreate5(ret == 0 ? PSF_EVENT_SEMAPHORE_CREATE_SUCCESS : PSF_EVENT_SEMAPHORE_CREATE_TIMEOUT, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)initial_count, (TraceUnsignedBaseType_t)limit, (TraceUnsignedBaseType_t)sem->count, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_sem_give_enter(struct k_sem *sem) {
	(void)xTraceEventCreate2(PSF_EVENT_SEMAPHORE_GIVE_SUCCESS, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)sem->count);
}

void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout) {

}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_SEMAPHORE_TAKE_BLOCKING, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)timeout.ticks, (TraceUnsignedBaseType_t)sem->count);
}

void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_SEMAPHORE_TAKE_SUCCESS : PSF_EVENT_SEMAPHORE_TAKE_FAILED, (TraceUnsignedBaseType_t)sem, (TraceUnsignedBaseType_t)sem->count, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_sem_reset(struct k_sem *sem) {
	(void)xTraceEventCreate1(PSF_EVENT_SEMAPHORE_RESET, (TraceUnsignedBaseType_t)sem);
}
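
/*
 * Event flow sketch (hypothetical): a k_sem_take() call that has to wait
 * first emits SEMAPHORE_TAKE_BLOCKING from the _blocking hook above, then
 * SEMAPHORE_TAKE_SUCCESS or SEMAPHORE_TAKE_FAILED from the _exit hook,
 * depending on the return value.
 */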

/* Mutex trace function definitions */
void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_MUTEX_CREATE, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout) {
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_MUTEX_TAKE_BLOCKING, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MUTEX_TAKE_SUCCESS : PSF_EVENT_MUTEX_TAKE_FAILED, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex) {
}

void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MUTEX_GIVE_SUCCESS : PSF_EVENT_MUTEX_GIVE_FAILED, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

/* Condition variable trace function definitions */
void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_INIT, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar) {

}

void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar) {
	(void)xTraceEventCreate1(PSF_EVENT_CONDVAR_SIGNAL_BLOCKING, (TraceUnsignedBaseType_t)condvar);
}

void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_SIGNAL_SUCCESS, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar) {
	(void)xTraceEventCreate1(PSF_EVENT_CONDVAR_BROADCAST_ENTER, (TraceUnsignedBaseType_t)condvar);
}

void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret) {
	(void)xTraceEventCreate2(PSF_EVENT_CONDVAR_BROADCAST_EXIT, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout) {
	(void)xTraceEventCreate3(PSF_EVENT_CONDVAR_WAIT_BLOCKING, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex, k_timeout_t timeout, int ret) {
	(void)xTraceEventCreate3(ret == 0 ? PSF_EVENT_CONDVAR_WAIT_SUCCESS : PSF_EVENT_CONDVAR_WAIT_FAILURE, (TraceUnsignedBaseType_t)condvar, (TraceUnsignedBaseType_t)mutex, (TraceUnsignedBaseType_t)ret);
}

/* Queue trace function definitions */
void sys_trace_k_queue_init(struct k_queue *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_QUEUE_INIT, (TraceUnsignedBaseType_t)queue);
}

void sys_trace_k_queue_cancel_wait(struct k_queue *queue) {
	(void)xTraceEventCreate1(PSF_EVENT_QUEUE_CANCEL_WAIT, (TraceUnsignedBaseType_t)queue);
}

void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data) {
	/* Ignore non-alloc tracing of this event */
	if (!alloc) {
		return;
	}
}

void sys_trace_k_queue_queue_insert_blocking(struct k_queue *queue, bool alloc, void *data) {
	/* Ignore non-alloc tracing of this event */
	if (!alloc) {
		return;
	}

	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_QUEUE_INSERT_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret) {
	/* Ignore non-alloc tracing of this event */
	if (!alloc) {
		return;
	}

	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_QUEUE_INSERT_SUCCESS : PSF_EVENT_QUEUE_QUEUE_INSERT_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_APPEND, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data) {
}

void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_ALLOC_APPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_ALLOC_APPEND_SUCCESS : PSF_EVENT_QUEUE_ALLOC_APPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_prepend_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PREPEND, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_prepend_exit(struct k_queue *queue, void *data) {
}

void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_ALLOC_PREPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_ALLOC_PREPEND_SUCCESS : PSF_EVENT_QUEUE_ALLOC_PREPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_insert_enter(struct k_queue *queue, void *prev, void *data) {
	(void)xTraceEventCreate3(PSF_EVENT_QUEUE_INSERT, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)prev, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_insert_exit(struct k_queue *queue, void *prev, void *data) {
}

void sys_trace_k_queue_append_list_enter(struct k_queue *queue, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_QUEUE_APPEND_LIST_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_queue_append_list_exit(struct k_queue *queue, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_APPEND_LIST_SUCCESS : PSF_EVENT_QUEUE_APPEND_LIST_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_merge_slist_enter(struct k_queue *queue, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_MERGE_SLIST_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)list);
}

void sys_trace_k_queue_merge_slist_exit(struct k_queue *queue, sys_slist_t *list, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_QUEUE_MERGE_SLIST_SUCCESS : PSF_EVENT_QUEUE_MERGE_SLIST_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_GET_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret) {
	(void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_QUEUE_GET_SUCCESS : PSF_EVENT_QUEUE_GET_TIMEOUT, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_remove_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_REMOVE_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_remove_exit(struct k_queue *queue, void *data, bool ret) {
	(void)xTraceEventCreate2(ret ? PSF_EVENT_QUEUE_REMOVE_SUCCESS : PSF_EVENT_QUEUE_REMOVE_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_unique_append_enter(struct k_queue *queue, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_UNIQUE_APPEND_BLOCKING, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_queue_unique_append_exit(struct k_queue *queue, void *data, bool ret) {
	(void)xTraceEventCreate2(ret ? PSF_EVENT_QUEUE_UNIQUE_APPEND_SUCCESS : PSF_EVENT_QUEUE_UNIQUE_APPEND_FAILURE, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PEEK_HEAD, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_QUEUE_PEEK_TAIL, (TraceUnsignedBaseType_t)queue, (TraceUnsignedBaseType_t)ret);
}

/* FIFO trace function definitions */
void sys_trace_k_fifo_init_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_INIT_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_init_exit(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_INIT_EXIT, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_cancel_wait_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_CANCEL_WAIT_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_cancel_wait_exit(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_CANCEL_WAIT_EXIT, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_alloc_put_enter(struct k_fifo *fifo, void *data) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_ALLOC_PUT_BLOCKING, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_fifo_alloc_put_exit(struct k_fifo *fifo, void *data, int ret) {
	(void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_FIFO_ALLOC_PUT_SUCCESS : PSF_EVENT_FIFO_ALLOC_PUT_FAILURE, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_put_list_enter(struct k_fifo *fifo, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_FIFO_PUT_LIST_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_fifo_put_list_exit(struct k_fifo *fifo, void *head, void *tail) {
	(void)xTraceEventCreate3(PSF_EVENT_FIFO_PUT_LIST_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)head, (TraceUnsignedBaseType_t)tail);
}

void sys_trace_k_fifo_put_slist_enter(struct k_fifo *fifo, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_SLIST_ENTER, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)list);
}

void sys_trace_k_fifo_put_slist_exit(struct k_fifo *fifo, sys_slist_t *list) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PUT_SLIST_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)list);
}

void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_GET_BLOCKING, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret) {
	(void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_FIFO_GET_SUCCESS : PSF_EVENT_FIFO_GET_FAILURE, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_peek_head_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_PEEK_HEAD_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_peek_head_exit(struct k_fifo *fifo, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PEEK_HEAD_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_fifo_peek_tail_enter(struct k_fifo *fifo) {
	(void)xTraceEventCreate1(PSF_EVENT_FIFO_PEEK_TAIL_ENTER, (TraceUnsignedBaseType_t)fifo);
}

void sys_trace_k_fifo_peek_tail_exit(struct k_fifo *fifo, void *ret) {
	(void)xTraceEventCreate2(PSF_EVENT_FIFO_PEEK_TAIL_EXIT, (TraceUnsignedBaseType_t)fifo, (TraceUnsignedBaseType_t)ret);
}


/* LIFO trace function definitions */
void sys_trace_k_lifo_init_enter(struct k_lifo *lifo) {
    (void)xTraceEventCreate1(PSF_EVENT_LIFO_INIT_ENTER, (TraceUnsignedBaseType_t)lifo);
}

void sys_trace_k_lifo_init_exit(struct k_lifo *lifo) {
    (void)xTraceEventCreate1(PSF_EVENT_LIFO_INIT_EXIT, (TraceUnsignedBaseType_t)lifo);
}

void sys_trace_k_lifo_put_enter(struct k_lifo *lifo, void *data) {
    (void)xTraceEventCreate2(PSF_EVENT_LIFO_PUT_ENTER, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_put_exit(struct k_lifo *lifo, void *data) {
    (void)xTraceEventCreate2(PSF_EVENT_LIFO_PUT_EXIT, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_alloc_put_enter(struct k_lifo *lifo, void *data) {
    (void)xTraceEventCreate2(PSF_EVENT_LIFO_ALLOC_PUT_BLOCKING, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_lifo_alloc_put_exit(struct k_lifo *lifo, void *data, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_LIFO_ALLOC_PUT_SUCCESS : PSF_EVENT_LIFO_ALLOC_PUT_FAILURE, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_lifo_get_enter(struct k_lifo *lifo, k_timeout_t timeout) {
    (void)xTraceEventCreate2(PSF_EVENT_LIFO_GET_BLOCKING, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_lifo_get_exit(struct k_lifo *lifo, k_timeout_t timeout, void *ret) {
    (void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_LIFO_GET_SUCCESS : PSF_EVENT_LIFO_GET_FAILURE, (TraceUnsignedBaseType_t)lifo, (TraceUnsignedBaseType_t)ret);
}
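
/*
 * Illustrative only: a hypothetical k_lifo_alloc_put() call as seen by the
 * hooks above. The exit hook maps ret == 0 to PSF_EVENT_LIFO_ALLOC_PUT_SUCCESS
 * and any nonzero result (e.g. -ENOMEM from the internal allocation) to
 * PSF_EVENT_LIFO_ALLOC_PUT_FAILURE, storing ret as the second parameter.
 */
#if 0
K_LIFO_DEFINE(example_lifo);

static void example_lifo_producer(void *payload)
{
    int ret = k_lifo_alloc_put(&example_lifo, payload);

    (void)ret;
}
#endif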


/* Stack trace function definitions */
void sys_trace_k_stack_init(struct k_stack *stack, stack_data_t *buffer, uint32_t num_entries) {
    (void)xTraceEventCreate3(PSF_EVENT_STACK_INIT, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)buffer, (TraceUnsignedBaseType_t)num_entries);
}

void sys_trace_k_stack_alloc_init_enter(struct k_stack *stack, uint32_t num_entries) {
    (void)xTraceEventCreate2(PSF_EVENT_STACK_ALLOC_INIT_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)num_entries);
}

void sys_trace_k_stack_alloc_init_exit(struct k_stack *stack, uint32_t num_entries, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_ALLOC_INIT_SUCCESS : PSF_EVENT_STACK_ALLOC_INIT_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_stack_cleanup_enter(struct k_stack *stack) {
    (void)xTraceEventCreate1(PSF_EVENT_STACK_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)stack);
}

void sys_trace_k_stack_cleanup_exit(struct k_stack *stack, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_CLEANUP_SUCCESS : PSF_EVENT_STACK_CLEANUP_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_stack_push_enter(struct k_stack *stack, stack_data_t data) {
    (void)xTraceEventCreate2(PSF_EVENT_STACK_PUSH_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)data);
}

void sys_trace_k_stack_push_exit(struct k_stack *stack, stack_data_t data, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_PUSH_SUCCESS : PSF_EVENT_STACK_PUSH_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_stack_pop_blocking(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_STACK_POP_BLOCKING, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)data, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_stack_pop_exit(struct k_stack *stack, stack_data_t *data, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_STACK_POP_SUCCESS : PSF_EVENT_STACK_POP_FAILURE, (TraceUnsignedBaseType_t)stack, (TraceUnsignedBaseType_t)ret);
}
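
/*
 * Illustrative only: a hypothetical push onto a full k_stack. k_stack_push()
 * returns -ENOMEM when no space is left, which the push exit hook above
 * records as PSF_EVENT_STACK_PUSH_FAILURE together with the return value.
 */
#if 0
K_STACK_DEFINE(example_stack, 4);

static void example_stack_push(void)
{
    int ret = k_stack_push(&example_stack, (stack_data_t)0x1234);

    (void)ret;
}
#endif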


/* Message queue trace function definitions */
void sys_trace_k_msgq_init(struct k_msgq *msgq) {
    (void)xTraceEventCreate4(PSF_EVENT_MESSAGEQUEUE_INIT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)msgq->buffer_start, (TraceUnsignedBaseType_t)msgq->msg_size, (TraceUnsignedBaseType_t)msgq->max_msgs);
}

void sys_trace_k_msgq_alloc_init_enter(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs) {
    (void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)msg_size, (TraceUnsignedBaseType_t)max_msgs);
}

void sys_trace_k_msgq_alloc_init_exit(struct k_msgq *msgq, size_t msg_size, uint32_t max_msgs, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_SUCCESS : PSF_EVENT_MESSAGEQUEUE_ALLOC_INIT_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_msgq_cleanup_enter(struct k_msgq *msgq) {
    (void)xTraceEventCreate1(PSF_EVENT_MESSAGEQUEUE_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)msgq);
}

void sys_trace_k_msgq_cleanup_exit(struct k_msgq *msgq, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_CLEANUP_SUCCESS : PSF_EVENT_MESSAGEQUEUE_CLEANUP_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_msgq_put_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_msgq_put_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_PUT_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)data, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_msgq_put_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_PUT_SUCCESS : PSF_EVENT_MESSAGEQUEUE_PUT_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_msgq_get_enter(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_msgq_get_blocking(struct k_msgq *msgq, const void *data, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_MESSAGEQUEUE_GET_BLOCKING, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)data, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_msgq_get_exit(struct k_msgq *msgq, const void *data, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MESSAGEQUEUE_GET_SUCCESS : PSF_EVENT_MESSAGEQUEUE_GET_TIMEOUT, (TraceUnsignedBaseType_t)msgq, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_msgq_peek(struct k_msgq *msgq, void *data, int ret) {
    (void)xTraceEventCreate3(
        ret == 0 ? PSF_EVENT_MESSAGEQUEUE_PEEK_SUCCESS : PSF_EVENT_MESSAGEQUEUE_PEEK_FAILED,
        (TraceUnsignedBaseType_t)msgq,
        (TraceUnsignedBaseType_t)data,
        (TraceUnsignedBaseType_t)ret
    );
}

void sys_trace_k_msgq_purge(struct k_msgq *msgq) {
    (void)xTraceEventCreate1(PSF_EVENT_MESSAGEQUEUE_PURGE, (TraceUnsignedBaseType_t)msgq);
}
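
/*
 * Illustrative only: a hypothetical non-blocking k_msgq_put(). With K_NO_WAIT
 * the entry hook stores nothing (see sys_trace_k_msgq_put_enter above), while
 * the exit hook records PSF_EVENT_MESSAGEQUEUE_PUT_SUCCESS for ret == 0 and
 * PSF_EVENT_MESSAGEQUEUE_PUT_TIMEOUT for any other result.
 */
#if 0
K_MSGQ_DEFINE(example_msgq, sizeof(uint32_t), 8, 4);

static void example_msgq_send(uint32_t value)
{
    int ret = k_msgq_put(&example_msgq, &value, K_NO_WAIT);

    (void)ret;
}
#endif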


/* Mailbox trace function definitions */
void sys_trace_k_mbox_init(struct k_mbox *mbox) {
    (void)xTraceEventCreate1(PSF_EVENT_MAILBOX_INIT, (TraceUnsignedBaseType_t)mbox);
}

void sys_trace_k_mbox_message_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_mbox_message_put_blocking(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_MAILBOX_MESSAGE_PUT_BLOCKING, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)tx_msg, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_mbox_message_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MAILBOX_MESSAGE_PUT_SUCCESS : PSF_EVENT_MAILBOX_MESSAGE_PUT_FAILURE, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_mbox_put_enter(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_MAILBOX_PUT_BLOCKING, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)tx_msg, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_mbox_put_exit(struct k_mbox *mbox, struct k_mbox_msg *tx_msg, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MAILBOX_PUT_SUCCESS : PSF_EVENT_MAILBOX_PUT_FAILURE, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_mbox_async_put_enter(struct k_mbox *mbox, struct k_sem *sem) {
    (void)xTraceEventCreate2(PSF_EVENT_MAILBOX_ASYNC_PUT_ENTER, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)sem);
}

void sys_trace_k_mbox_async_put_exit(struct k_mbox *mbox, struct k_sem *sem) {
    (void)xTraceEventCreate2(PSF_EVENT_MAILBOX_ASYNC_PUT_EXIT, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)sem);
}

void sys_trace_k_mbox_get_enter(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_mbox_get_blocking(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout) {
    (void)xTraceEventCreate4(
        PSF_EVENT_MAILBOX_GET_BLOCKING,
        (TraceUnsignedBaseType_t)mbox,
        (TraceUnsignedBaseType_t)rx_msg,
        (TraceUnsignedBaseType_t)buffer,
        (TraceUnsignedBaseType_t)timeout.ticks
    );
}

void sys_trace_k_mbox_get_exit(struct k_mbox *mbox, struct k_mbox_msg *rx_msg, void *buffer, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_MAILBOX_GET_SUCCESS : PSF_EVENT_MAILBOX_GET_TIMEOUT, (TraceUnsignedBaseType_t)mbox, (TraceUnsignedBaseType_t)ret);
}

/* @note: Hook not implemented in the Zephyr kernel */
void sys_trace_k_mbox_data_get(struct k_mbox_msg *rx_msg, void *buffer) {
    (void)xTraceEventCreate2(PSF_EVENT_MAILBOX_DATA_GET, (TraceUnsignedBaseType_t)rx_msg, (TraceUnsignedBaseType_t)buffer);
}
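
/*
 * Illustrative only: a hypothetical synchronous mailbox send. The put hooks
 * above store the mailbox, the address of the k_mbox_msg descriptor and the
 * timeout in ticks, then map the result to a PUT_SUCCESS or PUT_FAILURE event.
 */
#if 0
K_MBOX_DEFINE(example_mbox);

static void example_mbox_send(void *payload, size_t size)
{
    struct k_mbox_msg tx_msg = {
        .size = size,
        .tx_data = payload,
        .tx_target_thread = K_ANY,
    };

    (void)k_mbox_put(&example_mbox, &tx_msg, K_FOREVER);
}
#endif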


/* Pipe trace function definitions */
void sys_trace_k_pipe_init(struct k_pipe *pipe, unsigned char *buffer, size_t size) {
    (void)xTraceEventCreate3(PSF_EVENT_PIPE_INIT, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)buffer, (TraceUnsignedBaseType_t)size);
}

void sys_trace_k_pipe_cleanup_enter(struct k_pipe *pipe) {
    (void)xTraceEventCreate1(PSF_EVENT_PIPE_CLEANUP_BLOCKING, (TraceUnsignedBaseType_t)pipe);
}

void sys_trace_k_pipe_cleanup_exit(struct k_pipe *pipe, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PIPE_CLEANUP_SUCCESS : PSF_EVENT_PIPE_CLEANUP_FAILURE, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_pipe_alloc_init_enter(struct k_pipe *pipe, size_t size) {
    (void)xTraceEventCreate2(PSF_EVENT_PIPE_ALLOC_INIT_BLOCKING, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)size);
}

void sys_trace_k_pipe_alloc_init_exit(struct k_pipe *pipe, size_t size, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PIPE_ALLOC_INIT_SUCCESS : PSF_EVENT_PIPE_ALLOC_INIT_FAILURE, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_pipe_put_enter(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_pipe_put_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout) {
    (void)xTraceEventCreate6(
        PSF_EVENT_PIPE_PUT_BLOCKING,
        (TraceUnsignedBaseType_t)pipe,
        (TraceUnsignedBaseType_t)data,
        (TraceUnsignedBaseType_t)bytes_to_write,
        (TraceUnsignedBaseType_t)bytes_written,
        (TraceUnsignedBaseType_t)min_xfer,
        (TraceUnsignedBaseType_t)timeout.ticks
    );
}

void sys_trace_k_pipe_put_exit(struct k_pipe *pipe, void *data, size_t bytes_to_write, size_t *bytes_written, size_t min_xfer, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PIPE_PUT_SUCCESS : PSF_EVENT_PIPE_PUT_TIMEOUT, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_pipe_get_enter(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_pipe_get_blocking(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout) {
    (void)xTraceEventCreate6(
        PSF_EVENT_PIPE_GET_BLOCKING,
        (TraceUnsignedBaseType_t)pipe,
        (TraceUnsignedBaseType_t)data,
        (TraceUnsignedBaseType_t)bytes_to_read,
        (TraceUnsignedBaseType_t)bytes_read,
        (TraceUnsignedBaseType_t)min_xfer,
        (TraceUnsignedBaseType_t)timeout.ticks
    );
}

void sys_trace_k_pipe_get_exit(struct k_pipe *pipe, void *data, size_t bytes_to_read, size_t *bytes_read, size_t min_xfer, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret == 0 ? PSF_EVENT_PIPE_GET_SUCCESS : PSF_EVENT_PIPE_GET_TIMEOUT, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_pipe_reset_enter(struct k_pipe *pipe) {
    /* The reset is recorded as a single event on exit. */
}

void sys_trace_k_pipe_reset_exit(struct k_pipe *pipe) {
    (void)xTraceEventCreate1(PSF_EVENT_PIPE_RESET, (TraceUnsignedBaseType_t)pipe);
}

void sys_trace_k_pipe_close_enter(struct k_pipe *pipe) {
    /* The close is recorded as a single event on exit. */
}

void sys_trace_k_pipe_close_exit(struct k_pipe *pipe) {
    (void)xTraceEventCreate1(PSF_EVENT_PIPE_CLOSE, (TraceUnsignedBaseType_t)pipe);
}

void sys_trace_k_pipe_write_enter(struct k_pipe *pipe, void *data, size_t bytes_to_write, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_pipe_write_blocking(struct k_pipe *pipe, k_timeout_t timeout) {
    (void)xTraceEventCreate2(PSF_EVENT_PIPE_WRITE_BLOCKING, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_pipe_write_exit(struct k_pipe *pipe, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_PIPE_WRITE_SUCCESS : PSF_EVENT_PIPE_WRITE_TIMEOUT, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_pipe_read_enter(struct k_pipe *pipe, void *data, size_t bytes_to_read, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_pipe_read_blocking(struct k_pipe *pipe, k_timeout_t timeout) {
    (void)xTraceEventCreate2(PSF_EVENT_PIPE_READ_BLOCKING, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_pipe_read_exit(struct k_pipe *pipe, k_timeout_t timeout, int ret) {
    (void)xTraceEventCreate2(ret >= 0 ? PSF_EVENT_PIPE_READ_SUCCESS : PSF_EVENT_PIPE_READ_TIMEOUT, (TraceUnsignedBaseType_t)pipe, (TraceUnsignedBaseType_t)timeout.ticks);
}
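
/*
 * Illustrative only, and assuming the classic k_pipe_put() API that matches
 * the put/get hooks above (newer Zephyr versions replace it with
 * k_pipe_write()/k_pipe_read(), covered by the write/read hooks): failing to
 * move at least min_xfer bytes within the timeout would surface as
 * PSF_EVENT_PIPE_PUT_TIMEOUT on exit.
 */
#if 0
K_PIPE_DEFINE(example_pipe, 64, 4);

static void example_pipe_send(const uint8_t *buf, size_t len)
{
    size_t written = 0;

    (void)k_pipe_put(&example_pipe, (void *)buf, len, &written, len, K_MSEC(10));
}
#endif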

/* Memory heap trace function definitions */
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes) {
    (void)xTraceEventCreate3(PSF_EVENT_KHEAP_INIT, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)mem, (TraceUnsignedBaseType_t)bytes);
}

void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_KHEAP_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)bytes, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret) {
    (void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_KHEAP_ALLOC_SUCCESS : PSF_EVENT_KHEAP_ALLOC_FAILURE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout) {
    (void)xTraceEventCreate4(PSF_EVENT_KHEAP_ALIGNED_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)bytes, (TraceUnsignedBaseType_t)timeout.ticks, (TraceUnsignedBaseType_t)0);
}

void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, bool blocked_alloc, void *ret) {
    if (ret == NULL) {
        (void)xTraceEventCreate2(PSF_EVENT_KHEAP_ALIGNED_ALLOC_FAILURE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
    } else if (blocked_alloc) {
        (void)xTraceEventCreate2(PSF_EVENT_KHEAP_ALIGNED_ALLOC_SUCCESS_BLOCKED, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
    } else {
        (void)xTraceEventCreate3(PSF_EVENT_KHEAP_ALIGNED_ALLOC_SUCCESS, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)bytes, (TraceUnsignedBaseType_t)ret);
    }
}

void sys_trace_k_heap_free(struct k_heap *h, void *mem) {
    (void)xTraceEventCreate2(PSF_EVENT_KHEAP_FREE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)mem);
}

void sys_trace_k_heap_sys_k_aligned_alloc_enter(struct k_heap *h, size_t align, size_t size) {
    (void)xTraceEventCreate3(PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)align, (TraceUnsignedBaseType_t)size);
}

void sys_trace_k_heap_sys_k_aligned_alloc_exit(struct k_heap *h, size_t align, size_t size, void *ret) {
    (void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_SUCCESS : PSF_EVENT_KHEAP_SYS_ALIGNED_ALLOC_FAILURE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_heap_sys_k_malloc_enter(struct k_heap *h, size_t size) {
    (void)xTraceEventCreate2(PSF_EVENT_KHEAP_SYS_MALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)size);
}

void sys_trace_k_heap_sys_k_malloc_exit(struct k_heap *h, size_t size, void *ret) {
    (void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_KHEAP_SYS_MALLOC_SUCCESS : PSF_EVENT_KHEAP_SYS_MALLOC_FAILURE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}

void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h) {
    (void)xTraceEventCreate1(PSF_EVENT_KHEAP_SYS_FREE_ENTER, (TraceUnsignedBaseType_t)h);
}

void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h) {
    (void)xTraceEventCreate1(PSF_EVENT_KHEAP_SYS_FREE_EXIT, (TraceUnsignedBaseType_t)h);
}

void sys_trace_k_heap_sys_k_enter(struct k_heap *h, size_t nmemb, size_t size) {
    /* No event is recorded for this hook. */
}

void sys_trace_k_heap_sys_k_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {
    /* No event is recorded for this hook. */
}

void sys_trace_k_heap_sys_k_calloc_enter(struct k_heap *h, size_t nmemb, size_t size) {
    (void)xTraceEventCreate3(PSF_EVENT_KHEAP_SYS_CALLOC_BLOCKING, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)nmemb, (TraceUnsignedBaseType_t)size);
}

void sys_trace_k_heap_sys_k_calloc_exit(struct k_heap *h, size_t nmemb, size_t size, void *ret) {
    (void)xTraceEventCreate2(ret != NULL ? PSF_EVENT_KHEAP_SYS_CALLOC_SUCCESS : PSF_EVENT_KHEAP_SYS_CALLOC_FAILURE, (TraceUnsignedBaseType_t)h, (TraceUnsignedBaseType_t)ret);
}
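
/*
 * Illustrative only: a hypothetical bounded k_heap allocation. The alloc
 * hooks above record the heap, the requested size and the timeout in ticks
 * on entry, then select KHEAP_ALLOC_SUCCESS or KHEAP_ALLOC_FAILURE from the
 * returned pointer; the free hook records the heap and the freed block.
 */
#if 0
K_HEAP_DEFINE(example_heap, 1024);

static void example_heap_roundtrip(void)
{
    void *mem = k_heap_alloc(&example_heap, 128, K_MSEC(5));

    if (mem != NULL) {
        k_heap_free(&example_heap, mem);
    }
}
#endif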


/* Memory slab trace function definitions */
void sys_trace_k_mem_slab_init(struct k_mem_slab *slab, void *buffer, size_t block_size, uint32_t num_blocks, int ret) {
    (void)xTraceEventCreate5(
        ret == 0 ? PSF_EVENT_MEMORY_SLAB_INIT_SUCCESS : PSF_EVENT_MEMORY_SLAB_INIT_FAILURE,
        (TraceUnsignedBaseType_t)slab,
        (TraceUnsignedBaseType_t)slab->buffer,
        (TraceUnsignedBaseType_t)slab->info.block_size,
        (TraceUnsignedBaseType_t)slab->info.num_blocks,
        (TraceUnsignedBaseType_t)ret
    );
}

void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {
    /* No event is recorded on entry; the blocking and exit hooks carry the information. */
}

void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout) {
    (void)xTraceEventCreate3(PSF_EVENT_MEMORY_SLAB_ALLOC_BLOCKING, (TraceUnsignedBaseType_t)slab, (TraceUnsignedBaseType_t)mem, (TraceUnsignedBaseType_t)timeout.ticks);
}

void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout, int ret) {
    if (ret == 0) {
        (void)xTraceEventCreate5(
            PSF_EVENT_MEMORY_SLAB_ALLOC_SUCCESS,
            (TraceUnsignedBaseType_t)slab,
            (TraceUnsignedBaseType_t)mem,
            (TraceUnsignedBaseType_t)timeout.ticks,
            (TraceUnsignedBaseType_t)ret,
            (TraceUnsignedBaseType_t)slab->info.num_blocks
        );
    } else if (ret == -ENOMEM || ret == -EAGAIN) {
        (void)xTraceEventCreate4(
            PSF_EVENT_MEMORY_SLAB_ALLOC_TIMEOUT,
            (TraceUnsignedBaseType_t)slab,
            (TraceUnsignedBaseType_t)mem,
            (TraceUnsignedBaseType_t)timeout.ticks,
            (TraceUnsignedBaseType_t)ret
        );
    } else {
        (void)xTraceEventCreate4(
            PSF_EVENT_MEMORY_SLAB_ALLOC_ERROR,
            (TraceUnsignedBaseType_t)slab,
            (TraceUnsignedBaseType_t)mem,
            (TraceUnsignedBaseType_t)timeout.ticks,
            (TraceUnsignedBaseType_t)ret
        );
    }
}

void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void **mem) {
    (void)xTraceEventCreate3(PSF_EVENT_MEMORY_SLAB_FREE, (TraceUnsignedBaseType_t)slab, (TraceUnsignedBaseType_t)mem, (TraceUnsignedBaseType_t)slab->info.num_blocks);
}
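
/*
 * Illustrative only: a hypothetical slab allocation. Note the three-way exit
 * mapping above: ret == 0 yields ALLOC_SUCCESS (with the slab's block count
 * as an extra parameter), -ENOMEM and -EAGAIN yield ALLOC_TIMEOUT, and any
 * other error yields ALLOC_ERROR.
 */
#if 0
K_MEM_SLAB_DEFINE(example_slab, 64, 8, 4);

static void example_slab_alloc(void)
{
    void *block;
    int ret = k_mem_slab_alloc(&example_slab, &block, K_NO_WAIT);

    (void)ret;
}
#endif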


/* Timer trace function definitions */
void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn, k_timer_expiry_t stop_fn) {
    (void)xTraceEventCreate3(PSF_EVENT_TIMER_INIT, (TraceUnsignedBaseType_t)timer, (TraceUnsignedBaseType_t)expiry_fn, (TraceUnsignedBaseType_t)stop_fn);
}

void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration, k_timeout_t period) {
    (void)xTraceEventCreate3(PSF_EVENT_TIMER_START, (TraceUnsignedBaseType_t)timer, (TraceUnsignedBaseType_t)duration.ticks, (TraceUnsignedBaseType_t)period.ticks);
}

void sys_trace_k_timer_stop(struct k_timer *timer) {
    (void)xTraceEventCreate1(PSF_EVENT_TIMER_STOP, (TraceUnsignedBaseType_t)timer);
}

void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer) {
    (void)xTraceEventCreate1(PSF_EVENT_TIMER_STATUS_SYNC_AWAIT, (TraceUnsignedBaseType_t)timer);
}

void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result) {
    (void)xTraceEventCreate2(PSF_EVENT_TIMER_STATUS_SYNC_EXIT, (TraceUnsignedBaseType_t)timer, (TraceUnsignedBaseType_t)result);
}
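
/*
 * Illustrative only: a hypothetical periodic timer. The start hook above
 * records the duration and period via their .ticks fields, so the trace
 * shows both values already converted to kernel ticks.
 */
#if 0
static void example_timer_expired(struct k_timer *timer)
{
    ARG_UNUSED(timer);
}

K_TIMER_DEFINE(example_timer, example_timer_expired, NULL);

static void example_timer_arm(void)
{
    k_timer_start(&example_timer, K_SECONDS(1), K_SECONDS(1));
}
#endif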


/* Syscall trace function definitions */
void sys_trace_syscall_enter(uint32_t id, const char *name) {
#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
    if (xTraceIsRecorderEnabled())
        xSyscallsExtensionEnter(id);
#else
    (void)xTraceEventCreateData1(PSF_EVENT_SYSTEM_SYSCALL_ENTER, (TraceUnsignedBaseType_t)id, (TraceUnsignedBaseType_t*)name, strlen(name) + 1);
#endif
}

void sys_trace_syscall_exit(uint32_t id, const char *name) {
#if (TRC_CFG_USE_SYSCALL_EXTENSION == 1)
    if (xTraceIsRecorderEnabled())
        xSyscallsExtensionExit(id);
#else
    (void)xTraceEventCreate1(PSF_EVENT_SYSTEM_SYSCALL_EXIT, 0);
#endif
}
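
/*
 * Note on the non-extension fallback above: the syscall name is stored as
 * variable-size event payload (strlen(name) + 1 bytes, including the NUL
 * terminator) on entry, whereas the exit event carries no identifying data.
 */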


/* Legacy trace functions that are pending refactoring/removal by
 * the Zephyr team.
 */
void sys_trace_isr_enter(void) {
}

void sys_trace_isr_exit(void) {
}

void sys_trace_isr_exit_to_scheduler(void) {
}

void sys_trace_idle(void) {
}

void sys_trace_void(unsigned int id) {
    (void)id;
}
