/*
 * Copyright (c) 2019 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DISABLE_SYSCALL_TRACING

#include <zephyr/kernel.h>
#include <tracing_test.h>
#include <tracing_test_syscall.h>
#include <zephyr/tracing/tracing_format.h>

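/*
 * Test tracing backend: every hook below simply emits its own function
 * name (plus the traced object's address and, where useful, the timeout
 * ticks or return value) through TRACING_STRING, so the tracing tests
 * can verify that each hook fired.
 */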
void sys_trace_k_thread_switched_out(void)
{
	struct k_thread *thread;

	thread = k_current_get();
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_switched_in(void)
{
	struct k_thread *thread;

	thread = k_current_get();
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_priority_set(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size,
			       int prio)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_start(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sleep_enter(k_timeout_t timeout)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_enter(int32_t us)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_exit(int32_t us, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_abort_enter(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_yield(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_thread_yield(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_wakeup(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_info(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_name_set(struct k_thread *thread, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_lock(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_port_trace_k_thread_sched_unlock(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}

void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}

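/* ISR and idle hooks */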
void sys_trace_isr_enter(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit_to_scheduler(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_idle(void)
{
	TRACING_STRING("%s\n", __func__);
}

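/* Condition variable hooks */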
void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, struct k_mutex *mutex,
				    k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, struct k_mutex *mutex,
				   k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

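/* Semaphore hooks */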
void sys_trace_k_sem_init(struct k_sem *sem, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_give_enter(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_reset(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

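/* Mutex hooks */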
void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, returns %d\n", __func__, mutex, ret);
}

void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u, returns: %d\n", __func__, mutex,
		       (uint32_t)timeout.ticks, ret);
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex)
{
	TRACING_STRING("%s: %p\n", __func__, mutex);
}

void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, return: %d\n", __func__, mutex, ret);
}

void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio)
{
	TRACING_STRING("%s: %p, priority: %d\n", __func__, thread, prio);
}

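/* Timer hooks */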
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			     k_timeout_t period)
{
	TRACING_STRING("%s: %p, duration: %u, period: %u\n", __func__, timer,
		(uint32_t)duration.ticks, (uint32_t)period.ticks);
}

void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
			    k_timer_stop_t stop_fn)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_stop(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

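/* Heap hooks */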
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_free(struct k_heap *h, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes,
					 k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

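/* Queue hooks */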
void sys_trace_k_queue_init(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_cancel_wait(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

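/* Memory slab hooks */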
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout,
				     int ret)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_enter(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

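/* FIFO hooks */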
void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

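/* System call hooks */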
void sys_trace_syscall_enter(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) enter\n", __func__, syscall_name, syscall_id);
}

void sys_trace_syscall_exit(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) exit\n", __func__, syscall_name, syscall_id);
}

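/* k_thread_foreach_unlocked hooks */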
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) enter\n", __func__, user_cb, data);
}

void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) exit\n", __func__, user_cb, data);
}