1 /*
2  * Copyright (c) 2019 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DISABLE_SYSCALL_TRACING
8 
9 #include <zephyr/kernel.h>
10 #include <tracing_test.h>
11 #include <tracing_test_syscall.h>
12 #include <zephyr/tracing/tracing_format.h>
13 
/* Trace hook: a thread is being switched out; logs the outgoing thread. */
void sys_trace_k_thread_switched_out(void)
{
	/* Query and log in one step; no need for a named temporary. */
	TRACING_STRING("%s: %p\n", __func__, k_sched_current_thread_query());
}
21 
/* Trace hook: a thread has been switched in; logs the incoming thread. */
void sys_trace_k_thread_switched_in(void)
{
	/* Query and log in one step; no need for a named temporary. */
	TRACING_STRING("%s: %p\n", __func__, k_sched_current_thread_query());
}
29 
/*
 * Thread lifecycle and scheduler trace hooks for the test tracing backend.
 * Each hook simply emits its own name plus the affected thread pointer.
 */
void sys_trace_k_thread_priority_set(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

/* stack_size and prio are intentionally not logged by this test backend. */
void sys_trace_k_thread_create(struct k_thread *thread, size_t stack_size,
			       int prio)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_start(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_resume_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_ready(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_abort(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_resume(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_suspend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}
95 
/*
 * Sleep / busy-wait trace hooks. Only the hook name is emitted; the
 * timeout/duration arguments and return codes are intentionally not logged
 * by this test backend.
 */
void sys_trace_k_thread_sleep_enter(k_timeout_t timeout)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_sleep_exit(k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_enter(int32_t us)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_usleep_exit(int32_t us, int ret)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_enter(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_k_thread_busy_wait_exit(uint32_t usec_to_wait)
{
	TRACING_STRING("%s\n", __func__);
}
125 
/* Thread abort/yield/wakeup/pend trace hooks: log name and thread pointer. */
void sys_trace_k_thread_abort_enter(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_abort_exit(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

/* Parameterless variant: the yielding thread is not identified here. */
void sys_trace_k_thread_yield(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_thread_yield(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_wakeup(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_pend(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_info(struct k_thread *thread)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

/* The name-set result code (ret) is not logged by this test backend. */
void sys_trace_k_thread_name_set(struct k_thread *thread, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, thread);
}

void sys_trace_k_thread_sched_lock(void)
{
	TRACING_STRING("%s\n", __func__);
}

/*
 * NOTE(review): named sys_port_trace_* unlike its sched_lock counterpart
 * above; presumably this matches the tracing macro that invokes it —
 * confirm against zephyr/tracing/tracing.h before any rename.
 */
void sys_port_trace_k_thread_sched_unlock(void)
{
	TRACING_STRING("%s\n", __func__);
}
175 
176 
/*
 * k_thread_join() trace hooks: log the joined thread and the timeout in
 * raw scheduler ticks (truncated to 32 bits for printing).
 */
void sys_trace_k_thread_join_blocking(struct k_thread *thread, k_timeout_t timeout)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}

/* The join result code (ret) is not logged by this test backend. */
void sys_trace_k_thread_join_exit(struct k_thread *thread, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s %p, timeout: %u\n", __func__, thread, (uint32_t)timeout.ticks);
}
186 
/* Interrupt trace hooks: emit only the hook name. */
void sys_trace_isr_enter(void)
{
	TRACING_STRING("%s\n", __func__);
}

void sys_trace_isr_exit(void)
{
	TRACING_STRING("%s\n", __func__);
}

/* ISR exit path that returns to the scheduler rather than the interrupted thread. */
void sys_trace_isr_exit_to_scheduler(void)
{
	TRACING_STRING("%s\n", __func__);
}
201 
/*
 * Idle-thread trace hooks. Compiled to no-ops unless CONFIG_TRACING_IDLE is
 * enabled, since idle entry/exit can be extremely frequent.
 */
void sys_trace_idle(void)
{
#ifdef CONFIG_TRACING_IDLE
	TRACING_STRING("%s\n", __func__);
#endif
}

void sys_trace_idle_exit(void)
{
#ifdef CONFIG_TRACING_IDLE
	TRACING_STRING("%s\n", __func__);
#endif
}
215 
/*
 * Condition-variable trace hooks: log name and condvar pointer. Timeouts
 * and result codes (ret) are intentionally not logged by this test backend.
 */
void sys_trace_k_condvar_broadcast_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_broadcast_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_init(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_enter(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_blocking(struct k_condvar *condvar)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_signal_exit(struct k_condvar *condvar, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_enter(struct k_condvar *condvar, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}

void sys_trace_k_condvar_wait_exit(struct k_condvar *condvar, k_timeout_t timeout,
				   int ret)
{
	TRACING_STRING("%s: %p\n", __func__, condvar);
}
256 
257 
/*
 * Semaphore trace hooks: log name and semaphore pointer; take-path hooks
 * also log the timeout in raw ticks (truncated to 32 bits).
 */
void sys_trace_k_sem_init(struct k_sem *sem, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}
void sys_trace_k_sem_give_enter(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}

void sys_trace_k_sem_take_enter(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

/* The take result code (ret) is not logged by this test backend. */
void sys_trace_k_sem_take_exit(struct k_sem *sem, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_take_blocking(struct k_sem *sem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, sem, (uint32_t)timeout.ticks);
}

void sys_trace_k_sem_reset(struct k_sem *sem)
{
	TRACING_STRING("%s: %p\n", __func__, sem);
}
286 
/*
 * Mutex trace hooks: log name, mutex pointer, and (where available) the
 * timeout in raw ticks and/or the operation's return code.
 */
void sys_trace_k_mutex_init(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, returns %d\n", __func__, mutex, ret);
}

void sys_trace_k_mutex_lock_enter(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_lock_exit(struct k_mutex *mutex, k_timeout_t timeout, int ret)
{
	TRACING_STRING("%s: %p, timeout: %u, returns: %d\n", __func__, mutex,
		       (uint32_t)timeout.ticks, ret);
}

void sys_trace_k_mutex_lock_blocking(struct k_mutex *mutex, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p, timeout: %u\n", __func__, mutex, (uint32_t)timeout.ticks);
}

void sys_trace_k_mutex_unlock_enter(struct k_mutex *mutex)
{
	TRACING_STRING("%s: %p\n", __func__, mutex);
}


void sys_trace_k_mutex_unlock_exit(struct k_mutex *mutex, int ret)
{
	TRACING_STRING("%s: %p, return: %d\n", __func__, mutex, ret);
}
318 
/* Trace hook: scheduler applied a new priority to a thread. */
void sys_trace_k_thread_sched_set_priority(struct k_thread *thread, int prio)
{
	TRACING_STRING("%s: %p, priority: %d\n", __func__, thread, prio);
}
323 
/*
 * Trace hook: k_timer_start(). Logs the timer pointer plus the duration
 * and period in raw ticks (truncated to 32 bits for printing).
 *
 * Fix: the tick values are cast to uint32_t, so they must be printed with
 * %u rather than %d — %d with an unsigned argument is a format/argument
 * sign mismatch and is inconsistent with every other ticks log in this
 * file (see the sem/mutex/join hooks, which all use %u).
 */
void sys_trace_k_timer_start(struct k_timer *timer, k_timeout_t duration,
			     k_timeout_t period)
{
	TRACING_STRING("%s: %p, duration: %u, period: %u\n", __func__, timer,
		(uint32_t)duration.ticks, (uint32_t)period.ticks);
}
330 
/*
 * Timer trace hooks: log name and timer pointer. Callback pointers and the
 * status-sync result are intentionally not logged by this test backend.
 */
void sys_trace_k_timer_init(struct k_timer *timer, k_timer_expiry_t expiry_fn,
			    k_timer_expiry_t stop_fn)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_stop(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}
void sys_trace_k_timer_status_sync_blocking(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_status_sync_exit(struct k_timer *timer, uint32_t result)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_expiry_enter(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_expiry_exit(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_stop_fn_expiry_enter(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}

void sys_trace_k_timer_stop_fn_expiry_exit(struct k_timer *timer)
{
	TRACING_STRING("%s: %p\n", __func__, timer);
}
370 
/*
 * k_heap trace hooks: log name and heap pointer only. Sizes, timeouts and
 * the resulting allocation pointers are intentionally not logged.
 */
void sys_trace_k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_enter(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_calloc_enter(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_free(struct k_heap *h, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_realloc_enter(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}
void sys_trace_k_heap_realloc_exit(struct k_heap *h, void *ptr, size_t bytes, k_timeout_t timeout,
				   void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_helper_blocking(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_alloc_exit(struct k_heap *h, size_t bytes, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_calloc_exit(struct k_heap *h, size_t num, size_t size, k_timeout_t timeout,
				  void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_aligned_alloc_exit(struct k_heap *h, size_t bytes,
					 k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_enter(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}

void sys_trace_k_heap_sys_k_free_exit(struct k_heap *h, struct k_heap **hr)
{
	TRACING_STRING("%s: %p\n", __func__, h);
}
437 
/*
 * k_queue trace hooks: log name and queue pointer only. Data pointers,
 * timeouts and result codes are intentionally not logged.
 */
void sys_trace_k_queue_init(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_cancel_wait(struct k_queue *queue)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_append_exit(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_enter(struct k_queue *queue, bool alloc, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_queue_insert_exit(struct k_queue *queue, bool alloc, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_blocking(struct k_queue *queue, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_get_exit(struct k_queue *queue, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_head(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_peek_tail(struct k_queue *queue, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_append_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_enter(struct k_queue *queue, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}

void sys_trace_k_queue_alloc_prepend_exit(struct k_queue *queue, void *data, int ret)
{
	TRACING_STRING("%s: %p\n", __func__, queue);
}
507 
508 
/*
 * Memory-slab trace hooks: log name and slab pointer only; the block
 * pointer, timeout and result code are intentionally not logged.
 */
void sys_trace_k_mem_slab_alloc_enter(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_blocking(struct k_mem_slab *slab, void **mem, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_alloc_exit(struct k_mem_slab *slab, void **mem, k_timeout_t timeout,
				     int ret)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_enter(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}

void sys_trace_k_mem_slab_free_exit(struct k_mem_slab *slab, void *mem)
{
	TRACING_STRING("%s: %p\n", __func__, slab);
}
534 
/* FIFO trace hooks: log name and fifo pointer only. */
void sys_trace_k_fifo_put_enter(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_put_exit(struct k_fifo *fifo, void *data)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_enter(struct k_fifo *fifo, k_timeout_t timeout)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}

void sys_trace_k_fifo_get_exit(struct k_fifo *fifo, k_timeout_t timeout, void *ret)
{
	TRACING_STRING("%s: %p\n", __func__, fifo);
}
554 
/*
 * Syscall trace hooks: log the syscall name and numeric id.
 * (DISABLE_SYSCALL_TRACING is defined at the top of this file so these
 * hooks do not recursively trace themselves.)
 */
void sys_trace_syscall_enter(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) enter\n", __func__, syscall_name, syscall_id);
}

void sys_trace_syscall_exit(uint32_t syscall_id, const char *syscall_name)
{
	TRACING_STRING("%s: %s (%u) exit\n", __func__, syscall_name, syscall_id);
}
564 
/*
 * k_thread_foreach_unlocked() trace hooks: log the callback and its user
 * data pointer. NOTE(review): printing a function pointer with %p is not
 * strictly portable C, but is accepted on the platforms this test targets.
 */
void sys_trace_k_thread_foreach_unlocked_enter(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) enter\n", __func__, user_cb, data);
}

void sys_trace_k_thread_foreach_unlocked_exit(k_thread_user_cb_t user_cb, void *data)
{
	TRACING_STRING("%s: %p (%p) exit\n", __func__, user_cb, data);
}
574 
/*
 * k_event trace hooks: log name and event pointer. The event bits, masks,
 * options, timeouts and return values are intentionally not logged.
 */
void sys_trace_k_event_init(struct k_event *event)
{
	TRACING_STRING("%s: %p init\n", __func__, event);
}

void sys_trace_k_event_post_enter(struct k_event *event, uint32_t events, uint32_t events_mask)
{
	TRACING_STRING("%s: %p post enter\n", __func__, event);
}

void sys_trace_k_event_post_exit(struct k_event *event, uint32_t events, uint32_t events_mask)
{
	TRACING_STRING("%s: %p post exit\n", __func__, event);
}

void sys_trace_k_event_wait_enter(struct k_event *event, uint32_t events, unsigned int options,
				  k_timeout_t timeout)
{
	TRACING_STRING("%s: %p wait enter\n", __func__, event);
}

void sys_trace_k_event_wait_blocking(struct k_event *event, uint32_t events, unsigned int options,
				     k_timeout_t timeout)
{
	TRACING_STRING("%s: %p wait blocking\n", __func__, event);
}

void sys_trace_k_event_wait_exit(struct k_event *event, uint32_t events, uint32_t ret)
{
	TRACING_STRING("%s: %p wait exit\n", __func__, event);
}
606