// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//         Artur Kloniecki <arturx.kloniecki@linux.intel.com>
//         Karol Trzcinski <karolx.trzcinski@linux.intel.com>

#include <sof/debug/panic.h>
#include <sof/ipc/msg.h>
#include <sof/ipc/topology.h>
#include <sof/drivers/timer.h>
#include <sof/lib/alloc.h>
#include <sof/lib/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/mailbox.h>
#include <sof/lib/memory.h>
#include <sof/platform.h>
#include <sof/string.h>
#include <sof/sof.h>
#include <sof/spinlock.h>
#include <sof/trace/dma-trace.h>
#include <sof/trace/preproc.h>
#include <sof/trace/trace.h>
#include <ipc/topology.h>
#include <user/trace.h>
#include <stdarg.h>
#include <stdint.h>

extern struct tr_ctx dt_tr;

#if CONFIG_TRACE_FILTERING_ADAPTIVE
struct recent_log_entry {
	uint32_t entry_id;
	uint64_t message_ts;
	uint64_t first_suppression_ts;
	uint32_t trigger_count;
};

struct recent_trace_context {
	struct recent_log_entry recent_entries[CONFIG_TRACE_RECENT_ENTRIES_COUNT];
};
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

/** MAILBOX_TRACE_BASE ring buffer */
struct trace {
	uintptr_t pos; /**< offset of the next byte to write */
	uint32_t enable;
#if CONFIG_TRACE_FILTERING_ADAPTIVE
	bool user_filter_override;	/* whether filtering was overridden by user or not */
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
	spinlock_t lock; /* locking mechanism */

#if CONFIG_TRACE_FILTERING_ADAPTIVE
	struct recent_trace_context trace_core_context[CONFIG_CORE_COUNT];
#endif
};
/* calculates total message size, both header and payload, in bytes */
#define MESSAGE_SIZE(args_num)	\
	(sizeof(struct log_entry_header) + args_num * sizeof(uint32_t))

/* calculates total message size in dwords */
#define MESSAGE_SIZE_DWORDS(args_num)	\
	(MESSAGE_SIZE(args_num) / sizeof(uint32_t))

/* calculates offset to param_idx of payload */
#define PAYLOAD_OFFSET(param_idx) \
	(MESSAGE_SIZE_DWORDS(0) + param_idx)
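/*
 * Worked example: a message with two arguments occupies
 * MESSAGE_SIZE(2) == sizeof(struct log_entry_header) + 2 * sizeof(uint32_t)
 * bytes, and its arguments live at dword indices PAYLOAD_OFFSET(0) and
 * PAYLOAD_OFFSET(1), i.e. immediately after the header.
 */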

#define TRACE_ID_MASK ((1 << TRACE_ID_LENGTH) - 1)

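/** Fills a log_entry_header at @dst: records the dictionary addresses of the
 * UUID entry and of the log entry itself, the masked context ids, the core id
 * and a timestamp corrected by the platform timer delta (so that messages
 * emitted very early, before the timer is fully set up, still get a sane
 * time base).
 */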
static void put_header(void *dst, const struct sof_uuid_entry *uid,
		       uint32_t id_1, uint32_t id_2,
		       uint32_t entry, uint64_t timestamp)
{
	struct timer *timer = timer_get();
	/* Support very early tracing */
	uint64_t delta = timer ? timer->delta : 0;
	struct log_entry_header header;
	int ret;

	header.uid = (uintptr_t)uid;
	header.id_0 = id_1 & TRACE_ID_MASK;
	header.id_1 = id_2 & TRACE_ID_MASK;
	header.core_id = cpu_get_id();
	header.timestamp = timestamp + delta;
	header.log_entry_address = entry;

	ret = memcpy_s(dst, sizeof(header), &header, sizeof(header));
	assert(!ret);
}

/** Appends a message to the MAILBOX_TRACE_BASE ring buffer, wrapping back to
 * the start when the remaining space is too small.
 */
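/* Note: non-atomic callers serialize access with trace->lock (see
 * vatrace_log() and mtrace_dict_entry()); atomic-context callers write
 * directly.
 */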
void mtrace_event(const char *data, uint32_t length)
{
	struct trace *trace = trace_get();
	char *t = (char *)MAILBOX_TRACE_BASE;
	const int32_t available = MAILBOX_TRACE_SIZE - trace->pos;

	if (available < length) { /* wrap */
		memset(t + trace->pos, 0xff, available);
		dcache_writeback_region(t + trace->pos, available);
		trace->pos = 0;
	}

	memcpy_s(t + trace->pos, MAILBOX_TRACE_SIZE - trace->pos,
		 data, length);
	dcache_writeback_region(t + trace->pos, length);
	trace->pos += length;
}

#if CONFIG_TRACE_FILTERING_VERBOSITY
/**
 * \brief Runtime trace filtering based on verbosity level
 * \param lvl log level (LOG_LEVEL_ERROR, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG, ...)
 * \param ctx trace context holding the verbosity threshold
 * \return false when trace is filtered out, otherwise true
 */
static inline bool trace_filter_verbosity(uint32_t lvl, const struct tr_ctx *ctx)
{
	STATIC_ASSERT(LOG_LEVEL_CRITICAL < LOG_LEVEL_VERBOSE,
		      LOG_LEVEL_CRITICAL_MUST_HAVE_LOWEST_VALUE);
	return lvl <= ctx->level;
}
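/* Example: a message passes only when its level is numerically less than or
 * equal to ctx->level. Assuming the usual ordering (CRITICAL lowest, then
 * ERROR/WARNING/INFO/DEBUG, VERBOSE highest), a context set to LOG_LEVEL_INFO
 * lets CRITICAL..INFO messages through and drops DEBUG and VERBOSE ones.
 */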
#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */

#if CONFIG_TRACE_FILTERING_ADAPTIVE
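/* Emits a single summary line reporting how many repetitions of the given log
 * entry were suppressed beyond the allowed burst, then clears the slot for
 * reuse.
 */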
static void emit_recent_entry(struct recent_log_entry *entry)
{
	_log_message(trace_log_unfiltered, false, LOG_LEVEL_INFO, _TRACE_INV_CLASS, &dt_tr,
		     _TRACE_INV_ID, _TRACE_INV_ID, "Suppressed %u similar messages: %pQ",
		     entry->trigger_count - CONFIG_TRACE_BURST_COUNT,
		     (void *)entry->entry_id);

	memset(entry, 0, sizeof(*entry));
}

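/* Scans this core's recent-entry table and retires entries that have been
 * quiet for longer than CONFIG_TRACE_RECENT_TIME_THRESHOLD, emitting a
 * suppression summary for those that actually exceeded the burst limit.
 */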
static void emit_recent_entries(uint64_t current_ts)
{
	struct trace *trace = trace_get();
	struct recent_log_entry *recent_entries =
		trace->trace_core_context[cpu_get_id()].recent_entries;
	int i;

	/* Check if any tracked entries were dormant long enough to unsuppress them */
	for (i = 0; i < CONFIG_TRACE_RECENT_ENTRIES_COUNT; i++) {
		if (recent_entries[i].entry_id) {
			if (current_ts - recent_entries[i].message_ts >
			    CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
				if (recent_entries[i].trigger_count > CONFIG_TRACE_BURST_COUNT)
					emit_recent_entry(&recent_entries[i]);
				else
					memset(&recent_entries[i], 0, sizeof(recent_entries[i]));
			}
		}
	}
}

/**
 * \brief Runtime trace flood suppression
 * \param log_level log verbosity level
 * \param entry ID of log entry
 * \param message_ts message timestamp
 * \return false when trace is filtered out, true otherwise
 */
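/* Suppression policy in short: the first CONFIG_TRACE_BURST_COUNT occurrences
 * of an entry within CONFIG_TRACE_RECENT_TIME_THRESHOLD pass through; further
 * repeats are only counted. The counter is flushed as a summary message once
 * the entry goes quiet, exceeds CONFIG_TRACE_RECENT_MAX_TIME, or its slot is
 * recycled for a new entry.
 */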
static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t message_ts)
{
	struct trace *trace = trace_get();
	struct recent_log_entry *recent_entries =
		trace->trace_core_context[cpu_get_id()].recent_entries;
	struct recent_log_entry *oldest_entry = recent_entries;
	int i;
	bool ret;

	/* don't attempt to suppress debug messages using this method, it would be ineffective */
	if (log_level >= LOG_LEVEL_DEBUG)
		return true;

	/* check if same log entry was sent recently */
	for (i = 0; i < CONFIG_TRACE_RECENT_ENTRIES_COUNT; i++) {
		/* in the meantime identify the oldest or an empty entry, for reuse later */
		if (oldest_entry->first_suppression_ts > recent_entries[i].first_suppression_ts)
			oldest_entry = &recent_entries[i];

		if (recent_entries[i].entry_id == entry) {
			if (message_ts - recent_entries[i].first_suppression_ts <
			    CONFIG_TRACE_RECENT_MAX_TIME &&
			    message_ts - recent_entries[i].message_ts <
			    CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
				recent_entries[i].trigger_count++;
				/* refresh suppression timer */
				recent_entries[i].message_ts = message_ts;

				ret = recent_entries[i].trigger_count <= CONFIG_TRACE_BURST_COUNT;

				return ret;
			}

			if (recent_entries[i].trigger_count > CONFIG_TRACE_BURST_COUNT)
				emit_recent_entry(&recent_entries[i]);
			else
				memset(&recent_entries[i], 0, sizeof(recent_entries[i]));
			return true;
		}
	}

	/* Make room for tracking a new entry by emitting the oldest one in the filter */
	if (oldest_entry->entry_id && oldest_entry->trigger_count > CONFIG_TRACE_BURST_COUNT)
		emit_recent_entry(oldest_entry);

	oldest_entry->entry_id = entry;
	oldest_entry->message_ts = message_ts;
	oldest_entry->first_suppression_ts = message_ts;
	oldest_entry->trigger_count = 1;

	return true;
}
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

/** Implementation shared by both the filtered and unfiltered entry points.
 * Serializes events into trace messages and passes them to dtrace_event(),
 * to mtrace_event(), or to both, depending on the log level and the Kconfig
 * options.
 */
static void vatrace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
{
	uint32_t data[MESSAGE_SIZE_DWORDS(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
	const int message_size = MESSAGE_SIZE(arg_count);
	int i;

#if CONFIG_TRACEM
	unsigned long flags;
	struct trace *trace = trace_get();
#endif /* CONFIG_TRACEM */

	/* fill log content; arg_count is also stored in the dictionary entry */
	put_header(data, ctx->uuid_p, id_1, id_2, log_entry,
		   platform_safe_get_time(timer_get()));

	for (i = 0; i < arg_count; ++i)
		data[PAYLOAD_OFFSET(i)] = va_arg(vargs, uint32_t);

	/* send the event over the DMA trace */
	if (send_atomic)
		dtrace_event_atomic((const char *)data, message_size);
	else
		dtrace_event((const char *)data, message_size);

#if CONFIG_TRACEM
	/* send the event via the mailbox too */
	if (send_atomic) {
		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
	} else {
		spin_lock_irq(&trace->lock, flags);
		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
		spin_unlock_irq(&trace->lock, flags);
	}
#else
	/* send the event via the mailbox only if the level is LOG_LEVEL_CRITICAL */
	if (lvl == LOG_LEVEL_CRITICAL)
		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
#endif /* CONFIG_TRACEM */
}

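/* Entry point that bypasses all runtime filtering: the message is emitted as
 * long as tracing is enabled. Used e.g. by emit_recent_entry(), so suppression
 * summaries cannot themselves be suppressed.
 */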
void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
{
	struct trace *trace = trace_get();
	va_list vl;

	if (!trace || !trace->enable)
		return;

	va_start(vl, arg_count);
	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
	va_end(vl);
}

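/* Standard filtered entry point: drops the message early when tracing is
 * disabled, then applies verbosity filtering and, unless the user has
 * overridden it via a trace filter update, adaptive flood suppression.
 */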
void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
{
	struct trace *trace = trace_get();
	va_list vl;
#if CONFIG_TRACE_FILTERING_ADAPTIVE
	uint64_t current_ts;
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

	if (!trace || !trace->enable)
		return;

#if CONFIG_TRACE_FILTERING_VERBOSITY
	if (!trace_filter_verbosity(lvl, ctx))
		return;
#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */

#if CONFIG_TRACE_FILTERING_ADAPTIVE
	if (!trace->user_filter_override) {
		current_ts = platform_safe_get_time(timer_get());

		emit_recent_entries(current_ts);

		if (!trace_filter_flood(lvl, (uint32_t)log_entry, current_ts))
			return;
	}
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

	va_start(vl, arg_count);
	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
	va_end(vl);
}

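/* Parses one filter set out of an IPC element array into *filter. Each set is
 * a sequence of key/value elements terminated by SOF_IPC_TRACE_FILTER_ELEM_FIN
 * and must carry a new log level. Returns a pointer just past the parsed set
 * so the caller can iterate, or NULL on a malformed set.
 */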
struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem,
						    struct sof_ipc_trace_filter_elem *end,
						    struct trace_filter *filter)
{
	filter->log_level = -1;
	filter->uuid_id = 0;
	filter->comp_id = -1;
	filter->pipe_id = -1;

	while (elem <= end) {
		switch (elem->key & SOF_IPC_TRACE_FILTER_ELEM_TYPE_MASK) {
		case SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL:
			filter->log_level = elem->value;
			break;
		case SOF_IPC_TRACE_FILTER_ELEM_BY_UUID:
			filter->uuid_id = elem->value;
			break;
		case SOF_IPC_TRACE_FILTER_ELEM_BY_COMP:
			filter->comp_id = elem->value;
			break;
		case SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE:
			filter->pipe_id = elem->value;
			break;
		default:
			tr_err(&ipc_tr, "Invalid SOF_IPC_TRACE_FILTER_ELEM 0x%x",
			       elem->key);
			return NULL;
		}

		/* each filter set must be terminated with FIN flag and have new log level */
		if (elem->key & SOF_IPC_TRACE_FILTER_ELEM_FIN) {
			if (filter->log_level < 0) {
				tr_err(&ipc_tr, "Each trace filter set must specify new log level");
				return NULL;
			} else {
				return elem + 1;
			}
		}

		++elem;
	}

	tr_err(&ipc_tr, "Trace filter elements set is not properly terminated");
	return NULL;
}

/* update global components whose tr_ctx is stored inside a special section */
static int trace_filter_update_global(int32_t log_level, uint32_t uuid_id)
{
	int cnt = 0;
#if !defined(__ZEPHYR__) && !defined(CONFIG_LIBRARY)
	extern void *_trace_ctx_start;
	extern void *_trace_ctx_end;
	struct tr_ctx *ptr = (struct tr_ctx *)&_trace_ctx_start;
	struct tr_ctx *end = (struct tr_ctx *)&_trace_ctx_end;

	/* iterate over global `tr_ctx` entries located in their own section */
	/* cppcheck-suppress comparePointers */
	while (ptr < end) {
		/*
		 * when looking for specific uuid element,
		 * then find, update and stop searching
		 */
		if ((uintptr_t)ptr->uuid_p == uuid_id) {
			ptr->level = log_level;
			return 1;
		}
		/* otherwise each element should be updated */
		if (!uuid_id) {
			ptr->level = log_level;
			++cnt;
		}
		++ptr;
	}
#endif

	return cnt;
}

/* return trace context from any ipc component type */
static struct tr_ctx *trace_filter_ipc_comp_context(struct ipc_comp_dev *icd)
{
	switch (icd->type) {
	case COMP_TYPE_COMPONENT:
		return &icd->cd->tctx;
	case COMP_TYPE_BUFFER:
		return &icd->cb->tctx;
	case COMP_TYPE_PIPELINE:
		return &icd->pipeline->tctx;
	/* each COMP_TYPE must be specified */
	default:
		tr_err(&ipc_tr, "Unknown trace context for ipc component type 0x%X",
		       icd->type);
		return NULL;
	}
}

/* update ipc components whose tr_ctx may be read from the ipc_comp_dev structure */
static int trace_filter_update_instances(int32_t log_level, uint32_t uuid_id,
					 int32_t pipe_id, int32_t comp_id)
{
	struct ipc *ipc = ipc_get();
	struct ipc_comp_dev *icd;
	struct list_item *clist;
	struct tr_ctx *ctx;
	bool correct_comp;
	int cnt = 0;

	/* compare each ipc component with filter settings and update log level after pass */
	list_for_item(clist, &ipc->comp_list) {
		icd = container_of(clist, struct ipc_comp_dev, list);
		ctx = trace_filter_ipc_comp_context(icd);
		if (!ctx)
			return -EINVAL;
		correct_comp = comp_id == -1 || icd->id == comp_id; /* todo: icd->topo_id */
		correct_comp &= uuid_id == 0 || (uintptr_t)ctx->uuid_p == uuid_id;
		correct_comp &= pipe_id == -1 || ipc_comp_pipe_id(icd) == pipe_id;
		if (correct_comp) {
			ctx->level = log_level;
			++cnt;
		}
	}
	return cnt;
}

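/* Applies a parsed trace filter: disables adaptive suppression (the user now
 * controls filtering explicitly), validates the requested log level, then
 * updates matching global contexts and/or per-instance contexts. Returns the
 * number of updated contexts, or -EINVAL when the level is out of range or
 * nothing matched.
 */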
int trace_filter_update(const struct trace_filter *filter)
{
	int ret = 0;
#if CONFIG_TRACE_FILTERING_ADAPTIVE
	struct trace *trace = trace_get();

	trace->user_filter_override = true;
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

	/* validate log level, LOG_LEVEL_CRITICAL has low value, LOG_LEVEL_VERBOSE high */
	if (filter->log_level < LOG_LEVEL_CRITICAL ||
	    filter->log_level > LOG_LEVEL_VERBOSE)
		return -EINVAL;

	/* update `*`, `name*` or global `name` */
	if (filter->pipe_id == -1 && filter->comp_id == -1)
		ret = trace_filter_update_global(filter->log_level, filter->uuid_id);

	/* update `*`, `name*`, `nameX.*` or `nameX.Y`, `name` may be '*' */
	ret += trace_filter_update_instances(filter->log_level, filter->uuid_id,
					     filter->pipe_id, filter->comp_id);
	return ret > 0 ? ret : -EINVAL;
}
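/* Illustrative caller, assuming an IPC handler has received an array of
 * filter elements (a hypothetical sketch, not the actual handler code;
 * first_elem and elem_count are made-up names):
 *
 *	struct trace_filter filter;
 *	struct sof_ipc_trace_filter_elem *elem = first_elem;
 *	struct sof_ipc_trace_filter_elem *end = first_elem + elem_count - 1;
 *
 *	while (elem && elem <= end) {
 *		elem = trace_filter_fill(elem, end, &filter);
 *		if (!elem)
 *			return -EINVAL;
 *		if (trace_filter_update(&filter) < 0)
 *			return -EINVAL;
 *	}
 */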

/** Sends all pending DMA messages to mailbox (for emergencies) */
void trace_flush_dma_to_mbox(void)
{
	struct trace *trace = trace_get();
	volatile uint64_t *t;
	uint32_t flags;

	spin_lock_irq(&trace->lock, flags);

	/* get mailbox position */
	t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos);

	/* flush dma trace messages */
	dma_trace_flush((void *)t);

	spin_unlock_irq(&trace->lock, flags);
}

void trace_on(void)
{
	struct trace *trace = trace_get();
	uint32_t flags;

	spin_lock_irq(&trace->lock, flags);

	trace->enable = 1;
	dma_trace_on();

	spin_unlock_irq(&trace->lock, flags);
}

void trace_off(void)
{
	struct trace *trace = trace_get();
	uint32_t flags;

	spin_lock_irq(&trace->lock, flags);

	trace->enable = 0;
	dma_trace_off();

	spin_unlock_irq(&trace->lock, flags);
}

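/* Allocates and initializes the global trace state: clears the write position,
 * resets the user-override flag, initializes the lock, zeroes the mailbox
 * trace region (unless Zephyr already owns it) and performs early DMA trace
 * initialization.
 */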
void trace_init(struct sof *sof)
{
	sof->trace = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->trace));
	sof->trace->pos = 0;
#if CONFIG_TRACE_FILTERING_ADAPTIVE
	sof->trace->user_filter_override = false;
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
	spinlock_init(&sof->trace->lock);

#ifndef __ZEPHYR__
	/* Zephyr owns and has already initialized this buffer (and
	 * likely has already logged to it by the time we get here).
	 * Don't touch.
	 */
	bzero((void *)MAILBOX_TRACE_BASE, MAILBOX_TRACE_SIZE);
	dcache_writeback_invalidate_region((void *)MAILBOX_TRACE_BASE,
					   MAILBOX_TRACE_SIZE);
#endif

	dma_trace_init_early(sof);
}

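/* Builds a message from a log dictionary entry address plus up to
 * _TRACE_EVENT_MAX_ARGUMENT_COUNT arguments and writes it straight to the
 * mailbox trace buffer, taking the trace lock unless called from atomic
 * context.
 */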
void mtrace_dict_entry(bool atomic_context, uint32_t dict_entry_address, int n_args, ...)
{
	va_list ap;
	int i;
	char packet[MESSAGE_SIZE(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
	uint32_t *args = (uint32_t *)&packet[MESSAGE_SIZE(0)];
	const uint64_t tstamp = platform_safe_get_time(timer_get());

	put_header(packet, dt_tr.uuid_p, _TRACE_INV_ID, _TRACE_INV_ID,
		   dict_entry_address, tstamp);

	va_start(ap, n_args);
	for (i = 0; i < MIN(n_args, _TRACE_EVENT_MAX_ARGUMENT_COUNT); i++)
		args[i] = va_arg(ap, uint32_t);
	va_end(ap);

	if (atomic_context) {
		mtrace_event(packet, MESSAGE_SIZE(n_args));
	} else {
		struct trace * const trace = trace_get();
		uint32_t saved_flags;

		spin_lock_irq(&trace->lock, saved_flags);
		mtrace_event(packet, MESSAGE_SIZE(n_args));
		spin_unlock_irq(&trace->lock, saved_flags);
	}
}