// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Yan Wang <yan.wang@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/common.h>
#include <sof/debug/panic.h>
#include <sof/ipc/msg.h>
#include <sof/lib/alloc.h>
#include <sof/lib/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dma.h>
#include <sof/lib/memory.h>
#include <sof/lib/uuid.h>
#include <sof/platform.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>
#include <sof/sof.h>
#include <sof/spinlock.h>
#include <sof/string.h>
#include <sof/trace/dma-trace.h>
#include <ipc/topology.h>
#include <ipc/trace.h>
#include <kernel/abi.h>
#include <user/abi_dbg.h>
#include <version.h>

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

/* 58782c63-1326-4185-8459-22272e12d1f1 */
DECLARE_SOF_UUID("dma-trace", dma_trace_uuid, 0x58782c63, 0x1326, 0x4185,
		 0x84, 0x59, 0x22, 0x27, 0x2e, 0x12, 0xd1, 0xf1);

DECLARE_TR_CTX(dt_tr, SOF_UUID(dma_trace_uuid), LOG_LEVEL_INFO);

/* 2b972272-c5b1-4b7e-926f-0fc5cb4c4690 */
DECLARE_SOF_UUID("dma-trace-task", dma_trace_task_uuid, 0x2b972272, 0xc5b1,
		 0x4b7e, 0x92, 0x6f, 0x0f, 0xc5, 0xcb, 0x4c, 0x46, 0x90);

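/* dma_trace_get_avail_data() has two implementations below: a
 * CONFIG_DMA_GW variant that leans on the DMA gateway's wrap-mode copy,
 * and a generic GPDMA variant that copies in sections around the
 * buffer wrap points.
 */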
static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail);

/** Periodically runs and starts the DMA even when the buffer is not
 * full.
 */
static enum task_state trace_work(void *data)
{
	struct dma_trace_data *d = data;
	struct dma_trace_buf *buffer = &d->dmatb;
	struct dma_sg_config *config = &d->config;
	unsigned long flags;
	uint32_t avail = buffer->avail;
	int32_t size;
	uint32_t overflow;

	/* make sure we don't write more than the buffer can hold */
	if (avail > DMA_TRACE_LOCAL_SIZE) {
		overflow = avail - DMA_TRACE_LOCAL_SIZE;
		avail = DMA_TRACE_LOCAL_SIZE;
	} else {
		overflow = 0;
	}

	/* The DMA gateway supports wrap-mode copy, but GPDMA doesn't,
	 * so do it differently based on HW features.
	 */
	size = dma_trace_get_avail_data(d, buffer, avail);

	/* any data to copy ? */
	if (size == 0) {
		return SOF_TASK_STATE_RESCHEDULE;
	}

	d->posn.overflow = overflow;

	/* DMA trace copying is working */
	d->copy_in_progress = 1;

	/* copy this section to host */
	size = dma_copy_to_host_nowait(&d->dc, config, d->posn.host_offset,
				       buffer->r_ptr, size);
	if (size < 0) {
		tr_err(&dt_tr, "trace_work(): dma_copy_to_host_nowait() failed");
		goto out;
	}

	/* update host pointer and check for wrap */
	d->posn.host_offset += size;
	if (d->posn.host_offset >= d->host_size)
		d->posn.host_offset -= d->host_size;

	/* update local pointer and check for wrap */
	buffer->r_ptr = (char *)buffer->r_ptr + size;
	if (buffer->r_ptr >= buffer->end_addr)
		buffer->r_ptr = (char *)buffer->r_ptr - DMA_TRACE_LOCAL_SIZE;

out:
	spin_lock_irq(&d->lock, flags);

	/* disregard any old messages and don't resend them if we overflow */
	if (size > 0) {
		if (d->posn.overflow)
			buffer->avail = DMA_TRACE_LOCAL_SIZE - size;
		else
			buffer->avail -= size;
	}

	/* DMA trace copying is done, allow reschedule */
	d->copy_in_progress = 0;

	spin_unlock_irq(&d->lock, flags);

	/* reschedule the trace copying work */
	return SOF_TASK_STATE_RESCHEDULE;
}

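/* Expected bring-up sequence, sketched from the comments in this file
 * (the actual call sites live in boot and IPC code elsewhere):
 *
 *	dma_trace_init_early(sof);     at boot, before the DMA is usable
 *	dma_trace_init_complete(dmat); once the DMA driver is available
 *	dma_trace_host_buffer(...);    host buffer set-up (CONFIG_HOST_PTABLE)
 *	dma_trace_enable(dmat);        on SOF_IPC_TRACE_DMA_PARAMS*
 */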
/** Do this early so we can log at initialization time even before the
 * DMA runs. The rest happens later in dma_trace_init_complete() and
 * dma_trace_enable().
 */
int dma_trace_init_early(struct sof *sof)
{
	int ret;

	/* If this assert is wrong then traces have been corrupting
	 * random parts of memory. Some functions run before _and_ after
	 * DMA trace initialization and we don't want to ask them to
	 * never trace. So dma_trace_initialized() must be either
	 * clearly false/NULL or clearly true, we can't tolerate random
	 * uninitialized values in sof->dmat etc.
	 */
	assert(!dma_trace_initialized(sof->dmat));

	sof->dmat = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM,
			    sizeof(*sof->dmat));

	dma_sg_init(&sof->dmat->config.elem_array);
	spinlock_init(&sof->dmat->lock);

	ipc_build_trace_posn(&sof->dmat->posn);
	sof->dmat->msg = ipc_msg_init(sof->dmat->posn.rhdr.hdr.cmd,
				      sizeof(sof->dmat->posn));
	if (!sof->dmat->msg) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mtrace_printf(LOG_LEVEL_ERROR,
		      "dma_trace_init_early() failed: %d", ret);

	/* Cannot rfree(sof->dmat) from the system memory pool, see
	 * comments in lib/alloc.c
	 */
	sof->dmat = NULL;

	return ret;
}

/** Runs after dma_trace_init_early() and before dma_trace_enable() */
int dma_trace_init_complete(struct dma_trace_data *d)
{
	int ret = 0;

	tr_info(&dt_tr, "dma_trace_init_complete()");

	if (!d) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): failed, no dma_trace_data");
		return -ENOMEM;
	}

	/* init DMA copy context */
	ret = dma_copy_new(&d->dc);
	if (ret < 0) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): dma_copy_new() failed: %d", ret);
		goto out;
	}

	ret = dma_get_attribute(d->dc.dmac, DMA_ATTR_COPY_ALIGNMENT,
				&d->dma_copy_align);

	if (ret < 0) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): dma_get_attribute() failed: %d", ret);
		goto out;
	}

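	/* Register the trace copying task here; it only starts running
	 * once dma_trace_enable() or dma_trace_on() schedules it.
	 */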
	schedule_task_init_ll(&d->dmat_work, SOF_UUID(dma_trace_task_uuid),
			      SOF_SCHEDULE_LL_TIMER,
			      SOF_TASK_PRI_MED, trace_work, d, 0, 0);

out:
	return ret;
}

#if (CONFIG_HOST_PTABLE)
int dma_trace_host_buffer(struct dma_trace_data *d,
			  struct dma_sg_elem_array *elem_array,
			  uint32_t host_size)
{
	d->host_size = host_size;
	d->config.elem_array = *elem_array;

	return 0;
}
#endif

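/** Allocates and zeroes the local trace buffer, then initialises the
 * ring read/write pointers under the lock.
 */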
static int dma_trace_buffer_init(struct dma_trace_data *d)
{
	struct dma_trace_buf *buffer = &d->dmatb;
	void *buf;
	unsigned int flags;

	/* allocate new buffer */
	buf = rballoc(0, SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
		      DMA_TRACE_LOCAL_SIZE);
	if (!buf) {
		tr_err(&dt_tr, "dma_trace_buffer_init(): alloc failed");
		return -ENOMEM;
	}

	bzero(buf, DMA_TRACE_LOCAL_SIZE);
	dcache_writeback_region(buf, DMA_TRACE_LOCAL_SIZE);

	/* initialise the DMA buffer; do the whole sequence in one
	 * critical section
	 */
	spin_lock_irq(&d->lock, flags);

	buffer->addr = buf;
	buffer->size = DMA_TRACE_LOCAL_SIZE;
	buffer->w_ptr = buffer->addr;
	buffer->r_ptr = buffer->addr;
	buffer->end_addr = (char *)buffer->addr + buffer->size;
	buffer->avail = 0;

	spin_unlock_irq(&d->lock, flags);

	return 0;
}

static void dma_trace_buffer_free(struct dma_trace_data *d)
{
	struct dma_trace_buf *buffer = &d->dmatb;
	unsigned int flags;

	spin_lock_irq(&d->lock, flags);

	rfree(buffer->addr);
	memset(buffer, 0, sizeof(*buffer));

	spin_unlock_irq(&d->lock, flags);
}

#if CONFIG_DMA_GW

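/** Configures and starts the DMA channel used to copy the local trace
 * buffer to the host.
 */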
static int dma_trace_start(struct dma_trace_data *d)
{
	struct dma_sg_config config;
	uint32_t elem_size, elem_addr, elem_num;
	int err = 0;

	/* DMA Controller initialization is platform-specific */
	if (!d || !d->dc.dmac) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_start failed: no DMAC!");
		return -ENODEV;
	}

	err = dma_copy_set_stream_tag(&d->dc, d->stream_tag);
	if (err < 0)
		return err;

	/* size of every trace record */
	elem_size = sizeof(uint64_t) * 2;

	/* base address of the local elem list */
	elem_addr = (uint32_t)d->dmatb.addr;

	/* number of elements in the list */
	elem_num = DMA_TRACE_LOCAL_SIZE / elem_size;

	config.direction = DMA_DIR_LMEM_TO_HMEM;
	config.src_width = sizeof(uint32_t);
	config.dest_width = sizeof(uint32_t);
	config.cyclic = 0;

	err = dma_sg_alloc(&config.elem_array, SOF_MEM_ZONE_SYS,
			   config.direction,
			   elem_num, elem_size, elem_addr, 0);
	if (err < 0)
		return err;

	err = dma_set_config(d->dc.chan, &config);
	if (err < 0) {
		mtrace_printf(LOG_LEVEL_ERROR, "dma_set_config() failed: %d", err);
		return err;
	}

	err = dma_start(d->dc.chan);

	return err;
}

static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail)
{
	/* There is no DMA completion callback in GW DMA copying, so
	 * always send the previous position before the next copy to
	 * guarantee the previous DMA copy has finished. This function
	 * is called at least once every 500 ms, even if no new trace
	 * data has been produced.
	 */
	if (d->old_host_offset != d->posn.host_offset) {
		ipc_msg_send(d->msg, &d->posn, false);
		d->old_host_offset = d->posn.host_offset;
	}

	/* align data to HD-DMA burst size */
	return ALIGN_DOWN(avail, d->dma_copy_align);
}
#else
static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail)
{
	uint32_t hsize;
	uint32_t lsize;
	int32_t size;

	/* copy to host in sections if we wrap */
	lsize = avail;
	hsize = avail;

	if (avail == 0)
		return 0;

	/* host buffer wrap ? */
	if (d->posn.host_offset + avail > d->host_size)
		hsize = d->host_size - d->posn.host_offset;

	/* local buffer wrap ? */
	if ((char *)buffer->r_ptr + avail > (char *)buffer->end_addr)
		lsize = (char *)buffer->end_addr - (char *)buffer->r_ptr;

	/* copy the smaller of the two contiguous sections */
	size = MIN(hsize, lsize);

	return size;
}

#endif /* CONFIG_DMA_GW */


/** Invoked remotely by SOF_IPC_TRACE_DMA_PARAMS*. Depends on
 * dma_trace_init_complete().
 */
int dma_trace_enable(struct dma_trace_data *d)
{
	int err;

	/* initialize dma trace buffer */
	err = dma_trace_buffer_init(d);
	if (err < 0) {
		mtrace_printf(LOG_LEVEL_ERROR, "dma_trace_enable: buffer_init failed");
		goto out;
	}

	/* It should be the very first sent log for easy identification. */
	mtrace_printf(LOG_LEVEL_INFO,
		      "SHM: FW ABI 0x%x DBG ABI 0x%x tag " SOF_GIT_TAG " src hash 0x%08x (ldc hash "
		      META_QUOTE(SOF_SRC_HASH) ")",
		      SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH);

	/* Use a different, DMA: prefix to ease identification of log files */
	tr_info(&dt_tr,
		"DMA: FW ABI 0x%x DBG ABI 0x%x tag " SOF_GIT_TAG " src hash 0x%08x (ldc hash "
		META_QUOTE(SOF_SRC_HASH) ")",
		SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH);

#if CONFIG_DMA_GW
	/*
	 * The GW DMA must finish its configuration and start before the
	 * host driver triggers the DMA start.
	 */
	err = dma_trace_start(d);
	if (err < 0)
		goto out;
#endif

	/* validate DMA context */
	if (!d->dc.dmac || !d->dc.chan) {
		tr_err_atomic(&dt_tr, "dma_trace_enable(): not valid");
		err = -ENODEV;
		goto out;
	}

	d->enabled = 1;
	schedule_task(&d->dmat_work, DMA_TRACE_PERIOD, DMA_TRACE_PERIOD);

out:
	if (err < 0)
		dma_trace_buffer_free(d);

	return err;
}

/** Sends all pending DMA messages to the mailbox (for emergencies) */
void dma_trace_flush(void *t)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = NULL;
	uint32_t avail;
	int32_t size;
	int32_t wrap_count;
	int ret;

	if (!dma_trace_initialized(trace_data))
		return;

	buffer = &trace_data->dmatb;
	avail = buffer->avail;

	/* number of bytes to flush */
	if (avail > DMA_FLUSH_TRACE_SIZE) {
		size = DMA_FLUSH_TRACE_SIZE;
	} else {
		/* check for buffer wrap */
		if (buffer->w_ptr > buffer->r_ptr)
			size = (char *)buffer->w_ptr - (char *)buffer->r_ptr;
		else
			size = (char *)buffer->end_addr -
				(char *)buffer->r_ptr +
				(char *)buffer->w_ptr -
				(char *)buffer->addr;
	}

	size = MIN(size, MAILBOX_TRACE_SIZE);

	/* invalidate the destination region before writing trace data */
	dcache_invalidate_region((void *)t, size);

	/* check for buffer wrap: if the last @size bytes wrap around the
	 * end of the ring, copy the tail section first, then the head
	 */
	if ((char *)buffer->w_ptr - size < (char *)buffer->addr) {
		wrap_count = (char *)buffer->w_ptr - (char *)buffer->addr;
		ret = memcpy_s(t, size - wrap_count,
			       (char *)buffer->end_addr -
			       (size - wrap_count), size - wrap_count);
		assert(!ret);
		ret = memcpy_s((char *)t + (size - wrap_count), wrap_count,
			       buffer->addr, wrap_count);
		assert(!ret);
	} else {
		ret = memcpy_s(t, size, (char *)buffer->w_ptr - size, size);
		assert(!ret);
	}

	/* writeback trace data */
	dcache_writeback_region((void *)t, size);
}

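/** Starts the periodic trace copying task if it is not already running. */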
void dma_trace_on(void)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (!trace_data || trace_data->enabled)
		return;

	trace_data->enabled = 1;
	schedule_task(&trace_data->dmat_work, DMA_TRACE_PERIOD,
		      DMA_TRACE_PERIOD);
}

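/** Stops the periodic trace copying task if it is running. */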
void dma_trace_off(void)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (!trace_data || !trace_data->enabled)
		return;

	schedule_task_cancel(&trace_data->dmat_work);
	trace_data->enabled = 0;
}

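/** Returns by how many bytes writing @length bytes would overflow the
 * ring buffer, or 0 if the event fits.
 */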
static int dtrace_calc_buf_overflow(struct dma_trace_buf *buffer,
				    uint32_t length)
{
	uint32_t margin;
	uint32_t overflow_margin;
	uint32_t overflow = 0;

	margin = dtrace_calc_buf_margin(buffer);

	/* calculate the overflow margin, keeping one byte free so the
	 * write pointer never catches up with the read pointer
	 */
	if (buffer->w_ptr < buffer->r_ptr)
		overflow_margin = (char *)buffer->r_ptr -
			(char *)buffer->w_ptr - 1;
	else
		overflow_margin = margin + (char *)buffer->r_ptr -
			(char *)buffer->addr - 1;

	if (overflow_margin < length)
		overflow = length - overflow_margin;

	return overflow;
}

/** Ring buffer implementation, drops on overflow. */
static void dtrace_add_event(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = &trace_data->dmatb;
	uint32_t margin;
	uint32_t overflow;
	int ret;

	margin = dtrace_calc_buf_margin(buffer);
	overflow = dtrace_calc_buf_overflow(buffer, length);

	/* report dropped entries */
	if (trace_data->dropped_entries) {
		if (!overflow) {
			/*
			 * If any entries have been dropped and there is
			 * no overflow now, log their amount.
			 */
			uint32_t tmp_dropped_entries =
				trace_data->dropped_entries;
			trace_data->dropped_entries = 0;
			/*
			 * This tr_err() invocation recurses back into
			 * dtrace_add_event(), so we have to recalculate
			 * margin and overflow afterwards.
			 */
			tr_err(&dt_tr, "dtrace_add_event(): number of dropped logs = %u",
			       tmp_dropped_entries);
			margin = dtrace_calc_buf_margin(buffer);
			overflow = dtrace_calc_buf_overflow(buffer, length);
		}
	}

	/* check overflow */
	if (!overflow) {
		/* check for buffer wrap */
		if (margin > length) {
			/* no wrap */
			dcache_invalidate_region(buffer->w_ptr, length);
			ret = memcpy_s(buffer->w_ptr, length, e, length);
			assert(!ret);
			dcache_writeback_region(buffer->w_ptr, length);
			buffer->w_ptr = (char *)buffer->w_ptr + length;
		} else {
			/* data is bigger than the remaining margin, so wrap */
			dcache_invalidate_region(buffer->w_ptr, margin);
			ret = memcpy_s(buffer->w_ptr, margin, e, margin);
			assert(!ret);
			dcache_writeback_region(buffer->w_ptr, margin);
			buffer->w_ptr = buffer->addr;

			dcache_invalidate_region(buffer->w_ptr,
						 length - margin);
			ret = memcpy_s(buffer->w_ptr, length - margin,
				       e + margin, length - margin);
			assert(!ret);
			dcache_writeback_region(buffer->w_ptr,
						length - margin);
			buffer->w_ptr = (char *)buffer->w_ptr + length - margin;
		}

		buffer->avail += length;
		trace_data->posn.messages++;
	} else {
		/* not enough room for the new log entry, so drop it */
		trace_data->dropped_entries++;
	}
}

/** Main dma-trace entry point */
void dtrace_event(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = NULL;
	unsigned long flags;

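	/* drop empty events and events larger than 1/8 of the local buffer */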
	if (!dma_trace_initialized(trace_data) ||
	    length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) {
		return;
	}

	buffer = &trace_data->dmatb;

	spin_lock_irq(&trace_data->lock, flags);
	dtrace_add_event(e, length);

	/* if DMA trace copying is in progress, or on a secondary core,
	 * don't check whether the local buffer is half full
	 */
	if (trace_data->copy_in_progress ||
	    cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) {
		spin_unlock_irq(&trace_data->lock, flags);
		return;
	}

	spin_unlock_irq(&trace_data->lock, flags);

	/* schedule copy now if buffer > 50% full */
	if (trace_data->enabled &&
	    buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2)) {
		reschedule_task(&trace_data->dmat_work,
				DMA_TRACE_RESCHEDULE_TIME);
		/* the reschedule should not be interrupted,
		 * just as if a copy were in progress
		 */
		trace_data->copy_in_progress = 1;
	}
}

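/** Variant of dtrace_event() for atomic context: adds the event without
 * taking the lock and never reschedules the copy task.
 */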
void dtrace_event_atomic(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (!dma_trace_initialized(trace_data) ||
	    length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) {
		return;
	}

	dtrace_add_event(e, length);
}