// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2016 Intel Corporation. All rights reserved.
//
// Author: Yan Wang <yan.wang@linux.intel.com>

#include <sof/audio/buffer.h>
#include <sof/common.h>
#include <rtos/panic.h>
#include <sof/ipc/msg.h>
#include <rtos/alloc.h>
#include <rtos/cache.h>
#include <sof/lib/cpu.h>
#include <sof/lib/dma.h>
#include <sof/lib/memory.h>
#include <sof/lib/uuid.h>
#include <sof/platform.h>
#include <sof/schedule/ll_schedule.h>
#include <sof/schedule/schedule.h>
#include <rtos/task.h>
#include <rtos/sof.h>
#include <rtos/spinlock.h>
#include <rtos/string.h>
#include <sof/trace/dma-trace.h>
#include <ipc/topology.h>
#include <ipc/trace.h>
#include <kernel/abi.h>
#include <user/abi_dbg.h>
#include <sof_versions.h>

#ifdef __ZEPHYR__
#include <version.h>
#endif

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

LOG_MODULE_REGISTER(dma_trace, CONFIG_SOF_LOG_LEVEL);

/* 58782c63-1326-4185-8459-22272e12d1f1 */
DECLARE_SOF_UUID("dma-trace", dma_trace_uuid, 0x58782c63, 0x1326, 0x4185,
		 0x84, 0x59, 0x22, 0x27, 0x2e, 0x12, 0xd1, 0xf1);

DECLARE_TR_CTX(dt_tr, SOF_UUID(dma_trace_uuid), LOG_LEVEL_INFO);

/* 2b972272-c5b1-4b7e-926f-0fc5cb4c4690 */
DECLARE_SOF_UUID("dma-trace-task", dma_trace_task_uuid, 0x2b972272, 0xc5b1,
		 0x4b7e, 0x92, 0x6f, 0x0f, 0xc5, 0xcb, 0x4c, 0x46, 0x90);

static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail);

/** Periodically runs and starts the DMA even when the buffer is not
 * full.
 */
static enum task_state trace_work(void *data)
{
	struct dma_trace_data *d = data;
	struct dma_trace_buf *buffer = &d->dmatb;
	struct dma_sg_config *config = &d->config;
	k_spinlock_key_t key;
	uint32_t avail = buffer->avail;
	int32_t size;
	uint32_t overflow;

	/* The host DMA channel is not available */
	if (!d->dc.chan)
		return SOF_TASK_STATE_RESCHEDULE;

	if (!ipc_trigger_trace_xfer(avail))
		return SOF_TASK_STATE_RESCHEDULE;

	/* make sure we don't copy more than the local buffer holds */
	if (avail > DMA_TRACE_LOCAL_SIZE) {
		overflow = avail - DMA_TRACE_LOCAL_SIZE;
		avail = DMA_TRACE_LOCAL_SIZE;
	} else {
		overflow = 0;
	}

	/* the DMA gateway supports wrap-mode copies but GPDMA does not,
	 * so compute the copy size differently based on HW features
	 */
	size = dma_trace_get_avail_data(d, buffer, avail);

	/* any data to copy ? */
	if (size == 0) {
		return SOF_TASK_STATE_RESCHEDULE;
	}

	d->posn.overflow = overflow;

	/* DMA trace copying is working */
	d->copy_in_progress = 1;

	/* copy this section to host */
	size = dma_copy_to_host(&d->dc, config, d->posn.host_offset,
				buffer->r_ptr, size);
	if (size < 0) {
		tr_err(&dt_tr, "trace_work(): dma_copy_to_host() failed");
		goto out;
	}

	/* update host pointer and check for wrap */
	d->posn.host_offset += size;
	if (d->posn.host_offset >= d->host_size)
		d->posn.host_offset -= d->host_size;

	/* update local pointer and check for wrap */
	buffer->r_ptr = (char *)buffer->r_ptr + size;
	if (buffer->r_ptr >= buffer->end_addr)
		buffer->r_ptr = (char *)buffer->r_ptr - DMA_TRACE_LOCAL_SIZE;

	ipc_msg_send(d->msg, &d->posn, false);

out:
	key = k_spin_lock(&d->lock);

	/* disregard any old messages and don't resend them if we overflow */
	if (size > 0) {
		if (d->posn.overflow)
			buffer->avail = DMA_TRACE_LOCAL_SIZE - size;
		else
			buffer->avail -= size;
	}

	/* DMA trace copying is done, allow reschedule */
	d->copy_in_progress = 0;

	k_spin_unlock(&d->lock, key);

	/* reschedule the trace copying work */
	return SOF_TASK_STATE_RESCHEDULE;
}

/** Do this early so we can log at initialization time even before the
 * DMA runs. The rest happens later in dma_trace_init_complete() and
 * dma_trace_enable()
 */
int dma_trace_init_early(struct sof *sof)
{
	int ret;

	/* If this assert is wrong then traces have been corrupting
	 * random parts of memory. Some functions run before _and_ after
	 * DMA trace initialization and we don't want to ask them to
	 * never trace. So dma_trace_initialized() must be either
	 * clearly false/NULL or clearly true, we can't tolerate random
	 * uninitialized values in sof->dmat etc.
	 */
	assert(!dma_trace_initialized(sof->dmat));

	sof->dmat = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->dmat));

	dma_sg_init(&sof->dmat->config.elem_array);
	k_spinlock_init(&sof->dmat->lock);

	ipc_build_trace_posn(&sof->dmat->posn);
	sof->dmat->msg = ipc_msg_init(sof->dmat->posn.rhdr.hdr.cmd,
				      sof->dmat->posn.rhdr.hdr.size);
	if (!sof->dmat->msg) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mtrace_printf(LOG_LEVEL_ERROR,
		      "dma_trace_init_early() failed: %d", ret);

	/* Cannot rfree(sof->dmat) from the system memory pool, see
	 * comments in lib/alloc.c
	 */
	sof->dmat = NULL;

	return ret;
}

/** Run after dma_trace_init_early() and before dma_trace_enable() */
int dma_trace_init_complete(struct dma_trace_data *d)
{
	int ret = 0;

	tr_info(&dt_tr, "dma_trace_init_complete()");

	if (!d) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): failed, no dma_trace_data");
		return -ENOMEM;
	}

	/* init DMA copy context */
	ret = dma_copy_new(&d->dc);
	if (ret < 0) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): dma_copy_new() failed: %d", ret);
		goto out;
	}
#if CONFIG_ZEPHYR_NATIVE_DRIVERS
	ret = dma_get_attribute(d->dc.dmac->z_dev, DMA_ATTR_COPY_ALIGNMENT,
				&d->dma_copy_align);
#else
	ret = dma_get_attribute_legacy(d->dc.dmac, DMA_ATTR_COPY_ALIGNMENT,
				       &d->dma_copy_align);
#endif
	if (ret < 0) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_init_complete(): dma_get_attribute() failed: %d", ret);

		goto out;
	}

	schedule_task_init_ll(&d->dmat_work, SOF_UUID(dma_trace_task_uuid),
			      SOF_SCHEDULE_LL_TIMER,
			      SOF_TASK_PRI_MED, trace_work, d, 0, 0);

out:

	return ret;
}

#if (CONFIG_HOST_PTABLE)
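/** Saves the host buffer size and SG element list provided by the host
 * driver; trace_work() uses them for the DMA copies to the host.
 */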
int dma_trace_host_buffer(struct dma_trace_data *d,
			  struct dma_sg_elem_array *elem_array,
			  uint32_t host_size)
{
	d->host_size = host_size;
	d->config.elem_array = *elem_array;

	return 0;
}
#endif

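/** Frees the local trace buffer and clears its descriptor under the trace lock. */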
static void dma_trace_buffer_free(struct dma_trace_data *d)
{
	struct dma_trace_buf *buffer = &d->dmatb;
	k_spinlock_key_t key;

	key = k_spin_lock(&d->lock);

	rfree(buffer->addr);
	memset(buffer, 0, sizeof(*buffer));

	k_spin_unlock(&d->lock, key);
}

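/** Allocates and initializes the local trace buffer, prepares the gateway
 * SG configuration on CONFIG_DMA_GW builds, then emits the firmware banner
 * as the first log messages.
 */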
static int dma_trace_buffer_init(struct dma_trace_data *d)
{
#if CONFIG_DMA_GW
	struct dma_sg_config *config = &d->gw_config;
	uint32_t elem_size, elem_addr, elem_num;
	int ret;
#endif
	struct dma_trace_buf *buffer = &d->dmatb;
	void *buf;
	k_spinlock_key_t key;
	uint32_t addr_align;
	int err;

	/*
	 * Keep the existing dtrace buffer to avoid a memory leak; this is
	 * unlikely to happen if the host uses dma_trace_disable() correctly.
	 *
	 * The buffer cannot be freed here as it is likely still in use.
	 * The (re-)initialization will happen in dma_trace_start() when it is
	 * safe to do so (the DMA is stopped).
	 */
	if (dma_trace_initialized(d))
		return 0;

	if (!d || !d->dc.dmac) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_buffer_init() failed, no DMAC! d=%p", d);
		return -ENODEV;
	}
#if CONFIG_ZEPHYR_NATIVE_DRIVERS
	err = dma_get_attribute(d->dc.dmac->z_dev, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
				&addr_align);
#else
	err = dma_get_attribute_legacy(d->dc.dmac, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
				       &addr_align);
#endif
	if (err < 0)
		return err;

	/* For DMA to work properly the buffer must be correctly aligned */
	buf = rballoc_align(0, SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
			    DMA_TRACE_LOCAL_SIZE, addr_align);
	if (!buf) {
		mtrace_printf(LOG_LEVEL_ERROR, "dma_trace_buffer_init(): alloc failed");
		return -ENOMEM;
	}

	bzero(buf, DMA_TRACE_LOCAL_SIZE);
	dcache_writeback_region((__sparse_force void __sparse_cache *)buf, DMA_TRACE_LOCAL_SIZE);

	/* initialise the DMA buffer descriptor; keep the whole sequence
	 * inside the critical section
	 */
	key = k_spin_lock(&d->lock);

	buffer->addr = buf;
	buffer->size = DMA_TRACE_LOCAL_SIZE;
	buffer->w_ptr = buffer->addr;
	buffer->r_ptr = buffer->addr;
	buffer->end_addr = (char *)buffer->addr + buffer->size;
	buffer->avail = 0;

	k_spin_unlock(&d->lock, key);

#if CONFIG_DMA_GW
	/* size of every trace record */
	elem_size = sizeof(uint64_t) * 2;

	/* Initialize address of local elem */
	elem_addr = (uint32_t)buffer->addr;

	/* the number of elems in the list */
	elem_num = DMA_TRACE_LOCAL_SIZE / elem_size;

	config->direction = DMA_DIR_LMEM_TO_HMEM;
	config->src_width = sizeof(uint32_t);
	config->dest_width = sizeof(uint32_t);
	config->cyclic = 0;

	ret = dma_sg_alloc(&config->elem_array, SOF_MEM_ZONE_SYS,
			   config->direction, elem_num, elem_size,
			   elem_addr, 0);
	if (ret < 0) {
		dma_trace_buffer_free(d);
		return ret;
	}
#endif

#ifdef __ZEPHYR__
#define ZEPHYR_VER_OPT " zephyr:" META_QUOTE(BUILD_VERSION)
#else
#define ZEPHYR_VER_OPT
#endif

	/* META_QUOTE(SOF_SRC_HASH) is part of the format string so it
	 * goes to the .ldc file and does not go to the firmware
	 * binary. It will be different from SOF_SRC_HASH in case of
	 * mismatch.
	 */
#define SOF_BANNER_COMMON \
	"FW ABI 0x%x DBG ABI 0x%x tags SOF:" SOF_GIT_TAG ZEPHYR_VER_OPT \
	" src hash 0x%08x (ldc hash " META_QUOTE(SOF_SRC_HASH) ")"

	/* It should be the very first sent log for easy identification. */
	mtrace_printf(LOG_LEVEL_INFO,
		      "SHM: " SOF_BANNER_COMMON,
		      SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH);

	/* Use a different, DMA: prefix to ease identification of log files */
	tr_info(&dt_tr,
		"DMA: " SOF_BANNER_COMMON,
		SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH);

	return 0;
}

#if CONFIG_DMA_GW

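/** Stops any trace DMA channel that is already running, re-requests a
 * channel if the host provided a new stream tag, then configures and
 * starts the gateway DMA used for trace copying.
 */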
static int dma_trace_start(struct dma_trace_data *d)
{
	int err = 0;

	/* DMA Controller initialization is platform-specific */
	if (!d || !d->dc.dmac) {
		mtrace_printf(LOG_LEVEL_ERROR,
			      "dma_trace_start failed: no DMAC!");
		return -ENODEV;
	}

	if (d->dc.chan) {
		/* We already have a DMA channel for dtrace, stop it */
		mtrace_printf(LOG_LEVEL_WARNING,
			      "dma_trace_start(): DMA reconfiguration (active stream_tag: %u)",
			      d->active_stream_tag);

		schedule_task_cancel(&d->dmat_work);
		err = dma_stop_legacy(d->dc.chan);
		if (err < 0) {
			mtrace_printf(LOG_LEVEL_ERROR,
				      "dma_trace_start(): DMA channel failed to stop");
		} else if (d->active_stream_tag != d->stream_tag) {
			/* Re-request a channel if a different tag is provided */
			mtrace_printf(LOG_LEVEL_WARNING,
				      "dma_trace_start(): stream_tag change from %u to %u",
				      d->active_stream_tag, d->stream_tag);

			dma_channel_put_legacy(d->dc.chan);
			d->dc.chan = NULL;
			err = dma_copy_set_stream_tag(&d->dc, d->stream_tag);
		}
	} else {
		err = dma_copy_set_stream_tag(&d->dc, d->stream_tag);
	}

	if (err < 0)
		return err;

	/* Reset host buffer information as the host is re-configuring dtrace */
	d->posn.host_offset = 0;

	d->active_stream_tag = d->stream_tag;

	err = dma_set_config_legacy(d->dc.chan, &d->gw_config);
	if (err < 0) {
		mtrace_printf(LOG_LEVEL_ERROR, "dma_set_config() failed: %d", err);
		goto error;
	}

	err = dma_start_legacy(d->dc.chan);
	if (err == 0)
		return 0;

error:
	dma_channel_put_legacy(d->dc.chan);
	d->dc.chan = NULL;

	return err;
}

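/** With a DMA gateway the copy engine handles buffer wrap itself, so the
 * only restriction on the copy size is the DMA copy alignment.
 */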
static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail)
{
	/* align data to HD-DMA burst size */
	return ALIGN_DOWN(avail, d->dma_copy_align);
}
#else
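/** Without gateway wrap support the copy must stop at whichever boundary
 * comes first: the end of the host buffer or the end of the local buffer.
 */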
static int dma_trace_get_avail_data(struct dma_trace_data *d,
				    struct dma_trace_buf *buffer,
				    int avail)
{
	uint32_t hsize;
	uint32_t lsize;
	int32_t size;

	/* copy to host in sections if we wrap */
	lsize = avail;
	hsize = avail;

	if (avail == 0)
		return 0;

	/* host buffer wrap ? */
	if (d->posn.host_offset + avail > d->host_size)
		hsize = d->host_size - d->posn.host_offset;

	/* local buffer wrap ? */
	if ((char *)buffer->r_ptr + avail > (char *)buffer->end_addr)
		lsize = (char *)buffer->end_addr - (char *)buffer->r_ptr;

	/* get smallest size */
	if (hsize < lsize)
		size = hsize;
	else
		size = lsize;

	return size;
}

#endif /* CONFIG_DMA_GW */

/** Invoked remotely by SOF_IPC_TRACE_DMA_PARAMS*. Depends on
 * dma_trace_init_complete().
 */
int dma_trace_enable(struct dma_trace_data *d)
{
	int err;

	/* Allocate and initialize the dma trace buffer if needed */
	err = dma_trace_buffer_init(d);
	if (err < 0)
		return err;

#if CONFIG_DMA_GW
	/*
	 * The gateway DMA must be configured and started before the host
	 * driver triggers its own DMA start
	 */
	err = dma_trace_start(d);
	if (err < 0)
		goto out;
#endif

	/* validate DMA context */
	if (!d->dc.dmac || !d->dc.chan) {
		tr_err_atomic(&dt_tr, "dma_trace_enable(): not valid");
		err = -ENODEV;
		goto out;
	}

	d->enabled = 1;
	schedule_task(&d->dmat_work, DMA_TRACE_PERIOD, DMA_TRACE_PERIOD);

out:
	if (err < 0)
		dma_trace_buffer_free(d);

	return err;
}

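/** Cancels the trace work, stops and releases the DMA channel, and frees
 * the host SG list if one was set.
 */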
void dma_trace_disable(struct dma_trace_data *d)
{
	/* cancel trace work */
	schedule_task_cancel(&d->dmat_work);

	if (d->dc.chan) {
		dma_stop_legacy(d->dc.chan);
		dma_channel_put_legacy(d->dc.chan);
		d->dc.chan = NULL;
	}

#if (CONFIG_HOST_PTABLE)
	/* Free up the host SG if it is set */
	if (d->host_size) {
		dma_sg_free(&d->config.elem_array);
		d->host_size = 0;
	}
#endif
}

/** Copies all pending DMA trace data to the mailbox (for emergencies) */
void dma_trace_flush(void *t)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = NULL;
	uint32_t avail;
	int32_t size;
	int32_t wrap_count;
	int ret;

	if (!dma_trace_initialized(trace_data))
		return;

	buffer = &trace_data->dmatb;
	avail = buffer->avail;

	/* number of bytes to flush */
	if (avail > DMA_FLUSH_TRACE_SIZE) {
		size = DMA_FLUSH_TRACE_SIZE;
	} else {
		/* check for buffer wrap */
		if (buffer->w_ptr > buffer->r_ptr)
			size = (char *)buffer->w_ptr - (char *)buffer->r_ptr;
		else
			size = (char *)buffer->end_addr -
				(char *)buffer->r_ptr +
				(char *)buffer->w_ptr -
				(char *)buffer->addr;
	}

	size = MIN(size, MAILBOX_TRACE_SIZE);

	/* invalidate trace data */
	dcache_invalidate_region((__sparse_force void __sparse_cache *)t, size);

	/* check for buffer wrap */
	if ((char *)buffer->w_ptr - size < (char *)buffer->addr) {
		wrap_count = (char *)buffer->w_ptr - (char *)buffer->addr;
		ret = memcpy_s(t, size - wrap_count,
			       (char *)buffer->end_addr -
			       (size - wrap_count), size - wrap_count);
		assert(!ret);
		ret = memcpy_s((char *)t + (size - wrap_count), wrap_count,
			       buffer->addr, wrap_count);
		assert(!ret);
	} else {
		ret = memcpy_s(t, size, (char *)buffer->w_ptr - size, size);
		assert(!ret);
	}

	/* writeback trace data */
	dcache_writeback_region((__sparse_force void __sparse_cache *)t, size);
}

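/** Re-enables trace copying by scheduling the trace work; a no-op when the
 * trace is not initialized or already enabled.
 */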
void dma_trace_on(void)
{
	if (!dma_trace_initialized(sof_get()->dmat))
		return;

	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (trace_data->enabled) {
		return;
	}

	trace_data->enabled = 1;
	schedule_task(&trace_data->dmat_work, DMA_TRACE_PERIOD,
		      DMA_TRACE_PERIOD);
}

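/** Disables trace copying by cancelling the trace work; a no-op when
 * already disabled.
 */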
void dma_trace_off(void)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (!trace_data->enabled) {
		return;
	}

	schedule_task_cancel(&trace_data->dmat_work);
	trace_data->enabled = 0;
}

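/** Returns how many bytes of a new entry of the given length would not fit
 * into the ring buffer without overwriting unread data (0 if it fits).
 */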
static int dtrace_calc_buf_overflow(struct dma_trace_buf *buffer,
				    uint32_t length)
{
	uint32_t margin;
	uint32_t overflow_margin;
	uint32_t overflow = 0;

	margin = dtrace_calc_buf_margin(buffer);

	/* calculate the overflow */
	if (buffer->w_ptr < buffer->r_ptr)
		overflow_margin = (char *)buffer->r_ptr -
			(char *)buffer->w_ptr - 1;
	else
		overflow_margin = margin + (char *)buffer->r_ptr -
			(char *)buffer->addr - 1;

	if (overflow_margin < length)
		overflow = length - overflow_margin;

	return overflow;
}

/** Ring buffer implementation, drops on overflow. */
static void dtrace_add_event(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = &trace_data->dmatb;
	uint32_t margin;
	uint32_t overflow;
	int ret;

	margin = dtrace_calc_buf_margin(buffer);
	overflow = dtrace_calc_buf_overflow(buffer, length);

	/* tracing of dropped entries */
	if (trace_data->dropped_entries) {
		if (!overflow) {
			/*
			 * If any entries have been dropped and there is no
			 * overflow now, log their count.
			 */
			uint32_t tmp_dropped_entries =
				trace_data->dropped_entries;
			trace_data->dropped_entries = 0;
			/*
			 * This error trace recurses back into
			 * dtrace_add_event(), so margin and overflow have to
			 * be recalculated afterwards.
			 */
			tr_err(&dt_tr, "dtrace_add_event(): number of dropped logs = %u",
			       tmp_dropped_entries);
			margin = dtrace_calc_buf_margin(buffer);
			overflow = dtrace_calc_buf_overflow(buffer, length);
		}
	}

	/* check for overflow */
	if (!overflow) {
		/* check for buffer wrap */
		if (margin > length) {
			/* no wrap */
			dcache_invalidate_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						 length);
			ret = memcpy_s(buffer->w_ptr, length, e, length);
			assert(!ret);
			dcache_writeback_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						length);
			buffer->w_ptr = (char *)buffer->w_ptr + length;
		} else {
			/* data is bigger than the remaining margin, so wrap */
			dcache_invalidate_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						 margin);
			ret = memcpy_s(buffer->w_ptr, margin, e, margin);
			assert(!ret);
			dcache_writeback_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						margin);
			buffer->w_ptr = buffer->addr;

			dcache_invalidate_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						 length - margin);
			ret = memcpy_s(buffer->w_ptr, length - margin,
				       e + margin, length - margin);
			assert(!ret);
			dcache_writeback_region((__sparse_force void __sparse_cache *)buffer->w_ptr,
						length - margin);
			buffer->w_ptr = (char *)buffer->w_ptr + length - margin;
		}

		buffer->avail += length;
		trace_data->posn.messages++;
	} else {
		/* if there is not enough room for the new entry, drop it */
		trace_data->dropped_entries++;
	}
}


/** Main dma-trace entry point */
void dtrace_event(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();
	struct dma_trace_buf *buffer = NULL;
	k_spinlock_key_t key;

	if (!dma_trace_initialized(trace_data) ||
	    length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) {
		return;
	}

	buffer = &trace_data->dmatb;

	key = k_spin_lock(&trace_data->lock);
	dtrace_add_event(e, length);

	/* if DMA trace copying is in progress or we are on a secondary
	 * core, don't check whether the local buffer is half full
	 */
	if (trace_data->copy_in_progress ||
	    cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) {
		k_spin_unlock(&trace_data->lock, key);
		return;
	}

	k_spin_unlock(&trace_data->lock, key);

	/* schedule a copy now if the buffer is more than 50% full */
	if (trace_data->enabled &&
	    buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2)) {
		reschedule_task(&trace_data->dmat_work,
				DMA_TRACE_RESCHEDULE_TIME);
		/* the reschedule must not be interrupted, so flag it
		 * just as if a copy were already in progress
		 */
		trace_data->copy_in_progress = 1;
	}
}

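/** Variant of dtrace_event() for atomic context: adds the event without
 * taking the trace lock and without triggering an early reschedule of the
 * trace work.
 */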
void dtrace_event_atomic(const char *e, uint32_t length)
{
	struct dma_trace_data *trace_data = dma_trace_data_get();

	if (!dma_trace_initialized(trace_data) ||
	    length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) {
		return;
	}

	dtrace_add_event(e, length);
}