/*
 * Copyright (c) 2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/cache.h>
#include <zephyr/shell/shell.h>
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_frontend_stmesp.h>
#include <zephyr/logging/log_frontend_stmesp_demux.h>
#include <zephyr/debug/coresight/cs_trace_defmt.h>
#include <zephyr/debug/mipi_stp_decoder.h>
#include <zephyr/linker/devicetree_regions.h>
#include <zephyr/drivers/debug/debug_nrf_etr.h>
#include <zephyr/drivers/serial/uart_async_rx.h>
#include <zephyr/sys/printk.h>
#include <dmm.h>
#include <nrfx_tbm.h>
#include <stdio.h>
LOG_MODULE_REGISTER(cs_etr_tbm);

#define UART_NODE DT_CHOSEN(zephyr_console)

#define ETR_BUFFER_NODE DT_NODELABEL(etr_buffer)

#define DROP_CHECK_PERIOD \
	COND_CODE_1(CONFIG_DEBUG_NRF_ETR_DECODE, \
		    (CONFIG_DEBUG_NRF_ETR_DECODE_DROP_PERIOD), (0))

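/* Minimum amount of pending data (in 32-bit words) required before frames are
 * processed: two full CoreSight formatter frames.
 */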
#define MIN_DATA (2 * CORESIGHT_TRACE_FRAME_SIZE32)

/* Since the ETR handler is part of the logging infrastructure, logging cannot
 * be used for debugging it. printk is used instead (assuming CONFIG_LOG_PRINTK=n).
 */
#define DBG(...) IF_ENABLED(CONFIG_DEBUG_NRF_ETR_DEBUG, (printk(__VA_ARGS__)))

/** @brief Macro for dumping debug data.
 *
 * @param _data Data variable.
 * @param _nlen Number of nibbles in @p _data to print.
 * @param _marked True if the data was marked in the STPv2 stream.
 */
#define DBG_DATA(_data, _nlen, _marked) \
	do { \
		char *fmt; \
		switch (_nlen) { \
		case 2: \
			fmt = "D%s\t%02x "; \
			break; \
		case 4: \
			fmt = "D%s\t%04x "; \
			break; \
		case 8: \
			fmt = "D%s\t%08x "; \
			break; \
		default: \
			/* 16 nibbles: 64-bit data needs a long long specifier. */ \
			fmt = "D%s\t%016llx "; \
			break; \
		} \
		DBG(fmt, _marked ? "M" : "", _data); \
		for (int i = 0; i < _nlen / 2; i++) { \
			DBG("%c ", ((char *)&_data)[i]); \
		} \
		DBG("\n"); \
	} while (0)

static const uint32_t wsize_mask = DT_REG_SIZE(ETR_BUFFER_NODE) / sizeof(int) - 1;
static const uint32_t wsize_inc = DT_REG_SIZE(ETR_BUFFER_NODE) / sizeof(int);
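
/* Index masking with wsize_mask (and the wraparound bookkeeping in
 * get_wr_idx()) is only valid when the ETR buffer word count is a power of
 * two. Sanity check added here to make that assumption explicit.
 */
BUILD_ASSERT((DT_REG_SIZE(ETR_BUFFER_NODE) & (DT_REG_SIZE(ETR_BUFFER_NODE) - 1)) == 0);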

static bool in_sync;
static int oosync_cnt;
static volatile bool tbm_full;
static volatile uint32_t base_wr_idx;
static uint32_t etr_rd_idx;
/* Number of new messages completed while decoding the current formatter frame. */
static uint32_t new_msg_cnt;

static volatile bool use_async_uart;

static struct k_sem uart_sem;
static const struct device *uart_dev = DEVICE_DT_GET(UART_NODE);
static uint32_t frame_buf0[CORESIGHT_TRACE_FRAME_SIZE32] DMM_MEMORY_SECTION(UART_NODE);
static uint32_t frame_buf1[CORESIGHT_TRACE_FRAME_SIZE32] DMM_MEMORY_SECTION(UART_NODE);
static uint32_t frame_buf_decode[CORESIGHT_TRACE_FRAME_SIZE32];
static uint32_t *frame_buf = IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE) ?
			     frame_buf_decode : frame_buf0;

K_KERNEL_STACK_DEFINE(etr_stack, CONFIG_DEBUG_NRF_ETR_STACK_SIZE);
static struct k_thread etr_thread;

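/* process() invalidates the data cache over ranges of the ETR buffer. Cache
 * maintenance operates on whole cache lines, so the buffer must be cache-line
 * aligned and sized, or invalidation could touch unrelated adjacent data.
 */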
BUILD_ASSERT((DT_REG_SIZE(ETR_BUFFER_NODE) % CONFIG_DCACHE_LINE_SIZE) == 0);
BUILD_ASSERT((DT_REG_ADDR(ETR_BUFFER_NODE) % CONFIG_DCACHE_LINE_SIZE) == 0);

/* Domain details and prefixes. */
static const uint16_t stm_m_id[] = {0x21, 0x22, 0x23, 0x2c, 0x2d, 0x2e, 0x24, 0x80};
static uint32_t source_id_buf[ARRAY_SIZE(stm_m_id) * 8];
static const char *const stm_m_name[] = {"sec", "app", "rad", "sys", "flpr", "ppr", "mod", "hw"};
static const char *const hw_evts[] = {
	"CTI211_0",    /* 0 CTI211 trigger out 1 */
	"CTI211_1",    /* 1 CTI211 trigger out 1 inverted */
	"CTI211_2",    /* 2 CTI211 trigger out 2 */
	"CTI211_3",    /* 3 CTI211 trigger out 2 inverted */
	"Sec up",      /* 4 Secure Domain up */
	"Sec down",    /* 5 Secure Domain down */
	"App up",      /* 6 Application Domain up */
	"App down",    /* 7 Application Domain down */
	"Rad up",      /* 8 Radio Domain up */
	"Rad down",    /* 9 Radio Domain down */
	"Radf up",     /* 10 Radio fast up */
	"Radf down",   /* 11 Radio fast down */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	"GD LL up",    /* 26 Global domain low leakage up */
	"GD LL down",  /* 27 Global domain low leakage down */
	"GD1 HS up",   /* 28 Global domain high speed 1 up */
	"GD1 HS down", /* 29 Global domain high speed 1 down */
	"GD0 HS up",   /* 30 Global domain high speed 0 up */
	"GD0 HS down", /* 31 Global domain high speed 0 down */
};

#ifdef CONFIG_DEBUG_NRF_ETR_SHELL
#define RX_BUF_SIZE \
	(CONFIG_DEBUG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_SIZE * \
	 CONFIG_DEBUG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT)

static void etr_timer_handler(struct k_timer *timer);
K_TIMER_DEFINE(etr_timer, etr_timer_handler, NULL);
static uint8_t rx_buf[RX_BUF_SIZE] DMM_MEMORY_SECTION(UART_NODE);
static struct uart_async_rx async_rx;
static atomic_t pending_rx_req;
static const struct shell etr_shell;
static shell_transport_handler_t shell_handler;
static void *shell_context;
#endif

static int log_output_func(uint8_t *buf, size_t size, void *ctx)
{
	if (use_async_uart) {
		int err;
		/* Ping-pong between the two DMA-capable buffers so that one
		 * can be filled while the other is being transmitted.
		 */
		static uint8_t *tx_buf = (uint8_t *)frame_buf0;

		err = k_sem_take(&uart_sem, K_FOREVER);
		__ASSERT_NO_MSG(err >= 0);

		memcpy(tx_buf, buf, size);

		err = uart_tx(uart_dev, tx_buf, size, SYS_FOREVER_US);
		__ASSERT_NO_MSG(err >= 0);

		tx_buf = (tx_buf == (uint8_t *)frame_buf0) ?
			 (uint8_t *)frame_buf1 : (uint8_t *)frame_buf0;
	} else {
		for (int i = 0; i < size; i++) {
			uart_poll_out(uart_dev, buf[i]);
		}
	}

	return size;
}

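/* The log_output buffer is exactly one formatter frame in size, so each chunk
 * handed to log_output_func() is guaranteed to fit the ping-pong TX buffers.
 */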
static uint8_t log_output_buf[CORESIGHT_TRACE_FRAME_SIZE];
LOG_OUTPUT_DEFINE(log_output, log_output_func, log_output_buf, sizeof(log_output_buf));

/** @brief Process a log message. */
static void log_message_process(struct log_frontend_stmesp_demux_log *packet)
{
	uint32_t flags = LOG_OUTPUT_FLAG_COLORS | LOG_OUTPUT_FLAG_LEVEL |
			 LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP;
	uint64_t ts = packet->timestamp;
	uint8_t level = packet->hdr.level;
	uint16_t plen = packet->hdr.package_len;
	const char *dname = stm_m_name[packet->hdr.major];
	const uint8_t *package = packet->data;
	const char *sname = (const char *)&packet->data[plen];
	size_t sname_len = strlen(sname) + 1;
	uint16_t dlen = packet->hdr.total_len - (plen + sname_len);
	uint8_t *data = dlen ? &packet->data[plen + sname_len] : NULL;

	log_output_process(&log_output, ts, dname, sname, NULL, level, package, data, dlen, flags);
}

/** @brief Process a trace point message. */
static void trace_point_process(struct log_frontend_stmesp_demux_trace_point *packet)
{
	static const uint32_t flags = LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP |
				      LOG_OUTPUT_FLAG_LEVEL;
	static const char *tp = "%d";
	static const char *tp_d32 = "%d %08x";
	const char *dname = stm_m_name[packet->major];
	static const char *sname = "tp";
	const char *lptr;

	if (packet->id >= CONFIG_LOG_FRONTEND_STMESP_TURBO_LOG_BASE) {
		lptr = log_frontend_stmesp_demux_str_get(packet->major,
				packet->id - CONFIG_LOG_FRONTEND_STMESP_TURBO_LOG_BASE);
		uint8_t level = (uint8_t)(lptr[0]) - (uint8_t)'0';
		const char *ptr = lptr + 1;
		static const union cbprintf_package_hdr desc0 = {
			.desc = {.len = 2 /* hdr + fmt */}};
		static const union cbprintf_package_hdr desc1 = {
			.desc = {.len = 3 /* hdr + fmt + data */}};
		uint32_t tp_log[] = {packet->has_data ? (uint32_t)desc1.raw : (uint32_t)desc0.raw,
				     (uint32_t)ptr, packet->data};
		const char *source =
			log_frontend_stmesp_demux_sname_get(packet->major, packet->source_id);

		log_output_process(&log_output, packet->timestamp, dname, source, NULL, level,
				   (const uint8_t *)tp_log, NULL, 0, flags);
		return;
	} else if (packet->has_data) {
		uint32_t id = (uint32_t)packet->id - CONFIG_LOG_FRONTEND_STMESP_TP_CHAN_BASE;
		static const union cbprintf_package_hdr desc = {
			.desc = {.len = 4 /* hdr + fmt + id + data */}};
		uint32_t tp_d32_p[] = {(uint32_t)desc.raw, (uint32_t)tp_d32, id, packet->data};

		log_output_process(&log_output, packet->timestamp, dname, sname, NULL, 1,
				   (const uint8_t *)tp_d32_p, NULL, 0, flags);
		return;
	}

	static const union cbprintf_package_hdr desc = {.desc = {.len = 3 /* hdr + fmt + id */}};
	uint32_t tp_p[] = {(uint32_t)desc.raw, (uint32_t)tp, packet->id};

	log_output_process(&log_output, packet->timestamp, dname, sname, NULL,
			   1, (const uint8_t *)tp_p, NULL, 0, flags);
}

/** @brief Process a HW event message. */
static void hw_event_process(struct log_frontend_stmesp_demux_hw_event *packet)
{
	static const uint32_t flags = LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP;
	static const char *tp = "%s";
	static const char *dname = "hw";
	static const char *sname = "event";
	const char *evt_name = packet->evt < ARRAY_SIZE(hw_evts) ? hw_evts[packet->evt] : "invalid";
	static const union cbprintf_package_hdr desc = {.desc = {.len = 3 /* hdr + fmt + id */}};
	uint32_t tp_p[] = {(uint32_t)desc.raw, (uint32_t)tp, (uint32_t)evt_name};

	log_output_process(&log_output, packet->timestamp, dname, sname, NULL,
			   1, (const uint8_t *)tp_p, NULL, 0, flags);
}

static void message_process(union log_frontend_stmesp_demux_packet packet)
{
	switch (packet.generic_packet->type) {
	case LOG_FRONTEND_STMESP_DEMUX_TYPE_TRACE_POINT:
		trace_point_process(packet.trace_point);
		break;
	case LOG_FRONTEND_STMESP_DEMUX_TYPE_HW_EVENT:
		hw_event_process(packet.hw_event);
		break;
	default:
		log_message_process(packet.log);
		break;
	}
}

/** @brief Called when a potential STPv2 stream data drop is detected.
 *
 * When that occurs, all active messages in the demultiplexer are marked as
 * invalid and the STP decoder switches to re-synchronization mode, where
 * incoming data is scanned for the ASYNC opcode.
 */
static void sync_loss(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE)) {
		mipi_stp_decoder_sync_loss();
		log_frontend_stmesp_demux_reset();
		oosync_cnt++;
		in_sync = false;
	}
}

/** @brief Indicate that the STPv2 decoder is synchronized.
 *
 * Called when the ASYNC opcode is found.
 */
static void on_resync(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE)) {
		in_sync = true;
	}
}

static void decoder_cb_debug(enum mipi_stp_decoder_ctrl_type type,
			     union mipi_stp_decoder_data data,
			     uint64_t *ts, bool marked)
{
	switch (type) {
	case STP_DECODER_MAJOR:
		DBG("M%04x\n", data.id);
		break;
	case STP_DECODER_CHANNEL:
		DBG("C%04x\n", data.id);
		break;
	case STP_DATA8:
		DBG_DATA(data.data, 2, marked);
		if (ts) {
			DBG("TS:%lld\n", *ts);
		}
		break;
	case STP_DATA16:
		DBG_DATA(data.data, 4, marked);
		break;
	case STP_DATA32:
		DBG_DATA(data.data, 8, marked);
		if (ts) {
			DBG("TS:%lld\n", *ts);
		}
		break;
	case STP_DATA64:
		DBG_DATA(data.data, 16, marked);
		break;
	case STP_DECODER_FLAG:
		DBG("F%s\n", ts ? "TS" : "");
		break;
	case STP_DECODER_NULL:
		DBG("NULL\n");
		break;
	case STP_DECODER_MERROR:
		DBG("MERR\n");
		break;
	case STP_DECODER_VERSION:
		DBG("VER\n");
		break;
	case STP_DECODER_FREQ:
		DBG("FREQ%s %d\n", ts ? "TS" : "", (int)data.freq);
		break;
	case STP_DECODER_GERROR:
		DBG("GERR\n");
		break;
	case STP_DECODER_ASYNC:
		DBG("ASYNC\n");
		break;
	case STP_DECODER_NOT_SUPPORTED:
		DBG("NOTSUP\n");
		break;
	default:
		DBG("OTHER\n");
		break;
	}
}

static void decoder_cb(enum mipi_stp_decoder_ctrl_type type,
		       union mipi_stp_decoder_data data, uint64_t *ts,
		       bool marked)
{
	int rv = 0;

	decoder_cb_debug(type, data, ts, marked);

	if (!IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE)) {
		return;
	}

	switch (type) {
	case STP_DECODER_ASYNC:
		on_resync();
		break;
	case STP_DECODER_MAJOR:
		log_frontend_stmesp_demux_major(data.id);
		break;
	case STP_DECODER_CHANNEL:
		log_frontend_stmesp_demux_channel(data.id);
		break;
	case STP_DATA8:
		if (marked) {
			rv = log_frontend_stmesp_demux_packet_start((uint32_t *)&data.data, ts);
			new_msg_cnt += rv;
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 1);
		}
		break;
	case STP_DATA16:
		if (marked) {
			if (ts) {
				rv = log_frontend_stmesp_demux_log0((uint16_t)data.data, ts);
				new_msg_cnt += rv;
			} else {
				log_frontend_stmesp_demux_source_id((uint16_t)data.data);
			}
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 2);
		}
		break;
	case STP_DATA32:
		if (marked) {
			rv = log_frontend_stmesp_demux_packet_start((uint32_t *)&data.data, ts);
			new_msg_cnt += rv;
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 4);
			if (ts) {
				log_frontend_stmesp_demux_timestamp(*ts);
			}
		}
		break;
	case STP_DATA64:
		log_frontend_stmesp_demux_data((char *)&data.data, 8);
		break;
	case STP_DECODER_FLAG:
		if (ts) {
			log_frontend_stmesp_demux_packet_start(NULL, ts);
		} else {
			log_frontend_stmesp_demux_packet_end();
		}
		new_msg_cnt++;
		break;
	case STP_DECODER_FREQ: {
		static uint32_t freq;

		/* Avoid calling the log_output function multiple times as the
		 * frequency is sent periodically.
		 */
		if (freq != (uint32_t)data.freq) {
			freq = (uint32_t)data.freq;
			log_output_timestamp_freq_set(freq);
		}
		break;
	}
	case STP_DECODER_MERROR:
		sync_loss();
		break;
	default:
		break;
	}

	/* Only -ENOMEM is an accepted failure. */
	__ASSERT_NO_MSG((rv >= 0) || (rv == -ENOMEM));
}

static void deformatter_cb(uint32_t id, const uint8_t *data, size_t len)
{
	mipi_stp_decoder_decode(data, len);
}

/** @brief Get the write index.
 *
 * It is a non-wrapping 32-bit word index. To get the actual index within the
 * ETR buffer, the result must be masked with the ETR buffer size mask.
 */
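/* For example (buffer size assumed for illustration): with a 4 KiB ETR buffer
 * there are 1024 words and wsize_mask == 1023, so a non-wrapping index of 1034
 * maps to word offset 1034 & 1023 == 10 within the buffer.
 */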
static uint32_t get_wr_idx(void)
{
	uint32_t cnt = nrfx_tbm_count_get();

	if (tbm_full && (cnt < wsize_mask)) {
		/* The TBM full event is generated when the max value is
		 * reached and not when the overflow occurs. We cannot
		 * increment base_wr_idx just after the event but only when
		 * the counter actually wraps.
		 */
		base_wr_idx += wsize_inc;
		tbm_full = false;
	}

	return cnt + base_wr_idx;
}

/** @brief Get the amount of pending data (in words) in the ETR buffer. */
static uint32_t pending_data(void)
{
	/* Unsigned subtraction yields the correct distance even after the
	 * 32-bit indexes wrap around.
	 */
	return get_wr_idx() - etr_rd_idx;
}

/** @brief Increment the read index.
 *
 * The read index is a non-wrapping 32-bit word index. The actual ETR buffer
 * index is derived by masking the value with the ETR buffer size mask.
 */
static void rd_idx_inc(void)
{
	etr_rd_idx += CORESIGHT_TRACE_FRAME_SIZE32;
}

/** @brief Process a frame. */
static void process_frame(uint8_t *buf, uint32_t pending)
{
	DBG("%d (wr:%d): ", pending, get_wr_idx() & wsize_mask);
	for (int j = 0; j < CORESIGHT_TRACE_FRAME_SIZE; j++) {
		DBG("%02x ", buf[j]);
	}
	DBG("\n");
	cs_trace_defmt_process(buf, CORESIGHT_TRACE_FRAME_SIZE);
	DBG("\n");
}

static void process_messages(void)
{
	static union log_frontend_stmesp_demux_packet curr_msg;

	/* Process any new messages. curr_msg stays set if a panic interrupts
	 * ongoing processing (curr_msg.generic_packet is not NULL then). In
	 * such a case the message is processed once again, which may lead to
	 * a partial repetition of that message on the output.
	 */
	while (new_msg_cnt || curr_msg.generic_packet) {
		if (!curr_msg.generic_packet) {
			curr_msg = log_frontend_stmesp_demux_claim();
		}
		if (curr_msg.generic_packet) {
			message_process(curr_msg);
			log_frontend_stmesp_demux_free(curr_msg);
			curr_msg.generic_packet = NULL;
		} else {
			break;
		}
	}
	new_msg_cnt = 0;
}

/** @brief Dump a frame over UART (using the polling or async API). */
static void dump_frame(uint8_t *buf)
{
	int err;

	if (use_async_uart) {
		err = k_sem_take(&uart_sem, K_FOREVER);
		__ASSERT_NO_MSG(err >= 0);

		err = uart_tx(uart_dev, buf, CORESIGHT_TRACE_FRAME_SIZE, SYS_FOREVER_US);
		__ASSERT_NO_MSG(err >= 0);
	} else {
		for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE; i++) {
			uart_poll_out(uart_dev, buf[i]);
		}
	}
}

/** @brief Attempt to process data pending in the ETR circular buffer.
 *
 * Data is processed in 16-byte chunks. Each chunk is a CoreSight formatter
 * frame which contains STPv2 data generated by the STM stimulus ports.
 */
static void process(void)
{
	static const uint32_t *const etr_buf = (uint32_t *)(DT_REG_ADDR(ETR_BUFFER_NODE));
	static uint32_t sync_cnt = CONFIG_DEBUG_NRF_ETR_SYNC_PERIOD;
	uint32_t pending;

	/* If the function is called in panic mode, it may interrupt ongoing
	 * processing. This must be carefully handled as the function decodes
	 * data that must be synchronized. Losing synchronization results in
	 * failure.
	 *
	 * Special measures are taken to ensure proper synchronization when
	 * processing is preempted by a panic.
	 */
	while ((pending = pending_data()) >= MIN_DATA) {
		if (sync_cnt || (CONFIG_DEBUG_NRF_ETR_SYNC_PERIOD == 0)) {
			sync_cnt--;
			sys_cache_data_invd_range((void *)&etr_buf[etr_rd_idx & wsize_mask],
						  CORESIGHT_TRACE_FRAME_SIZE);
			for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE32; i++) {
				frame_buf[i] = etr_buf[(etr_rd_idx + i) & wsize_mask];
			}
			rd_idx_inc();
			__sync_synchronize();
		} else {
			/* Periodically emit a frame of 0xFF bytes (a frame
			 * synchronization packet) instead of reading, so that
			 * a receiver can recover stream alignment.
			 */
			sync_cnt = CONFIG_DEBUG_NRF_ETR_SYNC_PERIOD;
			memset(frame_buf, 0xff, CORESIGHT_TRACE_FRAME_SIZE);
		}

		if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE) ||
		    IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DEBUG)) {
			if ((pending >= (wsize_mask - MIN_DATA)) ||
			    (pending_data() >= (wsize_mask - MIN_DATA))) {
				/* If the buffer is close to full before or
				 * after reading the frame, assume an
				 * overwrite and a loss of synchronization.
				 */
				sync_loss();
			}

			process_frame((uint8_t *)frame_buf, pending);
			if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE)) {
				process_messages();
			}
		} else {
			dump_frame((uint8_t *)frame_buf);
			frame_buf = (use_async_uart && (frame_buf == frame_buf0)) ?
				    frame_buf1 : frame_buf0;
		}
	}

	/* Write dummy data so that logs do not linger below the processing
	 * threshold and are processed on time.
	 */
	if (pending < MIN_DATA) {
		log_frontend_stmesp_dummy_write();
	}
}

static int decoder_init(void)
{
	int err;
	static bool once;

	if (once) {
		return -EALREADY;
	}

	once = true;
	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE)) {
		static const struct log_frontend_stmesp_demux_config config = {
			.m_ids = stm_m_id,
			.m_ids_cnt = ARRAY_SIZE(stm_m_id),
			.source_id_buf = source_id_buf,
			.source_id_buf_len = ARRAY_SIZE(source_id_buf)};

		err = log_frontend_stmesp_demux_init(&config);
		if (err < 0) {
			return err;
		}
	}

	static const struct mipi_stp_decoder_config stp_decoder_cfg = {.cb = decoder_cb,
								       .start_out_of_sync = true};

	mipi_stp_decoder_init(&stp_decoder_cfg);

	cs_trace_defmt_init(deformatter_cb);

	return 0;
}

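/** @brief Flush data pending in the ETR buffer.
 *
 * Switches the UART to blocking poll-out and drains the buffer with interrupts
 * locked, which suits exceptional contexts such as the panic mode mentioned in
 * process().
 */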
void debug_nrf_etr_flush(void)
{
	int cnt = 4;

	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE) ||
	    IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DEBUG)) {
		(void)decoder_init();
	}

	/* Set the flag that forces the UART to use blocking poll-out instead
	 * of the asynchronous API.
	 */
	use_async_uart = false;
	uint32_t k = irq_lock();

	/* Repeat an arbitrary number of times to ensure that everything is
	 * flushed.
	 */
	while (cnt--) {
		process();
	}

	irq_unlock(k);
}

#ifndef CONFIG_DEBUG_NRF_ETR_SHELL
static void etr_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	uint64_t checkpoint = 0;

	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE) ||
	    IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DEBUG)) {
		int err;

		err = decoder_init();
		if (err < 0) {
			return;
		}
	}

	while (1) {
		process();

		uint64_t now = k_uptime_get();

		if (DROP_CHECK_PERIOD && (now - checkpoint) > DROP_CHECK_PERIOD) {
			uint32_t cnt = log_frontend_stmesp_demux_get_dropped();

			checkpoint = now;
			if (cnt || oosync_cnt) {
				oosync_cnt = 0;
				LOG_WRN("Too many log messages, some dropped");
			}
		}

		k_sleep(K_MSEC(CONFIG_DEBUG_NRF_ETR_BACKOFF));
	}
}
#endif

static void uart_event_handler(const struct device *dev, struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(dev);

	switch (evt->type) {
	case UART_TX_ABORTED:
		/* An intentional fall-through to UART_TX_DONE. */
	case UART_TX_DONE:
		k_sem_give(&uart_sem);
		break;
#ifdef CONFIG_DEBUG_NRF_ETR_SHELL
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&async_rx, evt->data.rx.buf, evt->data.rx.len);
		shell_handler(SHELL_TRANSPORT_EVT_RX_RDY, shell_context);
		break;
	case UART_RX_BUF_REQUEST: {
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);
		size_t len = uart_async_rx_get_buf_len(&async_rx);

		if (buf) {
			int err = uart_rx_buf_rsp(dev, buf, len);

			if (err < 0) {
				uart_async_rx_on_buf_rel(&async_rx, buf);
			}
		} else {
			atomic_inc(&pending_rx_req);
		}

		break;
	}
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		break;
#endif /* CONFIG_DEBUG_NRF_ETR_SHELL */
	default:
		__ASSERT_NO_MSG(0);
	}
}

static void tbm_event_handler(nrf_tbm_event_t event)
{
	if (event == NRF_TBM_EVENT_FULL) {
		tbm_full = true;
	}

#ifdef CONFIG_DEBUG_NRF_ETR_SHELL
	k_poll_signal_raise(&etr_shell.ctx->signals[SHELL_SIGNAL_LOG_MSG], 0);
#else
	k_wakeup(&etr_thread);
#endif
}

int etr_process_init(void)
{
	int err;

	k_sem_init(&uart_sem, 1, 1);

	err = uart_callback_set(uart_dev, uart_event_handler, NULL);
	use_async_uart = (err == 0);

	static const nrfx_tbm_config_t config = {.size = wsize_mask};

	nrfx_tbm_init(&config, tbm_event_handler);

	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(tbm)), DT_IRQ(DT_NODELABEL(tbm), priority),
		    nrfx_isr, nrfx_tbm_irq_handler, 0);
	irq_enable(DT_IRQN(DT_NODELABEL(tbm)));
	nrfx_tbm_start();

#ifdef CONFIG_DEBUG_NRF_ETR_SHELL
	uint32_t level = CONFIG_LOG_MAX_LEVEL;
	static const struct shell_backend_config_flags cfg_flags =
		SHELL_DEFAULT_BACKEND_CONFIG_FLAGS;

	shell_init(&etr_shell, NULL, cfg_flags, true, level);
	k_timer_start(&etr_timer, K_MSEC(CONFIG_DEBUG_NRF_ETR_BACKOFF), K_NO_WAIT);
	if (IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DECODE) || IS_ENABLED(CONFIG_DEBUG_NRF_ETR_DEBUG)) {
		err = decoder_init();
		if (err < 0) {
			return err;
		}
	}
#else
	k_thread_create(&etr_thread, etr_stack, K_KERNEL_STACK_SIZEOF(etr_stack), etr_thread_func,
			NULL, NULL, NULL, K_LOWEST_APPLICATION_THREAD_PRIO, 0, K_NO_WAIT);
	k_thread_name_set(&etr_thread, "etr_process");
#endif

	return 0;
}

#define NRF_ETR_INIT_PRIORITY UTIL_INC(UTIL_INC(CONFIG_IRONSIDE_SE_CALL_INIT_PRIORITY))

SYS_INIT(etr_process_init, POST_KERNEL, NRF_ETR_INIT_PRIORITY);

#if defined(CONFIG_NORDIC_VPR_LAUNCHER) && defined(CONFIG_LOG_FRONTEND_STMESP_FSC)
/* TDD/ETR must be up and running before the VPR cores are started, as they
 * write vital initial data to the ETR that cannot be lost.
 */
BUILD_ASSERT(CONFIG_NORDIC_VPR_LAUNCHER_INIT_PRIORITY > NRF_ETR_INIT_PRIORITY);
#endif

#ifdef CONFIG_DEBUG_NRF_ETR_SHELL

static void etr_timer_handler(struct k_timer *timer)
{
	if (pending_data() >= MIN_DATA) {
		k_poll_signal_raise(&etr_shell.ctx->signals[SHELL_SIGNAL_LOG_MSG], 0);
	} else {
		k_timer_start(timer, K_MSEC(CONFIG_DEBUG_NRF_ETR_BACKOFF), K_NO_WAIT);
	}
}

bool z_shell_log_backend_process(const struct shell_log_backend *backend)
{
	ARG_UNUSED(backend);

	process();
	k_timer_start(&etr_timer, K_MSEC(CONFIG_DEBUG_NRF_ETR_BACKOFF), K_NO_WAIT);

	return false;
}

void z_shell_log_backend_disable(const struct shell_log_backend *backend)
{
	ARG_UNUSED(backend);
}

void z_shell_log_backend_enable(const struct shell_log_backend *backend, void *ctx,
				uint32_t init_log_level)
{
	ARG_UNUSED(backend);
	ARG_UNUSED(ctx);
	ARG_UNUSED(init_log_level);
}

static int etr_shell_write(const struct shell_transport *transport, const void *data, size_t length,
			   size_t *cnt)
{
	size_t len = length;
	uint8_t *buf = (uint8_t *)data;
	size_t chunk_len;

	/* Send in chunks no larger than the log_output buffer, which matches
	 * the size of the TX buffers used by log_output_func().
	 */
	do {
		chunk_len = MIN(len, sizeof(log_output_buf));
		len -= log_output_func(buf, chunk_len, NULL);
		buf += chunk_len;
	} while (len > 0);

	*cnt = length;
	shell_handler(SHELL_TRANSPORT_EVT_TX_RDY, shell_context);

	return 0;
}

static int rx_enable(uint8_t *buf, size_t len)
{
	/* 10 ms inactivity timeout. */
	return uart_rx_enable(uart_dev, buf, len, 10000);
}

static int etr_shell_read(const struct shell_transport *transport, void *data, size_t length,
			  size_t *cnt)
{
	uint8_t *buf;
	size_t blen;
	bool buf_available;

	blen = uart_async_rx_data_claim(&async_rx, &buf, length);
	memcpy(data, buf, blen);
	buf_available = uart_async_rx_data_consume(&async_rx, blen);

	*cnt = blen;
	if (pending_rx_req && buf_available) {
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);
		size_t len = uart_async_rx_get_buf_len(&async_rx);
		int err;

		__ASSERT_NO_MSG(buf != NULL);
		atomic_dec(&pending_rx_req);
		err = uart_rx_buf_rsp(uart_dev, buf, len);
		/* If it is too late and RX is disabled then re-enable it. */
		if (err < 0) {
			if (err == -EACCES) {
				pending_rx_req = 0;
				err = rx_enable(buf, len);
			} else {
				return err;
			}
		}
	}

	return 0;
}

static int etr_shell_enable(const struct shell_transport *transport, bool blocking_tx)
{
	return 0;
}

static int etr_shell_uninit(const struct shell_transport *transport)
{
	return 0;
}

static int etr_shell_init(const struct shell_transport *transport, const void *config,
			  shell_transport_handler_t evt_handler, void *context)
{
	int err;
	uint8_t *buf;
	static const struct uart_async_rx_config async_rx_config = {
		.buffer = rx_buf,
		.length = sizeof(rx_buf),
		.buf_cnt = CONFIG_DEBUG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT,
	};

	shell_context = context;
	shell_handler = evt_handler;
	err = uart_async_rx_init(&async_rx, &async_rx_config);
	if (err) {
		return err;
	}

	buf = uart_async_rx_buf_req(&async_rx);

	return rx_enable(buf, uart_async_rx_get_buf_len(&async_rx));
}

#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
static void etr_shell_update(const struct shell_transport *transport)
{
}
#endif

const struct shell_transport_api shell_api = {
	.init = etr_shell_init,
	.uninit = etr_shell_uninit,
	.enable = etr_shell_enable,
	.write = etr_shell_write,
	.read = etr_shell_read,
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
	.update = etr_shell_update,
#endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
};

static struct shell_transport transport = {
	.api = &shell_api,
	.ctx = NULL,
};

static uint8_t shell_out_buffer[CONFIG_SHELL_PRINTF_BUFF_SIZE];
Z_SHELL_DEFINE(etr_shell, CONFIG_DEBUG_NRF_ETR_SHELL_PROMPT, &transport, shell_out_buffer, NULL,
	       SHELL_FLAG_OLF_CRLF);
#endif /* CONFIG_DEBUG_NRF_ETR_SHELL */