1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <stdint.h>
8 #include <zephyr/kernel.h>
9 #include <zephyr/drivers/uart.h>
10 #include <zephyr/cache.h>
11 #include <zephyr/shell/shell.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/logging/log_output.h>
14 #include <zephyr/logging/log_frontend_stmesp.h>
15 #include <zephyr/logging/log_frontend_stmesp_demux.h>
16 #include <zephyr/debug/coresight/cs_trace_defmt.h>
17 #include <zephyr/debug/mipi_stp_decoder.h>
18 #include <zephyr/linker/devicetree_regions.h>
19 #include <zephyr/drivers/misc/coresight/nrf_etr.h>
20 #include <zephyr/drivers/serial/uart_async_rx.h>
21 #include <zephyr/sys/printk.h>
22 #include <dmm.h>
23 #include <nrfx_tbm.h>
24 #include <stdio.h>
/* Module logging; used sparingly (see DBG() for debug-path output). */
LOG_MODULE_REGISTER(cs_etr_tbm);
26
/* Console UART used for trace output (and for the shell, when enabled). */
#define UART_NODE DT_CHOSEN(zephyr_console)

/* Devicetree node describing the ETR circular buffer memory region. */
#define ETR_BUFFER_NODE DT_NODELABEL(etr_buffer)

/* Period (in ms, compared against k_uptime_get()) between checks for dropped
 * messages. 0 (decoding disabled) disables the check.
 */
#define DROP_CHECK_PERIOD \
	COND_CODE_1(CONFIG_NRF_ETR_DECODE, \
		(CONFIG_NRF_ETR_DECODE_DROP_PERIOD), (0))

/* Minimum amount of pending data (in 32-bit words) worth processing. */
#define MIN_DATA (2 * CORESIGHT_TRACE_FRAME_SIZE32)

/* Since ETR debug is a part of logging infrastructure, logging cannot be used
 * for debugging. Printk is used (assuming CONFIG_LOG_PRINTK=n)
 */
#define DBG(...) IF_ENABLED(CONFIG_NRF_ETR_DEBUG, (printk(__VA_ARGS__)))
41
/** @brief Macro for dumping debug data.
 *
 * Prints @p _data as a hex value followed by its first bytes rendered as
 * characters.
 *
 * @param _data Data variable (lvalue, up to 64 bits wide).
 * @param _nlen Number of nibbles in @p _data to print.
 * @param _marked Prefix the dump with "M" when true.
 */
#define DBG_DATA(_data, _nlen, _marked) \
	do { \
		const char *fmt; \
		switch (_nlen) { \
		case 2: \
			fmt = "D%s\t%02llx "; \
			break; \
		case 4: \
			fmt = "D%s\t%04llx "; \
			break; \
		case 8: \
			fmt = "D%s\t%08llx "; \
			break; \
		default: \
			fmt = "D%s\t%016llx "; \
			break; \
		} \
		/* Callers pass a 64-bit value (mipi_stp_decoder data field); the
		 * argument must be cast to match %llx - passing a 64-bit value to
		 * a plain %x specifier is undefined behavior in varargs.
		 */ \
		DBG(fmt, _marked ? "M" : "", (unsigned long long)(_data)); \
		for (int i = 0; i < (_nlen) / 2; i++) { \
			DBG("%c ", ((char *)&(_data))[i]); \
		} \
		DBG("\n"); \
	} while (0)
70
71 static const uint32_t wsize_mask = DT_REG_SIZE(ETR_BUFFER_NODE) / sizeof(int) - 1;
72 static const uint32_t wsize_inc = DT_REG_SIZE(ETR_BUFFER_NODE) / sizeof(int) - 1;
73
/* True when the STP decoder is synchronized to the stream (ASYNC seen). */
static bool in_sync;
/* Number of synchronization losses since the last drop report. */
static int oosync_cnt;
/* Latched from the TBM ISR when the counter reaches its maximum value. */
static volatile bool tbm_full;
/* Accumulated base added to the TBM counter to form a non-wrapping index. */
static volatile uint32_t base_wr_idx;
/* Non-wrapping ETR read index (in 32-bit words). */
static uint32_t etr_rd_idx;
/* Counts number of new messages completed in the current formatter frame decoding. */
static uint32_t new_msg_cnt;

/* True when the UART driver supports the asynchronous API. */
static bool volatile use_async_uart;

/* Guards exclusive access to the UART TX path (one transfer at a time). */
static struct k_sem uart_sem;
static const struct device *uart_dev = DEVICE_DT_GET(UART_NODE);
/* Two DMA-able frame buffers used in ping-pong fashion for async UART TX. */
static uint32_t frame_buf0[CORESIGHT_TRACE_FRAME_SIZE32] DMM_MEMORY_SECTION(UART_NODE);
static uint32_t frame_buf1[CORESIGHT_TRACE_FRAME_SIZE32] DMM_MEMORY_SECTION(UART_NODE);
/* Separate working buffer used when frames are decoded rather than dumped. */
static uint32_t frame_buf_decode[CORESIGHT_TRACE_FRAME_SIZE32];
static uint32_t *frame_buf = IS_ENABLED(CONFIG_NRF_ETR_DECODE) ?
			frame_buf_decode : frame_buf0;

K_KERNEL_STACK_DEFINE(etr_stack, CONFIG_NRF_ETR_STACK_SIZE);
static struct k_thread etr_thread;

/* Cache maintenance (invalidate before read) requires the ETR buffer to be
 * cache-line aligned and a multiple of the cache line size.
 */
BUILD_ASSERT((DT_REG_SIZE(ETR_BUFFER_NODE) % CONFIG_DCACHE_LINE_SIZE) == 0);
BUILD_ASSERT((DT_REG_ADDR(ETR_BUFFER_NODE) % CONFIG_DCACHE_LINE_SIZE) == 0);
97
/* Domain details and prefixes: STM master IDs and matching domain names,
 * index-aligned (stm_m_id[i] is named stm_m_name[i]).
 */
static const uint16_t stm_m_id[] = {0x21, 0x22, 0x23, 0x2c, 0x2d, 0x2e, 0x24, 0x80};
/* Scratch space for the demultiplexer's source-ID bookkeeping (8 per master). */
static uint32_t source_id_buf[ARRAY_SIZE(stm_m_id) * 8];
static const char *const stm_m_name[] = {"sec", "app", "rad", "sys", "flpr", "ppr", "mod", "hw"};
/* Names of hardware events, indexed by event number. */
static const char *const hw_evts[] = {
	"CTI211_0",    /* 0 CTI211 trigger out 1 */
	"CTI211_1",    /* 1 CTI211 trigger out 1 inverted */
	"CTI211_2",    /* 2 CTI211 trigger out 2 */
	"CTI211_3",    /* 3 CTI211 trigger out 2 inverted*/
	"Sec up",      /* 4 Secure Domain up */
	"Sec down",    /* 5 Secure Domain down */
	"App up",      /* 6 Application Domain up */
	"App down",    /* 7 Application Domain down */
	"Rad up",      /* 8 Radio Domain up */
	"Rad down",    /* 9 Radio Domain down */
	"Radf up",     /* 10 Radio fast up */
	"Radf down",   /* 11 Radio fast down */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	NULL,          /* Reserved */
	"GD LL up",    /* 26 Global domain low leakage up */
	"GD LL down",  /* 27 Global domain low leakage down */
	/* NOTE(review): entries 28/29 and 30/31 are duplicated ("up"/"up" and
	 * "down"/"down"). Verify against the hardware event documentation
	 * whether 29 should be "GD1 HS down" and 30 "GD0 HS up".
	 */
	"GD1 HS up",   /* 28 Global domain high speed 1 up */
	"GD1 HS up",   /* 29 Global domain high speed 1 up */
	"GD0 HS down", /* 30 Global domain high speed 0 down */
	"GD0 HS down", /* 31 Global domain high speed 0 down */
};
136
#ifdef CONFIG_NRF_ETR_SHELL
/* Total RX buffer pool size for the shell UART transport. */
#define RX_BUF_SIZE \
	(CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_SIZE * CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT)

static void etr_timer_handler(struct k_timer *timer);
K_TIMER_DEFINE(etr_timer, etr_timer_handler, NULL);
/* RX buffer pool, placed in DMA-able memory for the UART. */
static uint8_t rx_buf[RX_BUF_SIZE] DMM_MEMORY_SECTION(UART_NODE);
static struct uart_async_rx async_rx;
/* Counts RX buffer requests deferred because no buffer was available. */
static atomic_t pending_rx_req;
static const struct shell etr_shell;
/* Shell transport event callback and its context, set in etr_shell_init(). */
static shell_transport_handler_t shell_handler;
static void *shell_context;
#endif
150
/** @brief Log output callback which sends formatted data over UART.
 *
 * Uses double-buffered asynchronous TX when available, otherwise falls back
 * to blocking polled output.
 *
 * @param buf Data to send.
 * @param size Number of bytes in @p buf.
 * @param ctx Unused context pointer.
 *
 * @return Number of bytes processed (always @p size).
 */
static int log_output_func(uint8_t *buf, size_t size, void *ctx)
{
	if (!use_async_uart) {
		/* Blocking fallback: push bytes one at a time. */
		for (size_t i = 0; i < size; i++) {
			uart_poll_out(uart_dev, buf[i]);
		}
		return size;
	}

	/* Ping-pong between the two DMA-able frame buffers so one can be
	 * filled while the other is transmitted.
	 */
	static uint8_t *tx_buf = (uint8_t *)frame_buf0;
	int err;

	err = k_sem_take(&uart_sem, K_FOREVER);
	__ASSERT_NO_MSG(err >= 0);

	memcpy(tx_buf, buf, size);

	err = uart_tx(uart_dev, tx_buf, size, SYS_FOREVER_US);
	__ASSERT_NO_MSG(err >= 0);

	tx_buf = (tx_buf == (uint8_t *)frame_buf0) ? (uint8_t *)frame_buf1
						   : (uint8_t *)frame_buf0;

	return size;
}
175
/* Intermediate buffer used by the log output module for formatting. */
static uint8_t log_output_buf[CORESIGHT_TRACE_FRAME_SIZE];
LOG_OUTPUT_DEFINE(log_output, log_output_func, log_output_buf, sizeof(log_output_buf));
178
/** @brief Process a log message.
 *
 * The packet payload holds, in order: a cbprintf package (package_len bytes),
 * a null-terminated source name string, then optional hexdump data occupying
 * the remainder of total_len.
 */
static void log_message_process(struct log_frontend_stmesp_demux_log *packet)
{
	uint32_t flags = LOG_OUTPUT_FLAG_COLORS | LOG_OUTPUT_FLAG_LEVEL |
			 LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP;
	uint64_t ts = packet->timestamp;
	uint8_t level = packet->hdr.level;
	uint16_t plen = packet->hdr.package_len;
	/* Domain name derived from the major (STM master) index. */
	const char *dname = stm_m_name[packet->hdr.major];
	const uint8_t *package = packet->data;
	/* Source name string follows the cbprintf package. */
	const char *sname = &packet->data[plen];
	size_t sname_len = strlen(sname) + 1;
	/* Any remaining bytes are hexdump data. */
	uint16_t dlen = packet->hdr.total_len - (plen + sname_len);
	uint8_t *data = dlen ? &packet->data[plen + sname_len] : NULL;

	log_output_process(&log_output, ts, dname, sname, NULL, level, package, data, dlen, flags);
}
196
/** @brief Process a trace point message.
 *
 * Three packet classes are handled:
 * - IDs at or above the turbo log base: a registered string (level digit
 *   followed by the format) fetched from the log_stmesp_ptr section.
 * - Trace points carrying a 32-bit data word.
 * - Plain trace points (ID only).
 */
static void trace_point_process(struct log_frontend_stmesp_demux_trace_point *packet)
{
	static const uint32_t flags = LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP |
				      LOG_OUTPUT_FLAG_LEVEL;
	static const char *tp = "%d";
	static const char *tp_d32 = "%d %08x";
	const char *dname = stm_m_name[packet->major];
	static const char *sname = "tp";
	const char **lptr;

	if (packet->id >= CONFIG_LOG_FRONTEND_STMESP_TURBO_LOG_BASE) {
		TYPE_SECTION_GET(const char *, log_stmesp_ptr,
				 packet->id - CONFIG_LOG_FRONTEND_STMESP_TURBO_LOG_BASE, &lptr);
		/* First character of the registered string encodes the level. */
		uint8_t level = (uint8_t)((*lptr)[0]) - (uint8_t)'0';
		const char *ptr = *lptr + 1;
		static const union cbprintf_package_hdr desc0 = {
			.desc = {.len = 2 /* hdr + fmt */}};
		static const union cbprintf_package_hdr desc1 = {
			.desc = {.len = 3 /* hdr + fmt + data */}};
		/* Minimal cbprintf package built on the stack. */
		uint32_t tp_log[] = {packet->has_data ? (uint32_t)desc1.raw : (uint32_t)desc0.raw,
				     (uint32_t)ptr, packet->data};
		const char *source =
			log_frontend_stmesp_demux_sname_get(packet->major, packet->source_id);

		log_output_process(&log_output, packet->timestamp, dname, source, NULL, level,
				   (const uint8_t *)tp_log, NULL, 0, flags);
		return;
	} else if (packet->has_data) {
		uint32_t id = (uint32_t)packet->id - CONFIG_LOG_FRONTEND_STMESP_TP_CHAN_BASE;
		static const union cbprintf_package_hdr desc = {
			.desc = {.len = 4 /* hdr + fmt + id + data */}};
		uint32_t tp_d32_p[] = {(uint32_t)desc.raw, (uint32_t)tp_d32, id, packet->data};

		log_output_process(&log_output, packet->timestamp, dname, sname, NULL, 1,
				   (const uint8_t *)tp_d32_p, NULL, 0, flags);
		return;
	}

	static const union cbprintf_package_hdr desc = {.desc = {.len = 3 /* hdr + fmt + id */}};
	uint32_t tp_p[] = {(uint32_t)desc.raw, (uint32_t)tp, packet->id};

	log_output_process(&log_output, packet->timestamp, dname, sname, NULL,
			   1, (const uint8_t *)tp_p, NULL, 0, flags);
}
242
243 /** @brief Process a HW event message. */
hw_event_process(struct log_frontend_stmesp_demux_hw_event * packet)244 static void hw_event_process(struct log_frontend_stmesp_demux_hw_event *packet)
245 {
246 static const uint32_t flags = LOG_OUTPUT_FLAG_TIMESTAMP | LOG_OUTPUT_FLAG_FORMAT_TIMESTAMP;
247 static const char *tp = "%s";
248 static const char *dname = "hw";
249 static const char *sname = "event";
250 const char *evt_name = packet->evt < ARRAY_SIZE(hw_evts) ? hw_evts[packet->evt] : "invalid";
251 static const union cbprintf_package_hdr desc = {.desc = {.len = 3 /* hdr + fmt + id */}};
252 uint32_t tp_p[] = {(uint32_t)desc.raw, (uint32_t)tp, (uint32_t)evt_name};
253
254 log_output_process(&log_output, packet->timestamp, dname, sname, NULL,
255 1, (const uint8_t *)tp_p, NULL, 0, flags);
256 }
257
message_process(union log_frontend_stmesp_demux_packet packet)258 static void message_process(union log_frontend_stmesp_demux_packet packet)
259 {
260 switch (packet.generic_packet->type) {
261 case LOG_FRONTEND_STMESP_DEMUX_TYPE_TRACE_POINT:
262 trace_point_process(packet.trace_point);
263 break;
264 case LOG_FRONTEND_STMESP_DEMUX_TYPE_HW_EVENT:
265 hw_event_process(packet.hw_event);
266 break;
267 default:
268 log_message_process(packet.log);
269 break;
270 }
271 }
272
273 /** @brief Function called when potential STPv2 stream data drop is detected.
274 *
275 * When that occurs all active messages in the demultiplexer are marked as invalid and
276 * stp_decoder is switching to re-synchronization mode where data is decoded in
277 * search for ASYNC opcode.
278 */
sync_loss(void)279 static void sync_loss(void)
280 {
281 if (IS_ENABLED(CONFIG_NRF_ETR_DECODE)) {
282 mipi_stp_decoder_sync_loss();
283 log_frontend_stmesp_demux_reset();
284 oosync_cnt++;
285 in_sync = false;
286 }
287 }
288
289 /** @brief Indicate that STPv2 decoder is synchronized.
290 *
291 * That occurs when ASYNC opcode is found.
292 */
on_resync(void)293 static void on_resync(void)
294 {
295 if (IS_ENABLED(CONFIG_NRF_ETR_DECODE)) {
296 in_sync = true;
297 }
298 }
299
/** @brief STP decoder callback used for raw debug output only.
 *
 * Prints every decoded element via DBG()/DBG_DATA(); compiled to no-ops
 * unless CONFIG_NRF_ETR_DEBUG is enabled.
 *
 * @param type   Decoded element type.
 * @param data   Element payload.
 * @param ts     Timestamp attached to the element, or NULL.
 * @param marked True for marked data elements.
 */
static void decoder_cb_debug(enum mipi_stp_decoder_ctrl_type type,
			     union mipi_stp_decoder_data data,
			     uint64_t *ts, bool marked)
{
	switch (type) {
	case STP_DECODER_MAJOR:
		DBG("M%04x\n", data.id);
		break;
	case STP_DECODER_CHANNEL:
		DBG("C%04x\n", data.id);
		break;
	case STP_DATA8:
		DBG_DATA(data.data, 2, marked);
		if (ts) {
			DBG("TS:%lld\n", *ts);
		}
		break;
	case STP_DATA16:
		DBG_DATA(data.data, 4, marked);
		break;
	case STP_DATA32:
		DBG_DATA(data.data, 8, marked);
		if (ts) {
			DBG("TS:%lld\n", *ts);
		}
		break;
	case STP_DATA64:
		DBG_DATA(data.data, 16, marked);
		break;
	case STP_DECODER_FLAG:
		DBG("F%s\n", ts ? "TS" : "");
		break;
	case STP_DECODER_NULL:
		DBG("NULL\n");
		break;
	case STP_DECODER_MERROR:
		DBG("MERR\n");
		break;
	case STP_DECODER_VERSION:
		DBG("VER\n");
		break;
	case STP_DECODER_FREQ: {
		DBG("FREQ%s %d\n", ts ? "TS" : "", (int)data.freq);
		break;
	}
	case STP_DECODER_GERROR:
		DBG("GERR\n");
		break;
	case STP_DECODER_ASYNC:
		DBG("ASYNC\n");
		break;
	case STP_DECODER_NOT_SUPPORTED:
		DBG("NOTSUP\n");
		break;
	default:
		DBG("OTHER\n");
		break;
	}
}
359
/** @brief STP decoder callback - feed decoded elements into the demultiplexer.
 *
 * @param type   Decoded element type.
 * @param data   Element payload.
 * @param ts     Timestamp attached to the element, or NULL.
 * @param marked True for marked data elements (message delimiters).
 */
static void decoder_cb(enum mipi_stp_decoder_ctrl_type type,
		       union mipi_stp_decoder_data data, uint64_t *ts,
		       bool marked)
{
	int rv = 0;

	decoder_cb_debug(type, data, ts, marked);

	if (!IS_ENABLED(CONFIG_NRF_ETR_DECODE)) {
		return;
	}

	switch (type) {
	case STP_DECODER_ASYNC:
		on_resync();
		break;
	case STP_DECODER_MAJOR:
		log_frontend_stmesp_demux_major(data.id);
		break;
	case STP_DECODER_CHANNEL:
		log_frontend_stmesp_demux_channel(data.id);
		break;
	case STP_DATA8:
		/* Marked data starts a new message; unmarked is payload. */
		if (marked) {
			rv = log_frontend_stmesp_demux_packet_start((uint32_t *)&data.data, ts);
			new_msg_cnt += rv;
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 1);
		}
		break;
	case STP_DATA16:
		if (marked) {
			/* Marked 16-bit with timestamp: complete log0 message;
			 * without timestamp: source ID update.
			 */
			if (ts) {
				rv = log_frontend_stmesp_demux_log0((uint16_t)data.data, ts);
				new_msg_cnt += rv;
			} else {
				log_frontend_stmesp_demux_source_id((uint16_t)data.data);
			}
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 2);
		}
		break;
	case STP_DATA32:
		if (marked) {
			rv = log_frontend_stmesp_demux_packet_start((uint32_t *)&data.data, ts);
			new_msg_cnt += rv;
		} else {
			log_frontend_stmesp_demux_data((char *)&data.data, 4);
			if (ts) {
				log_frontend_stmesp_demux_timestamp(*ts);
			}
		}
		break;
	case STP_DATA64:
		log_frontend_stmesp_demux_data((char *)&data.data, 8);
		break;
	case STP_DECODER_FLAG:
		/* Flag with timestamp opens a message; without it, closes one. */
		if (ts) {
			log_frontend_stmesp_demux_packet_start(NULL, ts);
		} else {
			log_frontend_stmesp_demux_packet_end();
		}
		new_msg_cnt++;
		break;
	case STP_DECODER_FREQ: {
		static uint32_t freq;
		/* Avoid calling log_output function multiple times as frequency
		 * is sent periodically.
		 */
		if (freq != (uint32_t)data.freq) {
			freq = (uint32_t)data.freq;
			log_output_timestamp_freq_set(freq);
		}
		break;
	}
	case STP_DECODER_MERROR: {
		sync_loss();
		break;
	}
	default:
		break;
	}

	/* Only -ENOMEM is accepted failure. */
	__ASSERT_NO_MSG((rv >= 0) || (rv == -ENOMEM));
}
446
/** @brief Coresight deformatter callback - pass frame payload to the STP decoder.
 *
 * @param id   Trace source ID (unused).
 * @param data Deformatted payload.
 * @param len  Payload length in bytes.
 */
static void deformatter_cb(uint32_t id, const uint8_t *data, size_t len)
{
	ARG_UNUSED(id);

	mipi_stp_decoder_decode(data, len);
}
451
/** @brief Get write index.
 *
 * It is a non-wrapping 32 bit write index. To get actual index in the ETR buffer
 * result must be masked by ETR buffer size mask.
 */
static uint32_t get_wr_idx(void)
{
	uint32_t cnt = nrfx_tbm_count_get();

	/* cnt below the maximum means the counter has actually wrapped since
	 * the FULL event was latched.
	 */
	if (tbm_full && (cnt < wsize_mask)) {
		/* TBM full event is generated when max value is reached and not when
		 * overflow occurs. We cannot increment base_wr_idx just after the
		 * event but only when counter actually wraps.
		 */
		base_wr_idx += wsize_inc;
		tbm_full = false;
	}

	return cnt + base_wr_idx;
}
472
473 /** @brief Get amount of pending data in ETR buffer. */
pending_data(void)474 static uint32_t pending_data(void)
475 {
476 return get_wr_idx() - etr_rd_idx;
477 }
478
/** @brief Advance the read index by one frame.
 *
 * Read index is not exact index in the ETR buffer. It does not wrap (32 bit word).
 * So ETR read index is derived by masking the value by the ETR buffer size mask.
 */
static void rd_idx_inc(void)
{
	etr_rd_idx += CORESIGHT_TRACE_FRAME_SIZE32;
}
488
489 /** @brief Process frame. */
process_frame(uint8_t * buf,uint32_t pending)490 static void process_frame(uint8_t *buf, uint32_t pending)
491 {
492 DBG("%d (wr:%d): ", pending, get_wr_idx() & wsize_mask);
493 for (int j = 0; j < CORESIGHT_TRACE_FRAME_SIZE; j++) {
494 DBG("%02x ", ((uint8_t *)buf)[j]);
495 }
496 DBG("\n");
497 cs_trace_defmt_process((uint8_t *)buf, CORESIGHT_TRACE_FRAME_SIZE);
498 DBG("\n");
499 }
500
/** @brief Drain completed messages from the demultiplexer and output them. */
static void process_messages(void)
{
	/* Static so a message being processed survives preemption by panic. */
	static union log_frontend_stmesp_demux_packet curr_msg;

	/* Process any new messages. curr_msg remains the same if panic
	 * interrupts currently ongoing processing (curr_msg is not NULL then).
	 * In such a case it is processed once again, which may lead to
	 * a partial repetition of that message on the output.
	 */
	while (new_msg_cnt || curr_msg.generic_packet) {
		if (!curr_msg.generic_packet) {
			curr_msg = log_frontend_stmesp_demux_claim();
		}
		if (curr_msg.generic_packet) {
			message_process(curr_msg);
			log_frontend_stmesp_demux_free(curr_msg);
			curr_msg.generic_packet = NULL;
		} else {
			/* Nothing more to claim. */
			break;
		}
	}
	new_msg_cnt = 0;
}
524
525 /** @brief Dump frame over UART (using polling or async API). */
dump_frame(uint8_t * buf)526 static void dump_frame(uint8_t *buf)
527 {
528 int err;
529
530 if (use_async_uart) {
531 err = k_sem_take(&uart_sem, K_FOREVER);
532 __ASSERT_NO_MSG(err >= 0);
533
534 err = uart_tx(uart_dev, buf, CORESIGHT_TRACE_FRAME_SIZE, SYS_FOREVER_US);
535 __ASSERT_NO_MSG(err >= 0);
536 } else {
537 for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE; i++) {
538 uart_poll_out(uart_dev, buf[i]);
539 }
540 }
541 }
542
/** @brief Attempt to process data pending in the ETR circular buffer.
 *
 * Data is processed in 16 bytes packages. Each package is a STPv2 frame which
 * contain data generated by STM stimulus ports.
 */
static void process(void)
{
	static const uint32_t *const etr_buf = (uint32_t *)(DT_REG_ADDR(ETR_BUFFER_NODE));
	/* Counts down frames until the next synthetic 0xFF frame is injected. */
	static uint32_t sync_cnt;
	uint32_t pending;

	/* If function is called in panic mode then it may interrupt ongoing
	 * processing. This must be carefully handled as function decodes data
	 * that must be synchronized. Losing synchronization results in failure.
	 *
	 * Special measures are taken to ensure proper synchronization when
	 * processing is preempted by panic.
	 */
	while ((pending = pending_data()) >= MIN_DATA) {
		/* Do not read the data that has already been read but not yet processed. */
		if (sync_cnt || (CONFIG_NRF_ETR_SYNC_PERIOD == 0)) {
			sync_cnt--;
			/* Invalidate the cache before reading DMA-written data. */
			sys_cache_data_invd_range((void *)&etr_buf[etr_rd_idx & wsize_mask],
						  CORESIGHT_TRACE_FRAME_SIZE);
			for (int i = 0; i < CORESIGHT_TRACE_FRAME_SIZE32; i++) {
				frame_buf[i] = etr_buf[(etr_rd_idx + i) & wsize_mask];
			}
			rd_idx_inc();
			__sync_synchronize();
		} else {
			/* Every CONFIG_NRF_ETR_SYNC_PERIOD frames inject an
			 * all-0xFF frame instead of reading from the buffer;
			 * presumably serves as a deformatter synchronization
			 * frame - TODO confirm against the Coresight formatter
			 * specification.
			 */
			sync_cnt = CONFIG_NRF_ETR_SYNC_PERIOD;
			memset(frame_buf, 0xff, CORESIGHT_TRACE_FRAME_SIZE);
		}

		if (IS_ENABLED(CONFIG_NRF_ETR_DECODE) || IS_ENABLED(CONFIG_NRF_ETR_DEBUG)) {
			if ((pending >= (wsize_mask - MIN_DATA)) ||
			    (pending_data() >= (wsize_mask - MIN_DATA))) {
				/* If before or after reading the frame it is close to full
				 * then assume overwrite and sync loss.
				 */
				sync_loss();
			}

			process_frame((uint8_t *)frame_buf, pending);
			if (IS_ENABLED(CONFIG_NRF_ETR_DECODE)) {
				process_messages();
			}
		} else {
			/* Dump mode: alternate ping-pong buffers when async TX is used. */
			dump_frame((uint8_t *)frame_buf);
			frame_buf = (use_async_uart && (frame_buf == frame_buf0)) ?
					frame_buf1 : frame_buf0;
		}
	}

	/* Fill the buffer to ensure that all logs are processed on time. */
	if (pending < MIN_DATA) {
		log_frontend_stmesp_dummy_write();
	}
}
604
decoder_init(void)605 static int decoder_init(void)
606 {
607 int err;
608 static bool once;
609
610 if (once) {
611 return -EALREADY;
612 }
613
614 once = true;
615 if (IS_ENABLED(CONFIG_NRF_ETR_DECODE)) {
616 static const struct log_frontend_stmesp_demux_config config = {
617 .m_ids = stm_m_id,
618 .m_ids_cnt = ARRAY_SIZE(stm_m_id),
619 .source_id_buf = source_id_buf,
620 .source_id_buf_len = ARRAY_SIZE(source_id_buf)};
621
622 err = log_frontend_stmesp_demux_init(&config);
623 if (err < 0) {
624 return err;
625 }
626 }
627
628 static const struct mipi_stp_decoder_config stp_decoder_cfg = {.cb = decoder_cb,
629 .start_out_of_sync = true};
630
631 mipi_stp_decoder_init(&stp_decoder_cfg);
632
633 cs_trace_defmt_init(deformatter_cb);
634
635 return 0;
636 }
637
nrf_etr_flush(void)638 void nrf_etr_flush(void)
639 {
640 int cnt = 4;
641
642 if (IS_ENABLED(CONFIG_NRF_ETR_DECODE) ||
643 IS_ENABLED(CONFIG_NRF_ETR_DEBUG)) {
644 (void)decoder_init();
645 }
646
647 /* Set flag which forces uart to use blocking polling out instead of
648 * asynchronous API.
649 */
650 use_async_uart = false;
651 uint32_t k = irq_lock();
652
653 /* Repeat arbitrary number of times to ensure that all that is flushed. */
654 while (cnt--) {
655 process();
656 }
657
658 irq_unlock(k);
659 }
660
661 #ifndef CONFIG_NRF_ETR_SHELL
/** @brief Main ETR processing thread (used when the shell backend is disabled).
 *
 * Continuously drains the ETR buffer, and periodically reports dropped
 * messages or synchronization losses.
 */
static void etr_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	uint64_t checkpoint = 0;

	if (IS_ENABLED(CONFIG_NRF_ETR_DECODE) ||
	    IS_ENABLED(CONFIG_NRF_ETR_DEBUG)) {
		int err;

		err = decoder_init();
		if (err < 0) {
			/* Initialization failed elsewhere; nothing to do here. */
			return;
		}
	}

	while (1) {
		process();

		uint64_t now = k_uptime_get();

		/* DROP_CHECK_PERIOD is 0 when decoding is disabled, which
		 * disables the drop reporting entirely.
		 */
		if (DROP_CHECK_PERIOD && (now - checkpoint) > DROP_CHECK_PERIOD) {
			uint32_t cnt = log_frontend_stmesp_demux_get_dropped();

			checkpoint = now;
			if (cnt || oosync_cnt) {
				oosync_cnt = 0;
				LOG_WRN("Too many log messages, some dropped");
			}
		}

		k_sleep(K_MSEC(CONFIG_NRF_ETR_BACKOFF));
	}
}
694 #endif
695
/** @brief UART driver event handler.
 *
 * Handles TX completion for the async TX path and, when the shell is
 * enabled, the asynchronous RX buffer management events.
 */
static void uart_event_handler(const struct device *dev, struct uart_event *evt, void *user_data)
{
	ARG_UNUSED(dev);

	switch (evt->type) {
	case UART_TX_ABORTED:
		/* An intentional fall-through to UART_TX_DONE. */
	case UART_TX_DONE:
		/* Release the TX path for the next buffer. */
		k_sem_give(&uart_sem);
		break;
#ifdef CONFIG_NRF_ETR_SHELL
	case UART_RX_RDY:
		uart_async_rx_on_rdy(&async_rx, evt->data.rx.buf, evt->data.rx.len);
		shell_handler(SHELL_TRANSPORT_EVT_RX_RDY, shell_context);
		break;
	case UART_RX_BUF_REQUEST: {
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);
		size_t len = uart_async_rx_get_buf_len(&async_rx);

		if (buf) {
			int err = uart_rx_buf_rsp(dev, buf, len);

			if (err < 0) {
				/* Driver rejected the buffer; return it to the pool. */
				uart_async_rx_on_buf_rel(&async_rx, buf);
			}
		} else {
			/* No buffer available now; defer until one is consumed
			 * (handled in etr_shell_read()).
			 */
			atomic_inc(&pending_rx_req);
		}

		break;
	}
	case UART_RX_BUF_RELEASED:
		uart_async_rx_on_buf_rel(&async_rx, evt->data.rx_buf.buf);
		break;
	case UART_RX_DISABLED:
		break;
#endif /* CONFIG_NRF_ETR_SHELL */
	default:
		__ASSERT_NO_MSG(0);
	}
}
737
/** @brief TBM interrupt handler.
 *
 * Latches the FULL condition (counter reached its maximum value; see
 * get_wr_idx()) and wakes up the consumer (shell or processing thread).
 */
static void tbm_event_handler(nrf_tbm_event_t event)
{
	ARG_UNUSED(event);

	if (event == NRF_TBM_EVENT_FULL) {
		tbm_full = true;
	}

#ifdef CONFIG_NRF_ETR_SHELL
	k_poll_signal_raise(&etr_shell.ctx->signals[SHELL_SIGNAL_LOG_MSG], 0);
#else
	k_wakeup(&etr_thread);
#endif
}
752
/** @brief Initialize ETR processing: UART, TBM counter and the consumer.
 *
 * Installed via SYS_INIT. Falls back to polled UART output when the driver
 * does not support the asynchronous API.
 *
 * @return 0 on success, negative error code otherwise.
 */
int etr_process_init(void)
{
	int err;

	k_sem_init(&uart_sem, 1, 1);

	/* If the callback cannot be set, the async UART API is unsupported. */
	err = uart_callback_set(uart_dev, uart_event_handler, NULL);
	use_async_uart = (err == 0);

	static const nrfx_tbm_config_t config = {.size = wsize_mask};

	nrfx_tbm_init(&config, tbm_event_handler);

	IRQ_CONNECT(DT_IRQN(DT_NODELABEL(tbm)), DT_IRQ(DT_NODELABEL(tbm), priority),
		    nrfx_isr, nrfx_tbm_irq_handler, 0);
	irq_enable(DT_IRQN(DT_NODELABEL(tbm)));

#ifdef CONFIG_NRF_ETR_SHELL
	uint32_t level = CONFIG_LOG_MAX_LEVEL;
	static const struct shell_backend_config_flags cfg_flags =
		SHELL_DEFAULT_BACKEND_CONFIG_FLAGS;

	shell_init(&etr_shell, NULL, cfg_flags, true, level);
	/* Kick off the periodic pending-data check. */
	k_timer_start(&etr_timer, K_MSEC(CONFIG_NRF_ETR_BACKOFF), K_NO_WAIT);
	if (IS_ENABLED(CONFIG_NRF_ETR_DECODE) || IS_ENABLED(CONFIG_NRF_ETR_DEBUG)) {
		err = decoder_init();
		if (err < 0) {
			return err;
		}
	}
#else
	k_thread_create(&etr_thread, etr_stack, K_KERNEL_STACK_SIZEOF(etr_stack), etr_thread_func,
			NULL, NULL, NULL, K_LOWEST_APPLICATION_THREAD_PRIO, 0, K_NO_WAIT);
	k_thread_name_set(&etr_thread, "etr_process");
#endif

	return 0;
}
791
/* Start ETR processing once the kernel is up. */
SYS_INIT(etr_process_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
793
794 #ifdef CONFIG_NRF_ETR_SHELL
795
etr_timer_handler(struct k_timer * timer)796 static void etr_timer_handler(struct k_timer *timer)
797 {
798 if (pending_data() >= MIN_DATA) {
799 k_poll_signal_raise(&etr_shell.ctx->signals[SHELL_SIGNAL_LOG_MSG], 0);
800 } else {
801 k_timer_start(timer, K_MSEC(CONFIG_NRF_ETR_BACKOFF), K_NO_WAIT);
802 }
803 }
804
/** @brief Shell log backend hook - process ETR data in the shell thread context.
 *
 * @return false - there are never queued backend messages to process.
 */
bool z_shell_log_backend_process(const struct shell_log_backend *backend)
{
	ARG_UNUSED(backend);

	process();
	/* Re-arm the periodic pending-data check. */
	k_timer_start(&etr_timer, K_MSEC(CONFIG_NRF_ETR_BACKOFF), K_NO_WAIT);

	return false;
}
814
/** @brief Shell log backend hook - nothing to disable for the ETR backend. */
void z_shell_log_backend_disable(const struct shell_log_backend *backend)
{
	ARG_UNUSED(backend);
}
819
/** @brief Shell log backend hook - nothing to enable for the ETR backend. */
void z_shell_log_backend_enable(const struct shell_log_backend *backend, void *ctx,
				uint32_t init_log_level)
{
	ARG_UNUSED(backend);
	ARG_UNUSED(ctx);
	ARG_UNUSED(init_log_level);
}
827
/** @brief Shell transport write - route shell output through the log output path.
 *
 * Data is pushed in chunks limited by the size of the shared output buffer.
 *
 * @return 0 (always succeeds), with @p *cnt set to @p length.
 */
static int etr_shell_write(const struct shell_transport *transport, const void *data, size_t length,
			   size_t *cnt)
{
	const uint8_t *src = data;
	size_t rem = length;

	do {
		size_t chunk = MIN(rem, sizeof(log_output_buf));

		rem -= log_output_func((uint8_t *)src, chunk, NULL);
		src += chunk;
	} while (rem > 0);

	*cnt = length;
	shell_handler(SHELL_TRANSPORT_EVT_TX_RDY, shell_context);

	return 0;
}
846
/** @brief Enable UART reception into @p buf with a 10000 us inactivity timeout. */
static int rx_enable(uint8_t *buf, size_t len)
{
	return uart_rx_enable(uart_dev, buf, len, 10000);
}
851
/** @brief Shell transport read - consume data received over UART.
 *
 * Claims data from the async RX helper, copies it to the caller, and
 * satisfies a deferred RX buffer request once a buffer becomes free.
 */
static int etr_shell_read(const struct shell_transport *transport, void *data, size_t length,
			  size_t *cnt)
{
	uint8_t *buf;
	size_t blen;
	bool buf_available;

	blen = uart_async_rx_data_claim(&async_rx, &buf, length);
	memcpy(data, buf, blen);
	/* Consuming may free a whole buffer back to the pool. */
	buf_available = uart_async_rx_data_consume(&async_rx, blen);

	*cnt = blen;
	if (pending_rx_req && buf_available) {
		uint8_t *buf = uart_async_rx_buf_req(&async_rx);
		size_t len = uart_async_rx_get_buf_len(&async_rx);
		int err;

		__ASSERT_NO_MSG(buf != NULL);
		atomic_dec(&pending_rx_req);
		err = uart_rx_buf_rsp(uart_dev, buf, len);
		/* If it is too late and RX is disabled then re-enable it. */
		if (err < 0) {
			if (err == -EACCES) {
				pending_rx_req = 0;
				err = rx_enable(buf, len);
			} else {
				return err;
			}
		}
	}

	return 0;
}
885
/** @brief Shell transport enable - no action required for this transport. */
static int etr_shell_enable(const struct shell_transport *transport, bool blocking_tx)
{
	return 0;
}
890
/** @brief Shell transport uninit - no action required for this transport. */
static int etr_shell_uninit(const struct shell_transport *transport)
{
	return 0;
}
895
/** @brief Shell transport init - set up asynchronous RX and start reception.
 *
 * Stores the shell event handler/context used by the UART event handler.
 *
 * @return 0 on success, negative error code otherwise.
 */
static int etr_shell_init(const struct shell_transport *transport, const void *config,
			  shell_transport_handler_t evt_handler, void *context)
{
	int err;
	uint8_t *buf;
	static const struct uart_async_rx_config async_rx_config = {
		.buffer = rx_buf,
		.length = sizeof(rx_buf),
		.buf_cnt = CONFIG_NRF_ETR_SHELL_ASYNC_RX_BUFFER_COUNT,
	};

	shell_context = context;
	shell_handler = evt_handler;
	err = uart_async_rx_init(&async_rx, &async_rx_config);
	if (err) {
		return err;
	}

	/* Claim the first buffer and start receiving into it. */
	buf = uart_async_rx_buf_req(&async_rx);

	return rx_enable(buf, uart_async_rx_get_buf_len(&async_rx));
}
918
#ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
/** @brief Shell transport update hook - no action required. */
static void etr_shell_update(const struct shell_transport *transport)
{
}
#endif
924
925 const struct shell_transport_api shell_api = {
926 .init = etr_shell_init,
927 .uninit = etr_shell_uninit,
928 .enable = etr_shell_enable,
929 .write = etr_shell_write,
930 .read = etr_shell_read,
931 #ifdef CONFIG_MCUMGR_TRANSPORT_SHELL
932 .update = shell_update,
933 #endif /* CONFIG_MCUMGR_TRANSPORT_SHELL */
934 };
935
/* Shell transport instance bound to the ETR UART. */
static struct shell_transport transport = {
	.api = &shell_api,
	.ctx = NULL,
};

static uint8_t shell_out_buffer[CONFIG_SHELL_PRINTF_BUFF_SIZE];
/* ETR shell instance; NULL log backend as logs flow through the ETR path. */
Z_SHELL_DEFINE(etr_shell, CONFIG_NRF_ETR_SHELL_PROMPT, &transport, shell_out_buffer, NULL,
	       SHELL_FLAG_OLF_CRLF);
944 #endif /* CONFIG_NRF_ETR_SHELL */
945