/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "Option must be enabled when CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "Option must be enabled when CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}
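
/* Illustrative sketch (caller-side, not part of this file): code that wants
 * to render a message in a selected format can look the handler up and must
 * guard against formats that were not compiled in (NULL entries). Here
 * output, msg and flags stand for caller-owned objects:
 *
 *	log_format_func_t fn = log_format_func_t_get(LOG_OUTPUT_TEXT);
 *
 *	if (fn != NULL) {
 *		fn(output, &msg->log, flags);
 *	}
 */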

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif
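
/* Sizing note (illustrative arithmetic, example value only): the Kconfig
 * option CONFIG_LOG_BUFFER_SIZE is expressed in bytes while the mpsc_pbuf
 * operates on 32-bit words, so e.g. a 1024-byte configuration yields a
 * 256-entry buf32 array.
 */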

/* Check that default tag can fit in tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		static struct k_spinlock process_lock;
		k_spinlock_key_t key = k_spin_lock(&process_lock);

		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		if (cnt == 0) {
			k_timer_start(&log_process_thread_timer,
				      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
				      K_NO_WAIT);
		} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
			   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
			k_timer_stop(&log_process_thread_timer);
			k_sem_give(&log_process_thread_sem);
		} else {
			/* No action needed. Message processing will be
			 * triggered by the timeout or when the number of
			 * buffered messages reaches the threshold.
			 */
		}
	}
}
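
/* Worked illustration (assuming CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=10 and
 * CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=1000, example values only): the first
 * message queued after the buffer was drained sees cnt == 0 and arms the 1 s
 * timer; messages two through nine take no action; the tenth message sees
 * cnt == 9, so (cnt + 1) hits the threshold, the timer is stopped and the
 * processing thread is woken immediately instead of waiting for the timeout.
 */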

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int retCode = log_backend_format_set(backend, log_type);

			if (retCode != 0) {
				failed_backend = backend;
			}
		}
	}
	return failed_backend;
}

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				  LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				  Z_LOG_MSG_CBPRINTF_FLAGS(0),
				  fmt, ap);
}

static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set default timestamp. */
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();
		log_set_timestamp_func(default_get_timestamp, freq);
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_NUM_OF_SLOTS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	int i = 0;

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	/* Assign ids to backends. */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function then it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(i);
			}

			i++;
		}
	}

	/* If init is blocking, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
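
/* Usage sketch (hypothetical application code, not part of this file): an
 * application can swap in its own time base, e.g. a millisecond uptime
 * counter, where my_uptime_ms is a user-provided log_timestamp_get_t:
 *
 *	static log_timestamp_t my_uptime_ms(void)
 *	{
 *		return (log_timestamp_t)k_uptime_get();
 *	}
 *
 *	(void)log_set_timestamp_func(my_uptime_ms, 1000U);
 *
 * The frequency argument (here 1000 Hz) must match the getter so that
 * backends can render human-readable timestamps.
 */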

void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger may not be initialized yet.
	 * Force initialization of the logger and auto-start the backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}
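
/* Context note with a sketch: log_panic() is what the LOG_PANIC() macro from
 * log_ctrl.h expands to in fatal-error paths. A hypothetical fault handler
 * that wants all pending messages flushed synchronously before halting would
 * simply do:
 *
 *	LOG_PANIC();
 *
 * after which every active backend operates in blocking panic mode.
 */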

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <syscalls/log_panic_mrsh.c>
#endif

static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;
	struct log_source_dynamic_data *source;

	source = (struct log_source_dynamic_data *)log_msg_get_source(&msg->log);
	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}

	if (source) {
		source_id = log_dynamic_source_id(source);
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}
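
/* Filtering illustration (example values, not defaults): levels grow with
 * verbosity (LOG_LEVEL_ERR == 1 ... LOG_LEVEL_DBG == 4), so with a backend's
 * runtime filter for a given source set to LOG_LEVEL_INF (3), an INF message
 * passes (3 <= 3) while a DBG message is discarded (4 > 3) for that backend
 * only; other backends apply their own per-source filters independently.
 */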

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached.
	 * It might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg);
		z_log_msg_free(msg);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, there are pending messages but they
		 * are too new; processing backs off to give older messages
		 * from remote domains time to arrive.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		    (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			/* Advance the report window only when a report was
			 * actually emitted.
			 */
			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}
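
/* Usage sketch (hypothetical, for builds with CONFIG_LOG_PROCESS_THREAD=n):
 * when no dedicated processing thread exists, the application drains the
 * deferred-mode buffer itself, typically from its main or idle loop:
 *
 *	while (log_process()) {
 *		;
 *	}
 *
 * Each call processes at most one message and returns true while more remain
 * pending.
 */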

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		msg_process(m);

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated to each link, claim the oldest message
 * (lowest timestamp).
 */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and pick the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}

		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* The entry is too new. Back off for some time
				 * to allow remote messages that were captured
				 * earlier (but on another platform) to arrive.
				 * Calculate how long processing shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		(*chosen).msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}
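
/* Worked backoff example (illustrative numbers only): with a 1000 Hz
 * timestamp source and CONFIG_LOG_PROCESSING_LATENCY_US=100000, proc_latency
 * is (1000 * 100000) / 1000000 = 100 timestamp ticks. A message stamped 40
 * ticks before "now" yields diff = t_min - (now - 100) = 60 > 0, so the
 * claim is deferred; diff is rescaled by
 * sys_clock_hw_cycles_per_sec() / timestamp_freq before being handed to
 * K_TICKS() to arm the backoff timer.
 */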

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only one buffer if others are not registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
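
/* Truncation example (assuming CONFIG_LOG_TAG_MAX_LEN=4, an example value):
 * log_set_tag("net") stores "net" and returns 0, while log_set_tag("netif")
 * stores "net~" (the last kept character is replaced by '~' to flag the
 * truncation) and returns -ENOMEM.
 */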

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
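
/* Usage sketch (hypothetical caller): an application sizing its log buffer
 * can sample utilization at runtime; note that the high-water mark depends
 * on CONFIG_LOG_MEM_UTILIZATION, otherwise the query reports an error:
 *
 *	uint32_t size, used, peak;
 *
 *	if (log_mem_get_usage(&size, &used) == 0) {
 *		printk("log buffer: %u/%u bytes\n", used, size);
 *	}
 *	if (log_mem_get_max_usage(&peak) == 0) {
 *		printk("peak: %u bytes\n", peak);
 *	}
 */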

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);

	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not yet activated, set up a periodic thread
	 * wake-up to poll them for readiness. The period is arbitrary.
	 * If all backends are ready, no periodic wake-up is needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;

	thread_set(k_current_get());

	/* The logging thread is woken up periodically until all backends that
	 * should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake-up is no longer needed since
				 * all backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
			     log_process_thread_timer_expiry_fn, NULL);
		/* Start the logging thread. */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					(K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS)),
					(K_NO_WAIT)));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);