/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <logging/log_msg.h>
#include "log_list.h"
#include <logging/log.h>
#include <logging/log_backend.h>
#include <logging/log_ctrl.h>
#include <logging/log_output.h>
#include <logging/log_msg2.h>
#include <logging/log_core2.h>
#include <sys/mpsc_pbuf.h>
#include <sys/printk.h>
#include <sys_clock.h>
#include <init.h>
#include <sys/__assert.h>
#include <sys/atomic.h>
#include <ctype.h>
#include <logging/log_frontend.h>
#include <syscall_handler.h>

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PRINTK_MAX_STRING_LENGTH
#define CONFIG_LOG_PRINTK_MAX_STRING_LENGTH 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_STRDUP_MAX_STRING
/* Required to suppress compiler warnings related to array subscript above array bounds.
 * log_strdup explicitly accesses element with index of (sizeof(log_strdup_buf.buf) - 2).
 * Set to 2 because some compilers generate warning on strncpy(dst, src, 0).
 */
#define CONFIG_LOG_STRDUP_MAX_STRING 2
#endif

#ifndef CONFIG_LOG_STRDUP_BUF_COUNT
#define CONFIG_LOG_STRDUP_BUF_COUNT 0
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

struct log_strdup_buf {
	atomic_t refcount;
	char buf[CONFIG_LOG_STRDUP_MAX_STRING + 1]; /* for termination */
} __aligned(sizeof(uintptr_t));

union log_msgs {
	struct log_msg *msg;
	union log_msg2_generic *msg2;
};

#define LOG_STRDUP_POOL_BUFFER_SIZE \
	(sizeof(struct log_strdup_buf) * CONFIG_LOG_STRDUP_BUF_COUNT)

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static const char *log_strdup_fail_msg = "<log_strdup alloc failed>";
struct k_mem_slab log_strdup_pool;
static uint8_t __noinit __aligned(sizeof(void *))
	log_strdup_pool_buf[LOG_STRDUP_POOL_BUFFER_SIZE];

static struct log_list_t list;
static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static uint32_t log_strdup_in_use;
static uint32_t log_strdup_max;
static uint32_t log_strdup_longest;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;

struct mpsc_pbuf_buffer log_buffer;
static uint32_t __aligned(Z_LOG_MSG2_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void notify_drop(const struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = notify_drop,
	.get_wlen = log_msg2_generic_get_wlen,
	.flags = IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		 MPSC_PBUF_MODE_OVERWRITE : 0
};

bool log_is_strdup(const void *buf);
static void msg_process(union log_msgs msg, bool bypass);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

uint32_t z_log_get_s_mask(const char *str, uint32_t nargs)
{
	char curr;
	bool arm = false;
	uint32_t arg = 0U;
	uint32_t mask = 0U;

	__ASSERT_NO_MSG(nargs <= 8*sizeof(mask));

	while ((curr = *str++) && arg < nargs) {
		if (curr == '%') {
			arm = !arm;
		} else if (arm && isalpha((int)curr)) {
			if (curr == 's') {
				mask |= BIT(arg);
			}
			arm = false;
			arg++;
		} else {
			; /* standard character, continue walk */
		}
	}

	return mask;
}

/**
 * @brief Check if address is in read only section.
 *
 * @param addr Address.
 *
 * @return True if the address is within the read only section.
 */
static bool is_rodata(const void *addr)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARC) || defined(CONFIG_X86) || \
	defined(CONFIG_ARM64) || defined(CONFIG_NIOS2) || \
	defined(CONFIG_RISCV) || defined(CONFIG_SPARC)
	extern const char *__rodata_region_start[];
	extern const char *__rodata_region_end[];
#define RO_START __rodata_region_start
#define RO_END __rodata_region_end
#elif defined(CONFIG_XTENSA)
	extern const char *_rodata_start[];
	extern const char *_rodata_end[];
#define RO_START _rodata_start
#define RO_END _rodata_end
#else
#define RO_START 0
#define RO_END 0
#endif

	return (((const char *)addr >= (const char *)RO_START) &&
		((const char *)addr < (const char *)RO_END));
}

/**
 * @brief Scan string arguments and report every address which is not in read
 *	  only memory and not yet duplicated.
 *
 * @param msg Log message.
 */
static void detect_missed_strdup(struct log_msg *msg)
{
#define ERR_MSG	"argument %d in source %s log message \"%s\" missing " \
		"log_strdup()."
	uint32_t idx;
	const char *str;
	const char *msg_str;
	uint32_t mask;

	if (!log_msg_is_std(msg)) {
		return;
	}

	msg_str = log_msg_str_get(msg);
	mask = z_log_get_s_mask(msg_str, log_msg_nargs_get(msg));

	while (mask) {
		idx = 31 - __builtin_clz(mask);
		str = (const char *)log_msg_arg_get(msg, idx);
		if (!is_rodata(str) && !log_is_strdup(str) &&
		    (str != log_strdup_fail_msg)) {
			const char *src_name =
				log_source_name_get(CONFIG_LOG_DOMAIN_ID,
						    log_msg_source_id_get(msg));

			if (IS_ENABLED(CONFIG_ASSERT)) {
				__ASSERT(0, ERR_MSG, idx, src_name, msg_str);
			} else {
				LOG_ERR(ERR_MSG, idx, src_name, msg_str);
			}
		}

		mask &= ~BIT(idx);
	}
#undef ERR_MSG
}

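/**
 * @brief Complete deferred message posting.
 *
 * Updates the buffered message counter and, depending on the configuration,
 * either processes messages immediately (panic mode), starts the processing
 * timer when the first message is buffered, or wakes the processing thread
 * once the trigger threshold is reached.
 */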
static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		unsigned int key = irq_lock();
		(void)log_process(false);
		irq_unlock(key);
	} else if (proc_tid != NULL && cnt == 0) {
		k_timer_start(&log_process_thread_timer,
			      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS), K_NO_WAIT);
	} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		if ((cnt == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) &&
		    (proc_tid != NULL)) {
			k_timer_stop(&log_process_thread_timer);
			k_sem_give(&log_process_thread_sem);
		}
	} else {
		/* No action needed. Message processing will be triggered by
		 * the timeout or once the number of buffered messages exceeds
		 * the threshold.
		 */
		;
	}
}

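/**
 * @brief Finalize a v1 log message.
 *
 * Stamps the message with its source/level identifiers and a timestamp,
 * appends it to the pending list and triggers post-processing.
 */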
static inline void msg_finalize(struct log_msg *msg,
				struct log_msg_ids src_level)
{
	unsigned int key;

	msg->hdr.ids = src_level;
	msg->hdr.timestamp = timestamp_func();

	key = irq_lock();

	log_list_add_tail(&list, msg);

	irq_unlock(key);

	z_log_msg_post_finalize();
}

void log_0(const char *str, struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_0(str, src_level);
	} else {
		struct log_msg *msg = log_msg_create_0(str);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_1(const char *str,
	   log_arg_t arg0,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_1(str, arg0, src_level);
	} else {
		struct log_msg *msg = log_msg_create_1(str, arg0);

		if (msg == NULL) {
			return;
		}
		msg_finalize(msg, src_level);
	}
}

void log_2(const char *str,
	   log_arg_t arg0,
	   log_arg_t arg1,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_2(str, arg0, arg1, src_level);
	} else {
		struct log_msg *msg = log_msg_create_2(str, arg0, arg1);

		if (msg == NULL) {
			return;
		}

		msg_finalize(msg, src_level);
	}
}

void log_3(const char *str,
	   log_arg_t arg0,
	   log_arg_t arg1,
	   log_arg_t arg2,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_3(str, arg0, arg1, arg2, src_level);
	} else {
		struct log_msg *msg = log_msg_create_3(str, arg0, arg1, arg2);

		if (msg == NULL) {
			return;
		}

		msg_finalize(msg, src_level);
	}
}

void log_n(const char *str,
	   log_arg_t *args,
	   uint32_t narg,
	   struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_n(str, args, narg, src_level);
	} else {
		struct log_msg *msg = log_msg_create_n(str, args, narg);

		if (msg == NULL) {
			return;
		}

		msg_finalize(msg, src_level);
	}
}

void log_hexdump(const char *str, const void *data, uint32_t length,
		 struct log_msg_ids src_level)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_hexdump(str, (const uint8_t *)data, length,
				     src_level);
	} else {
		struct log_msg *msg =
			log_msg_hexdump_create(str, (const uint8_t *)data, length);

		if (msg == NULL) {
			return;
		}

		msg_finalize(msg, src_level);
	}
}

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	if (IS_ENABLED(CONFIG_LOG2)) {
		z_log_msg2_runtime_vcreate(CONFIG_LOG_DOMAIN_ID, NULL,
					   LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
					   fmt, ap);
		return;
	}

	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union = {
		{
			.level = LOG_LEVEL_INTERNAL_RAW_STRING
		}
	};

	if (k_is_user_context()) {
		uint8_t str[CONFIG_LOG_PRINTK_MAX_STRING_LENGTH + 1];

		vsnprintk(str, sizeof(str), fmt, ap);

		z_log_string_from_user(src_level_union.value, str);
	} else if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		log_generic(src_level_union.structure, fmt, ap,
			    LOG_STRDUP_SKIP);
	} else {
		uint8_t str[CONFIG_LOG_PRINTK_MAX_STRING_LENGTH + 1];
		struct log_msg *msg;
		int length;

		length = vsnprintk(str, sizeof(str), fmt, ap);
		length = MIN(length, sizeof(str));

		msg = log_msg_hexdump_create(NULL, str, length);
		if (msg == NULL) {
			return;
		}

		msg_finalize(msg, src_level_union.structure);
	}
}

/** @brief Count the number of arguments in a formatted string.
 *
 * The function counts occurrences of '%' that are not followed by another '%'.
 */
uint32_t log_count_args(const char *fmt)
{
	uint32_t args = 0U;
	bool prev = false; /* true if the previous char started a conversion ('%'). */

	while (*fmt != '\0') {
		if (*fmt == '%') {
			prev = !prev;
		} else if (prev) {
			args++;
			prev = false;
		} else {
			; /* standard character, continue walk */
		}
		fmt++;
	}

	return args;
}

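/**
 * @brief Generic logging entry point used by the string and printk paths.
 *
 * Depending on the calling context and configuration, the message is
 * forwarded to the user mode handler, flushed synchronously to all active
 * backends, or packed into a deferred log message with string arguments
 * optionally duplicated.
 */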
void log_generic(struct log_msg_ids src_level, const char *fmt, va_list ap,
		 enum log_strdup_action strdup_action)
{
	if (k_is_user_context()) {
		log_generic_from_user(src_level, fmt, ap);
	} else if (IS_ENABLED(CONFIG_LOG_IMMEDIATE) &&
		   (!IS_ENABLED(CONFIG_LOG_FRONTEND))) {
		struct log_backend const *backend;
		uint32_t timestamp = timestamp_func();

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			bool runtime_ok =
				IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?
				(src_level.level <= log_filter_get(backend,
							src_level.domain_id,
							src_level.source_id,
							true)) : true;

			if (log_backend_is_active(backend) && runtime_ok) {
				va_list ap_tmp;

				va_copy(ap_tmp, ap);
				log_backend_put_sync_string(backend, src_level,
							    timestamp, fmt, ap_tmp);
				va_end(ap_tmp);
			}
		}
	} else {
		log_arg_t args[LOG_MAX_NARGS];
		uint32_t nargs = log_count_args(fmt);

		__ASSERT_NO_MSG(nargs < LOG_MAX_NARGS);
		for (int i = 0; i < nargs; i++) {
			args[i] = va_arg(ap, log_arg_t);
		}

		if (strdup_action != LOG_STRDUP_SKIP) {
			uint32_t mask = z_log_get_s_mask(fmt, nargs);

			while (mask) {
				uint32_t idx = 31 - __builtin_clz(mask);
				const char *str = (const char *)args[idx];

				/* is_rodata(str) is not checked here because
				 * log_strdup() already does it. Hence only an
				 * optional check for an already duplicated
				 * string is done.
				 */
				if (strdup_action == LOG_STRDUP_EXEC
				    || !log_is_strdup(str)) {
					args[idx] = (log_arg_t)log_strdup(str);
				}
				mask &= ~BIT(idx);
			}
		}
		log_n(fmt, args, nargs, src_level);
	}
}

void log_string_sync(struct log_msg_ids src_level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);

	log_generic(src_level, fmt, ap, LOG_STRDUP_SKIP);

	va_end(ap);
}

void log_hexdump_sync(struct log_msg_ids src_level, const char *metadata,
		      const void *data, uint32_t len)
{
	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_hexdump(metadata, (const uint8_t *)data, len,
				     src_level);
	} else {
		struct log_backend const *backend;
		log_timestamp_t timestamp = timestamp_func();

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			bool runtime_ok =
				IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?
				(src_level.level <= log_filter_get(backend,
							src_level.domain_id,
							src_level.source_id,
							true)) : true;

			if (log_backend_is_active(backend) && runtime_ok) {
				log_backend_put_sync_hexdump(
					backend, src_level, timestamp, metadata,
					(const uint8_t *)data, len);
			}
		}
	}
}

static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}

void log_core_init(void)
{
	uint32_t freq;
	log_timestamp_get_t _timestamp_func;

	panic_mode = false;
	dropped_cnt = 0;

	/* Set default timestamp. */
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		_timestamp_func = default_lf_get_timestamp;
		freq = 1000U;
	} else {
		_timestamp_func = default_get_timestamp;
		freq = sys_clock_hw_cycles_per_sec();
	}

	log_set_timestamp_func(_timestamp_func, freq);

	if (IS_ENABLED(CONFIG_LOG2)) {
		if (IS_ENABLED(CONFIG_LOG2_MODE_DEFERRED)) {
			z_log_msg2_init();
		}
	} else if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		log_msg_pool_init();
		log_list_init(&list);

		k_mem_slab_init(&log_strdup_pool, log_strdup_pool_buf,
				sizeof(struct log_strdup_buf),
				CONFIG_LOG_STRDUP_BUF_COUNT);
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

void log_init(void)
{
	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_NUM_OF_SLOTS);
	int i;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();
	}

	if (atomic_inc(&initialized) != 0) {
		return;
	}

	/* Initialize and enable backends that are autostarted. */
	for (i = 0; i < log_backend_count_get(); i++) {
		const struct log_backend *backend = log_backend_get(i);

		if (backend->autostart) {
			if (backend->api->init != NULL) {
				backend->api->init(backend);
			}

			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}
}

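/**
 * @brief Register the thread that processes log messages.
 *
 * If the trigger threshold is already exceeded, the processing semaphore is
 * given so that the newly registered thread starts draining messages.
 */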
static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	log_output_timestamp_freq_set(freq);

	return 0;
}

void z_impl_log_panic(void)
{
	struct log_backend const *backend;

	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger may not be initialized yet.
	 * Force initialization of the logger and auto-start of backends.
	 */
	log_init();

	for (int i = 0; i < log_backend_count_get(); i++) {
		backend = log_backend_get(i);

		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		/* Flush */
		while (log_process(false) == true) {
		}
	}

	panic_mode = true;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <syscalls/log_panic_mrsh.c>
#endif

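/**
 * @brief Check whether a message passes the runtime filter of a backend.
 *
 * @return True if the message should be passed to the backend.
 */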
static bool msg_filter_check(struct log_backend const *backend,
			     union log_msgs msg)
{
	if (IS_ENABLED(CONFIG_LOG2) && !z_log_item_is_msg(msg.msg2)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	if (IS_ENABLED(CONFIG_LOG2)) {
		struct log_msg2 *msg2 = &msg.msg2->log;
		struct log_source_dynamic_data *source =
			(struct log_source_dynamic_data *)
			log_msg2_get_source(msg2);

		level = log_msg2_get_level(msg2);
		domain_id = log_msg2_get_domain(msg2);
		source_id = source ? log_dynamic_source_id(source) : -1;
	} else {
		level = log_msg_level_get(msg.msg);
		domain_id = log_msg_domain_id_get(msg.msg);
		source_id = log_msg_source_id_get(msg.msg);
	}

	backend_level = log_filter_get(backend, domain_id,
				       source_id, true);

	return (level <= backend_level);
}

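/**
 * @brief Dispatch a single log message to all active backends and free it.
 *
 * When @p bypass is true, backends are skipped and the message is only
 * released, which is used to drain the buffer when no backend is attached.
 */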
static void msg_process(union log_msgs msg, bool bypass)
{
	struct log_backend const *backend;

	if (!bypass) {
		if (!IS_ENABLED(CONFIG_LOG2) &&
		    IS_ENABLED(CONFIG_LOG_DETECT_MISSED_STRDUP) &&
		    !panic_mode) {
			detect_missed_strdup(msg.msg);
		}

		for (int i = 0; i < log_backend_count_get(); i++) {
			backend = log_backend_get(i);
			if (log_backend_is_active(backend) &&
			    msg_filter_check(backend, msg)) {
				if (IS_ENABLED(CONFIG_LOG2)) {
					log_backend_msg2_process(backend,
								 msg.msg2);
				} else {
					log_backend_put(backend, msg.msg);
				}
			}
		}
	}

	if (!IS_ENABLED(CONFIG_LOG2_MODE_IMMEDIATE)) {
		if (IS_ENABLED(CONFIG_LOG2)) {
			z_log_msg2_free(msg.msg2);
		} else {
			log_msg_put(msg.msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	for (int i = 0; i < log_backend_count_get(); i++) {
		struct log_backend const *backend = log_backend_get(i);

		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

union log_msgs get_msg(void)
{
	union log_msgs msg;

	if (IS_ENABLED(CONFIG_LOG2)) {
		msg.msg2 = z_log_msg2_claim();

		return msg;
	}

	int key = irq_lock();

	msg.msg = log_list_head_get(&list);
	irq_unlock(key);

	return msg;
}

static bool next_pending(void)
{
	if (IS_ENABLED(CONFIG_LOG2)) {
		return z_log_msg2_pending();
	}

	return (log_list_head_peek(&list) != NULL);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached. It
	 * might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

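/**
 * @brief Process a single pending log message.
 *
 * @return True if more messages are pending after this call.
 */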
bool z_impl_log_process(bool bypass)
{
	union log_msgs msg;

	if (!backend_attached && !bypass) {
		return false;
	}

	msg = get_msg();
	if (msg.msg) {
		if (!bypass) {
			atomic_dec(&buffered_cnt);
		}
		msg_process(msg, bypass);
	}

	if (!bypass && z_log_dropped_pending()) {
		dropped_notify();
	}

	return next_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(bool bypass)
{
	return z_impl_log_process(bypass);
}
#include <syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <syscalls/log_buffered_cnt_mrsh.c>
#endif

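/**
 * @brief Account for a dropped message.
 *
 * @param buffered True if the dropped message was already counted as buffered.
 */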
void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

static void notify_drop(const struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic *item)
{
	ARG_UNUSED(buffer);
	ARG_UNUSED(item);

	z_log_dropped(true);
}

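/**
 * @brief Duplicate a transient string into the log_strdup pool.
 *
 * Strings located in read only memory, strings logged from user mode and the
 * immediate mode are returned unchanged. On pool exhaustion a placeholder
 * string is returned. Strings truncated to the buffer size end with a '~'
 * marker.
 */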
char *z_log_strdup(const char *str)
{
	struct log_strdup_buf *dup;
	int err;

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE) ||
	    is_rodata(str) || k_is_user_context()) {
		return (char *)str;
	}

	err = k_mem_slab_alloc(&log_strdup_pool, (void **)&dup, K_NO_WAIT);
	if (err != 0) {
		/* failed to allocate */
		return (char *)log_strdup_fail_msg;
	}

	if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) {
		size_t slen = strlen(str);
		static struct k_spinlock lock;
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		log_strdup_in_use++;
		log_strdup_max = MAX(log_strdup_in_use, log_strdup_max);
		log_strdup_longest = MAX(slen, log_strdup_longest);
		k_spin_unlock(&lock, key);
	}

	/* Set 'allocated' flag. */
	(void)atomic_set(&dup->refcount, 1);

	strncpy(dup->buf, str, sizeof(dup->buf) - 2);
	dup->buf[sizeof(dup->buf) - 2] = '~';
	dup->buf[sizeof(dup->buf) - 1] = '\0';

	return dup->buf;
}

uint32_t log_get_strdup_pool_current_utilization(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_in_use : 0;
}

uint32_t log_get_strdup_pool_utilization(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_max : 0;
}

uint32_t log_get_strdup_longest_string(void)
{
	return IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING) ?
			log_strdup_longest : 0;
}

bool log_is_strdup(const void *buf)
{
	return PART_OF_ARRAY(log_strdup_pool_buf, (uint8_t *)buf);
}

void log_free(void *str)
{
	struct log_strdup_buf *dup = CONTAINER_OF(str, struct log_strdup_buf,
						  buf);

	if (atomic_dec(&dup->refcount) == 1) {
		k_mem_slab_free(&log_strdup_pool, (void **)&dup);
		if (IS_ENABLED(CONFIG_LOG_STRDUP_POOL_PROFILING)) {
			atomic_dec((atomic_t *)&log_strdup_in_use);
		}
	}
}

#if defined(CONFIG_USERSPACE)
/* LCOV_EXCL_START */
void z_impl_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(str);

	__ASSERT(false, "This function can be called from user mode only.");
}
/* LCOV_EXCL_STOP */

void z_vrfy_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	uint8_t level, domain_id, source_id;
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;
	size_t len;
	int err;

	src_level_union.value = src_level_val;
	level = src_level_union.structure.level;
	domain_id = src_level_union.structure.domain_id;
	source_id = src_level_union.structure.source_id;

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		(IS_ENABLED(CONFIG_LOG_PRINTK) || (level >= LOG_LEVEL_ERR)) &&
		(level <= LOG_LEVEL_DBG),
		"Invalid log level"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(domain_id == CONFIG_LOG_DOMAIN_ID,
		"Invalid log domain_id"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(source_id < log_sources_count(),
		"Invalid log source id"));

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) &&
	    (level != LOG_LEVEL_INTERNAL_RAW_STRING) &&
	    (level > LOG_FILTER_SLOT_GET(log_dynamic_filters_get(source_id),
					 LOG_FILTER_AGGR_SLOT_IDX))) {
		/* Skip filtered out messages. */
		return;
	}

	/*
	 * Validate and make a copy of the source string. Because we need
	 * the log subsystem to eventually free it, we're going to use
	 * log_strdup().
	 */
	len = z_user_string_nlen(str, (level == LOG_LEVEL_INTERNAL_RAW_STRING) ?
				 CONFIG_LOG_PRINTK_MAX_STRING_LENGTH :
				 CONFIG_LOG_STRDUP_MAX_STRING, &err);

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(err == 0, "invalid string passed in"));
	Z_OOPS(Z_SYSCALL_MEMORY_READ(str, len));

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		log_string_sync(src_level_union.structure, "%s", str);
	} else if (IS_ENABLED(CONFIG_LOG_PRINTK) &&
		   (level == LOG_LEVEL_INTERNAL_RAW_STRING)) {
		struct log_msg *msg;

		msg = log_msg_hexdump_create(NULL, str, len);
		if (msg != NULL) {
			msg_finalize(msg, src_level_union.structure);
		}
	} else {
		str = log_strdup(str);
		log_1("%s", (log_arg_t)str, src_level_union.structure);
	}
}
#include <syscalls/z_log_string_from_user_mrsh.c>

void log_generic_from_user(struct log_msg_ids src_level,
			   const char *fmt, va_list ap)
{
	char buffer[CONFIG_LOG_STRDUP_MAX_STRING + 1];
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;

	vsnprintk(buffer, sizeof(buffer), fmt, ap);

	__ASSERT_NO_MSG(sizeof(src_level) <= sizeof(uint32_t));
	src_level_union.structure = src_level;
	z_log_string_from_user(src_level_union.value, buffer);
}

void log_from_user(struct log_msg_ids src_level, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	log_generic_from_user(src_level, fmt, ap);
	va_end(ap);
}

/* LCOV_EXCL_START */
void z_impl_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT(false, "This function can be called from user mode only.");
}
/* LCOV_EXCL_STOP */

void z_vrfy_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;
	int err;
	char kmeta[CONFIG_LOG_STRDUP_MAX_STRING];

	src_level_union.value = src_level_val;

	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		(src_level_union.structure.level <= LOG_LEVEL_DBG) &&
		(src_level_union.structure.level >= LOG_LEVEL_ERR),
		"Invalid log level"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		src_level_union.structure.domain_id == CONFIG_LOG_DOMAIN_ID,
		"Invalid log domain_id"));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(
		src_level_union.structure.source_id < log_sources_count(),
		"Invalid log source id"));

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) &&
	    (src_level_union.structure.level > LOG_FILTER_SLOT_GET(
	     log_dynamic_filters_get(src_level_union.structure.source_id),
	     LOG_FILTER_AGGR_SLOT_IDX))) {
		/* Skip filtered out messages. */
		return;
	}

	/*
	 * Validate and make a copy of the metadata string. Because we
	 * need the log subsystem to eventually free it, we're going
	 * to use log_strdup().
	 */
	err = z_user_string_copy(kmeta, metadata, sizeof(kmeta));
	Z_OOPS(Z_SYSCALL_VERIFY_MSG(err == 0, "invalid meta passed in"));
	Z_OOPS(Z_SYSCALL_MEMORY_READ(data, len));

	if (IS_ENABLED(CONFIG_LOG_IMMEDIATE)) {
		log_hexdump_sync(src_level_union.structure,
				 kmeta, data, len);
	} else {
		metadata = log_strdup(kmeta);
		log_hexdump(metadata, data, len, src_level_union.structure);
	}
}
#include <syscalls/z_log_hexdump_from_user_mrsh.c>

void log_hexdump_from_user(struct log_msg_ids src_level, const char *metadata,
			   const void *data, uint32_t len)
{
	union {
		struct log_msg_ids structure;
		uint32_t value;
	} src_level_union;

	__ASSERT_NO_MSG(sizeof(src_level) <= sizeof(uint32_t));
	src_level_union.structure = src_level;
	z_log_hexdump_from_user(src_level_union.value, metadata,
				(const uint8_t *)data, len);
}
#else
/* LCOV_EXCL_START */
void z_impl_z_log_string_from_user(uint32_t src_level_val, const char *str)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(str);

	__ASSERT_NO_MSG(false);
}

void z_vrfy_z_log_hexdump_from_user(uint32_t src_level_val, const char *metadata,
				    const uint8_t *data, uint32_t len)
{
	ARG_UNUSED(src_level_val);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT_NO_MSG(false);
}

void log_from_user(struct log_msg_ids src_level, const char *fmt, ...)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(fmt);

	__ASSERT_NO_MSG(false);
}

void log_generic_from_user(struct log_msg_ids src_level,
			   const char *fmt, va_list ap)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(fmt);
	ARG_UNUSED(ap);

	__ASSERT_NO_MSG(false);
}

void log_hexdump_from_user(struct log_msg_ids src_level, const char *metadata,
			   const void *data, uint32_t len)
{
	ARG_UNUSED(src_level);
	ARG_UNUSED(metadata);
	ARG_UNUSED(data);
	ARG_UNUSED(len);

	__ASSERT_NO_MSG(false);
}
/* LCOV_EXCL_STOP */
#endif /* !defined(CONFIG_USERSPACE) */

void z_log_msg2_init(void)
{
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
}

struct log_msg2 *z_log_msg2_alloc(uint32_t wlen)
{
	return (struct log_msg2 *)mpsc_pbuf_alloc(
		&log_buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
		? K_FOREVER
		: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

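/**
 * @brief Commit a v2 log message.
 *
 * The message gets a timestamp and is either processed immediately
 * (immediate mode) or committed to the packet buffer and scheduled for
 * deferred processing.
 */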
void z_log_msg2_commit(struct log_msg2 *msg)
{
	msg->hdr.timestamp = timestamp_func();

	if (IS_ENABLED(CONFIG_LOG2_MODE_IMMEDIATE)) {
		union log_msgs msgs = {
			.msg2 = (union log_msg2_generic *)msg
		};

		msg_process(msgs, false);

		return;
	}

	mpsc_pbuf_commit(&log_buffer, (union mpsc_pbuf_generic *)msg);

	if (IS_ENABLED(CONFIG_LOG2_MODE_DEFERRED)) {
		z_log_msg_post_finalize();
	}
}

union log_msg2_generic *z_log_msg2_claim(void)
{
	return (union log_msg2_generic *)mpsc_pbuf_claim(&log_buffer);
}

void z_log_msg2_free(union log_msg2_generic *msg)
{
	mpsc_pbuf_free(&log_buffer, (union mpsc_pbuf_generic *)msg);
}

bool z_log_msg2_pending(void)
{
	return mpsc_pbuf_is_pending(&log_buffer);
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

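/**
 * @brief Entry point of the log processing thread.
 *
 * Drains pending messages and sleeps on the processing semaphore whenever
 * the queue is empty.
 */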
static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);

	log_init();
	thread_set(k_current_get());

	while (true) {
		if (log_process(false) == false) {
			k_sem_take(&log_process_thread_sem, K_FOREVER);
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(const struct device *arg)
{
	ARG_UNUSED(arg);

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
			     log_process_thread_timer_expiry_fn, NULL);
		/* start logging thread */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				K_LOWEST_APPLICATION_THREAD_PRIO, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					(K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS)),
					(K_NO_WAIT)));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		log_init();
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);