/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
#include <zephyr/posix/time.h>
#endif

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}
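
/*
 * Usage sketch (illustrative, not part of this file): a backend that supports
 * runtime-selectable output formats typically stores the active format and
 * resolves the matching formatter for each message. The names backend_process,
 * backend_format_set and log_output_inst are hypothetical.
 *
 *	static uint32_t format = LOG_OUTPUT_TEXT;
 *
 *	static void backend_process(const struct log_backend *const backend,
 *				    union log_msg_generic *msg)
 *	{
 *		log_format_func_t log_format_func = log_format_func_t_get(format);
 *
 *		log_format_func(&log_output_inst, &msg->log, 0);
 *	}
 *
 *	static int backend_format_set(const struct log_backend *const backend,
 *				      uint32_t log_type)
 *	{
 *		if ((log_type >= log_format_table_size()) ||
 *		    (log_format_func_t_get(log_type) == NULL)) {
 *			return -EINVAL;
 *		}
 *
 *		format = log_type;
 *
 *		return 0;
 *	}
 */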

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that the default tag can fit in the tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		static struct k_spinlock process_lock;
		k_spinlock_key_t key = k_spin_lock(&process_lock);

		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		/*
		 * If CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1, the timer is
		 * never needed: the processing thread is released after every
		 * posted message.
		 */
		if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1) {
			if (cnt == 0) {
				k_sem_give(&log_process_thread_sem);
			}
		} else {
			if (cnt == 0) {
				k_timer_start(&log_process_thread_timer,
					      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
					      K_NO_WAIT);
			} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
				   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
				k_timer_stop(&log_process_thread_timer);
				k_sem_give(&log_process_thread_sem);
			} else {
				/* No action needed. Message processing will be
				 * triggered by the timeout or when the number
				 * of buffered messages reaches the threshold.
				 */
			}
		}
	}
}
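
/*
 * Configuration sketch (values are illustrative): with the options below, the
 * processing thread is released once 10 messages are buffered, or at the
 * latest CONFIG_LOG_PROCESS_THREAD_SLEEP_MS after the first unprocessed
 * message was posted, whichever comes first.
 *
 *	CONFIG_LOG_MODE_DEFERRED=y
 *	CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=10
 *	CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=1000
 */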

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int ret = log_backend_format_set(backend, log_type);

			if (ret != 0) {
				failed_backend = backend;
			}
		}
	}

	return failed_backend;
}
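
/*
 * Usage sketch: switch all active backends to dictionary output at runtime
 * (requires CONFIG_LOG_DICTIONARY_SUPPORT) and report the last backend that
 * rejected the format, if any.
 *
 *	const struct log_backend *failed =
 *		log_format_set_all_active_backends(LOG_OUTPUT_DICT);
 *
 *	if (failed != NULL) {
 *		printk("backend %s rejected the format\n", failed->name);
 *	}
 */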

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				   LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				   Z_LOG_MSG_CBPRINTF_FLAGS(0),
				   fmt, ap);
}

#ifndef CONFIG_LOG_TIMESTAMP_USE_REALTIME
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}
#else
static log_timestamp_t default_rt_get_timestamp(void)
{
	struct timespec tspec;

	clock_gettime(CLOCK_REALTIME, &tspec);

	return ((uint64_t)tspec.tv_sec * MSEC_PER_SEC) + (tspec.tv_nsec / NSEC_PER_MSEC);
}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();

		for (uint16_t s = 0; s < log_src_cnt_get(0); s++) {
			log_frontend_filter_set(s, CONFIG_LOG_MAX_LEVEL);
		}

		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set the default timestamp function. */
#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
	log_set_timestamp_func(default_rt_get_timestamp, 1000U);
#else
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();

		log_set_timestamp_func(default_get_timestamp, freq);
	}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_MAX_BACKENDS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	int backend_index = 0;

	/* Activate autostart backends. */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function then it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(backend_index);
			}
		}

		++backend_index;
	}

	/* If init is blocking, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

void log_thread_trigger(void)
{
	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	k_timer_stop(&log_process_thread_timer);
	k_sem_give(&log_process_thread_sem);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}
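
/*
 * Usage sketch (only valid when CONFIG_LOG_PROCESS_THREAD is disabled): an
 * application may drain log messages from a thread it owns by registering
 * that thread and polling log_process(). The thread function and sleep
 * period below are hypothetical.
 *
 *	static void app_log_pump(void *p1, void *p2, void *p3)
 *	{
 *		log_thread_set(k_current_get());
 *
 *		while (true) {
 *			while (log_process()) {
 *			}
 *			k_msleep(100);
 *		}
 *	}
 */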

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
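
/*
 * Usage sketch: install a custom timestamp source, e.g. an external RTC.
 * board_rtc_read() and BOARD_RTC_FREQ_HZ are hypothetical board helpers.
 *
 *	static log_timestamp_t rtc_timestamp(void)
 *	{
 *		return (log_timestamp_t)board_rtc_read();
 *	}
 *
 *	int err = log_set_timestamp_func(rtc_timestamp, BOARD_RTC_FREQ_HZ);
 *
 *	__ASSERT_NO_MSG(err == 0);
 */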

void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger might not be initialized
	 * yet. Force initialization of the logger and auto-start backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush all buffered messages. */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}
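
/*
 * Usage sketch: a fatal-error or watchdog path can flush buffered messages
 * before the system halts by switching the logger to panic (synchronous)
 * mode. The LOG_PANIC() macro from <zephyr/logging/log_ctrl.h> resolves to
 * log_panic() when logging is enabled and compiles out otherwise; the hook
 * name below is hypothetical.
 *
 *	void on_fatal_error(void)
 *	{
 *		LOG_PANIC();
 *	}
 */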

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <zephyr/syscalls/log_panic_mrsh.c>
#endif

static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);
	source_id = log_msg_get_source_id(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}
	if (source_id >= 0) {
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached. The
	 * thread might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg);
		z_log_msg_free(msg);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, it means that there are pending
		 * messages but they are too new and processing shall back off
		 * to allow arrival of newer messages from remote domains.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		   (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <zephyr/syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <zephyr/syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_stop(&log_process_thread_timer);
		k_sem_give(&log_process_thread_sem);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		msg_process(m);

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated for each link, claim the oldest message (lowest timestamp). */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and find the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}
		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* Entry is too new. Back off for some time to
				 * allow newer remote messages to arrive which
				 * may have been captured earlier (but on
				 * another platform). Calculate for how long
				 * processing shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		chosen->msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only the local buffer if no other buffers are registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
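
/*
 * Usage sketch (requires CONFIG_LOG_TAG_MAX_LEN > 0): set a tag that the text
 * output formatter prepends to messages. A string longer than the configured
 * capacity is truncated, the last kept character is replaced with '~', and
 * -ENOMEM is returned.
 *
 *	int err = log_set_tag("app");
 *
 *	__ASSERT_NO_MSG(err == 0);
 */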

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
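
/*
 * Usage sketch (deferred mode only): query current and peak utilization of
 * the core message buffer. Peak tracking additionally requires
 * CONFIG_LOG_MEM_UTILIZATION (see MPSC_PBUF_MAX_UTILIZATION above).
 *
 *	uint32_t size;
 *	uint32_t used;
 *	uint32_t peak;
 *
 *	if (log_mem_get_usage(&size, &used) == 0) {
 *		LOG_INF("log buffer utilization: %u/%u", used, size);
 *	}
 *
 *	if (log_mem_get_max_usage(&peak) == 0) {
 *		LOG_INF("log buffer peak utilization: %u", peak);
 *	}
 */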

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);

	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set up a periodic thread
	 * wake-up to poll backends for readiness. The period is arbitrary.
	 * If all backends are ready, the periodic wake-up is not needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;

	thread_set(k_current_get());

	/* The logging thread is periodically woken up until all backends that
	 * should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake-up no longer needed since all
				 * backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
				log_process_thread_timer_expiry_fn, NULL);
		/* Start the logging thread. */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);