/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "Option must be enabled when CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "Option must be enabled when CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that default tag can fit in tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

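/* Called whenever a message is committed to the buffer in deferred mode.
 * Tracks the number of buffered messages and decides how processing is
 * triggered: in panic mode messages are flushed in place (under a spinlock);
 * otherwise, if a processing thread is registered, the first buffered
 * message starts the processing timer and reaching
 * CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD wakes the thread immediately.
 */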
static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		static struct k_spinlock process_lock;
		k_spinlock_key_t key = k_spin_lock(&process_lock);

		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		if (cnt == 0) {
			k_timer_start(&log_process_thread_timer,
				      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
				      K_NO_WAIT);
		} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
			   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
			k_timer_stop(&log_process_thread_timer);
			k_sem_give(&log_process_thread_sem);
		} else {
			/* No action needed. Message processing will be
			 * triggered by the timeout or when the number of
			 * buffered messages exceeds the threshold.
			 */
		}
	}
}

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int retCode = log_backend_format_set(backend, log_type);

			if (retCode != 0) {
				failed_backend = backend;
			}
		}
	}

	return failed_backend;
}
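
/* Example (illustrative sketch, not part of this file): switching every
 * active backend to dictionary output at runtime and checking for failure:
 *
 *     const struct log_backend *failed =
 *             log_format_set_all_active_backends(LOG_OUTPUT_DICT);
 *
 *     if (failed != NULL) {
 *             printk("backend %s rejected format\n", failed->name);
 *     }
 *
 * The printk() reporting is hypothetical; only the return-value contract
 * (NULL on success, otherwise the last failing backend) comes from the
 * function above.
 */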

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				   LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				   Z_LOG_MSG_CBPRINTF_FLAGS(0),
				   fmt, ap);
}

static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set default timestamp. */
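	/* Note (inferred rationale): above 1 MHz a raw cycle counter would
	 * quickly wrap a 32-bit timestamp, so the kernel uptime source with
	 * a fixed 1 kHz frequency is used instead.
	 */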
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		log_set_timestamp_func(default_get_timestamp,
				       sys_clock_hw_cycles_per_sec());
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

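/* Try to enable every autostart backend whose bit is set in @p mask.
 * Returns the input mask with bits cleared for backends that reported
 * ready and were enabled; the remaining set bits are backends to retry.
 */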
static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_NUM_OF_SLOTS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	int i = 0;

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	/* Assign ids to backends. */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function, it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(i);
			}

			i++;
		}
	}

	/* If blocking init, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
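
/* Example (illustrative sketch, not part of this file): an application can
 * install its own timestamp source, e.g. an external RTC running at 32768 Hz:
 *
 *     static log_timestamp_t rtc_timestamp(void)
 *     {
 *             return (log_timestamp_t)rtc_counter_get();
 *     }
 *
 *     log_set_timestamp_func(rtc_timestamp, 32768);
 *
 * rtc_counter_get() is a hypothetical driver call used only for
 * illustration; the getter/frequency contract comes from the code above.
 */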
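/* Switch the logger into panic (synchronous) mode: force initialization if
 * it has not happened yet, notify the frontend and all active backends,
 * and flush any buffered messages before setting panic_mode.
 */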
void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger might not be initialized.
	 * Force initialization of the logger and auto-start backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush everything that is still buffered. */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <syscalls/log_panic_mrsh.c>
#endif

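/* Returns true if the message should be passed to the backend: non-message
 * items and raw (LOG_LEVEL_NONE) entries always pass; otherwise the message
 * level is compared against the backend's runtime filter for the message's
 * domain and source.
 */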
static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;
	struct log_source_dynamic_data *source;

	source = (struct log_source_dynamic_data *)log_msg_get_source(&msg->log);
	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}

	if (source) {
		source_id = log_dynamic_source_id(source);
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached.
	 * The thread might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg);
		z_log_msg_free(msg);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, there are pending messages but
		 * they are too new; processing backs off so that messages
		 * captured earlier on remote domains can still arrive and
		 * be claimed in order.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		   (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}
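
/* Example (illustrative sketch, not part of this file): when
 * CONFIG_LOG_PROCESS_THREAD is disabled, an application can drain the
 * buffer itself, e.g. from its main loop:
 *
 *     while (log_process()) {
 *             ;
 *     }
 *
 * log_process() returns true while more messages remain pending.
 */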

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(buffer, wlen,
				K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		msg_process(m);

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated for each link, claim the oldest message
 * (lowest timestamp) across all of them.
 */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and get the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}
		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* Entry is too new. Back off for some time to
				 * allow remote messages which may have been
				 * captured earlier (but on another domain) to
				 * arrive. Calculate for how long processing
				 * shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		(*chosen).msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only one buffer if others are not registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

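/* Copy a message received over a multi-domain link into the link's
 * dedicated buffer (or the local one), remapping its domain ID by the
 * link's offset. The message is dropped and counted if allocation fails.
 */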
void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
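
/* Example (illustrative sketch, not part of this file): tags longer than
 * CONFIG_LOG_TAG_MAX_LEN are truncated, the last stored character is
 * replaced with '~' and -ENOMEM is returned:
 *
 *     if (log_set_tag("very-long-tag") == -ENOMEM) {
 *             // tag was stored truncated
 *     }
 */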

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
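
/* Example (illustrative sketch, not part of this file): monitoring log
 * buffer utilization in deferred mode:
 *
 *     uint32_t size, used;
 *
 *     if (log_mem_get_usage(&size, &used) == 0) {
 *             printk("log buffer: %u/%u bytes used\n", used, size);
 *     }
 *
 * Both calls above return -EINVAL unless CONFIG_LOG_MODE_DEFERRED is set.
 */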

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);

	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set a periodic thread
	 * wake-up to poll them for readiness. The period is arbitrary.
	 * Once all backends are ready, the periodic wake-up is not needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;

	thread_set(k_current_get());

	/* The logging thread is woken up periodically until all backends
	 * that should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake-up is no longer needed since
				 * all backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
				log_process_thread_timer_expiry_fn, NULL);

		/* Start the logging thread. */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);