/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <kernel.h>
#include <logging/log.h>
#include <logging/log_msg.h>
#include <logging/log_ctrl.h>
#include <logging/log_core.h>
#include <sys/__assert.h>
#include <string.h>

BUILD_ASSERT((sizeof(struct log_msg_ids) == sizeof(uint16_t)),
	     "Structure must fit in 2 bytes");

BUILD_ASSERT((sizeof(struct log_msg_generic_hdr) == sizeof(uint16_t)),
	     "Structure must fit in 2 bytes");

BUILD_ASSERT((sizeof(struct log_msg_std_hdr) == sizeof(uint16_t)),
	     "Structure must fit in 2 bytes");

BUILD_ASSERT((sizeof(struct log_msg_hexdump_hdr) == sizeof(uint16_t)),
	     "Structure must fit in 2 bytes");

BUILD_ASSERT((sizeof(union log_msg_head_data) ==
	      sizeof(struct log_msg_ext_head_data)),
	     "Structure must be same size");

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 0
#endif

/* Define needed when CONFIG_LOG_BLOCK_IN_THREAD is disabled to satisfy the
 * compiler.
 */
#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#define MSG_SIZE sizeof(union log_msg_chunk)
#define NUM_OF_MSGS (CONFIG_LOG_BUFFER_SIZE / MSG_SIZE)
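
/* The message pool below is carved into fixed-size chunks. As a worked
 * example (chunk size depends on the target and configuration, so treat the
 * numbers as an assumption): if sizeof(union log_msg_chunk) is 32 bytes and
 * CONFIG_LOG_BUFFER_SIZE is 1024, the slab holds 1024 / 32 = 32 chunks,
 * i.e. NUM_OF_MSGS == 32. Any tail of the buffer smaller than one chunk is
 * left unused.
 */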

struct k_mem_slab log_msg_pool;
static uint8_t __noinit __aligned(sizeof(void *))
	       log_msg_pool_buf[CONFIG_LOG_BUFFER_SIZE];

void log_msg_pool_init(void)
{
	k_mem_slab_init(&log_msg_pool, log_msg_pool_buf, MSG_SIZE, NUM_OF_MSGS);
}

/* Return true if interrupts were unlocked in the context of this call. */
static bool is_irq_unlocked(void)
{
	unsigned int key = arch_irq_lock();
	bool ret = arch_irq_unlocked(key);

	arch_irq_unlock(key);
	return ret;
}

/* Check if context can be blocked and pend on available memory slab. Context
 * can be blocked if in a thread and interrupts are not locked.
 */
static bool block_on_alloc(void)
{
	if (!IS_ENABLED(CONFIG_LOG_BLOCK_IN_THREAD)) {
		return false;
	}

	return (!k_is_in_isr() && is_irq_unlocked());
}

union log_msg_chunk *log_msg_chunk_alloc(void)
{
	union log_msg_chunk *msg = NULL;
	int err = k_mem_slab_alloc(&log_msg_pool, (void **)&msg,
				   block_on_alloc()
				   ? K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS)
				   : K_NO_WAIT);

	if (err != 0) {
		msg = log_msg_no_space_handle();
	}

	return msg;
}

void log_msg_get(struct log_msg *msg)
{
	atomic_inc(&msg->hdr.ref_cnt);
}

static void cont_free(struct log_msg_cont *cont)
{
	struct log_msg_cont *next;

	while (cont != NULL) {
		next = cont->next;
		k_mem_slab_free(&log_msg_pool, (void **)&cont);
		cont = next;
	}
}

static void msg_free(struct log_msg *msg)
{
	uint32_t nargs = log_msg_nargs_get(msg);

	/* Free any transient string found in arguments. */
	if (log_msg_is_std(msg) && nargs) {
		uint32_t i;
		uint32_t smask = 0U;

		for (i = 0U; i < nargs; i++) {
			void *buf = (void *)log_msg_arg_get(msg, i);

			if (log_is_strdup(buf)) {
				if (smask == 0U) {
					/* Scan for string arguments only when
					 * a string duplication candidate is
					 * detected, since the scan is time
					 * consuming and free can be called
					 * from any context while a log
					 * message is being dropped.
					 */
					smask = z_log_get_s_mask(
						log_msg_str_get(msg),
						nargs);
					if (smask == 0U) {
						/* If no string argument is
						 * detected then stop searching
						 * for candidates.
						 */
						break;
					}
				}
				if (smask & BIT(i)) {
					log_free(buf);
				}
			}
		}
	} else if (IS_ENABLED(CONFIG_USERSPACE) &&
		   (log_msg_level_get(msg) != LOG_LEVEL_INTERNAL_RAW_STRING)) {
		/*
		 * When userspace support is enabled, the hex message metadata
		 * might be located in the log_strdup() memory pool.
		 */
		const char *str = log_msg_str_get(msg);

		if (log_is_strdup(str)) {
			log_free((void *)(str));
		}
	} else {
		/* Message does not contain any arguments that might be a
		 * transient string. No action required.
		 */
		;
	}

	if (msg->hdr.params.generic.ext == 1) {
		cont_free(msg->payload.ext.next);
	}

	k_mem_slab_free(&log_msg_pool, (void **)&msg);
}

union log_msg_chunk *log_msg_no_space_handle(void)
{
	union log_msg_chunk *msg = NULL;
	bool more;
	int err;

	if (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW)) {
		do {
			more = log_process(true);
			z_log_dropped(true);
			err = k_mem_slab_alloc(&log_msg_pool,
					       (void **)&msg,
					       K_NO_WAIT);
		} while ((err != 0) && more);
	} else {
		z_log_dropped(false);
	}

	return msg;
}

void log_msg_put(struct log_msg *msg)
{
	atomic_dec(&msg->hdr.ref_cnt);

	if (msg->hdr.ref_cnt == 0) {
		msg_free(msg);
	}
}

uint32_t log_msg_nargs_get(struct log_msg *msg)
{
	return msg->hdr.params.std.nargs;
}

static log_arg_t cont_arg_get(struct log_msg *msg, uint32_t arg_idx)
{
	struct log_msg_cont *cont;

	if (arg_idx < LOG_MSG_NARGS_HEAD_CHUNK) {
		return msg->payload.ext.data.args[arg_idx];
	}

	cont = msg->payload.ext.next;
	arg_idx -= LOG_MSG_NARGS_HEAD_CHUNK;

	while (arg_idx >= ARGS_CONT_MSG) {
		arg_idx -= ARGS_CONT_MSG;
		cont = cont->next;
	}

	return cont->payload.args[arg_idx];
}

log_arg_t log_msg_arg_get(struct log_msg *msg, uint32_t arg_idx)
{
	log_arg_t arg;

	/* Return early if the requested argument is not present in the
	 * message.
	 */
	if (arg_idx >= msg->hdr.params.std.nargs) {
		return 0;
	}

	if (msg->hdr.params.std.nargs <= LOG_MSG_NARGS_SINGLE_CHUNK) {
		arg = msg->payload.single.args[arg_idx];
	} else {
		arg = cont_arg_get(msg, arg_idx);
	}

	return arg;
}

const char *log_msg_str_get(struct log_msg *msg)
{
	return msg->str;
}

/** @brief Allocate chunk for extended standard log message.
 *
 *  @details Extended standard log message is used when the number of
 *           arguments exceeds the capacity of one chunk. An extended message
 *           consists of a head chunk and one or more continuation chunks.
 *           This approach is taken to optimize memory usage and performance,
 *           assuming that log messages with more than
 *           @ref LOG_MSG_NARGS_SINGLE_CHUNK arguments are less common.
 *
 *  @return Allocated chunk or NULL.
 */
static struct log_msg *msg_alloc(uint32_t nargs)
{
	struct log_msg_cont *cont;
	struct log_msg_cont **next;
	struct log_msg *msg = z_log_msg_std_alloc();
	int n = (int)nargs;

	if ((msg == NULL) || nargs <= LOG_MSG_NARGS_SINGLE_CHUNK) {
		return msg;
	}

	msg->hdr.params.std.nargs = 0U;
	msg->hdr.params.generic.ext = 1;
	n -= LOG_MSG_NARGS_HEAD_CHUNK;
	next = &msg->payload.ext.next;
	*next = NULL;

	while (n > 0) {
		cont = (struct log_msg_cont *)log_msg_chunk_alloc();

		if (cont == NULL) {
			msg_free(msg);
			return NULL;
		}

		*next = cont;
		cont->next = NULL;
		next = &cont->next;
		n -= ARGS_CONT_MSG;
	}

	return msg;
}
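
/* Worked example for msg_alloc() (the chunk capacities are configuration and
 * target dependent; the values here are assumptions for illustration): with
 * LOG_MSG_NARGS_SINGLE_CHUNK == 3, LOG_MSG_NARGS_HEAD_CHUNK == 2 and
 * ARGS_CONT_MSG == 7, a message with nargs == 2 fits in a single chunk,
 * while nargs == 9 needs the head chunk (2 arguments) plus one continuation
 * chunk holding the remaining 7 arguments.
 */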

static void copy_args_to_msg(struct log_msg *msg,
			     log_arg_t *args,
			     uint32_t nargs)
{
	struct log_msg_cont *cont = msg->payload.ext.next;

	if (nargs > LOG_MSG_NARGS_SINGLE_CHUNK) {
		(void)memcpy(msg->payload.ext.data.args, args,
			     LOG_MSG_NARGS_HEAD_CHUNK * sizeof(log_arg_t));
		nargs -= LOG_MSG_NARGS_HEAD_CHUNK;
		args += LOG_MSG_NARGS_HEAD_CHUNK;
	} else {
		(void)memcpy(msg->payload.single.args, args,
			     nargs * sizeof(log_arg_t));
		nargs = 0U;
	}

	while (nargs != 0U) {
		uint32_t cpy_args = MIN(nargs, ARGS_CONT_MSG);

		(void)memcpy(cont->payload.args, args,
			     cpy_args * sizeof(log_arg_t));
		nargs -= cpy_args;
		args += cpy_args;
		cont = cont->next;
	}
}

struct log_msg *log_msg_create_n(const char *str,
				 log_arg_t *args,
				 uint32_t nargs)
{
	__ASSERT_NO_MSG(nargs < LOG_MAX_NARGS);

	struct log_msg *msg = NULL;

	msg = msg_alloc(nargs);

	if (msg != NULL) {
		msg->str = str;
		msg->hdr.params.std.nargs = nargs;
		copy_args_to_msg(msg, args, nargs);
	}

	return msg;
}
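
/* Illustrative usage sketch, not part of the build (the format string and
 * argument values are made up). Note that 'str' is stored by reference, so
 * it must stay valid for the lifetime of the message; transient strings are
 * expected to go through log_strdup():
 *
 *	log_arg_t args[] = { 1, 2, 3, 4 };
 *	struct log_msg *msg = log_msg_create_n("sample %d %d %d %d",
 *					       args, ARRAY_SIZE(args));
 *
 *	if (msg != NULL) {
 *		log_arg_t third = log_msg_arg_get(msg, 2);   // == 3
 *
 *		ARG_UNUSED(third);
 *		log_msg_put(msg);   // Drop the reference, freeing all chunks.
 *	}
 */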

struct log_msg *log_msg_hexdump_create(const char *str,
				       const uint8_t *data,
				       uint32_t length)
{
	struct log_msg_cont **prev_cont;
	struct log_msg_cont *cont;
	struct log_msg *msg;
	uint32_t chunk_length;

	/* Saturate length. */
	length = (length > LOG_MSG_HEXDUMP_MAX_LENGTH) ?
		 LOG_MSG_HEXDUMP_MAX_LENGTH : length;

	msg = (struct log_msg *)log_msg_chunk_alloc();
	if (msg == NULL) {
		return NULL;
	}

	/* all fields reset to 0, reference counter to 1 */
	msg->hdr.ref_cnt = 1;
	msg->hdr.params.hexdump.type = LOG_MSG_TYPE_HEXDUMP;
	msg->hdr.params.hexdump.length = length;
	msg->str = str;

	if (length > LOG_MSG_HEXDUMP_BYTES_SINGLE_CHUNK) {
		(void)memcpy(msg->payload.ext.data.bytes,
			     data,
			     LOG_MSG_HEXDUMP_BYTES_HEAD_CHUNK);
		msg->payload.ext.next = NULL;
		msg->hdr.params.generic.ext = 1;

		data += LOG_MSG_HEXDUMP_BYTES_HEAD_CHUNK;
		length -= LOG_MSG_HEXDUMP_BYTES_HEAD_CHUNK;
	} else {
		(void)memcpy(msg->payload.single.bytes, data, length);
		msg->hdr.params.generic.ext = 0;
		length = 0U;
	}

	prev_cont = &msg->payload.ext.next;

	while (length > 0) {
		cont = (struct log_msg_cont *)log_msg_chunk_alloc();
		if (cont == NULL) {
			msg_free(msg);
			return NULL;
		}

		*prev_cont = cont;
		cont->next = NULL;
		prev_cont = &cont->next;

		chunk_length = (length > HEXDUMP_BYTES_CONT_MSG) ?
			       HEXDUMP_BYTES_CONT_MSG : length;

		(void)memcpy(cont->payload.bytes, data, chunk_length);
		data += chunk_length;
		length -= chunk_length;
	}

	return msg;
}
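
/* Illustrative sketch of the hexdump path, not part of the build (buffer
 * sizes and contents are made up). Data longer than the head chunk capacity
 * is spread over continuation chunks and read back with an offset/length
 * pair:
 *
 *	uint8_t raw[64] = { 0 };
 *	uint8_t out[16];
 *	size_t len = sizeof(out);
 *	struct log_msg *msg;
 *
 *	msg = log_msg_hexdump_create("payload", raw, sizeof(raw));
 *	if (msg != NULL) {
 *		// Read 16 bytes starting at offset 8; on return 'len' holds
 *		// the number of bytes actually copied.
 *		log_msg_hexdump_data_get(msg, out, &len, 8);
 *		log_msg_put(msg);
 *	}
 */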

static void log_msg_hexdump_data_op(struct log_msg *msg,
				    uint8_t *data,
				    size_t *length,
				    size_t offset,
				    bool put_op)
{
	uint32_t available_len = msg->hdr.params.hexdump.length;
	struct log_msg_cont *cont = NULL;
	uint8_t *head_data;
	uint32_t chunk_len;
	uint32_t req_len;
	uint32_t cpy_len;

	if (offset >= available_len) {
		*length = 0;
		return;
	}

	if ((offset + *length) > available_len) {
		*length = available_len - offset;
	}

	req_len = *length;

	if (available_len > LOG_MSG_HEXDUMP_BYTES_SINGLE_CHUNK) {
		chunk_len = LOG_MSG_HEXDUMP_BYTES_HEAD_CHUNK;
		head_data = msg->payload.ext.data.bytes;
		cont = msg->payload.ext.next;
	} else {
		head_data = msg->payload.single.bytes;
		chunk_len = available_len;
	}

	if (offset < chunk_len) {
		cpy_len = req_len > chunk_len ? chunk_len : req_len;

		if (put_op) {
			(void)memcpy(&head_data[offset], data, cpy_len);
		} else {
			(void)memcpy(data, &head_data[offset], cpy_len);
		}

		req_len -= cpy_len;
		data += cpy_len;
	} else {
		offset -= chunk_len;
		chunk_len = HEXDUMP_BYTES_CONT_MSG;
		if (cont == NULL) {
			cont = msg->payload.ext.next;
		}

		while (offset >= chunk_len) {
			cont = cont->next;
			offset -= chunk_len;
		}
	}

	while ((req_len > 0) && (cont != NULL)) {
		chunk_len = HEXDUMP_BYTES_CONT_MSG - offset;
		cpy_len = req_len > chunk_len ? chunk_len : req_len;

		if (put_op) {
			(void)memcpy(&cont->payload.bytes[offset],
				     data, cpy_len);
		} else {
			(void)memcpy(data, &cont->payload.bytes[offset],
				     cpy_len);
		}

		offset = 0;
		cont = cont->next;
		req_len -= cpy_len;
		data += cpy_len;
	}
}

void log_msg_hexdump_data_put(struct log_msg *msg,
			      uint8_t *data,
			      size_t *length,
			      size_t offset)
{
	log_msg_hexdump_data_op(msg, data, length, offset, true);
}

void log_msg_hexdump_data_get(struct log_msg *msg,
			      uint8_t *data,
			      size_t *length,
			      size_t offset)
{
	log_msg_hexdump_data_op(msg, data, length, offset, false);
}

uint32_t log_msg_mem_get_free(void)
{
	return k_mem_slab_num_free_get(&log_msg_pool);
}

uint32_t log_msg_mem_get_used(void)
{
	return k_mem_slab_num_used_get(&log_msg_pool);
}

uint32_t log_msg_mem_get_max_used(void)
{
	return k_mem_slab_max_used_get(&log_msg_pool);
}