/*
 * Copyright (c) 2020 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6 
7 #include <kernel.h>
8 #include <ksched.h>
9 #include <wait_q.h>
10 #include <init.h>
11 #include <linker/linker-defs.h>
12 
/**
 * Initialize a k_heap object at runtime.
 *
 * Sets up the wait queue used by blocking allocations and hands the
 * caller-provided memory region to the underlying sys_heap allocator.
 *
 * @param h     Heap object to initialize
 * @param mem   Backing memory region for the heap
 * @param bytes Size of the backing region in bytes
 */
void k_heap_init(struct k_heap *h, void *mem, size_t bytes)
{
	z_waitq_init(&h->wait_q);
	sys_heap_init(&h->heap, mem, bytes);

	SYS_PORT_TRACING_OBJ_INIT(k_heap, h);
}
20 
/* Initialize all statically defined k_heap objects (those placed in the
 * k_heap iterable section by K_HEAP_DEFINE and friends).
 *
 * Runs via SYS_INIT; with demand paging and sections not present at boot,
 * it is registered twice (PRE_KERNEL_1 and POST_KERNEL) so that pinned
 * heaps are set up early and pageable heaps after paging is up.
 */
static int statics_init(const struct device *unused)
{
	ARG_UNUSED(unused);
	STRUCT_SECTION_FOREACH(k_heap, h) {
#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
		/* Some heaps may not present at boot, so we need to wait for
		 * paging mechanism to be initialized before we can initialize
		 * each heap.
		 */
		extern bool z_sys_post_kernel;
		bool do_clear = z_sys_post_kernel;

		/* During pre-kernel init, z_sys_post_kernel == false,
		 * initialize if within pinned region. Otherwise skip.
		 * In post-kernel init, z_sys_post_kernel == true, skip those in
		 * pinned region as they have already been initialized and
		 * possibly already in use. Otherwise initialize.
		 */
		if (lnkr_is_pinned((uint8_t *)h) &&
		    lnkr_is_pinned((uint8_t *)&h->wait_q) &&
		    lnkr_is_region_pinned((uint8_t *)h->heap.init_mem,
					  h->heap.init_bytes)) {
			/* Flip the phase-derived default: pinned heaps are
			 * handled in the opposite pass from pageable ones.
			 */
			do_clear = !do_clear;
		}

		if (do_clear)
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
		{
			/* Without demand paging this brace pairs with the bare
			 * block above and every heap is initialized here.
			 */
			k_heap_init(h, h->heap.init_mem, h->heap.init_bytes);
		}
	}
	return 0;
}
54 
/* First pass: initialize static heaps that are available at boot. */
SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);

#if defined(CONFIG_DEMAND_PAGING) && !defined(CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT)
/* Need to wait for paging mechanism to be initialized before
 * heaps that are not in pinned sections can be initialized.
 */
SYS_INIT(statics_init, POST_KERNEL, 0);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */
63 
/**
 * Allocate aligned memory from a k_heap, optionally blocking.
 *
 * Attempts the allocation under the heap spinlock; on failure, and when
 * multithreading is enabled and the timeout has not expired, the calling
 * thread pends on the heap's wait queue until memory is freed (see
 * k_heap_free(), which wakes waiters), then retries.
 *
 * @param h       Heap to allocate from
 * @param align   Required alignment in bytes
 * @param bytes   Number of bytes requested
 * @param timeout How long to wait for memory to become available
 *
 * @return Pointer to the allocated block, or NULL on timeout/failure
 */
void *k_heap_aligned_alloc(struct k_heap *h, size_t align, size_t bytes,
			k_timeout_t timeout)
{
	/* Absolute deadline in ticks; computed once so retries wait only
	 * for the remaining time.
	 */
	int64_t now, end = sys_clock_timeout_end_calc(timeout);
	void *ret = NULL;
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, h, timeout);

	/* Blocking allocation from ISR context is not allowed. */
	__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

	bool blocked_alloc = false;

	while (ret == NULL) {
		ret = sys_heap_aligned_alloc(&h->heap, align, bytes);

		/* Stop on success, on deadline expiry, or when there is no
		 * scheduler to pend on (single-threaded configuration).
		 */
		now = sys_clock_tick_get();
		if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
		    (ret != NULL) || ((end - now) <= 0)) {
			break;
		}

		if (!blocked_alloc) {
			blocked_alloc = true;

			/* Emit the "blocking" trace event only once per call. */
			SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, h, timeout);
		} else {
			/**
			 * @todo	Trace attempt to avoid empty trace segments
			 */
		}

		/* Atomically releases the spinlock and pends for the time
		 * remaining until the deadline; re-acquire before retrying.
		 */
		(void) z_pend_curr(&h->lock, key, &h->wait_q,
				   K_TICKS(end - now));
		key = k_spin_lock(&h->lock);
	}

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, h, timeout, ret);

	k_spin_unlock(&h->lock, key);
	return ret;
}
106 
/**
 * Allocate memory from a k_heap with default (pointer-size) alignment.
 *
 * Thin convenience wrapper: delegates to k_heap_aligned_alloc() with an
 * alignment of sizeof(void *), so blocking/timeout semantics are identical.
 *
 * @param h       Heap to allocate from
 * @param bytes   Number of bytes requested
 * @param timeout How long to wait for memory to become available
 *
 * @return Pointer to the allocated block, or NULL on timeout/failure
 */
void *k_heap_alloc(struct k_heap *h, size_t bytes, k_timeout_t timeout)
{
	void *ret;

	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, h, timeout);

	ret = k_heap_aligned_alloc(h, sizeof(void *), bytes, timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, h, timeout, ret);

	return ret;
}
117 
/**
 * Return a block to a k_heap.
 *
 * Frees @a mem under the heap spinlock, then wakes every thread pended in
 * k_heap_aligned_alloc() so each can retry its allocation against the
 * newly available memory.
 *
 * @param h   Heap the block was allocated from
 * @param mem Block to free (semantics of NULL follow sys_heap_free())
 */
void k_heap_free(struct k_heap *h, void *mem)
{
	k_spinlock_key_t key = k_spin_lock(&h->lock);

	sys_heap_free(&h->heap, mem);

	SYS_PORT_TRACING_OBJ_FUNC(k_heap, free, h);
	/* If any waiters were made ready, reschedule (which also drops the
	 * lock); otherwise just release the lock.
	 */
	if (IS_ENABLED(CONFIG_MULTITHREADING) && z_unpend_all(&h->wait_q) != 0) {
		z_reschedule(&h->lock, key);
	} else {
		k_spin_unlock(&h->lock, key);
	}
}
131