/*
 * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/ztest.h>
#include <zephyr/linker/linker-defs.h>
#include <zephyr/kernel/mm.h>
#include <zephyr/dt-bindings/memory-attr/memory-attr-arm.h>

#include <zephyr/multi_heap/shared_multi_heap.h>

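/*
 * The DT_INST_* macros used below operate on the devicetree nodes compatible
 * with "zephyr,memory-region".
 */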
#define DT_DRV_COMPAT zephyr_memory_region

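/*
 * Base addresses of the res0/res1/res2 memory regions as defined in the
 * devicetree, used to check in which region each allocation lands.
 */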
#define RES0_CACHE_ADDR		DT_REG_ADDR(DT_NODELABEL(res0))
#define RES1_NOCACHE_ADDR	DT_REG_ADDR(DT_NODELABEL(res1))
#define RES2_CACHE_ADDR		DT_REG_ADDR(DT_NODELABEL(res2))

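/*
 * Bookkeeping for each memory region: the SMH region descriptor plus the
 * original physical address, since region.addr is rewritten with the virtual
 * address on MMU platforms.
 */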
struct region_map {
	struct shared_multi_heap_region region;
	uintptr_t p_addr;
};

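/*
 * Expands to one region_map initializer per enabled zephyr,memory-region
 * node, falling back to DT_MEM_ARM_MPU_UNKNOWN when the zephyr,memory-attr
 * property is missing.
 */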
#define FOREACH_REG(n)							\
	{								\
		.region = {						\
			.addr = (uintptr_t) DT_INST_REG_ADDR(n),	\
			.size = DT_INST_REG_SIZE(n),			\
			.attr = DT_INST_PROP_OR(n, zephyr_memory_attr,	\
						DT_MEM_ARM_MPU_UNKNOWN), \
		},							\
	},

struct region_map map[] = {
	DT_INST_FOREACH_STATUS_OKAY(FOREACH_REG)
};

#if defined(CONFIG_MMU)
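/*
 * Map the region into the virtual address space and replace the physical
 * address in the descriptor with the resulting virtual address.
 */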
static void smh_reg_map(struct shared_multi_heap_region *region)
{
	uint32_t mem_attr;
	uint8_t *v_addr;

	mem_attr = (region->attr == SMH_REG_ATTR_CACHEABLE) ? K_MEM_CACHE_WB : K_MEM_CACHE_NONE;
	mem_attr |= K_MEM_PERM_RW;

	k_mem_map_phys_bare(&v_addr, region->addr, region->size, mem_attr);

	region->addr = (uintptr_t) v_addr;
}
#endif /* CONFIG_MMU */

/*
 * Given a virtual address, retrieve the original memory region the mapping
 * belongs to.
 */
static struct region_map *get_region_map(void *v_addr)
{
	for (size_t reg = 0; reg < ARRAY_SIZE(map); reg++) {
		if ((uintptr_t) v_addr >= map[reg].region.addr &&
		    (uintptr_t) v_addr < map[reg].region.addr + map[reg].region.size) {
			return &map[reg];
		}
	}
	return NULL;
}

static inline enum shared_multi_heap_attr mpu_to_reg_attr(uint32_t dt_attr)
{
	/*
	 * Memory regions defined in the DT with the MPU property `RAM` are
	 * registered as cacheable, so memory can be allocated from them using
	 * the attribute `SMH_REG_ATTR_CACHEABLE`.
	 *
	 * Memory regions defined in the DT with the MPU property `RAM_NOCACHE`
	 * are registered as non-cacheable, so memory can be allocated from
	 * them using the attribute `SMH_REG_ATTR_NON_CACHEABLE`.
	 *
	 * [MPU attr] -> [SMH attr]
	 *
	 * RAM         -> SMH_REG_ATTR_CACHEABLE
	 * RAM_NOCACHE -> SMH_REG_ATTR_NON_CACHEABLE
	 */
	switch (DT_MEM_ARM_GET(dt_attr)) {
	case DT_MEM_ARM_MPU_RAM:
		return SMH_REG_ATTR_CACHEABLE;
	case DT_MEM_ARM_MPU_RAM_NOCACHE:
		return SMH_REG_ATTR_NON_CACHEABLE;
	default:
		/* Unexpected DT memory attribute */
		ztest_test_fail();
	}

	/* Keep the compiler happy */
	return 0;
}

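/*
 * Walk the DT-generated map[] and register with the shared multi-heap pool
 * every region carrying a zephyr,memory-attr property, converting the MPU
 * attribute into an SMH attribute (and MMU-mapping the region when needed).
 */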
static void fill_multi_heap(void)
{
	struct region_map *reg_map;

	for (size_t idx = 0; idx < DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT); idx++) {
		reg_map = &map[idx];

		/* zephyr,memory-attr property not found. Skip it. */
		if (reg_map->region.attr == DT_MEM_ARM_MPU_UNKNOWN) {
			continue;
		}

		/* Convert MPU attributes to shared-multi-heap capabilities */
		reg_map->region.attr = mpu_to_reg_attr(reg_map->region.attr);

		/* Assume for now that phys == virt */
		reg_map->p_addr = reg_map->region.addr;

#if defined(CONFIG_MMU)
		/*
		 * For MMU-enabled platforms we have to MMU-map at run-time
		 * the physical address retrieved from the DT, because the
		 * SMH framework expects virtual addresses.
		 *
		 * For MPU-enabled platforms the code assumes that the
		 * regions are configured at build time, so no mapping is
		 * needed.
		 */
		smh_reg_map(&reg_map->region);
#endif /* CONFIG_MMU */

		shared_multi_heap_add(&reg_map->region, NULL);
	}
}

ZTEST(shared_multi_heap, test_shared_multi_heap)
{
	struct region_map *reg_map;
	void *block;
	int ret;

	ret = shared_multi_heap_pool_init();
	zassert_equal(0, ret, "failed initialization");

	/*
	 * Return -EALREADY if already inited
	 */
	ret = shared_multi_heap_pool_init();
	zassert_equal(-EALREADY, ret, "second init should fail");

	/*
	 * Fill the buffer pool with the memory heaps coming from DT
	 */
	fill_multi_heap();

	/*
	 * Request a small cacheable chunk. It should be allocated in the
	 * smaller region RES0
	 */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x40);
	reg_map = get_region_map(block);

	zassert_equal(reg_map->p_addr, RES0_CACHE_ADDR, "block in the wrong memory region");
	zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

	/*
	 * Request another small cacheable chunk. It should be allocated in the
	 * smaller cacheable region RES0
	 */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x80);
	reg_map = get_region_map(block);

	zassert_equal(reg_map->p_addr, RES0_CACHE_ADDR, "block in the wrong memory region");
	zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

	/*
	 * Request a big cacheable chunk. It should be allocated in the
	 * bigger cacheable region RES2
	 */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_CACHEABLE, 0x1200);
	reg_map = get_region_map(block);

	zassert_equal(reg_map->p_addr, RES2_CACHE_ADDR, "block in the wrong memory region");
	zassert_equal(reg_map->region.attr, SMH_REG_ATTR_CACHEABLE, "wrong memory attribute");

	/*
	 * Request a non-cacheable chunk. It should be allocated in the
	 * non-cacheable region RES1
	 */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x100);
	reg_map = get_region_map(block);

	zassert_equal(reg_map->p_addr, RES1_NOCACHE_ADDR, "block in the wrong memory region");
	zassert_equal(reg_map->region.attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");

	/*
	 * Request again a non-cacheable chunk. It should be allocated in the
	 * non-cacheable region RES1
	 */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x100);
	reg_map = get_region_map(block);

	zassert_equal(reg_map->p_addr, RES1_NOCACHE_ADDR, "block in the wrong memory region");
	zassert_equal(reg_map->region.attr, SMH_REG_ATTR_NON_CACHEABLE, "wrong memory attribute");

	/* Request a block too big */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0x10000);
	zassert_is_null(block, "allocated buffer too big for the region");

	/* Request a 0-sized block */
	block = shared_multi_heap_alloc(SMH_REG_ATTR_NON_CACHEABLE, 0);
	zassert_is_null(block, "0 size accepted as valid");

	/* Request a non-existent attribute */
	block = shared_multi_heap_alloc(MAX_SHARED_MULTI_HEAP_ATTR, 0x100);
	zassert_is_null(block, "wrong attribute accepted as valid");
}

ZTEST_SUITE(shared_multi_heap, NULL, NULL, NULL, NULL, NULL);