1 /*
2 * SPDX-FileCopyrightText: 2015-2022 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7
8 /*----------------------------------------------------------------------------------------------------
 * Abstraction layer for PSRAM. PSRAM device related registers and MMU/Cache related code should be
10 * abstracted to lower layers.
11 *
12 * When we add more types of external RAM memory, this can be made into a more intelligent dispatcher.
13 *----------------------------------------------------------------------------------------------------*/
14 #include <sys/param.h>
15 #include "sdkconfig.h"
16 #include "esp_attr.h"
17 #include "esp_err.h"
18 #include "esp_log.h"
19 #include "esp_heap_caps_init.h"
20 #include "hal/mmu_hal.h"
21 #include "hal/cache_ll.h"
22 #include "esp_private/esp_psram_io.h"
23 #include "esp_private/esp_psram_extram.h"
24 #include "esp_private/mmu_psram_flash.h"
25 #include "esp_psram_impl.h"
26 #include "esp_psram.h"
27 #include "esp_private/esp_mmu_map_private.h"
28 #include "esp_mmu_map.h"
29
30 #if CONFIG_IDF_TARGET_ESP32
31 #include "esp32/himem.h"
32 #include "esp32/rom/cache.h"
33 #include "esp_private/esp_cache_esp32_private.h"
34 #endif
35
36
37 #if CONFIG_IDF_TARGET_ESP32
38 #if CONFIG_FREERTOS_UNICORE
39 #define PSRAM_MODE PSRAM_VADDR_MODE_NORMAL
40 #else
41 #define PSRAM_MODE PSRAM_VADDR_MODE_LOWHIGH
42 #endif
43 #else
44 #define PSRAM_MODE PSRAM_VADDR_MODE_NORMAL
45 #endif
46
47 /**
48 * Two types of PSRAM memory regions for now:
49 * - 8bit aligned
50 * - 32bit aligned
51 */
52 #define PSRAM_MEM_TYPE_NUM 2
53 #define PSRAM_MEM_8BIT_ALIGNED 0
54 #define PSRAM_MEM_32BIT_ALIGNED 1
55
56 #if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
57 extern uint8_t _ext_ram_bss_start;
58 extern uint8_t _ext_ram_bss_end;
59 #endif //#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
60
61 #if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
62 extern uint8_t _ext_ram_noinit_start;
63 extern uint8_t _ext_ram_noinit_end;
64 #endif //#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
65
66 typedef struct {
67 intptr_t vaddr_start;
68 intptr_t vaddr_end;
69 size_t size; //in bytes
70 } psram_mem_t;
71
72 typedef struct {
73 bool is_initialised;
74 /**
75 * @note 1
76 * As we can't use heap allocator during this stage, we need to statically declare these regions.
77 * Luckily only S2 has two different types of memory regions:
78 * - byte-aligned memory
79 * - word-aligned memory
80 * On the other hand, the type number usually won't be very big
81 *
82 * On other chips, only one region is needed.
83 * So for chips other than S2, size of `regions_to_heap[1]` and `mapped_regions[1]`will always be zero.
84 *
85 * If in the future, this condition is worse (dbus memory isn't consecutive), we need to delegate this context
86 * to chip-specific files, and only keep a (void *) pointer here pointing to those chip-specific contexts
87 */
88 psram_mem_t regions_to_heap[PSRAM_MEM_TYPE_NUM]; //memory regions that are available to be added to the heap allocator
89 psram_mem_t mapped_regions[PSRAM_MEM_TYPE_NUM]; //mapped memory regions
90 } psram_ctx_t;
91
92 static psram_ctx_t s_psram_ctx;
93 static const char* TAG = "esp_psram";
94
95
96 #if CONFIG_IDF_TARGET_ESP32
/* Weak fallback: when nothing from esp_himem.c is referenced anywhere in the
   binary, the linker picks this stub instead of the real implementation, so no
   PSRAM is set aside for himem unless the himem API is actually used. */
size_t __attribute__((weak)) esp_himem_reserved_area_size(void)
{
    size_t reserved = 0;
    return reserved;
}
103
/**
 * Map PSRAM into the virtual address space via the ESP32 SRAM MMU.
 *
 * @param v_start  Virtual start address of the mapping
 * @param size     Size of the region to map, in bytes. `size / 1024 / 32`
 *                 is passed as the page count, so presumably the mapping is
 *                 done in 32KB units — TODO confirm against cache_sram_mmu_set docs.
 *
 * NOTE(review): placed in IRAM since it reconfigures the cache/MMU while
 * executing — confirm flash cache is expected to be unavailable here.
 */
static void IRAM_ATTR s_mapping(int v_start, int size)
{
    //Enable external RAM in MMU (PRO CPU)
    cache_sram_mmu_set(0, 0, v_start, 0, 32, (size / 1024 / 32));
    //Flush and enable icache for APP CPU
#if !CONFIG_FREERTOS_UNICORE
    DPORT_CLEAR_PERI_REG_MASK(DPORT_APP_CACHE_CTRL1_REG, DPORT_APP_CACHE_MASK_DRAM1);
    //Same mapping, applied for the APP CPU (core 1)
    cache_sram_mmu_set(1, 0, v_start, 0, 32, (size / 1024 / 32));
#endif
}
114 #endif //CONFIG_IDF_TARGET_ESP32
115
116
/**
 * @brief Initialise PSRAM.
 *
 * Brings up the PSRAM device, optionally copies flash .text/.rodata into it,
 * maps the (remaining) physical PSRAM into the CPUs' virtual address space,
 * and records both the mapped regions and the regions later available to the
 * heap allocator in `s_psram_ctx`.
 *
 * @return
 *      - ESP_OK                 on success
 *      - ESP_ERR_INVALID_STATE  if PSRAM was already initialised
 *      - Error from `esp_psram_impl_enable` if the device fails to start
 */
esp_err_t esp_psram_init(void)
{
    if (s_psram_ctx.is_initialised) {
        return ESP_ERR_INVALID_STATE;
    }

    esp_err_t ret = ESP_FAIL;
    ret = esp_psram_impl_enable(PSRAM_MODE);
    if (ret != ESP_OK) {
#if CONFIG_SPIRAM_IGNORE_NOTFOUND
        //NOTE(review): log only when IGNORE_NOTFOUND is set — presumably because
        //in that configuration the caller continues booting without PSRAM; confirm.
        ESP_EARLY_LOGE(TAG, "PSRAM enabled but initialization failed. Bailing out.");
#endif
        return ret;
    }
    s_psram_ctx.is_initialised = true;

    uint32_t psram_physical_size = 0;
    ret = esp_psram_impl_get_physical_size(&psram_physical_size);
    assert(ret == ESP_OK);

    ESP_EARLY_LOGI(TAG, "Found %dMB PSRAM device", psram_physical_size / (1024 * 1024));
    ESP_EARLY_LOGI(TAG, "Speed: %dMHz", CONFIG_SPIRAM_SPEED);
#if CONFIG_IDF_TARGET_ESP32
    ESP_EARLY_LOGI(TAG, "PSRAM initialized, cache is in %s mode.", \
                                          (PSRAM_MODE==PSRAM_VADDR_MODE_EVENODD)?"even/odd (2-core)": \
                                          (PSRAM_MODE==PSRAM_VADDR_MODE_LOWHIGH)?"low/high (2-core)": \
                                          (PSRAM_MODE==PSRAM_VADDR_MODE_NORMAL)?"normal (1-core)":"ERROR");
#endif

    //Available size may differ from the physical size reported above
    uint32_t psram_available_size = 0;
    ret = esp_psram_impl_get_available_size(&psram_available_size);
    assert(ret == ESP_OK);

    __attribute__((unused)) uint32_t total_available_size = psram_available_size;
    /**
     * `start_page` is the psram physical address in MMU page size.
     * MMU page size on ESP32S2 is 64KB
     * e.g.: psram physical address 16 is in page 0
     *
     * Here we plan to copy FLASH instructions to psram physical address 0, which is the No.0 page.
     */
    __attribute__((unused)) uint32_t start_page = 0;
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS || CONFIG_SPIRAM_RODATA
    uint32_t used_page = 0;
#endif

    //------------------------------------Copy Flash .text to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS
    ret = mmu_config_psram_text_segment(start_page, total_available_size, &used_page);
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "No enough psram memory for instructon!");
        abort();
    }
    //Pages now holding .text are no longer available for mapping below
    start_page += used_page;
    psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
    ESP_EARLY_LOGV(TAG, "after copy .text, used page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_FETCH_INSTRUCTIONS

    //------------------------------------Copy Flash .rodata to PSRAM-------------------------------------//
#if CONFIG_SPIRAM_RODATA
    ret = mmu_config_psram_rodata_segment(start_page, total_available_size, &used_page);
    if (ret != ESP_OK) {
        ESP_EARLY_LOGE(TAG, "No enough psram memory for rodata!");
        abort();
    }
    //Pages now holding .rodata are no longer available for mapping below
    start_page += used_page;
    psram_available_size -= MMU_PAGE_TO_BYTES(used_page);
    ESP_EARLY_LOGV(TAG, "after copy .rodata, used page is %d, start_page is %d, psram_available_size is %d B", used_page, start_page, psram_available_size);
#endif //#if CONFIG_SPIRAM_RODATA

    //----------------------------------Map the PSRAM physical range to MMU-----------------------------//
    /**
     * @note 2
     * Similarly to @note 1, we expect HW DBUS memory to be consecutive.
     *
     * If situation is worse in the future (memory region isn't consecutive), we need to put these logics into chip-specific files
     */
    size_t total_mapped_size = 0;
    size_t size_to_map = 0;
    size_t byte_aligned_size = 0;
    //Find the largest consecutive free virtual range that supports both 8-bit and 32-bit access
    ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &byte_aligned_size);
    assert(ret == ESP_OK);
    size_to_map = MIN(byte_aligned_size, psram_available_size);

    //Reserve that virtual range so nothing else claims it before we map
    const void *v_start_8bit_aligned = NULL;
    ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_8BIT | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_8bit_aligned);
    assert(ret == ESP_OK);

#if CONFIG_IDF_TARGET_ESP32
    //ESP32 uses its own legacy SRAM-MMU setup routine
    s_mapping((int)v_start_8bit_aligned, size_to_map);
#else
    uint32_t actual_mapped_len = 0;
    mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_8bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
    start_page += BYTES_TO_MMU_PAGE(actual_mapped_len);
    ESP_EARLY_LOGV(TAG, "8bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);

    //Enable the cache buses covering the new mapping, for core 0 (and core 1 on multicore)
    cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
    cache_ll_l1_enable_bus(0, bus_mask);
#if !CONFIG_FREERTOS_UNICORE
    bus_mask = cache_ll_l1_get_bus(1, (uint32_t)v_start_8bit_aligned, actual_mapped_len);
    cache_ll_l1_enable_bus(1, bus_mask);
#endif
#endif //#if CONFIG_IDF_TARGET_ESP32

    //Record the 8-bit-capable region, both as mapped and as heap-eligible
    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
    s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size = size_to_map;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start = (intptr_t)v_start_8bit_aligned;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end = (intptr_t)v_start_8bit_aligned + size_to_map;
    ESP_EARLY_LOGV(TAG, "8bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size, v_start_8bit_aligned);
    total_mapped_size += size_to_map;

#if CONFIG_IDF_TARGET_ESP32S2
    /**
     * On ESP32S2, there are 2 types of DBUS memory:
     * - byte-aligned-memory
     * - word-aligned-memory
     *
     * If byte-aligned-memory isn't enough, we search for word-aligned-memory to do mapping
     */
    if (total_mapped_size < psram_available_size) {
        size_to_map = psram_available_size - total_mapped_size;

        size_t word_aligned_size = 0;
        ret = esp_mmu_map_get_max_consecutive_free_block_size(MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &word_aligned_size);
        assert(ret == ESP_OK);
        size_to_map = MIN(word_aligned_size, size_to_map);

        const void *v_start_32bit_aligned = NULL;
        ret = esp_mmu_map_reserve_block_with_caps(size_to_map, MMU_MEM_CAP_READ | MMU_MEM_CAP_WRITE | MMU_MEM_CAP_32BIT, MMU_TARGET_PSRAM0, &v_start_32bit_aligned);
        assert(ret == ESP_OK);

        mmu_hal_map_region(0, MMU_TARGET_PSRAM0, (intptr_t)v_start_32bit_aligned, MMU_PAGE_TO_BYTES(start_page), size_to_map, &actual_mapped_len);
        ESP_EARLY_LOGV(TAG, "32bit-aligned-region: actual_mapped_len is 0x%x bytes", actual_mapped_len);

        cache_bus_mask_t bus_mask = cache_ll_l1_get_bus(0, (uint32_t)v_start_32bit_aligned, actual_mapped_len);
        cache_ll_l1_enable_bus(0, bus_mask);

        //Record the word-only region (ESP32-S2 is single core, no core-1 bus setup)
        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
        s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size = size_to_map;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start = (intptr_t)v_start_32bit_aligned;
        s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end = (intptr_t)v_start_32bit_aligned + size_to_map;
        ESP_EARLY_LOGV(TAG, "32bit-aligned-range: 0x%x B, starting from: 0x%x", s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size, v_start_32bit_aligned);
        total_mapped_size += size_to_map;
    }
#endif // #if CONFIG_IDF_TARGET_ESP32S2

    if (total_mapped_size < psram_available_size) {
        ESP_EARLY_LOGW(TAG, "Virtual address not enough for PSRAM, map as much as we can. %dMB is mapped", total_mapped_size / 1024 / 1024);
    }

    /*------------------------------------------------------------------------------
     * After mapping, we DON'T care about the PSRAM PHYSICAL ADDRESS ANYMORE!
     *----------------------------------------------------------------------------*/

    //------------------------------------Configure .bss in PSRAM-------------------------------------//
#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY
    //Linker places ext .bss at the start of the mapped region; carve it out of the heap range.
    //should never be negative number
    uint32_t ext_bss_size = ((intptr_t)&_ext_ram_bss_end - (intptr_t)&_ext_ram_bss_start);
    ESP_EARLY_LOGV(TAG, "ext_bss_size is %d", ext_bss_size);
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_bss_size;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_bss_size;
#endif //#if CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY

#if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
    //Likewise carve out the ext .noinit segment so the heap never touches it
    uint32_t ext_noinit_size = ((intptr_t)&_ext_ram_noinit_end - (intptr_t)&_ext_ram_noinit_start);
    ESP_EARLY_LOGV(TAG, "ext_noinit_size is %d", ext_noinit_size);
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start += ext_noinit_size;
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= ext_noinit_size;
#endif

#if CONFIG_IDF_TARGET_ESP32
    //NOTE(review): subtracts (reserved - 1), leaving one extra byte in the heap
    //range — presumably matches himem's inclusive-end address convention; confirm
    //against esp_himem.c before changing.
    s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size -= esp_himem_reserved_area_size() - 1;
#endif

    //will be removed, TODO: IDF-6944
#if CONFIG_IDF_TARGET_ESP32
    cache_driver_t drv = {
        NULL,
        esp_psram_extram_writeback_cache,
    };
    cache_register_writeback(&drv);
#endif

    return ESP_OK;
}
306
307
esp_psram_extram_add_to_heap_allocator(void)308 esp_err_t esp_psram_extram_add_to_heap_allocator(void)
309 {
310 esp_err_t ret = ESP_FAIL;
311
312 uint32_t byte_aligned_caps[] = {MALLOC_CAP_SPIRAM|MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_8BIT|MALLOC_CAP_32BIT};
313 ret = heap_caps_add_region_with_caps(byte_aligned_caps,
314 s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
315 s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].vaddr_end);
316 if (ret != ESP_OK) {
317 return ret;
318 }
319
320 if (s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) {
321 assert(s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start);
322 uint32_t word_aligned_caps[] = {MALLOC_CAP_SPIRAM|MALLOC_CAP_DEFAULT, 0, MALLOC_CAP_32BIT};
323 ret = heap_caps_add_region_with_caps(word_aligned_caps,
324 s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
325 s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
326 if (ret != ESP_OK) {
327 return ret;
328 }
329 }
330
331 ESP_EARLY_LOGI(TAG, "Adding pool of %dK of PSRAM memory to heap allocator",
332 (s_psram_ctx.regions_to_heap[PSRAM_MEM_8BIT_ALIGNED].size + s_psram_ctx.regions_to_heap[PSRAM_MEM_32BIT_ALIGNED].size) / 1024);
333
334 return ESP_OK;
335 }
336
337
esp_psram_check_ptr_addr(const void * p)338 bool IRAM_ATTR esp_psram_check_ptr_addr(const void *p)
339 {
340 if (!s_psram_ctx.is_initialised) {
341 return false;
342 }
343
344 return ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_end) ||
345 ((intptr_t)p >= s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start && (intptr_t)p < s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_end);
346 }
347
348
esp_psram_extram_reserve_dma_pool(size_t size)349 esp_err_t esp_psram_extram_reserve_dma_pool(size_t size)
350 {
351 if (size == 0) {
352 return ESP_OK; //no-op
353 }
354
355 ESP_EARLY_LOGI(TAG, "Reserving pool of %dK of internal memory for DMA/internal allocations", size / 1024);
356 /* Pool may be allocated in multiple non-contiguous chunks, depending on available RAM */
357 while (size > 0) {
358 size_t next_size = heap_caps_get_largest_free_block(MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
359 next_size = MIN(next_size, size);
360
361 ESP_EARLY_LOGD(TAG, "Allocating block of size %d bytes", next_size);
362 uint8_t *dma_heap = heap_caps_malloc(next_size, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL);
363 if (!dma_heap || next_size == 0) {
364 return ESP_ERR_NO_MEM;
365 }
366
367 uint32_t caps[] = {0, MALLOC_CAP_DMA | MALLOC_CAP_INTERNAL, MALLOC_CAP_8BIT | MALLOC_CAP_32BIT};
368 esp_err_t e = heap_caps_add_region_with_caps(caps, (intptr_t)dma_heap, (intptr_t)dma_heap + next_size - 1);
369 if (e != ESP_OK) {
370 return e;
371 }
372 size -= next_size;
373 }
374 return ESP_OK;
375 }
376
esp_psram_is_initialized(void)377 bool IRAM_ATTR __attribute__((pure)) esp_psram_is_initialized(void)
378 {
379 return s_psram_ctx.is_initialised;
380 }
381
esp_psram_get_size(void)382 size_t esp_psram_get_size(void)
383 {
384 uint32_t available_size = 0;
385 esp_err_t ret = esp_psram_impl_get_available_size(&available_size);
386 if (ret != ESP_OK) {
387 //This means PSRAM isn't initialised, to keep back-compatibility, set size to 0.
388 available_size = 0;
389 }
390 return (size_t)available_size;
391 }
392
/* GPIO number used as the PSRAM chip-select line, as reported by the
   implementation layer. */
uint8_t esp_psram_io_get_cs_io(void)
{
    uint8_t cs_io = esp_psram_impl_get_cs_io();
    return cs_io;
}
397
398 /*
399 Simple RAM test. Writes a word every 32 bytes. Takes about a second to complete for 4MiB. Returns
400 true when RAM seems OK, false when test fails. WARNING: Do not run this before the 2nd cpu has been
401 initialized (in a two-core system) or after the heap allocator has taken ownership of the memory.
402 */
s_test_psram(intptr_t v_start,size_t size,intptr_t reserved_start,intptr_t reserved_end)403 static bool s_test_psram(intptr_t v_start, size_t size, intptr_t reserved_start, intptr_t reserved_end)
404 {
405 volatile int *spiram = (volatile int *)v_start;
406 size_t p;
407 int errct = 0;
408 int initial_err = -1;
409 for (p = 0; p < (size / sizeof(int)); p += 8) {
410 intptr_t addr = (intptr_t)&spiram[p];
411 if ((reserved_start <= addr) && (addr < reserved_end)) {
412 continue;
413 }
414 spiram[p] = p ^ 0xAAAAAAAA;
415 }
416 for (p = 0; p < (size / sizeof(int)); p += 8) {
417 intptr_t addr = (intptr_t)&spiram[p];
418 if ((reserved_start <= addr) && (addr < reserved_end)) {
419 continue;
420 }
421 if (spiram[p] != (p ^ 0xAAAAAAAA)) {
422 errct++;
423 if (errct == 1) {
424 initial_err = p * 4;
425 }
426 }
427 }
428 if (errct) {
429 ESP_EARLY_LOGE(TAG, "SPI SRAM memory test fail. %d/%d writes failed, first @ %X\n", errct, size/32, initial_err + v_start);
430 return false;
431 } else {
432 ESP_EARLY_LOGI(TAG, "SPI SRAM memory test OK");
433 return true;
434 }
435
436 }
437
esp_psram_extram_test(void)438 bool esp_psram_extram_test(void)
439 {
440 bool test_success = false;
441 #if CONFIG_SPIRAM_ALLOW_NOINIT_SEG_EXTERNAL_MEMORY
442 intptr_t noinit_vstart = (intptr_t)&_ext_ram_noinit_start;
443 intptr_t noinit_vend = (intptr_t)&_ext_ram_noinit_end;
444 #else
445 intptr_t noinit_vstart = 0;
446 intptr_t noinit_vend = 0;
447 #endif
448 test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].vaddr_start,
449 s_psram_ctx.mapped_regions[PSRAM_MEM_8BIT_ALIGNED].size,
450 noinit_vstart,
451 noinit_vend);
452 if (!test_success) {
453 return false;
454 }
455
456 if (s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size) {
457 test_success = s_test_psram(s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].vaddr_start,
458 s_psram_ctx.mapped_regions[PSRAM_MEM_32BIT_ALIGNED].size,
459 0,
460 0);
461 }
462 if (!test_success) {
463 return false;
464 }
465
466 return true;
467 }
468