/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Driver to utilize TLB on Intel Audio DSP
 *
 * The TLB (Translation Lookaside Buffer) table is used to map between
 * physical and virtual memory. It is global to all cores
 * on the DSP, as changes to the TLB table are visible to
 * all cores.
 *
 * Note that all passed in addresses should be in cached range
 * (aka cached addresses). Due to the need to calculate TLB
 * indexes, virtual addresses will be converted internally to
 * cached ones via sys_cache_cached_ptr_get(). However, physical
 * addresses are untouched.
 */
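
/*
 * A minimal usage sketch (illustrative only; error handling omitted,
 * SOME_VIRT/SOME_PHYS are placeholder page-aligned addresses):
 *
 *   void *va = sys_cache_cached_ptr_get(UINT_TO_POINTER(SOME_VIRT));
 *
 *   sys_mm_drv_map_page(va, SOME_PHYS, SYS_MM_MEM_PERM_RW);
 *   ...
 *   sys_mm_drv_unmap_page(va);
 *
 * Passing 0 (NULL) as the physical address lets the driver pick the
 * first free physical page itself (see sys_mm_drv_map_page()).
 */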

#include "mm_drv_intel_adsp.h"
#include <soc_util.h>
#include <zephyr/drivers/mm/mm_drv_intel_adsp_mtl_tlb.h>
#include <zephyr/drivers/mm/mm_drv_bank.h>
#include <zephyr/debug/sparse.h>
#include <zephyr/cache.h>
#include <kernel_arch_interface.h>

#define SRAM_BANK_PAGE_NUM	(SRAM_BANK_SIZE / CONFIG_MM_DRV_PAGE_SIZE)

static struct k_spinlock tlb_lock;
extern struct k_spinlock sys_mm_drv_common_lock;

static struct sys_mm_drv_bank hpsram_bank[L2_SRAM_BANK_NUM];

#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
#include <adsp_comm_widget.h>

static uint32_t used_pages;
/* PMC uses 32 KB banks */
static uint32_t used_pmc_banks_reported;
#endif


/* Define a marker which is placed by the linker script just after
 * the last explicitly defined section. All .text, .data, .bss and .heap
 * sections should be placed before this marker in memory.
 * This driver uses the location of the marker to
 * unmap the unused L2 memory and power off the corresponding memory banks.
 */
__attribute__((__section__(".unused_ram_start_marker")))
static int unused_l2_sram_start_marker = 0xba0babce;
#define UNUSED_L2_START_ALIGNED ROUND_UP(POINTER_TO_UINT(&unused_l2_sram_start_marker), \
					 CONFIG_MM_DRV_PAGE_SIZE)
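
/* For illustration, a matching linker-script fragment could look like
 * the following (a sketch, not the actual Zephyr linker script):
 *
 *   SECTION_PROLOGUE(.unused_ram_start_marker, , )
 *   {
 *           . = ALIGN(4);
 *           KEEP(*(.unused_ram_start_marker))
 *   } GROUP_LINK_IN(RAMABLE_REGION)
 *
 * placed after all other RAM sections, so the marker lands just past
 * the last used byte of L2 SRAM.
 */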

/* declare L2 physical memory block */
SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(
		L2_PHYS_SRAM_REGION,
		CONFIG_MM_DRV_PAGE_SIZE,
		L2_SRAM_PAGES_NUM,
		(uint8_t *) L2_SRAM_BASE);

/**
 * Calculate the index to the TLB table.
 *
 * @param vaddr Page-aligned virtual address.
 * @return Index to the TLB table.
 */
static uint32_t get_tlb_entry_idx(uintptr_t vaddr)
{
	return (POINTER_TO_UINT(vaddr) - CONFIG_KERNEL_VM_BASE) /
	       CONFIG_MM_DRV_PAGE_SIZE;
}
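
/*
 * For example (illustrative values): with a 4 KiB page size, the
 * virtual address CONFIG_KERNEL_VM_BASE + 0x3000 falls on page 3
 * of the virtual address space, so it resolves to TLB index 3.
 */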

/**
 * Calculate the index of the HPSRAM bank.
 *
 * @param pa Physical address.
 * @return Index of the HPSRAM bank.
 */
static uint32_t get_hpsram_bank_idx(uintptr_t pa)
{
	uint32_t phys_offset = pa - L2_SRAM_BASE;

	return (phys_offset / SRAM_BANK_SIZE);
}
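
/*
 * For example, a physical address of L2_SRAM_BASE + 2 * SRAM_BANK_SIZE
 * has an offset of exactly two banks, so it resolves to bank index 2.
 */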

/**
 * Convert the SYS_MM_MEM_PERM_* flags into TLB entry permission bits.
 *
 * @param flags Access flags (SYS_MM_MEM_PERM_*)
 * @return TLB entry permission bits
 */
static uint16_t flags_to_tlb_perms(uint32_t flags)
{
#if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
	uint16_t perms = 0;

	if ((flags & SYS_MM_MEM_PERM_RW) == SYS_MM_MEM_PERM_RW) {
		perms |= TLB_WRITE_BIT;
	}

	if ((flags & SYS_MM_MEM_PERM_EXEC) == SYS_MM_MEM_PERM_EXEC) {
		perms |= TLB_EXEC_BIT;
	}

	return perms;
#else
	return 0;
#endif
}
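
/*
 * E.g. flags_to_tlb_perms(SYS_MM_MEM_PERM_RW | SYS_MM_MEM_PERM_EXEC)
 * yields (TLB_WRITE_BIT | TLB_EXEC_BIT) on ACE, and 0 elsewhere.
 */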

#if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
/**
 * Convert TLB entry permission bits to the SYS_MM_MEM_PERM_* flags.
 *
 * @param perms TLB entry permission bits
 * @return Access flags (SYS_MM_MEM_PERM_*)
 */
static uint16_t tlb_perms_to_flags(uint16_t perms)
{
	uint32_t flags = 0;

	if ((perms & TLB_WRITE_BIT) == TLB_WRITE_BIT) {
		flags |= SYS_MM_MEM_PERM_RW;
	}

	if ((perms & TLB_EXEC_BIT) == TLB_EXEC_BIT) {
		flags |= SYS_MM_MEM_PERM_EXEC;
	}

	return flags;
}
#endif

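/**
 * Turn a single HPSRAM bank on or off.
 *
 * HSxPGCTL is a power-gating control (1 == gated, i.e. off), hence
 * the inverted @a enable. When @a non_blocking is false, poll
 * HSxPGISTS until the power status matches the request.
 */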
static int sys_mm_drv_hpsram_pwr(uint32_t bank_idx, bool enable, bool non_blocking)
{
#if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
	if (bank_idx > ace_hpsram_get_bank_count()) {
		return -1;
	}

	HPSRAM_REGS(bank_idx)->HSxPGCTL = !enable;

	if (!non_blocking) {
		while (HPSRAM_REGS(bank_idx)->HSxPGISTS == enable) {
			k_busy_wait(1);
		}
	}
#endif
	return 0;
}

#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
static void sys_mm_drv_report_page_usage(void)
{
	/* PMC uses 32 KB banks */
	uint32_t pmc_banks = DIV_ROUND_UP(used_pages, KB(32) / CONFIG_MM_DRV_PAGE_SIZE);

	if (used_pmc_banks_reported != pmc_banks) {
		if (!adsp_comm_widget_pmc_send_ipc(pmc_banks)) {
			/* Store reported value if message was sent successfully. */
			used_pmc_banks_reported = pmc_banks;
		}
	}
}
#endif

int sys_mm_drv_map_page(void *virt, uintptr_t phys, uint32_t flags)
{
	k_spinlock_key_t key;
	uint32_t entry_idx, bank_idx;
	uint16_t entry;
	volatile uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	int ret = 0;
	void *phys_block_ptr;

	/*
	 * Cached addresses for both physical and virtual.
	 *
	 * As the main memory is in cached address ranges,
	 * the cached physical address is needed to perform
	 * the bounds check.
	 */
	uintptr_t pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(UINT_TO_POINTER(phys)));
	uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));

	/* Make sure VA is page-aligned */
	CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check bounds of virtual address space */
	CHECKIF((va < UNUSED_L2_START_ALIGNED) ||
		(va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * When the provided physical address is NULL,
	 * it is a signal to the Intel ADSP TLB driver to
	 * select the first available free physical address
	 * autonomously within the driver.
	 */
	if (UINT_TO_POINTER(phys) == NULL) {
		ret = sys_mem_blocks_alloc_contiguous(&L2_PHYS_SRAM_REGION, 1,
						      &phys_block_ptr);
		if (ret != 0) {
			__ASSERT(false,
				 "unable to assign free phys page %d\n", ret);
			goto out;
		}
		pa = POINTER_TO_UINT(sys_cache_cached_ptr_get(phys_block_ptr));
	}

	/* Check bounds of physical address space */
	CHECKIF((pa < L2_SRAM_BASE) ||
		(pa >= (L2_SRAM_BASE + L2_SRAM_SIZE))) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure PA is page-aligned */
	CHECKIF(!sys_mm_drv_is_addr_aligned(pa)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&tlb_lock);

	entry_idx = get_tlb_entry_idx(va);

#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
	used_pages++;
	sys_mm_drv_report_page_usage();
#endif

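	/* Power the bank on when its first page gets mapped */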
	bank_idx = get_hpsram_bank_idx(pa);
	if (sys_mm_drv_bank_page_mapped(&hpsram_bank[bank_idx]) == 1) {
		sys_mm_drv_hpsram_pwr(bank_idx, true, false);
	}

	/*
	 * The address part of the TLB entry takes the lowest
	 * TLB_PADDR_SIZE bits of the physical page number,
	 * and discards the highest bits. This is due to the
	 * architecture design where the same physical page
	 * can be accessed via two addresses. One address goes
	 * through the cache, and the other one accesses
	 * memory directly (without cache). The difference
	 * between these two addresses is in the higher bits,
	 * while the lower bits are the same. This is why the
	 * TLB only cares about the lower part of the physical
	 * address.
	 */
	entry = pa_to_tlb_entry(pa);

	/* Enable the translation in the TLB entry */
	entry |= TLB_ENABLE_BIT;

	/* Set permissions for this entry */
	entry |= flags_to_tlb_perms(flags);

	tlb_entries[entry_idx] = entry;

#ifdef CONFIG_MMU
	arch_mem_map(virt, va, CONFIG_MM_DRV_PAGE_SIZE, flags);
#endif
	/*
	 * Invalidate the cache of the newly mapped virtual page to
	 * avoid stale data.
	 */
	sys_cache_data_invd_range(virt, CONFIG_MM_DRV_PAGE_SIZE);

	k_spin_unlock(&tlb_lock, key);

out:
	return ret;
}

int sys_mm_drv_map_region(void *virt, uintptr_t phys,
			  size_t size, uint32_t flags)
{
	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;
	uintptr_t pa;
	uint8_t *va;

	CHECKIF(!sys_mm_drv_is_addr_aligned(phys) ||
		!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	va = (__sparse_force uint8_t *)sys_cache_cached_ptr_get(virt);
	pa = phys;

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		int ret2 = sys_mm_drv_map_page(va, pa, flags);

		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va);

			ret = ret2;
		}
		va += CONFIG_MM_DRV_PAGE_SIZE;
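		/*
		 * With phys == 0, pa stays 0 so that every page in the
		 * region is auto-allocated by sys_mm_drv_map_page().
		 */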
		if (phys != 0) {
			pa += CONFIG_MM_DRV_PAGE_SIZE;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}

int sys_mm_drv_map_array(void *virt, uintptr_t *phys,
			 size_t cnt, uint32_t flags)
{
	void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);

	return sys_mm_drv_simple_map_array(va, phys, cnt, flags);
}
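
/*
 * Illustrative use: map two scattered physical pages back-to-back in
 * virtual space (PHYS_PAGE_A/B and virt are placeholders):
 *
 *   uintptr_t pages[] = { PHYS_PAGE_A, PHYS_PAGE_B };
 *
 *   sys_mm_drv_map_array(virt, pages, ARRAY_SIZE(pages),
 *                        SYS_MM_MEM_PERM_RW);
 */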

static int sys_mm_drv_unmap_page_wflush(void *virt, bool flush_data)
{
	k_spinlock_key_t key;
	uint32_t entry_idx, bank_idx;
	uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	uint16_t entry;
	uintptr_t pa;
	int ret = 0;

	/* Use cached virtual address */
	uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));

	/* Check bounds of virtual address space */
	CHECKIF((va < UNUSED_L2_START_ALIGNED) ||
		(va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure inputs are page-aligned */
	CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&tlb_lock);

	entry_idx = get_tlb_entry_idx(va);
	entry = tlb_entries[entry_idx];

	/* Check if the translation is enabled in the TLB entry.
	 * An attempt to flush the cache of an inactive address results
	 * in a CPU exception.
	 */
	if (!(entry & TLB_ENABLE_BIT)) {
		ret = -EFAULT;
		goto out_unlock;
	}

	/*
	 * Flush the cache to make sure the backing physical page
	 * has the latest data.
	 * No flush when called from sys_mm_drv_mm_init().
	 */
	if (flush_data) {
		sys_cache_data_flush_range(virt, CONFIG_MM_DRV_PAGE_SIZE);
#ifdef CONFIG_MMU
		arch_mem_unmap(virt, CONFIG_MM_DRV_PAGE_SIZE);
#endif
	}

	pa = tlb_entry_to_pa(entry);

	/* Restore default entry settings with the enable bit cleared. */
	tlb_entries[entry_idx] = 0;

	/* Check bounds of physical address space.
	 * Initial TLB mappings could point to non-existent physical pages.
	 */
	if ((pa >= L2_SRAM_BASE) && (pa < (L2_SRAM_BASE + L2_SRAM_SIZE))) {
		sys_mem_blocks_free_contiguous(&L2_PHYS_SRAM_REGION,
					       UINT_TO_POINTER(pa), 1);

		bank_idx = get_hpsram_bank_idx(pa);
#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
		used_pages--;
		sys_mm_drv_report_page_usage();
#endif

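		/* Power the bank off once its last page is unmapped */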
		if (sys_mm_drv_bank_page_unmapped(&hpsram_bank[bank_idx]) == SRAM_BANK_PAGE_NUM) {
			sys_mm_drv_hpsram_pwr(bank_idx, false, false);
		}
	}

out_unlock:
	k_spin_unlock(&tlb_lock, key);

out:
	return ret;
}

int sys_mm_drv_unmap_page(void *virt)
{
	return sys_mm_drv_unmap_page_wflush(virt, true);
}

int sys_mm_drv_unmap_region(void *virt, size_t size)
{
	void *va = (__sparse_force void *)sys_cache_cached_ptr_get(virt);

	return sys_mm_drv_simple_unmap_region(va, size);
}

int sys_mm_drv_update_page_flags(void *virt, uint32_t flags)
{
	k_spinlock_key_t key;
	uint32_t entry_idx;
	uint16_t entry;
	uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	int ret = 0;

	/* Use cached virtual address */
	uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));

	/* Make sure inputs are page-aligned and check bounds of virtual address space */
	CHECKIF(!sys_mm_drv_is_addr_aligned(va) ||
		(va < UNUSED_L2_START_ALIGNED) ||
		(va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) {
		return -EINVAL;
	}

	key = k_spin_lock(&tlb_lock);

	entry_idx = get_tlb_entry_idx(va);

	entry = tlb_entries[entry_idx];

	/* Check that the entry is already mapped */
	if (!(entry & TLB_ENABLE_BIT)) {
		ret = -EFAULT;
		goto out;
	}

	/* Clear the access flags */
	entry &= ~(TLB_EXEC_BIT | TLB_WRITE_BIT);

	/* Set new permissions for this entry */
	entry |= flags_to_tlb_perms(flags);

	tlb_entries[entry_idx] = entry;

#ifdef CONFIG_MMU
	arch_mem_map(virt, tlb_entry_to_pa(entry), CONFIG_MM_DRV_PAGE_SIZE, flags);
#endif

out:
	k_spin_unlock(&tlb_lock, key);
	return ret;
}
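
/*
 * For example, sys_mm_drv_update_page_flags(virt, 0) clears both
 * TLB_WRITE_BIT and TLB_EXEC_BIT, leaving the page mapped read-only.
 */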

#ifdef CONFIG_MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM
static int sys_mm_drv_unmap_region_initial(void *virt_in, size_t size)
{
	void *virt = (__sparse_force void *)sys_cache_cached_ptr_get(virt_in);

	k_spinlock_key_t key;
	int ret = 0;
	size_t offset;

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va = (uint8_t *)virt + offset;

		int ret2 = sys_mm_drv_unmap_page_wflush(va, false);

		/* -EFAULT means that this page is not mapped.
		 * This is not an error since we want to unmap all virtual memory without knowing
		 * which pages are mapped.
		 */
		if (ret2 != 0 && ret2 != -EFAULT) {
			__ASSERT(false, "cannot unmap %p\n", va);

			ret = ret2;
		}
	}

	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	return ret;
}
#endif

int sys_mm_drv_page_phys_get(void *virt, uintptr_t *phys)
{
	uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	uintptr_t ent;
	int ret = 0;

	/* Use cached address */
	uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));

	CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check bounds of virtual address space */
	CHECKIF((va < CONFIG_KERNEL_VM_BASE) ||
		(va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) {
		ret = -EINVAL;
		goto out;
	}

	ent = tlb_entries[get_tlb_entry_idx(va)];

	if ((ent & TLB_ENABLE_BIT) != TLB_ENABLE_BIT) {
		ret = -EFAULT;
	} else {
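		/* Reassemble the full physical address from the stored
		 * page number and the physical base.
		 */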
		if (phys != NULL) {
			*phys = (ent & TLB_PADDR_MASK) *
				CONFIG_MM_DRV_PAGE_SIZE + TLB_PHYS_BASE;
		}

		ret = 0;
	}

out:
	return ret;
}

int sys_mm_drv_page_flag_get(void *virt, uint32_t *flags)
{
	ARG_UNUSED(virt);
	int ret = 0;

#if defined(CONFIG_SOC_SERIES_INTEL_ADSP_ACE)
	uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	uint16_t ent;

	/* Use cached address */
	uintptr_t va = POINTER_TO_UINT(sys_cache_cached_ptr_get(virt));

	CHECKIF(!sys_mm_drv_is_addr_aligned(va)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check bounds of virtual address space */
	CHECKIF((va < CONFIG_KERNEL_VM_BASE) ||
		(va >= (CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE))) {
		ret = -EINVAL;
		goto out;
	}

	ent = tlb_entries[get_tlb_entry_idx(va)];

	if ((ent & TLB_ENABLE_BIT) != TLB_ENABLE_BIT) {
		ret = -EFAULT;
	} else {
		*flags = tlb_perms_to_flags(ent);
	}

out:
#else
	/*
	 * There are no caching mode, R/W, or execution bits,
	 * so just return 0.
	 */

	*flags = 0U;
#endif

	return ret;
}

int sys_mm_drv_remap_region(void *virt_old, size_t size,
			    void *virt_new)
{
	void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
	void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);

	return sys_mm_drv_simple_remap_region(va_old, size, va_new);
}

int sys_mm_drv_move_region(void *virt_old, size_t size, void *virt_new,
			   uintptr_t phys_new)
{
	k_spinlock_key_t key;
	size_t offset;
	int ret = 0;

	virt_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
	virt_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);

	CHECKIF(!sys_mm_drv_is_virt_addr_aligned(virt_old) ||
		!sys_mm_drv_is_virt_addr_aligned(virt_new) ||
		!sys_mm_drv_is_size_aligned(size)) {
		ret = -EINVAL;
		goto out;
	}

	if ((POINTER_TO_UINT(virt_new) >= POINTER_TO_UINT(virt_old)) &&
	    (POINTER_TO_UINT(virt_new) < (POINTER_TO_UINT(virt_old) + size))) {
		ret = -EINVAL; /* overlaps */
		goto out;
	}

	/*
	 * The function's behavior has been updated to accept
	 * phys_new == NULL and get the physical addresses from
	 * the actual TLB instead of from the caller.
	 */
	if (phys_new != POINTER_TO_UINT(NULL) &&
	    !sys_mm_drv_is_addr_aligned(phys_new)) {
		ret = -EINVAL;
		goto out;
	}

	key = k_spin_lock(&sys_mm_drv_common_lock);

	if (!sys_mm_drv_is_virt_region_mapped(virt_old, size) ||
	    !sys_mm_drv_is_virt_region_unmapped(virt_new, size)) {
		ret = -EINVAL;
		goto unlock_out;
	}

	for (offset = 0; offset < size; offset += CONFIG_MM_DRV_PAGE_SIZE) {
		uint8_t *va_old = (uint8_t *)virt_old + offset;
		uint8_t *va_new = (uint8_t *)virt_new + offset;
		uintptr_t pa;
		uint32_t flags;
		int ret2;

		ret2 = sys_mm_drv_page_flag_get(va_old, &flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page flags %p\n", va_old);

			ret = ret2;
			goto unlock_out;
		}

		ret2 = sys_mm_drv_page_phys_get(va_old, &pa);
		if (ret2 != 0) {
			__ASSERT(false, "cannot query page paddr %p\n", va_old);

			ret = ret2;
			goto unlock_out;
		}

		/*
		 * Only map the new page when we can retrieve
		 * flags and phys addr of the old mapped page, as we don't
		 * want to map with unknown random flags.
		 */
		ret2 = sys_mm_drv_map_page(va_new, pa, flags);
		if (ret2 != 0) {
			__ASSERT(false, "cannot map 0x%lx to %p\n", pa, va_new);

			ret = ret2;
		}

		ret2 = sys_mm_drv_unmap_page(va_old);
		if (ret2 != 0) {
			__ASSERT(false, "cannot unmap %p\n", va_old);

			ret = ret2;
		}
	}

unlock_out:
	k_spin_unlock(&sys_mm_drv_common_lock, key);

out:
	/*
	 * Since the move is done in virtual space, we need to
	 * flush the cache to make sure the backing physical
	 * pages have the new data.
	 */
	sys_cache_data_flush_range(virt_new, size);
	sys_cache_data_flush_and_invd_range(virt_old, size);

	return ret;
}

int sys_mm_drv_move_array(void *virt_old, size_t size, void *virt_new,
			  uintptr_t *phys_new, size_t phys_cnt)
{
	int ret;

	void *va_new = (__sparse_force void *)sys_cache_cached_ptr_get(virt_new);
	void *va_old = (__sparse_force void *)sys_cache_cached_ptr_get(virt_old);

	ret = sys_mm_drv_simple_move_array(va_old, size, va_new,
					   phys_new, phys_cnt);

	/*
	 * Since the memcpy() is done in virtual space, we need to
	 * flush the cache to make sure the backing physical
	 * pages have the new data.
	 */
	sys_cache_data_flush_range(va_new, size);

	return ret;
}

static int sys_mm_drv_mm_init(const struct device *dev)
{
	int ret;

	ARG_UNUSED(dev);

	/*
	 * Change the size of available physical memory at runtime
	 * according to the firmware register information.
	 */

	uint32_t available_memory_size = ace_hpsram_get_bank_count() * SRAM_BANK_SIZE;

	L2_PHYS_SRAM_REGION.info.num_blocks = available_memory_size / CONFIG_MM_DRV_PAGE_SIZE;

	ret = calculate_memory_regions(UNUSED_L2_START_ALIGNED);
	CHECKIF(ret != 0) {
		return ret;
	}
	/*
	 * Initialize memblocks that will store physical
	 * page usage. Initially all physical pages are
	 * mapped in a linear way to the virtual address space,
	 * so mark all pages as allocated.
	 */

	ret = sys_mem_blocks_get(&L2_PHYS_SRAM_REGION,
				 (void *) L2_SRAM_BASE, L2_SRAM_PAGES_NUM);
	CHECKIF(ret != 0) {
		return ret;
	}

	/*
	 * Initialize refcounts for all HPSRAM banks
	 * as fully used because the entire HPSRAM is powered on
	 * at system boot. Set the reference count to the number
	 * of pages within a single memory bank.
	 */
	for (int i = 0; i < L2_SRAM_BANK_NUM; i++) {
		sys_mm_drv_bank_init(&hpsram_bank[i],
				     SRAM_BANK_PAGE_NUM);
	}
#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
	used_pages = L2_SRAM_BANK_NUM * SRAM_BANK_SIZE / CONFIG_MM_DRV_PAGE_SIZE;
#endif

#ifdef CONFIG_MM_DRV_INTEL_ADSP_TLB_REMAP_UNUSED_RAM
	/*
	 * Find the virtual address range which is unused
	 * in the system.
	 */
	if (L2_SRAM_BASE + L2_SRAM_SIZE < UNUSED_L2_START_ALIGNED ||
	    L2_SRAM_BASE > UNUSED_L2_START_ALIGNED) {

		__ASSERT(false,
			 "unused l2 pointer is outside of l2 sram range %p\n",
			 (void *)UNUSED_L2_START_ALIGNED);
		return -EFAULT;
	}

	/*
	 * Unmap all unused physical pages from the entire
	 * virtual address space to save power
	 */
	size_t unused_size = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE -
			     UNUSED_L2_START_ALIGNED;

	ret = sys_mm_drv_unmap_region_initial(UINT_TO_POINTER(UNUSED_L2_START_ALIGNED),
					      unused_size);


	/* Need to reset max pages statistics after unmap */
	for (int i = 0; i < L2_SRAM_BANK_NUM; i++) {
		sys_mm_drv_bank_stats_reset_max(&hpsram_bank[i]);
	}
#endif

	/*
	 * Notify the PMC about used HP-SRAM pages.
	 */
#ifdef CONFIG_SOC_INTEL_COMM_WIDGET
	sys_mm_drv_report_page_usage();
#endif

	return 0;
}

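/*
 * Layout of the storage buffer written below:
 *
 *   [ TLB table copy, TLB_SIZE bytes ]
 *   [ phys addr, 4 bytes | page contents, CONFIG_MM_DRV_PAGE_SIZE bytes ] ...
 *   [ 0x00000000 end marker ]
 */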
static void adsp_mm_save_context(void *storage_buffer)
{
	uint16_t entry;
	uint32_t entry_idx;
	int page_idx;
	uint32_t phys_addr;
	volatile uint16_t *tlb_entries = UINT_TO_POINTER(TLB_BASE);
	uint8_t *location = (uint8_t *) storage_buffer;

	/* first, store the existing TLB */
	memcpy(location, UINT_TO_POINTER(TLB_BASE), TLB_SIZE);
	location += TLB_SIZE;

	/* save context of all the pages */
	for (page_idx = 0; page_idx < L2_SRAM_PAGES_NUM; page_idx++) {
		phys_addr = POINTER_TO_UINT(L2_SRAM_BASE) +
			    CONFIG_MM_DRV_PAGE_SIZE * page_idx;
		if (sys_mem_blocks_is_region_free(
				&L2_PHYS_SRAM_REGION,
				UINT_TO_POINTER(phys_addr), 1)) {
			/* skip a free page */
			continue;
		}

		/* map the physical addr 1:1 to virtual address */
		entry_idx = get_tlb_entry_idx(phys_addr);
		entry = pa_to_tlb_entry(phys_addr);

		if (((tlb_entries[entry_idx] & TLB_PADDR_MASK) != entry) ||
		    ((tlb_entries[entry_idx] & TLB_ENABLE_BIT) != TLB_ENABLE_BIT)) {
			/* This page needs remapping */

			/* Enable the translation in the TLB entry */
			entry |= TLB_ENABLE_BIT;

			/* map the page 1:1 virtual to physical */
			tlb_entries[entry_idx] = entry;

#ifdef CONFIG_MMU
			arch_mem_map(UINT_TO_POINTER(phys_addr), phys_addr, CONFIG_MM_DRV_PAGE_SIZE,
				     K_MEM_CACHE_WB);
#endif

			/* Invalidate the cache to avoid stale data.
			 * All cache data has been flushed before;
			 * do this for pages to remap only.
			 */
			sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr),
						  CONFIG_MM_DRV_PAGE_SIZE);
		}

		/* save physical address */
		*((uint32_t *) location) = phys_addr;
		location += sizeof(uint32_t);

		/* save the page */
		memcpy(location,
		       UINT_TO_POINTER(phys_addr),
		       CONFIG_MM_DRV_PAGE_SIZE);
		location += CONFIG_MM_DRV_PAGE_SIZE;
	}

	/* write the end marker - a null address */
	*((uint32_t *) location) = 0;
	location += sizeof(uint32_t);

	sys_cache_data_flush_range(
		storage_buffer,
		(uint32_t)location - (uint32_t)storage_buffer);


	/* system state is frozen, ready to power off, no further changes will be stored */
}

__imr void adsp_mm_restore_context(void *storage_buffer)
{
	/* at this point the system must be in a startup state:
	 * the TLB must be set to its initial state.
	 * Note! the stack must NOT be in the area being restored
	 */
	uint32_t phys_addr;
	uint8_t *location;

	/* restore context of all the pages */
	location = (uint8_t *) storage_buffer + TLB_SIZE;

	phys_addr = *((uint32_t *) location);

	while (phys_addr != 0) {
		uint32_t phys_addr_uncached =
			POINTER_TO_UINT(sys_cache_uncached_ptr_get(
				(void __sparse_cache *)UINT_TO_POINTER(phys_addr)));
		uint32_t phys_offset = phys_addr - L2_SRAM_BASE;
		uint32_t bank_idx = (phys_offset / SRAM_BANK_SIZE);

		location += sizeof(uint32_t);

		/* turn on memory bank power, wait till the power is on */
		__ASSERT_NO_MSG(bank_idx <= ace_hpsram_get_bank_count());
		HPSRAM_REGS(bank_idx)->HSxPGCTL = 0;
		while (HPSRAM_REGS(bank_idx)->HSxPGISTS == 1) {
			/* k_busy_wait cannot be used here - not available */
		}

		/* copy data to the uncached alias and invalidate the cache */
		bmemcpy(UINT_TO_POINTER(phys_addr_uncached),
			location,
			CONFIG_MM_DRV_PAGE_SIZE);
		sys_cache_data_invd_range(UINT_TO_POINTER(phys_addr), CONFIG_MM_DRV_PAGE_SIZE);

		location += CONFIG_MM_DRV_PAGE_SIZE;
		phys_addr = *((uint32_t *) location);
	}

	/* restore the original TLB table */
	bmemcpy(UINT_TO_POINTER(TLB_BASE), storage_buffer, TLB_SIZE);

	/* HPSRAM memory is restored */
}

static uint32_t adsp_mm_get_storage_size(void)
{
	/*
	 * FIXME - currently the function returns the maximum possible size of the buffer.
	 * As L3 memory is generally a huge area, this is OK (and fast).
	 * In the future the function may go through the mapping and calculate the required size.
	 */
	return L2_SRAM_SIZE + TLB_SIZE + (L2_SRAM_PAGES_NUM * sizeof(void *))
		+ sizeof(void *);
}

static const struct intel_adsp_tlb_api adsp_tlb_api_func = {
	.save_context = adsp_mm_save_context,
	.get_storage_size = adsp_mm_get_storage_size
};

DEVICE_DT_DEFINE(DT_INST(0, intel_adsp_mtl_tlb),
		 sys_mm_drv_mm_init,
		 NULL,
		 NULL,
		 NULL,
		 POST_KERNEL,
		 0,
		 &adsp_tlb_api_func);