/*
 * Xtensa MMU support
 *
 * Private data declarations
 *
 * Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_
#define ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_

#include <stdint.h>
#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util_macro.h>

/**
 * @defgroup xtensa_mmu_internal_apis Xtensa Memory Management Unit (MMU) Internal APIs
 * @ingroup xtensa_mmu_apis
 * @{
 */

/** Mask for VPN in PTE */
#define XTENSA_MMU_PTE_VPN_MASK 0xFFFFF000U

/** Mask for PPN in PTE */
#define XTENSA_MMU_PTE_PPN_MASK 0xFFFFF000U

/** Mask for attributes in PTE */
#define XTENSA_MMU_PTE_ATTR_MASK 0x0000000FU

/** Mask for cache mode in PTE */
#define XTENSA_MMU_PTE_ATTR_CACHED_MASK 0x0000000CU

/** Mask used to figure out which L1 page table to use */
#define XTENSA_MMU_L1_MASK 0x3FF00000U

/** Mask used to figure out which L2 page table to use */
#define XTENSA_MMU_L2_MASK 0x3FFFFFU

/** Mask for the page table base address in the PTEVADDR register */
#define XTENSA_MMU_PTEBASE_MASK 0xFFC00000U

/** Number of bits to shift for PPN in PTE */
#define XTENSA_MMU_PTE_PPN_SHIFT 12U

/** Mask for ring in PTE */
#define XTENSA_MMU_PTE_RING_MASK 0x00000030U

/** Number of bits to shift for ring in PTE */
#define XTENSA_MMU_PTE_RING_SHIFT 4U

/** Number of bits to shift for the SW reserved area in PTE */
#define XTENSA_MMU_PTE_SW_SHIFT 6U

/** Mask for SW bits in PTE */
#define XTENSA_MMU_PTE_SW_MASK 0x00000FC0U

/**
 * Internal bit just used to indicate that the attr field must
 * be set in the SW bits too. It is used later when duplicating the
 * kernel page tables.
 */
#define XTENSA_MMU_PTE_ATTR_ORIGINAL BIT(31)

/** Construct a page table entry (PTE) */
#define XTENSA_MMU_PTE(paddr, ring, sw, attr) \
	(((paddr) & XTENSA_MMU_PTE_PPN_MASK) | \
	(((ring) << XTENSA_MMU_PTE_RING_SHIFT) & XTENSA_MMU_PTE_RING_MASK) | \
	(((sw) << XTENSA_MMU_PTE_SW_SHIFT) & XTENSA_MMU_PTE_SW_MASK) | \
	((attr) & XTENSA_MMU_PTE_ATTR_MASK))

/** Get the attributes from a PTE */
#define XTENSA_MMU_PTE_ATTR_GET(pte) \
	((pte) & XTENSA_MMU_PTE_ATTR_MASK)

/** Set the attributes in a PTE */
#define XTENSA_MMU_PTE_ATTR_SET(pte, attr) \
	(((pte) & ~XTENSA_MMU_PTE_ATTR_MASK) | ((attr) & XTENSA_MMU_PTE_ATTR_MASK))

/** Set the SW field in a PTE */
#define XTENSA_MMU_PTE_SW_SET(pte, sw) \
	(((pte) & ~XTENSA_MMU_PTE_SW_MASK) | ((sw) << XTENSA_MMU_PTE_SW_SHIFT))

/** Get the SW field from a PTE */
#define XTENSA_MMU_PTE_SW_GET(pte) \
	(((pte) & XTENSA_MMU_PTE_SW_MASK) >> XTENSA_MMU_PTE_SW_SHIFT)

/** Set the ring in a PTE */
#define XTENSA_MMU_PTE_RING_SET(pte, ring) \
	(((pte) & ~XTENSA_MMU_PTE_RING_MASK) | \
	((ring) << XTENSA_MMU_PTE_RING_SHIFT))

/** Get the ring from a PTE */
#define XTENSA_MMU_PTE_RING_GET(pte) \
	(((pte) & XTENSA_MMU_PTE_RING_MASK) >> XTENSA_MMU_PTE_RING_SHIFT)

/** Get the ASID from the RASID register corresponding to the ring in a PTE */
#define XTENSA_MMU_PTE_ASID_GET(pte, rasid) \
	(((rasid) >> ((((pte) & XTENSA_MMU_PTE_RING_MASK) \
	>> XTENSA_MMU_PTE_RING_SHIFT) * 8)) & 0xFF)
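
/*
 * Illustrative sketch, not part of this header's API: composing a PTE
 * for a hypothetical 4 KB kernel page and reading fields back with the
 * accessor macros above. The physical address is made up, ring 0 is the
 * kernel ring, and attribute 0 is used only to keep the example
 * self-contained; a real mapping would use an attribute such as
 * XTENSA_MMU_CACHED_WB referenced further below.
 */
static inline uint32_t xtensa_mmu_example_pte(void)
{
	/* paddr 0x60001000, ring 0 (kernel), no SW bits, attr 0 */
	uint32_t pte = XTENSA_MMU_PTE(0x60001000U, 0, 0, 0);

	/* The accessors recover the packed fields. */
	(void)XTENSA_MMU_PTE_RING_GET(pte); /* == 0 */
	(void)XTENSA_MMU_PTE_ATTR_GET(pte); /* == 0 */

	return pte;
}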

/** Calculate the L2 page table position from a virtual address */
#define XTENSA_MMU_L2_POS(vaddr) \
	(((vaddr) & XTENSA_MMU_L2_MASK) >> 12U)

/** Calculate the L1 page table position from a virtual address */
#define XTENSA_MMU_L1_POS(vaddr) \
	((vaddr) >> 22U)
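
/*
 * Illustrative sketch, not part of this header's API: for a made-up
 * virtual address, XTENSA_MMU_L1_POS() gives the index into the L1 page
 * table (one entry per 4 MB region) and XTENSA_MMU_L2_POS() the index
 * into the corresponding L2 page table (one entry per 4 KB page).
 */
static inline void xtensa_mmu_example_table_pos(void)
{
	uint32_t vaddr = 0xE0001000U;

	(void)XTENSA_MMU_L1_POS(vaddr); /* 0xE0001000 >> 22 == 0x380 */
	(void)XTENSA_MMU_L2_POS(vaddr); /* (0xE0001000 & 0x3FFFFF) >> 12 == 1 */
}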

/**
 * @def XTENSA_MMU_PAGE_TABLE_ATTR
 *
 * PTE attributes for entries in the L1 page table. Should never be
 * writable; may be cached in non-SMP contexts only.
 */
#if CONFIG_MP_MAX_NUM_CPUS == 1
#define XTENSA_MMU_PAGE_TABLE_ATTR XTENSA_MMU_CACHED_WB
#else
#define XTENSA_MMU_PAGE_TABLE_ATTR 0
#endif

/** This ASID is shared between all domains and kernel. */
#define XTENSA_MMU_SHARED_ASID 255

/** Fixed data TLB way to map the page table */
#define XTENSA_MMU_PTE_WAY 7

/** Fixed data TLB way to map the vecbase */
#define XTENSA_MMU_VECBASE_WAY 8

/** Kernel ring. Value of the ring field in a PTE for kernel mappings. */
#define XTENSA_MMU_KERNEL_RING 0

/** User ring. Value of the ring field in a PTE for user mappings. */
#define XTENSA_MMU_USER_RING 2

/** Ring used with XTENSA_MMU_SHARED_ASID */
#define XTENSA_MMU_SHARED_RING 3

/** Number of data TLB ways [0-9] */
#define XTENSA_MMU_NUM_DTLB_WAYS 10

/** Number of instruction TLB ways [0-6] */
#define XTENSA_MMU_NUM_ITLB_WAYS 7

/** Number of auto-refill ways */
#define XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS 4

/** Indicate PTE is illegal. */
#define XTENSA_MMU_PTE_ILLEGAL (BIT(3) | BIT(2))

/**
 * PITLB HIT bit.
 *
 * For more information see
 * Xtensa Instruction Set Architecture (ISA) Reference Manual
 * 4.6.5.7 Formats for Probing MMU Option TLB Entries
 */
#define XTENSA_MMU_PITLB_HIT BIT(3)

/**
 * PDTLB HIT bit.
 *
 * For more information see
 * Xtensa Instruction Set Architecture (ISA) Reference Manual
 * 4.6.5.7 Formats for Probing MMU Option TLB Entries
 */
#define XTENSA_MMU_PDTLB_HIT BIT(4)

/**
 * Virtual address where the page table is mapped
 */
#define XTENSA_MMU_PTEVADDR CONFIG_XTENSA_MMU_PTEVADDR

/**
 * Find the PTE entry address of a given vaddr.
 *
 * For example, assuming PTEVADDR is 0xE0000000,
 * the page table spans 0xE0000000 - 0xE03FFFFF:
 *
 * the PTE for address 0x00000000 is at 0xE0000000
 * the PTE for address 0x00001000 is at 0xE0000004
 * .....
 * the PTE for address 0xE0000000 (the page table itself) is at 0xE0380000
 *
 * Generalizing it, the PTE address for any virtual address can be calculated this way:
 *
 * PTE_ENTRY_ADDRESS = PTEVADDR + ((VADDR / 4096) * 4)
 */
#define XTENSA_MMU_PTE_ENTRY_VADDR(base, vaddr) \
	((base) + (((vaddr) / KB(4)) * 4))
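
/*
 * Illustrative check of the worked example above, not part of this
 * header's API: with the page table mapped at 0xE0000000, the PTE that
 * maps the page table page itself is found at 0xE0380000.
 */
static inline uint32_t xtensa_mmu_example_pte_entry_vaddr(void)
{
	/* 0xE0000000 + ((0xE0000000 / 4096) * 4) == 0xE0380000 */
	return XTENSA_MMU_PTE_ENTRY_VADDR(0xE0000000U, 0xE0000000U);
}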

/**
 * Get ASID for a given ring from RASID register.
 *
 * RASID contains four 8-bit ASIDs, one per ring.
 */
#define XTENSA_MMU_RASID_ASID_GET(rasid, ring) \
	(((rasid) >> ((ring) * 8)) & 0xff)

/**
 * @brief Set RASID register.
 *
 * @param rasid Value to be set.
 */
static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid)
{
	__asm__ volatile("wsr %0, rasid\n\t"
			"isync\n" : : "a"(rasid));
}

/**
 * @brief Get RASID register.
 *
 * @return Register value.
 */
static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
{
	uint32_t rasid;

	__asm__ volatile("rsr %0, rasid" : "=a"(rasid));
	return rasid;
}

/**
 * @brief Set the ASID of a particular ring in the RASID register.
 *
 * @param asid ASID to be set.
 * @param ring Ring whose ASID is to be set.
 */
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t ring)
{
	uint32_t rasid = xtensa_rasid_get();

	rasid = (rasid & ~(0xffU << (ring * 8))) | ((uint32_t)asid << (ring * 8));

	xtensa_rasid_set(rasid);
}
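
/*
 * Illustrative sketch, not part of this header's API: programming the
 * shared ASID into the ring reserved for mappings common to the kernel
 * and all memory domains, using the constants defined above.
 */
static inline void xtensa_mmu_example_shared_asid(void)
{
	xtensa_rasid_asid_set(XTENSA_MMU_SHARED_ASID, XTENSA_MMU_SHARED_RING);
}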

/**
 * @brief Invalidate a particular instruction TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry)
{
	__asm__ volatile("iitlb %0\n\t"
			: : "a" (entry));
}

/**
 * @brief Synchronously invalidate a particular instruction TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry)
{
	__asm__ volatile("iitlb %0\n\t"
			"isync\n\t"
			: : "a" (entry));
}

/**
 * @brief Synchronously invalidate a particular data TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry)
{
	__asm__ volatile("idtlb %0\n\t"
			"dsync\n\t"
			: : "a" (entry));
}

/**
 * @brief Invalidate a particular data TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate(uint32_t entry)
{
	__asm__ volatile("idtlb %0\n\t"
			: : "a" (entry));
}

/**
 * @brief Synchronously write to a particular data TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t entry)
{
	__asm__ volatile("wdtlb %0, %1\n\t"
			"dsync\n\t"
			: : "a" (pte), "a"(entry));
}

/**
 * @brief Write to a particular data TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_write(uint32_t pte, uint32_t entry)
{
	__asm__ volatile("wdtlb %0, %1\n\t"
			: : "a" (pte), "a"(entry));
}

/**
 * @brief Write to a particular instruction TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_write(uint32_t pte, uint32_t entry)
{
	__asm__ volatile("witlb %0, %1\n\t"
			: : "a" (pte), "a"(entry));
}

/**
 * @brief Synchronously write to a particular instruction TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_write_sync(uint32_t pte, uint32_t entry)
{
	__asm__ volatile("witlb %0, %1\n\t"
			"isync\n\t"
			: : "a" (pte), "a"(entry));
}

/**
 * @brief Invalidate all autorefill DTLB and ITLB entries.
 *
 * This should be used carefully since it invalidates all autorefill
 * entries in both the data and instruction TLBs. At least two pages,
 * the current code page and the current stack, will be repopulated by
 * this code as it returns.
 *
 * This needs to be called in any circumstance where the mappings for
 * a previously-used page table change. It does not need to be called
 * on context switch, where ASID tagging isolates entries for us.
 */
static inline void xtensa_tlb_autorefill_invalidate(void)
{
	uint8_t way, i, entries;

	entries = BIT(MAX(XCHAL_ITLB_ARF_ENTRIES_LOG2,
			XCHAL_DTLB_ARF_ENTRIES_LOG2));

	for (way = 0; way < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; way++) {
		for (i = 0; i < entries; i++) {
			uint32_t entry = way + (i << XTENSA_MMU_PTE_PPN_SHIFT);

			xtensa_dtlb_entry_invalidate(entry);
			xtensa_itlb_entry_invalidate(entry);
		}
	}
	__asm__ volatile("isync");
}
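
/*
 * Illustrative usage, not part of this header's API: after rewriting
 * entries in a page table that is already live, stale autorefill
 * entries must be dropped so the hardware refills from the updated
 * table on the next miss.
 */
static inline void xtensa_mmu_example_after_table_update(void)
{
	/* ... page table entries were just modified ... */
	xtensa_tlb_autorefill_invalidate();
}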

/**
 * @brief Set the page tables.
 *
 * The page tables are set by writing their address to the PTEVADDR register.
 *
 * @param ptables The page tables address (virtual address)
 */
static ALWAYS_INLINE void xtensa_ptevaddr_set(void *ptables)
{
	__asm__ volatile("wsr.ptevaddr %0" : : "a"((uint32_t)ptables));
}

/**
 * @brief Get the current page tables.
 *
 * The page tables address is obtained by reading the PTEVADDR register.
 *
 * @return The page tables address (virtual address)
 */
static ALWAYS_INLINE void *xtensa_ptevaddr_get(void)
{
	uint32_t ptables;

	__asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables));

	return (void *)(ptables & XTENSA_MMU_PTEBASE_MASK);
}
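
/*
 * Illustrative sketch, not part of this header's API: the PTEVADDR
 * register round-trips through the two helpers above. Only the 4 MB
 * aligned base covered by XTENSA_MMU_PTEBASE_MASK is preserved, so the
 * configured XTENSA_MMU_PTEVADDR reads back unchanged (assuming, as
 * required, that the configured address is 4 MB aligned).
 */
static inline int xtensa_mmu_example_ptevaddr_roundtrip(void)
{
	void *base = (void *)XTENSA_MMU_PTEVADDR;

	xtensa_ptevaddr_set(base);

	return xtensa_ptevaddr_get() == base;
}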

/**
 * @brief Get the virtual address associated with a particular data TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE void *xtensa_dtlb_vaddr_read(uint32_t entry)
{
	uint32_t vaddr;

	__asm__ volatile("rdtlb0 %0, %1\n\t" : "=a" (vaddr) : "a" (entry));
	return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
}

/**
 * @brief Get the physical address associated with a particular data TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE uint32_t xtensa_dtlb_paddr_read(uint32_t entry)
{
	uint32_t paddr;

	__asm__ volatile("rdtlb1 %0, %1\n\t" : "=a" (paddr) : "a" (entry));
	return (paddr & XTENSA_MMU_PTE_PPN_MASK);
}

/**
 * @brief Get the virtual address associated with a particular instruction TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE void *xtensa_itlb_vaddr_read(uint32_t entry)
{
	uint32_t vaddr;

	__asm__ volatile("ritlb0 %0, %1\n\t" : "=a" (vaddr), "+a" (entry));
	return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
}

/**
 * @brief Get the physical address associated with a particular instruction TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE uint32_t xtensa_itlb_paddr_read(uint32_t entry)
{
	uint32_t paddr;

	__asm__ volatile("ritlb1 %0, %1\n\t" : "=a" (paddr), "+a" (entry));
	return (paddr & XTENSA_MMU_PTE_PPN_MASK);
}
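
/*
 * Illustrative sketch, not part of this header's API: reading back the
 * translation held by one data TLB entry, e.g. for a debug dump. The
 * entry argument follows the ISA entry/way encoding referenced above.
 */
static inline void xtensa_mmu_example_dtlb_read(uint32_t entry,
						void **vaddr, uint32_t *paddr)
{
	*vaddr = xtensa_dtlb_vaddr_read(entry);
	*paddr = xtensa_dtlb_paddr_read(entry);
}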

/**
 * @brief Probe for instruction TLB entry from a virtual address.
 *
 * @param vaddr Virtual address.
 *
 * @return Return of the PITLB instruction.
 */
static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr)
{
	uint32_t ret;

	__asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr));
	return ret;
}

/**
 * @brief Probe for data TLB entry from a virtual address.
 *
 * @param vaddr Virtual address.
 *
 * @return Return of the PDTLB instruction.
 */
static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr)
{
	uint32_t ret;

	__asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr));
	return ret;
}

/**
 * @brief Invalidate an instruction TLB entry associated with a virtual address.
 *
 * This invalidates the instruction TLB entry associated with a virtual address
 * if such a TLB entry exists. Otherwise, it does nothing.
 *
 * @param vaddr Virtual address.
 */
static inline void xtensa_itlb_vaddr_invalidate(void *vaddr)
{
	uint32_t entry = xtensa_itlb_probe(vaddr);

	if (entry & XTENSA_MMU_PITLB_HIT) {
		xtensa_itlb_entry_invalidate_sync(entry);
	}
}

/**
 * @brief Invalidate a data TLB entry associated with a virtual address.
 *
 * This invalidates the data TLB entry associated with a virtual address
 * if such a TLB entry exists. Otherwise, it does nothing.
 *
 * @param vaddr Virtual address.
 */
static inline void xtensa_dtlb_vaddr_invalidate(void *vaddr)
{
	uint32_t entry = xtensa_dtlb_probe(vaddr);

	if (entry & XTENSA_MMU_PDTLB_HIT) {
		xtensa_dtlb_entry_invalidate_sync(entry);
	}
}
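
/*
 * Illustrative sketch, not part of this header's API: when a single
 * page's mapping changes, both TLBs may hold a stale translation for
 * it, so the instruction and data sides are probed and invalidated
 * together.
 */
static inline void xtensa_mmu_example_page_remapped(void *vaddr)
{
	xtensa_itlb_vaddr_invalidate(vaddr);
	xtensa_dtlb_vaddr_invalidate(vaddr);
}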

/**
 * @brief Tell the hardware to use a page table for the very first time after boot.
 *
 * @param l1_page Pointer to the page table to be used.
 */
void xtensa_init_paging(uint32_t *l1_page);

/**
 * @brief Switch to a new page table.
 *
 * @param asid The ASID of the memory domain associated with the incoming page table.
 * @param l1_page Page table to be switched to.
 */
void xtensa_set_paging(uint32_t asid, uint32_t *l1_page);

/**
 * @}
 */

#endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ */
