/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 */

#ifndef _ASM_MICROBLAZE_PGTABLE_H
#define _ASM_MICROBLAZE_PGTABLE_H

#include <asm/setup.h>

#ifndef __ASSEMBLY__
extern int mem_init_done;
#endif

#include <asm-generic/pgtable-nopmd.h>

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>

extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */

/*
 * Start and end of the vmalloc area. Make sure to map the vmalloc
 * area above the pinned kernel memory area of 32MB.
 */
#define VMALLOC_START	(CONFIG_KERNEL_START + CONFIG_LOWMEM_SIZE)
#define VMALLOC_END	ioremap_bot

#endif /* __ASSEMBLY__ */

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) \
	(__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
			_PAGE_NO_CACHE))
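
/*
 * Illustrative use (a sketch, not code from this file): a driver needing
 * an uncached, guarded mapping of device registers would build its
 * protection bits as
 *
 *	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
 *
 * pgprot_noncached_wc() differs only in leaving _PAGE_GUARDED clear, so
 * uncached accesses may still be combined.
 */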

/*
 * The MicroBlaze MMU is identical to the PPC-40x MMU, and uses a hash
 * table containing PTEs, together with a set of 16 segment registers, to
 * define the virtual to physical address mapping.
 *
 * We use the hash table as an extended TLB, i.e. a cache of currently
 * active mappings. We maintain a two-level page table tree, much
 * like that used by the i386, for the sake of the Linux memory
 * management code. Low-level assembler code in hashtable.S
 * (procedure hash_page) is responsible for extracting ptes from the
 * tree and putting them into the hash table when necessary, and
 * updating the accessed and modified bits in the page table tree.
 */

/*
 * The MicroBlaze processor has a TLB architecture identical to PPC-40x. The
 * instruction and data sides share a unified, 64-entry, semi-associative
 * TLB which is maintained totally under software control. In addition, the
 * instruction side has a hardware-managed, 2-, 4-, or 8-entry, fully-associative
 * TLB which serves as a first level to the shared TLB. These two TLBs are
 * known as the UTLB and ITLB, respectively (see "mmu.h" for definitions).
 */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus
 */

/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS	(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS	(PTRS_PER_PGD-USER_PGD_PTRS)
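
/*
 * Worked example (assuming the usual 4K pages, i.e. PAGE_SHIFT == 12,
 * and PTE_SHIFT == 10 for the 1024-entry PTE pages described above):
 * PGDIR_SHIFT == 22, each pgd entry then maps a 4MB PGDIR_SIZE, and
 * PTRS_PER_PGD == 1024 entries cover the full 32-bit address space.
 */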

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte "PTE_FMT".\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Bits in a linux-style PTE. These match the bits in the
 * (hardware-defined) PTE as closely as possible.
 */

/*
 * There are several potential gotchas here. The hardware TLBLO
 * field looks like this:
 *
 * 0  1  2  3  4  ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31
 * RPN.....................  0  0 EX WR ZSEL.......  W  I  M  G
 *
 * Where possible we make the Linux PTE bits match up with this
 *
 * - bits 20 and 21 must be cleared, because we use 4k pages (4xx can
 *   support down to 1k pages), this is done in the TLBMiss exception
 *   handler.
 * - We use only zones 0 (for kernel pages) and 1 (for user pages)
 *   of the 16 available. Bit 24-26 of the TLB are cleared in the TLB
 *   miss handler. Bit 27 is PAGE_USER, thus selecting the correct
 *   zone.
 * - PRESENT *must* be in the bottom two bits because swap cache
 *   entries use the top 30 bits. Because 4xx doesn't support SMP
 *   anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30
 *   is cleared in the TLB miss handler before the TLB entry is loaded.
 * - All other bits of the PTE are loaded into TLBLO without
 *   modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
 *   software PTE bits. We actually use bits 21, 24, 25, and
 *   30 respectively for the software bits: ACCESSED, DIRTY, RW, and
 *   PRESENT.
 */

/* Definitions for MicroBlaze. */
#define _PAGE_GUARDED	0x001	/* G: page is guarded from prefetch */
#define _PAGE_PRESENT	0x002	/* software: PTE contains a translation */
#define _PAGE_NO_CACHE	0x004	/* I: caching is inhibited */
#define _PAGE_WRITETHRU	0x008	/* W: caching is write-through */
#define _PAGE_USER	0x010	/* matches one of the zone permission bits */
#define _PAGE_RW	0x040	/* software: Writes permitted */
#define _PAGE_DIRTY	0x080	/* software: dirty page */
#define _PAGE_HWWRITE	0x100	/* hardware: Dirty & RW, set in exception */
#define _PAGE_HWEXEC	0x200	/* hardware: EX permission */
#define _PAGE_ACCESSED	0x400	/* software: R: page referenced */
#define _PMD_PRESENT	PAGE_MASK

/*
 * Some bits are unused...
 */
#ifndef _PAGE_HASHPTE
#define _PAGE_HASHPTE	0
#endif
#ifndef _PTE_NONE_MASK
#define _PTE_NONE_MASK	0
#endif
#ifndef _PAGE_SHARED
#define _PAGE_SHARED	0
#endif
#ifndef _PAGE_EXEC
#define _PAGE_EXEC	0
#endif

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)

#define _PAGE_KERNEL \
	(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC)

#define _PAGE_IO	(_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)

#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X \
	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_SHARED)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_IO)
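
/*
 * Worked example, derived from the bit definitions above:
 * _PAGE_KERNEL = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_RW | _PAGE_DIRTY
 *		| _PAGE_HWWRITE | _PAGE_HWEXEC
 *	       = 0x002 | 0x400 | 0x040 | 0x080 | 0x100 | 0x200 = 0x7c2
 * (_PAGE_SHARED defaults to 0 here, so it contributes nothing).
 */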

/*
 * We consider execute permission the same as read.
 * Also, write permissions imply read permissions.
 */

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* __ASSEMBLY__ */

#define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) \
	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		((pmd_val(pmd) & _PMD_PRESENT) == 0)
#define pmd_present(pmd)	((pmd_val(pmd) & _PMD_PRESENT) != 0)
#define pmd_clear(pmdp)		do { pmd_val(*(pmdp)) = 0; } while (0)

#define pte_page(x)		(mem_map + (unsigned long) \
				((pte_val(x) - memory_start) >> PAGE_SHIFT))
#define PFN_SHIFT_OFFSET	(PAGE_SHIFT)

#define pte_pfn(x)		(pte_val(x) >> PFN_SHIFT_OFFSET)

#define pfn_pte(pfn, prot) \
	__pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) | pgprot_val(prot))
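
/*
 * Since PFN_SHIFT_OFFSET equals PAGE_SHIFT, the pfn lands directly in
 * the RPN field of the hardware PTE. An illustrative round trip
 * (a sketch, not an API defined here):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	BUG_ON(pte_pfn(pte) != pfn);
 */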

#ifndef __ASSEMBLY__
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_exprotect(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_mkclean(pte_t pte)
	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkold(pte_t pte)
	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }

static inline pte_t pte_mkread(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)
	{ pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)
	{ pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)
	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)
	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
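
/*
 * Note that the helpers above take a pte_t by value and return the
 * modified value; none of them touch the page table itself. A typical
 * (illustrative) composition, e.g. when pre-dirtying a page on a
 * write fault:
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte));
 */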

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

static inline pte_t mk_pte_phys(phys_addr_t physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

#define mk_pte(page, pgprot) \
({									\
	pte_t pte;							\
	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + memory_start) | \
			pgprot_val(pgprot);				\
	pte;								\
})

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
	return pte;
}
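
/*
 * Illustrative use of pte_modify() (a sketch): swap in new protection
 * bits while _PAGE_CHG_MASK preserves the pfn and the ACCESSED/DIRTY
 * state, e.g. for an mprotect()-style change:
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 */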

/*
 * Atomic PTE updates.
 *
 * pte_update clears and sets bits atomically, and returns
 * the old pte value. Interrupts are disabled around the
 * load/modify/store sequence, which makes it atomic with respect to
 * exception handlers on this (non-SMP) CPU.
 * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
 * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
 */
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
				unsigned long set)
{
	unsigned long flags, old, tmp;

	raw_local_irq_save(flags);

	__asm__ __volatile__("lw %0, %2, r0\n"		/* old = *p */
				"andn %1, %0, %3\n"	/* tmp = old & ~clr */
				"or %1, %1, %4\n"	/* tmp |= set */
				"sw %1, %2, r0\n"	/* *p = tmp */
			: "=&r" (old), "=&r" (tmp)
			: "r" ((unsigned long)(p + 1) - 4), "r" (clr), "r" (set)
			: "cc");

	raw_local_irq_restore(flags);

	return old;
}

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte)
{
	*ptep = pte;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}

static inline int ptep_test_and_clear_dirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return (pte_update(ptep,
		(_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

/*static inline void ptep_set_wrprotect(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}*/

static inline void ptep_mkdirty(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep)
{
	pte_update(ptep, 0, _PAGE_DIRTY);
}

/*#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)*/

/*
 * Convert pmd entry to page.
 * Our pmd entry is the effective address of a pte table; this returns
 * the effective address of the pte table it points to.
 */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return ((unsigned long) (pmd_val(pmd) & PAGE_MASK));
}

/* returns pfn of the pmd entry */
#define pmd_pfn(pmd)	(__pa(pmd_val(pmd)) >> PAGE_SHIFT)

/* returns struct page * of the pmd entry */
#define pmd_page(pmd)	(pfn_to_page(__pa(pmd_val(pmd)) >> PAGE_SHIFT))

/* Find an entry in the third-level page table.. */

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit, or the _PAGE_HASHPTE bit
 * (if used). -- paulus
 */
#define __swp_type(entry)	((entry).val & 0x3f)
#define __swp_offset(entry)	((entry).val >> 6)
#define __swp_entry(type, offset) \
		((swp_entry_t) { (type) | ((offset) << 6) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> 2 })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << 2 })
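
/*
 * Resulting swap PTE layout, derived from the macros above: bits 0-1
 * stay zero (so pte_present() is false), the 6-bit swap type occupies
 * PTE bits 2-7, and the swap offset occupies bits 8 and up.
 */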

extern unsigned long iopa(unsigned long addr);

/* Values for nocacheflag and cmode */
/* These are not used by the APUS kernel_map, but prevent
 * compilation errors.
 */
#define IOMAP_FULL_CACHING	0
#define IOMAP_NOCACHE_SER	1
#define IOMAP_NOCACHE_NONSER	2
#define IOMAP_NO_COPYBACK	3

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code);

void mapin_ram(void);
int map_page(unsigned long va, phys_addr_t pa, int flags);

extern int mem_init_done;

asmlinkage void __init mmu_init(void);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__
extern unsigned long ioremap_bot, ioremap_base;

void setup_memory(void);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_MICROBLAZE_PGTABLE_H */