/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pgtable.h: SpitFire page table operations.
 *
 * Copyright 1996,1997 David S. Miller (davem@caip.rutgers.edu)
 * Copyright 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#ifndef _SPARC64_PGTABLE_H
#define _SPARC64_PGTABLE_H

/* This file contains the functions and defines necessary to modify and use
 * the SpitFire page tables.
 */

#include <asm-generic/pgtable-nop4d.h>
#include <linux/compiler.h>
#include <linux/const.h>
#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/adi.h>
#include <asm/page.h>
#include <asm/processor.h>

/* The kernel image occupies 0x400000 to 0x6000000 (4MB --> 96MB).
 * The page copy blockops can use 0x6000000 to 0x8000000.
 * The 8K TSB is mapped in the 0x8000000 to 0x8400000 range.
 * The 4M TSB is mapped in the 0x8400000 to 0x8800000 range.
 * The PROM resides in an area spanning 0xf0000000 to 0x100000000.
 * The vmalloc area spans 0x100000000 to 0x200000000.
 * Since modules need to be in the lowest 32-bits of the address space,
 * we place them right before the OBP area from 0x10000000 to 0xf0000000.
 * There is a single static kernel PMD which maps from 0x0 to address
 * 0x400000000.
 */
#define TLBTEMP_BASE _AC(0x0000000006000000,UL)
#define TSBMAP_8K_BASE _AC(0x0000000008000000,UL)
#define TSBMAP_4M_BASE _AC(0x0000000008400000,UL)
#define MODULES_VADDR _AC(0x0000000010000000,UL)
#define MODULES_LEN _AC(0x00000000e0000000,UL)
#define MODULES_END _AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
#define VMALLOC_START _AC(0x0000000100000000,UL)
#define VMEMMAP_BASE VMALLOC_END

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PMD_BITS (PAGE_SHIFT - 3)

/* PUD_SHIFT determines the size of the area a third-level page
 * table can map
 */
#define PUD_SHIFT (PMD_SHIFT + PMD_BITS)
#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE-1))
#define PUD_BITS (PAGE_SHIFT - 3)

/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
#define PGDIR_SHIFT (PUD_SHIFT + PUD_BITS)
#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PGDIR_BITS (PAGE_SHIFT - 3)
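
/* Illustrative arithmetic, assuming the default 8K base page (PAGE_SHIFT == 13):
 * each page-table page then holds 2^(13 - 3) = 1024 eight-byte entries, so
 *
 *   PMD_SHIFT   = 13 + 10 = 23  ->  one PMD entry maps 8MB
 *   PUD_SHIFT   = 23 + 10 = 33  ->  one PUD entry maps 8GB
 *   PGDIR_SHIFT = 33 + 10 = 43  ->  one PGD entry maps 8TB
 *
 * and a full four-level walk covers 43 + 10 = 53 bits of virtual address,
 * which is exactly what the sanity check below demands.
 */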

#if (MAX_PHYS_ADDRESS_BITS > PGDIR_SHIFT + PGDIR_BITS)
#error MAX_PHYS_ADDRESS_BITS exceeds what kernel page tables can support
#endif

#if (PGDIR_SHIFT + PGDIR_BITS) != 53
#error Page table parameters do not cover virtual address space properly.
#endif

#if (PMD_SHIFT != HPAGE_SHIFT)
#error PMD_SHIFT must equal HPAGE_SHIFT for transparent huge pages.
#endif

#ifndef __ASSEMBLY__

extern unsigned long VMALLOC_END;

#define vmemmap ((struct page *)VMEMMAP_BASE)

#include <linux/sched.h>

bool kern_addr_valid(unsigned long addr);

/* Entries per page directory level. */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PMD (1UL << PMD_BITS)
#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)

/* Kernel has a separate 44bit address space. */
#define FIRST_USER_ADDRESS 0UL

#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
               __FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
#define pud_ERROR(e) \
        pr_err("%s:%d: bad pud %p(%016lx) seen at (%pS)\n", \
               __FILE__, __LINE__, &(e), pud_val(e), __builtin_return_address(0))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %p(%016lx) seen at (%pS)\n", \
               __FILE__, __LINE__, &(e), pgd_val(e), __builtin_return_address(0))

#endif /* !(__ASSEMBLY__) */

/* PTE bits which are the same in SUN4U and SUN4V format. */
#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit uptodate */
#define _PAGE_SPECIAL _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_PUD_HUGE _PAGE_PMD_HUGE

/* SUN4U pte bits... */
#define _PAGE_SZ4MB_4U _AC(0x6000000000000000,UL) /* 4MB Page */
#define _PAGE_SZ512K_4U _AC(0x4000000000000000,UL) /* 512K Page */
#define _PAGE_SZ64K_4U _AC(0x2000000000000000,UL) /* 64K Page */
#define _PAGE_SZ8K_4U _AC(0x0000000000000000,UL) /* 8K Page */
#define _PAGE_NFO_4U _AC(0x1000000000000000,UL) /* No Fault Only */
#define _PAGE_IE_4U _AC(0x0800000000000000,UL) /* Invert Endianness */
#define _PAGE_SOFT2_4U _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
#define _PAGE_SPECIAL_4U _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE_4U _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_RES1_4U _AC(0x0002000000000000,UL) /* Reserved */
#define _PAGE_SZ32MB_4U _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
#define _PAGE_SZ256MB_4U _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
#define _PAGE_SZALL_4U _AC(0x6001000000000000,UL) /* All pgsz bits */
#define _PAGE_SN_4U _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
#define _PAGE_RES2_4U _AC(0x0000780000000000,UL) /* Reserved */
#define _PAGE_PADDR_4U _AC(0x000007FFFFFFE000,UL) /* (Cheetah) pa[42:13] */
#define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */
#define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */
#define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */
#define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
#define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */
#define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */
#define _PAGE_PRESENT_4U _AC(0x0000000000000080,UL) /* Present */
#define _PAGE_L_4U _AC(0x0000000000000040,UL) /* Locked TTE */
#define _PAGE_CP_4U _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4U _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
#define _PAGE_E_4U _AC(0x0000000000000008,UL) /* side-Effect */
#define _PAGE_P_4U _AC(0x0000000000000004,UL) /* Privileged Page */
#define _PAGE_W_4U _AC(0x0000000000000002,UL) /* Writable */

/* SUN4V pte bits... */
#define _PAGE_NFO_4V _AC(0x4000000000000000,UL) /* No Fault Only */
#define _PAGE_SOFT2_4V _AC(0x3F00000000000000,UL) /* Software bits, set 2 */
#define _PAGE_MODIFIED_4V _AC(0x2000000000000000,UL) /* Modified (dirty) */
#define _PAGE_ACCESSED_4V _AC(0x1000000000000000,UL) /* Accessed (ref'd) */
#define _PAGE_READ_4V _AC(0x0800000000000000,UL) /* Readable SW Bit */
#define _PAGE_WRITE_4V _AC(0x0400000000000000,UL) /* Writable SW Bit */
#define _PAGE_SPECIAL_4V _AC(0x0200000000000000,UL) /* Special page */
#define _PAGE_PMD_HUGE_4V _AC(0x0100000000000000,UL) /* Huge page */
#define _PAGE_PADDR_4V _AC(0x00FFFFFFFFFFE000,UL) /* paddr[55:13] */
#define _PAGE_IE_4V _AC(0x0000000000001000,UL) /* Invert Endianness */
#define _PAGE_E_4V _AC(0x0000000000000800,UL) /* side-Effect */
#define _PAGE_CP_4V _AC(0x0000000000000400,UL) /* Cacheable in P-Cache */
#define _PAGE_CV_4V _AC(0x0000000000000200,UL) /* Cacheable in V-Cache */
/* On M7, bit 9 is used to enable MCD corruption detection instead of the CV bit */
#define _PAGE_MCD_4V _AC(0x0000000000000200,UL) /* Memory Corruption */
#define _PAGE_P_4V _AC(0x0000000000000100,UL) /* Privileged Page */
#define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */
#define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */
#define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */
#define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */
#define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */
#define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */
#define _PAGE_SZ2GB_4V _AC(0x0000000000000006,UL) /* 2GB Page */
#define _PAGE_SZ256MB_4V _AC(0x0000000000000005,UL) /* 256MB Page */
#define _PAGE_SZ32MB_4V _AC(0x0000000000000004,UL) /* 32MB Page */
#define _PAGE_SZ4MB_4V _AC(0x0000000000000003,UL) /* 4MB Page */
#define _PAGE_SZ512K_4V _AC(0x0000000000000002,UL) /* 512K Page */
#define _PAGE_SZ64K_4V _AC(0x0000000000000001,UL) /* 64K Page */
#define _PAGE_SZ8K_4V _AC(0x0000000000000000,UL) /* 8K Page */
#define _PAGE_SZALL_4V _AC(0x0000000000000007,UL) /* All pgsz bits */

#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V

#if REAL_HPAGE_SHIFT != 22
#error REAL_HPAGE_SHIFT and _PAGE_SZHUGE_foo must match up
#endif

#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
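
/* Note (illustrative): REAL_HPAGE_SHIFT == 22 corresponds to the 4MB hardware
 * TTE size selected by _PAGE_SZHUGE_4{U,V} above, while the default huge page
 * at the PMD level (HPAGE_SHIFT == PMD_SHIFT == 23, i.e. 8MB with 8K base
 * pages) is backed by two such 4MB TTEs.
 */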

/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
#define __P000 __pgprot(0)
#define __P001 __pgprot(0)
#define __P010 __pgprot(0)
#define __P011 __pgprot(0)
#define __P100 __pgprot(0)
#define __P101 __pgprot(0)
#define __P110 __pgprot(0)
#define __P111 __pgprot(0)

#define __S000 __pgprot(0)
#define __S001 __pgprot(0)
#define __S010 __pgprot(0)
#define __S011 __pgprot(0)
#define __S100 __pgprot(0)
#define __S101 __pgprot(0)
#define __S110 __pgprot(0)
#define __S111 __pgprot(0)

#ifndef __ASSEMBLY__

pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);

unsigned long pte_sz_bits(unsigned long size);

extern pgprot_t PAGE_KERNEL;
extern pgprot_t PAGE_KERNEL_LOCKED;
extern pgprot_t PAGE_COPY;
extern pgprot_t PAGE_SHARED;

/* XXX This ugliness is for the atyfb driver's sparc mmap() support. XXX */
extern unsigned long _PAGE_IE;
extern unsigned long _PAGE_E;
extern unsigned long _PAGE_CACHE;

extern unsigned long pg_iobits;
extern unsigned long _PAGE_ALL_SZ_BITS;

extern struct page *mem_map_zero;
#define ZERO_PAGE(vaddr) (mem_map_zero)

/* PFNs are real physical page numbers. However, mem_map only begins to record
 * per-page information starting at pfn_base. This is to handle systems where
 * the first physical page in the machine is at some huge physical address,
 * such as 4GB. This is common on a partitioned E10000, for example.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
        unsigned long paddr = pfn << PAGE_SHIFT;

        BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
        return __pte(paddr | pgprot_val(prot));
}
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        pte_t pte = pfn_pte(page_nr, pgprot);

        return __pmd(pte_val(pte));
}
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#endif

/* This one can be done with two shifts. */
static inline unsigned long pte_pfn(pte_t pte)
{
        unsigned long ret;

        __asm__ __volatile__(
        "\n661: sllx %1, %2, %0\n"
        " srlx %0, %3, %0\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sllx %1, %4, %0\n"
        " srlx %0, %5, %0\n"
        " .previous\n"
        : "=r" (ret)
        : "r" (pte_val(pte)),
          "i" (21), "i" (21 + PAGE_SHIFT),
          "i" (8), "i" (8 + PAGE_SHIFT));

        return ret;
}
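
/* How the two-shift trick above works (an illustrative note, assuming the
 * usual PAGE_SHIFT of 13): the image is assembled with the sun4u instruction
 * pair, and at boot the .sun4v_2insn_patch entry lets sun4v machines replace
 * the two instructions at label 661 with the pair following ".word 661b".
 * On sun4u the physical address lives in pte bits [42:13], so the sllx by 21
 * pushes bit 42 up to bit 63 (discarding everything above the paddr field)
 * and the srlx by 21 + PAGE_SHIFT then drops the sub-page bits, leaving the
 * PFN.  On sun4v the paddr field is bits [55:13], hence the 8 and
 * 8 + PAGE_SHIFT shift pair instead.
 */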
#define pte_page(x) pfn_to_page(pte_pfn(x))

static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
        unsigned long mask, tmp;

        /* SUN4U: 0x630107ffffffec38 (negated == 0x9cfef800000013c7)
         * SUN4V: 0x33ffffffffffee07 (negated == 0xcc000000000011f8)
         *
         * Even if we use negation tricks the result is still a 6
         * instruction sequence, so don't try to play fancy and just
         * do the most straightforward implementation.
         *
         * Note: We encode this into 3 sun4v 2-insn patch sequences.
         */

        BUILD_BUG_ON(_PAGE_SZBITS_4U != 0UL || _PAGE_SZBITS_4V != 0UL);
        __asm__ __volatile__(
        "\n661: sethi %%uhi(%2), %1\n"
        " sethi %%hi(%2), %0\n"
        "\n662: or %1, %%ulo(%2), %1\n"
        " or %0, %%lo(%2), %0\n"
        "\n663: sllx %1, 32, %1\n"
        " or %0, %1, %0\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%3), %1\n"
        " sethi %%hi(%3), %0\n"
        " .word 662b\n"
        " or %1, %%ulo(%3), %1\n"
        " or %0, %%lo(%3), %0\n"
        " .word 663b\n"
        " sllx %1, 32, %1\n"
        " or %0, %1, %0\n"
        " .previous\n"
        " .section .sun_m7_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%4), %1\n"
        " sethi %%hi(%4), %0\n"
        " .word 662b\n"
        " or %1, %%ulo(%4), %1\n"
        " or %0, %%lo(%4), %0\n"
        " .word 663b\n"
        " sllx %1, 32, %1\n"
        " or %0, %1, %0\n"
        " .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
               _PAGE_CP_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));

        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
}
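
/* A note on pte_modify() above: "mask" selects the bits preserved from the
 * old pte (physical address, cache attributes, dirty/accessed, page size,
 * special and huge-page bits); everything outside it comes from the new
 * protections.  The third, M7-patched constant differs from the plain sun4v
 * one only in dropping _PAGE_CV_4V, since on M7 that bit position is
 * _PAGE_MCD_4V and so is treated as a protection bit (taken from the new
 * pgprot) rather than a cache attribute to be preserved.
 */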

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_modify(pte, newprot);

        return __pmd(pte_val(pte));
}
#endif

static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
        unsigned long val = pgprot_val(prot);

        __asm__ __volatile__(
        "\n661: andn %0, %2, %0\n"
        " or %0, %3, %0\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " andn %0, %4, %0\n"
        " or %0, %5, %0\n"
        " .previous\n"
        " .section .sun_m7_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " andn %0, %6, %0\n"
        " or %0, %5, %0\n"
        " .previous\n"
        : "=r" (val)
        : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
          "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
          "i" (_PAGE_CP_4V));

        return __pgprot(val);
}
/* Various pieces of code check for platform support by ifdef testing
 * on "pgprot_noncached". That's broken and should be fixed, but for
 * now...
 */
#define pgprot_noncached pgprot_noncached

#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                struct page *page, int writable);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: sethi %%uhi(%1), %0\n"
        " sllx %0, 32, %0\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " mov %2, %0\n"
        " nop\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_SZHUGE_4U), "i" (_PAGE_SZHUGE_4V));

        return mask;
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return __pte(pte_val(pte) | __pte_default_huge_mask());
}

static inline bool is_default_hugetlb_pte(pte_t pte)
{
        unsigned long mask = __pte_default_huge_mask();

        return (pte_val(pte) & mask) == mask;
}

static inline bool is_hugetlb_pmd(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PMD_HUGE);
}

static inline bool is_hugetlb_pud(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PUD_HUGE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkhuge(pte);
        pte_val(pte) |= _PAGE_PMD_HUGE;

        return __pmd(pte_val(pte));
}
#endif
#else
static inline bool is_hugetlb_pte(pte_t pte)
{
        return false;
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
        unsigned long val = pte_val(pte), tmp;

        __asm__ __volatile__(
        "\n661: or %0, %3, %0\n"
        " nop\n"
        "\n662: nop\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%4), %1\n"
        " sllx %1, 32, %1\n"
        " .word 662b\n"
        " or %1, %%lo(%4), %1\n"
        " or %0, %1, %0\n"
        " .previous\n"
        : "=r" (val), "=r" (tmp)
        : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
          "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

        return __pte(val);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        unsigned long val = pte_val(pte), tmp;

        __asm__ __volatile__(
        "\n661: andn %0, %3, %0\n"
        " nop\n"
        "\n662: nop\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%4), %1\n"
        " sllx %1, 32, %1\n"
        " .word 662b\n"
        " or %1, %%lo(%4), %1\n"
        " andn %0, %1, %0\n"
        " .previous\n"
        : "=r" (val), "=r" (tmp)
        : "0" (val), "i" (_PAGE_MODIFIED_4U | _PAGE_W_4U),
          "i" (_PAGE_MODIFIED_4V | _PAGE_W_4V));

        return __pte(val);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        unsigned long val = pte_val(pte), mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

        return __pte(val | mask);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        unsigned long val = pte_val(pte), tmp;

        __asm__ __volatile__(
        "\n661: andn %0, %3, %0\n"
        " nop\n"
        "\n662: nop\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%4), %1\n"
        " sllx %1, 32, %1\n"
        " .word 662b\n"
        " or %1, %%lo(%4), %1\n"
        " andn %0, %1, %0\n"
        " .previous\n"
        : "=r" (val), "=r" (tmp)
        : "0" (val), "i" (_PAGE_WRITE_4U | _PAGE_W_4U),
          "i" (_PAGE_WRITE_4V | _PAGE_W_4V));

        return __pte(val);
}

static inline pte_t pte_mkold(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

        mask |= _PAGE_R;

        return __pte(pte_val(pte) & ~mask);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

        mask |= _PAGE_R;

        return __pte(pte_val(pte) | mask);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        pte_val(pte) |= _PAGE_SPECIAL;
        return pte;
}

static inline pte_t pte_mkmcd(pte_t pte)
{
        pte_val(pte) |= _PAGE_MCD_4V;
        return pte;
}

static inline pte_t pte_mknotmcd(pte_t pte)
{
        pte_val(pte) &= ~_PAGE_MCD_4V;
        return pte;
}

static inline unsigned long pte_young(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_ACCESSED_4U), "i" (_PAGE_ACCESSED_4V));

        return (pte_val(pte) & mask);
}

static inline unsigned long pte_dirty(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_MODIFIED_4U), "i" (_PAGE_MODIFIED_4V));

        return (pte_val(pte) & mask);
}

static inline unsigned long pte_write(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: mov %1, %0\n"
        " nop\n"
        " .section .sun4v_2insn_patch, \"ax\"\n"
        " .word 661b\n"
        " sethi %%uhi(%2), %0\n"
        " sllx %0, 32, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_WRITE_4U), "i" (_PAGE_WRITE_4V));

        return (pte_val(pte) & mask);
}

static inline unsigned long pte_exec(pte_t pte)
{
        unsigned long mask;

        __asm__ __volatile__(
        "\n661: sethi %%hi(%1), %0\n"
        " .section .sun4v_1insn_patch, \"ax\"\n"
        " .word 661b\n"
        " mov %2, %0\n"
        " .previous\n"
        : "=r" (mask)
        : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V));

        return (pte_val(pte) & mask);
}

static inline unsigned long pte_present(pte_t pte)
{
        unsigned long val = pte_val(pte);

        __asm__ __volatile__(
        "\n661: and %0, %2, %0\n"
        " .section .sun4v_1insn_patch, \"ax\"\n"
        " .word 661b\n"
        " and %0, %3, %0\n"
        " .previous\n"
        : "=r" (val)
        : "0" (val), "i" (_PAGE_PRESENT_4U), "i" (_PAGE_PRESENT_4V));

        return val;
}

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
        return pte_val(a) & _PAGE_VALID;
}

static inline unsigned long pte_special(pte_t pte)
{
        return pte_val(pte) & _PAGE_SPECIAL;
}

#define pmd_leaf pmd_large
static inline unsigned long pmd_large(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_pfn(pte);
}

#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_write(pte);
}

#define pud_write(pud) pte_write(__pte(pud_val(pud)))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_dirty(pte);
}

static inline unsigned long pmd_young(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_young(pte);
}

static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkold(pte);

        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_wrprotect(pte);

        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkdirty(pte);

        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkclean(pte);

        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkyoung(pte);

        return __pmd(pte_val(pte));
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));

        pte = pte_mkwrite(pte);

        return __pmd(pte_val(pte));
}

static inline pgprot_t pmd_pgprot(pmd_t entry)
{
        unsigned long val = pmd_val(entry);

        return __pgprot(val);
}
#endif

static inline int pmd_present(pmd_t pmd)
{
        return pmd_val(pmd) != 0UL;
}

#define pmd_none(pmd) (!pmd_val(pmd))

/* pmd_bad() is only called on non-trans-huge PMDs. Our encoding is
 * very simple, it's just the physical address. PTE tables are of
 * size PAGE_SIZE so make sure the sub-PAGE_SIZE bits are clear and
 * the top bits outside of the range of any physical address size we
 * support are clear as well. We also validate the physical address itself.
 */
#define pmd_bad(pmd) (pmd_val(pmd) & ~PAGE_MASK)

#define pud_none(pud) (!pud_val(pud))

#define pud_bad(pud) (pud_val(pud) & ~PAGE_MASK)

#define p4d_none(p4d) (!p4d_val(p4d))

#define p4d_bad(p4d) (p4d_val(p4d) & ~PAGE_MASK)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd);
#else
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        *pmdp = pmd;
}
#endif

static inline void pmd_set(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
        unsigned long val = __pa((unsigned long) (ptep));

        pmd_val(*pmdp) = val;
}

#define pud_set(pudp, pmdp) \
        (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp))))
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        pte_t pte = __pte(pmd_val(pmd));
        unsigned long pfn;

        pfn = pte_pfn(pte);

        return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        pte_t pte = __pte(pud_val(pud));
        unsigned long pfn;

        pfn = pte_pfn(pte);

        return ((unsigned long) __va(pfn << PAGE_SHIFT));
}

#define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd))
#define pud_page(pud) virt_to_page((void *)pud_page_vaddr(pud))
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pud_present(pud) (pud_val(pud) != 0U)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
#define p4d_page_vaddr(p4d) \
        ((unsigned long) __va(p4d_val(p4d)))
#define p4d_present(p4d) (p4d_val(p4d) != 0U)
#define p4d_clear(p4dp) (p4d_val(*(p4dp)) = 0UL)

/* only used by the stubbed out hugetlb gup code, should never be called */
#define p4d_page(p4d) NULL

#define pud_leaf pud_large
static inline unsigned long pud_large(pud_t pud)
{
        pte_t pte = __pte(pud_val(pud));

        return pte_val(pte) & _PAGE_PMD_HUGE;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        pte_t pte = __pte(pud_val(pud));

        return pte_pfn(pte);
}

/* Same in both SUN4V and SUN4U. */
#define pte_none(pte) (!pte_val(pte))

#define p4d_set(p4dp, pudp) \
        (p4d_val(*(p4dp)) = (__pa((unsigned long) (pudp))))

/* We cannot include <linux/mm_types.h> at this point yet: */
extern struct mm_struct init_mm;

/* Actual page table PTE updates. */
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm,
                   unsigned int hugepage_shift);

static void maybe_tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                                pte_t *ptep, pte_t orig, int fullmm,
                                unsigned int hugepage_shift)
{
        /* It is more efficient to let flush_tlb_kernel_range()
         * handle init_mm tlb flushes.
         *
         * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
         * and SUN4V pte layout, so this inline test is fine.
         */
        if (likely(mm != &init_mm) && pte_accessible(mm, orig))
                tlb_batch_add(mm, vaddr, ptep, orig, fullmm, hugepage_shift);
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                            unsigned long addr,
                                            pmd_t *pmdp)
{
        pmd_t pmd = *pmdp;
        set_pmd_at(mm, addr, pmdp, __pmd(0UL));
        return pmd;
}

static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte, int fullmm)
{
        pte_t orig = *ptep;

        *ptep = pte;
        maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
}

#define set_pte_at(mm,addr,ptep,pte) \
        __set_pte_at((mm), (addr), (ptep), (pte), 0)

#define pte_clear(mm,addr,ptep) \
        set_pte_at((mm), (addr), (ptep), __pte(0UL))

#define __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
#define pte_clear_not_present_full(mm,addr,ptep,fullmm) \
        __set_pte_at((mm), (addr), (ptep), __pte(0UL), (fullmm))

#ifdef DCACHE_ALIASING_POSSIBLE
#define __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, prot, old_addr, new_addr) \
({ \
        pte_t newpte = (pte); \
        if (tlb_type != hypervisor && pte_present(pte)) { \
                unsigned long this_pfn = pte_pfn(pte); \
 \
                if (pfn_valid(this_pfn) && \
                    (((old_addr) ^ (new_addr)) & (1 << 13))) \
                        flush_dcache_page_all(current->mm, \
                                              pfn_to_page(this_pfn)); \
        } \
        newpte; \
})
#endif
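
/* A note on move_pte() above: with 8K pages, bit 13 is the lowest virtual
 * address bit that selects a D-cache alias on the aliasing sun4u D-caches,
 * so the page only needs flushing when the cache colour of the mapping
 * actually changes; the flush is presumably skipped for tlb_type ==
 * hypervisor because those cpus do not suffer from D-cache aliasing.
 */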

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

void paging_init(void);
unsigned long find_ecache_flush_span(unsigned long size);

struct seq_file;
void mmu_info(struct seq_file *);

struct vm_area_struct;
void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                             pmd_t *pmdp);

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

/* Encode and de-code a swap entry */
#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
#define __swp_entry(type, offset) \
        ( (swp_entry_t) \
          { \
                (((long)(type) << PAGE_SHIFT) | \
                 ((long)(offset) << (PAGE_SHIFT + 8UL))) \
          } )
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
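
/* Illustrative layout of the swap encoding above, with PAGE_SHIFT == 13:
 * bits [12:0] of the swap pte stay clear, the 8-bit swap type sits in bits
 * [20:13], and the swap offset starts at bit 21.
 */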

int page_in_phys_avail(unsigned long paddr);

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits. These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
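
/* For example, with BITS_PER_LONG == 64 the iospace lands in pfn bits
 * [63:60]: MK_IOSPACE_PFN(2, 0x1000) == (2UL << 60) | 0x1000, and GET_PFN()
 * masks those four bits back off.
 */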

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
                    unsigned long, pgprot_t);

void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pte_t pte);

int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
                  unsigned long addr, pte_t oldpte);

#define __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page(struct mm_struct *mm,
                                     struct vm_area_struct *vma,
                                     unsigned long addr,
                                     pte_t pte, pte_t oldpte)
{
        /* If this is a new page being mapped in, there can be no
         * ADI tags stored away for this page. Skip looking for
         * stored tags
         */
        if (pte_none(oldpte))
                return;

        if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
                adi_restore_tags(mm, vma, addr, pte);
}

#define __HAVE_ARCH_UNMAP_ONE
static inline int arch_unmap_one(struct mm_struct *mm,
                                 struct vm_area_struct *vma,
                                 unsigned long addr, pte_t oldpte)
{
        if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
                return adi_save_tags(mm, vma, addr, oldpte);
        return 0;
}

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
                                     unsigned long size, pgprot_t prot)
{
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
        int space = GET_IOSPACE(pfn);
        unsigned long phys_base;

        phys_base = offset | (((unsigned long) space) << 32UL);

        return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

static inline unsigned long __untagged_addr(unsigned long start)
{
        if (adi_capable()) {
                long addr = start;

                /* If userspace has passed a versioned address, kernel
                 * will not find it in the VMAs since it does not store
                 * the version tags in the list of VMAs. Storing version
                 * tags in list of VMAs is impractical since they can be
                 * changed any time from userspace without dropping into
                 * kernel. Any address search in VMAs will be done with
                 * non-versioned addresses. Ensure the ADI version bits
                 * are dropped here by sign extending the last bit before
                 * ADI bits. IOMMU does not implement version tags.
                 */
                return (addr << (long)adi_nbits()) >> (long)adi_nbits();
        }

        return start;
}
#define untagged_addr(addr) \
        ((__typeof__(addr))(__untagged_addr((unsigned long)(addr))))
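
/* Illustrative example of the sign-extending untag above, assuming a 4-bit
 * ADI version tag in the top address bits (adi_nbits() == 4): an address such
 * as 0x7000000012345678 is shifted left and then arithmetically right by 4,
 * which replaces the tag with copies of the bit just below it and gives back
 * the untagged 0x0000000012345678 that VMA lookups expect.
 */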

static inline bool pte_access_permitted(pte_t pte, bool write)
{
        u64 prot;

        if (tlb_type == hypervisor) {
                prot = _PAGE_PRESENT_4V | _PAGE_P_4V;
                if (write)
                        prot |= _PAGE_WRITE_4V;
        } else {
                prot = _PAGE_PRESENT_4U | _PAGE_P_4U;
                if (write)
                        prot |= _PAGE_WRITE_4U;
        }

        return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}
#define pte_access_permitted pte_access_permitted

#include <asm/tlbflush.h>

/* We provide our own get_unmapped_area to cope with VA holes and
 * SHM area cache aliasing for userland.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

/* We provide a special get_unmapped_area for framebuffer mmaps to try to use
 * the largest alignment possible such that larger PTEs can be used.
 */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
                                   unsigned long, unsigned long,
                                   unsigned long);
#define HAVE_ARCH_FB_UNMAPPED_AREA

void sun4v_register_fault_status(void);
void sun4v_ktsb_register(void);
void __init cheetah_ecache_flush_init(void);
void sun4v_patch_tlb_handlers(void);

extern unsigned long cmdline_memory_size;

asmlinkage void do_sparc64_fault(struct pt_regs *regs);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_PGTABLE_H) */