/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define FIRST_USER_ADDRESS      0UL

#define _PAGE_BIT_PRESENT       0       /* is present */
#define _PAGE_BIT_RW            1       /* writeable */
#define _PAGE_BIT_USER          2       /* userspace addressable */
#define _PAGE_BIT_PWT           3       /* page write through */
#define _PAGE_BIT_PCD           4       /* page cache disabled */
#define _PAGE_BIT_ACCESSED      5       /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY         6       /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE           7       /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT           7       /* on 4KB pages */
#define _PAGE_BIT_GLOBAL        8       /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1        9       /* available for programmer */
#define _PAGE_BIT_SOFTW2        10      /* " */
#define _PAGE_BIT_SOFTW3        11      /* " */
#define _PAGE_BIT_PAT_LARGE     12      /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4        58      /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0     59      /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1     60      /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2     61      /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3     62      /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX            63      /* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL       _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST      _PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP       _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY    _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP        _PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE      _PAGE_BIT_GLOBAL
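
/*
 * Illustrative note (a sketch, not a definition used elsewhere in this
 * file): since the hardware ignores the other bits when Present is
 * clear, the Global bit can be reused to tag PROT_NONE mappings, and
 * present-ness is then expected to be tested along the lines of
 *
 *	pte_flags(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)
 */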

#define _PAGE_PRESENT   (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW        (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER      (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT       (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD       (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED  (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY     (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE       (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL    (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3    (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT       (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL   (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST  (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
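
/*
 * Sketch (not used below): with the mask above, the 4-bit protection
 * key of an entry can be recovered as
 *
 *	(pte_val(pte) & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0
 *
 * which collapses to 0 when CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is
 * disabled, since all four bits are then defined as 0.
 */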

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY        (_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86 bits 1-4 are *not* involved in
 * swap entry computation, but bit 7 is used for THP migration, so we
 * borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as the swap dirty page
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
#endif

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define _PAGE_UFFD_WP           (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
#define _PAGE_SWP_UFFD_WP       _PAGE_USER
#else
#define _PAGE_UFFD_WP           (_AT(pteval_t, 0))
#define _PAGE_SWP_UFFD_WP       (_AT(pteval_t, 0))
#endif
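
/*
 * Note that both swap-borrowed bits above (_PAGE_SWP_SOFT_DIRTY is
 * bit 1, _PAGE_SWP_UFFD_WP is bit 2) fall in the 1-4 range that, per
 * the comment above, the x86 swap entry format leaves free.
 */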

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX        (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP    (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#else
#define _PAGE_NX        (_AT(pteval_t, 0))
#define _PAGE_DEVMAP    (_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE  (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * Set of bits not changed in pte_modify. The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK  (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC |  \
			 _PAGE_UFFD_WP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
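
/*
 * Rough sketch of how this mask is consumed (pte_modify() itself lives
 * in pgtable.h; this is an approximation of its effect, not its body):
 *
 *	val = (pte_val(pte) & _PAGE_CHG_MASK) |
 *	      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 */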

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB       = 0,
	_PAGE_CACHE_MODE_WC       = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC       = 3,
	_PAGE_CACHE_MODE_WT       = 4,
	_PAGE_CACHE_MODE_WP       = 5,

	_PAGE_CACHE_MODE_NUM      = 8
};
#endif

#define _PAGE_ENC               (_AT(pteval_t, sme_me_mask))

#define _PAGE_CACHE_MASK        (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_LARGE_CACHE_MASK  (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE           (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP          (cachemode2protval(_PAGE_CACHE_MODE_WP))
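
/*
 * With the power-on PAT programming, the (PAT, PCD, PWT) index selects
 * WB, WT, UC- and UC for indices 0-3, which is consistent with
 * _PAGE_CACHE_MODE_WB being index 0 (all bits clear) above. The actual
 * translation is performed at runtime by cachemode2protval(), declared
 * later in this file.
 */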

#define __PP _PAGE_PRESENT
#define __RW _PAGE_RW
#define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define ___D _PAGE_DIRTY
#define ___G _PAGE_GLOBAL
#define __NX _PAGE_NX

#define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE

#define pgprot_val(x)   ((x).pgprot)
#define __pgprot(x)     ((pgprot_t) { (x) } )
#define __pg(x)         __pgprot(x)

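/*
 * Column legend for the protection tables below, using the
 * abbreviations defined above:
 *
 *	PRESENT | RW | USER | ACCESSED | NX | DIRTY | PSE | GLOBAL
 *
 * with optional trailing cache/encryption bits (__NC, __WP, _ENC).
 */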
#define PAGE_NONE               __pg(   0|   0|   0|___A|   0|   0|   0|___G)
#define PAGE_SHARED             __pg(__PP|__RW|_USR|___A|__NX|   0|   0|   0)
#define PAGE_SHARED_EXEC        __pg(__PP|__RW|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY_NOEXEC        __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_COPY_EXEC          __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
#define PAGE_COPY               __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY           __pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
#define PAGE_READONLY_EXEC      __pg(__PP|   0|_USR|___A|   0|   0|   0|   0)

#define __PAGE_KERNEL            (__PP|__RW|   0|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_EXEC       (__PP|__RW|   0|___A|   0|___D|   0|___G)
#define _KERNPG_TABLE_NOENC      (__PP|__RW|   0|___A|   0|___D|   0|   0)
#define _KERNPG_TABLE            (__PP|__RW|   0|___A|   0|___D|   0|   0| _ENC)
#define _PAGE_TABLE_NOENC        (__PP|__RW|_USR|___A|   0|___D|   0|   0)
#define _PAGE_TABLE              (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
#define __PAGE_KERNEL_RO         (__PP|   0|   0|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_ROX        (__PP|   0|   0|___A|   0|___D|   0|___G)
#define __PAGE_KERNEL_NOCACHE    (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
#define __PAGE_KERNEL_VVAR       (__PP|   0|_USR|___A|__NX|___D|   0|___G)
#define __PAGE_KERNEL_LARGE      (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW|   0|___A|   0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP         (__PP|__RW|   0|___A|__NX|___D|   0|___G| __WP)


#define __PAGE_KERNEL_IO                __PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE        __PAGE_KERNEL_NOCACHE


#ifndef __ASSEMBLY__

#define __PAGE_KERNEL_ENC       (__PAGE_KERNEL    | _ENC)
#define __PAGE_KERNEL_ENC_WP    (__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC     (__PAGE_KERNEL    |    0)
#define __PAGE_KERNEL_NOENC_WP  (__PAGE_KERNEL_WP |    0)

#define __pgprot_mask(x)        __pgprot((x) & __default_kernel_pte_mask)
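
/*
 * __default_kernel_pte_mask (declared near the end of this file) is
 * expected to have bits the CPU cannot use, such as _PAGE_NX or
 * _PAGE_GLOBAL, cleared at boot, so filtering through __pgprot_mask()
 * keeps the PAGE_KERNEL* protections legal on such CPUs.
 */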

#define PAGE_KERNEL             __pgprot_mask(__PAGE_KERNEL            | _ENC)
#define PAGE_KERNEL_NOENC       __pgprot_mask(__PAGE_KERNEL            |    0)
#define PAGE_KERNEL_RO          __pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
#define PAGE_KERNEL_EXEC        __pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
#define PAGE_KERNEL_EXEC_NOENC  __pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
#define PAGE_KERNEL_ROX         __pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
#define PAGE_KERNEL_NOCACHE     __pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
#define PAGE_KERNEL_LARGE       __pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
#define PAGE_KERNEL_LARGE_EXEC  __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR        __pgprot_mask(__PAGE_KERNEL_VVAR       | _ENC)

#define PAGE_KERNEL_IO          __pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE  __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)

#endif  /* __ASSEMBLY__ */

/* xwr */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY_EXEC
#define __P101  PAGE_READONLY_EXEC
#define __P110  PAGE_COPY_EXEC
#define __P111  PAGE_COPY_EXEC

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY_EXEC
#define __S101  PAGE_READONLY_EXEC
#define __S110  PAGE_SHARED_EXEC
#define __S111  PAGE_SHARED_EXEC
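
/*
 * Worked example: a private PROT_READ|PROT_WRITE mapping indexes
 * __P011, i.e. PAGE_COPY, which leaves _PAGE_RW clear so that the
 * first write faults and can be handled as copy-on-write; the shared
 * counterpart __S011 is PAGE_SHARED, with _PAGE_RW set.
 */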

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC  __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR   0x003          /* PRESENT+RW */
#define PDE_IDENT_ATTR   0x063          /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR   0x001          /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK            ((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK          (~PTE_PFN_MASK)

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) | _PAGE_NX);
}
#define pgprot_nx pgprot_nx

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we
 * can't use it here.
 */

#define PGD_PAE_PAGE_MASK       ((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK       (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are Reserved MBZ
 */
#define PGD_ALLOWED_BITS        (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
				 _PAGE_PWT | _PAGE_PCD | \
				 _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS        (~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
typedef struct { pmdval_t pmd; } pmd_t;

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

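/*
 * For the pud/pmd helpers below: a leaf entry with _PAGE_PSE set maps
 * a 1 GiB or 2 MiB page, so fewer bits hold the PFN (and bit 12 holds
 * PAT instead), hence the PFN/flags split depends on the entry.
 */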
static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define __pte2cm_idx(cb)				\
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) |		\
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) |		\
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i)					\
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) |		\
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) |		\
	 (((i) & 1) << _PAGE_BIT_PWT))
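
/*
 * Worked example: __pte2cm_idx(_PAGE_PCD | _PAGE_PWT) packs
 * (PAT=0, PCD=1, PWT=1) into index 3, i.e. _PAGE_CACHE_MODE_UC under
 * the default PAT layout; __cm_idx2pte(3) performs the inverse.
 */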

unsigned long cachemode2protval(enum page_cache_mode pcm);

static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}
static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
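
/*
 * Worked example: on 2 MiB/1 GiB entries bit 7 means PSE, so PAT moves
 * to bit 12; accordingly protval_4k_2_large(_PAGE_PAT) yields
 * _PAGE_PAT_LARGE, and protval_large_2_4k() undoes the move.
 */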


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine     pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough     pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init        paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
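
/*
 * Usage sketch (assuming 'addr' is a mapped kernel virtual address):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		... the returned "pte" is really the PSE pmd entry ...
 */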

struct mm_struct;
extern pte_t *lookup_address_in_mm(struct mm_struct *mm, unsigned long address,
				   unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
#endif  /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */