/*
 * Copyright 2003 PathScale Inc
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */

#ifndef __UM_PGTABLE_3LEVEL_H
#define __UM_PGTABLE_3LEVEL_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopud.h>

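/*
 * With __ARCH_USE_5LEVEL_HACK, asm-generic/pgtable-nopud.h folds the p4d
 * and pud levels away, so the three real levels here are pgd, pmd and pte.
 */
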
/* PGDIR_SHIFT determines what a third-level page table entry can map */

#ifdef CONFIG_64BIT
#define PGDIR_SHIFT	30
#else
#define PGDIR_SHIFT	31
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* PMD_SHIFT determines the size of the area a second-level page table can
 * map
 */

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/*
 * entries per page directory level
 */

#define PTRS_PER_PTE 512
#ifdef CONFIG_64BIT
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 512
#else
#define PTRS_PER_PMD 1024
#define PTRS_PER_PGD 1024
#endif

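/*
 * On 64-bit, the three 512-entry levels plus 4 KiB pages resolve
 * 9 + 9 + 9 + 12 = 39 bits of virtual address: each pmd entry maps 2 MiB
 * and each top-level entry 1 GiB (2 GiB with the 32-bit PGDIR_SHIFT).
 *
 * USER_PTRS_PER_PGD is the number of top-level entries needed to cover
 * TASK_SIZE, rounded up to a whole PGDIR_SIZE.
 */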
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

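/* Diagnostics printed when a corrupt page table entry is encountered. */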
#define pte_ERROR(e) \
        printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pte_val(e))
#define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pmd_val(e))
#define pgd_ERROR(e) \
        printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
	       pgd_val(e))

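/*
 * pud entry helpers.  With the pud level folded these act on top-level
 * entries.  _PAGE_NEWPAGE is a UML software bit tracking entries whose
 * mapping in the host process still has to be updated, so an entry that
 * carries only this bit is still considered empty.
 */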
#define pud_none(x)	(!(pud_val(x) & ~_PAGE_NEWPAGE))
#define	pud_bad(x)	((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pud_present(x)	(pud_val(x) & _PAGE_PRESENT)
#define pud_populate(mm, pud, pmd) \
	set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))

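/*
 * Install a pud entry.  On 64-bit the whole entry is written with a single
 * set_64bit() store; on 32-bit a plain assignment is used.
 */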
#ifdef CONFIG_64BIT
#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
#else
#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
#endif

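/*
 * pgd_newpage() tests and pgd_mkuptodate() clears the _PAGE_NEWPAGE
 * software bit on a top-level entry.
 */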
static inline int pgd_newpage(pgd_t pgd)
{
	return(pgd_val(pgd) & _PAGE_NEWPAGE);
}

static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }

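/* Install a pmd entry; the 64-bit/32-bit split mirrors set_pud() above. */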
#ifdef CONFIG_64BIT
#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
#else
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
#endif

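/*
 * pmd_alloc_one() allocates the page backing a new pmd table; it is
 * implemented in UML's arch code, not in this header.
 */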
struct mm_struct;
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);

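/*
 * Clear a pud entry.  Only _PAGE_NEWPAGE is left set, which marks the
 * range for the host-side mapping update on the next flush.
 */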
static inline void pud_clear (pud_t *pud)
{
	set_pud(pud, __pud(_PAGE_NEWPAGE));
}

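/*
 * Recover the struct page / kernel virtual address of the pmd table a pud
 * entry points to, masking off the low flag bits.
 */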
#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))

/* Find an entry in the second-level page table. */
#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
			pmd_index(address))

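/*
 * Conversions between page frame numbers and table entries.  pte_set_val()
 * is UML's helper for packing a physical address and protection bits into
 * a pte.
 */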
static inline unsigned long pte_pfn(pte_t pte)
{
	return phys_to_pfn(pte_val(pte));
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;
	phys_t phys = pfn_to_phys(page_nr);

	pte_set_val(pte, phys, pgprot);
	return pte;
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
}

#endif