/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in hash-4k.h and hash-64k.h.
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
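/*
 * Each page-table level contributes its index width to the number of
 * translatable effective-address bits, plus PAGE_SHIFT for the byte
 * offset within the final page.  The H_*_INDEX_SIZE values come from
 * hash-4k.h or hash-64k.h, so the total depends on the base page size.
 */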

/*
 * We store the slot details in the second half of the page table.
 * Increase the PUD-level table size so that hugetlb PTEs can be stored
 * at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
/*
 * Define the address range of the kernel non-linear virtual area.
 */
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
#define H_KERN_VIRT_SIZE  ASM_CONST(0x0000400000000000) /* 64T */

/*
 * The vmalloc space starts at the beginning of that region and
 * occupies the first 56T of it; the remainder is left for the kernel
 * IO region, which starts at H_VMALLOC_END.
 */
#define H_VMALLOC_START	H_KERN_VIRT_START
#define H_VMALLOC_SIZE	ASM_CONST(0x380000000000) /* 56T */
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START	H_VMALLOC_END
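/*
 * Resulting layout of the 64T kernel virtual region (derived from the
 * constants above):
 *
 *	0xD000000000000000	H_VMALLOC_START		vmalloc space (56T)
 *	0xD380000000000000	H_KERN_IO_START		IO mappings (8T)
 *	0xD400000000000000	end of the region
 */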

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)
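/*
 * REGION_ID() extracts the top nibble of an effective address, so for
 * example REGION_ID(H_VMALLOC_START) is 0xd and any user address
 * yields 0.
 */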

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)
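/*
 * With VMEMMAP_REGION_ID = 0xf this places the vmemmap area at
 * 0xF000000000000000.
 */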

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7

/*
 * A pmd/pud entry points to the next-level page table and must be
 * aligned to that table's size; any of the low alignment bits being
 * set marks the entry as bad.
 */
#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
#ifndef __ASSEMBLY__
#define	hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define	hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	/* An empty PGD entry is treated as bad. */
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					 unsigned long addr,
					 pte_t *ptep, unsigned long clr,
					 unsigned long set,
					 int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

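	/*
	 * ldarx/stdcx. retry loop: spin while H_PAGE_BUSY is set (the
	 * hash miss code owns the PTE), then atomically clear the @clr
	 * bits and OR in the @set bits, retrying if the reservation is
	 * lost.
	 */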
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
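/*
 * A get-and-clear style operation can, for example, be built on top of
 * hash__pte_update() by passing clr = ~0UL and set = 0: the old PTE
 * value is returned and the entry is left empty.
 */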

/*
 * Set the dirty and/or accessed bits atomically in a Linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}
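/*
 * Note that the sequence above only ever sets bits.  Anything that
 * needs to clear bits in a PTE must go through hash__pte_update() so
 * that a stale hash page table entry gets flushed.
 */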

/*
 * Two PTEs are considered equal if they differ only in the hash status
 * bits (_PAGE_HPTEFLAGS), which the hash miss code may update behind
 * our back.
 */
static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

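/*
 * Look up the global hash-table slot of the HPTE backing the given
 * (sub)page, using the slot bits cached in the real PTE.
 */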
unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low level function performs the actual PTE insertion.  Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * On 64-bit Book3S this is always a simple store; the hash page
	 * table is only populated lazily, at hash-fault time.  The
	 * percpu argument is unused here.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
			     unsigned long flags);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
					      unsigned long page_size,
					      unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
				     unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */