// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <asm/facility.h>
#include <asm/page-states.h>

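/*
 * cmma_flag: 0 - CMMA disabled, 1 - ESSA available,
 * 2 - ESSA and the no-DAT page states (facility 147) available.
 */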
static int cmma_flag = 1;

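/*
 * Parse the "cmma=" kernel command line parameter; any value that
 * kstrtobool() accepts as false (e.g. "cmma=off") disables page hinting.
 */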
static int __init cmma(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		cmma_flag = enabled;
	return 1;
}
__setup("cmma=", cmma);

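/*
 * Probe for the ESSA instruction with an ESSA_GET_STATE request. If
 * the instruction is unavailable, the resulting operation exception
 * is handled via the exception table and the preloaded -EOPNOTSUPP
 * is returned; otherwise rc is cleared to 0.
 */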
static inline int cmma_test_essa(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1");

	/* test ESSA_GET_STATE */
	asm volatile(
		"	.insn	rrf,0xb9ab0000,%1,%1,%2,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "=&d" (rc), "+&d" (tmp)
		: "i" (ESSA_GET_STATE), "0" (-EOPNOTSUPP));
	return rc;
}

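/*
 * Early setup: bail out if "cmma=off" was given or the ESSA probe
 * fails; enable the no-DAT page states if facility 147 is installed.
 */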
void __init cmma_init(void)
{
	if (!cmma_flag)
		return;
	if (cmma_test_essa()) {
		cmma_flag = 0;
		return;
	}
	if (test_facility(147))
		cmma_flag = 2;
}

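/* Get the current ESSA state (low six bits) of a single 4K page. */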
static inline unsigned char get_page_state(struct page *page)
{
	unsigned char state;

	asm volatile("	.insn	rrf,0xb9ab0000,%0,%1,%2,0"
		     : "=&d" (state)
		     : "a" (page_to_phys(page)),
		       "i" (ESSA_GET_STATE));
	return state & 0x3f;
}

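/*
 * Helpers that issue one ESSA operation per 4K page of an order-sized
 * block to move the pages to the unused, stable, or stable/no-DAT
 * state.
 */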
static inline void set_page_unused(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

static inline void set_page_stable_dat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

static inline void set_page_stable_nodat(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE_NODAT));
}

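/*
 * The mark_kernel_* walkers traverse the kernel page tables from 0 up
 * to MODULES_END and set PG_arch_1 on every page that holds a page
 * table, so that cmma_init_nodat() leaves those pages in the plain
 * stable state; pages in the no-DAT state must not be used for DAT
 * translation tables.
 */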
static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd) || pmd_large(*pmd))
			continue;
		page = virt_to_page(pmd_val(*pmd));
		set_bit(PG_arch_1, &page->flags);
	} while (pmd++, addr = next, addr != end);
}

static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none(*pud) || pud_large(*pud))
			continue;
		if (!pud_folded(*pud)) {
			page = virt_to_page(pud_val(*pud));
			/* a region/segment table spans four 4K pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pmd(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long next;
	struct page *page;
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(*p4d))
			continue;
		if (!p4d_folded(*p4d)) {
			page = virt_to_page(p4d_val(*p4d));
			/* a region/segment table spans four 4K pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_pud(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);
}

static void mark_kernel_pgd(void)
{
	unsigned long addr, next;
	struct page *page;
	pgd_t *pgd;
	int i;

	addr = 0;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, MODULES_END);
		if (pgd_none(*pgd))
			continue;
		if (!pgd_folded(*pgd)) {
			page = virt_to_page(pgd_val(*pgd));
			/* a region/segment table spans four 4K pages */
			for (i = 0; i < 4; i++)
				set_bit(PG_arch_1, &page[i].flags);
		}
		mark_kernel_p4d(pgd, addr, next);
	} while (pgd++, addr = next, addr != MODULES_END);
}

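/*
 * Run once, later in boot, when the kernel page tables and the
 * memblock data are final: flag the page-table pages, then switch
 * every other in-use kernel page to the stable/no-DAT state.
 */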
void __init cmma_init_nodat(void)
{
	struct memblock_region *reg;
	struct page *page;
	unsigned long start, end, ix;

	if (cmma_flag < 2)
		return;
	/* Mark pages used in kernel page tables */
	mark_kernel_pgd();

	/* Set all kernel pages not used for page tables to stable/no-dat */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);
		end = memblock_region_memory_end_pfn(reg);
		page = pfn_to_page(start);
		for (ix = start; ix < end; ix++, page++) {
			if (__test_and_clear_bit(PG_arch_1, &page->flags))
				continue;	/* skip page table pages */
			if (!list_empty(&page->lru))
				continue;	/* skip free pages */
			set_page_stable_nodat(page, 0);
		}
	}
}

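/*
 * Page allocator hooks: report a page block as unused to the
 * hypervisor when it is freed, and make it stable (or stable/no-DAT)
 * again when it is allocated.
 */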
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unused(page, order);
}

void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	if (cmma_flag < 2)
		set_page_stable_dat(page, order);
	else
		set_page_stable_nodat(page, order);
}

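/*
 * Explicit single-page-block state transitions. arch_set_page_dat()
 * is presumably for callers that need a page back in the DAT-capable
 * stable state (e.g. a page about to hold page tables), while
 * arch_set_page_nodat() and arch_test_page_nodat() manage and query
 * the no-DAT state of individual pages.
 */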
void arch_set_page_dat(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable_dat(page, order);
}

void arch_set_page_nodat(struct page *page, int order)
{
	if (cmma_flag < 2)
		return;
	set_page_stable_nodat(page, order);
}

int arch_test_page_nodat(struct page *page)
{
	unsigned char state;

	if (cmma_flag < 2)
		return 0;
	state = get_page_state(page);
	return !!(state & 0x20);
}

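/*
 * Make all free pages in all zones either stable or unused, walking
 * the buddy free lists under the zone lock. Presumably used around
 * hibernation: before writing the image every page must be stable,
 * and afterwards the free pages can be reported as unused again.
 */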
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable_dat(page, order);
				else
					set_page_unused(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}