// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 *
 * See Documentation/mm/vmemmap_dedup.rst
 */
#define pr_fmt(fmt)	"HugeTLB: " fmt

#include <linux/pgtable.h>
#include <linux/moduleparam.h>
#include <linux/bootmem_info.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "hugetlb_vmemmap.h"

/**
 * struct vmemmap_remap_walk - walk vmemmap page table
 *
 * @remap_pte:		called for each lowest-level entry (PTE).
 * @nr_walked:		the number of PTEs walked so far.
 * @reuse_page:		the page which is reused for the tail vmemmap pages.
 * @reuse_addr:		the virtual address of the @reuse_page page.
 * @vmemmap_pages:	the list head of the vmemmap pages that can be freed
 *			or are mapped from.
 */
struct vmemmap_remap_walk {
	void (*remap_pte)(pte_t *pte, unsigned long addr,
			  struct vmemmap_remap_walk *walk);
	unsigned long nr_walked;
	struct page *reuse_page;
	unsigned long reuse_addr;
	struct list_head *vmemmap_pages;
};

static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	pmd_t __pmd;
	int i;
	unsigned long addr = start;
	struct page *page = pmd_page(*pmd);
	pte_t *pgtable = pte_alloc_one_kernel(&init_mm);

	if (!pgtable)
		return -ENOMEM;

	pmd_populate_kernel(&init_mm, &__pmd, pgtable);

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pte_t entry, *pte;
		pgprot_t pgprot = PAGE_KERNEL;

		entry = mk_pte(page + i, pgprot);
		pte = pte_offset_kernel(&__pmd, addr);
		set_pte_at(&init_mm, addr, pte, entry);
	}

	spin_lock(&init_mm.page_table_lock);
	if (likely(pmd_leaf(*pmd))) {
		/*
		 * Higher order allocations from buddy allocator must be able to
		 * be treated as independent small pages (as they can be freed
		 * individually).
		 */
		if (!PageReserved(page))
			split_page(page, get_order(PMD_SIZE));

		/* Make pte visible before pmd. See comment in pmd_install(). */
		smp_wmb();
		pmd_populate_kernel(&init_mm, pmd, pgtable);
		flush_tlb_kernel_range(start, start + PMD_SIZE);
	} else {
		pte_free_kernel(&init_mm, pgtable);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
{
	int leaf;

	spin_lock(&init_mm.page_table_lock);
	leaf = pmd_leaf(*pmd);
	spin_unlock(&init_mm.page_table_lock);

	if (!leaf)
		return 0;

	return __split_vmemmap_huge_pmd(pmd, start);
}

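/*
 * Walk the PTEs of one PMD range. The first PTE of the entire walk supplies
 * the reuse page (and is skipped); every other PTE is handed to
 * @walk->remap_pte.
 */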
static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr,
			      unsigned long end,
			      struct vmemmap_remap_walk *walk)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);

	/*
	 * The reuse_page is found 'first' in table walk before we start
	 * remapping (which is calling @walk->remap_pte).
	 */
	if (!walk->reuse_page) {
		walk->reuse_page = pte_page(*pte);
		/*
		 * Because the reuse address is part of the range that we are
		 * walking, skip the reuse address range.
		 */
		addr += PAGE_SIZE;
		pte++;
		walk->nr_walked++;
	}

	for (; addr != end; addr += PAGE_SIZE, pte++) {
		walk->remap_pte(pte, addr, walk);
		walk->nr_walked++;
	}
}

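/*
 * Split any huge PMD mapping of the vmemmap into a PTE table before
 * descending, so that individual vmemmap pages can be remapped and freed.
 */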
static int vmemmap_pmd_range(pud_t *pud, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		int ret;

		ret = split_vmemmap_huge_pmd(pmd, addr & PMD_MASK);
		if (ret)
			return ret;

		next = pmd_addr_end(addr, end);
		vmemmap_pte_range(pmd, addr, next, walk);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

static int vmemmap_pud_range(p4d_t *p4d, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(p4d, addr);
	do {
		int ret;

		next = pud_addr_end(addr, end);
		ret = vmemmap_pmd_range(pud, addr, next, walk);
		if (ret)
			return ret;
	} while (pud++, addr = next, addr != end);

	return 0;
}

static int vmemmap_p4d_range(pgd_t *pgd, unsigned long addr,
			     unsigned long end,
			     struct vmemmap_remap_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;

	p4d = p4d_offset(pgd, addr);
	do {
		int ret;

		next = p4d_addr_end(addr, end);
		ret = vmemmap_pud_range(p4d, addr, next, walk);
		if (ret)
			return ret;
	} while (p4d++, addr = next, addr != end);

	return 0;
}

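/*
 * Walk the kernel page table over [@start, @end) and apply @walk->remap_pte
 * to every PTE in the range (the first page is the reuse page and is only
 * recorded, not remapped), then flush the TLB for the remapped portion.
 */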
static int vmemmap_remap_range(unsigned long start, unsigned long end,
			       struct vmemmap_remap_walk *walk)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;

	VM_BUG_ON(!PAGE_ALIGNED(start));
	VM_BUG_ON(!PAGE_ALIGNED(end));

	pgd = pgd_offset_k(addr);
	do {
		int ret;

		next = pgd_addr_end(addr, end);
		ret = vmemmap_p4d_range(pgd, addr, next, walk);
		if (ret)
			return ret;
	} while (pgd++, addr = next, addr != end);

	/*
	 * We only change the mapping of the vmemmap virtual address range
	 * [@start + PAGE_SIZE, end), so we only need to flush the TLB which
	 * belongs to the range.
	 */
	flush_tlb_kernel_range(start + PAGE_SIZE, end);

	return 0;
}

/*
 * Free a vmemmap page. A vmemmap page can be allocated from the memblock
 * allocator or the buddy allocator. If the PG_reserved flag is set, it means
 * that it was allocated from the memblock allocator, so free it via
 * free_bootmem_page(). Otherwise, use __free_page().
 */
static inline void free_vmemmap_page(struct page *page)
{
	if (PageReserved(page))
		free_bootmem_page(page);
	else
		__free_page(page);
}

/* Free a list of the vmemmap pages */
static void free_vmemmap_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		free_vmemmap_page(page);
	}
}

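/*
 * remap_pte callback used when freeing: point the PTE at the shared reuse
 * page and queue the page it used to map on @walk->vmemmap_pages so it can
 * be freed afterwards.
 */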
static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
			      struct vmemmap_remap_walk *walk)
{
	/*
	 * Remap the tail pages as read-only to catch illegal write operations
	 * to the tail pages.
	 */
	pgprot_t pgprot = PAGE_KERNEL_RO;
	pte_t entry = mk_pte(walk->reuse_page, pgprot);
	struct page *page = pte_page(*pte);

	list_add_tail(&page->lru, walk->vmemmap_pages);
	set_pte_at(&init_mm, addr, pte, entry);
}

/*
 * How many struct page structs need to be reset. When we reuse the head
 * struct page, the special metadata (e.g. page->flags or page->mapping)
 * cannot be copied to the tail struct page structs. The invalid values will
 * be caught by free_tail_pages_check(). To avoid a "corrupted mapping in
 * tail page" report, we need to reset at least 3 struct page structs (the
 * head struct page and the first two tail struct pages).
 */
#define NR_RESET_STRUCT_PAGE	3

static inline void reset_struct_pages(struct page *start)
{
	struct page *from = start + NR_RESET_STRUCT_PAGE;

	BUILD_BUG_ON(NR_RESET_STRUCT_PAGE * 2 > PAGE_SIZE / sizeof(struct page));
	memcpy(start, from, sizeof(*from) * NR_RESET_STRUCT_PAGE);
}

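/*
 * remap_pte callback used when restoring: take a freshly allocated page from
 * @walk->vmemmap_pages, fill it with a copy of the reuse page, reset the
 * first NR_RESET_STRUCT_PAGE struct pages, and point the PTE at it again.
 */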
static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
				struct vmemmap_remap_walk *walk)
{
	pgprot_t pgprot = PAGE_KERNEL;
	struct page *page;
	void *to;

	BUG_ON(pte_page(*pte) != walk->reuse_page);

	page = list_first_entry(walk->vmemmap_pages, struct page, lru);
	list_del(&page->lru);
	to = page_to_virt(page);
	copy_page(to, (void *)walk->reuse_addr);
	reset_struct_pages(to);

	/*
	 * Makes sure that preceding stores to the page contents become visible
	 * before the set_pte_at() write.
	 */
	smp_wmb();
	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
}

/**
 * vmemmap_remap_free - remap the vmemmap virtual address range [@start, @end)
 *			to the page which @reuse is mapped to, then free the
 *			vmemmap pages which the range is mapped to.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_free(unsigned long start, unsigned long end,
			      unsigned long reuse)
{
	int ret;
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_remap_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/*
	 * In order to make the remapping routine most efficient for the huge
	 * pages, the routine of vmemmap page table walking has the following
	 * rules (see more details from the vmemmap_pte_range()):
	 *
	 * - The range [@start, @end) and the range [@reuse, @reuse + PAGE_SIZE)
	 *   should be contiguous.
	 * - The @reuse address is part of the range [@reuse, @end) that we are
	 *   walking which is passed to vmemmap_remap_range().
	 * - The @reuse address is the first in the complete range.
	 *
	 * So we need to make sure that @start and @reuse meet the above rules.
	 */
	BUG_ON(start - reuse != PAGE_SIZE);

	mmap_read_lock(&init_mm);
	ret = vmemmap_remap_range(reuse, end, &walk);
	if (ret && walk.nr_walked) {
		end = reuse + walk.nr_walked * PAGE_SIZE;
		/*
		 * vmemmap_pages contains pages from the previous
		 * vmemmap_remap_range call which failed. These
		 * are pages which were removed from the vmemmap.
		 * They will be restored in the following call.
		 */
		walk = (struct vmemmap_remap_walk) {
			.remap_pte	= vmemmap_restore_pte,
			.reuse_addr	= reuse,
			.vmemmap_pages	= &vmemmap_pages,
		};

		vmemmap_remap_range(reuse, end, &walk);
	}
	mmap_read_unlock(&init_mm);

	free_vmemmap_page_list(&vmemmap_pages);

	return ret;
}

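/*
 * Allocate one order-0 page per vmemmap page in [@start, @end) on the node
 * that the struct page at @start belongs to, adding them to @list. On
 * failure, free anything already allocated and return -ENOMEM.
 */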
static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
				   gfp_t gfp_mask, struct list_head *list)
{
	unsigned long nr_pages = (end - start) >> PAGE_SHIFT;
	int nid = page_to_nid((struct page *)start);
	struct page *page, *next;

	while (nr_pages--) {
		page = alloc_pages_node(nid, gfp_mask, 0);
		if (!page)
			goto out;
		list_add_tail(&page->lru, list);
	}

	return 0;
out:
	list_for_each_entry_safe(page, next, list, lru)
		__free_pages(page, 0);
	return -ENOMEM;
}

/**
 * vmemmap_remap_alloc - remap the vmemmap virtual address range [@start, end)
 *			 to freshly allocated pages, one for each vmemmap page
 *			 in the range.
 * @start:	start address of the vmemmap virtual address range that we want
 *		to remap.
 * @end:	end address of the vmemmap virtual address range that we want to
 *		remap.
 * @reuse:	reuse address.
 * @gfp_mask:	GFP flag for allocating vmemmap pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
static int vmemmap_remap_alloc(unsigned long start, unsigned long end,
			       unsigned long reuse, gfp_t gfp_mask)
{
	LIST_HEAD(vmemmap_pages);
	struct vmemmap_remap_walk walk = {
		.remap_pte	= vmemmap_restore_pte,
		.reuse_addr	= reuse,
		.vmemmap_pages	= &vmemmap_pages,
	};

	/* See the comment in the vmemmap_remap_free(). */
	BUG_ON(start - reuse != PAGE_SIZE);

	if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages))
		return -ENOMEM;

	mmap_read_lock(&init_mm);
	vmemmap_remap_range(reuse, end, &walk);
	mmap_read_unlock(&init_mm);

	return 0;
}

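/*
 * Static key (reference counted via static_branch_inc()/dec()) that is
 * enabled while at least one HugeTLB page in the system has its vmemmap
 * optimized. The "hugetlb_free_vmemmap" boot parameter controls whether
 * new HugeTLB pages are optimized at all.
 */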
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);

static bool vmemmap_optimize_enabled = IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
core_param(hugetlb_free_vmemmap, vmemmap_optimize_enabled, bool, 0);

/**
 * hugetlb_vmemmap_restore - restore previously optimized (by
 *			     hugetlb_vmemmap_optimize()) vmemmap pages which
 *			     will be reallocated and remapped.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be restored.
 *
 * Return: %0 if @head's vmemmap pages have been reallocated and remapped,
 * negative error code otherwise.
 */
int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
{
	int ret;
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!HPageVmemmapOptimized(head))
		return 0;

	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * The pages which the vmemmap virtual address range [@vmemmap_start,
	 * @vmemmap_end) are mapped to are freed to the buddy allocator, and
	 * the range is mapped to the page which @vmemmap_reuse is mapped to.
	 * When a HugeTLB page is freed to the buddy allocator, previously
	 * discarded vmemmap pages must be allocated and remapped.
	 */
	ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse,
				  GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE);
	if (!ret) {
		ClearHPageVmemmapOptimized(head);
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	}

	return ret;
}

/* Return true iff a HugeTLB page's vmemmap should and can be optimized. */
static bool vmemmap_should_optimize(const struct hstate *h, const struct page *head)
{
	if (!READ_ONCE(vmemmap_optimize_enabled))
		return false;

	if (!hugetlb_vmemmap_optimizable(h))
		return false;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
		pmd_t *pmdp, pmd;
		struct page *vmemmap_page;
		unsigned long vaddr = (unsigned long)head;

		/*
		 * Only the vmemmap page's vmemmap page can be self-hosted.
		 * Walk the page tables to find the backing page of the
		 * vmemmap page.
		 */
		pmdp = pmd_off_k(vaddr);
		/*
		 * The READ_ONCE() is used to stabilize *pmdp in a register or
		 * on the stack so that it will stop changing under the code.
		 * The only concurrent operation where it can be changed is
		 * split_vmemmap_huge_pmd() (*pmdp will be stable after this
		 * operation).
		 */
		pmd = READ_ONCE(*pmdp);
		if (pmd_leaf(pmd))
			vmemmap_page = pmd_page(pmd) + pte_index(vaddr);
		else
			vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));
		/*
		 * Due to HugeTLB alignment requirements and the vmemmap pages
		 * being at the start of the hotplugged memory region in the
		 * memory_hotplug.memmap_on_memory case, checking whether any
		 * vmemmap page's vmemmap page is marked VmemmapSelfHosted is
		 * sufficient.
		 *
		 * [                  hotplugged memory                  ]
		 * [        section        ][...][        section        ]
		 * [ vmemmap ][              usable memory               ]
		 *   ^   |     |                                        |
		 *   +---+     |                                        |
		 *     ^       |                                        |
		 *     +-------+                                        |
		 *          ^                                           |
		 *          +-------------------------------------------+
		 */
		if (PageVmemmapSelfHosted(vmemmap_page))
			return false;
	}

	return true;
}

/**
 * hugetlb_vmemmap_optimize - optimize @head page's vmemmap pages.
 * @h:		struct hstate.
 * @head:	the head page whose vmemmap pages will be optimized.
 *
 * This function only tries to optimize @head's vmemmap pages and does not
 * guarantee that the optimization will succeed after it returns. The caller
 * can use HPageVmemmapOptimized(@head) to detect if @head's vmemmap pages
 * have been optimized.
 */
void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
{
	unsigned long vmemmap_start = (unsigned long)head, vmemmap_end;
	unsigned long vmemmap_reuse;

	if (!vmemmap_should_optimize(h, head))
		return;

	static_branch_inc(&hugetlb_optimize_vmemmap_key);

	vmemmap_end = vmemmap_start + hugetlb_vmemmap_size(h);
	vmemmap_reuse = vmemmap_start;
	vmemmap_start += HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * Remap the vmemmap virtual address range [@vmemmap_start, @vmemmap_end)
	 * to the page which @vmemmap_reuse is mapped to, then free the pages
	 * which the range [@vmemmap_start, @vmemmap_end) is mapped to.
	 */
	if (vmemmap_remap_free(vmemmap_start, vmemmap_end, vmemmap_reuse))
		static_branch_dec(&hugetlb_optimize_vmemmap_key);
	else
		SetHPageVmemmapOptimized(head);
}

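/*
 * sysctl knob (vm.hugetlb_optimize_vmemmap) that toggles
 * vmemmap_optimize_enabled at runtime.
 */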
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
	{
		.procname	= "hugetlb_optimize_vmemmap",
		.data		= &vmemmap_optimize_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};

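/* Register the sysctl only if at least one hstate can actually be optimized. */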
static int __init hugetlb_vmemmap_init(void)
{
	/* HUGETLB_VMEMMAP_RESERVE_SIZE should cover all used struct pages */
	BUILD_BUG_ON(__NR_USED_SUBPAGE * sizeof(struct page) > HUGETLB_VMEMMAP_RESERVE_SIZE);

	if (IS_ENABLED(CONFIG_PROC_SYSCTL)) {
		const struct hstate *h;

		for_each_hstate(h) {
			if (hugetlb_vmemmap_optimizable(h)) {
				register_sysctl_init("vm", hugetlb_vmemmap_sysctls);
				break;
			}
		}
	}
	return 0;
}
late_initcall(hugetlb_vmemmap_init);