Lines matching references to "h" (the struct hstate pointer):
94 static int hugetlb_acct_memory(struct hstate *h, long delta);
130 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
142 spool->hstate = h; in hugepage_new_subpool()
145 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
466 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
473 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
487 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
491 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
548 long to, struct hstate *h, struct hugetlb_cgroup *cg, in hugetlb_resv_map_add() argument
555 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); in hugetlb_resv_map_add()
574 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
614 iter->from, h, h_cg, in add_reservation_in_range()
627 t, h, h_cg, regions_needed); in add_reservation_in_range()
706 long in_regions_needed, struct hstate *h, in region_add() argument
744 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
942 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
944 if (!hugetlb_acct_memory(h, 1)) in hugetlb_fix_reserve_counts()
989 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
992 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
993 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
1073 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
1076 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
1082 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
1083 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1310 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) in enqueue_hugetlb_folio() argument
1317 list_move(&folio->lru, &h->hugepage_freelists[nid]); in enqueue_hugetlb_folio()
1318 h->free_huge_pages++; in enqueue_hugetlb_folio()
1319 h->free_huge_pages_node[nid]++; in enqueue_hugetlb_folio()
1323 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, in dequeue_hugetlb_folio_node_exact() argument
1330 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) { in dequeue_hugetlb_folio_node_exact()
1337 list_move(&folio->lru, &h->hugepage_activelist); in dequeue_hugetlb_folio_node_exact()
1340 h->free_huge_pages--; in dequeue_hugetlb_folio_node_exact()
1341 h->free_huge_pages_node[nid]--; in dequeue_hugetlb_folio_node_exact()
1348 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask, in dequeue_hugetlb_folio_nodemask() argument
1374 folio = dequeue_hugetlb_folio_node_exact(h, node); in dequeue_hugetlb_folio_nodemask()
1384 static unsigned long available_huge_pages(struct hstate *h) in available_huge_pages() argument
1386 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
1389 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, in dequeue_hugetlb_folio_vma() argument
1405 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1409 if (avoid_reserve && !available_huge_pages(h)) in dequeue_hugetlb_folio_vma()
1412 gfp_mask = htlb_alloc_mask(h); in dequeue_hugetlb_folio_vma()
1416 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1424 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in dequeue_hugetlb_folio_vma()
1429 h->resv_huge_pages--; in dequeue_hugetlb_folio_vma()
1467 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1474 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1475 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1486 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1492 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1493 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1564 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1568 unsigned long nr_pages = pages_per_huge_page(h); in alloc_gigantic_folio()
1578 huge_page_order(h), true); in alloc_gigantic_folio()
1589 huge_page_order(h), true); in alloc_gigantic_folio()
1602 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1610 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_folio() argument
1621 static inline void __clear_hugetlb_destructor(struct hstate *h, in __clear_hugetlb_destructor() argument
1639 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio, in __remove_hugetlb_folio() argument
1649 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __remove_hugetlb_folio()
1655 h->free_huge_pages--; in __remove_hugetlb_folio()
1656 h->free_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1659 h->surplus_huge_pages--; in __remove_hugetlb_folio()
1660 h->surplus_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1669 __clear_hugetlb_destructor(h, folio); in __remove_hugetlb_folio()
1678 h->nr_huge_pages--; in __remove_hugetlb_folio()
1679 h->nr_huge_pages_node[nid]--; in __remove_hugetlb_folio()
1682 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio, in remove_hugetlb_folio() argument
1685 __remove_hugetlb_folio(h, folio, adjust_surplus, false); in remove_hugetlb_folio()
1688 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio, in remove_hugetlb_folio_for_demote() argument
1691 __remove_hugetlb_folio(h, folio, adjust_surplus, true); in remove_hugetlb_folio_for_demote()
1694 static void add_hugetlb_folio(struct hstate *h, struct folio *folio, in add_hugetlb_folio() argument
1705 h->nr_huge_pages++; in add_hugetlb_folio()
1706 h->nr_huge_pages_node[nid]++; in add_hugetlb_folio()
1709 h->surplus_huge_pages++; in add_hugetlb_folio()
1710 h->surplus_huge_pages_node[nid]++; in add_hugetlb_folio()
1737 enqueue_hugetlb_folio(h, folio); in add_hugetlb_folio()
1740 static void __update_and_free_hugetlb_folio(struct hstate *h, in __update_and_free_hugetlb_folio() argument
1745 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __update_and_free_hugetlb_folio()
1755 if (hugetlb_vmemmap_restore(h, &folio->page)) { in __update_and_free_hugetlb_folio()
1762 add_hugetlb_folio(h, folio, true); in __update_and_free_hugetlb_folio()
1780 __clear_hugetlb_destructor(h, folio); in __update_and_free_hugetlb_folio()
1788 if (hstate_is_gigantic(h) || in __update_and_free_hugetlb_folio()
1789 hugetlb_cma_folio(folio, huge_page_order(h))) { in __update_and_free_hugetlb_folio()
1790 destroy_compound_gigantic_folio(folio, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1791 free_gigantic_folio(folio, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1793 __free_pages(&folio->page, huge_page_order(h)); in __update_and_free_hugetlb_folio()
1818 struct hstate *h; in free_hpage_workfn() local
1830 h = size_to_hstate(page_size(page)); in free_hpage_workfn()
1832 __update_and_free_hugetlb_folio(h, page_folio(page)); in free_hpage_workfn()
1839 static inline void flush_free_hpage_work(struct hstate *h) in flush_free_hpage_work() argument
1841 if (hugetlb_vmemmap_optimizable(h)) in flush_free_hpage_work()
1845 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio, in update_and_free_hugetlb_folio() argument
1849 __update_and_free_hugetlb_folio(h, folio); in update_and_free_hugetlb_folio()
1864 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) in update_and_free_pages_bulk() argument
1871 update_and_free_hugetlb_folio(h, folio, false); in update_and_free_pages_bulk()
1878 struct hstate *h; in size_to_hstate() local
1880 for_each_hstate(h) { in size_to_hstate()
1881 if (huge_page_size(h) == size) in size_to_hstate()
1882 return h; in size_to_hstate()
1893 struct hstate *h = folio_hstate(folio); in free_huge_folio() local
1930 hugetlb_cgroup_uncharge_folio(hstate_index(h), in free_huge_folio()
1931 pages_per_huge_page(h), folio); in free_huge_folio()
1932 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in free_huge_folio()
1933 pages_per_huge_page(h), folio); in free_huge_folio()
1935 h->resv_huge_pages++; in free_huge_folio()
1938 remove_hugetlb_folio(h, folio, false); in free_huge_folio()
1940 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1941 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_folio()
1943 remove_hugetlb_folio(h, folio, true); in free_huge_folio()
1945 update_and_free_hugetlb_folio(h, folio, true); in free_huge_folio()
1948 enqueue_hugetlb_folio(h, folio); in free_huge_folio()
1956 static void __prep_account_new_huge_page(struct hstate *h, int nid) in __prep_account_new_huge_page() argument
1959 h->nr_huge_pages++; in __prep_account_new_huge_page()
1960 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1963 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio) in __prep_new_hugetlb_folio() argument
1965 hugetlb_vmemmap_optimize(h, &folio->page); in __prep_new_hugetlb_folio()
1973 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid) in prep_new_hugetlb_folio() argument
1975 __prep_new_hugetlb_folio(h, folio); in prep_new_hugetlb_folio()
1977 __prep_account_new_huge_page(h, nid); in prep_new_hugetlb_folio()
2120 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h, in alloc_buddy_hugetlb_folio() argument
2124 int order = huge_page_order(h); in alloc_buddy_hugetlb_folio()
2190 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h, in alloc_fresh_hugetlb_folio() argument
2198 if (hstate_is_gigantic(h)) in alloc_fresh_hugetlb_folio()
2199 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask); in alloc_fresh_hugetlb_folio()
2201 folio = alloc_buddy_hugetlb_folio(h, gfp_mask, in alloc_fresh_hugetlb_folio()
2205 if (hstate_is_gigantic(h)) { in alloc_fresh_hugetlb_folio()
2206 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) { in alloc_fresh_hugetlb_folio()
2211 free_gigantic_folio(folio, huge_page_order(h)); in alloc_fresh_hugetlb_folio()
2219 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); in alloc_fresh_hugetlb_folio()
2228 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
2233 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
2235 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
2236 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node, in alloc_pool_huge_page()
2254 static struct page *remove_pool_huge_page(struct hstate *h, in remove_pool_huge_page() argument
2263 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in remove_pool_huge_page()
2268 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_huge_page()
2269 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_huge_page()
2270 page = list_entry(h->hugepage_freelists[node].next, in remove_pool_huge_page()
2273 remove_hugetlb_folio(h, folio, acct_surplus); in remove_pool_huge_page()
2312 struct hstate *h = folio_hstate(folio); in dissolve_free_huge_page() local
2313 if (!available_huge_pages(h)) in dissolve_free_huge_page()
2335 remove_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2336 h->max_huge_pages--; in dissolve_free_huge_page()
2347 rc = hugetlb_vmemmap_restore(h, &folio->page); in dissolve_free_huge_page()
2349 update_and_free_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2352 add_hugetlb_folio(h, folio, false); in dissolve_free_huge_page()
2353 h->max_huge_pages++; in dissolve_free_huge_page()
2378 struct hstate *h; in dissolve_free_huge_pages() local
2384 for_each_hstate(h) in dissolve_free_huge_pages()
2385 order = min(order, huge_page_order(h)); in dissolve_free_huge_pages()
2400 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h, in alloc_surplus_hugetlb_folio() argument
2405 if (hstate_is_gigantic(h)) in alloc_surplus_hugetlb_folio()
2409 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_hugetlb_folio()
2413 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_hugetlb_folio()
2425 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_hugetlb_folio()
2432 h->surplus_huge_pages++; in alloc_surplus_hugetlb_folio()
2433 h->surplus_huge_pages_node[folio_nid(folio)]++; in alloc_surplus_hugetlb_folio()
2441 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_hugetlb_folio() argument
2446 if (hstate_is_gigantic(h)) in alloc_migrate_hugetlb_folio()
2449 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_hugetlb_folio()
2468 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, in alloc_buddy_hugetlb_folio_with_mpol() argument
2473 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_hugetlb_folio_with_mpol()
2482 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2489 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2495 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, in alloc_hugetlb_folio_nodemask() argument
2499 if (available_huge_pages(h)) { in alloc_hugetlb_folio_nodemask()
2502 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, in alloc_hugetlb_folio_nodemask()
2511 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); in alloc_hugetlb_folio_nodemask()
2515 struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_hugetlb_folio_vma() argument
2524 gfp_mask = htlb_alloc_mask(h); in alloc_hugetlb_folio_vma()
2526 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); in alloc_hugetlb_folio_vma()
2536 static int gather_surplus_pages(struct hstate *h, long delta) in gather_surplus_pages() argument
2547 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2549 h->resv_huge_pages += delta; in gather_surplus_pages()
2559 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), in gather_surplus_pages()
2575 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2576 (h->free_huge_pages + allocated); in gather_surplus_pages()
2596 h->resv_huge_pages += delta; in gather_surplus_pages()
2604 enqueue_hugetlb_folio(h, folio); in gather_surplus_pages()
2628 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2637 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2639 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in return_unused_surplus_pages()
2646 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2657 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); in return_unused_surplus_pages()
2666 update_and_free_pages_bulk(h, &page_list); in return_unused_surplus_pages()
2707 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2720 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2787 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2790 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2793 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2796 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2799 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2802 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2805 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2808 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2811 static long vma_del_reservation(struct hstate *h, in vma_del_reservation() argument
2814 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
2837 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, in restore_reserve_on_error() argument
2840 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2857 (void)vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2859 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2870 rc = vma_del_reservation(h, vma, address); in restore_reserve_on_error()
2906 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2918 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h, in alloc_and_dissolve_hugetlb_folio() argument
2921 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_and_dissolve_hugetlb_folio()
2933 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL); in alloc_and_dissolve_hugetlb_folio()
2936 __prep_new_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2974 remove_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
2980 __prep_account_new_huge_page(h, nid); in alloc_and_dissolve_hugetlb_folio()
2981 enqueue_hugetlb_folio(h, new_folio); in alloc_and_dissolve_hugetlb_folio()
2987 update_and_free_hugetlb_folio(h, old_folio, false); in alloc_and_dissolve_hugetlb_folio()
2996 update_and_free_hugetlb_folio(h, new_folio, false); in alloc_and_dissolve_hugetlb_folio()
3003 struct hstate *h; in isolate_or_dissolve_huge_page() local
3014 h = folio_hstate(folio); in isolate_or_dissolve_huge_page()
3026 if (hstate_is_gigantic(h)) in isolate_or_dissolve_huge_page()
3032 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); in isolate_or_dissolve_huge_page()
3041 struct hstate *h = hstate_vma(vma); in alloc_hugetlb_folio() local
3049 idx = hstate_index(h); in alloc_hugetlb_folio()
3055 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_hugetlb_folio()
3069 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
3090 idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3095 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3105 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_hugetlb_folio()
3108 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); in alloc_hugetlb_folio()
3114 h->resv_huge_pages--; in alloc_hugetlb_folio()
3116 list_add(&folio->lru, &h->hugepage_activelist); in alloc_hugetlb_folio()
3121 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); in alloc_hugetlb_folio()
3126 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3134 map_commit = vma_commit_reservation(h, vma, addr); in alloc_hugetlb_folio()
3148 hugetlb_acct_memory(h, -rsv_adjust); in alloc_hugetlb_folio()
3150 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), in alloc_hugetlb_folio()
3151 pages_per_huge_page(h), folio); in alloc_hugetlb_folio()
3156 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_hugetlb_folio()
3159 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_hugetlb_folio()
3164 vma_end_reservation(h, vma, addr); in alloc_hugetlb_folio()
3168 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3170 int __alloc_bootmem_huge_page(struct hstate *h, int nid) in __alloc_bootmem_huge_page() argument
3177 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3184 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
3186 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3202 m->hstate = h; in __alloc_bootmem_huge_page()
3217 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
3219 VM_BUG_ON(!hstate_is_gigantic(h)); in gather_bootmem_prealloc()
3221 if (prep_compound_gigantic_folio(folio, huge_page_order(h))) { in gather_bootmem_prealloc()
3223 prep_new_hugetlb_folio(h, folio, folio_nid(folio)); in gather_bootmem_prealloc()
3227 free_gigantic_folio(folio, huge_page_order(h)); in gather_bootmem_prealloc()
3235 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc()
3239 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) in hugetlb_hstate_alloc_pages_onenode() argument
3244 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3245 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages_onenode()
3246 if (!alloc_bootmem_huge_page(h, nid)) in hugetlb_hstate_alloc_pages_onenode()
3250 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in hugetlb_hstate_alloc_pages_onenode()
3252 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, in hugetlb_hstate_alloc_pages_onenode()
3260 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3263 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages_onenode()
3265 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3266 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3267 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3270 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
3277 if (hstate_is_gigantic(h) && hugetlb_cma_size) { in hugetlb_hstate_alloc_pages()
3284 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages()
3285 hugetlb_hstate_alloc_pages_onenode(h, i); in hugetlb_hstate_alloc_pages()
3294 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3312 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
3313 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3314 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) in hugetlb_hstate_alloc_pages()
3316 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
3322 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
3325 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
3327 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
3328 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
3335 struct hstate *h, *h2; in hugetlb_init_hstates() local
3337 for_each_hstate(h) { in hugetlb_init_hstates()
3339 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
3340 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
3350 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in hugetlb_init_hstates()
3352 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3355 if (h2 == h) in hugetlb_init_hstates()
3357 if (h2->order < h->order && in hugetlb_init_hstates()
3358 h2->order > h->demote_order) in hugetlb_init_hstates()
3359 h->demote_order = h2->order; in hugetlb_init_hstates()
3366 struct hstate *h; in report_hugepages() local
3368 for_each_hstate(h) { in report_hugepages()
3371 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
3373 buf, h->free_huge_pages); in report_hugepages()
3375 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); in report_hugepages()
3380 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3387 if (hstate_is_gigantic(h)) in try_to_free_low()
3395 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3397 if (count >= h->nr_huge_pages) in try_to_free_low()
3401 remove_hugetlb_folio(h, page_folio(page), false); in try_to_free_low()
3408 update_and_free_pages_bulk(h, &page_list); in try_to_free_low()
3412 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3423 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
3432 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3433 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3437 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3438 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3439 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3446 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3447 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3451 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
3452 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
3474 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3475 flush_free_hpage_work(h); in set_max_huge_pages()
3487 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
3505 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
3506 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3508 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3526 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3527 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3531 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3542 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
3568 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3570 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
3575 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
3576 page = remove_pool_huge_page(h, nodes_allowed, 0); in set_max_huge_pages()
3584 update_and_free_pages_bulk(h, &page_list); in set_max_huge_pages()
3585 flush_free_hpage_work(h); in set_max_huge_pages()
3588 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
3589 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
3593 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3595 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3602 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio) in demote_free_hugetlb_folio() argument
3610 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); in demote_free_hugetlb_folio()
3612 remove_hugetlb_folio_for_demote(h, folio, false); in demote_free_hugetlb_folio()
3615 rc = hugetlb_vmemmap_restore(h, &folio->page); in demote_free_hugetlb_folio()
3620 add_hugetlb_folio(h, folio, false); in demote_free_hugetlb_folio()
3628 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h)); in demote_free_hugetlb_folio()
3639 for (i = 0; i < pages_per_huge_page(h); in demote_free_hugetlb_folio()
3660 h->max_huge_pages--; in demote_free_hugetlb_folio()
3662 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); in demote_free_hugetlb_folio()
3667 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in demote_pool_huge_page() argument
3676 if (!h->demote_order) { in demote_pool_huge_page()
3681 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in demote_pool_huge_page()
3682 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) { in demote_pool_huge_page()
3685 return demote_free_hugetlb_folio(h, folio); in demote_pool_huge_page()
3727 struct hstate *h; in nr_hugepages_show_common() local
3731 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
3733 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3735 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3741 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
3747 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
3768 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
3777 struct hstate *h; in nr_hugepages_store_common() local
3786 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
3787 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
3828 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
3829 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
3837 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
3839 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
3847 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
3857 struct hstate *h; in free_hugepages_show() local
3861 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
3863 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
3865 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
3874 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
3875 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
3882 struct hstate *h; in surplus_hugepages_show() local
3886 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
3888 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
3890 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
3902 struct hstate *h; in demote_store() local
3909 h = kobj_to_hstate(kobj, &nid); in demote_store()
3919 mutex_lock(&h->resize_lock); in demote_store()
3928 nr_available = h->free_huge_pages_node[nid]; in demote_store()
3930 nr_available = h->free_huge_pages; in demote_store()
3931 nr_available -= h->resv_huge_pages; in demote_store()
3935 err = demote_pool_huge_page(h, n_mask); in demote_store()
3943 mutex_unlock(&h->resize_lock); in demote_store()
3954 struct hstate *h = kobj_to_hstate(kobj, NULL); in demote_size_show() local
3955 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
3964 struct hstate *h, *demote_hstate; in demote_size_store() local
3978 h = kobj_to_hstate(kobj, NULL); in demote_size_store()
3979 if (demote_order >= h->order) in demote_size_store()
3983 mutex_lock(&h->resize_lock); in demote_size_store()
3984 h->demote_order = demote_order; in demote_size_store()
3985 mutex_unlock(&h->resize_lock); in demote_size_store()
4017 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
4022 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
4024 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
4035 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
4039 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
4109 struct hstate *h; in hugetlb_unregister_node() local
4115 for_each_hstate(h) { in hugetlb_unregister_node()
4116 int idx = hstate_index(h); in hugetlb_unregister_node()
4121 if (h->demote_order) in hugetlb_unregister_node()
4139 struct hstate *h; in hugetlb_register_node() local
4154 for_each_hstate(h) { in hugetlb_register_node()
4155 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
4160 h->name, node->dev.id); in hugetlb_register_node()
4203 struct hstate *h; in hugetlb_sysfs_init() local
4210 for_each_hstate(h) { in hugetlb_sysfs_init()
4211 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
4214 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4310 struct hstate *h; in hugetlb_add_hstate() local
4318 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
4319 mutex_init(&h->resize_lock); in hugetlb_add_hstate()
4320 h->order = order; in hugetlb_add_hstate()
4321 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4323 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4324 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4325 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4326 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4327 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4328 huge_page_size(h)/SZ_1K); in hugetlb_add_hstate()
4330 parsed_hstate = h; in hugetlb_add_hstate()
4453 struct hstate *h; in hugepagesz_setup() local
4463 h = size_to_hstate(size); in hugepagesz_setup()
4464 if (h) { in hugepagesz_setup()
4472 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
4483 parsed_hstate = h; in hugepagesz_setup()
4559 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
4564 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4565 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
4597 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
4598 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4610 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
4636 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
4643 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4645 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
4655 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4704 struct hstate *h; in hugetlb_report_meminfo() local
4710 for_each_hstate(h) { in hugetlb_report_meminfo()
4711 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4713 total += huge_page_size(h) * count; in hugetlb_report_meminfo()
4715 if (h == &default_hstate) in hugetlb_report_meminfo()
4723 h->free_huge_pages, in hugetlb_report_meminfo()
4724 h->resv_huge_pages, in hugetlb_report_meminfo()
4725 h->surplus_huge_pages, in hugetlb_report_meminfo()
4726 huge_page_size(h) / SZ_1K); in hugetlb_report_meminfo()
4734 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
4743 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4744 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4745 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4750 struct hstate *h; in hugetlb_show_meminfo_node() local
4755 for_each_hstate(h) in hugetlb_show_meminfo_node()
4758 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4759 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4760 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4761 huge_page_size(h) / SZ_1K); in hugetlb_show_meminfo_node()
4773 struct hstate *h; in hugetlb_total_pages() local
4776 for_each_hstate(h) in hugetlb_total_pages()
4777 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
4781 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
4813 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
4816 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
4817 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
4824 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
4871 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
4883 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
4884 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
4894 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
5036 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range() local
5037 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
5038 unsigned long npages = pages_per_huge_page(h); in copy_hugetlb_page_range()
5060 last_addr_mask = hugetlb_mask_last_page(h); in copy_hugetlb_page_range()
5087 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5088 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5168 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
5169 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
5173 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
5222 struct hstate *h = hstate_vma(vma); in move_huge_pte() local
5227 dst_ptl = huge_pte_lock(h, mm, dst_pte); in move_huge_pte()
5228 src_ptl = huge_pte_lockptr(h, mm, src_pte); in move_huge_pte()
5250 struct hstate *h = hstate_vma(vma); in move_hugetlb_page_tables() local
5252 unsigned long sz = huge_page_size(h); in move_hugetlb_page_tables()
5270 last_addr_mask = hugetlb_mask_last_page(h); in move_hugetlb_page_tables()
5319 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
5320 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
5325 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
5326 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
5335 last_addr_mask = hugetlb_mask_last_page(h); in __unmap_hugepage_range()
5344 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
5401 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
5410 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
5414 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
5507 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
5516 address = address & huge_page_mask(h); in unmap_ref_private()
5549 address + huge_page_size(h), page, 0); in unmap_ref_private()
5566 struct hstate *h = hstate_vma(vma); in hugetlb_wp() local
5571 unsigned long haddr = address & huge_page_mask(h); in hugetlb_wp()
5664 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_wp()
5674 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_wp()
5706 haddr + huge_page_size(h)); in hugetlb_wp()
5714 ptep = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_wp()
5724 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); in hugetlb_wp()
5737 restore_reserve_on_error(h, vma, haddr, new_folio); in hugetlb_wp()
5751 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
5755 pgoff_t idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
5769 struct hstate *h = hstate_inode(inode); in hugetlb_add_to_page_cache() local
5788 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
5832 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, in hugetlb_pte_stable() argument
5838 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_pte_stable()
5851 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
5858 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
5881 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
5903 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5927 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) in hugetlb_no_page()
5933 clear_huge_page(&folio->page, address, pages_per_huge_page(h)); in hugetlb_no_page()
5947 restore_reserve_on_error(h, vma, haddr, folio); in hugetlb_no_page()
5968 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
5977 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5994 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
5999 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
6002 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
6020 set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h)); in hugetlb_no_page()
6022 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
6048 restore_reserve_on_error(h, vma, haddr, folio); in hugetlb_no_page()
6089 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
6092 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
6106 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
6116 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); in hugetlb_fault()
6168 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
6181 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
6186 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
6193 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
6287 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mfill_atomic_pte() local
6289 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6300 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6310 huge_page_size(h)); in hugetlb_mfill_atomic_pte()
6330 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6350 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6356 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr); in hugetlb_mfill_atomic_pte()
6370 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mfill_atomic_pte()
6402 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mfill_atomic_pte()
6419 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mfill_atomic_pte()
6461 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h)); in hugetlb_mfill_atomic_pte()
6463 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mfill_atomic_pte()
6482 restore_reserve_on_error(h, dst_vma, dst_addr, folio); in hugetlb_mfill_atomic_pte()
6492 struct hstate *h = hstate_vma(vma); in hugetlb_follow_page_mask() local
6494 unsigned long haddr = address & huge_page_mask(h); in hugetlb_follow_page_mask()
6501 pte = hugetlb_walk(vma, haddr, huge_page_size(h)); in hugetlb_follow_page_mask()
6505 ptl = huge_pte_lock(h, mm, pte); in hugetlb_follow_page_mask()
6523 page += ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); in hugetlb_follow_page_mask()
6541 *page_mask = (1U << huge_page_order(h)) - 1; in hugetlb_follow_page_mask()
6553 !hugetlbfs_pagecache_present(h, vma, address)) in hugetlb_follow_page_mask()
6567 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
6568 long pages = 0, psize = huge_page_size(h); in hugetlb_change_protection()
6590 last_addr_mask = hugetlb_mask_last_page(h); in hugetlb_change_protection()
6609 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
6699 return pages > 0 ? (pages << h->order) : pages; in hugetlb_change_protection()
6709 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
6765 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6766 chg * pages_per_huge_page(h), &h_cg) < 0) in hugetlb_reserve_pages()
6773 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
6789 if (hugetlb_acct_memory(h, gbl_reserve) < 0) in hugetlb_reserve_pages()
6804 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
6807 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
6824 hstate_index(h), in hugetlb_reserve_pages()
6825 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6829 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
6846 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6847 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6866 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
6888 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
6899 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
7170 unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7172 unsigned long hp_size = huge_page_size(h); in hugetlb_mask_last_page()
7185 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7188 if (huge_page_size(h) == PMD_SIZE) in hugetlb_mask_last_page()
7259 struct hstate *h = folio_hstate(old_folio); in move_hugetlb_state() local
7289 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7290 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7291 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7301 struct hstate *h = hstate_vma(vma); in hugetlb_unshare_pmds() local
7302 unsigned long sz = huge_page_size(h); in hugetlb_unshare_pmds()
7329 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_unshare_pmds()
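
For orientation, nearly every reference above reaches struct hstate through a small set of accessor helpers (huge_page_size(), pages_per_huge_page(), huge_page_shift(), huge_page_mask()) whose arithmetic is derived from h->order. The sketch below is a minimal user-space illustration of that arithmetic, assuming the standard definitions in include/linux/hugetlb.h; the struct and names are stubs for illustration, not the kernel's struct hstate.

#include <stdio.h>

#define PAGE_SHIFT 12                      /* assume 4 KiB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stub standing in for the relevant fields of the kernel's struct hstate. */
struct hstate_stub {
	unsigned int order;                /* huge page size = PAGE_SIZE << order */
	unsigned long mask;                /* ~(huge_page_size - 1), set at registration */
};

/* Mirrors of the accessor helpers that recur throughout the references above. */
static unsigned long huge_page_size(const struct hstate_stub *h)
{
	return PAGE_SIZE << h->order;
}

static unsigned int pages_per_huge_page(const struct hstate_stub *h)
{
	return 1U << h->order;
}

static unsigned int huge_page_shift(const struct hstate_stub *h)
{
	return h->order + PAGE_SHIFT;
}

int main(void)
{
	/* A 2 MiB hstate on a 4 KiB-page system: order 9. */
	struct hstate_stub h = { .order = 9 };

	h.mask = ~(huge_page_size(&h) - 1);  /* same expression as line 4321 above */

	printf("size  = %lu KiB\n", huge_page_size(&h) / 1024);   /* 2048 */
	printf("pages = %u\n", pages_per_huge_page(&h));          /* 512  */
	printf("shift = %u\n", huge_page_shift(&h));              /* 21   */
	printf("mask  = %#lx\n", h.mask);
	return 0;
}

Running the sketch for an order-9 hstate prints size = 2048 KiB, pages = 512, shift = 21, matching the 2 MiB huge page case that the helpers in the listing compute at runtime.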