Lines Matching refs: h (the struct hstate pointer threaded through mm/hugetlb.c)
93 static int hugetlb_acct_memory(struct hstate *h, long delta);
126 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
138 spool->hstate = h; in hugepage_new_subpool()
141 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
291 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
298 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
312 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
316 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
373 long to, struct hstate *h, struct hugetlb_cgroup *cg, in hugetlb_resv_map_add() argument
380 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg); in hugetlb_resv_map_add()
399 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
439 iter->from, h, h_cg, in add_reservation_in_range()
452 t, h, h_cg, regions_needed); in add_reservation_in_range()
531 long in_regions_needed, struct hstate *h, in region_add() argument
569 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
767 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
769 if (!hugetlb_acct_memory(h, 1)) in hugetlb_fix_reserve_counts()
814 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
817 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
818 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
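vma_hugecache_offset() above turns a faulting address into the file's page-cache index in huge-page units: the byte offset within the VMA shifted down by huge_page_shift(), plus vm_pgoff converted from base pages to huge pages. A toy userspace model of that arithmetic; every address and size below is invented for illustration, assuming 4 KB base pages and 2 MB huge pages:

    /* Toy model of the vma_hugecache_offset() calculation. */
    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long huge_shift = 21;              /* 2 MB huge pages */
        unsigned long huge_order = huge_shift - 12; /* orders above a 4 KB base page */

        unsigned long vm_start = 0x40000000UL;      /* where the VMA begins */
        unsigned long vm_pgoff = 512 * 3;           /* file offset, in base pages */
        unsigned long address  = 0x40400000UL;      /* faulting address */

        unsigned long idx = ((address - vm_start) >> huge_shift)
                            + (vm_pgoff >> huge_order);

        /* 4 MB into the VMA = huge page 2; the mapping starts at huge page 3. */
        assert(idx == 5);
        printf("hugecache index = %lu\n", idx);
        return 0;
    }
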
893 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
896 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
902 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
903 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1130 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
1137 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1138 h->free_huge_pages++; in enqueue_huge_page()
1139 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
1143 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact() argument
1149 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
1156 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
1159 h->free_huge_pages--; in dequeue_huge_page_node_exact()
1160 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
1167 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
1193 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
1203 static unsigned long available_huge_pages(struct hstate *h) in available_huge_pages() argument
1205 return h->free_huge_pages - h->resv_huge_pages; in available_huge_pages()
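enqueue_huge_page(), dequeue_huge_page_node_exact() and available_huge_pages() above show the pool's bookkeeping discipline: every move onto or off a free list updates the global free_huge_pages counter and its per-node twin in lockstep, and a page only counts as available when it is free and not already promised to a reservation. A reduced model of that invariant, keeping just the fields visible in the listing (the toy_* names are this sketch's own):

    /* Reduced model of the hstate free-pool accounting. */
    #include <assert.h>

    #define MAX_NODES 4

    struct toy_hstate {
        unsigned long free_huge_pages;
        unsigned long free_huge_pages_node[MAX_NODES];
        unsigned long resv_huge_pages;
    };

    static void toy_enqueue(struct toy_hstate *h, int nid)
    {
        h->free_huge_pages++;           /* global and per-node move together */
        h->free_huge_pages_node[nid]++;
    }

    static void toy_dequeue(struct toy_hstate *h, int nid)
    {
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
    }

    /* Mirrors available_huge_pages(): free pages minus reserved ones. */
    static unsigned long toy_available(struct toy_hstate *h)
    {
        return h->free_huge_pages - h->resv_huge_pages;
    }

    int main(void)
    {
        struct toy_hstate h = { .resv_huge_pages = 1 };

        toy_enqueue(&h, 0);
        toy_enqueue(&h, 1);
        assert(toy_available(&h) == 1);   /* 2 free, 1 spoken for */

        toy_dequeue(&h, 0);
        assert(toy_available(&h) == 0);
        return 0;
    }
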
1208 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
1224 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) in dequeue_huge_page_vma()
1228 if (avoid_reserve && !available_huge_pages(h)) in dequeue_huge_page_vma()
1231 gfp_mask = htlb_alloc_mask(h); in dequeue_huge_page_vma()
1235 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1242 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1246 h->resv_huge_pages--; in dequeue_huge_page_vma()
1284 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1291 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1292 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1303 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1309 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1310 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
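hstate_next_node_to_alloc() and hstate_next_node_to_free() keep two independent round-robin cursors (next_nid_to_alloc, next_nid_to_free) so that allocation and freeing each spread evenly over the allowed NUMA nodes instead of hammering node 0. A simplified model, with a boolean array standing in for the kernel's nodemask and next_allowed() invented for the sketch (the real code also re-validates the cursor in case the mask changed underneath it):

    #include <assert.h>
    #include <stdbool.h>

    #define MAX_NODES 4

    /* Advance a round-robin cursor to the next allowed node, wrapping. */
    static int next_allowed(int nid, const bool allowed[MAX_NODES])
    {
        do {
            nid = (nid + 1) % MAX_NODES;
        } while (!allowed[nid]);
        return nid;
    }

    int main(void)
    {
        bool allowed[MAX_NODES] = { true, false, true, true };
        int next_to_alloc = 0;
        int order[4];

        /* Like hstate_next_node_to_alloc(): hand out the current node,
         * then move the cursor past it for the next caller. */
        for (int i = 0; i < 4; i++) {
            order[i] = next_to_alloc;
            next_to_alloc = next_allowed(next_to_alloc, allowed);
        }
        assert(order[0] == 0 && order[1] == 2 &&
               order[2] == 3 && order[3] == 0);   /* node 1 is skipped */
        return 0;
    }
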
1381 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1384 unsigned long nr_pages = pages_per_huge_page(h); in alloc_gigantic_page()
1395 huge_page_order(h), true); in alloc_gigantic_page()
1406 huge_page_order(h), true); in alloc_gigantic_page()
1418 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1426 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1444 static void __remove_hugetlb_page(struct hstate *h, struct page *page, in __remove_hugetlb_page() argument
1454 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __remove_hugetlb_page()
1460 h->free_huge_pages--; in __remove_hugetlb_page()
1461 h->free_huge_pages_node[nid]--; in __remove_hugetlb_page()
1464 h->surplus_huge_pages--; in __remove_hugetlb_page()
1465 h->surplus_huge_pages_node[nid]--; in __remove_hugetlb_page()
1490 if (hstate_is_gigantic(h)) in __remove_hugetlb_page()
1495 h->nr_huge_pages--; in __remove_hugetlb_page()
1496 h->nr_huge_pages_node[nid]--; in __remove_hugetlb_page()
1499 static void remove_hugetlb_page(struct hstate *h, struct page *page, in remove_hugetlb_page() argument
1502 __remove_hugetlb_page(h, page, adjust_surplus, false); in remove_hugetlb_page()
1505 static void remove_hugetlb_page_for_demote(struct hstate *h, struct page *page, in remove_hugetlb_page_for_demote() argument
1508 __remove_hugetlb_page(h, page, adjust_surplus, true); in remove_hugetlb_page_for_demote()
1511 static void add_hugetlb_page(struct hstate *h, struct page *page, in add_hugetlb_page() argument
1522 h->nr_huge_pages++; in add_hugetlb_page()
1523 h->nr_huge_pages_node[nid]++; in add_hugetlb_page()
1526 h->surplus_huge_pages++; in add_hugetlb_page()
1527 h->surplus_huge_pages_node[nid]++; in add_hugetlb_page()
1554 enqueue_huge_page(h, page); in add_hugetlb_page()
1557 static void __update_and_free_page(struct hstate *h, struct page *page) in __update_and_free_page() argument
1562 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __update_and_free_page()
1572 if (hugetlb_vmemmap_restore(h, page)) { in __update_and_free_page()
1579 add_hugetlb_page(h, page, true); in __update_and_free_page()
1591 for (i = 0; i < pages_per_huge_page(h); i++) { in __update_and_free_page()
1603 if (hstate_is_gigantic(h) || in __update_and_free_page()
1604 hugetlb_cma_page(page, huge_page_order(h))) { in __update_and_free_page()
1605 destroy_compound_gigantic_page(page, huge_page_order(h)); in __update_and_free_page()
1606 free_gigantic_page(page, huge_page_order(h)); in __update_and_free_page()
1608 __free_pages(page, huge_page_order(h)); in __update_and_free_page()
1633 struct hstate *h; in free_hpage_workfn() local
1645 h = size_to_hstate(page_size(page)); in free_hpage_workfn()
1647 __update_and_free_page(h, page); in free_hpage_workfn()
1654 static inline void flush_free_hpage_work(struct hstate *h) in flush_free_hpage_work() argument
1656 if (hugetlb_vmemmap_optimizable(h)) in flush_free_hpage_work()
1660 static void update_and_free_page(struct hstate *h, struct page *page, in update_and_free_page() argument
1664 __update_and_free_page(h, page); in update_and_free_page()
1679 static void update_and_free_pages_bulk(struct hstate *h, struct list_head *list) in update_and_free_pages_bulk() argument
1684 update_and_free_page(h, page, false); in update_and_free_pages_bulk()
1691 struct hstate *h; in size_to_hstate() local
1693 for_each_hstate(h) { in size_to_hstate()
1694 if (huge_page_size(h) == size) in size_to_hstate()
1695 return h; in size_to_hstate()
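size_to_hstate() is the reverse lookup that free_huge_page() (line 1706) and hugepagesz_setup() (line 4279) lean on: scan the registered hstates for the one whose huge_page_size() equals the given byte size. A reduced version; the two-entry table and the toy_* names are illustrative:

    #include <assert.h>
    #include <stddef.h>

    struct toy_hstate { unsigned long size; };

    static struct toy_hstate toy_hstates[] = {
        { 2UL << 20 },   /* 2 MB */
        { 1UL << 30 },   /* 1 GB */
    };

    static struct toy_hstate *toy_size_to_hstate(unsigned long size)
    {
        for (size_t i = 0; i < sizeof(toy_hstates) / sizeof(toy_hstates[0]); i++)
            if (toy_hstates[i].size == size)
                return &toy_hstates[i];
        return NULL;     /* no pool registered for this size */
    }

    int main(void)
    {
        assert(toy_size_to_hstate(2UL << 20) == &toy_hstates[0]);
        assert(toy_size_to_hstate(4096) == NULL);
        return 0;
    }
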
1706 struct hstate *h = page_hstate(page); in free_huge_page() local
1743 hugetlb_cgroup_uncharge_page(hstate_index(h), in free_huge_page()
1744 pages_per_huge_page(h), page); in free_huge_page()
1745 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in free_huge_page()
1746 pages_per_huge_page(h), page); in free_huge_page()
1748 h->resv_huge_pages++; in free_huge_page()
1751 remove_hugetlb_page(h, page, false); in free_huge_page()
1753 update_and_free_page(h, page, true); in free_huge_page()
1754 } else if (h->surplus_huge_pages_node[nid]) { in free_huge_page()
1756 remove_hugetlb_page(h, page, true); in free_huge_page()
1758 update_and_free_page(h, page, true); in free_huge_page()
1761 enqueue_huge_page(h, page); in free_huge_page()
1769 static void __prep_account_new_huge_page(struct hstate *h, int nid) in __prep_account_new_huge_page() argument
1772 h->nr_huge_pages++; in __prep_account_new_huge_page()
1773 h->nr_huge_pages_node[nid]++; in __prep_account_new_huge_page()
1776 static void __prep_new_huge_page(struct hstate *h, struct page *page) in __prep_new_huge_page() argument
1778 hugetlb_vmemmap_optimize(h, page); in __prep_new_huge_page()
1786 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1788 __prep_new_huge_page(h, page); in prep_new_huge_page()
1790 __prep_account_new_huge_page(h, nid); in prep_new_huge_page()
1948 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page() argument
1952 int order = huge_page_order(h); in alloc_buddy_huge_page()
2017 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page() argument
2025 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
2026 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
2028 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
2033 if (hstate_is_gigantic(h)) { in alloc_fresh_huge_page()
2034 if (!prep_compound_gigantic_page(page, huge_page_order(h))) { in alloc_fresh_huge_page()
2039 free_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
2047 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
2056 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
2061 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
2063 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
2064 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
2085 static struct page *remove_pool_huge_page(struct hstate *h, in remove_pool_huge_page() argument
2093 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in remove_pool_huge_page()
2098 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in remove_pool_huge_page()
2099 !list_empty(&h->hugepage_freelists[node])) { in remove_pool_huge_page()
2100 page = list_entry(h->hugepage_freelists[node].next, in remove_pool_huge_page()
2102 remove_hugetlb_page(h, page, acct_surplus); in remove_pool_huge_page()
2141 struct hstate *h = page_hstate(head); in dissolve_free_huge_page() local
2142 if (!available_huge_pages(h)) in dissolve_free_huge_page()
2164 remove_hugetlb_page(h, head, false); in dissolve_free_huge_page()
2165 h->max_huge_pages--; in dissolve_free_huge_page()
2176 rc = hugetlb_vmemmap_restore(h, head); in dissolve_free_huge_page()
2178 update_and_free_page(h, head, false); in dissolve_free_huge_page()
2181 add_hugetlb_page(h, head, false); in dissolve_free_huge_page()
2182 h->max_huge_pages++; in dissolve_free_huge_page()
2207 struct hstate *h; in dissolve_free_huge_pages() local
2213 for_each_hstate(h) in dissolve_free_huge_pages()
2214 order = min(order, huge_page_order(h)); in dissolve_free_huge_pages()
2229 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page() argument
2234 if (hstate_is_gigantic(h)) in alloc_surplus_huge_page()
2238 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_huge_page()
2242 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
2254 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_huge_page()
2261 h->surplus_huge_pages++; in alloc_surplus_huge_page()
2262 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
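alloc_surplus_huge_page() above gates dynamic pool growth on nr_overcommit_huge_pages, and the listing shows the limit checked twice (lines 2238 and 2254): once before allocating and once after, because the lock is dropped around the allocation and the limit can move in the meantime. A counter-only sketch of that check/allocate/recheck shape, with locking and the actual page allocation elided:

    #include <assert.h>

    struct toy_hstate {
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
    };

    /* Returns 1 if a surplus page was accounted, 0 if the limit refused it. */
    static int toy_alloc_surplus(struct toy_hstate *h)
    {
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
            return 0;                   /* fast-path refusal */

        /* ...the real code allocates a fresh page here with the lock
         * dropped, so the limit must be rechecked before accounting... */
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
            return 0;                   /* limit moved underneath us */

        h->surplus_huge_pages++;
        return 1;
    }

    int main(void)
    {
        struct toy_hstate h = { .nr_overcommit_huge_pages = 2 };

        assert(toy_alloc_surplus(&h) == 1);
        assert(toy_alloc_surplus(&h) == 1);
        assert(toy_alloc_surplus(&h) == 0);   /* overcommit limit reached */
        return 0;
    }
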
2270 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page() argument
2275 if (hstate_is_gigantic(h)) in alloc_migrate_huge_page()
2278 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
2298 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol() argument
2303 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_huge_page_with_mpol()
2312 page = alloc_surplus_huge_page(h, gfp, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
2319 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
2325 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask() argument
2329 if (available_huge_pages(h)) { in alloc_huge_page_nodemask()
2332 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
2340 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
2344 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma() argument
2353 gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_vma()
2355 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); in alloc_huge_page_vma()
2365 static int gather_surplus_pages(struct hstate *h, long delta) in gather_surplus_pages() argument
2376 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
2378 h->resv_huge_pages += delta; in gather_surplus_pages()
2388 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
2404 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
2405 (h->free_huge_pages + allocated); in gather_surplus_pages()
2425 h->resv_huge_pages += delta; in gather_surplus_pages()
2433 enqueue_huge_page(h, page); in gather_surplus_pages()
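gather_surplus_pages() sizes its allocation burst from the identity on lines 2376 and 2404: needed = (resv_huge_pages + delta) - free_huge_pages, recomputed after each round because pages can be freed back concurrently. For example, with 3 pages free and 2 of them already reserved, a new reservation of delta = 4 requires (2 + 4) - 3 = 3 surplus pages to be gathered before resv_huge_pages can grow by delta.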
2457 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2466 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2468 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in return_unused_surplus_pages()
2475 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2486 page = remove_pool_huge_page(h, &node_states[N_MEMORY], 1); in return_unused_surplus_pages()
2495 update_and_free_pages_bulk(h, &page_list); in return_unused_surplus_pages()
2536 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2549 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2616 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2619 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2622 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2625 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2628 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2631 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2634 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2637 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2640 static long vma_del_reservation(struct hstate *h, in vma_del_reservation() argument
2643 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV); in vma_del_reservation()
2666 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, in restore_reserve_on_error() argument
2669 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2686 (void)vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2688 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2699 rc = vma_del_reservation(h, vma, address); in restore_reserve_on_error()
2735 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2746 static int alloc_and_dissolve_huge_page(struct hstate *h, struct page *old_page, in alloc_and_dissolve_huge_page() argument
2749 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_and_dissolve_huge_page()
2761 new_page = alloc_buddy_huge_page(h, gfp_mask, nid, NULL, NULL); in alloc_and_dissolve_huge_page()
2764 __prep_new_huge_page(h, new_page); in alloc_and_dissolve_huge_page()
2799 remove_hugetlb_page(h, old_page, false); in alloc_and_dissolve_huge_page()
2805 __prep_account_new_huge_page(h, nid); in alloc_and_dissolve_huge_page()
2806 enqueue_huge_page(h, new_page); in alloc_and_dissolve_huge_page()
2812 update_and_free_page(h, old_page, false); in alloc_and_dissolve_huge_page()
2821 update_and_free_page(h, new_page, false); in alloc_and_dissolve_huge_page()
2828 struct hstate *h; in isolate_or_dissolve_huge_page() local
2840 h = page_hstate(head); in isolate_or_dissolve_huge_page()
2852 if (hstate_is_gigantic(h)) in isolate_or_dissolve_huge_page()
2858 ret = alloc_and_dissolve_huge_page(h, head, list); in isolate_or_dissolve_huge_page()
2867 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
2875 idx = hstate_index(h); in alloc_huge_page()
2881 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2895 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2916 idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2921 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2931 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2934 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2940 h->resv_huge_pages--; in alloc_huge_page()
2942 list_add(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2946 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2951 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2959 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2973 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
2975 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in alloc_huge_page()
2976 pages_per_huge_page(h), page); in alloc_huge_page()
2981 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
2984 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2989 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2993 int alloc_bootmem_huge_page(struct hstate *h, int nid)
2995 int __alloc_bootmem_huge_page(struct hstate *h, int nid) in __alloc_bootmem_huge_page() argument
3002 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3009 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
3011 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
3027 m->hstate = h; in __alloc_bootmem_huge_page()
3041 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
3043 VM_BUG_ON(!hstate_is_gigantic(h)); in gather_bootmem_prealloc()
3045 if (prep_compound_gigantic_page(page, huge_page_order(h))) { in gather_bootmem_prealloc()
3047 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
3051 free_gigantic_page(page, huge_page_order(h)); in gather_bootmem_prealloc()
3059 adjust_managed_page_count(page, pages_per_huge_page(h)); in gather_bootmem_prealloc()
3063 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid) in hugetlb_hstate_alloc_pages_onenode() argument
3068 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) { in hugetlb_hstate_alloc_pages_onenode()
3069 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages_onenode()
3070 if (!alloc_bootmem_huge_page(h, nid)) in hugetlb_hstate_alloc_pages_onenode()
3074 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in hugetlb_hstate_alloc_pages_onenode()
3076 page = alloc_fresh_huge_page(h, gfp_mask, nid, in hugetlb_hstate_alloc_pages_onenode()
3084 if (i == h->max_huge_pages_node[nid]) in hugetlb_hstate_alloc_pages_onenode()
3087 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages_onenode()
3089 h->max_huge_pages_node[nid], buf, nid, i); in hugetlb_hstate_alloc_pages_onenode()
3090 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i); in hugetlb_hstate_alloc_pages_onenode()
3091 h->max_huge_pages_node[nid] = i; in hugetlb_hstate_alloc_pages_onenode()
3094 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
3101 if (hstate_is_gigantic(h) && hugetlb_cma_size) { in hugetlb_hstate_alloc_pages()
3108 if (h->max_huge_pages_node[i] > 0) { in hugetlb_hstate_alloc_pages()
3109 hugetlb_hstate_alloc_pages_onenode(h, i); in hugetlb_hstate_alloc_pages()
3118 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3136 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
3137 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
3138 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE)) in hugetlb_hstate_alloc_pages()
3140 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
3146 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
3149 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
3151 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
3152 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
3159 struct hstate *h, *h2; in hugetlb_init_hstates() local
3161 for_each_hstate(h) { in hugetlb_init_hstates()
3163 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
3164 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
3174 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in hugetlb_init_hstates()
3176 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER) in hugetlb_init_hstates()
3179 if (h2 == h) in hugetlb_init_hstates()
3181 if (h2->order < h->order && in hugetlb_init_hstates()
3182 h2->order > h->demote_order) in hugetlb_init_hstates()
3183 h->demote_order = h2->order; in hugetlb_init_hstates()
3190 struct hstate *h; in report_hugepages() local
3192 for_each_hstate(h) { in report_hugepages()
3195 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
3197 buf, h->free_huge_pages); in report_hugepages()
3199 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf); in report_hugepages()
3204 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3211 if (hstate_is_gigantic(h)) in try_to_free_low()
3219 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
3221 if (count >= h->nr_huge_pages) in try_to_free_low()
3225 remove_hugetlb_page(h, page, false); in try_to_free_low()
3232 update_and_free_pages_bulk(h, &page_list); in try_to_free_low()
3236 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
3247 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
3256 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3257 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
3261 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
3262 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
3263 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
3270 h->surplus_huge_pages += delta; in adjust_pool_surplus()
3271 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
3275 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
3276 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
3298 mutex_lock(&h->resize_lock); in set_max_huge_pages()
3299 flush_free_hpage_work(h); in set_max_huge_pages()
3311 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
3329 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
3330 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3332 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
3350 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
3351 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
3355 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
3366 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
3392 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
3394 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
3399 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
3400 page = remove_pool_huge_page(h, nodes_allowed, 0); in set_max_huge_pages()
3408 update_and_free_pages_bulk(h, &page_list); in set_max_huge_pages()
3409 flush_free_hpage_work(h); in set_max_huge_pages()
3412 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
3413 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
3417 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
3419 mutex_unlock(&h->resize_lock); in set_max_huge_pages()
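set_max_huge_pages() above, together with the persistent_huge_pages() macro on line 3275 (pool size minus transient surplus), implements the resize protocol: to grow, first reabsorb surplus pages via adjust_pool_surplus(), then allocate fresh ones; to shrink, free unused pages down to min_count = resv_huge_pages + nr_huge_pages - free_huge_pages, never below what reservations still need, and convert any remainder to surplus. A schematic with allocation and freeing reduced to counter updates; the toy_* names are the sketch's own, and the real code also juggles node masks, gigantic pages, and dropped locks:

    /* Schematic of the set_max_huge_pages() grow/shrink loops. */
    #include <assert.h>

    struct toy_hstate {
        unsigned long nr_huge_pages;       /* persistent + surplus */
        unsigned long surplus_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
    };

    #define persistent(h) ((h)->nr_huge_pages - (h)->surplus_huge_pages)

    static void toy_set_max(struct toy_hstate *h, unsigned long count)
    {
        /* Grow: absorb surplus pages first, then "allocate" fresh ones. */
        while (h->surplus_huge_pages && count > persistent(h))
            h->surplus_huge_pages--;
        while (count > persistent(h)) {
            h->nr_huge_pages++;            /* stands in for alloc_pool_huge_page() */
            h->free_huge_pages++;
        }

        /* Shrink: never free below what outstanding reservations need. */
        unsigned long min_count =
            h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
        if (count > min_count)
            min_count = count;
        while (min_count < persistent(h) && h->free_huge_pages) {
            h->nr_huge_pages--;            /* stands in for remove_pool_huge_page() */
            h->free_huge_pages--;
        }
        while (count < persistent(h))
            h->surplus_huge_pages++;       /* leftover in-use pages become surplus */
    }

    int main(void)
    {
        struct toy_hstate h = { .nr_huge_pages = 4, .free_huge_pages = 4 };

        toy_set_max(&h, 8);
        assert(persistent(&h) == 8);

        toy_set_max(&h, 2);
        assert(persistent(&h) == 2);
        return 0;
    }
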
3426 static int demote_free_huge_page(struct hstate *h, struct page *page) in demote_free_huge_page() argument
3433 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order); in demote_free_huge_page()
3435 remove_hugetlb_page_for_demote(h, page, false); in demote_free_huge_page()
3438 rc = hugetlb_vmemmap_restore(h, page); in demote_free_huge_page()
3443 add_hugetlb_page(h, page, false); in demote_free_huge_page()
3451 destroy_compound_hugetlb_page_for_demote(page, huge_page_order(h)); in demote_free_huge_page()
3462 for (i = 0; i < pages_per_huge_page(h); in demote_free_huge_page()
3482 h->max_huge_pages--; in demote_free_huge_page()
3484 pages_per_huge_page(h) / pages_per_huge_page(target_hstate); in demote_free_huge_page()
3489 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed) in demote_pool_huge_page() argument
3498 if (!h->demote_order) { in demote_pool_huge_page()
3503 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in demote_pool_huge_page()
3504 list_for_each_entry(page, &h->hugepage_freelists[node], lru) { in demote_pool_huge_page()
3508 return demote_free_huge_page(h, page); in demote_pool_huge_page()
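demote_free_huge_page() and demote_pool_huge_page() above split one free page of the source hstate into pages of h->demote_order: the source pool's max_huge_pages drops by one (line 3482) while, per the expression on line 3484, the target pool gains pages_per_huge_page(h) / pages_per_huge_page(target_hstate) pages. Demoting a single 1 GB page to 2 MB pages, for instance, trades one page for 262144 / 512 = 512 smaller ones.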
3550 struct hstate *h; in nr_hugepages_show_common() local
3554 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
3556 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
3558 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
3564 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
3570 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
3591 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
3600 struct hstate *h; in nr_hugepages_store_common() local
3609 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
3610 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
3651 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
3652 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
3660 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
3662 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
3670 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
3680 struct hstate *h; in free_hugepages_show() local
3684 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
3686 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
3688 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
3697 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
3698 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
3705 struct hstate *h; in surplus_hugepages_show() local
3709 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
3711 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
3713 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
3725 struct hstate *h; in demote_store() local
3732 h = kobj_to_hstate(kobj, &nid); in demote_store()
3742 mutex_lock(&h->resize_lock); in demote_store()
3751 nr_available = h->free_huge_pages_node[nid]; in demote_store()
3753 nr_available = h->free_huge_pages; in demote_store()
3754 nr_available -= h->resv_huge_pages; in demote_store()
3758 err = demote_pool_huge_page(h, n_mask); in demote_store()
3766 mutex_unlock(&h->resize_lock); in demote_store()
3777 struct hstate *h = kobj_to_hstate(kobj, NULL); in demote_size_show() local
3778 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K; in demote_size_show()
3787 struct hstate *h, *demote_hstate; in demote_size_store() local
3801 h = kobj_to_hstate(kobj, NULL); in demote_size_store()
3802 if (demote_order >= h->order) in demote_size_store()
3806 mutex_lock(&h->resize_lock); in demote_size_store()
3807 h->demote_order = demote_order; in demote_size_store()
3808 mutex_unlock(&h->resize_lock); in demote_size_store()
3840 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
3845 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
3847 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
3858 if (h->demote_order) { in hugetlb_sysfs_add_hstate()
3862 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name); in hugetlb_sysfs_add_hstate()
3932 struct hstate *h; in hugetlb_unregister_node() local
3938 for_each_hstate(h) { in hugetlb_unregister_node()
3939 int idx = hstate_index(h); in hugetlb_unregister_node()
3944 if (h->demote_order) in hugetlb_unregister_node()
3962 struct hstate *h; in hugetlb_register_node() local
3977 for_each_hstate(h) { in hugetlb_register_node()
3978 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
3983 h->name, node->dev.id); in hugetlb_register_node()
4026 struct hstate *h; in hugetlb_sysfs_init() local
4033 for_each_hstate(h) { in hugetlb_sysfs_init()
4034 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
4037 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
4126 struct hstate *h; in hugetlb_add_hstate() local
4134 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
4135 mutex_init(&h->resize_lock); in hugetlb_add_hstate()
4136 h->order = order; in hugetlb_add_hstate()
4137 h->mask = ~(huge_page_size(h) - 1); in hugetlb_add_hstate()
4139 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
4140 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
4141 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
4142 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
4143 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
4144 huge_page_size(h)/SZ_1K); in hugetlb_add_hstate()
4146 parsed_hstate = h; in hugetlb_add_hstate()
4269 struct hstate *h; in hugepagesz_setup() local
4279 h = size_to_hstate(size); in hugepagesz_setup()
4280 if (h) { in hugepagesz_setup()
4288 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
4299 parsed_hstate = h; in hugepagesz_setup()
4375 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
4380 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
4381 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
4413 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
4414 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
4426 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
4452 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
4459 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
4461 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
4471 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
4482 struct hstate *h; in hugetlb_report_meminfo() local
4488 for_each_hstate(h) { in hugetlb_report_meminfo()
4489 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
4491 total += huge_page_size(h) * count; in hugetlb_report_meminfo()
4493 if (h == &default_hstate) in hugetlb_report_meminfo()
4501 h->free_huge_pages, in hugetlb_report_meminfo()
4502 h->resv_huge_pages, in hugetlb_report_meminfo()
4503 h->surplus_huge_pages, in hugetlb_report_meminfo()
4504 huge_page_size(h) / SZ_1K); in hugetlb_report_meminfo()
4512 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
4521 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4522 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
4523 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
4528 struct hstate *h; in hugetlb_show_meminfo_node() local
4533 for_each_hstate(h) in hugetlb_show_meminfo_node()
4536 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4537 h->free_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4538 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo_node()
4539 huge_page_size(h) / SZ_1K); in hugetlb_show_meminfo_node()
4551 struct hstate *h; in hugetlb_total_pages() local
4554 for_each_hstate(h) in hugetlb_total_pages()
4555 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
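hugetlb_total_pages() folds every registered pool into base-page units: each hstate contributes nr_huge_pages * pages_per_huge_page(h), exactly as line 4555 shows. A two-pool example, assuming a 4 KB base page (the array contents are illustrative):

    /* Sketch of hugetlb_total_pages(): sum the pools in base-page units. */
    #include <assert.h>

    struct toy_hstate { unsigned long nr_huge_pages, pages_per_hpage; };

    int main(void)
    {
        struct toy_hstate hstates[] = {
            { .nr_huge_pages = 10, .pages_per_hpage = 512 },    /* 2 MB pool */
            { .nr_huge_pages = 1,  .pages_per_hpage = 262144 }, /* 1 GB pool */
        };
        unsigned long total = 0;

        for (unsigned int i = 0; i < 2; i++)
            total += hstates[i].nr_huge_pages * hstates[i].pages_per_hpage;

        assert(total == 10 * 512 + 262144);   /* 267264 base pages */
        return 0;
    }
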
4559 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
4591 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
4594 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
4595 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
4602 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
4649 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
4661 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
4662 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
4672 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
4792 struct hstate *h = hstate_vma(src_vma); in copy_hugetlb_page_range() local
4793 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
4794 unsigned long npages = pages_per_huge_page(h); in copy_hugetlb_page_range()
4816 last_addr_mask = hugetlb_mask_last_page(h); in copy_hugetlb_page_range()
4843 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
4844 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
4921 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
4922 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
4926 restore_reserve_on_error(h, dst_vma, addr, in copy_hugetlb_page_range()
4970 struct hstate *h = hstate_vma(vma); in move_huge_pte() local
4975 dst_ptl = huge_pte_lock(h, mm, dst_pte); in move_huge_pte()
4976 src_ptl = huge_pte_lockptr(h, mm, src_pte); in move_huge_pte()
4998 struct hstate *h = hstate_vma(vma); in move_hugetlb_page_tables() local
5000 unsigned long sz = huge_page_size(h); in move_hugetlb_page_tables()
5018 last_addr_mask = hugetlb_mask_last_page(h); in move_hugetlb_page_tables()
5067 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
5068 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
5074 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
5075 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
5091 last_addr_mask = hugetlb_mask_last_page(h); in __unmap_hugepage_range()
5100 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
5158 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
5168 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
5172 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
5247 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
5256 address = address & huge_page_mask(h); in unmap_ref_private()
5289 address + huge_page_size(h), page, 0); in unmap_ref_private()
5306 struct hstate *h = hstate_vma(vma); in hugetlb_wp() local
5310 unsigned long haddr = address & huge_page_mask(h); in hugetlb_wp()
5398 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_wp()
5408 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_wp()
5434 pages_per_huge_page(h)); in hugetlb_wp()
5438 haddr + huge_page_size(h)); in hugetlb_wp()
5446 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_wp()
5469 restore_reserve_on_error(h, vma, haddr, new_page); in hugetlb_wp()
5484 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
5492 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
5505 struct hstate *h = hstate_inode(inode); in hugetlb_add_to_page_cache() local
5524 inode->i_blocks += blocks_per_huge_page(h); in hugetlb_add_to_page_cache()
5568 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, in hugetlb_pte_stable() argument
5574 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_pte_stable()
5587 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
5594 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
5617 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
5639 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5663 if (hugetlb_pte_stable(h, mm, ptep, old_pte)) in hugetlb_no_page()
5669 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
5683 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
5704 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
5713 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { in hugetlb_no_page()
5730 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
5735 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
5738 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
5759 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
5785 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
5826 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
5829 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
5831 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_fault()
5844 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
5853 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
5867 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h)); in hugetlb_fault()
5905 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
5910 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
5915 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
6009 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mcopy_atomic_pte() local
6011 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
6032 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mcopy_atomic_pte()
6045 pages_per_huge_page(h), false); in hugetlb_mcopy_atomic_pte()
6053 restore_reserve_on_error(h, dst_vma, dst_addr, page); in hugetlb_mcopy_atomic_pte()
6059 page = alloc_huge_page_vma(h, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
6073 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { in hugetlb_mcopy_atomic_pte()
6088 pages_per_huge_page(h)); in hugetlb_mcopy_atomic_pte()
6102 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
6119 ptl = huge_pte_lock(h, dst_mm, dst_pte); in hugetlb_mcopy_atomic_pte()
6165 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mcopy_atomic_pte()
6184 restore_reserve_on_error(h, dst_vma, dst_addr, page); in hugetlb_mcopy_atomic_pte()
6231 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
6257 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), in follow_hugetlb_page()
6258 huge_page_size(h)); in follow_hugetlb_page()
6260 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
6271 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
6337 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
6348 (vaddr + huge_page_size(h) < vma->vm_end) && in follow_hugetlb_page()
6349 (remainder >= pages_per_huge_page(h))) { in follow_hugetlb_page()
6350 vaddr += huge_page_size(h); in follow_hugetlb_page()
6351 remainder -= pages_per_huge_page(h); in follow_hugetlb_page()
6352 i += pages_per_huge_page(h); in follow_hugetlb_page()
6358 refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, in follow_hugetlb_page()
6412 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
6413 unsigned long pages = 0, psize = huge_page_size(h); in hugetlb_change_protection()
6435 last_addr_mask = hugetlb_mask_last_page(h); in hugetlb_change_protection()
6443 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
6537 return pages << h->order; in hugetlb_change_protection()
6547 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
6602 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6603 chg * pages_per_huge_page(h), &h_cg) < 0) in hugetlb_reserve_pages()
6610 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
6626 if (hugetlb_acct_memory(h, gbl_reserve) < 0) in hugetlb_reserve_pages()
6641 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
6644 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
6661 hstate_index(h), in hugetlb_reserve_pages()
6662 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6666 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
6683 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
6684 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
6701 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
6723 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
6734 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
7185 unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7187 unsigned long hp_size = huge_page_size(h); in hugetlb_mask_last_page()
7200 __weak unsigned long hugetlb_mask_last_page(struct hstate *h) in hugetlb_mask_last_page() argument
7203 if (huge_page_size(h) == PMD_SIZE) in hugetlb_mask_last_page()
7233 struct hstate *h = hstate_vma(vma); in follow_huge_pmd_pte() local
7247 ptep = huge_pte_offset(mm, address, huge_page_size(h)); in follow_huge_pmd_pte()
7251 ptl = huge_pte_lock(h, mm, ptep); in follow_huge_pmd_pte()
7255 ((address & ~huge_page_mask(h)) >> PAGE_SHIFT); in follow_huge_pmd_pte()
7389 struct hstate *h = page_hstate(oldpage); in move_hugetlb_state() local
7418 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
7419 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
7420 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
7432 struct hstate *h = hstate_vma(vma); in hugetlb_unshare_all_pmds() local
7433 unsigned long sz = huge_page_size(h); in hugetlb_unshare_all_pmds()
7463 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_unshare_all_pmds()