Lines matching refs: h

83 static int hugetlb_acct_memory(struct hstate *h, long delta);
102 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, in hugepage_new_subpool() argument
114 spool->hstate = h; in hugepage_new_subpool()
117 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) { in hugepage_new_subpool()
264 struct hstate *h, in record_hugetlb_cgroup_uncharge_info() argument
271 &h_cg->rsvd_hugepage[hstate_index(h)]; in record_hugetlb_cgroup_uncharge_info()
274 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
278 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
334 struct hstate *h, long *regions_needed) in add_reservation_in_range() argument
373 record_hugetlb_cgroup_uncharge_info(h_cg, h, in add_reservation_in_range()
392 record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg); in add_reservation_in_range()
479 long in_regions_needed, struct hstate *h, in region_add() argument
517 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL); in region_add()
715 struct hstate *h = hstate_inode(inode); in hugetlb_fix_reserve_counts() local
717 hugetlb_acct_memory(h, 1); in hugetlb_fix_reserve_counts()
756 static pgoff_t vma_hugecache_offset(struct hstate *h, in vma_hugecache_offset() argument
759 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
760 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
835 struct hstate *h) in resv_map_set_hugetlb_cgroup_uncharge_info() argument
838 if (!h_cg || !h) { in resv_map_set_hugetlb_cgroup_uncharge_info()
844 &h_cg->rsvd_hugepage[hstate_index(h)]; in resv_map_set_hugetlb_cgroup_uncharge_info()
845 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1025 static void enqueue_huge_page(struct hstate *h, struct page *page) in enqueue_huge_page() argument
1028 list_move(&page->lru, &h->hugepage_freelists[nid]); in enqueue_huge_page()
1029 h->free_huge_pages++; in enqueue_huge_page()
1030 h->free_huge_pages_node[nid]++; in enqueue_huge_page()
1033 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid) in dequeue_huge_page_node_exact() argument
1038 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) { in dequeue_huge_page_node_exact()
1045 list_move(&page->lru, &h->hugepage_activelist); in dequeue_huge_page_node_exact()
1047 h->free_huge_pages--; in dequeue_huge_page_node_exact()
1048 h->free_huge_pages_node[nid]--; in dequeue_huge_page_node_exact()
1055 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid, in dequeue_huge_page_nodemask() argument
1081 page = dequeue_huge_page_node_exact(h, node); in dequeue_huge_page_nodemask()
1091 static struct page *dequeue_huge_page_vma(struct hstate *h, in dequeue_huge_page_vma() argument
1108 h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
1112 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0) in dequeue_huge_page_vma()
1115 gfp_mask = htlb_alloc_mask(h); in dequeue_huge_page_vma()
1117 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask); in dequeue_huge_page_vma()
1120 h->resv_huge_pages--; in dequeue_huge_page_vma()
1158 static int hstate_next_node_to_alloc(struct hstate *h, in hstate_next_node_to_alloc() argument
1165 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed); in hstate_next_node_to_alloc()
1166 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_alloc()
1177 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) in hstate_next_node_to_free() argument
1183 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed); in hstate_next_node_to_free()
1184 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed); in hstate_next_node_to_free()
1238 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1241 unsigned long nr_pages = 1UL << huge_page_order(h); in alloc_gigantic_page()
1252 huge_page_order(h), true); in alloc_gigantic_page()
1263 huge_page_order(h), true); in alloc_gigantic_page()
1274 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1277 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1285 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask, in alloc_gigantic_page() argument
1295 static void update_and_free_page(struct hstate *h, struct page *page) in update_and_free_page() argument
1299 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in update_and_free_page()
1302 h->nr_huge_pages--; in update_and_free_page()
1303 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1304 for (i = 0; i < pages_per_huge_page(h); i++) { in update_and_free_page()
1314 if (hstate_is_gigantic(h)) { in update_and_free_page()
1320 destroy_compound_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1321 free_gigantic_page(page, huge_page_order(h)); in update_and_free_page()
1324 __free_pages(page, huge_page_order(h)); in update_and_free_page()
1330 struct hstate *h; in size_to_hstate() local
1332 for_each_hstate(h) { in size_to_hstate()
1333 if (huge_page_size(h) == size) in size_to_hstate()
1334 return h; in size_to_hstate()
1392 struct hstate *h = page_hstate(page); in __free_huge_page() local
1427 hugetlb_cgroup_uncharge_page(hstate_index(h), in __free_huge_page()
1428 pages_per_huge_page(h), page); in __free_huge_page()
1429 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in __free_huge_page()
1430 pages_per_huge_page(h), page); in __free_huge_page()
1432 h->resv_huge_pages++; in __free_huge_page()
1437 update_and_free_page(h, page); in __free_huge_page()
1438 } else if (h->surplus_huge_pages_node[nid]) { in __free_huge_page()
1441 update_and_free_page(h, page); in __free_huge_page()
1442 h->surplus_huge_pages--; in __free_huge_page()
1443 h->surplus_huge_pages_node[nid]--; in __free_huge_page()
1446 enqueue_huge_page(h, page); in __free_huge_page()
1499 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid) in prep_new_huge_page() argument
1506 h->nr_huge_pages++; in prep_new_huge_page()
1507 h->nr_huge_pages_node[nid]++; in prep_new_huge_page()
1608 static struct page *alloc_buddy_huge_page(struct hstate *h, in alloc_buddy_huge_page() argument
1612 int order = huge_page_order(h); in alloc_buddy_huge_page()
1659 static struct page *alloc_fresh_huge_page(struct hstate *h, in alloc_fresh_huge_page() argument
1665 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1666 page = alloc_gigantic_page(h, gfp_mask, nid, nmask); in alloc_fresh_huge_page()
1668 page = alloc_buddy_huge_page(h, gfp_mask, in alloc_fresh_huge_page()
1673 if (hstate_is_gigantic(h)) in alloc_fresh_huge_page()
1674 prep_compound_gigantic_page(page, huge_page_order(h)); in alloc_fresh_huge_page()
1675 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1684 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in alloc_pool_huge_page() argument
1689 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE; in alloc_pool_huge_page()
1691 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in alloc_pool_huge_page()
1692 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed, in alloc_pool_huge_page()
1712 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed, in free_pool_huge_page() argument
1718 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in free_pool_huge_page()
1723 if ((!acct_surplus || h->surplus_huge_pages_node[node]) && in free_pool_huge_page()
1724 !list_empty(&h->hugepage_freelists[node])) { in free_pool_huge_page()
1726 list_entry(h->hugepage_freelists[node].next, in free_pool_huge_page()
1729 h->free_huge_pages--; in free_pool_huge_page()
1730 h->free_huge_pages_node[node]--; in free_pool_huge_page()
1732 h->surplus_huge_pages--; in free_pool_huge_page()
1733 h->surplus_huge_pages_node[node]--; in free_pool_huge_page()
1735 update_and_free_page(h, page); in free_pool_huge_page()
1770 struct hstate *h = page_hstate(head); in dissolve_free_huge_page() local
1772 if (h->free_huge_pages - h->resv_huge_pages == 0) in dissolve_free_huge_page()
1783 h->free_huge_pages--; in dissolve_free_huge_page()
1784 h->free_huge_pages_node[nid]--; in dissolve_free_huge_page()
1785 h->max_huge_pages--; in dissolve_free_huge_page()
1786 update_and_free_page(h, head); in dissolve_free_huge_page()
1824 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_surplus_huge_page() argument
1829 if (hstate_is_gigantic(h)) in alloc_surplus_huge_page()
1833 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) in alloc_surplus_huge_page()
1837 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_surplus_huge_page()
1849 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) { in alloc_surplus_huge_page()
1855 h->surplus_huge_pages++; in alloc_surplus_huge_page()
1856 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
1865 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, in alloc_migrate_huge_page() argument
1870 if (hstate_is_gigantic(h)) in alloc_migrate_huge_page()
1873 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL); in alloc_migrate_huge_page()
1890 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h, in alloc_buddy_huge_page_with_mpol() argument
1895 gfp_t gfp_mask = htlb_alloc_mask(h); in alloc_buddy_huge_page_with_mpol()
1900 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask); in alloc_buddy_huge_page_with_mpol()
1907 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, in alloc_huge_page_nodemask() argument
1911 if (h->free_huge_pages - h->resv_huge_pages > 0) { in alloc_huge_page_nodemask()
1914 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1922 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask); in alloc_huge_page_nodemask()
1926 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma() argument
1935 gfp_mask = htlb_alloc_mask(h); in alloc_huge_page_vma()
1937 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask); in alloc_huge_page_vma()
1947 static int gather_surplus_pages(struct hstate *h, int delta) in gather_surplus_pages() argument
1956 needed = (h->resv_huge_pages + delta) - h->free_huge_pages; in gather_surplus_pages()
1958 h->resv_huge_pages += delta; in gather_surplus_pages()
1969 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h), in gather_surplus_pages()
1985 needed = (h->resv_huge_pages + delta) - in gather_surplus_pages()
1986 (h->free_huge_pages + allocated); in gather_surplus_pages()
2006 h->resv_huge_pages += delta; in gather_surplus_pages()
2019 enqueue_huge_page(h, page); in gather_surplus_pages()
2046 static void return_unused_surplus_pages(struct hstate *h, in return_unused_surplus_pages() argument
2052 if (hstate_is_gigantic(h)) in return_unused_surplus_pages()
2059 nr_pages = min(unused_resv_pages, h->surplus_huge_pages); in return_unused_surplus_pages()
2074 h->resv_huge_pages--; in return_unused_surplus_pages()
2076 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) in return_unused_surplus_pages()
2083 h->resv_huge_pages -= unused_resv_pages; in return_unused_surplus_pages()
2117 static long __vma_reservation_common(struct hstate *h, in __vma_reservation_common() argument
2130 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2188 static long vma_needs_reservation(struct hstate *h, in vma_needs_reservation() argument
2191 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2194 static long vma_commit_reservation(struct hstate *h, in vma_commit_reservation() argument
2197 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2200 static void vma_end_reservation(struct hstate *h, in vma_end_reservation() argument
2203 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2206 static long vma_add_reservation(struct hstate *h, in vma_add_reservation() argument
2209 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2223 static void restore_reserve_on_error(struct hstate *h, in restore_reserve_on_error() argument
2228 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2244 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2252 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2260 struct hstate *h = hstate_vma(vma); in alloc_huge_page() local
2268 idx = hstate_index(h); in alloc_huge_page()
2274 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2288 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2309 idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2314 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_huge_page()
2324 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2327 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2332 h->resv_huge_pages--; in alloc_huge_page()
2335 list_add(&page->lru, &h->hugepage_activelist); in alloc_huge_page()
2338 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); in alloc_huge_page()
2343 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2351 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2365 hugetlb_acct_memory(h, -rsv_adjust); in alloc_huge_page()
2367 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), in alloc_huge_page()
2368 pages_per_huge_page(h), page); in alloc_huge_page()
2373 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); in alloc_huge_page()
2376 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), in alloc_huge_page()
2381 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2385 int alloc_bootmem_huge_page(struct hstate *h)
2387 int __alloc_bootmem_huge_page(struct hstate *h) in __alloc_bootmem_huge_page() argument
2392 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) { in __alloc_bootmem_huge_page()
2396 huge_page_size(h), huge_page_size(h), in __alloc_bootmem_huge_page()
2411 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h))); in __alloc_bootmem_huge_page()
2415 m->hstate = h; in __alloc_bootmem_huge_page()
2435 struct hstate *h = m->hstate; in gather_bootmem_prealloc() local
2438 prep_compound_huge_page(page, h->order); in gather_bootmem_prealloc()
2440 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2449 if (hstate_is_gigantic(h)) in gather_bootmem_prealloc()
2450 adjust_managed_page_count(page, 1 << h->order); in gather_bootmem_prealloc()
2455 static void __init hugetlb_hstate_alloc_pages(struct hstate *h) in hugetlb_hstate_alloc_pages() argument
2460 if (!hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2478 for (i = 0; i < h->max_huge_pages; ++i) { in hugetlb_hstate_alloc_pages()
2479 if (hstate_is_gigantic(h)) { in hugetlb_hstate_alloc_pages()
2484 if (!alloc_bootmem_huge_page(h)) in hugetlb_hstate_alloc_pages()
2486 } else if (!alloc_pool_huge_page(h, in hugetlb_hstate_alloc_pages()
2492 if (i < h->max_huge_pages) { in hugetlb_hstate_alloc_pages()
2495 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in hugetlb_hstate_alloc_pages()
2497 h->max_huge_pages, buf, i); in hugetlb_hstate_alloc_pages()
2498 h->max_huge_pages = i; in hugetlb_hstate_alloc_pages()
2506 struct hstate *h; in hugetlb_init_hstates() local
2508 for_each_hstate(h) { in hugetlb_init_hstates()
2509 if (minimum_order > huge_page_order(h)) in hugetlb_init_hstates()
2510 minimum_order = huge_page_order(h); in hugetlb_init_hstates()
2513 if (!hstate_is_gigantic(h)) in hugetlb_init_hstates()
2514 hugetlb_hstate_alloc_pages(h); in hugetlb_init_hstates()
2521 struct hstate *h; in report_hugepages() local
2523 for_each_hstate(h) { in report_hugepages()
2526 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32); in report_hugepages()
2528 buf, h->free_huge_pages); in report_hugepages()
2533 static void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2538 if (hstate_is_gigantic(h)) in try_to_free_low()
2543 struct list_head *freel = &h->hugepage_freelists[i]; in try_to_free_low()
2545 if (count >= h->nr_huge_pages) in try_to_free_low()
2550 update_and_free_page(h, page); in try_to_free_low()
2551 h->free_huge_pages--; in try_to_free_low()
2552 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
2557 static inline void try_to_free_low(struct hstate *h, unsigned long count, in try_to_free_low() argument
2568 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed, in adjust_pool_surplus() argument
2576 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2577 if (h->surplus_huge_pages_node[node]) in adjust_pool_surplus()
2581 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) { in adjust_pool_surplus()
2582 if (h->surplus_huge_pages_node[node] < in adjust_pool_surplus()
2583 h->nr_huge_pages_node[node]) in adjust_pool_surplus()
2590 h->surplus_huge_pages += delta; in adjust_pool_surplus()
2591 h->surplus_huge_pages_node[node] += delta; in adjust_pool_surplus()
2595 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages) argument
2596 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid, in set_max_huge_pages() argument
2623 count += h->nr_huge_pages - h->nr_huge_pages_node[nid]; in set_max_huge_pages()
2641 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) { in set_max_huge_pages()
2642 if (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2661 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) { in set_max_huge_pages()
2662 if (!adjust_pool_surplus(h, nodes_allowed, -1)) in set_max_huge_pages()
2666 while (count > persistent_huge_pages(h)) { in set_max_huge_pages()
2677 ret = alloc_pool_huge_page(h, nodes_allowed, in set_max_huge_pages()
2703 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages; in set_max_huge_pages()
2705 try_to_free_low(h, min_count, nodes_allowed); in set_max_huge_pages()
2706 while (min_count < persistent_huge_pages(h)) { in set_max_huge_pages()
2707 if (!free_pool_huge_page(h, nodes_allowed, 0)) in set_max_huge_pages()
2711 while (count < persistent_huge_pages(h)) { in set_max_huge_pages()
2712 if (!adjust_pool_surplus(h, nodes_allowed, 1)) in set_max_huge_pages()
2716 h->max_huge_pages = persistent_huge_pages(h); in set_max_huge_pages()
2753 struct hstate *h; in nr_hugepages_show_common() local
2757 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_show_common()
2759 nr_huge_pages = h->nr_huge_pages; in nr_hugepages_show_common()
2761 nr_huge_pages = h->nr_huge_pages_node[nid]; in nr_hugepages_show_common()
2767 struct hstate *h, int nid, in __nr_hugepages_store_common() argument
2773 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) in __nr_hugepages_store_common()
2794 err = set_max_huge_pages(h, count, nid, n_mask); in __nr_hugepages_store_common()
2803 struct hstate *h; in nr_hugepages_store_common() local
2812 h = kobj_to_hstate(kobj, &nid); in nr_hugepages_store_common()
2813 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len); in nr_hugepages_store_common()
2853 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_show() local
2854 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages); in nr_overcommit_hugepages_show()
2862 struct hstate *h = kobj_to_hstate(kobj, NULL); in nr_overcommit_hugepages_store() local
2864 if (hstate_is_gigantic(h)) in nr_overcommit_hugepages_store()
2872 h->nr_overcommit_huge_pages = input; in nr_overcommit_hugepages_store()
2882 struct hstate *h; in free_hugepages_show() local
2886 h = kobj_to_hstate(kobj, &nid); in free_hugepages_show()
2888 free_huge_pages = h->free_huge_pages; in free_hugepages_show()
2890 free_huge_pages = h->free_huge_pages_node[nid]; in free_hugepages_show()
2899 struct hstate *h = kobj_to_hstate(kobj, NULL); in resv_hugepages_show() local
2900 return sprintf(buf, "%lu\n", h->resv_huge_pages); in resv_hugepages_show()
2907 struct hstate *h; in surplus_hugepages_show() local
2911 h = kobj_to_hstate(kobj, &nid); in surplus_hugepages_show()
2913 surplus_huge_pages = h->surplus_huge_pages; in surplus_hugepages_show()
2915 surplus_huge_pages = h->surplus_huge_pages_node[nid]; in surplus_hugepages_show()
2937 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent, in hugetlb_sysfs_add_hstate() argument
2942 int hi = hstate_index(h); in hugetlb_sysfs_add_hstate()
2944 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent); in hugetlb_sysfs_add_hstate()
2957 struct hstate *h; in hugetlb_sysfs_init() local
2964 for_each_hstate(h) { in hugetlb_sysfs_init()
2965 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj, in hugetlb_sysfs_init()
2968 pr_err("HugeTLB: Unable to add hstate %s", h->name); in hugetlb_sysfs_init()
3030 struct hstate *h; in hugetlb_unregister_node() local
3036 for_each_hstate(h) { in hugetlb_unregister_node()
3037 int idx = hstate_index(h); in hugetlb_unregister_node()
3055 struct hstate *h; in hugetlb_register_node() local
3067 for_each_hstate(h) { in hugetlb_register_node()
3068 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj, in hugetlb_register_node()
3073 h->name, node->dev.id); in hugetlb_register_node()
3190 struct hstate *h; in hugetlb_add_hstate() local
3198 h = &hstates[hugetlb_max_hstate++]; in hugetlb_add_hstate()
3199 h->order = order; in hugetlb_add_hstate()
3200 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1); in hugetlb_add_hstate()
3201 h->nr_huge_pages = 0; in hugetlb_add_hstate()
3202 h->free_huge_pages = 0; in hugetlb_add_hstate()
3204 INIT_LIST_HEAD(&h->hugepage_freelists[i]); in hugetlb_add_hstate()
3205 INIT_LIST_HEAD(&h->hugepage_activelist); in hugetlb_add_hstate()
3206 h->next_nid_to_alloc = first_memory_node; in hugetlb_add_hstate()
3207 h->next_nid_to_free = first_memory_node; in hugetlb_add_hstate()
3208 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB", in hugetlb_add_hstate()
3209 huge_page_size(h)/1024); in hugetlb_add_hstate()
3211 parsed_hstate = h; in hugetlb_add_hstate()
3275 struct hstate *h; in hugepagesz_setup() local
3285 h = size_to_hstate(size); in hugepagesz_setup()
3286 if (h) { in hugepagesz_setup()
3294 if (!parsed_default_hugepagesz || h != &default_hstate || in hugepagesz_setup()
3305 parsed_hstate = h; in hugepagesz_setup()
3360 static unsigned int allowed_mems_nr(struct hstate *h) in allowed_mems_nr() argument
3365 unsigned int *array = h->free_huge_pages_node; in allowed_mems_nr()
3366 gfp_t gfp_mask = htlb_alloc_mask(h); in allowed_mems_nr()
3400 struct hstate *h = &default_hstate; in hugetlb_sysctl_handler_common() local
3401 unsigned long tmp = h->max_huge_pages; in hugetlb_sysctl_handler_common()
3413 ret = __nr_hugepages_store_common(obey_mempolicy, h, in hugetlb_sysctl_handler_common()
3439 struct hstate *h = &default_hstate; in hugetlb_overcommit_handler() local
3446 tmp = h->nr_overcommit_huge_pages; in hugetlb_overcommit_handler()
3448 if (write && hstate_is_gigantic(h)) in hugetlb_overcommit_handler()
3458 h->nr_overcommit_huge_pages = tmp; in hugetlb_overcommit_handler()
3469 struct hstate *h; in hugetlb_report_meminfo() local
3475 for_each_hstate(h) { in hugetlb_report_meminfo()
3476 unsigned long count = h->nr_huge_pages; in hugetlb_report_meminfo()
3478 total += (PAGE_SIZE << huge_page_order(h)) * count; in hugetlb_report_meminfo()
3480 if (h == &default_hstate) in hugetlb_report_meminfo()
3488 h->free_huge_pages, in hugetlb_report_meminfo()
3489 h->resv_huge_pages, in hugetlb_report_meminfo()
3490 h->surplus_huge_pages, in hugetlb_report_meminfo()
3491 (PAGE_SIZE << huge_page_order(h)) / 1024); in hugetlb_report_meminfo()
3499 struct hstate *h = &default_hstate; in hugetlb_report_node_meminfo() local
3508 nid, h->nr_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3509 nid, h->free_huge_pages_node[nid], in hugetlb_report_node_meminfo()
3510 nid, h->surplus_huge_pages_node[nid]); in hugetlb_report_node_meminfo()
3515 struct hstate *h; in hugetlb_show_meminfo() local
3522 for_each_hstate(h) in hugetlb_show_meminfo()
3525 h->nr_huge_pages_node[nid], in hugetlb_show_meminfo()
3526 h->free_huge_pages_node[nid], in hugetlb_show_meminfo()
3527 h->surplus_huge_pages_node[nid], in hugetlb_show_meminfo()
3528 1UL << (huge_page_order(h) + PAGE_SHIFT - 10)); in hugetlb_show_meminfo()
3540 struct hstate *h; in hugetlb_total_pages() local
3543 for_each_hstate(h) in hugetlb_total_pages()
3544 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h); in hugetlb_total_pages()
3548 static int hugetlb_acct_memory(struct hstate *h, long delta) in hugetlb_acct_memory() argument
3577 if (gather_surplus_pages(h, delta) < 0) in hugetlb_acct_memory()
3580 if (delta > allowed_mems_nr(h)) { in hugetlb_acct_memory()
3581 return_unused_surplus_pages(h, delta); in hugetlb_acct_memory()
3588 return_unused_surplus_pages(h, (unsigned long) -delta); in hugetlb_acct_memory()
3613 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close() local
3622 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
3623 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3633 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_vm_op_close()
3742 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range() local
3743 unsigned long sz = huge_page_size(h); in copy_hugetlb_page_range()
3789 dst_ptl = huge_pte_lock(h, dst, dst_pte); in copy_hugetlb_page_range()
3790 src_ptl = huge_pte_lockptr(h, src, src_pte); in copy_hugetlb_page_range()
3832 hugetlb_count_add(pages_per_huge_page(h), dst); in copy_hugetlb_page_range()
3856 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range() local
3857 unsigned long sz = huge_page_size(h); in __unmap_hugepage_range()
3861 BUG_ON(start & ~huge_page_mask(h)); in __unmap_hugepage_range()
3862 BUG_ON(end & ~huge_page_mask(h)); in __unmap_hugepage_range()
3884 ptl = huge_pte_lock(h, mm, ptep); in __unmap_hugepage_range()
3930 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); in __unmap_hugepage_range()
3934 hugetlb_count_sub(pages_per_huge_page(h), mm); in __unmap_hugepage_range()
3938 tlb_remove_page_size(tlb, page, huge_page_size(h)); in __unmap_hugepage_range()
4001 struct hstate *h = hstate_vma(vma); in unmap_ref_private() local
4010 address = address & huge_page_mask(h); in unmap_ref_private()
4043 address + huge_page_size(h), page); in unmap_ref_private()
4059 struct hstate *h = hstate_vma(vma); in hugetlb_cow() local
4063 unsigned long haddr = address & huge_page_mask(h); in hugetlb_cow()
4114 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
4139 pages_per_huge_page(h)); in hugetlb_cow()
4143 haddr + huge_page_size(h)); in hugetlb_cow()
4151 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_cow()
4169 restore_reserve_on_error(h, vma, haddr, new_page); in hugetlb_cow()
4179 static struct page *hugetlbfs_pagecache_page(struct hstate *h, in hugetlbfs_pagecache_page() argument
4186 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
4195 static bool hugetlbfs_pagecache_present(struct hstate *h, in hugetlbfs_pagecache_present() argument
4203 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
4215 struct hstate *h = hstate_inode(inode); in huge_add_to_page_cache() local
4229 inode->i_blocks += blocks_per_huge_page(h); in huge_add_to_page_cache()
4239 struct hstate *h = hstate_vma(vma); in hugetlb_no_page() local
4246 unsigned long haddr = address & huge_page_mask(h); in hugetlb_no_page()
4265 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_no_page()
4318 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
4328 clear_huge_page(page, address, pages_per_huge_page(h)); in hugetlb_no_page()
4356 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_no_page()
4368 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
4373 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
4376 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_no_page()
4390 hugetlb_count_add(pages_per_huge_page(h), mm); in hugetlb_no_page()
4414 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4453 struct hstate *h = hstate_vma(vma); in hugetlb_fault() local
4456 unsigned long haddr = address & huge_page_mask(h); in hugetlb_fault()
4458 ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4471 VM_FAULT_SET_HINDEX(hstate_index(h)); in hugetlb_fault()
4487 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h)); in hugetlb_fault()
4498 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
4529 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
4534 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
4537 pagecache_page = hugetlbfs_pagecache_page(h, in hugetlb_fault()
4541 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_fault()
4614 struct hstate *h = hstate_vma(dst_vma); in hugetlb_mcopy_atomic_pte() local
4628 pages_per_huge_page(h), false); in hugetlb_mcopy_atomic_pte()
4650 idx = vma_hugecache_offset(h, dst_vma, dst_addr); in hugetlb_mcopy_atomic_pte()
4656 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4672 ptl = huge_pte_lockptr(h, dst_mm, dst_pte); in hugetlb_mcopy_atomic_pte()
4684 size = i_size_read(mapping->host) >> huge_page_shift(h); in hugetlb_mcopy_atomic_pte()
4709 hugetlb_count_add(pages_per_huge_page(h), dst_mm); in hugetlb_mcopy_atomic_pte()
4738 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page() local
4763 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h), in follow_hugetlb_page()
4764 huge_page_size(h)); in follow_hugetlb_page()
4766 ptl = huge_pte_lock(h, mm, pte); in follow_hugetlb_page()
4777 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
4842 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT; in follow_hugetlb_page()
4850 (vaddr + huge_page_size(h) < vma->vm_end) && in follow_hugetlb_page()
4851 (remainder >= pages_per_huge_page(h))) { in follow_hugetlb_page()
4852 vaddr += huge_page_size(h); in follow_hugetlb_page()
4853 remainder -= pages_per_huge_page(h); in follow_hugetlb_page()
4854 i += pages_per_huge_page(h); in follow_hugetlb_page()
4888 pfn_offset < pages_per_huge_page(h)) { in follow_hugetlb_page()
4923 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection() local
4942 for (; address < end; address += huge_page_size(h)) { in hugetlb_change_protection()
4944 ptep = huge_pte_offset(mm, address, huge_page_size(h)); in hugetlb_change_protection()
4947 ptl = huge_pte_lock(h, mm, ptep); in hugetlb_change_protection()
4968 newpte, huge_page_size(h)); in hugetlb_change_protection()
5005 return pages << h->order; in hugetlb_change_protection()
5014 struct hstate *h = hstate_inode(inode); in hugetlb_reserve_pages() local
5068 hstate_index(h), chg * pages_per_huge_page(h), &h_cg); in hugetlb_reserve_pages()
5079 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h); in hugetlb_reserve_pages()
5097 ret = hugetlb_acct_memory(h, gbl_reserve); in hugetlb_reserve_pages()
5114 add = region_add(resv_map, from, to, regions_needed, h, h_cg); in hugetlb_reserve_pages()
5117 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_reserve_pages()
5130 hstate_index(h), in hugetlb_reserve_pages()
5131 (chg - add) * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
5135 hugetlb_acct_memory(h, -rsv_adjust); in hugetlb_reserve_pages()
5143 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h), in hugetlb_reserve_pages()
5144 chg * pages_per_huge_page(h), h_cg); in hugetlb_reserve_pages()
5160 struct hstate *h = hstate_inode(inode); in hugetlb_unreserve_pages() local
5182 inode->i_blocks -= (blocks_per_huge_page(h) * freed); in hugetlb_unreserve_pages()
5190 hugetlb_acct_memory(h, -gbl_reserve); in hugetlb_unreserve_pages()
5563 struct hstate *h = page_hstate(oldpage); in move_hugetlb_state() local
5586 if (h->surplus_huge_pages_node[old_nid]) { in move_hugetlb_state()
5587 h->surplus_huge_pages_node[old_nid]--; in move_hugetlb_state()
5588 h->surplus_huge_pages_node[new_nid]++; in move_hugetlb_state()
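
Most of the matches above funnel through a small set of hstate accessors (huge_page_order(), huge_page_shift(), huge_page_size(), huge_page_mask(), pages_per_huge_page()) that all derive from h->order. As a rough illustration of how those quantities relate, here is a standalone userspace C sketch; it is not kernel code, the struct name hstate_sketch is hypothetical, and the 4 KiB base page (PAGE_SHIFT 12), order 9 (x86-64 2 MiB huge pages) and the example address are assumptions made only for the demonstration.

    #include <stdio.h>

    /* Userspace sketch only: mirrors the arithmetic the hstate helpers
     * perform on h->order, assuming a 4 KiB base page and order 9
     * (2 MiB huge pages on x86-64). Not kernel code. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct hstate_sketch {
            unsigned int  order;    /* stands in for h->order */
            unsigned long mask;     /* stands in for h->mask  */
    };

    static unsigned long huge_page_size(const struct hstate_sketch *h)
    {
            return PAGE_SIZE << h->order;        /* bytes per huge page */
    }

    static unsigned int huge_page_shift(const struct hstate_sketch *h)
    {
            return h->order + PAGE_SHIFT;
    }

    static unsigned long pages_per_huge_page(const struct hstate_sketch *h)
    {
            return 1UL << h->order;              /* base pages per huge page */
    }

    int main(void)
    {
            struct hstate_sketch h = { .order = 9 };

            /* Same form as the mask set up in hugetlb_add_hstate() above. */
            h.mask = ~((1UL << (h.order + PAGE_SHIFT)) - 1);

            unsigned long addr  = 0x7f0040321000UL;   /* arbitrary example address   */
            unsigned long haddr = addr & h.mask;      /* address & huge_page_mask(h) */

            printf("size=%lu KiB shift=%u pages=%lu haddr=%#lx\n",
                   huge_page_size(&h) / 1024, huge_page_shift(&h),
                   pages_per_huge_page(&h), haddr);
            return 0;
    }

For order 9 this prints a 2048 KiB page size, shift 21, 512 base pages per huge page, and the example address rounded down to its 2 MiB boundary, which is the same rounding the faulting paths above perform with "address & huge_page_mask(h)".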